Lines matching refs: q (references to the struct request_queue argument q in the block-layer queue setup helpers)
33 void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn) in blk_queue_prep_rq() argument
35 q->prep_rq_fn = pfn; in blk_queue_prep_rq()
50 void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn) in blk_queue_unprep_rq() argument
52 q->unprep_rq_fn = ufn; in blk_queue_unprep_rq()
72 void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn) in blk_queue_merge_bvec() argument
74 q->merge_bvec_fn = mbfn; in blk_queue_merge_bvec()
78 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn) in blk_queue_softirq_done() argument
80 q->softirq_done_fn = fn; in blk_queue_softirq_done()
84 void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout) in blk_queue_rq_timeout() argument
86 q->rq_timeout = timeout; in blk_queue_rq_timeout()
90 void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn) in blk_queue_rq_timed_out() argument
92 q->rq_timed_out_fn = fn; in blk_queue_rq_timed_out()
96 void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn) in blk_queue_lld_busy() argument
98 q->lld_busy_fn = fn; in blk_queue_lld_busy()
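These setters do nothing more than store a driver-supplied callback or tunable on the queue. Below is a minimal sketch of how a legacy (pre-blk-mq) request-based driver might register a few of them; my_prep_rq, my_softirq_done and my_rq_timed_out are hypothetical callbacks whose signatures are assumed to follow the prep_rq_fn, softirq_done_fn and rq_timed_out_fn typedefs of this kernel generation.

#include <linux/blkdev.h>

/* Hypothetical callbacks; signatures assumed to match the legacy typedefs. */
static int my_prep_rq(struct request_queue *q, struct request *rq)
{
        /* Build the device command for this request; BLKPREP_OK lets it dispatch. */
        return BLKPREP_OK;
}

static void my_softirq_done(struct request *rq)
{
        /* Second half of completion, run from the block softirq. */
        blk_end_request_all(rq, 0);
}

static enum blk_eh_timer_return my_rq_timed_out(struct request *rq)
{
        /* Give the command one more timeout period before giving up on it. */
        return BLK_EH_RESET_TIMER;
}

static void my_register_queue_callbacks(struct request_queue *q)
{
        blk_queue_prep_rq(q, my_prep_rq);
        blk_queue_softirq_done(q, my_softirq_done);
        blk_queue_rq_timeout(q, 30 * HZ);
        blk_queue_rq_timed_out(q, my_rq_timed_out);
}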
176 void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) in blk_queue_make_request() argument
181 q->nr_requests = BLKDEV_MAX_RQ; in blk_queue_make_request()
183 q->make_request_fn = mfn; in blk_queue_make_request()
184 blk_queue_dma_alignment(q, 511); in blk_queue_make_request()
185 blk_queue_congestion_threshold(q); in blk_queue_make_request()
186 q->nr_batching = BLK_BATCH_REQ; in blk_queue_make_request()
188 blk_set_default_limits(&q->limits); in blk_queue_make_request()
193 blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); in blk_queue_make_request()
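blk_queue_make_request() is the entry point for bio-based drivers that bypass the request layer: it resets nr_requests, installs the submission hook, and applies conservative defaults (511-byte DMA alignment mask, BLK_BOUNCE_HIGH). A sketch of such a setup, assuming the void-returning make_request_fn of this kernel generation and a hypothetical my_make_request() that completes bios directly:

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Hypothetical handler for a bio-based (ramdisk/stacking-style) driver. */
static void my_make_request(struct request_queue *q, struct bio *bio)
{
        /* Service the bio in place; bio_endio() takes an error argument here. */
        bio_endio(bio, 0);
}

static struct request_queue *my_create_queue(void)
{
        struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

        if (!q)
                return NULL;
        /* Installs my_make_request and the defaults listed above. */
        blk_queue_make_request(q, my_make_request);
        return q;
}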
208 void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr) in blk_queue_bounce_limit() argument
213 q->bounce_gfp = GFP_NOIO; in blk_queue_bounce_limit()
222 q->limits.bounce_pfn = max(max_low_pfn, b_pfn); in blk_queue_bounce_limit()
226 q->limits.bounce_pfn = b_pfn; in blk_queue_bounce_limit()
230 q->bounce_gfp = GFP_NOIO | GFP_DMA; in blk_queue_bounce_limit()
231 q->limits.bounce_pfn = b_pfn; in blk_queue_bounce_limit()
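blk_queue_bounce_limit() records the highest address the device can DMA to; pages above it are bounced into low memory (with GFP_DMA allocations when the limit falls inside the ISA DMA zone). A short sketch for a device limited to 32-bit addressing (the mask is illustrative):

#include <linux/blkdev.h>
#include <linux/dma-mapping.h>

static void my_set_bounce_limit(struct request_queue *q)
{
        /* DMA is possible anywhere below 4 GB; bounce anything above that. */
        blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
}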
272 void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors) in blk_queue_max_hw_sectors() argument
274 blk_limits_max_hw_sectors(&q->limits, max_hw_sectors); in blk_queue_max_hw_sectors()
291 void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors) in blk_queue_chunk_sectors() argument
294 q->limits.chunk_sectors = chunk_sectors; in blk_queue_chunk_sectors()
303 void blk_queue_max_discard_sectors(struct request_queue *q, in blk_queue_max_discard_sectors() argument
306 q->limits.max_discard_sectors = max_discard_sectors; in blk_queue_max_discard_sectors()
315 void blk_queue_max_write_same_sectors(struct request_queue *q, in blk_queue_max_write_same_sectors() argument
318 q->limits.max_write_same_sectors = max_write_same_sectors; in blk_queue_max_write_same_sectors()
331 void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments) in blk_queue_max_segments() argument
339 q->limits.max_segments = max_segments; in blk_queue_max_segments()
352 void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) in blk_queue_max_segment_size() argument
360 q->limits.max_segment_size = max_size; in blk_queue_max_segment_size()
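The max_* setters all funnel into q->limits and bound the shape of a single request: total sectors per command, discard and WRITE SAME payload sizes, the number and size of scatter-gather segments, and, via blk_queue_chunk_sectors(), a boundary no merged request may straddle. A sketch with illustrative values (the numbers are hypothetical, not recommendations):

#include <linux/blkdev.h>

static void my_set_request_limits(struct request_queue *q)
{
        blk_queue_max_hw_sectors(q, 1024);              /* 512 KB per command */
        blk_queue_max_segments(q, 128);                 /* scatter-gather entries */
        blk_queue_max_segment_size(q, 64 * 1024);       /* bytes per entry */
        blk_queue_max_discard_sectors(q, 8 * 1024);     /* 4 MB per discard */
        blk_queue_max_write_same_sectors(q, 0);         /* no WRITE SAME support */
}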
374 void blk_queue_logical_block_size(struct request_queue *q, unsigned short size) in blk_queue_logical_block_size() argument
376 q->limits.logical_block_size = size; in blk_queue_logical_block_size()
378 if (q->limits.physical_block_size < size) in blk_queue_logical_block_size()
379 q->limits.physical_block_size = size; in blk_queue_logical_block_size()
381 if (q->limits.io_min < q->limits.physical_block_size) in blk_queue_logical_block_size()
382 q->limits.io_min = q->limits.physical_block_size; in blk_queue_logical_block_size()
396 void blk_queue_physical_block_size(struct request_queue *q, unsigned int size) in blk_queue_physical_block_size() argument
398 q->limits.physical_block_size = size; in blk_queue_physical_block_size()
400 if (q->limits.physical_block_size < q->limits.logical_block_size) in blk_queue_physical_block_size()
401 q->limits.physical_block_size = q->limits.logical_block_size; in blk_queue_physical_block_size()
403 if (q->limits.io_min < q->limits.physical_block_size) in blk_queue_physical_block_size()
404 q->limits.io_min = q->limits.physical_block_size; in blk_queue_physical_block_size()
419 void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset) in blk_queue_alignment_offset() argument
421 q->limits.alignment_offset = in blk_queue_alignment_offset()
422 offset & (q->limits.physical_block_size - 1); in blk_queue_alignment_offset()
423 q->limits.misaligned = 0; in blk_queue_alignment_offset()
464 void blk_queue_io_min(struct request_queue *q, unsigned int min) in blk_queue_io_min() argument
466 blk_limits_io_min(&q->limits, min); in blk_queue_io_min()
502 void blk_queue_io_opt(struct request_queue *q, unsigned int opt) in blk_queue_io_opt() argument
504 blk_limits_io_opt(&q->limits, opt); in blk_queue_io_opt()
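Taken together these describe the device's I/O topology, and the setters keep it self-consistent: the physical block size is raised to at least the logical block size, and io_min to at least the physical block size, while io_opt advertises the optimal I/O granularity. A sketch for a hypothetical 512e drive (4 KB physical sectors addressed in 512-byte logical blocks):

#include <linux/blkdev.h>

static void my_set_topology(struct request_queue *q)
{
        blk_queue_logical_block_size(q, 512);   /* addressing unit */
        blk_queue_physical_block_size(q, 4096); /* internal sector size */
        blk_queue_alignment_offset(q, 0);       /* LBA 0 starts a physical sector */
        blk_queue_io_min(q, 4096);              /* avoid read-modify-write */
        blk_queue_io_opt(q, 128 * 1024);        /* e.g. stripe-sized I/O */
}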
713 void blk_queue_dma_pad(struct request_queue *q, unsigned int mask) in blk_queue_dma_pad() argument
715 q->dma_pad_mask = mask; in blk_queue_dma_pad()
729 void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask) in blk_queue_update_dma_pad() argument
731 if (mask > q->dma_pad_mask) in blk_queue_update_dma_pad()
732 q->dma_pad_mask = mask; in blk_queue_update_dma_pad()
757 int blk_queue_dma_drain(struct request_queue *q, in blk_queue_dma_drain() argument
761 if (queue_max_segments(q) < 2) in blk_queue_dma_drain()
764 blk_queue_max_segments(q, queue_max_segments(q) - 1); in blk_queue_dma_drain()
765 q->dma_drain_needed = dma_drain_needed; in blk_queue_dma_drain()
766 q->dma_drain_buffer = buf; in blk_queue_dma_drain()
767 q->dma_drain_size = size; in blk_queue_dma_drain()
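blk_queue_dma_pad() and blk_queue_update_dma_pad() pad transfer lengths up to a mask, and blk_queue_dma_drain() appends a drain buffer as an extra scatter-gather element whenever the dma_drain_needed callback says so, which is why it steals one entry from max_segments and fails when fewer than two remain. A sketch in the style of an ATAPI-like driver; the predicate, the use of rq->cmd_type, and my_set_dma_quirks() are assumptions for illustration:

#include <linux/blkdev.h>
#include <linux/slab.h>

#define MY_DRAIN_SIZE   256

/* Hypothetical predicate: only packet (BLOCK_PC) commands need draining. */
static int my_dma_drain_needed(struct request *rq)
{
        return rq->cmd_type == REQ_TYPE_BLOCK_PC;
}

static int my_set_dma_quirks(struct request_queue *q)
{
        void *buf = kmalloc(MY_DRAIN_SIZE, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;
        blk_queue_update_dma_pad(q, 3);         /* pad transfers to 4 bytes */
        return blk_queue_dma_drain(q, my_dma_drain_needed, buf, MY_DRAIN_SIZE);
}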
778 void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) in blk_queue_segment_boundary() argument
786 q->limits.seg_boundary_mask = mask; in blk_queue_segment_boundary()
800 void blk_queue_dma_alignment(struct request_queue *q, int mask) in blk_queue_dma_alignment() argument
802 q->dma_alignment = mask; in blk_queue_dma_alignment()
820 void blk_queue_update_dma_alignment(struct request_queue *q, int mask) in blk_queue_update_dma_alignment() argument
824 if (mask > q->dma_alignment) in blk_queue_update_dma_alignment()
825 q->dma_alignment = mask; in blk_queue_update_dma_alignment()
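blk_queue_segment_boundary() keeps any single segment from crossing an address-mask boundary, while blk_queue_dma_alignment() and blk_queue_update_dma_alignment() constrain the alignment of user buffers for direct I/O; the update variant only ever raises (tightens) the mask. Illustrative values:

#include <linux/blkdev.h>

static void my_set_dma_alignment(struct request_queue *q)
{
        /* No segment may cross a 64 KB boundary. */
        blk_queue_segment_boundary(q, 0xffff);
        /* Require 4-byte-aligned user buffers for direct I/O ... */
        blk_queue_dma_alignment(q, 3);
        /* ... then tighten to 512 bytes if a quirk demands it. */
        blk_queue_update_dma_alignment(q, 511);
}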
838 void blk_queue_flush(struct request_queue *q, unsigned int flush) in blk_queue_flush() argument
845 q->flush_flags = flush & (REQ_FLUSH | REQ_FUA); in blk_queue_flush()
849 void blk_queue_flush_queueable(struct request_queue *q, bool queueable) in blk_queue_flush_queueable() argument
851 q->flush_not_queueable = !queueable; in blk_queue_flush_queueable()
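blk_queue_flush() advertises which cache-control features the device really supports, filtering everything except REQ_FLUSH and REQ_FUA, and blk_queue_flush_queueable() records whether a flush may be queued alongside other commands. A sketch for a device with a volatile write-back cache and FUA support:

#include <linux/blkdev.h>

static void my_set_cache_features(struct request_queue *q)
{
        /* Write-back cache: needs explicit flushes, honors FUA writes. */
        blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
        /* This device wants flushes issued with the queue otherwise drained. */
        blk_queue_flush_queueable(q, false);
}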