Lines matching refs:q — uses of the struct request_queue pointer q in the block layer header include/linux/blkdev.h. The number on the left of each line is the kernel source line; the trailing "member", "argument", "local" and "in function()" tags note how q is used at that point.

57 	struct request_queue	*q;	/* the queue this rl belongs to */  member
107 struct request_queue *q; member
236 typedef void (request_fn_proc) (struct request_queue *q);
237 typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
252 typedef int (lld_busy_fn) (struct request_queue *q);
526 static inline void queue_lockdep_assert_held(struct request_queue *q) in queue_lockdep_assert_held() argument
528 if (q->queue_lock) in queue_lockdep_assert_held()
529 lockdep_assert_held(q->queue_lock); in queue_lockdep_assert_held()
533 struct request_queue *q) in queue_flag_set_unlocked() argument
535 __set_bit(flag, &q->queue_flags); in queue_flag_set_unlocked()
539 struct request_queue *q) in queue_flag_test_and_clear() argument
541 queue_lockdep_assert_held(q); in queue_flag_test_and_clear()
543 if (test_bit(flag, &q->queue_flags)) { in queue_flag_test_and_clear()
544 __clear_bit(flag, &q->queue_flags); in queue_flag_test_and_clear()
552 struct request_queue *q) in queue_flag_test_and_set() argument
554 queue_lockdep_assert_held(q); in queue_flag_test_and_set()
556 if (!test_bit(flag, &q->queue_flags)) { in queue_flag_test_and_set()
557 __set_bit(flag, &q->queue_flags); in queue_flag_test_and_set()
564 static inline void queue_flag_set(unsigned int flag, struct request_queue *q) in queue_flag_set() argument
566 queue_lockdep_assert_held(q); in queue_flag_set()
567 __set_bit(flag, &q->queue_flags); in queue_flag_set()
571 struct request_queue *q) in queue_flag_clear_unlocked() argument
573 __clear_bit(flag, &q->queue_flags); in queue_flag_clear_unlocked()
576 static inline int queue_in_flight(struct request_queue *q) in queue_in_flight() argument
578 return q->in_flight[0] + q->in_flight[1]; in queue_in_flight()
581 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) in queue_flag_clear() argument
583 queue_lockdep_assert_held(q); in queue_flag_clear()
584 __clear_bit(flag, &q->queue_flags); in queue_flag_clear()
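The helpers above manipulate bits in q->queue_flags. The plain variants (queue_flag_set(), queue_flag_clear() and the test-and-* forms) assert via queue_lockdep_assert_held() that q->queue_lock is held, while the _unlocked variants are meant for contexts where the queue is not yet (or no longer) visible to anyone else. A minimal sketch of a hypothetical caller, assuming <linux/blkdev.h>:

#include <linux/blkdev.h>

/* Hypothetical: disable merging on a live queue; later, once the queue
 * is no longer visible to other contexts, clear the flag without the lock. */
static void example_toggle_nomerges(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        queue_flag_set(QUEUE_FLAG_NOMERGES, q);         /* lockdep-checked variant */
        spin_unlock_irqrestore(q->queue_lock, flags);

        /* ... later, during early setup or teardown: */
        queue_flag_clear_unlocked(QUEUE_FLAG_NOMERGES, q);
}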
587 #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) argument
588 #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) argument
589 #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags) argument
590 #define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags) argument
591 #define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags) argument
592 #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags) argument
593 #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) argument
594 #define blk_queue_noxmerges(q) \ argument
595 test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
596 #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) argument
597 #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) argument
598 #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) argument
599 #define blk_queue_stackable(q) \ argument
600 test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
601 #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) argument
602 #define blk_queue_secdiscard(q) (blk_queue_discard(q) && \ argument
603 test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
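These macros simply test the corresponding QUEUE_FLAG_* bit; blk_queue_secdiscard() additionally requires plain discard support. A sketch of how a submitter might gate a (secure) discard on them, with bdev_get_queue() taken from the same header:

#include <linux/blkdev.h>

/* Hypothetical gate before building a discard bio for bdev. */
static bool example_can_discard(struct block_device *bdev, bool secure)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (!q || !blk_queue_discard(q))
                return false;
        if (secure && !blk_queue_secdiscard(q))
                return false;
        return true;
}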
630 static inline bool queue_is_rq_based(struct request_queue *q) in queue_is_rq_based() argument
632 return q->request_fn || q->mq_ops; in queue_is_rq_based()
635 static inline unsigned int blk_queue_cluster(struct request_queue *q) in blk_queue_cluster() argument
637 return q->limits.cluster; in blk_queue_cluster()
741 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
747 static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio) in blk_queue_bounce() argument
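blk_queue_bounce() (a no-op stub when bouncing is compiled out) lets a driver that cannot reach highmem pages have the block layer substitute bounced copies before the data is touched. A hedged sketch of a possible call site, a make_request-style driver doing PIO; the transfer helper is a placeholder:

#include <linux/blkdev.h>

static void example_pio_transfer(struct bio *bio);      /* placeholder */

static void example_make_request(struct request_queue *q, struct bio *bio)
{
        blk_queue_bounce(q, &bio);      /* may replace bio with a bounced copy */
        example_pio_transfer(bio);      /* placeholder for the actual transfer */
}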
795 extern void blk_rq_init(struct request_queue *q, struct request *rq);
805 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
806 extern int blk_lld_busy(struct request_queue *q);
812 extern int blk_insert_cloned_request(struct request_queue *q,
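blk_rq_check_limits(), blk_lld_busy() and blk_insert_cloned_request() serve request-stacking drivers such as the request-based dm path. A hedged sketch of handing a prepared clone to the lower queue (names and error handling are illustrative; whether the caller also checks the limits itself is a design choice):

#include <linux/blkdev.h>

static int example_dispatch_clone(struct request_queue *below_q,
                                  struct request *clone)
{
        if (blk_lld_busy(below_q))
                return -EBUSY;          /* lower device asks us to back off */
        if (blk_rq_check_limits(below_q, clone))
                return -EIO;            /* clone exceeds the lower queue's limits */
        return blk_insert_cloned_request(below_q, clone);
}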
829 static inline void blk_clear_queue_congested(struct request_queue *q, int sync) in blk_clear_queue_congested() argument
831 clear_bdi_congested(&q->backing_dev_info, sync); in blk_clear_queue_congested()
838 static inline void blk_set_queue_congested(struct request_queue *q, int sync) in blk_set_queue_congested() argument
840 set_bdi_congested(&q->backing_dev_info, sync); in blk_set_queue_congested()
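blk_set_queue_congested()/blk_clear_queue_congested() just forward to the queue's backing_dev_info so writeback backs off; blk-core itself flips them as its request list crosses the congestion thresholds. A sketch of the call pattern in a hypothetical, similar throttling path:

#include <linux/blkdev.h>

static void example_pool_pressure(struct request_queue *q, bool full)
{
        if (full) {
                blk_set_queue_congested(q, BLK_RW_SYNC);
                blk_set_queue_congested(q, BLK_RW_ASYNC);
        } else {
                blk_clear_queue_congested(q, BLK_RW_SYNC);
                blk_clear_queue_congested(q, BLK_RW_ASYNC);
        }
}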
843 extern void blk_start_queue(struct request_queue *q);
844 extern void blk_stop_queue(struct request_queue *q);
845 extern void blk_sync_queue(struct request_queue *q);
846 extern void __blk_stop_queue(struct request_queue *q);
847 extern void __blk_run_queue(struct request_queue *q);
849 extern void blk_run_queue_async(struct request_queue *q);
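blk_stop_queue(), blk_start_queue() and __blk_run_queue() expect q->queue_lock to be held; blk_run_queue_async() defers the dispatch to kblockd instead of recursing into request_fn. A sketch of a completion handler restarting a queue that the request_fn had stopped (see the request_fn sketch further below):

#include <linux/blkdev.h>

static void example_hw_completion(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_queue_stopped(q))
                blk_start_queue(q);     /* clears STOPPED and runs the queue */
        else
                blk_run_queue_async(q); /* let kblockd call request_fn later */
        spin_unlock_irqrestore(q->queue_lock, flags);
}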
903 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, in blk_queue_get_max_sectors() argument
907 return min(q->limits.max_discard_sectors, UINT_MAX >> 9); in blk_queue_get_max_sectors()
910 return q->limits.max_write_same_sectors; in blk_queue_get_max_sectors()
912 return q->limits.max_sectors; in blk_queue_get_max_sectors()
919 static inline unsigned int blk_max_size_offset(struct request_queue *q, in blk_max_size_offset() argument
922 if (!q->limits.chunk_sectors) in blk_max_size_offset()
923 return q->limits.max_sectors; in blk_max_size_offset()
925 return q->limits.chunk_sectors - in blk_max_size_offset()
926 (offset & (q->limits.chunk_sectors - 1)); in blk_max_size_offset()
931 struct request_queue *q = rq->q; in blk_rq_get_max_sectors() local
934 return q->limits.max_hw_sectors; in blk_rq_get_max_sectors()
936 if (!q->limits.chunk_sectors) in blk_rq_get_max_sectors()
937 return blk_queue_get_max_sectors(q, rq->cmd_flags); in blk_rq_get_max_sectors()
939 return min(blk_max_size_offset(q, blk_rq_pos(rq)), in blk_rq_get_max_sectors()
940 blk_queue_get_max_sectors(q, rq->cmd_flags)); in blk_rq_get_max_sectors()
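blk_max_size_offset() caps an I/O so it does not cross a chunk_sectors boundary; the mask arithmetic assumes chunk_sectors is a power of two. A worked example with illustrative numbers:

/* chunk_sectors = 256, request starting at sector 1000:
 *   1000 & (256 - 1) = 232   sectors already consumed inside the chunk
 *   256 - 232        =  24   sectors left before the chunk boundary
 * blk_rq_get_max_sectors() then returns
 *   min(24, blk_queue_get_max_sectors(q, rq->cmd_flags)).
 */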
957 extern struct request *blk_peek_request(struct request_queue *q);
959 extern struct request *blk_fetch_request(struct request_queue *q);
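blk_peek_request() looks at the next request without starting it; blk_fetch_request() peeks and dequeues in one step. A sketch of the single-queue request_fn pattern these support; request_fn is invoked with q->queue_lock held, and the hardware call is a placeholder:

#include <linux/blkdev.h>

static bool example_hw_queue_rq(struct request *rq);    /* placeholder */

static void example_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_fetch_request(q)) != NULL) {
                if (!example_hw_queue_rq(rq)) {
                        blk_requeue_request(q, rq);     /* put it back on the queue */
                        blk_stop_queue(q);              /* resume from the completion IRQ */
                        break;
                }
        }
}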
1009 extern void blk_queue_max_discard_sectors(struct request_queue *q,
1011 extern void blk_queue_max_write_same_sectors(struct request_queue *q,
1015 extern void blk_queue_alignment_offset(struct request_queue *q,
1018 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
1020 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
1032 extern int blk_queue_dma_drain(struct request_queue *q,
1035 extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
1045 extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
1046 extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
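The blk_queue_* setters above are normally called once at probe time to describe the device to the block layer. A hypothetical sketch; the values are illustrative, not recommendations:

#include <linux/blkdev.h>

static void example_setup_limits(struct request_queue *q)
{
        blk_queue_io_min(q, 4096);                      /* smallest efficient I/O */
        blk_queue_io_opt(q, 64 * 1024);                 /* preferred I/O size */
        blk_queue_max_discard_sectors(q, 8192);         /* 4 MiB discards */
        blk_queue_max_write_same_sectors(q, 0);         /* no WRITE SAME support */
        blk_queue_flush(q, REQ_FLUSH | REQ_FUA);        /* volatile cache + FUA */
}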
1062 extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
1063 extern int blk_pre_runtime_suspend(struct request_queue *q);
1064 extern void blk_post_runtime_suspend(struct request_queue *q, int err);
1065 extern void blk_pre_runtime_resume(struct request_queue *q);
1066 extern void blk_post_runtime_resume(struct request_queue *q, int err);
1068 static inline void blk_pm_runtime_init(struct request_queue *q, in blk_pm_runtime_init() argument
1070 static inline int blk_pre_runtime_suspend(struct request_queue *q) in blk_pre_runtime_suspend() argument
1074 static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {} in blk_post_runtime_suspend() argument
1075 static inline void blk_pre_runtime_resume(struct request_queue *q) {} in blk_pre_runtime_resume() argument
1076 static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} in blk_post_runtime_resume() argument
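The runtime-PM helpers bracket a driver's own suspend/resume work: blk_pre_runtime_suspend() fails if requests are still in flight, and the post hooks record the outcome so the queue keeps driving future suspends and resumes. A sketch of the pairing, assuming blk_pm_runtime_init(q, dev) was called at probe; the dev-to-queue mapping and hardware calls are placeholders:

#include <linux/blkdev.h>
#include <linux/device.h>

static struct request_queue *example_dev_to_queue(struct device *dev); /* placeholder */
static int example_hw_suspend(struct device *dev);                     /* placeholder */
static int example_hw_resume(struct device *dev);                      /* placeholder */

static int example_runtime_suspend(struct device *dev)
{
        struct request_queue *q = example_dev_to_queue(dev);
        int ret;

        ret = blk_pre_runtime_suspend(q);
        if (ret)
                return ret;             /* e.g. -EBUSY: requests still in flight */
        ret = example_hw_suspend(dev);
        blk_post_runtime_suspend(q, ret);
        return ret;
}

static int example_runtime_resume(struct device *dev)
{
        struct request_queue *q = example_dev_to_queue(dev);
        int ret;

        blk_pre_runtime_resume(q);
        ret = example_hw_resume(dev);
        blk_post_runtime_resume(q, ret);
        return ret;
}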
1194 static inline unsigned long queue_bounce_pfn(struct request_queue *q) in queue_bounce_pfn() argument
1196 return q->limits.bounce_pfn; in queue_bounce_pfn()
1199 static inline unsigned long queue_segment_boundary(struct request_queue *q) in queue_segment_boundary() argument
1201 return q->limits.seg_boundary_mask; in queue_segment_boundary()
1204 static inline unsigned int queue_max_sectors(struct request_queue *q) in queue_max_sectors() argument
1206 return q->limits.max_sectors; in queue_max_sectors()
1209 static inline unsigned int queue_max_hw_sectors(struct request_queue *q) in queue_max_hw_sectors() argument
1211 return q->limits.max_hw_sectors; in queue_max_hw_sectors()
1214 static inline unsigned short queue_max_segments(struct request_queue *q) in queue_max_segments() argument
1216 return q->limits.max_segments; in queue_max_segments()
1219 static inline unsigned int queue_max_segment_size(struct request_queue *q) in queue_max_segment_size() argument
1221 return q->limits.max_segment_size; in queue_max_segment_size()
1224 static inline unsigned short queue_logical_block_size(struct request_queue *q) in queue_logical_block_size() argument
1228 if (q && q->limits.logical_block_size) in queue_logical_block_size()
1229 retval = q->limits.logical_block_size; in queue_logical_block_size()
1239 static inline unsigned int queue_physical_block_size(struct request_queue *q) in queue_physical_block_size() argument
1241 return q->limits.physical_block_size; in queue_physical_block_size()
1249 static inline unsigned int queue_io_min(struct request_queue *q) in queue_io_min() argument
1251 return q->limits.io_min; in queue_io_min()
1259 static inline unsigned int queue_io_opt(struct request_queue *q) in queue_io_opt() argument
1261 return q->limits.io_opt; in queue_io_opt()
1269 static inline int queue_alignment_offset(struct request_queue *q) in queue_alignment_offset() argument
1271 if (q->limits.misaligned) in queue_alignment_offset()
1274 return q->limits.alignment_offset; in queue_alignment_offset()
1287 struct request_queue *q = bdev_get_queue(bdev); in bdev_alignment_offset() local
1289 if (q->limits.misaligned) in bdev_alignment_offset()
1295 return q->limits.alignment_offset; in bdev_alignment_offset()
1298 static inline int queue_discard_alignment(struct request_queue *q) in queue_discard_alignment() argument
1300 if (q->limits.discard_misaligned) in queue_discard_alignment()
1303 return q->limits.discard_alignment; in queue_discard_alignment()
1331 struct request_queue *q = bdev_get_queue(bdev); in bdev_discard_alignment() local
1336 return q->limits.discard_alignment; in bdev_discard_alignment()
1339 static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) in queue_discard_zeroes_data() argument
1341 if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1) in queue_discard_zeroes_data()
1354 struct request_queue *q = bdev_get_queue(bdev); in bdev_write_same() local
1356 if (q) in bdev_write_same()
1357 return q->limits.max_write_same_sectors; in bdev_write_same()
1362 static inline int queue_dma_alignment(struct request_queue *q) in queue_dma_alignment() argument
1364 return q ? q->dma_alignment : 511; in queue_dma_alignment()
1367 static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, in blk_rq_aligned() argument
1370 unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; in blk_rq_aligned()
1390 static inline bool queue_flush_queueable(struct request_queue *q) in queue_flush_queueable() argument
1392 return !q->flush_not_queueable; in queue_flush_queueable()
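The queue_* accessors above read the cached queue limits; the bdev_* wrappers resolve the block_device to its queue first. A sketch of a consumer deriving an allocation granularity from them (the fallback order is illustrative):

#include <linux/blkdev.h>

static unsigned int example_alloc_granularity(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);
        unsigned int gran;

        gran = queue_io_opt(q);                 /* optimal I/O size, 0 if unset */
        if (!gran)
                gran = queue_io_min(q);         /* minimum efficient I/O size */
        if (!gran)
                gran = queue_logical_block_size(q);     /* falls back to 512 */
        return gran;
}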
1518 static inline void blk_queue_max_integrity_segments(struct request_queue *q, in blk_queue_max_integrity_segments() argument
1521 q->limits.max_integrity_segments = segs; in blk_queue_max_integrity_segments()
1525 queue_max_integrity_segments(struct request_queue *q) in queue_max_integrity_segments() argument
1527 return q->limits.max_integrity_segments; in queue_max_integrity_segments()
1541 static inline int blk_rq_count_integrity_sg(struct request_queue *q, in blk_rq_count_integrity_sg() argument
1546 static inline int blk_rq_map_integrity_sg(struct request_queue *q, in blk_rq_map_integrity_sg() argument
1572 static inline void blk_queue_max_integrity_segments(struct request_queue *q, in blk_queue_max_integrity_segments() argument
1576 static inline unsigned short queue_max_integrity_segments(struct request_queue *q) in queue_max_integrity_segments() argument
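With CONFIG_BLK_DEV_INTEGRITY the setter stores the limit in q->limits.max_integrity_segments; without it both helpers collapse to the stubs above, so callers need no #ifdefs. A minimal sketch of a controller advertising its protection-information scatter limit at probe (the value is illustrative):

#include <linux/blkdev.h>

static void example_setup_integrity(struct request_queue *q)
{
        blk_queue_max_integrity_segments(q, 1); /* one PI segment per command */
}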