Lines Matching refs:request_queue

31 struct request_queue;
57 struct request_queue *q; /* the queue this rl belongs to */
107 struct request_queue *q;
236 typedef void (request_fn_proc) (struct request_queue *q);
237 typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
238 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
239 typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
248 typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
252 typedef int (lld_busy_fn) (struct request_queue *q);
312 struct request_queue {
526 static inline void queue_lockdep_assert_held(struct request_queue *q) in queue_lockdep_assert_held()
533 struct request_queue *q) in queue_flag_set_unlocked()
539 struct request_queue *q) in queue_flag_test_and_clear()
552 struct request_queue *q) in queue_flag_test_and_set()
564 static inline void queue_flag_set(unsigned int flag, struct request_queue *q) in queue_flag_set()
571 struct request_queue *q) in queue_flag_clear_unlocked()
576 static inline int queue_in_flight(struct request_queue *q) in queue_in_flight()
581 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) in queue_flag_clear()
630 static inline bool queue_is_rq_based(struct request_queue *q) in queue_is_rq_based()
635 static inline unsigned int blk_queue_cluster(struct request_queue *q) in blk_queue_cluster()
741 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
747 static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio) in blk_queue_bounce()
795 extern void blk_rq_init(struct request_queue *q, struct request *rq);
797 extern void __blk_put_request(struct request_queue *, struct request *);
798 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
799 extern struct request *blk_make_request(struct request_queue *, struct bio *,
802 extern void blk_requeue_request(struct request_queue *, struct request *);
805 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
806 extern int blk_lld_busy(struct request_queue *q);
812 extern int blk_insert_cloned_request(struct request_queue *q,
814 extern void blk_delay_queue(struct request_queue *, unsigned long);
815 extern void blk_recount_segments(struct request_queue *, struct bio *);
819 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
821 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
829 static inline void blk_clear_queue_congested(struct request_queue *q, int sync) in blk_clear_queue_congested()
838 static inline void blk_set_queue_congested(struct request_queue *q, int sync) in blk_set_queue_congested()
843 extern void blk_start_queue(struct request_queue *q);
844 extern void blk_stop_queue(struct request_queue *q);
845 extern void blk_sync_queue(struct request_queue *q);
846 extern void __blk_stop_queue(struct request_queue *q);
847 extern void __blk_run_queue(struct request_queue *q);
848 extern void blk_run_queue(struct request_queue *);
849 extern void blk_run_queue_async(struct request_queue *q);
850 extern int blk_rq_map_user(struct request_queue *, struct request *,
854 extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
855 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
858 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
860 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
863 static inline struct request_queue *bdev_get_queue(struct block_device *bdev) in bdev_get_queue()
903 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, in blk_queue_get_max_sectors()
919 static inline unsigned int blk_max_size_offset(struct request_queue *q, in blk_max_size_offset()
931 struct request_queue *q = rq->q; in blk_rq_get_max_sectors()
957 extern struct request *blk_peek_request(struct request_queue *q);
959 extern struct request *blk_fetch_request(struct request_queue *q);
996 extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
998 extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
999 extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
1001 extern void blk_cleanup_queue(struct request_queue *);
1002 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
1003 extern void blk_queue_bounce_limit(struct request_queue *, u64);
1005 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
1006 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
1007 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
1008 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
1009 extern void blk_queue_max_discard_sectors(struct request_queue *q,
1011 extern void blk_queue_max_write_same_sectors(struct request_queue *q,
1013 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
1014 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
1015 extern void blk_queue_alignment_offset(struct request_queue *q,
1018 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
1020 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
1029 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
1030 extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
1031 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
1032 extern int blk_queue_dma_drain(struct request_queue *q,
1035 extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
1036 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
1037 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
1038 extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
1039 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
1040 extern void blk_queue_dma_alignment(struct request_queue *, int);
1041 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
1042 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
1043 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
1044 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
1045 extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
1046 extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
1049 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
1053 bool __must_check blk_get_queue(struct request_queue *);
1054 struct request_queue *blk_alloc_queue(gfp_t);
1055 struct request_queue *blk_alloc_queue_node(gfp_t, int);
1056 extern void blk_put_queue(struct request_queue *);
1062 extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
1063 extern int blk_pre_runtime_suspend(struct request_queue *q);
1064 extern void blk_post_runtime_suspend(struct request_queue *q, int err);
1065 extern void blk_pre_runtime_resume(struct request_queue *q);
1066 extern void blk_post_runtime_resume(struct request_queue *q, int err);
1068 static inline void blk_pm_runtime_init(struct request_queue *q, in blk_pm_runtime_init()
1070 static inline int blk_pre_runtime_suspend(struct request_queue *q) in blk_pre_runtime_suspend()
1074 static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {} in blk_post_runtime_suspend()
1075 static inline void blk_pre_runtime_resume(struct request_queue *q) {} in blk_pre_runtime_resume()
1076 static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} in blk_post_runtime_resume()
1140 extern int blk_queue_start_tag(struct request_queue *, struct request *);
1141 extern struct request *blk_queue_find_tag(struct request_queue *, int);
1142 extern void blk_queue_end_tag(struct request_queue *, struct request *);
1143 extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
1144 extern void blk_queue_free_tags(struct request_queue *);
1145 extern int blk_queue_resize_tags(struct request_queue *, int);
1146 extern void blk_queue_invalidate_tags(struct request_queue *);
1194 static inline unsigned long queue_bounce_pfn(struct request_queue *q) in queue_bounce_pfn()
1199 static inline unsigned long queue_segment_boundary(struct request_queue *q) in queue_segment_boundary()
1204 static inline unsigned int queue_max_sectors(struct request_queue *q) in queue_max_sectors()
1209 static inline unsigned int queue_max_hw_sectors(struct request_queue *q) in queue_max_hw_sectors()
1214 static inline unsigned short queue_max_segments(struct request_queue *q) in queue_max_segments()
1219 static inline unsigned int queue_max_segment_size(struct request_queue *q) in queue_max_segment_size()
1224 static inline unsigned short queue_logical_block_size(struct request_queue *q) in queue_logical_block_size()
1239 static inline unsigned int queue_physical_block_size(struct request_queue *q) in queue_physical_block_size()
1249 static inline unsigned int queue_io_min(struct request_queue *q) in queue_io_min()
1259 static inline unsigned int queue_io_opt(struct request_queue *q) in queue_io_opt()
1269 static inline int queue_alignment_offset(struct request_queue *q) in queue_alignment_offset()
1287 struct request_queue *q = bdev_get_queue(bdev); in bdev_alignment_offset()
1298 static inline int queue_discard_alignment(struct request_queue *q) in queue_discard_alignment()
1331 struct request_queue *q = bdev_get_queue(bdev); in bdev_discard_alignment()
1339 static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) in queue_discard_zeroes_data()
1354 struct request_queue *q = bdev_get_queue(bdev); in bdev_write_same()
1362 static inline int queue_dma_alignment(struct request_queue *q) in queue_dma_alignment()
1367 static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, in blk_rq_aligned()
1390 static inline bool queue_flush_queueable(struct request_queue *q) in queue_flush_queueable()
1494 extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
1496 extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
1497 extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
1499 extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
1518 static inline void blk_queue_max_integrity_segments(struct request_queue *q, in blk_queue_max_integrity_segments()
1525 queue_max_integrity_segments(struct request_queue *q) in queue_max_integrity_segments()
1541 static inline int blk_rq_count_integrity_sg(struct request_queue *q, in blk_rq_count_integrity_sg()
1546 static inline int blk_rq_map_integrity_sg(struct request_queue *q, in blk_rq_map_integrity_sg()
1572 static inline void blk_queue_max_integrity_segments(struct request_queue *q, in blk_queue_max_integrity_segments()
1576 static inline unsigned short queue_max_integrity_segments(struct request_queue *q) in queue_max_integrity_segments()
1580 static inline bool blk_integrity_merge_rq(struct request_queue *rq, in blk_integrity_merge_rq()
1586 static inline bool blk_integrity_merge_bio(struct request_queue *rq, in blk_integrity_merge_bio()
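
The declarations above are the legacy single-queue block layer API. As a rough illustration of how a driver wires them together, here is a minimal sketch of a request-based driver using blk_init_queue() (line 998), blk_queue_logical_block_size()/blk_queue_max_hw_sectors() (lines 1013/1005), blk_fetch_request() (line 959) and blk_cleanup_queue() (line 1001). The names sketch_request_fn, sketch_transfer, sketch_lock and the module boilerplate are hypothetical, chosen only for the example; gendisk allocation and error paths are omitted.

	#include <linux/module.h>
	#include <linux/blkdev.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(sketch_lock);
	static struct request_queue *sketch_queue;

	/* Hypothetical helper that would move the data for one request,
	 * blk_rq_bytes(rq) bytes starting at sector blk_rq_pos(rq). */
	static void sketch_transfer(struct request *rq)
	{
		/* ... copy to/from the backing store ... */
	}

	/* request_fn_proc: invoked by the block layer with the queue lock held. */
	static void sketch_request_fn(struct request_queue *q)
	{
		struct request *rq;

		while ((rq = blk_fetch_request(q)) != NULL) {
			if (rq->cmd_type != REQ_TYPE_FS) {
				/* Not a filesystem request; fail it. */
				__blk_end_request_all(rq, -EIO);
				continue;
			}
			sketch_transfer(rq);
			__blk_end_request_all(rq, 0);
		}
	}

	static int __init sketch_init(void)
	{
		sketch_queue = blk_init_queue(sketch_request_fn, &sketch_lock);
		if (!sketch_queue)
			return -ENOMEM;

		blk_queue_logical_block_size(sketch_queue, 512);
		blk_queue_max_hw_sectors(sketch_queue, 256);
		/* ... allocate, fill in and add the gendisk owning this queue ... */
		return 0;
	}

	static void __exit sketch_exit(void)
	{
		blk_cleanup_queue(sketch_queue);
	}

	module_init(sketch_init);
	module_exit(sketch_exit);
	MODULE_LICENSE("GPL");

blk_fetch_request() dequeues and starts the next request in one step; a driver that needs to examine a request before committing to it can instead use blk_peek_request() (line 957) followed by the usual start/requeue helpers.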