Lines Matching refs: blk_mq_hw_ctx

36 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
41 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_has_pending()
52 static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx, in get_bm()
64 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_mark_pending()
73 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_clear_pending()
140 struct blk_mq_hw_ctx *hctx; in blk_mq_wake_waiters()
155 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) in blk_mq_can_queue()
236 struct blk_mq_hw_ctx *hctx; in blk_mq_alloc_request()
271 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, in __blk_mq_free_request()
286 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq) in blk_mq_free_hctx_request()
298 struct blk_mq_hw_ctx *hctx; in blk_mq_free_request()
594 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, in blk_mq_check_expired()
635 struct blk_mq_hw_ctx *hctx; in blk_mq_rq_timer()
688 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) in flush_busy_ctxs()
724 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) in __blk_mq_run_hw_queue()
837 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_next_cpu()
858 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) in blk_mq_run_hw_queue()
881 struct blk_mq_hw_ctx *hctx; in blk_mq_run_hw_queues()
895 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) in blk_mq_stop_hw_queue()
905 struct blk_mq_hw_ctx *hctx; in blk_mq_stop_hw_queues()
913 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) in blk_mq_start_hw_queue()
923 struct blk_mq_hw_ctx *hctx; in blk_mq_start_hw_queues()
933 struct blk_mq_hw_ctx *hctx; in blk_mq_start_stopped_hw_queues()
948 struct blk_mq_hw_ctx *hctx; in blk_mq_run_work_fn()
950 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); in blk_mq_run_work_fn()
957 struct blk_mq_hw_ctx *hctx; in blk_mq_delay_work_fn()
959 hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work); in blk_mq_delay_work_fn()
965 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) in blk_mq_delay_queue()
975 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx, in __blk_mq_insert_req_list()
988 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, in __blk_mq_insert_request()
1001 struct blk_mq_hw_ctx *hctx; in blk_mq_insert_request()
1027 struct blk_mq_hw_ctx *hctx; in blk_mq_insert_requests()
1123 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx) in hctx_allow_merges()
1129 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx, in blk_mq_merge_queue_io()
1156 struct blk_mq_hw_ctx *hctx;
1164 struct blk_mq_hw_ctx *hctx; in blk_mq_map_request()
1205 struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, in blk_mq_direct_issue_request()
1414 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu) in blk_mq_map_queue()
1573 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu) in blk_mq_hctx_cpu_offline()
1618 struct blk_mq_hw_ctx *hctx = data; in blk_mq_hctx_notify()
1634 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in blk_mq_exit_hctx()
1656 struct blk_mq_hw_ctx *hctx; in blk_mq_exit_hw_queues()
1669 struct blk_mq_hw_ctx *hctx; in blk_mq_free_hw_queues()
1678 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) in blk_mq_init_hctx()
1749 struct blk_mq_hw_ctx *hctx; in blk_mq_init_hw_queues()
1778 struct blk_mq_hw_ctx *hctx; in blk_mq_init_cpu_queues()
1805 struct blk_mq_hw_ctx *hctx; in blk_mq_map_swqueue()
1882 struct blk_mq_hw_ctx *hctx; in queue_set_hctx_shared()
1947 struct blk_mq_hw_ctx *hctx; in blk_mq_release()
1986 struct blk_mq_hw_ctx **hctxs; in blk_mq_init_allocated_queue()
2008 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx), in blk_mq_init_allocated_queue()
2331 struct blk_mq_hw_ctx *hctx; in blk_mq_update_nr_requests()
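
For orientation, the matches above are all inside the blk-mq core; a struct blk_mq_hw_ctx itself reaches a driver through its blk_mq_ops callbacks. The following is a minimal driver-side sketch, not taken from this file, written against the same kernel era the listing reflects (the .map_queue op and blk_mq_map_queue() at line 1414 still exist, i.e. roughly v4.4-v4.8). Every sketch_* name is hypothetical; only the blk-mq symbols are real, and gendisk registration and real hardware submission are omitted.

#include <linux/module.h>
#include <linux/blk-mq.h>

/* Per-request processing: blk-mq passes the hardware context that
 * blk_mq_map_queue() selected for the submitting CPU, plus the request. */
static int sketch_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);
	/* ... issue rq to the hardware queue described by hctx ... */
	blk_mq_end_request(rq, 0);	/* complete immediately in this sketch */
	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops sketch_mq_ops = {
	.queue_rq	= sketch_queue_rq,
	.map_queue	= blk_mq_map_queue,	/* the helper at line 1414 above */
};

static struct blk_mq_tag_set sketch_tag_set;
static struct request_queue *sketch_queue;

static int __init sketch_init(void)
{
	int ret;

	sketch_tag_set.ops		= &sketch_mq_ops;
	sketch_tag_set.nr_hw_queues	= 1;	/* one blk_mq_hw_ctx */
	sketch_tag_set.queue_depth	= 64;
	sketch_tag_set.numa_node	= NUMA_NO_NODE;
	sketch_tag_set.flags		= BLK_MQ_F_SHOULD_MERGE;

	ret = blk_mq_alloc_tag_set(&sketch_tag_set);
	if (ret)
		return ret;

	/* Allocates the request_queue and its hardware contexts; this is the
	 * path that ends up in blk_mq_init_allocated_queue() above. */
	sketch_queue = blk_mq_init_queue(&sketch_tag_set);
	if (IS_ERR(sketch_queue)) {
		blk_mq_free_tag_set(&sketch_tag_set);
		return PTR_ERR(sketch_queue);
	}
	return 0;
}

static void __exit sketch_exit(void)
{
	blk_cleanup_queue(sketch_queue);
	blk_mq_free_tag_set(&sketch_tag_set);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");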
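
Several entries above, blk_mq_init_hctx() at 1678 and blk_mq_exit_hctx() at 1634, are where the core invokes a driver's optional .init_hctx/.exit_hctx callbacks once per hardware context. Continuing the same hypothetical sketch_* driver, a minimal pair of such callbacks that keeps per-queue state in hctx->driver_data could look like this; the sketch_hctx_data layout is an assumption, only the callback signatures, hctx->driver_data, and hctx->numa_node are real blk-mq API.

#include <linux/slab.h>
#include <linux/blk-mq.h>

/* Hypothetical per-hardware-queue driver state. */
struct sketch_hctx_data {
	unsigned int	index;
	/* ... per-queue doorbells, locks, etc. ... */
};

/* Called from blk_mq_init_hctx() (line 1678 above) for each hardware queue. */
static int sketch_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
			    unsigned int hctx_idx)
{
	struct sketch_hctx_data *d;

	d = kzalloc_node(sizeof(*d), GFP_KERNEL, hctx->numa_node);
	if (!d)
		return -ENOMEM;

	d->index = hctx_idx;
	hctx->driver_data = d;	/* retrieved again in ->queue_rq() */
	return 0;
}

/* Called from blk_mq_exit_hctx() (line 1634 above) when the queue goes away. */
static void sketch_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	kfree(hctx->driver_data);
	hctx->driver_data = NULL;
}

These would be wired up by adding .init_hctx = sketch_init_hctx and .exit_hctx = sketch_exit_hctx to sketch_mq_ops in the sketch above.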