Lines matching references to q (the struct request_queue pointer) in block/blk-mq.c

80 static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)  in blk_mq_queue_enter()  argument
85 if (percpu_ref_tryget_live(&q->mq_usage_counter)) in blk_mq_queue_enter()
91 ret = wait_event_interruptible(q->mq_freeze_wq, in blk_mq_queue_enter()
92 !q->mq_freeze_depth || blk_queue_dying(q)); in blk_mq_queue_enter()
93 if (blk_queue_dying(q)) in blk_mq_queue_enter()
100 static void blk_mq_queue_exit(struct request_queue *q) in blk_mq_queue_exit() argument
102 percpu_ref_put(&q->mq_usage_counter); in blk_mq_queue_exit()
107 struct request_queue *q = in blk_mq_usage_counter_release() local
110 wake_up_all(&q->mq_freeze_wq); in blk_mq_usage_counter_release()
113 void blk_mq_freeze_queue_start(struct request_queue *q) in blk_mq_freeze_queue_start() argument
117 spin_lock_irq(q->queue_lock); in blk_mq_freeze_queue_start()
118 freeze = !q->mq_freeze_depth++; in blk_mq_freeze_queue_start()
119 spin_unlock_irq(q->queue_lock); in blk_mq_freeze_queue_start()
122 percpu_ref_kill(&q->mq_usage_counter); in blk_mq_freeze_queue_start()
123 blk_mq_run_hw_queues(q, false); in blk_mq_freeze_queue_start()
128 static void blk_mq_freeze_queue_wait(struct request_queue *q) in blk_mq_freeze_queue_wait() argument
130 wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter)); in blk_mq_freeze_queue_wait()
137 void blk_mq_freeze_queue(struct request_queue *q) in blk_mq_freeze_queue() argument
139 blk_mq_freeze_queue_start(q); in blk_mq_freeze_queue()
140 blk_mq_freeze_queue_wait(q); in blk_mq_freeze_queue()
144 void blk_mq_unfreeze_queue(struct request_queue *q) in blk_mq_unfreeze_queue() argument
148 spin_lock_irq(q->queue_lock); in blk_mq_unfreeze_queue()
149 wake = !--q->mq_freeze_depth; in blk_mq_unfreeze_queue()
150 WARN_ON_ONCE(q->mq_freeze_depth < 0); in blk_mq_unfreeze_queue()
151 spin_unlock_irq(q->queue_lock); in blk_mq_unfreeze_queue()
153 percpu_ref_reinit(&q->mq_usage_counter); in blk_mq_unfreeze_queue()
154 wake_up_all(&q->mq_freeze_wq); in blk_mq_unfreeze_queue()
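
The enter/exit and freeze/unfreeze helpers above form a reference-count handshake: submitters take a reference on q->mq_usage_counter (or sleep on q->mq_freeze_wq while a freeze is pending), and blk_mq_freeze_queue() raises mq_freeze_depth, kills the reference counter, and waits until every in-flight reference is dropped. Below is a minimal user-space model of that handshake using one mutex and a condition variable; the kernel instead uses a percpu_ref and wait queues so the submission fast path stays lock-free, and every name here is illustrative.

/* Minimal model of the q->mq_usage_counter / mq_freeze_wq handshake.
 * Illustrative only: not the kernel implementation. */
#include <pthread.h>

struct mock_queue {
        long in_flight;                 /* models q->mq_usage_counter */
        int freeze_depth;               /* models q->mq_freeze_depth  */
        pthread_mutex_t lock;           /* models q->queue_lock       */
        pthread_cond_t freeze_wq;       /* models q->mq_freeze_wq     */
};

/* blk_mq_queue_enter(): block new submitters while a freeze is pending
 * (the kernel additionally bails out early if the queue is dying). */
static void mock_queue_enter(struct mock_queue *q)
{
        pthread_mutex_lock(&q->lock);
        while (q->freeze_depth > 0)
                pthread_cond_wait(&q->freeze_wq, &q->lock);
        q->in_flight++;
        pthread_mutex_unlock(&q->lock);
}

/* blk_mq_queue_exit(): dropping the last reference wakes a waiting
 * freezer, which is what blk_mq_usage_counter_release() does. */
static void mock_queue_exit(struct mock_queue *q)
{
        pthread_mutex_lock(&q->lock);
        if (--q->in_flight == 0)
                pthread_cond_broadcast(&q->freeze_wq);
        pthread_mutex_unlock(&q->lock);
}

/* blk_mq_freeze_queue(): freeze_queue_start() raises the depth so no
 * new submitter gets in, freeze_queue_wait() drains in-flight ones. */
static void mock_freeze_queue(struct mock_queue *q)
{
        pthread_mutex_lock(&q->lock);
        q->freeze_depth++;
        while (q->in_flight > 0)
                pthread_cond_wait(&q->freeze_wq, &q->lock);
        pthread_mutex_unlock(&q->lock);
}

/* blk_mq_unfreeze_queue(): the last unfreeze lets submitters back in. */
static void mock_unfreeze_queue(struct mock_queue *q)
{
        pthread_mutex_lock(&q->lock);
        if (--q->freeze_depth == 0)
                pthread_cond_broadcast(&q->freeze_wq);
        pthread_mutex_unlock(&q->lock);
}
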
159 void blk_mq_wake_waiters(struct request_queue *q) in blk_mq_wake_waiters() argument
164 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_wake_waiters()
173 wake_up_all(&q->mq_freeze_wq); in blk_mq_wake_waiters()
182 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, in blk_mq_rq_ctx_init() argument
185 if (blk_queue_io_stat(q)) in blk_mq_rq_ctx_init()
190 rq->q = q; in blk_mq_rq_ctx_init()
246 blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw); in __blk_mq_alloc_request()
253 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, in blk_mq_alloc_request() argument
262 ret = blk_mq_queue_enter(q, gfp); in blk_mq_alloc_request()
266 ctx = blk_mq_get_ctx(q); in blk_mq_alloc_request()
267 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_alloc_request()
268 blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT, in blk_mq_alloc_request()
276 ctx = blk_mq_get_ctx(q); in blk_mq_alloc_request()
277 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_alloc_request()
278 blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx, in blk_mq_alloc_request()
285 blk_mq_queue_exit(q); in blk_mq_alloc_request()
296 struct request_queue *q = rq->q; in __blk_mq_free_request() local
304 blk_mq_queue_exit(q); in __blk_mq_free_request()
320 struct request_queue *q = rq->q; in blk_mq_free_request() local
322 hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu); in blk_mq_free_request()
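
blk_mq_alloc_request() above enters the queue, picks the submitting CPU's software context, maps it to a hardware context, and first attempts a non-blocking tag grab before retrying with the caller's gfp mask; blk_mq_free_request() returns the tag and drops the queue reference. The following is a hedged driver-side usage sketch against the signatures shown above; the surrounding helper, the WRITE direction, and the ERR_PTR error convention of this kernel era are assumptions.

/* Hypothetical driver helper built on the signatures listed above:
 * blk_mq_alloc_request(q, rw, gfp, reserved) / blk_mq_free_request(rq). */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/err.h>

static int example_send_private_cmd(struct request_queue *q)
{
        struct request *rq;

        /* GFP_KERNEL: allowed to sleep, so the slow path may wait for
         * a free tag instead of failing immediately. */
        rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* ... fill the driver-private payload behind the request ... */

        blk_mq_free_request(rq);        /* releases tag and queue reference */
        return 0;
}
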
353 rq->q->softirq_done_fn(rq); in __blk_mq_complete_request_remote()
362 if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) { in blk_mq_ipi_complete_request()
363 rq->q->softirq_done_fn(rq); in blk_mq_ipi_complete_request()
368 if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags)) in blk_mq_ipi_complete_request()
377 rq->q->softirq_done_fn(rq); in blk_mq_ipi_complete_request()
384 struct request_queue *q = rq->q; in __blk_mq_complete_request() local
386 if (!q->softirq_done_fn) in __blk_mq_complete_request()
402 struct request_queue *q = rq->q; in blk_mq_complete_request() local
404 if (unlikely(blk_should_fake_timeout(q))) in blk_mq_complete_request()
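
The completion lines above route rq->q->softirq_done_fn either to the current CPU or, via an IPI, to the CPU that submitted the request, depending on QUEUE_FLAG_SAME_COMP, QUEUE_FLAG_SAME_FORCE, and whether the two CPUs share a cache. A small illustrative helper capturing that decision follows (names invented for the sketch; the kernel also checks that the submitting CPU is online).

#include <stdbool.h>

enum completion_path { COMPLETE_LOCAL, COMPLETE_VIA_IPI };

/* Mirrors the decision in blk_mq_ipi_complete_request(): complete on
 * the current CPU unless same-CPU completion is requested and the
 * submitting CPU is remote and (unless forced) not cache-local. */
static enum completion_path
pick_completion_path(bool same_comp,    /* QUEUE_FLAG_SAME_COMP      */
                     bool same_force,   /* QUEUE_FLAG_SAME_FORCE     */
                     bool shares_cache, /* cpus_share_cache() result */
                     int submit_cpu, int current_cpu)
{
        if (!same_comp || submit_cpu == current_cpu)
                return COMPLETE_LOCAL;
        if (shares_cache && !same_force)
                return COMPLETE_LOCAL;
        return COMPLETE_VIA_IPI;
}
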
419 struct request_queue *q = rq->q; in blk_mq_start_request() local
421 trace_block_rq_issue(q, rq); in blk_mq_start_request()
446 if (q->dma_drain_size && blk_rq_bytes(rq)) { in blk_mq_start_request()
459 struct request_queue *q = rq->q; in __blk_mq_requeue_request() local
461 trace_block_rq_requeue(q, rq); in __blk_mq_requeue_request()
464 if (q->dma_drain_size && blk_rq_bytes(rq)) in __blk_mq_requeue_request()
480 struct request_queue *q = in blk_mq_requeue_work() local
486 spin_lock_irqsave(&q->requeue_lock, flags); in blk_mq_requeue_work()
487 list_splice_init(&q->requeue_list, &rq_list); in blk_mq_requeue_work()
488 spin_unlock_irqrestore(&q->requeue_lock, flags); in blk_mq_requeue_work()
509 blk_mq_start_hw_queues(q); in blk_mq_requeue_work()
514 struct request_queue *q = rq->q; in blk_mq_add_to_requeue_list() local
523 spin_lock_irqsave(&q->requeue_lock, flags); in blk_mq_add_to_requeue_list()
526 list_add(&rq->queuelist, &q->requeue_list); in blk_mq_add_to_requeue_list()
528 list_add_tail(&rq->queuelist, &q->requeue_list); in blk_mq_add_to_requeue_list()
530 spin_unlock_irqrestore(&q->requeue_lock, flags); in blk_mq_add_to_requeue_list()
534 void blk_mq_cancel_requeue_work(struct request_queue *q) in blk_mq_cancel_requeue_work() argument
536 cancel_work_sync(&q->requeue_work); in blk_mq_cancel_requeue_work()
540 void blk_mq_kick_requeue_list(struct request_queue *q) in blk_mq_kick_requeue_list() argument
542 kblockd_schedule_work(&q->requeue_work); in blk_mq_kick_requeue_list()
546 void blk_mq_abort_requeue_list(struct request_queue *q) in blk_mq_abort_requeue_list() argument
551 spin_lock_irqsave(&q->requeue_lock, flags); in blk_mq_abort_requeue_list()
552 list_splice_init(&q->requeue_list, &rq_list); in blk_mq_abort_requeue_list()
553 spin_unlock_irqrestore(&q->requeue_lock, flags); in blk_mq_abort_requeue_list()
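
The requeue machinery above follows a classic splice-under-lock pattern: producers add requests to q->requeue_list under q->requeue_lock, and blk_mq_requeue_work() steals the whole list onto a private head inside a short critical section, then re-dispatches the entries with the lock dropped. A user-space sketch of that pattern follows (illustrative types, not the kernel's list_head API).

/* Splice-under-lock: enqueue under a short critical section, then
 * take the whole list and process it without holding the lock. */
#include <pthread.h>
#include <stddef.h>

struct mock_rq {
        struct mock_rq *next;
        /* ... payload ... */
};

struct requeue_list {
        struct mock_rq *head;           /* models q->requeue_list */
        pthread_mutex_t lock;           /* models q->requeue_lock */
};

/* blk_mq_add_to_requeue_list(): producer side. */
static void requeue_add(struct requeue_list *rl, struct mock_rq *rq)
{
        pthread_mutex_lock(&rl->lock);
        rq->next = rl->head;
        rl->head = rq;
        pthread_mutex_unlock(&rl->lock);
}

/* blk_mq_requeue_work(): splice everything to a private list, then
 * work through it with the lock released. */
static void requeue_work(struct requeue_list *rl,
                         void (*reinsert)(struct mock_rq *rq))
{
        struct mock_rq *list;

        pthread_mutex_lock(&rl->lock);
        list = rl->head;                /* models list_splice_init() */
        rl->head = NULL;
        pthread_mutex_unlock(&rl->lock);

        while (list) {
                struct mock_rq *rq = list;

                list = rq->next;
                reinsert(rq);           /* re-dispatch outside the lock */
        }
}
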
577 struct blk_flush_queue *fq = blk_get_flush_queue(rq->q, rq->mq_ctx); in blk_mq_tag_to_rq()
593 struct blk_mq_ops *ops = req->q->mq_ops; in blk_mq_rq_timed_out()
637 if (unlikely(blk_queue_dying(rq->q))) { in blk_mq_check_expired()
657 struct request_queue *q = (struct request_queue *)priv; in blk_mq_rq_timer() local
665 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_rq_timer()
678 mod_timer(&q->timeout, data.next); in blk_mq_rq_timer()
680 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_rq_timer()
693 static bool blk_mq_attempt_merge(struct request_queue *q, in blk_mq_attempt_merge() argument
710 if (bio_attempt_back_merge(q, rq, bio)) { in blk_mq_attempt_merge()
716 if (bio_attempt_front_merge(q, rq, bio)) { in blk_mq_attempt_merge()
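
blk_mq_attempt_merge() above tries to glue an incoming bio onto an already-queued request, either behind it (back merge) or ahead of it (front merge). The geometric part of that test, in sectors, reduces to the helper sketched below; the real code additionally checks merge flags and segment and size limits, which are omitted here, and the names are invented for the sketch.

#include <stdint.h>

enum merge_kind { NO_MERGE, BACK_MERGE, FRONT_MERGE };

/* A bio can be appended if it starts exactly where the request ends
 * (bio_attempt_back_merge), or prepended if it ends exactly where the
 * request starts (bio_attempt_front_merge). */
static enum merge_kind classify_merge(uint64_t rq_start, uint64_t rq_sectors,
                                      uint64_t bio_start, uint64_t bio_sectors)
{
        if (bio_start == rq_start + rq_sectors)
                return BACK_MERGE;
        if (bio_start + bio_sectors == rq_start)
                return FRONT_MERGE;
        return NO_MERGE;
}
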
769 struct request_queue *q = hctx->queue; in __blk_mq_run_hw_queue() local
820 ret = q->mq_ops->queue_rq(hctx, &bd); in __blk_mq_run_hw_queue()
922 void blk_mq_run_hw_queues(struct request_queue *q, bool async) in blk_mq_run_hw_queues() argument
927 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_run_hw_queues()
946 void blk_mq_stop_hw_queues(struct request_queue *q) in blk_mq_stop_hw_queues() argument
951 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_stop_hw_queues()
964 void blk_mq_start_hw_queues(struct request_queue *q) in blk_mq_start_hw_queues() argument
969 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_start_hw_queues()
974 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) in blk_mq_start_stopped_hw_queues() argument
979 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_start_stopped_hw_queues()
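
The run/stop/start helpers above are the driver's flow-control knobs: a ->queue_rq handler that runs out of device resources can stop the hardware queues and report BUSY, and the completion path restarts the stopped queues once resources free up. A hedged sketch of that usage follows; example_queue_rq(), the example_device_* helpers, and the use of hctx->driver_data are hypothetical, while the blk_mq_* calls are the helpers listed above.

/* Hypothetical driver fragment: push back on the block layer when the
 * device is full, restart dispatch from the completion path. */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Assumed device helpers, defined elsewhere in the driver. */
extern bool example_device_has_room(void *dev);
extern void example_device_submit(void *dev, struct request *rq);

static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
                            const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;

        if (!example_device_has_room(hctx->driver_data)) {
                /* No room: stop dispatch and ask for a retry later. */
                blk_mq_stop_hw_queues(rq->q);
                return BLK_MQ_RQ_QUEUE_BUSY;
        }

        blk_mq_start_request(rq);
        example_device_submit(hctx->driver_data, rq);
        return BLK_MQ_RQ_QUEUE_OK;
}

static void example_completion_irq(struct request_queue *q)
{
        /* Resources freed: let stopped hardware queues dispatch again. */
        blk_mq_start_stopped_hw_queues(q, true);
}
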
1036 struct request_queue *q = rq->q; in blk_mq_insert_request() local
1040 current_ctx = blk_mq_get_ctx(q); in blk_mq_insert_request()
1044 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_insert_request()
1056 static void blk_mq_insert_requests(struct request_queue *q, in blk_mq_insert_requests() argument
1066 trace_block_unplug(q, depth, !from_schedule); in blk_mq_insert_requests()
1068 current_ctx = blk_mq_get_ctx(q); in blk_mq_insert_requests()
1072 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_insert_requests()
1123 BUG_ON(!rq->q); in blk_mq_flush_plug_list()
1132 this_q = rq->q; in blk_mq_flush_plug_list()
1176 struct request_queue *q = hctx->queue; in blk_mq_merge_queue_io() local
1179 if (!blk_mq_attempt_merge(q, ctx, bio)) { in blk_mq_merge_queue_io()
1195 static struct request *blk_mq_map_request(struct request_queue *q, in blk_mq_map_request() argument
1205 if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) { in blk_mq_map_request()
1210 ctx = blk_mq_get_ctx(q); in blk_mq_map_request()
1211 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_map_request()
1216 trace_block_getrq(q, bio, rw); in blk_mq_map_request()
1217 blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx, in blk_mq_map_request()
1223 trace_block_sleeprq(q, bio, rw); in blk_mq_map_request()
1225 ctx = blk_mq_get_ctx(q); in blk_mq_map_request()
1226 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_map_request()
1227 blk_mq_set_alloc_data(&alloc_data, q, in blk_mq_map_request()
1245 static void blk_mq_make_request(struct request_queue *q, struct bio *bio) in blk_mq_make_request() argument
1252 blk_queue_bounce(q, &bio); in blk_mq_make_request()
1259 rq = blk_mq_map_request(q, bio, &data); in blk_mq_make_request()
1289 ret = q->mq_ops->queue_rq(data.hctx, &bd); in blk_mq_make_request()
1321 static void blk_sq_make_request(struct request_queue *q, struct bio *bio) in blk_sq_make_request() argument
1335 blk_queue_bounce(q, &bio); in blk_sq_make_request()
1342 if (use_plug && !blk_queue_nomerges(q) && in blk_sq_make_request()
1343 blk_attempt_plug_merge(q, bio, &request_count)) in blk_sq_make_request()
1346 rq = blk_mq_map_request(q, bio, &data); in blk_sq_make_request()
1367 trace_block_plug(q); in blk_sq_make_request()
1370 trace_block_plug(q); in blk_sq_make_request()
1395 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu) in blk_mq_map_queue() argument
1397 return q->queue_hw_ctx[q->mq_map[cpu]]; in blk_mq_map_queue()
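
blk_mq_map_queue() above is a two-level table lookup: q->mq_map[] maps every possible CPU to a hardware-queue index, and q->queue_hw_ctx[] holds the hardware-context pointers. A toy model of that layout follows (types invented for the sketch).

struct mock_hctx { int queue_num; };

struct mock_queue_map {
        unsigned int *mq_map;            /* one entry per possible CPU   */
        struct mock_hctx **queue_hw_ctx; /* one entry per hardware queue */
};

/* Same lookup shape as blk_mq_map_queue(). */
static struct mock_hctx *mock_map_queue(const struct mock_queue_map *q,
                                        int cpu)
{
        return q->queue_hw_ctx[q->mq_map[cpu]];
}
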
1547 struct request_queue *q = hctx->queue; in blk_mq_hctx_cpu_offline() local
1554 ctx = __blk_mq_get_ctx(q, cpu); in blk_mq_hctx_cpu_offline()
1566 ctx = blk_mq_get_ctx(q); in blk_mq_hctx_cpu_offline()
1577 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_hctx_cpu_offline()
1604 static void blk_mq_exit_hctx(struct request_queue *q, in blk_mq_exit_hctx() argument
1625 static void blk_mq_exit_hw_queues(struct request_queue *q, in blk_mq_exit_hw_queues() argument
1631 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_exit_hw_queues()
1634 blk_mq_exit_hctx(q, set, hctx, i); in blk_mq_exit_hw_queues()
1638 static void blk_mq_free_hw_queues(struct request_queue *q, in blk_mq_free_hw_queues() argument
1644 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_free_hw_queues()
1648 static int blk_mq_init_hctx(struct request_queue *q, in blk_mq_init_hctx() argument
1663 hctx->queue = q; in blk_mq_init_hctx()
1691 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size); in blk_mq_init_hctx()
1718 static int blk_mq_init_hw_queues(struct request_queue *q, in blk_mq_init_hw_queues() argument
1727 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_init_hw_queues()
1728 if (blk_mq_init_hctx(q, set, hctx, i)) in blk_mq_init_hw_queues()
1732 if (i == q->nr_hw_queues) in blk_mq_init_hw_queues()
1738 blk_mq_exit_hw_queues(q, set, i); in blk_mq_init_hw_queues()
1743 static void blk_mq_init_cpu_queues(struct request_queue *q, in blk_mq_init_cpu_queues() argument
1749 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); in blk_mq_init_cpu_queues()
1756 __ctx->queue = q; in blk_mq_init_cpu_queues()
1762 hctx = q->mq_ops->map_queue(q, i); in blk_mq_init_cpu_queues()
1773 static void blk_mq_map_swqueue(struct request_queue *q) in blk_mq_map_swqueue() argument
1778 struct blk_mq_tag_set *set = q->tag_set; in blk_mq_map_swqueue()
1780 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_map_swqueue()
1788 queue_for_each_ctx(q, ctx, i) { in blk_mq_map_swqueue()
1793 hctx = q->mq_ops->map_queue(q, i); in blk_mq_map_swqueue()
1799 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_map_swqueue()
1839 struct request_queue *q; in blk_mq_update_tag_set_depth() local
1848 list_for_each_entry(q, &set->tag_list, tag_set_list) { in blk_mq_update_tag_set_depth()
1849 blk_mq_freeze_queue(q); in blk_mq_update_tag_set_depth()
1851 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_update_tag_set_depth()
1857 blk_mq_unfreeze_queue(q); in blk_mq_update_tag_set_depth()
1861 static void blk_mq_del_queue_tag_set(struct request_queue *q) in blk_mq_del_queue_tag_set() argument
1863 struct blk_mq_tag_set *set = q->tag_set; in blk_mq_del_queue_tag_set()
1866 list_del_init(&q->tag_set_list); in blk_mq_del_queue_tag_set()
1872 struct request_queue *q) in blk_mq_add_queue_tag_set() argument
1874 q->tag_set = set; in blk_mq_add_queue_tag_set()
1877 list_add_tail(&q->tag_set_list, &set->tag_list); in blk_mq_add_queue_tag_set()
1888 void blk_mq_release(struct request_queue *q) in blk_mq_release() argument
1894 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_release()
1901 kfree(q->queue_hw_ctx); in blk_mq_release()
1904 free_percpu(q->queue_ctx); in blk_mq_release()
1909 struct request_queue *uninit_q, *q; in blk_mq_init_queue() local
1915 q = blk_mq_init_allocated_queue(set, uninit_q); in blk_mq_init_queue()
1916 if (IS_ERR(q)) in blk_mq_init_queue()
1919 return q; in blk_mq_init_queue()
1924 struct request_queue *q) in blk_mq_init_allocated_queue() argument
1966 if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release, in blk_mq_init_allocated_queue()
1970 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); in blk_mq_init_allocated_queue()
1971 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ); in blk_mq_init_allocated_queue()
1973 q->nr_queues = nr_cpu_ids; in blk_mq_init_allocated_queue()
1974 q->nr_hw_queues = set->nr_hw_queues; in blk_mq_init_allocated_queue()
1975 q->mq_map = map; in blk_mq_init_allocated_queue()
1977 q->queue_ctx = ctx; in blk_mq_init_allocated_queue()
1978 q->queue_hw_ctx = hctxs; in blk_mq_init_allocated_queue()
1980 q->mq_ops = set->ops; in blk_mq_init_allocated_queue()
1981 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; in blk_mq_init_allocated_queue()
1984 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE; in blk_mq_init_allocated_queue()
1986 q->sg_reserved_size = INT_MAX; in blk_mq_init_allocated_queue()
1988 INIT_WORK(&q->requeue_work, blk_mq_requeue_work); in blk_mq_init_allocated_queue()
1989 INIT_LIST_HEAD(&q->requeue_list); in blk_mq_init_allocated_queue()
1990 spin_lock_init(&q->requeue_lock); in blk_mq_init_allocated_queue()
1992 if (q->nr_hw_queues > 1) in blk_mq_init_allocated_queue()
1993 blk_queue_make_request(q, blk_mq_make_request); in blk_mq_init_allocated_queue()
1995 blk_queue_make_request(q, blk_sq_make_request); in blk_mq_init_allocated_queue()
2000 q->nr_requests = set->queue_depth; in blk_mq_init_allocated_queue()
2003 blk_queue_softirq_done(q, set->ops->complete); in blk_mq_init_allocated_queue()
2005 blk_mq_init_cpu_queues(q, set->nr_hw_queues); in blk_mq_init_allocated_queue()
2007 if (blk_mq_init_hw_queues(q, set)) in blk_mq_init_allocated_queue()
2011 list_add_tail(&q->all_q_node, &all_q_list); in blk_mq_init_allocated_queue()
2014 blk_mq_add_queue_tag_set(set, q); in blk_mq_init_allocated_queue()
2016 blk_mq_map_swqueue(q); in blk_mq_init_allocated_queue()
2018 return q; in blk_mq_init_allocated_queue()
2036 void blk_mq_free_queue(struct request_queue *q) in blk_mq_free_queue() argument
2038 struct blk_mq_tag_set *set = q->tag_set; in blk_mq_free_queue()
2040 blk_mq_del_queue_tag_set(q); in blk_mq_free_queue()
2042 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); in blk_mq_free_queue()
2043 blk_mq_free_hw_queues(q, set); in blk_mq_free_queue()
2045 percpu_ref_exit(&q->mq_usage_counter); in blk_mq_free_queue()
2047 kfree(q->mq_map); in blk_mq_free_queue()
2049 q->mq_map = NULL; in blk_mq_free_queue()
2052 list_del_init(&q->all_q_node); in blk_mq_free_queue()
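
blk_mq_init_queue() and blk_mq_free_queue() above are the driver-facing entry points for bringing a multiqueue request queue up and down around a tag set. The sketch below shows the usual setup/teardown sequence; blk_mq_alloc_tag_set()/blk_mq_free_tag_set() and the blk_mq_tag_set fields are the standard blk-mq API of this era, but example_mq_ops, struct example_cmd, the chosen depth, and the ->queue_rq handler (e.g. the one sketched after the stop/start helpers) are assumptions.

/* Hedged driver-side setup around a tag set; example_* names are
 * hypothetical, the blk_mq_* calls are the API of this kernel era. */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/err.h>

struct example_cmd { int status; };     /* per-request driver data (cmd_size) */

/* A ->queue_rq handler like the one sketched earlier, assumed visible here. */
int example_queue_rq(struct blk_mq_hw_ctx *hctx,
                     const struct blk_mq_queue_data *bd);

static struct blk_mq_ops example_mq_ops = {
        .queue_rq  = example_queue_rq,
        .map_queue = blk_mq_map_queue,  /* default cpu -> hctx mapping */
};

static struct blk_mq_tag_set example_tag_set;

static struct request_queue *example_create_queue(void)
{
        struct request_queue *q;
        int ret;

        example_tag_set.ops = &example_mq_ops;
        example_tag_set.nr_hw_queues = 1;
        example_tag_set.queue_depth = 64;
        example_tag_set.numa_node = NUMA_NO_NODE;
        example_tag_set.cmd_size = sizeof(struct example_cmd);
        example_tag_set.flags = BLK_MQ_F_SHOULD_MERGE;

        ret = blk_mq_alloc_tag_set(&example_tag_set);
        if (ret)
                return ERR_PTR(ret);

        q = blk_mq_init_queue(&example_tag_set);  /* wires queue to the set */
        if (IS_ERR(q))
                blk_mq_free_tag_set(&example_tag_set);
        return q;
}

static void example_destroy_queue(struct request_queue *q)
{
        blk_cleanup_queue(q);           /* ends up in blk_mq_free_queue() */
        blk_mq_free_tag_set(&example_tag_set);
}
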
2057 static void blk_mq_queue_reinit(struct request_queue *q) in blk_mq_queue_reinit() argument
2059 WARN_ON_ONCE(!q->mq_freeze_depth); in blk_mq_queue_reinit()
2061 blk_mq_sysfs_unregister(q); in blk_mq_queue_reinit()
2063 blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues); in blk_mq_queue_reinit()
2071 blk_mq_map_swqueue(q); in blk_mq_queue_reinit()
2073 blk_mq_sysfs_register(q); in blk_mq_queue_reinit()
2079 struct request_queue *q; in blk_mq_queue_reinit_notify() local
2100 list_for_each_entry(q, &all_q_list, all_q_node) in blk_mq_queue_reinit_notify()
2101 blk_mq_freeze_queue_start(q); in blk_mq_queue_reinit_notify()
2102 list_for_each_entry(q, &all_q_list, all_q_node) { in blk_mq_queue_reinit_notify()
2103 blk_mq_freeze_queue_wait(q); in blk_mq_queue_reinit_notify()
2109 del_timer_sync(&q->timeout); in blk_mq_queue_reinit_notify()
2112 list_for_each_entry(q, &all_q_list, all_q_node) in blk_mq_queue_reinit_notify()
2113 blk_mq_queue_reinit(q); in blk_mq_queue_reinit_notify()
2115 list_for_each_entry(q, &all_q_list, all_q_node) in blk_mq_queue_reinit_notify()
2116 blk_mq_unfreeze_queue(q); in blk_mq_queue_reinit_notify()
2246 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) in blk_mq_update_nr_requests() argument
2248 struct blk_mq_tag_set *set = q->tag_set; in blk_mq_update_nr_requests()
2256 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_update_nr_requests()
2263 q->nr_requests = nr; in blk_mq_update_nr_requests()