Searched refs:hctx (Results 1 - 22 of 22) sorted by relevance

/linux-4.1.27/block/
blk-mq-sysfs.c
82 struct blk_mq_hw_ctx *hctx; blk_mq_hw_sysfs_show() local
87 hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); blk_mq_hw_sysfs_show()
88 q = hctx->queue; blk_mq_hw_sysfs_show()
96 res = entry->show(hctx, page); blk_mq_hw_sysfs_show()
106 struct blk_mq_hw_ctx *hctx; blk_mq_hw_sysfs_store() local
111 hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); blk_mq_hw_sysfs_store()
112 q = hctx->queue; blk_mq_hw_sysfs_store()
120 res = entry->store(hctx, page, length); blk_mq_hw_sysfs_store()
177 static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx, blk_mq_hw_sysfs_queued_show() argument
180 return sprintf(page, "%lu\n", hctx->queued); blk_mq_hw_sysfs_queued_show()
183 static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page) blk_mq_hw_sysfs_run_show() argument
185 return sprintf(page, "%lu\n", hctx->run); blk_mq_hw_sysfs_run_show()
188 static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx, blk_mq_hw_sysfs_dispatched_show() argument
194 page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]); blk_mq_hw_sysfs_dispatched_show()
199 page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]); blk_mq_hw_sysfs_dispatched_show()
205 static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx, blk_mq_hw_sysfs_rq_list_show() argument
210 spin_lock(&hctx->lock); blk_mq_hw_sysfs_rq_list_show()
211 ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending"); blk_mq_hw_sysfs_rq_list_show()
212 spin_unlock(&hctx->lock); blk_mq_hw_sysfs_rq_list_show()
217 static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page) blk_mq_hw_sysfs_tags_show() argument
219 return blk_mq_tag_sysfs_show(hctx->tags, page); blk_mq_hw_sysfs_tags_show()
222 static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page) blk_mq_hw_sysfs_active_show() argument
224 return sprintf(page, "%u\n", atomic_read(&hctx->nr_active)); blk_mq_hw_sysfs_active_show()
227 static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) blk_mq_hw_sysfs_cpus_show() argument
234 for_each_cpu(i, hctx->cpumask) { blk_mq_hw_sysfs_cpus_show()
341 static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) blk_mq_unregister_hctx() argument
346 if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) blk_mq_unregister_hctx()
349 hctx_for_each_ctx(hctx, ctx, i) blk_mq_unregister_hctx()
352 kobject_del(&hctx->kobj); blk_mq_unregister_hctx()
355 static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) blk_mq_register_hctx() argument
357 struct request_queue *q = hctx->queue; blk_mq_register_hctx()
361 if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) blk_mq_register_hctx()
364 ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num); blk_mq_register_hctx()
368 hctx_for_each_ctx(hctx, ctx, i) { hctx_for_each_ctx()
369 ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); hctx_for_each_ctx()
380 struct blk_mq_hw_ctx *hctx; blk_mq_unregister_disk() local
384 queue_for_each_hw_ctx(q, hctx, i) { queue_for_each_hw_ctx()
385 blk_mq_unregister_hctx(hctx); queue_for_each_hw_ctx()
387 hctx_for_each_ctx(hctx, ctx, j) queue_for_each_hw_ctx()
390 kobject_put(&hctx->kobj); queue_for_each_hw_ctx()
402 struct blk_mq_hw_ctx *hctx; blk_mq_sysfs_init() local
408 queue_for_each_hw_ctx(q, hctx, i) blk_mq_sysfs_init()
409 kobject_init(&hctx->kobj, &blk_mq_hw_ktype); blk_mq_sysfs_init()
425 struct blk_mq_hw_ctx *hctx; blk_mq_register_disk() local
436 queue_for_each_hw_ctx(q, hctx, i) { queue_for_each_hw_ctx()
437 hctx->flags |= BLK_MQ_F_SYSFS_UP; queue_for_each_hw_ctx()
438 ret = blk_mq_register_hctx(hctx); queue_for_each_hw_ctx()
454 struct blk_mq_hw_ctx *hctx; blk_mq_sysfs_unregister() local
457 queue_for_each_hw_ctx(q, hctx, i) blk_mq_sysfs_unregister()
458 blk_mq_unregister_hctx(hctx); blk_mq_sysfs_unregister()
463 struct blk_mq_hw_ctx *hctx; blk_mq_sysfs_register() local
466 queue_for_each_hw_ctx(q, hctx, i) { queue_for_each_hw_ctx()
467 ret = blk_mq_register_hctx(hctx); queue_for_each_hw_ctx()
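For orientation: the show/store hits above all hang off the same kobject plumbing. The kobject registered per hardware queue by blk_mq_register_hctx() is converted back to its hctx with container_of(), and the matching per-attribute callback then receives the hctx plus the page buffer. A minimal sketch of that dispatch pattern; the attribute type name is invented, and the sysfs_lock/queue-dying checks of the real show path are omitted:

#include <linux/kobject.h>
#include <linux/errno.h>
#include <linux/blk-mq.h>

/* illustrative per-hctx attribute; not the exact 4.1 entry type */
struct example_hw_ctx_attr {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_hw_ctx *hctx, char *page);
};

static ssize_t example_hw_ctx_attr_show(struct kobject *kobj,
                                        struct attribute *attr, char *page)
{
        struct example_hw_ctx_attr *entry =
                container_of(attr, struct example_hw_ctx_attr, attr);
        /* the kobject is embedded in the hctx, so recover the hctx from it */
        struct blk_mq_hw_ctx *hctx =
                container_of(kobj, struct blk_mq_hw_ctx, kobj);

        if (!entry->show)
                return -EIO;
        return entry->show(hctx, page);         /* e.g. prints hctx->queued */
}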
blk-mq.c
35 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
40 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) blk_mq_hctx_has_pending() argument
44 for (i = 0; i < hctx->ctx_map.size; i++) blk_mq_hctx_has_pending()
45 if (hctx->ctx_map.map[i].word) blk_mq_hctx_has_pending()
51 static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx, get_bm() argument
54 return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word]; get_bm()
57 #define CTX_TO_BIT(hctx, ctx) \
58 ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
63 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, blk_mq_hctx_mark_pending() argument
66 struct blk_align_bitmap *bm = get_bm(hctx, ctx); blk_mq_hctx_mark_pending()
68 if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word)) blk_mq_hctx_mark_pending()
69 set_bit(CTX_TO_BIT(hctx, ctx), &bm->word); blk_mq_hctx_mark_pending()
72 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, blk_mq_hctx_clear_pending() argument
75 struct blk_align_bitmap *bm = get_bm(hctx, ctx); blk_mq_hctx_clear_pending()
77 clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word); blk_mq_hctx_clear_pending()
161 struct blk_mq_hw_ctx *hctx; blk_mq_wake_waiters() local
164 queue_for_each_hw_ctx(q, hctx, i) blk_mq_wake_waiters()
165 if (blk_mq_hw_queue_mapped(hctx)) blk_mq_wake_waiters()
166 blk_mq_tag_wakeup_all(hctx->tags, true); blk_mq_wake_waiters()
176 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) blk_mq_can_queue() argument
178 return blk_mq_has_free_tags(hctx->tags); blk_mq_can_queue()
238 rq = data->hctx->tags->rqs[tag]; __blk_mq_alloc_request()
240 if (blk_mq_tag_busy(data->hctx)) { __blk_mq_alloc_request()
242 atomic_inc(&data->hctx->nr_active); __blk_mq_alloc_request()
257 struct blk_mq_hw_ctx *hctx; blk_mq_alloc_request() local
267 hctx = q->mq_ops->map_queue(q, ctx->cpu); blk_mq_alloc_request()
269 reserved, ctx, hctx); blk_mq_alloc_request()
273 __blk_mq_run_hw_queue(hctx); blk_mq_alloc_request()
277 hctx = q->mq_ops->map_queue(q, ctx->cpu); blk_mq_alloc_request()
279 hctx); blk_mq_alloc_request()
292 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, __blk_mq_free_request() argument
299 atomic_dec(&hctx->nr_active); __blk_mq_free_request()
303 blk_mq_put_tag(hctx, tag, &ctx->last_tag); __blk_mq_free_request()
307 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq) blk_mq_free_hctx_request() argument
312 __blk_mq_free_request(hctx, ctx, rq); blk_mq_free_hctx_request()
319 struct blk_mq_hw_ctx *hctx; blk_mq_free_request() local
322 hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu); blk_mq_free_request()
323 blk_mq_free_hctx_request(hctx, rq); blk_mq_free_request()
627 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, blk_mq_check_expired() argument
662 struct blk_mq_hw_ctx *hctx; blk_mq_rq_timer() local
665 queue_for_each_hw_ctx(q, hctx, i) { queue_for_each_hw_ctx()
670 if (!blk_mq_hw_queue_mapped(hctx)) queue_for_each_hw_ctx()
673 blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data); queue_for_each_hw_ctx()
680 queue_for_each_hw_ctx(q, hctx, i) { queue_for_each_hw_ctx()
681 /* the hctx may be unmapped, so check it here */ queue_for_each_hw_ctx()
682 if (blk_mq_hw_queue_mapped(hctx)) queue_for_each_hw_ctx()
683 blk_mq_tag_idle(hctx); queue_for_each_hw_ctx()
731 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) flush_busy_ctxs() argument
736 for (i = 0; i < hctx->ctx_map.size; i++) { flush_busy_ctxs()
737 struct blk_align_bitmap *bm = &hctx->ctx_map.map[i]; flush_busy_ctxs()
744 off = i * hctx->ctx_map.bits_per_word; flush_busy_ctxs()
750 ctx = hctx->ctxs[bit + off]; flush_busy_ctxs()
765 * items on the hctx->dispatch list. Ignore that for now.
767 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) __blk_mq_run_hw_queue() argument
769 struct request_queue *q = hctx->queue; __blk_mq_run_hw_queue()
776 WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)); __blk_mq_run_hw_queue()
778 if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state))) __blk_mq_run_hw_queue()
781 hctx->run++; __blk_mq_run_hw_queue()
786 flush_busy_ctxs(hctx, &rq_list); __blk_mq_run_hw_queue()
792 if (!list_empty_careful(&hctx->dispatch)) { __blk_mq_run_hw_queue()
793 spin_lock(&hctx->lock); __blk_mq_run_hw_queue()
794 if (!list_empty(&hctx->dispatch)) __blk_mq_run_hw_queue()
795 list_splice_init(&hctx->dispatch, &rq_list); __blk_mq_run_hw_queue()
796 spin_unlock(&hctx->lock); __blk_mq_run_hw_queue()
820 ret = q->mq_ops->queue_rq(hctx, &bd); __blk_mq_run_hw_queue()
849 hctx->dispatched[0]++; __blk_mq_run_hw_queue()
851 hctx->dispatched[ilog2(queued) + 1]++; __blk_mq_run_hw_queue()
854 * Any items that need requeuing? Stuff them into hctx->dispatch, __blk_mq_run_hw_queue()
858 spin_lock(&hctx->lock); __blk_mq_run_hw_queue()
859 list_splice(&rq_list, &hctx->dispatch); __blk_mq_run_hw_queue()
860 spin_unlock(&hctx->lock); __blk_mq_run_hw_queue()
865 * requests in rq_list aren't added into hctx->dispatch yet, __blk_mq_run_hw_queue()
870 blk_mq_run_hw_queue(hctx, true); __blk_mq_run_hw_queue()
880 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) blk_mq_hctx_next_cpu() argument
882 if (hctx->queue->nr_hw_queues == 1) blk_mq_hctx_next_cpu()
885 if (--hctx->next_cpu_batch <= 0) { blk_mq_hctx_next_cpu()
886 int cpu = hctx->next_cpu, next_cpu; blk_mq_hctx_next_cpu()
888 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask); blk_mq_hctx_next_cpu()
890 next_cpu = cpumask_first(hctx->cpumask); blk_mq_hctx_next_cpu()
892 hctx->next_cpu = next_cpu; blk_mq_hctx_next_cpu()
893 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; blk_mq_hctx_next_cpu()
898 return hctx->next_cpu; blk_mq_hctx_next_cpu()
901 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) blk_mq_run_hw_queue() argument
903 if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) || blk_mq_run_hw_queue()
904 !blk_mq_hw_queue_mapped(hctx))) blk_mq_run_hw_queue()
909 if (cpumask_test_cpu(cpu, hctx->cpumask)) { blk_mq_run_hw_queue()
910 __blk_mq_run_hw_queue(hctx); blk_mq_run_hw_queue()
918 kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx), blk_mq_run_hw_queue()
919 &hctx->run_work, 0); blk_mq_run_hw_queue()
924 struct blk_mq_hw_ctx *hctx; blk_mq_run_hw_queues() local
927 queue_for_each_hw_ctx(q, hctx, i) { queue_for_each_hw_ctx()
928 if ((!blk_mq_hctx_has_pending(hctx) && queue_for_each_hw_ctx()
929 list_empty_careful(&hctx->dispatch)) || queue_for_each_hw_ctx()
930 test_bit(BLK_MQ_S_STOPPED, &hctx->state)) queue_for_each_hw_ctx()
933 blk_mq_run_hw_queue(hctx, async); queue_for_each_hw_ctx()
938 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) blk_mq_stop_hw_queue() argument
940 cancel_delayed_work(&hctx->run_work); blk_mq_stop_hw_queue()
941 cancel_delayed_work(&hctx->delay_work); blk_mq_stop_hw_queue()
942 set_bit(BLK_MQ_S_STOPPED, &hctx->state); blk_mq_stop_hw_queue()
948 struct blk_mq_hw_ctx *hctx; blk_mq_stop_hw_queues() local
951 queue_for_each_hw_ctx(q, hctx, i) blk_mq_stop_hw_queues()
952 blk_mq_stop_hw_queue(hctx); blk_mq_stop_hw_queues()
956 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) blk_mq_start_hw_queue() argument
958 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); blk_mq_start_hw_queue()
960 blk_mq_run_hw_queue(hctx, false); blk_mq_start_hw_queue()
966 struct blk_mq_hw_ctx *hctx; blk_mq_start_hw_queues() local
969 queue_for_each_hw_ctx(q, hctx, i) blk_mq_start_hw_queues()
970 blk_mq_start_hw_queue(hctx); blk_mq_start_hw_queues()
976 struct blk_mq_hw_ctx *hctx; blk_mq_start_stopped_hw_queues() local
979 queue_for_each_hw_ctx(q, hctx, i) { queue_for_each_hw_ctx()
980 if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state)) queue_for_each_hw_ctx()
983 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); queue_for_each_hw_ctx()
984 blk_mq_run_hw_queue(hctx, async); queue_for_each_hw_ctx()
991 struct blk_mq_hw_ctx *hctx; blk_mq_run_work_fn() local
993 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); blk_mq_run_work_fn()
995 __blk_mq_run_hw_queue(hctx); blk_mq_run_work_fn()
1000 struct blk_mq_hw_ctx *hctx; blk_mq_delay_work_fn() local
1002 hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work); blk_mq_delay_work_fn()
1004 if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state)) blk_mq_delay_work_fn()
1005 __blk_mq_run_hw_queue(hctx); blk_mq_delay_work_fn()
1008 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) blk_mq_delay_queue() argument
1010 if (unlikely(!blk_mq_hw_queue_mapped(hctx))) blk_mq_delay_queue()
1013 kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx), blk_mq_delay_queue()
1014 &hctx->delay_work, msecs_to_jiffies(msecs)); blk_mq_delay_queue()
1018 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, __blk_mq_insert_request() argument
1023 trace_block_rq_insert(hctx->queue, rq); __blk_mq_insert_request()
1030 blk_mq_hctx_mark_pending(hctx, ctx); __blk_mq_insert_request()
1037 struct blk_mq_hw_ctx *hctx; blk_mq_insert_request() local
1044 hctx = q->mq_ops->map_queue(q, ctx->cpu); blk_mq_insert_request()
1047 __blk_mq_insert_request(hctx, rq, at_head); blk_mq_insert_request()
1051 blk_mq_run_hw_queue(hctx, async); blk_mq_insert_request()
1063 struct blk_mq_hw_ctx *hctx; blk_mq_insert_requests() local
1072 hctx = q->mq_ops->map_queue(q, ctx->cpu); blk_mq_insert_requests()
1085 __blk_mq_insert_request(hctx, rq, false); blk_mq_insert_requests()
1089 blk_mq_run_hw_queue(hctx, from_schedule); blk_mq_insert_requests()
1158 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx) hctx_allow_merges() argument
1160 return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) && hctx_allow_merges()
1161 !blk_queue_nomerges(hctx->queue); hctx_allow_merges()
1164 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx, blk_mq_merge_queue_io() argument
1168 if (!hctx_allow_merges(hctx)) { blk_mq_merge_queue_io()
1172 __blk_mq_insert_request(hctx, rq, false); blk_mq_merge_queue_io()
1176 struct request_queue *q = hctx->queue; blk_mq_merge_queue_io()
1185 __blk_mq_free_request(hctx, ctx, rq); blk_mq_merge_queue_io()
1191 struct blk_mq_hw_ctx *hctx; member in struct:blk_map_ctx
1199 struct blk_mq_hw_ctx *hctx; blk_mq_map_request() local
1211 hctx = q->mq_ops->map_queue(q, ctx->cpu); blk_mq_map_request()
1218 hctx); blk_mq_map_request()
1221 __blk_mq_run_hw_queue(hctx); blk_mq_map_request()
1226 hctx = q->mq_ops->map_queue(q, ctx->cpu); blk_mq_map_request()
1228 __GFP_WAIT|GFP_ATOMIC, false, ctx, hctx); blk_mq_map_request()
1231 hctx = alloc_data.hctx; blk_mq_map_request()
1234 hctx->queued++; blk_mq_map_request()
1235 data->hctx = hctx; blk_mq_map_request()
1242 * but will attempt to bypass the hctx queueing if we can go straight to
1274 if (is_sync && !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) { blk_mq_make_request()
1289 ret = q->mq_ops->queue_rq(data.hctx, &bd); blk_mq_make_request()
1303 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) { blk_mq_make_request()
1311 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua); blk_mq_make_request()
1378 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) { blk_sq_make_request()
1386 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua); blk_sq_make_request()
1545 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu) blk_mq_hctx_cpu_offline() argument
1547 struct request_queue *q = hctx->queue; blk_mq_hctx_cpu_offline()
1559 blk_mq_hctx_clear_pending(hctx, ctx); blk_mq_hctx_cpu_offline()
1577 hctx = q->mq_ops->map_queue(q, ctx->cpu); blk_mq_hctx_cpu_offline()
1578 blk_mq_hctx_mark_pending(hctx, ctx); blk_mq_hctx_cpu_offline()
1582 blk_mq_run_hw_queue(hctx, true); blk_mq_hctx_cpu_offline()
1590 struct blk_mq_hw_ctx *hctx = data; blk_mq_hctx_notify() local
1593 return blk_mq_hctx_cpu_offline(hctx, cpu); blk_mq_hctx_notify()
1603 /* hctx->ctxs will be freed in queue's release handler */ blk_mq_exit_hctx()
1606 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) blk_mq_exit_hctx()
1610 blk_mq_tag_idle(hctx); blk_mq_exit_hctx()
1614 hctx->fq->flush_rq, hctx_idx, blk_mq_exit_hctx()
1618 set->ops->exit_hctx(hctx, hctx_idx); blk_mq_exit_hctx()
1620 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); blk_mq_exit_hctx()
1621 blk_free_flush_queue(hctx->fq); blk_mq_exit_hctx()
1622 blk_mq_free_bitmap(&hctx->ctx_map); blk_mq_exit_hctx()
1628 struct blk_mq_hw_ctx *hctx; blk_mq_exit_hw_queues() local
1631 queue_for_each_hw_ctx(q, hctx, i) { queue_for_each_hw_ctx()
1634 blk_mq_exit_hctx(q, set, hctx, i); queue_for_each_hw_ctx()
1641 struct blk_mq_hw_ctx *hctx; blk_mq_free_hw_queues() local
1644 queue_for_each_hw_ctx(q, hctx, i) blk_mq_free_hw_queues()
1645 free_cpumask_var(hctx->cpumask); blk_mq_free_hw_queues()
1650 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) blk_mq_init_hctx()
1655 node = hctx->numa_node; blk_mq_init_hctx()
1657 node = hctx->numa_node = set->numa_node; blk_mq_init_hctx()
1659 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); blk_mq_init_hctx()
1660 INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn); blk_mq_init_hctx()
1661 spin_lock_init(&hctx->lock); blk_mq_init_hctx()
1662 INIT_LIST_HEAD(&hctx->dispatch); blk_mq_init_hctx()
1663 hctx->queue = q; blk_mq_init_hctx()
1664 hctx->queue_num = hctx_idx; blk_mq_init_hctx()
1665 hctx->flags = set->flags; blk_mq_init_hctx()
1667 blk_mq_init_cpu_notifier(&hctx->cpu_notifier, blk_mq_init_hctx()
1668 blk_mq_hctx_notify, hctx); blk_mq_init_hctx()
1669 blk_mq_register_cpu_notifier(&hctx->cpu_notifier); blk_mq_init_hctx()
1671 hctx->tags = set->tags[hctx_idx]; blk_mq_init_hctx()
1677 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *), blk_mq_init_hctx()
1679 if (!hctx->ctxs) blk_mq_init_hctx()
1682 if (blk_mq_alloc_bitmap(&hctx->ctx_map, node)) blk_mq_init_hctx()
1685 hctx->nr_ctx = 0; blk_mq_init_hctx()
1688 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) blk_mq_init_hctx()
1691 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size); blk_mq_init_hctx()
1692 if (!hctx->fq) blk_mq_init_hctx()
1697 hctx->fq->flush_rq, hctx_idx, blk_mq_init_hctx()
1704 kfree(hctx->fq); blk_mq_init_hctx()
1707 set->ops->exit_hctx(hctx, hctx_idx); blk_mq_init_hctx()
1709 blk_mq_free_bitmap(&hctx->ctx_map); blk_mq_init_hctx()
1711 kfree(hctx->ctxs); blk_mq_init_hctx()
1713 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); blk_mq_init_hctx()
1721 struct blk_mq_hw_ctx *hctx; blk_mq_init_hw_queues() local
1727 queue_for_each_hw_ctx(q, hctx, i) { queue_for_each_hw_ctx()
1728 if (blk_mq_init_hctx(q, set, hctx, i)) queue_for_each_hw_ctx()
1750 struct blk_mq_hw_ctx *hctx; for_each_possible_cpu() local
1758 /* If the cpu isn't online, the cpu is mapped to first hctx */ for_each_possible_cpu()
1762 hctx = q->mq_ops->map_queue(q, i); for_each_possible_cpu()
1768 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) for_each_possible_cpu()
1769 hctx->numa_node = cpu_to_node(i); for_each_possible_cpu()
1776 struct blk_mq_hw_ctx *hctx; blk_mq_map_swqueue() local
1780 queue_for_each_hw_ctx(q, hctx, i) { queue_for_each_hw_ctx()
1781 cpumask_clear(hctx->cpumask); queue_for_each_hw_ctx()
1782 hctx->nr_ctx = 0; queue_for_each_hw_ctx()
1789 /* If the cpu isn't online, the cpu is mapped to first hctx */ queue_for_each_ctx()
1793 hctx = q->mq_ops->map_queue(q, i); queue_for_each_ctx()
1794 cpumask_set_cpu(i, hctx->cpumask); queue_for_each_ctx()
1795 ctx->index_hw = hctx->nr_ctx; queue_for_each_ctx()
1796 hctx->ctxs[hctx->nr_ctx++] = ctx; queue_for_each_ctx()
1799 queue_for_each_hw_ctx(q, hctx, i) { queue_for_each_hw_ctx()
1800 struct blk_mq_ctxmap *map = &hctx->ctx_map; queue_for_each_hw_ctx()
1806 if (!hctx->nr_ctx) { queue_for_each_hw_ctx()
1811 hctx->tags = NULL; queue_for_each_hw_ctx()
1818 hctx->tags = set->tags[i]; queue_for_each_hw_ctx()
1819 WARN_ON(!hctx->tags); queue_for_each_hw_ctx()
1826 map->size = DIV_ROUND_UP(hctx->nr_ctx, map->bits_per_word); queue_for_each_hw_ctx()
1831 hctx->next_cpu = cpumask_first(hctx->cpumask); queue_for_each_hw_ctx()
1832 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; queue_for_each_hw_ctx()
1838 struct blk_mq_hw_ctx *hctx; blk_mq_update_tag_set_depth() local
1851 queue_for_each_hw_ctx(q, hctx, i) { queue_for_each_hw_ctx()
1853 hctx->flags |= BLK_MQ_F_TAG_SHARED; queue_for_each_hw_ctx()
1855 hctx->flags &= ~BLK_MQ_F_TAG_SHARED; queue_for_each_hw_ctx()
1890 struct blk_mq_hw_ctx *hctx; blk_mq_release() local
1893 /* hctx kobj stays in hctx */ queue_for_each_hw_ctx()
1894 queue_for_each_hw_ctx(q, hctx, i) { queue_for_each_hw_ctx()
1895 if (!hctx) queue_for_each_hw_ctx()
1897 kfree(hctx->ctxs); queue_for_each_hw_ctx()
1898 kfree(hctx); queue_for_each_hw_ctx()
2067 * we should change hctx numa_node according to new topology (this blk_mq_queue_reinit()
2249 struct blk_mq_hw_ctx *hctx; blk_mq_update_nr_requests() local
2256 queue_for_each_hw_ctx(q, hctx, i) { queue_for_each_hw_ctx()
2257 ret = blk_mq_tag_update_depth(hctx->tags, nr); queue_for_each_hw_ctx()
1604 blk_mq_exit_hctx(struct request_queue *q, struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) blk_mq_exit_hctx() argument
1648 blk_mq_init_hctx(struct request_queue *q, struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) blk_mq_init_hctx() argument
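The recurring flow in the blk-mq.c hits is: map the submitting CPU's software context to its hardware context through q->mq_ops->map_queue() (the 4.1-era mapping hook), queue the request on that ctx, mark the ctx pending in hctx->ctx_map, then kick the hardware queue. A condensed, illustrative sketch of that path as it would sit inside block/blk-mq.c itself (tracing and the at_head variant omitted; not a drop-in replacement for __blk_mq_insert_request()):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include "blk-mq.h"             /* private block/ header: full struct blk_mq_ctx */

static void example_insert_and_run(struct request_queue *q, struct request *rq,
                                   bool async)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);

        spin_lock(&ctx->lock);
        list_add_tail(&rq->queuelist, &ctx->rq_list);
        /* set this ctx's bit in hctx->ctx_map so flush_busy_ctxs() will find it */
        blk_mq_hctx_mark_pending(hctx, ctx);
        spin_unlock(&ctx->lock);

        /* run on a mapped CPU now, or punt to kblockd when async */
        blk_mq_run_hw_queue(hctx, async);
}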
blk-mq-tag.c
61 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) __blk_mq_tag_busy() argument
63 if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) && __blk_mq_tag_busy()
64 !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) __blk_mq_tag_busy()
65 atomic_inc(&hctx->tags->active_queues); __blk_mq_tag_busy()
100 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) __blk_mq_tag_idle() argument
102 struct blk_mq_tags *tags = hctx->tags; __blk_mq_tag_idle()
104 if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) __blk_mq_tag_idle()
116 static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, hctx_may_queue() argument
121 if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED)) hctx_may_queue()
123 if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) hctx_may_queue()
132 users = atomic_read(&hctx->tags->active_queues); hctx_may_queue()
140 return atomic_read(&hctx->nr_active) < depth; hctx_may_queue()
187 static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt, __bt_get() argument
193 if (!hctx_may_queue(hctx, bt)) __bt_get()
240 struct blk_mq_hw_ctx *hctx) bt_wait_ptr()
245 if (!hctx) bt_wait_ptr()
248 wait_index = atomic_read(&hctx->wait_index); bt_wait_ptr()
250 bt_index_atomic_inc(&hctx->wait_index); bt_wait_ptr()
256 struct blk_mq_hw_ctx *hctx, bt_get()
263 tag = __bt_get(hctx, bt, last_tag, tags); bt_get()
270 bs = bt_wait_ptr(bt, hctx); bt_get()
274 tag = __bt_get(hctx, bt, last_tag, tags); bt_get()
281 * some to complete. Note that hctx can be NULL here for bt_get()
284 if (hctx) bt_get()
285 blk_mq_run_hw_queue(hctx, false); bt_get()
291 tag = __bt_get(hctx, bt, last_tag, tags); bt_get()
300 data->hctx = data->q->mq_ops->map_queue(data->q, bt_get()
303 bt = &data->hctx->tags->breserved_tags; bt_get()
306 hctx = data->hctx; bt_get()
307 bt = &hctx->tags->bitmap_tags; bt_get()
310 bs = bt_wait_ptr(bt, hctx); bt_get()
321 tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx, __blk_mq_get_tag()
322 &data->ctx->last_tag, data->hctx->tags); __blk_mq_get_tag()
324 return tag + data->hctx->tags->nr_reserved_tags; __blk_mq_get_tag()
333 if (unlikely(!data->hctx->tags->nr_reserved_tags)) { __blk_mq_get_reserved_tag()
338 tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero, __blk_mq_get_reserved_tag()
339 data->hctx->tags); __blk_mq_get_reserved_tag()
401 void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, blk_mq_put_tag() argument
404 struct blk_mq_tags *tags = hctx->tags; blk_mq_put_tag()
419 static void bt_for_each(struct blk_mq_hw_ctx *hctx, bt_for_each() argument
432 rq = blk_mq_tag_to_rq(hctx->tags, off + bit); bt_for_each()
433 if (rq->q == hctx->queue) bt_for_each()
434 fn(hctx, rq, data, reserved); bt_for_each()
441 void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, blk_mq_tag_busy_iter() argument
444 struct blk_mq_tags *tags = hctx->tags; blk_mq_tag_busy_iter()
447 bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true); blk_mq_tag_busy_iter()
448 bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv, blk_mq_tag_busy_iter()
633 struct blk_mq_hw_ctx *hctx; blk_mq_unique_tag() local
637 hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu); blk_mq_unique_tag()
638 hwq = hctx->queue_num; blk_mq_unique_tag()
239 bt_wait_ptr(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx) bt_wait_ptr() argument
254 bt_get(struct blk_mq_alloc_data *data, struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, struct blk_mq_tags *tags) bt_get() argument
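blk_mq_tag_busy_iter() above is the hook that both the timeout path (blk_mq_rq_timer()) and drivers such as nvme use to visit every in-flight request of one hardware queue. A hedged sketch of a caller: the callback signature matches the 4.1 busy_iter_fn, while the function names and the printout are made up:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/printk.h>

/* busy_iter_fn: invoked once per in-flight request of this hw queue */
static void example_report_rq(struct blk_mq_hw_ctx *hctx, struct request *rq,
                              void *priv, bool reserved)
{
        pr_info("hw queue %u: tag %d still in flight%s\n",
                hctx->queue_num, rq->tag, reserved ? " (reserved)" : "");
}

static void example_report_busy(struct blk_mq_hw_ctx *hctx)
{
        /* hctx->tags may be NULL for an unmapped hw queue, as nvme checks */
        if (hctx->tags)
                blk_mq_tag_busy_iter(hctx, example_report_rq, NULL);
}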
blk-mq-tag.h
54 extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag);
75 static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) blk_mq_tag_busy() argument
77 if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) blk_mq_tag_busy()
80 return __blk_mq_tag_busy(hctx); blk_mq_tag_busy()
83 static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) blk_mq_tag_idle() argument
85 if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) blk_mq_tag_idle()
88 __blk_mq_tag_idle(hctx); blk_mq_tag_idle()
blk-mq.h
29 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
106 struct blk_mq_hw_ctx *hctx; member in struct:blk_mq_alloc_data
112 struct blk_mq_hw_ctx *hctx) blk_mq_set_alloc_data()
118 data->hctx = hctx; blk_mq_set_alloc_data()
121 static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx) blk_mq_hw_queue_mapped() argument
123 return hctx->nr_ctx && hctx->tags; blk_mq_hw_queue_mapped()
109 blk_mq_set_alloc_data(struct blk_mq_alloc_data *data, struct request_queue *q, gfp_t gfp, bool reserved, struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx) blk_mq_set_alloc_data() argument
blk-mq-cpumap.c
92 /* If cpus are offline, map them to first hctx */ blk_mq_make_queue_map()
blk.h
36 struct blk_mq_hw_ctx *hctx; blk_get_flush_queue() local
41 hctx = q->mq_ops->map_queue(q, ctx->cpu); blk_get_flush_queue()
43 return hctx->fq; blk_get_flush_queue()
blk-flush.c
342 struct blk_mq_hw_ctx *hctx; mq_flush_data_end_io() local
347 hctx = q->mq_ops->map_queue(q, ctx->cpu); mq_flush_data_end_io()
355 blk_mq_run_hw_queue(hctx, true); mq_flush_data_end_io()
blk-core.c
248 struct blk_mq_hw_ctx *hctx; blk_sync_queue() local
251 queue_for_each_hw_ctx(q, hctx, i) { queue_for_each_hw_ctx()
252 cancel_delayed_work_sync(&hctx->run_work); queue_for_each_hw_ctx()
253 cancel_delayed_work_sync(&hctx->delay_work); queue_for_each_hw_ctx()
/linux-4.1.27/include/linux/
blk-mq.h
218 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
219 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
224 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
225 void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
244 #define queue_for_each_hw_ctx(q, hctx, i) \
246 ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
252 #define hctx_for_each_ctx(hctx, ctx, i) \
253 for ((i) = 0; (i) < (hctx)->nr_ctx && \
254 ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
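The two iterator macros at the end of this header are what every *_hw_queues() helper in the hits above is built on: the outer loop walks q->queue_hw_ctx[], the inner loop walks hctx->ctxs[]. A small illustrative walker, assuming it sits under block/ so the private blk-mq.h with the full struct blk_mq_ctx is visible; the printout is hypothetical:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/printk.h>
#include "blk-mq.h"             /* private block/ header for struct blk_mq_ctx */

static void example_dump_mq_mapping(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        int i, j;

        queue_for_each_hw_ctx(q, hctx, i) {
                pr_info("hctx %u: %u software ctxs\n",
                        hctx->queue_num, hctx->nr_ctx);

                hctx_for_each_ctx(hctx, ctx, j)
                        pr_info("  ctx for cpu %u\n", ctx->cpu);
        }
}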
/linux-4.1.27/net/dccp/ccids/
ccid3.h
116 struct ccid3_hc_tx_sock *hctx = ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid); ccid3_hc_tx_sk() local
117 BUG_ON(hctx == NULL); ccid3_hc_tx_sk()
118 return hctx; ccid3_hc_tx_sk()
/linux-4.1.27/drivers/block/
nvme-core.c
117 struct blk_mq_hw_ctx *hctx; member in struct:nvme_queue
179 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, nvme_admin_init_hctx() argument
185 WARN_ON(nvmeq->hctx); nvme_admin_init_hctx()
186 nvmeq->hctx = hctx; nvme_admin_init_hctx()
187 hctx->driver_data = nvmeq; nvme_admin_init_hctx()
204 static void nvme_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) nvme_exit_hctx() argument
206 struct nvme_queue *nvmeq = hctx->driver_data; nvme_exit_hctx()
208 nvmeq->hctx = NULL; nvme_exit_hctx()
211 static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, nvme_init_hctx() argument
218 if (!nvmeq->hctx) nvme_init_hctx()
219 nvmeq->hctx = hctx; nvme_init_hctx()
223 WARN_ON(nvmeq->hctx->tags != hctx->tags); nvme_init_hctx()
225 hctx->driver_data = nvmeq; nvme_init_hctx()
323 blk_mq_free_hctx_request(nvmeq->hctx, req); abort_completion()
336 blk_mq_free_hctx_request(nvmeq->hctx, cmdinfo->req); async_completion()
342 struct blk_mq_hw_ctx *hctx = nvmeq->hctx; get_cmd_from_tag() local
343 struct request *req = blk_mq_tag_to_rq(hctx->tags, tag); get_cmd_from_tag()
813 static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, nvme_queue_rq() argument
816 struct nvme_ns *ns = hctx->queue->queuedata; nvme_queue_rq()
817 struct nvme_queue *nvmeq = hctx->driver_data; nvme_queue_rq()
950 static int nvme_admin_queue_rq(struct blk_mq_hw_ctx *hctx, nvme_admin_queue_rq() argument
1039 blk_mq_free_hctx_request(nvmeq->hctx, req); nvme_submit_async_admin_req()
1260 static void nvme_cancel_queue_ios(struct blk_mq_hw_ctx *hctx, nvme_cancel_queue_ios() argument
1358 struct blk_mq_hw_ctx *hctx = nvmeq->hctx; nvme_clear_queue() local
1361 if (hctx && hctx->tags) nvme_clear_queue()
1362 blk_mq_tag_busy_iter(hctx, nvme_cancel_queue_ios, nvmeq); nvme_clear_queue()
2808 if (!nvmeq->hctx) nvme_set_irq_hints()
2812 nvmeq->hctx->cpumask); nvme_set_irq_hints()
null_blk.c
354 static int null_queue_rq(struct blk_mq_hw_ctx *hctx, null_queue_rq() argument
360 cmd->nq = hctx->driver_data; null_queue_rq()
377 static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, null_init_hctx() argument
383 hctx->driver_data = nq; null_init_hctx()
virtio_blk.c
160 static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, virtio_queue_rq() argument
163 struct virtio_blk *vblk = hctx->queue->queuedata; virtio_queue_rq()
168 int qid = hctx->queue_num; virtio_queue_rq()
204 num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg); virtio_queue_rq()
216 blk_mq_stop_hw_queue(hctx); virtio_queue_rq()
loop.c
1430 static int loop_queue_rq(struct blk_mq_hw_ctx *hctx, loop_queue_rq() argument
rbd.c
3448 static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx, rbd_queue_rq() argument
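All of the block-driver hits in this group share the same ->queue_rq() shape: the hctx hands the driver its per-queue data (hctx->driver_data set up in ->init_hctx(), or hctx->queue->queuedata), the request is started, and the hardware queue is stopped when the device runs out of resources, as the virtio_blk and scsi hits show. A hedged sketch for an imaginary driver; struct example_dev and example_dev_submit() are made up, while the blk-mq calls and return codes are the 4.1-era ones:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

struct example_dev;                                     /* hypothetical device */
static bool example_dev_submit(struct example_dev *dev,
                               struct request *rq);     /* hypothetical helper */

static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
                            const struct blk_mq_queue_data *bd)
{
        struct example_dev *dev = hctx->driver_data;    /* stored by ->init_hctx() */
        struct request *rq = bd->rq;

        blk_mq_start_request(rq);

        if (!example_dev_submit(dev, rq)) {
                /* device full: park this hw queue until a completion restarts it */
                blk_mq_stop_hw_queue(hctx);
                return BLK_MQ_RQ_QUEUE_BUSY;
        }

        return BLK_MQ_RQ_QUEUE_OK;
}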
/linux-4.1.27/crypto/
mcryptd.c
577 struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst); mcryptd_free() local
581 crypto_drop_shash(&hctx->spawn); mcryptd_free()
cryptd.c
826 struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst); cryptd_free() local
831 crypto_drop_shash(&hctx->spawn); cryptd_free()
/linux-4.1.27/drivers/block/mtip32xx/
mtip32xx.c
192 * Once we add support for one hctx per mtip group, this will change a bit
197 struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0]; mtip_rq_from_tag() local
199 return blk_mq_tag_to_rq(hctx->tags, tag); mtip_rq_from_tag()
2380 struct blk_mq_hw_ctx *hctx) mtip_hw_submit_io()
3694 static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq) mtip_submit_request() argument
3696 struct driver_data *dd = hctx->queue->queuedata; mtip_submit_request()
3727 nents = blk_rq_map_sg(hctx->queue, rq, cmd->sg); mtip_submit_request()
3730 mtip_hw_submit_io(dd, rq, cmd, nents, hctx); mtip_submit_request()
3734 static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx, mtip_check_unal_depth() argument
3737 struct driver_data *dd = hctx->queue->queuedata; mtip_check_unal_depth()
3758 static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, mtip_queue_rq() argument
3764 if (unlikely(mtip_check_unal_depth(hctx, rq))) mtip_queue_rq()
3769 ret = mtip_submit_request(hctx, rq); mtip_queue_rq()
2378 mtip_hw_submit_io(struct driver_data *dd, struct request *rq, struct mtip_cmd *command, int nents, struct blk_mq_hw_ctx *hctx) mtip_hw_submit_io() argument
/linux-4.1.27/drivers/mtd/ubi/
block.c
318 static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx, ubiblock_queue_rq() argument
322 struct ubiblock *dev = hctx->queue->queuedata; ubiblock_queue_rq()
/linux-4.1.27/drivers/scsi/
scsi_lib.c
1966 static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, scsi_queue_rq() argument
2031 blk_mq_stop_hw_queue(hctx); scsi_queue_rq()
2034 blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY); scsi_queue_rq()
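scsi_lib.c pairs the two calls above: blk_mq_stop_hw_queue() parks the hardware queue, and blk_mq_delay_queue() arms hctx->delay_work so the queue re-runs itself after a delay instead of waiting for a completion to restart it. A minimal illustrative pairing (the real scsi_queue_rq() applies extra device-busy checks before arming the delay):

#include <linux/blk-mq.h>

/* park a hw queue and schedule it to run again in roughly msecs milliseconds */
static int example_requeue_later(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
        blk_mq_stop_hw_queue(hctx);
        blk_mq_delay_queue(hctx, msecs);        /* delay_work clears STOPPED and re-runs */
        return BLK_MQ_RQ_QUEUE_BUSY;
}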
/linux-4.1.27/drivers/md/
dm.c
2705 static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, dm_mq_queue_rq() argument

Completed in 553 milliseconds