hctx 195 arch/um/drivers/ubd_kern.c static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 1312 arch/um/drivers/ubd_kern.c static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req, hctx 1315 arch/um/drivers/ubd_kern.c struct ubd *dev = hctx->queue->queuedata; hctx 1359 arch/um/drivers/ubd_kern.c static int queue_rw_req(struct blk_mq_hw_ctx *hctx, struct request *req) hctx 1367 arch/um/drivers/ubd_kern.c ret = ubd_queue_one_vec(hctx, req, off, &bvec); hctx 1375 arch/um/drivers/ubd_kern.c static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 1378 arch/um/drivers/ubd_kern.c struct ubd *ubd_dev = hctx->queue->queuedata; hctx 1389 arch/um/drivers/ubd_kern.c ret = ubd_queue_one_vec(hctx, req, 0, NULL); hctx 1393 arch/um/drivers/ubd_kern.c ret = queue_rw_req(hctx, req); hctx 1397 arch/um/drivers/ubd_kern.c ret = ubd_queue_one_vec(hctx, req, (u64)blk_rq_pos(req) << 9, NULL); hctx 2213 block/bfq-iosched.c static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio, hctx 2216 block/bfq-iosched.c struct request_queue *q = hctx->queue; hctx 4639 block/bfq-iosched.c static bool bfq_has_work(struct blk_mq_hw_ctx *hctx) hctx 4641 block/bfq-iosched.c struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; hctx 4651 block/bfq-iosched.c static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) hctx 4653 block/bfq-iosched.c struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; hctx 4791 block/bfq-iosched.c static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) hctx 4793 block/bfq-iosched.c struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; hctx 4803 block/bfq-iosched.c rq = __bfq_dispatch_request(hctx); hctx 4810 block/bfq-iosched.c bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue, hctx 5488 block/bfq-iosched.c static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, hctx 5491 block/bfq-iosched.c struct request_queue *q = hctx->queue; hctx 5543 block/bfq-iosched.c static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx, hctx 5551 block/bfq-iosched.c bfq_insert_request(hctx, rq, at_head); hctx 6362 block/bfq-iosched.c static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx) hctx 6364 block/bfq-iosched.c struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; hctx 6365 block/bfq-iosched.c struct blk_mq_tags *tags = hctx->sched_tags; hctx 6372 block/bfq-iosched.c static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index) hctx 6374 block/bfq-iosched.c bfq_depth_updated(hctx); hctx 214 block/blk-flush.c struct blk_mq_hw_ctx *hctx; hctx 228 block/blk-flush.c hctx = flush_rq->mq_hctx; hctx 230 block/blk-flush.c blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq); hctx 330 block/blk-flush.c struct blk_mq_hw_ctx *hctx = rq->mq_hctx; hctx 348 block/blk-flush.c blk_mq_sched_restart(hctx); hctx 221 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx = data; hctx 223 block/blk-mq-debugfs.c blk_flags_show(m, hctx->state, hctx_state_name, hctx 247 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx = data; hctx 248 block/blk-mq-debugfs.c const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags); hctx 258 block/blk-mq-debugfs.c hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy), hctx 358 block/blk-mq-debugfs.c __acquires(&hctx->lock) hctx 360 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx = m->private; hctx 362 block/blk-mq-debugfs.c spin_lock(&hctx->lock); hctx 363 block/blk-mq-debugfs.c return seq_list_start(&hctx->dispatch, *pos); hctx 368 
block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx = m->private; hctx 370 block/blk-mq-debugfs.c return seq_list_next(v, &hctx->dispatch, pos); hctx 374 block/blk-mq-debugfs.c __releases(&hctx->lock) hctx 376 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx = m->private; hctx 378 block/blk-mq-debugfs.c spin_unlock(&hctx->lock); hctx 390 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx; hctx 402 block/blk-mq-debugfs.c if (rq->mq_hctx == params->hctx) hctx 411 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx = data; hctx 412 block/blk-mq-debugfs.c struct show_busy_params params = { .m = m, .hctx = hctx }; hctx 414 block/blk-mq-debugfs.c blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq, hctx 428 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx = data; hctx 431 block/blk-mq-debugfs.c seq_printf(m, "%s\n", hctx_types[hctx->type]); hctx 437 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx = data; hctx 439 block/blk-mq-debugfs.c sbitmap_bitmap_show(&hctx->ctx_map, m); hctx 462 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx = data; hctx 463 block/blk-mq-debugfs.c struct request_queue *q = hctx->queue; hctx 469 block/blk-mq-debugfs.c if (hctx->tags) hctx 470 block/blk-mq-debugfs.c blk_mq_debugfs_tags_show(m, hctx->tags); hctx 479 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx = data; hctx 480 block/blk-mq-debugfs.c struct request_queue *q = hctx->queue; hctx 486 block/blk-mq-debugfs.c if (hctx->tags) hctx 487 block/blk-mq-debugfs.c sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m); hctx 496 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx = data; hctx 497 block/blk-mq-debugfs.c struct request_queue *q = hctx->queue; hctx 503 block/blk-mq-debugfs.c if (hctx->sched_tags) hctx 504 block/blk-mq-debugfs.c blk_mq_debugfs_tags_show(m, hctx->sched_tags); hctx 513 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx = data; hctx 514 block/blk-mq-debugfs.c struct request_queue *q = hctx->queue; hctx 520 block/blk-mq-debugfs.c if (hctx->sched_tags) hctx 521 block/blk-mq-debugfs.c sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m); hctx 530 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx = data; hctx 532 block/blk-mq-debugfs.c seq_printf(m, "considered=%lu\n", hctx->poll_considered); hctx 533 block/blk-mq-debugfs.c seq_printf(m, "invoked=%lu\n", hctx->poll_invoked); hctx 534 block/blk-mq-debugfs.c seq_printf(m, "success=%lu\n", hctx->poll_success); hctx 541 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx = data; hctx 543 block/blk-mq-debugfs.c hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0; hctx 549 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx = data; hctx 552 block/blk-mq-debugfs.c seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]); hctx 557 block/blk-mq-debugfs.c seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]); hctx 560 block/blk-mq-debugfs.c seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]); hctx 567 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx = data; hctx 571 block/blk-mq-debugfs.c hctx->dispatched[i] = 0; hctx 577 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx = data; hctx 579 block/blk-mq-debugfs.c seq_printf(m, "%lu\n", hctx->queued); hctx 586 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx = data; hctx 588 block/blk-mq-debugfs.c hctx->queued = 0; hctx 594 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx = data; hctx 596 block/blk-mq-debugfs.c seq_printf(m, "%lu\n", hctx->run); hctx 603 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx = data; hctx 605 block/blk-mq-debugfs.c hctx->run = 0; hctx 611 
block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx = data; hctx 613 block/blk-mq-debugfs.c seq_printf(m, "%d\n", atomic_read(&hctx->nr_active)); hctx 619 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx = data; hctx 621 block/blk-mq-debugfs.c seq_printf(m, "%u\n", hctx->dispatch_busy); hctx 823 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx; hctx 840 block/blk-mq-debugfs.c queue_for_each_hw_ctx(q, hctx, i) { hctx 841 block/blk-mq-debugfs.c if (!hctx->debugfs_dir) hctx 842 block/blk-mq-debugfs.c blk_mq_debugfs_register_hctx(q, hctx); hctx 843 block/blk-mq-debugfs.c if (q->elevator && !hctx->sched_debugfs_dir) hctx 844 block/blk-mq-debugfs.c blk_mq_debugfs_register_sched_hctx(q, hctx); hctx 864 block/blk-mq-debugfs.c static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx, hctx 871 block/blk-mq-debugfs.c ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir); hctx 877 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx) hctx 883 block/blk-mq-debugfs.c snprintf(name, sizeof(name), "hctx%u", hctx->queue_num); hctx 884 block/blk-mq-debugfs.c hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir); hctx 886 block/blk-mq-debugfs.c debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs); hctx 888 block/blk-mq-debugfs.c hctx_for_each_ctx(hctx, ctx, i) hctx 889 block/blk-mq-debugfs.c blk_mq_debugfs_register_ctx(hctx, ctx); hctx 892 block/blk-mq-debugfs.c void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx) hctx 894 block/blk-mq-debugfs.c debugfs_remove_recursive(hctx->debugfs_dir); hctx 895 block/blk-mq-debugfs.c hctx->sched_debugfs_dir = NULL; hctx 896 block/blk-mq-debugfs.c hctx->debugfs_dir = NULL; hctx 901 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx; hctx 904 block/blk-mq-debugfs.c queue_for_each_hw_ctx(q, hctx, i) hctx 905 block/blk-mq-debugfs.c blk_mq_debugfs_register_hctx(q, hctx); hctx 910 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx; hctx 913 block/blk-mq-debugfs.c queue_for_each_hw_ctx(q, hctx, i) hctx 914 block/blk-mq-debugfs.c blk_mq_debugfs_unregister_hctx(hctx); hctx 973 block/blk-mq-debugfs.c struct blk_mq_hw_ctx *hctx) hctx 980 block/blk-mq-debugfs.c hctx->sched_debugfs_dir = debugfs_create_dir("sched", hctx 981 block/blk-mq-debugfs.c hctx->debugfs_dir); hctx 982 block/blk-mq-debugfs.c debugfs_create_files(hctx->sched_debugfs_dir, hctx, hctx 986 block/blk-mq-debugfs.c void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx) hctx 988 block/blk-mq-debugfs.c debugfs_remove_recursive(hctx->sched_debugfs_dir); hctx 989 block/blk-mq-debugfs.c hctx->sched_debugfs_dir = NULL; hctx 24 block/blk-mq-debugfs.h struct blk_mq_hw_ctx *hctx); hctx 25 block/blk-mq-debugfs.h void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx); hctx 32 block/blk-mq-debugfs.h struct blk_mq_hw_ctx *hctx); hctx 33 block/blk-mq-debugfs.h void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx); hctx 48 block/blk-mq-debugfs.h struct blk_mq_hw_ctx *hctx) hctx 52 block/blk-mq-debugfs.h static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx) hctx 73 block/blk-mq-debugfs.h struct blk_mq_hw_ctx *hctx) hctx 77 block/blk-mq-debugfs.h static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx) hctx 23 block/blk-mq-sched.c struct blk_mq_hw_ctx *hctx; hctx 26 block/blk-mq-sched.c queue_for_each_hw_ctx(q, hctx, i) { hctx 27 block/blk-mq-sched.c if (exit && hctx->sched_data) hctx 28 block/blk-mq-sched.c exit(hctx); hctx 29 block/blk-mq-sched.c kfree(hctx->sched_data); hctx 30 block/blk-mq-sched.c 
hctx->sched_data = NULL; hctx 65 block/blk-mq-sched.c void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) hctx 67 block/blk-mq-sched.c if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) hctx 70 block/blk-mq-sched.c set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); hctx 74 block/blk-mq-sched.c void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) hctx 76 block/blk-mq-sched.c if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) hctx 78 block/blk-mq-sched.c clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); hctx 80 block/blk-mq-sched.c blk_mq_run_hw_queue(hctx, true); hctx 88 block/blk-mq-sched.c static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) hctx 90 block/blk-mq-sched.c struct request_queue *q = hctx->queue; hctx 97 block/blk-mq-sched.c if (e->type->ops.has_work && !e->type->ops.has_work(hctx)) hctx 100 block/blk-mq-sched.c if (!blk_mq_get_dispatch_budget(hctx)) hctx 103 block/blk-mq-sched.c rq = e->type->ops.dispatch_request(hctx); hctx 105 block/blk-mq-sched.c blk_mq_put_dispatch_budget(hctx); hctx 118 block/blk-mq-sched.c static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx, hctx 121 block/blk-mq-sched.c unsigned short idx = ctx->index_hw[hctx->type]; hctx 123 block/blk-mq-sched.c if (++idx == hctx->nr_ctx) hctx 126 block/blk-mq-sched.c return hctx->ctxs[idx]; hctx 134 block/blk-mq-sched.c static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx) hctx 136 block/blk-mq-sched.c struct request_queue *q = hctx->queue; hctx 138 block/blk-mq-sched.c struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from); hctx 143 block/blk-mq-sched.c if (!sbitmap_any_bit_set(&hctx->ctx_map)) hctx 146 block/blk-mq-sched.c if (!blk_mq_get_dispatch_budget(hctx)) hctx 149 block/blk-mq-sched.c rq = blk_mq_dequeue_from_ctx(hctx, ctx); hctx 151 block/blk-mq-sched.c blk_mq_put_dispatch_budget(hctx); hctx 163 block/blk-mq-sched.c ctx = blk_mq_next_ctx(hctx, rq->mq_ctx); hctx 167 block/blk-mq-sched.c WRITE_ONCE(hctx->dispatch_from, ctx); hctx 170 block/blk-mq-sched.c void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) hctx 172 block/blk-mq-sched.c struct request_queue *q = hctx->queue; hctx 178 block/blk-mq-sched.c if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) hctx 181 block/blk-mq-sched.c hctx->run++; hctx 187 block/blk-mq-sched.c if (!list_empty_careful(&hctx->dispatch)) { hctx 188 block/blk-mq-sched.c spin_lock(&hctx->lock); hctx 189 block/blk-mq-sched.c if (!list_empty(&hctx->dispatch)) hctx 190 block/blk-mq-sched.c list_splice_init(&hctx->dispatch, &rq_list); hctx 191 block/blk-mq-sched.c spin_unlock(&hctx->lock); hctx 208 block/blk-mq-sched.c blk_mq_sched_mark_restart_hctx(hctx); hctx 211 block/blk-mq-sched.c blk_mq_do_dispatch_sched(hctx); hctx 213 block/blk-mq-sched.c blk_mq_do_dispatch_ctx(hctx); hctx 216 block/blk-mq-sched.c blk_mq_do_dispatch_sched(hctx); hctx 217 block/blk-mq-sched.c } else if (hctx->dispatch_busy) { hctx 219 block/blk-mq-sched.c blk_mq_do_dispatch_ctx(hctx); hctx 221 block/blk-mq-sched.c blk_mq_flush_busy_ctxs(hctx, &rq_list); hctx 308 block/blk-mq-sched.c struct blk_mq_hw_ctx *hctx, hctx 312 block/blk-mq-sched.c enum hctx_type type = hctx->type; hctx 329 block/blk-mq-sched.c struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx); hctx 334 block/blk-mq-sched.c return e->type->ops.bio_merge(hctx, bio, nr_segs); hctx 336 block/blk-mq-sched.c type = hctx->type; hctx 337 block/blk-mq-sched.c if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) && hctx 341 block/blk-mq-sched.c ret = blk_mq_attempt_merge(q, hctx, ctx, 
bio, nr_segs); hctx 360 block/blk-mq-sched.c static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, hctx 390 block/blk-mq-sched.c struct blk_mq_hw_ctx *hctx = rq->mq_hctx; hctx 400 block/blk-mq-sched.c if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) { hctx 431 block/blk-mq-sched.c e->type->ops.insert_requests(hctx, &list, at_head); hctx 434 block/blk-mq-sched.c __blk_mq_insert_request(hctx, rq, at_head); hctx 440 block/blk-mq-sched.c blk_mq_run_hw_queue(hctx, async); hctx 443 block/blk-mq-sched.c void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx, hctx 448 block/blk-mq-sched.c struct request_queue *q = hctx->queue; hctx 457 block/blk-mq-sched.c e = hctx->queue->elevator; hctx 459 block/blk-mq-sched.c e->type->ops.insert_requests(hctx, list, false); hctx 466 block/blk-mq-sched.c if (!hctx->dispatch_busy && !e && !run_queue_async) { hctx 467 block/blk-mq-sched.c blk_mq_try_issue_list_directly(hctx, list); hctx 471 block/blk-mq-sched.c blk_mq_insert_requests(hctx, ctx, list); hctx 474 block/blk-mq-sched.c blk_mq_run_hw_queue(hctx, run_queue_async); hctx 480 block/blk-mq-sched.c struct blk_mq_hw_ctx *hctx, hctx 483 block/blk-mq-sched.c if (hctx->sched_tags) { hctx 484 block/blk-mq-sched.c blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx); hctx 485 block/blk-mq-sched.c blk_mq_free_rq_map(hctx->sched_tags); hctx 486 block/blk-mq-sched.c hctx->sched_tags = NULL; hctx 491 block/blk-mq-sched.c struct blk_mq_hw_ctx *hctx, hctx 497 block/blk-mq-sched.c hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests, hctx 499 block/blk-mq-sched.c if (!hctx->sched_tags) hctx 502 block/blk-mq-sched.c ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests); hctx 504 block/blk-mq-sched.c blk_mq_sched_free_tags(set, hctx, hctx_idx); hctx 512 block/blk-mq-sched.c struct blk_mq_hw_ctx *hctx; hctx 515 block/blk-mq-sched.c queue_for_each_hw_ctx(q, hctx, i) { hctx 516 block/blk-mq-sched.c if (hctx->sched_tags) { hctx 517 block/blk-mq-sched.c blk_mq_free_rq_map(hctx->sched_tags); hctx 518 block/blk-mq-sched.c hctx->sched_tags = NULL; hctx 525 block/blk-mq-sched.c struct blk_mq_hw_ctx *hctx; hctx 544 block/blk-mq-sched.c queue_for_each_hw_ctx(q, hctx, i) { hctx 545 block/blk-mq-sched.c ret = blk_mq_sched_alloc_tags(q, hctx, i); hctx 556 block/blk-mq-sched.c queue_for_each_hw_ctx(q, hctx, i) { hctx 558 block/blk-mq-sched.c ret = e->ops.init_hctx(hctx, i); hctx 567 block/blk-mq-sched.c blk_mq_debugfs_register_sched_hctx(q, hctx); hctx 585 block/blk-mq-sched.c struct blk_mq_hw_ctx *hctx; hctx 588 block/blk-mq-sched.c queue_for_each_hw_ctx(q, hctx, i) { hctx 589 block/blk-mq-sched.c if (hctx->sched_tags) hctx 590 block/blk-mq-sched.c blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i); hctx 596 block/blk-mq-sched.c struct blk_mq_hw_ctx *hctx; hctx 599 block/blk-mq-sched.c queue_for_each_hw_ctx(q, hctx, i) { hctx 600 block/blk-mq-sched.c blk_mq_debugfs_unregister_sched_hctx(hctx); hctx 601 block/blk-mq-sched.c if (e->type->ops.exit_hctx && hctx->sched_data) { hctx 602 block/blk-mq-sched.c e->type->ops.exit_hctx(hctx, i); hctx 603 block/blk-mq-sched.c hctx->sched_data = NULL; hctx 19 block/blk-mq-sched.h void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx); hctx 20 block/blk-mq-sched.h void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx); hctx 24 block/blk-mq-sched.h void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx, hctx 28 block/blk-mq-sched.h void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx); hctx 73 block/blk-mq-sched.h static inline bool 
blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx) hctx 75 block/blk-mq-sched.h struct elevator_queue *e = hctx->queue->elevator; hctx 78 block/blk-mq-sched.h return e->type->ops.has_work(hctx); hctx 83 block/blk-mq-sched.h static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) hctx 85 block/blk-mq-sched.h return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); hctx 36 block/blk-mq-sysfs.c struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx, hctx 39 block/blk-mq-sysfs.c cancel_delayed_work_sync(&hctx->run_work); hctx 41 block/blk-mq-sysfs.c if (hctx->flags & BLK_MQ_F_BLOCKING) hctx 42 block/blk-mq-sysfs.c cleanup_srcu_struct(hctx->srcu); hctx 43 block/blk-mq-sysfs.c blk_free_flush_queue(hctx->fq); hctx 44 block/blk-mq-sysfs.c sbitmap_free(&hctx->ctx_map); hctx 45 block/blk-mq-sysfs.c free_cpumask_var(hctx->cpumask); hctx 46 block/blk-mq-sysfs.c kfree(hctx->ctxs); hctx 47 block/blk-mq-sysfs.c kfree(hctx); hctx 112 block/blk-mq-sysfs.c struct blk_mq_hw_ctx *hctx; hctx 117 block/blk-mq-sysfs.c hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); hctx 118 block/blk-mq-sysfs.c q = hctx->queue; hctx 126 block/blk-mq-sysfs.c res = entry->show(hctx, page); hctx 136 block/blk-mq-sysfs.c struct blk_mq_hw_ctx *hctx; hctx 141 block/blk-mq-sysfs.c hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); hctx 142 block/blk-mq-sysfs.c q = hctx->queue; hctx 150 block/blk-mq-sysfs.c res = entry->store(hctx, page, length); hctx 155 block/blk-mq-sysfs.c static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx, hctx 158 block/blk-mq-sysfs.c return sprintf(page, "%u\n", hctx->tags->nr_tags); hctx 161 block/blk-mq-sysfs.c static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx, hctx 164 block/blk-mq-sysfs.c return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags); hctx 167 block/blk-mq-sysfs.c static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) hctx 173 block/blk-mq-sysfs.c for_each_cpu(i, hctx->cpumask) { hctx 237 block/blk-mq-sysfs.c static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) hctx 242 block/blk-mq-sysfs.c if (!hctx->nr_ctx) hctx 245 block/blk-mq-sysfs.c hctx_for_each_ctx(hctx, ctx, i) hctx 248 block/blk-mq-sysfs.c kobject_del(&hctx->kobj); hctx 251 block/blk-mq-sysfs.c static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) hctx 253 block/blk-mq-sysfs.c struct request_queue *q = hctx->queue; hctx 257 block/blk-mq-sysfs.c if (!hctx->nr_ctx) hctx 260 block/blk-mq-sysfs.c ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num); hctx 264 block/blk-mq-sysfs.c hctx_for_each_ctx(hctx, ctx, i) { hctx 265 block/blk-mq-sysfs.c ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); hctx 275 block/blk-mq-sysfs.c struct blk_mq_hw_ctx *hctx; hctx 280 block/blk-mq-sysfs.c queue_for_each_hw_ctx(q, hctx, i) hctx 281 block/blk-mq-sysfs.c blk_mq_unregister_hctx(hctx); hctx 290 block/blk-mq-sysfs.c void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx) hctx 292 block/blk-mq-sysfs.c kobject_init(&hctx->kobj, &blk_mq_hw_ktype); hctx 324 block/blk-mq-sysfs.c struct blk_mq_hw_ctx *hctx; hctx 336 block/blk-mq-sysfs.c queue_for_each_hw_ctx(q, hctx, i) { hctx 337 block/blk-mq-sysfs.c ret = blk_mq_register_hctx(hctx); hctx 359 block/blk-mq-sysfs.c struct blk_mq_hw_ctx *hctx; hctx 366 block/blk-mq-sysfs.c queue_for_each_hw_ctx(q, hctx, i) hctx 367 block/blk-mq-sysfs.c blk_mq_unregister_hctx(hctx); hctx 375 block/blk-mq-sysfs.c struct blk_mq_hw_ctx *hctx; hctx 382 block/blk-mq-sysfs.c 
queue_for_each_hw_ctx(q, hctx, i) { hctx 383 block/blk-mq-sysfs.c ret = blk_mq_register_hctx(hctx); hctx 32 block/blk-mq-tag.c bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) hctx 34 block/blk-mq-tag.c if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) && hctx 35 block/blk-mq-tag.c !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) hctx 36 block/blk-mq-tag.c atomic_inc(&hctx->tags->active_queues); hctx 55 block/blk-mq-tag.c void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) hctx 57 block/blk-mq-tag.c struct blk_mq_tags *tags = hctx->tags; hctx 59 block/blk-mq-tag.c if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) hctx 71 block/blk-mq-tag.c static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, hctx 76 block/blk-mq-tag.c if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED)) hctx 78 block/blk-mq-tag.c if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) hctx 87 block/blk-mq-tag.c users = atomic_read(&hctx->tags->active_queues); hctx 95 block/blk-mq-tag.c return atomic_read(&hctx->nr_active) < depth; hctx 102 block/blk-mq-tag.c !hctx_may_queue(data->hctx, bt)) hctx 138 block/blk-mq-tag.c ws = bt_wait_ptr(bt, data->hctx); hctx 147 block/blk-mq-tag.c blk_mq_run_hw_queue(data->hctx, false); hctx 169 block/blk-mq-tag.c data->hctx = blk_mq_map_queue(data->q, data->cmd_flags, hctx 185 block/blk-mq-tag.c ws = bt_wait_ptr(bt, data->hctx); hctx 194 block/blk-mq-tag.c void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags, hctx 209 block/blk-mq-tag.c struct blk_mq_hw_ctx *hctx; hctx 218 block/blk-mq-tag.c struct blk_mq_hw_ctx *hctx = iter_data->hctx; hctx 219 block/blk-mq-tag.c struct blk_mq_tags *tags = hctx->tags; hctx 231 block/blk-mq-tag.c if (rq && rq->q == hctx->queue) hctx 232 block/blk-mq-tag.c return iter_data->fn(hctx, rq, iter_data->data, reserved); hctx 250 block/blk-mq-tag.c static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt, hctx 254 block/blk-mq-tag.c .hctx = hctx, hctx 406 block/blk-mq-tag.c struct blk_mq_hw_ctx *hctx; hctx 419 block/blk-mq-tag.c queue_for_each_hw_ctx(q, hctx, i) { hctx 420 block/blk-mq-tag.c struct blk_mq_tags *tags = hctx->tags; hctx 426 block/blk-mq-tag.c if (!blk_mq_hw_queue_mapped(hctx)) hctx 430 block/blk-mq-tag.c bt_for_each(hctx, &tags->breserved_tags, fn, priv, true); hctx 431 block/blk-mq-tag.c bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false); hctx 491 block/blk-mq-tag.c int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, hctx 505 block/blk-mq-tag.c struct blk_mq_tag_set *set = hctx->queue->tag_set; hctx 519 block/blk-mq-tag.c new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, hctx 523 block/blk-mq-tag.c ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth); hctx 529 block/blk-mq-tag.c blk_mq_free_rqs(set, *tagsptr, hctx->queue_num); hctx 29 block/blk-mq-tag.h extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags, hctx 32 block/blk-mq-tag.h extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, hctx 40 block/blk-mq-tag.h struct blk_mq_hw_ctx *hctx) hctx 42 block/blk-mq-tag.h if (!hctx) hctx 44 block/blk-mq-tag.h return sbq_wait_ptr(bt, &hctx->wait_index); hctx 56 block/blk-mq-tag.h static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) hctx 58 block/blk-mq-tag.h if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) hctx 61 block/blk-mq-tag.h return __blk_mq_tag_busy(hctx); hctx 64 block/blk-mq-tag.h static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) hctx 66 block/blk-mq-tag.h if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) hctx 69 
block/blk-mq-tag.h __blk_mq_tag_idle(hctx); hctx 78 block/blk-mq-tag.h static inline void blk_mq_tag_set_rq(struct blk_mq_hw_ctx *hctx, hctx 81 block/blk-mq-tag.h hctx->tags->rqs[tag] = rq; hctx 67 block/blk-mq.c static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) hctx 69 block/blk-mq.c return !list_empty_careful(&hctx->dispatch) || hctx 70 block/blk-mq.c sbitmap_any_bit_set(&hctx->ctx_map) || hctx 71 block/blk-mq.c blk_mq_sched_has_work(hctx); hctx 77 block/blk-mq.c static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, hctx 80 block/blk-mq.c const int bit = ctx->index_hw[hctx->type]; hctx 82 block/blk-mq.c if (!sbitmap_test_bit(&hctx->ctx_map, bit)) hctx 83 block/blk-mq.c sbitmap_set_bit(&hctx->ctx_map, bit); hctx 86 block/blk-mq.c static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, hctx 89 block/blk-mq.c const int bit = ctx->index_hw[hctx->type]; hctx 91 block/blk-mq.c sbitmap_clear_bit(&hctx->ctx_map, bit); hctx 99 block/blk-mq.c static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx, hctx 125 block/blk-mq.c static bool blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx, hctx 236 block/blk-mq.c struct blk_mq_hw_ctx *hctx; hctx 242 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) { hctx 243 block/blk-mq.c if (hctx->flags & BLK_MQ_F_BLOCKING) hctx 244 block/blk-mq.c synchronize_srcu(hctx->srcu); hctx 271 block/blk-mq.c struct blk_mq_hw_ctx *hctx; hctx 274 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) hctx 275 block/blk-mq.c if (blk_mq_hw_queue_mapped(hctx)) hctx 276 block/blk-mq.c blk_mq_tag_wakeup_all(hctx->tags, true); hctx 279 block/blk-mq.c bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) hctx 281 block/blk-mq.c return blk_mq_has_free_tags(hctx->tags); hctx 305 block/blk-mq.c if (data->hctx->flags & BLK_MQ_F_TAG_SHARED) { hctx 307 block/blk-mq.c atomic_inc(&data->hctx->nr_active); hctx 311 block/blk-mq.c data->hctx->tags->rqs[rq->tag] = rq; hctx 317 block/blk-mq.c rq->mq_hctx = data->hctx; hctx 377 block/blk-mq.c if (likely(!data->hctx)) hctx 378 block/blk-mq.c data->hctx = blk_mq_map_queue(q, data->cmd_flags, hctx 396 block/blk-mq.c blk_mq_tag_busy(data->hctx); hctx 418 block/blk-mq.c data->hctx->queued++; hctx 474 block/blk-mq.c alloc_data.hctx = q->queue_hw_ctx[hctx_idx]; hctx 475 block/blk-mq.c if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) { hctx 479 block/blk-mq.c cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask); hctx 496 block/blk-mq.c struct blk_mq_hw_ctx *hctx = rq->mq_hctx; hctx 502 block/blk-mq.c blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag); hctx 504 block/blk-mq.c blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag); hctx 505 block/blk-mq.c blk_mq_sched_restart(hctx); hctx 514 block/blk-mq.c struct blk_mq_hw_ctx *hctx = rq->mq_hctx; hctx 527 block/blk-mq.c atomic_dec(&hctx->nr_active); hctx 629 block/blk-mq.c static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx) hctx 630 block/blk-mq.c __releases(hctx->srcu) hctx 632 block/blk-mq.c if (!(hctx->flags & BLK_MQ_F_BLOCKING)) hctx 635 block/blk-mq.c srcu_read_unlock(hctx->srcu, srcu_idx); hctx 638 block/blk-mq.c static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx) hctx 639 block/blk-mq.c __acquires(hctx->srcu) hctx 641 block/blk-mq.c if (!(hctx->flags & BLK_MQ_F_BLOCKING)) { hctx 646 block/blk-mq.c *srcu_idx = srcu_read_lock(hctx->srcu); hctx 828 block/blk-mq.c static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq, hctx 835 block/blk-mq.c if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) { hctx 889 
block/blk-mq.c static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, hctx 922 block/blk-mq.c if (is_flush_rq(rq, hctx)) hctx 935 block/blk-mq.c struct blk_mq_hw_ctx *hctx; hctx 965 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) { hctx 967 block/blk-mq.c if (blk_mq_hw_queue_mapped(hctx)) hctx 968 block/blk-mq.c blk_mq_tag_idle(hctx); hctx 975 block/blk-mq.c struct blk_mq_hw_ctx *hctx; hctx 982 block/blk-mq.c struct blk_mq_hw_ctx *hctx = flush_data->hctx; hctx 983 block/blk-mq.c struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; hctx 984 block/blk-mq.c enum hctx_type type = hctx->type; hctx 997 block/blk-mq.c void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) hctx 1000 block/blk-mq.c .hctx = hctx, hctx 1004 block/blk-mq.c sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data); hctx 1009 block/blk-mq.c struct blk_mq_hw_ctx *hctx; hctx 1017 block/blk-mq.c struct blk_mq_hw_ctx *hctx = dispatch_data->hctx; hctx 1018 block/blk-mq.c struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; hctx 1019 block/blk-mq.c enum hctx_type type = hctx->type; hctx 1033 block/blk-mq.c struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, hctx 1036 block/blk-mq.c unsigned off = start ? start->index_hw[hctx->type] : 0; hctx 1038 block/blk-mq.c .hctx = hctx, hctx 1042 block/blk-mq.c __sbitmap_for_each_set(&hctx->ctx_map, off, hctx 1060 block/blk-mq.c .hctx = rq->mq_hctx, hctx 1069 block/blk-mq.c if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag)) hctx 1072 block/blk-mq.c shared = blk_mq_tag_busy(data.hctx); hctx 1077 block/blk-mq.c atomic_inc(&data.hctx->nr_active); hctx 1079 block/blk-mq.c data.hctx->tags->rqs[rq->tag] = rq; hctx 1089 block/blk-mq.c struct blk_mq_hw_ctx *hctx; hctx 1091 block/blk-mq.c hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait); hctx 1093 block/blk-mq.c spin_lock(&hctx->dispatch_wait_lock); hctx 1098 block/blk-mq.c sbq = &hctx->tags->bitmap_tags; hctx 1101 block/blk-mq.c spin_unlock(&hctx->dispatch_wait_lock); hctx 1103 block/blk-mq.c blk_mq_run_hw_queue(hctx, true); hctx 1113 block/blk-mq.c static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, hctx 1116 block/blk-mq.c struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags; hctx 1121 block/blk-mq.c if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) { hctx 1122 block/blk-mq.c blk_mq_sched_mark_restart_hctx(hctx); hctx 1135 block/blk-mq.c wait = &hctx->dispatch_wait; hctx 1139 block/blk-mq.c wq = &bt_wait_ptr(sbq, hctx)->wait; hctx 1142 block/blk-mq.c spin_lock(&hctx->dispatch_wait_lock); hctx 1144 block/blk-mq.c spin_unlock(&hctx->dispatch_wait_lock); hctx 1160 block/blk-mq.c spin_unlock(&hctx->dispatch_wait_lock); hctx 1171 block/blk-mq.c spin_unlock(&hctx->dispatch_wait_lock); hctx 1186 block/blk-mq.c static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy) hctx 1190 block/blk-mq.c if (hctx->queue->elevator) hctx 1193 block/blk-mq.c ewma = hctx->dispatch_busy; hctx 1203 block/blk-mq.c hctx->dispatch_busy = ewma; hctx 1214 block/blk-mq.c struct blk_mq_hw_ctx *hctx; hctx 1234 block/blk-mq.c hctx = rq->mq_hctx; hctx 1235 block/blk-mq.c if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) { hctx 1248 block/blk-mq.c if (!blk_mq_mark_tag_wait(hctx, rq)) { hctx 1249 block/blk-mq.c blk_mq_put_dispatch_budget(hctx); hctx 1254 block/blk-mq.c if (hctx->flags & BLK_MQ_F_TAG_SHARED) hctx 1275 block/blk-mq.c ret = q->mq_ops->queue_rq(hctx, &bd); hctx 1300 block/blk-mq.c hctx->dispatched[queued_to_index(queued)]++; hctx 1315 block/blk-mq.c q->mq_ops->commit_rqs(hctx); 
hctx 1317 block/blk-mq.c spin_lock(&hctx->lock); hctx 1318 block/blk-mq.c list_splice_tail_init(list, &hctx->dispatch); hctx 1319 block/blk-mq.c spin_unlock(&hctx->lock); hctx 1345 block/blk-mq.c needs_restart = blk_mq_sched_needs_restart(hctx); hctx 1347 block/blk-mq.c (no_tag && list_empty_careful(&hctx->dispatch_wait.entry))) hctx 1348 block/blk-mq.c blk_mq_run_hw_queue(hctx, true); hctx 1350 block/blk-mq.c blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY); hctx 1352 block/blk-mq.c blk_mq_update_dispatch_busy(hctx, true); hctx 1355 block/blk-mq.c blk_mq_update_dispatch_busy(hctx, false); hctx 1367 block/blk-mq.c static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) hctx 1388 block/blk-mq.c if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) && hctx 1389 block/blk-mq.c cpu_online(hctx->next_cpu)) { hctx 1392 block/blk-mq.c cpumask_empty(hctx->cpumask) ? "inactive": "active"); hctx 1402 block/blk-mq.c might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); hctx 1404 block/blk-mq.c hctx_lock(hctx, &srcu_idx); hctx 1405 block/blk-mq.c blk_mq_sched_dispatch_requests(hctx); hctx 1406 block/blk-mq.c hctx_unlock(hctx, srcu_idx); hctx 1409 block/blk-mq.c static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx) hctx 1411 block/blk-mq.c int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask); hctx 1414 block/blk-mq.c cpu = cpumask_first(hctx->cpumask); hctx 1424 block/blk-mq.c static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) hctx 1427 block/blk-mq.c int next_cpu = hctx->next_cpu; hctx 1429 block/blk-mq.c if (hctx->queue->nr_hw_queues == 1) hctx 1432 block/blk-mq.c if (--hctx->next_cpu_batch <= 0) { hctx 1434 block/blk-mq.c next_cpu = cpumask_next_and(next_cpu, hctx->cpumask, hctx 1437 block/blk-mq.c next_cpu = blk_mq_first_mapped_cpu(hctx); hctx 1438 block/blk-mq.c hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; hctx 1455 block/blk-mq.c hctx->next_cpu = next_cpu; hctx 1456 block/blk-mq.c hctx->next_cpu_batch = 1; hctx 1460 block/blk-mq.c hctx->next_cpu = next_cpu; hctx 1464 block/blk-mq.c static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, hctx 1467 block/blk-mq.c if (unlikely(blk_mq_hctx_stopped(hctx))) hctx 1470 block/blk-mq.c if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { hctx 1472 block/blk-mq.c if (cpumask_test_cpu(cpu, hctx->cpumask)) { hctx 1473 block/blk-mq.c __blk_mq_run_hw_queue(hctx); hctx 1481 block/blk-mq.c kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, hctx 1485 block/blk-mq.c void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) hctx 1487 block/blk-mq.c __blk_mq_delay_run_hw_queue(hctx, true, msecs); hctx 1491 block/blk-mq.c bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) hctx 1504 block/blk-mq.c hctx_lock(hctx, &srcu_idx); hctx 1505 block/blk-mq.c need_run = !blk_queue_quiesced(hctx->queue) && hctx 1506 block/blk-mq.c blk_mq_hctx_has_pending(hctx); hctx 1507 block/blk-mq.c hctx_unlock(hctx, srcu_idx); hctx 1510 block/blk-mq.c __blk_mq_delay_run_hw_queue(hctx, async, 0); hctx 1520 block/blk-mq.c struct blk_mq_hw_ctx *hctx; hctx 1523 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) { hctx 1524 block/blk-mq.c if (blk_mq_hctx_stopped(hctx)) hctx 1527 block/blk-mq.c blk_mq_run_hw_queue(hctx, async); hctx 1541 block/blk-mq.c struct blk_mq_hw_ctx *hctx; hctx 1544 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) hctx 1545 block/blk-mq.c if (blk_mq_hctx_stopped(hctx)) hctx 1561 block/blk-mq.c void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) hctx 
1563 block/blk-mq.c cancel_delayed_work(&hctx->run_work); hctx 1565 block/blk-mq.c set_bit(BLK_MQ_S_STOPPED, &hctx->state); hctx 1580 block/blk-mq.c struct blk_mq_hw_ctx *hctx; hctx 1583 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) hctx 1584 block/blk-mq.c blk_mq_stop_hw_queue(hctx); hctx 1588 block/blk-mq.c void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) hctx 1590 block/blk-mq.c clear_bit(BLK_MQ_S_STOPPED, &hctx->state); hctx 1592 block/blk-mq.c blk_mq_run_hw_queue(hctx, false); hctx 1598 block/blk-mq.c struct blk_mq_hw_ctx *hctx; hctx 1601 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) hctx 1602 block/blk-mq.c blk_mq_start_hw_queue(hctx); hctx 1606 block/blk-mq.c void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) hctx 1608 block/blk-mq.c if (!blk_mq_hctx_stopped(hctx)) hctx 1611 block/blk-mq.c clear_bit(BLK_MQ_S_STOPPED, &hctx->state); hctx 1612 block/blk-mq.c blk_mq_run_hw_queue(hctx, async); hctx 1618 block/blk-mq.c struct blk_mq_hw_ctx *hctx; hctx 1621 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) hctx 1622 block/blk-mq.c blk_mq_start_stopped_hw_queue(hctx, async); hctx 1628 block/blk-mq.c struct blk_mq_hw_ctx *hctx; hctx 1630 block/blk-mq.c hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); hctx 1635 block/blk-mq.c if (test_bit(BLK_MQ_S_STOPPED, &hctx->state)) hctx 1638 block/blk-mq.c __blk_mq_run_hw_queue(hctx); hctx 1641 block/blk-mq.c static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx, hctx 1646 block/blk-mq.c enum hctx_type type = hctx->type; hctx 1650 block/blk-mq.c trace_block_rq_insert(hctx->queue, rq); hctx 1658 block/blk-mq.c void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, hctx 1665 block/blk-mq.c __blk_mq_insert_req_list(hctx, rq, at_head); hctx 1666 block/blk-mq.c blk_mq_hctx_mark_pending(hctx, ctx); hctx 1676 block/blk-mq.c struct blk_mq_hw_ctx *hctx = rq->mq_hctx; hctx 1678 block/blk-mq.c spin_lock(&hctx->lock); hctx 1680 block/blk-mq.c list_add(&rq->queuelist, &hctx->dispatch); hctx 1682 block/blk-mq.c list_add_tail(&rq->queuelist, &hctx->dispatch); hctx 1683 block/blk-mq.c spin_unlock(&hctx->lock); hctx 1686 block/blk-mq.c blk_mq_run_hw_queue(hctx, false); hctx 1689 block/blk-mq.c void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, hctx 1694 block/blk-mq.c enum hctx_type type = hctx->type; hctx 1702 block/blk-mq.c trace_block_rq_insert(hctx->queue, rq); hctx 1707 block/blk-mq.c blk_mq_hctx_mark_pending(hctx, ctx); hctx 1796 block/blk-mq.c static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, hctx 1808 block/blk-mq.c new_cookie = request_to_qc_t(hctx, rq); hctx 1815 block/blk-mq.c ret = q->mq_ops->queue_rq(hctx, &bd); hctx 1818 block/blk-mq.c blk_mq_update_dispatch_busy(hctx, false); hctx 1823 block/blk-mq.c blk_mq_update_dispatch_busy(hctx, true); hctx 1827 block/blk-mq.c blk_mq_update_dispatch_busy(hctx, false); hctx 1835 block/blk-mq.c static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, hctx 1850 block/blk-mq.c if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) { hctx 1859 block/blk-mq.c if (!blk_mq_get_dispatch_budget(hctx)) hctx 1863 block/blk-mq.c blk_mq_put_dispatch_budget(hctx); hctx 1867 block/blk-mq.c return __blk_mq_issue_directly(hctx, rq, cookie, last); hctx 1876 block/blk-mq.c static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, hctx 1882 block/blk-mq.c might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); hctx 1884 block/blk-mq.c hctx_lock(hctx, &srcu_idx); hctx 1886 
block/blk-mq.c ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true); hctx 1892 block/blk-mq.c hctx_unlock(hctx, srcu_idx); hctx 1900 block/blk-mq.c struct blk_mq_hw_ctx *hctx = rq->mq_hctx; hctx 1902 block/blk-mq.c hctx_lock(hctx, &srcu_idx); hctx 1903 block/blk-mq.c ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last); hctx 1904 block/blk-mq.c hctx_unlock(hctx, srcu_idx); hctx 1909 block/blk-mq.c void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, hctx 1935 block/blk-mq.c if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs) hctx 1936 block/blk-mq.c hctx->queue->mq_ops->commit_rqs(hctx); hctx 1992 block/blk-mq.c cookie = request_to_qc_t(data.hctx, rq); hctx 2000 block/blk-mq.c blk_mq_run_hw_queue(data.hctx, true); hctx 2045 block/blk-mq.c data.hctx = same_queue_rq->mq_hctx; hctx 2047 block/blk-mq.c blk_mq_try_issue_directly(data.hctx, same_queue_rq, hctx 2051 block/blk-mq.c !data.hctx->dispatch_busy) { hctx 2052 block/blk-mq.c blk_mq_try_issue_directly(data.hctx, rq, &cookie); hctx 2241 block/blk-mq.c struct blk_mq_hw_ctx *hctx; hctx 2246 block/blk-mq.c hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead); hctx 2247 block/blk-mq.c ctx = __blk_mq_get_ctx(hctx->queue, cpu); hctx 2248 block/blk-mq.c type = hctx->type; hctx 2253 block/blk-mq.c blk_mq_hctx_clear_pending(hctx, ctx); hctx 2260 block/blk-mq.c spin_lock(&hctx->lock); hctx 2261 block/blk-mq.c list_splice_tail_init(&tmp, &hctx->dispatch); hctx 2262 block/blk-mq.c spin_unlock(&hctx->lock); hctx 2264 block/blk-mq.c blk_mq_run_hw_queue(hctx, true); hctx 2268 block/blk-mq.c static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx) hctx 2271 block/blk-mq.c &hctx->cpuhp_dead); hctx 2277 block/blk-mq.c struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) hctx 2279 block/blk-mq.c if (blk_mq_hw_queue_mapped(hctx)) hctx 2280 block/blk-mq.c blk_mq_tag_idle(hctx); hctx 2283 block/blk-mq.c set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); hctx 2286 block/blk-mq.c set->ops->exit_hctx(hctx, hctx_idx); hctx 2288 block/blk-mq.c blk_mq_remove_cpuhp(hctx); hctx 2291 block/blk-mq.c list_add(&hctx->hctx_list, &q->unused_hctx_list); hctx 2298 block/blk-mq.c struct blk_mq_hw_ctx *hctx; hctx 2301 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) { hctx 2304 block/blk-mq.c blk_mq_debugfs_unregister_hctx(hctx); hctx 2305 block/blk-mq.c blk_mq_exit_hctx(q, set, hctx, i); hctx 2325 block/blk-mq.c struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) hctx 2327 block/blk-mq.c hctx->queue_num = hctx_idx; hctx 2329 block/blk-mq.c cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); hctx 2331 block/blk-mq.c hctx->tags = set->tags[hctx_idx]; hctx 2334 block/blk-mq.c set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) hctx 2337 block/blk-mq.c if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, hctx 2338 block/blk-mq.c hctx->numa_node)) hctx 2344 block/blk-mq.c set->ops->exit_hctx(hctx, hctx_idx); hctx 2346 block/blk-mq.c blk_mq_remove_cpuhp(hctx); hctx 2354 block/blk-mq.c struct blk_mq_hw_ctx *hctx; hctx 2357 block/blk-mq.c hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node); hctx 2358 block/blk-mq.c if (!hctx) hctx 2361 block/blk-mq.c if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node)) hctx 2364 block/blk-mq.c atomic_set(&hctx->nr_active, 0); hctx 2367 block/blk-mq.c hctx->numa_node = node; hctx 2369 block/blk-mq.c INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); hctx 2370 block/blk-mq.c spin_lock_init(&hctx->lock); hctx 2371 block/blk-mq.c 
INIT_LIST_HEAD(&hctx->dispatch); hctx 2372 block/blk-mq.c hctx->queue = q; hctx 2373 block/blk-mq.c hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED; hctx 2375 block/blk-mq.c INIT_LIST_HEAD(&hctx->hctx_list); hctx 2381 block/blk-mq.c hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *), hctx 2383 block/blk-mq.c if (!hctx->ctxs) hctx 2386 block/blk-mq.c if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), hctx 2389 block/blk-mq.c hctx->nr_ctx = 0; hctx 2391 block/blk-mq.c spin_lock_init(&hctx->dispatch_wait_lock); hctx 2392 block/blk-mq.c init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake); hctx 2393 block/blk-mq.c INIT_LIST_HEAD(&hctx->dispatch_wait.entry); hctx 2395 block/blk-mq.c hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size, hctx 2397 block/blk-mq.c if (!hctx->fq) hctx 2400 block/blk-mq.c if (hctx->flags & BLK_MQ_F_BLOCKING) hctx 2401 block/blk-mq.c init_srcu_struct(hctx->srcu); hctx 2402 block/blk-mq.c blk_mq_hctx_kobj_init(hctx); hctx 2404 block/blk-mq.c return hctx; hctx 2407 block/blk-mq.c sbitmap_free(&hctx->ctx_map); hctx 2409 block/blk-mq.c kfree(hctx->ctxs); hctx 2411 block/blk-mq.c free_cpumask_var(hctx->cpumask); hctx 2413 block/blk-mq.c kfree(hctx); hctx 2426 block/blk-mq.c struct blk_mq_hw_ctx *hctx; hctx 2441 block/blk-mq.c hctx = blk_mq_map_queue_type(q, j, i); hctx 2442 block/blk-mq.c if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) hctx 2443 block/blk-mq.c hctx->numa_node = local_memory_node(cpu_to_node(i)); hctx 2480 block/blk-mq.c struct blk_mq_hw_ctx *hctx; hctx 2484 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) { hctx 2485 block/blk-mq.c cpumask_clear(hctx->cpumask); hctx 2486 block/blk-mq.c hctx->nr_ctx = 0; hctx 2487 block/blk-mq.c hctx->dispatch_from = NULL; hctx 2517 block/blk-mq.c hctx = blk_mq_map_queue_type(q, j, i); hctx 2518 block/blk-mq.c ctx->hctxs[j] = hctx; hctx 2524 block/blk-mq.c if (cpumask_test_cpu(i, hctx->cpumask)) hctx 2527 block/blk-mq.c cpumask_set_cpu(i, hctx->cpumask); hctx 2528 block/blk-mq.c hctx->type = j; hctx 2529 block/blk-mq.c ctx->index_hw[hctx->type] = hctx->nr_ctx; hctx 2530 block/blk-mq.c hctx->ctxs[hctx->nr_ctx++] = ctx; hctx 2536 block/blk-mq.c BUG_ON(!hctx->nr_ctx); hctx 2544 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) { hctx 2549 block/blk-mq.c if (!hctx->nr_ctx) { hctx 2557 block/blk-mq.c hctx->tags = NULL; hctx 2561 block/blk-mq.c hctx->tags = set->tags[i]; hctx 2562 block/blk-mq.c WARN_ON(!hctx->tags); hctx 2569 block/blk-mq.c sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx); hctx 2574 block/blk-mq.c hctx->next_cpu = blk_mq_first_mapped_cpu(hctx); hctx 2575 block/blk-mq.c hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; hctx 2585 block/blk-mq.c struct blk_mq_hw_ctx *hctx; hctx 2588 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) { hctx 2590 block/blk-mq.c hctx->flags |= BLK_MQ_F_TAG_SHARED; hctx 2592 block/blk-mq.c hctx->flags &= ~BLK_MQ_F_TAG_SHARED; hctx 2683 block/blk-mq.c struct blk_mq_hw_ctx *hctx, *next; hctx 2686 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) hctx 2687 block/blk-mq.c WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list)); hctx 2690 block/blk-mq.c list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) { hctx 2691 block/blk-mq.c list_del_init(&hctx->hctx_list); hctx 2692 block/blk-mq.c kobject_put(&hctx->kobj); hctx 2762 block/blk-mq.c struct blk_mq_hw_ctx *hctx = NULL, *tmp; hctx 2768 block/blk-mq.c hctx = tmp; hctx 2772 block/blk-mq.c if (hctx) hctx 2773 block/blk-mq.c list_del_init(&hctx->hctx_list); hctx 2776 block/blk-mq.c if 
(!hctx) hctx 2777 block/blk-mq.c hctx = blk_mq_alloc_hctx(q, set, node); hctx 2778 block/blk-mq.c if (!hctx) hctx 2781 block/blk-mq.c if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) hctx 2784 block/blk-mq.c return hctx; hctx 2787 block/blk-mq.c kobject_put(&hctx->kobj); hctx 2802 block/blk-mq.c struct blk_mq_hw_ctx *hctx; hctx 2813 block/blk-mq.c hctx = blk_mq_alloc_and_init_hctx(set, q, i, node); hctx 2814 block/blk-mq.c if (hctx) { hctx 2817 block/blk-mq.c hctxs[i] = hctx; hctx 2841 block/blk-mq.c struct blk_mq_hw_ctx *hctx = hctxs[j]; hctx 2843 block/blk-mq.c if (hctx) { hctx 2844 block/blk-mq.c if (hctx->tags) hctx 2846 block/blk-mq.c blk_mq_exit_hctx(q, set, hctx, j); hctx 3159 block/blk-mq.c struct blk_mq_hw_ctx *hctx; hctx 3172 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) { hctx 3173 block/blk-mq.c if (!hctx->tags) hctx 3179 block/blk-mq.c if (!hctx->sched_tags) { hctx 3180 block/blk-mq.c ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr, hctx 3183 block/blk-mq.c ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, hctx 3189 block/blk-mq.c q->elevator->type->ops.depth_updated(hctx); hctx 3377 block/blk-mq.c struct blk_mq_hw_ctx *hctx, hctx 3410 block/blk-mq.c struct blk_mq_hw_ctx *hctx, hctx 3430 block/blk-mq.c nsecs = blk_mq_poll_nsecs(q, hctx, rq); hctx 3464 block/blk-mq.c struct blk_mq_hw_ctx *hctx, blk_qc_t cookie) hctx 3472 block/blk-mq.c rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie)); hctx 3474 block/blk-mq.c rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie)); hctx 3485 block/blk-mq.c return blk_mq_poll_hybrid_sleep(q, hctx, rq); hctx 3502 block/blk-mq.c struct blk_mq_hw_ctx *hctx; hctx 3512 block/blk-mq.c hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)]; hctx 3521 block/blk-mq.c if (blk_mq_poll_hybrid(q, hctx, cookie)) hctx 3524 block/blk-mq.c hctx->poll_considered++; hctx 3530 block/blk-mq.c hctx->poll_invoked++; hctx 3532 block/blk-mq.c ret = q->mq_ops->poll(hctx); hctx 3534 block/blk-mq.c hctx->poll_success++; hctx 46 block/blk-mq.h void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list); hctx 48 block/blk-mq.h struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, hctx 67 block/blk-mq.h void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, hctx 71 block/blk-mq.h void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, hctx 76 block/blk-mq.h void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, hctx 128 block/blk-mq.h extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx); hctx 167 block/blk-mq.h struct blk_mq_hw_ctx *hctx; hctx 173 block/blk-mq.h return data->hctx->sched_tags; hctx 175 block/blk-mq.h return data->hctx->tags; hctx 178 block/blk-mq.h static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx) hctx 180 block/blk-mq.h return test_bit(BLK_MQ_S_STOPPED, &hctx->state); hctx 183 block/blk-mq.h static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx) hctx 185 block/blk-mq.h return hctx->nr_ctx && hctx->tags; hctx 192 block/blk-mq.h static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx) hctx 194 block/blk-mq.h struct request_queue *q = hctx->queue; hctx 197 block/blk-mq.h q->mq_ops->put_budget(hctx); hctx 200 block/blk-mq.h static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx) hctx 202 block/blk-mq.h struct request_queue *q = hctx->queue; hctx 205 block/blk-mq.h return q->mq_ops->get_budget(hctx); hctx 209 block/blk-mq.h static inline void __blk_mq_put_driver_tag(struct 
blk_mq_hw_ctx *hctx, hctx 212 block/blk-mq.h blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag); hctx 217 block/blk-mq.h atomic_dec(&hctx->nr_active); hctx 53 block/blk.h is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx) hctx 55 block/blk.h return hctx->fq->flush_rq == req; hctx 261 block/bsg-lib.c static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 264 block/bsg-lib.c struct request_queue *q = hctx->queue; hctx 461 block/kyber-iosched.c static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) hctx 463 block/kyber-iosched.c struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data; hctx 467 block/kyber-iosched.c khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node); hctx 471 block/kyber-iosched.c khd->kcqs = kmalloc_array_node(hctx->nr_ctx, hctx 473 block/kyber-iosched.c GFP_KERNEL, hctx->numa_node); hctx 477 block/kyber-iosched.c for (i = 0; i < hctx->nr_ctx; i++) hctx 481 block/kyber-iosched.c if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx, hctx 482 block/kyber-iosched.c ilog2(8), GFP_KERNEL, hctx->numa_node)) { hctx 496 block/kyber-iosched.c khd->domain_wait[i].wait.private = hctx; hctx 504 block/kyber-iosched.c hctx->sched_data = khd; hctx 505 block/kyber-iosched.c sbitmap_queue_min_shallow_depth(&hctx->sched_tags->bitmap_tags, hctx 517 block/kyber-iosched.c static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) hctx 519 block/kyber-iosched.c struct kyber_hctx_data *khd = hctx->sched_data; hctx 525 block/kyber-iosched.c kfree(hctx->sched_data); hctx 565 block/kyber-iosched.c static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio, hctx 568 block/kyber-iosched.c struct kyber_hctx_data *khd = hctx->sched_data; hctx 569 block/kyber-iosched.c struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue); hctx 570 block/kyber-iosched.c struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]]; hctx 576 block/kyber-iosched.c merged = blk_mq_bio_list_merge(hctx->queue, rq_list, bio, nr_segs); hctx 587 block/kyber-iosched.c static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx, hctx 590 block/kyber-iosched.c struct kyber_hctx_data *khd = hctx->sched_data; hctx 595 block/kyber-iosched.c struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]]; hctx 604 block/kyber-iosched.c rq->mq_ctx->index_hw[hctx->type]); hctx 694 block/kyber-iosched.c struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private); hctx 698 block/kyber-iosched.c blk_mq_run_hw_queue(hctx, true); hctx 704 block/kyber-iosched.c struct blk_mq_hw_ctx *hctx) hctx 752 block/kyber-iosched.c struct blk_mq_hw_ctx *hctx) hctx 770 block/kyber-iosched.c nr = kyber_get_domain_token(kqd, khd, hctx); hctx 781 block/kyber-iosched.c nr = kyber_get_domain_token(kqd, khd, hctx); hctx 799 block/kyber-iosched.c static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx) hctx 801 block/kyber-iosched.c struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data; hctx 802 block/kyber-iosched.c struct kyber_hctx_data *khd = hctx->sched_data; hctx 813 block/kyber-iosched.c rq = kyber_dispatch_cur_domain(kqd, khd, hctx); hctx 834 block/kyber-iosched.c rq = kyber_dispatch_cur_domain(kqd, khd, hctx); hctx 845 block/kyber-iosched.c static bool kyber_has_work(struct blk_mq_hw_ctx *hctx) hctx 847 block/kyber-iosched.c struct kyber_hctx_data *khd = hctx->sched_data; hctx 909 block/kyber-iosched.c struct blk_mq_hw_ctx *hctx = m->private; \ hctx 910 block/kyber-iosched.c struct kyber_hctx_data *khd = 
hctx->sched_data; \ hctx 919 block/kyber-iosched.c struct blk_mq_hw_ctx *hctx = m->private; \ hctx 920 block/kyber-iosched.c struct kyber_hctx_data *khd = hctx->sched_data; \ hctx 928 block/kyber-iosched.c struct blk_mq_hw_ctx *hctx = m->private; \ hctx 929 block/kyber-iosched.c struct kyber_hctx_data *khd = hctx->sched_data; \ hctx 943 block/kyber-iosched.c struct blk_mq_hw_ctx *hctx = data; \ hctx 944 block/kyber-iosched.c struct kyber_hctx_data *khd = hctx->sched_data; \ hctx 967 block/kyber-iosched.c struct blk_mq_hw_ctx *hctx = data; hctx 968 block/kyber-iosched.c struct kyber_hctx_data *khd = hctx->sched_data; hctx 976 block/kyber-iosched.c struct blk_mq_hw_ctx *hctx = data; hctx 977 block/kyber-iosched.c struct kyber_hctx_data *khd = hctx->sched_data; hctx 381 block/mq-deadline.c static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) hctx 383 block/mq-deadline.c struct deadline_data *dd = hctx->queue->elevator->elevator_data; hctx 462 block/mq-deadline.c static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio, hctx 465 block/mq-deadline.c struct request_queue *q = hctx->queue; hctx 483 block/mq-deadline.c static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, hctx 486 block/mq-deadline.c struct request_queue *q = hctx->queue; hctx 523 block/mq-deadline.c static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, hctx 526 block/mq-deadline.c struct request_queue *q = hctx->queue; hctx 535 block/mq-deadline.c dd_insert_request(hctx, rq, at_head); hctx 578 block/mq-deadline.c static bool dd_has_work(struct blk_mq_hw_ctx *hctx) hctx 580 block/mq-deadline.c struct deadline_data *dd = hctx->queue->elevator->elevator_data; hctx 936 crypto/cryptd.c struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst); hctx 941 crypto/cryptd.c crypto_drop_shash(&hctx->spawn); hctx 1504 drivers/block/amiflop.c static blk_status_t amiflop_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 263 drivers/block/aoe/aoeblk.c static blk_status_t aoeblk_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 266 drivers/block/aoe/aoeblk.c struct aoedev *d = hctx->queue->queuedata; hctx 1475 drivers/block/ataflop.c static void ataflop_commit_rqs(struct blk_mq_hw_ctx *hctx) hctx 1484 drivers/block/ataflop.c static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 2890 drivers/block/floppy.c static blk_status_t floppy_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 1921 drivers/block/loop.c static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 165 drivers/block/mtip32xx/mtip32xx.c struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0]; hctx 167 drivers/block/mtip32xx/mtip32xx.c return blk_mq_rq_to_pdu(blk_mq_tag_to_rq(hctx->tags, tag)); hctx 2058 drivers/block/mtip32xx/mtip32xx.c struct blk_mq_hw_ctx *hctx) hctx 2070 drivers/block/mtip32xx/mtip32xx.c nents = blk_rq_map_sg(hctx->queue, rq, command->sg); hctx 3422 drivers/block/mtip32xx/mtip32xx.c static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx, hctx 3425 drivers/block/mtip32xx/mtip32xx.c struct driver_data *dd = hctx->queue->queuedata; hctx 3446 drivers/block/mtip32xx/mtip32xx.c static blk_status_t mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx, hctx 3449 drivers/block/mtip32xx/mtip32xx.c struct driver_data *dd = hctx->queue->queuedata; hctx 3483 drivers/block/mtip32xx/mtip32xx.c static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 3486 drivers/block/mtip32xx/mtip32xx.c struct driver_data *dd = hctx->queue->queuedata; hctx 3491 drivers/block/mtip32xx/mtip32xx.c return 
mtip_issue_reserved_cmd(hctx, rq); hctx 3493 drivers/block/mtip32xx/mtip32xx.c if (unlikely(mtip_check_unal_depth(hctx, rq))) hctx 3501 drivers/block/mtip32xx/mtip32xx.c mtip_hw_submit_io(dd, rq, cmd, hctx); hctx 950 drivers/block/nbd.c static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 973 drivers/block/nbd.c ret = nbd_handle_cmd(cmd, hctx->queue_num); hctx 1324 drivers/block/null_blk_main.c static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 1328 drivers/block/null_blk_main.c struct nullb_queue *nq = hctx->driver_data; hctx 1332 drivers/block/null_blk_main.c might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); hctx 1457 drivers/block/null_blk_main.c struct blk_mq_hw_ctx *hctx; hctx 1461 drivers/block/null_blk_main.c queue_for_each_hw_ctx(q, hctx, i) { hctx 1462 drivers/block/null_blk_main.c if (!hctx->nr_ctx || !hctx->tags) hctx 1465 drivers/block/null_blk_main.c hctx->driver_data = nq; hctx 189 drivers/block/paride/pcd.c static blk_status_t pcd_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 813 drivers/block/paride/pcd.c static blk_status_t pcd_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 816 drivers/block/paride/pcd.c struct pcd_unit *cd = hctx->queue->queuedata; hctx 755 drivers/block/paride/pd.c static blk_status_t pd_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 758 drivers/block/paride/pd.c struct pd_unit *disk = hctx->queue->queuedata; hctx 209 drivers/block/paride/pf.c static blk_status_t pf_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 871 drivers/block/paride/pf.c static blk_status_t pf_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 874 drivers/block/paride/pf.c struct pf_unit *pf = hctx->queue->queuedata; hctx 195 drivers/block/ps3disk.c static blk_status_t ps3disk_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 198 drivers/block/ps3disk.c struct request_queue *q = hctx->queue; hctx 4910 drivers/block/rbd.c static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 478 drivers/block/skd_main.c static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 534 drivers/block/sunvdc.c static blk_status_t vdc_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 537 drivers/block/sunvdc.c struct vdc_port *port = hctx->queue->queuedata; hctx 557 drivers/block/sunvdc.c blk_mq_stop_hw_queue(hctx); hctx 524 drivers/block/swim.c static blk_status_t swim_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 527 drivers/block/swim.c struct floppy_state *fs = hctx->queue->queuedata; hctx 308 drivers/block/swim3.c static blk_status_t swim3_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 311 drivers/block/swim3.c struct floppy_state *fs = hctx->queue->queuedata; hctx 702 drivers/block/sx8.c static blk_status_t carm_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 705 drivers/block/sx8.c struct request_queue *q = hctx->queue; hctx 273 drivers/block/virtio_blk.c static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx) hctx 275 drivers/block/virtio_blk.c struct virtio_blk *vblk = hctx->queue->queuedata; hctx 276 drivers/block/virtio_blk.c struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num]; hctx 287 drivers/block/virtio_blk.c static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 290 drivers/block/virtio_blk.c struct virtio_blk *vblk = hctx->queue->queuedata; hctx 295 drivers/block/virtio_blk.c int qid = hctx->queue_num; hctx 343 drivers/block/virtio_blk.c num = blk_rq_map_sg(hctx->queue, req, vbr->sg); hctx 362 drivers/block/virtio_blk.c blk_mq_stop_hw_queue(hctx); hctx 882 drivers/block/xen-blkfront.c static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx, hctx 886 
hctx 887 drivers/block/xen-blkfront.c struct blkfront_info *info = hctx->queue->queuedata;
hctx 912 drivers/block/xen-blkfront.c blk_mq_stop_hw_queue(hctx);
hctx 864 drivers/block/xsysace.c static blk_status_t ace_queue_rq(struct blk_mq_hw_ctx *hctx,
hctx 867 drivers/block/xsysace.c struct ace_device *ace = hctx->queue->queuedata;
hctx 69 drivers/block/z2ram.c static blk_status_t z2_queue_rq(struct blk_mq_hw_ctx *hctx,
hctx 631 drivers/cdrom/gdrom.c static blk_status_t gdrom_queue_rq(struct blk_mq_hw_ctx *hctx,
hctx 565 drivers/ide/ide-io.c blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
hctx 568 drivers/ide/ide-io.c ide_drive_t *drive = hctx->queue->queuedata;
hctx 491 drivers/md/dm-rq.c static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
hctx 1997 drivers/memstick/core/ms_block.c static blk_status_t msb_queue_rq(struct blk_mq_hw_ctx *hctx,
hctx 2000 drivers/memstick/core/ms_block.c struct memstick_dev *card = hctx->queue->queuedata;
hctx 825 drivers/memstick/core/mspro_block.c static blk_status_t mspro_queue_rq(struct blk_mq_hw_ctx *hctx,
hctx 828 drivers/memstick/core/mspro_block.c struct memstick_dev *card = hctx->queue->queuedata;
hctx 240 drivers/mmc/core/queue.c static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
hctx 185 drivers/mtd/mtd_blkdevs.c static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx,
hctx 190 drivers/mtd/mtd_blkdevs.c dev = hctx->queue->queuedata;
hctx 313 drivers/mtd/ubi/block.c static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
hctx 317 drivers/mtd/ubi/block.c struct ubiblock *dev = hctx->queue->queuedata;
hctx 32 drivers/nvme/host/fc.c struct blk_mq_hw_ctx *hctx;
hctx 1836 drivers/nvme/host/fc.c __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
hctx 1841 drivers/nvme/host/fc.c hctx->driver_data = queue;
hctx 1842 drivers/nvme/host/fc.c queue->hctx = hctx;
hctx 1846 drivers/nvme/host/fc.c nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
hctx 1851 drivers/nvme/host/fc.c __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
hctx 1857 drivers/nvme/host/fc.c nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
hctx 1862 drivers/nvme/host/fc.c __nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
hctx 2321 drivers/nvme/host/fc.c nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
hctx 2324 drivers/nvme/host/fc.c struct nvme_ns *ns = hctx->queue->queuedata;
hctx 2325 drivers/nvme/host/fc.c struct nvme_fc_queue *queue = hctx->driver_data;
hctx 371 drivers/nvme/host/pci.c static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
hctx 378 drivers/nvme/host/pci.c WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
hctx 380 drivers/nvme/host/pci.c hctx->driver_data = nvmeq;
hctx 384 drivers/nvme/host/pci.c static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
hctx 390 drivers/nvme/host/pci.c WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
hctx 391 drivers/nvme/host/pci.c hctx->driver_data = nvmeq;
hctx 488 drivers/nvme/host/pci.c static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
hctx 490 drivers/nvme/host/pci.c struct nvme_queue *nvmeq = hctx->driver_data;
hctx 862 drivers/nvme/host/pci.c static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
hctx 865 drivers/nvme/host/pci.c struct nvme_ns *ns = hctx->queue->queuedata;
hctx 866 drivers/nvme/host/pci.c struct nvme_queue *nvmeq = hctx->driver_data;
hctx 1076 drivers/nvme/host/pci.c static int nvme_poll(struct blk_mq_hw_ctx *hctx)
hctx 1078 drivers/nvme/host/pci.c struct nvme_queue *nvmeq = hctx->driver_data;
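The NVMe host entries here (fc and pci above, rdma/tcp and the loop target below) all follow the same pattern: the driver's ->init_hctx() callback stashes its per-hardware-queue transport structure in hctx->driver_data, and ->queue_rq() later reads it back alongside the namespace hung off hctx->queue->queuedata. The following is a minimal sketch of that wiring, assuming the standard blk_mq_ops callback signatures; the my_* names and the queues[] layout are hypothetical and not taken from any driver listed here.

	#include <linux/blk-mq.h>

	/* Hypothetical per-hardware-queue transport state. */
	struct my_queue {
		unsigned int qid;
	};

	struct my_ctrl {
		struct my_queue *queues;	/* queues[0] is the admin queue */
	};

	static int my_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
	{
		struct my_ctrl *ctrl = data;	/* tag_set->driver_data */

		/* I/O hctxs map to queues after the admin queue, as in nvme_fc_init_hctx() */
		hctx->driver_data = &ctrl->queues[hctx_idx + 1];
		return 0;
	}

	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
					const struct blk_mq_queue_data *bd)
	{
		struct my_queue *queue = hctx->driver_data;	/* set in my_init_hctx() */

		if (WARN_ON_ONCE(!queue))
			return BLK_STS_IOERR;

		blk_mq_start_request(bd->rq);
		/* build and post the command on "queue" here */
		return BLK_STS_OK;
	}

	static const struct blk_mq_ops my_mq_ops = {
		.queue_rq	= my_queue_rq,
		.init_hctx	= my_init_hctx,
	};

The hctx_idx + 1 offset mirrors the nvme_fc_init_hctx() entry above; admin hardware contexts would use a separate init callback without the offset, as the *_init_admin_hctx entries show.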
hctx 306 drivers/nvme/host/rdma.c static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
hctx 314 drivers/nvme/host/rdma.c hctx->driver_data = queue;
hctx 318 drivers/nvme/host/rdma.c static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
hctx 326 drivers/nvme/host/rdma.c hctx->driver_data = queue;
hctx 1732 drivers/nvme/host/rdma.c static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
hctx 1735 drivers/nvme/host/rdma.c struct nvme_ns *ns = hctx->queue->queuedata;
hctx 1736 drivers/nvme/host/rdma.c struct nvme_rdma_queue *queue = hctx->driver_data;
hctx 1802 drivers/nvme/host/rdma.c static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx)
hctx 1804 drivers/nvme/host/rdma.c struct nvme_rdma_queue *queue = hctx->driver_data;
hctx 381 drivers/nvme/host/tcp.c static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
hctx 387 drivers/nvme/host/tcp.c hctx->driver_data = queue;
hctx 391 drivers/nvme/host/tcp.c static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
hctx 397 drivers/nvme/host/tcp.c hctx->driver_data = queue;
hctx 2154 drivers/nvme/host/tcp.c static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
hctx 2157 drivers/nvme/host/tcp.c struct nvme_ns *ns = hctx->queue->queuedata;
hctx 2158 drivers/nvme/host/tcp.c struct nvme_tcp_queue *queue = hctx->driver_data;
hctx 2223 drivers/nvme/host/tcp.c static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
hctx 2225 drivers/nvme/host/tcp.c struct nvme_tcp_queue *queue = hctx->driver_data;
hctx 132 drivers/nvme/target/loop.c static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
hctx 135 drivers/nvme/target/loop.c struct nvme_ns *ns = hctx->queue->queuedata;
hctx 136 drivers/nvme/target/loop.c struct nvme_loop_queue *queue = hctx->driver_data;
hctx 215 drivers/nvme/target/loop.c static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
hctx 223 drivers/nvme/target/loop.c hctx->driver_data = queue;
hctx 227 drivers/nvme/target/loop.c static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
hctx 235 drivers/nvme/target/loop.c hctx->driver_data = queue;
hctx 3072 drivers/s390/block/dasd.c static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
hctx 3075 drivers/s390/block/dasd.c struct dasd_block *block = hctx->queue->queuedata;
hctx 3076 drivers/s390/block/dasd.c struct dasd_queue *dq = hctx->driver_data;
hctx 3231 drivers/s390/block/dasd.c static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
hctx 3240 drivers/s390/block/dasd.c hctx->driver_data = dq;
hctx 3245 drivers/s390/block/dasd.c static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
hctx 3247 drivers/s390/block/dasd.c kfree(hctx->driver_data);
hctx 3248 drivers/s390/block/dasd.c hctx->driver_data = NULL;
hctx 282 drivers/s390/block/scm_blk.c static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx,
hctx 285 drivers/s390/block/scm_blk.c struct scm_device *scmdev = hctx->queue->queuedata;
hctx 287 drivers/s390/block/scm_blk.c struct scm_queue *sq = hctx->driver_data;
hctx 331 drivers/s390/block/scm_blk.c static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
hctx 340 drivers/s390/block/scm_blk.c hctx->driver_data = qd;
hctx 345 drivers/s390/block/scm_blk.c static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
hctx 347 drivers/s390/block/scm_blk.c struct scm_queue *qd = hctx->driver_data;
hctx 350 drivers/s390/block/scm_blk.c kfree(hctx->driver_data);
hctx 351 drivers/s390/block/scm_blk.c hctx->driver_data = NULL;
hctx 1623 drivers/scsi/scsi_lib.c static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx)
hctx 1625 drivers/scsi/scsi_lib.c struct request_queue *q = hctx->queue;
hctx 1631 drivers/scsi/scsi_lib.c static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
hctx 1633 drivers/scsi/scsi_lib.c struct request_queue *q = hctx->queue;
hctx 1640 drivers/scsi/scsi_lib.c blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
hctx 1644 drivers/scsi/scsi_lib.c static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
hctx 1705 drivers/scsi/scsi_lib.c scsi_mq_put_budget(hctx);
hctx 1843 drivers/scsi/scsi_lib.c static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
hctx 1845 drivers/scsi/scsi_lib.c struct request_queue *q = hctx->queue;
hctx 1849 drivers/scsi/scsi_lib.c shost->hostt->commit_rqs(shost, hctx->queue_num);
hctx 361 include/linux/blk-mq.h ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
hctx 363 include/linux/blk-mq.h #define hctx_for_each_ctx(hctx, ctx, i) \
hctx 364 include/linux/blk-mq.h for ((i) = 0; (i) < (hctx)->nr_ctx && \
hctx 365 include/linux/blk-mq.h ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
hctx 367 include/linux/blk-mq.h static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
hctx 371 include/linux/blk-mq.h return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);
hctx 373 include/linux/blk-mq.h return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
hctx 104 net/dccp/ccids/ccid3.h struct ccid3_hc_tx_sock *hctx = ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid);
hctx 105 net/dccp/ccids/ccid3.h BUG_ON(hctx == NULL);
hctx 106 net/dccp/ccids/ccid3.h return hctx;
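The include/linux/blk-mq.h entries just above define the iterators drivers use to walk hardware contexts. A small sketch of how queue_for_each_hw_ctx() is typically combined with hctx->driver_data follows, in the spirit of the null_blk setup loop listed earlier (drivers/block/null_blk_main.c 1457-1465); my_hctx_data and my_setup_hctxs are hypothetical names, not part of the listing.

	#include <linux/blk-mq.h>
	#include <linux/slab.h>

	struct my_hctx_data {
		unsigned int queue_num;		/* cached copy of hctx->queue_num */
	};

	static int my_setup_hctxs(struct request_queue *q)
	{
		struct blk_mq_hw_ctx *hctx;
		unsigned int i;

		queue_for_each_hw_ctx(q, hctx, i) {
			struct my_hctx_data *d;

			/* skip hardware queues with no mapped ctxs or tags, as null_blk does */
			if (!hctx->nr_ctx || !hctx->tags)
				continue;

			d = kzalloc(sizeof(*d), GFP_KERNEL);
			if (!d)
				return -ENOMEM;

			d->queue_num = hctx->queue_num;
			hctx->driver_data = d;	/* read back in ->queue_rq() and freed in ->exit_hctx() */
		}
		return 0;
	}

A matching ->exit_hctx() would kfree() and clear hctx->driver_data, as the dasd_exit_hctx and scm_blk_exit_hctx entries above show.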