Identifier: khd
File: block/kyber-iosched.c

464  struct kyber_hctx_data *khd;
467  khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);
468  if (!khd)
471  khd->kcqs = kmalloc_array_node(hctx->nr_ctx,
474  if (!khd->kcqs)
478  kyber_ctx_queue_init(&khd->kcqs[i]);
481  if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,
484  sbitmap_free(&khd->kcq_map[i]);
489  spin_lock_init(&khd->lock);
492  INIT_LIST_HEAD(&khd->rqs[i]);
493  khd->domain_wait[i].sbq = NULL;
494  init_waitqueue_func_entry(&khd->domain_wait[i].wait,
496  khd->domain_wait[i].wait.private = hctx;
497  INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry);
498  atomic_set(&khd->wait_index[i], 0);
501  khd->cur_domain = 0;
502  khd->batching = 0;
504  hctx->sched_data = khd;
511  kfree(khd->kcqs);
513  kfree(khd);

519  struct kyber_hctx_data *khd = hctx->sched_data;
523  sbitmap_free(&khd->kcq_map[i]);
524  kfree(khd->kcqs);

568  struct kyber_hctx_data *khd = hctx->sched_data;
570  struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];

590  struct kyber_hctx_data *khd = hctx->sched_data;
595  struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
603  sbitmap_set_bit(&khd->kcq_map[sched_domain],

658  struct kyber_hctx_data *khd;
666  struct kyber_ctx_queue *kcq = &flush_data->khd->kcqs[bitnr];
677  static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd,
682  .khd = khd,
687  sbitmap_for_each_set(&khd->kcq_map[sched_domain],

703  struct kyber_hctx_data *khd,
706  unsigned int sched_domain = khd->cur_domain;
708  struct sbq_wait *wait = &khd->domain_wait[sched_domain];
721  &khd->wait_index[sched_domain]);
722  khd->domain_ws[sched_domain] = ws;
740  ws = khd->domain_ws[sched_domain];

751  struct kyber_hctx_data *khd,
758  rqs = &khd->rqs[khd->cur_domain];
770  nr = kyber_get_domain_token(kqd, khd, hctx);
772  khd->batching++;
778  kyber_domain_names[khd->cur_domain]);
780  } else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
781  nr = kyber_get_domain_token(kqd, khd, hctx);
783  kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs);
785  khd->batching++;
791  kyber_domain_names[khd->cur_domain]);

802  struct kyber_hctx_data *khd = hctx->sched_data;
806  spin_lock(&khd->lock);
812  if (khd->batching < kyber_batch_size[khd->cur_domain]) {
813  rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
827  khd->batching = 0;
829  if (khd->cur_domain == KYBER_NUM_DOMAINS - 1)
830  khd->cur_domain = 0;
832  khd->cur_domain++;
834  rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
841  spin_unlock(&khd->lock);

847  struct kyber_hctx_data *khd = hctx->sched_data;
851  if (!list_empty_careful(&khd->rqs[i]) ||
852  sbitmap_any_bit_set(&khd->kcq_map[i]))

907  __acquires(&khd->lock) \
910  struct kyber_hctx_data *khd = hctx->sched_data; \
912  spin_lock(&khd->lock); \
913  return seq_list_start(&khd->rqs[domain], *pos); \
920  struct kyber_hctx_data *khd = hctx->sched_data; \
922  return seq_list_next(v, &khd->rqs[domain], pos); \
926  __releases(&khd->lock) \
929  struct kyber_hctx_data *khd = hctx->sched_data; \
931  spin_unlock(&khd->lock); \

944  struct kyber_hctx_data *khd = hctx->sched_data; \
945  wait_queue_entry_t *wait = &khd->domain_wait[domain].wait; \

968  struct kyber_hctx_data *khd = hctx->sched_data;
970  seq_printf(m, "%s\n", kyber_domain_names[khd->cur_domain]);

977  struct kyber_hctx_data *khd = hctx->sched_data;
979  seq_printf(m, "%u\n", khd->batching);
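The fragments from lines 802-841 outline the heart of kyber_dispatch_request(): under khd->lock, a hardware queue keeps draining its current scheduling domain until that domain's batch quota (kyber_batch_size[khd->cur_domain]) is spent, then resets the batch counter and round-robins cur_domain across KYBER_NUM_DOMAINS looking for more work. Below is a minimal user-space C sketch of that control flow only; khd_model, dispatch_cur_domain(), and the pending counters are hypothetical stand-ins for the kernel's struct kyber_hctx_data, kyber_dispatch_cur_domain(), and per-domain request lists, and the quota values are illustrative, not taken from the fragments above.

/* Minimal user-space model of the round-robin, batched dispatch loop
 * visible in the khd references at lines 802-841. Locks, sbitmaps, and
 * domain tokens are omitted; only the cur_domain/batching logic remains. */
#include <stdio.h>
#include <stdbool.h>

enum { KYBER_NUM_DOMAINS = 4 };    /* read, write, discard, other */

/* Illustrative per-domain batch quotas in the spirit of kyber_batch_size. */
static const unsigned batch_size[KYBER_NUM_DOMAINS] = { 16, 8, 1, 1 };

struct khd_model {
	unsigned cur_domain;                 /* domain currently being drained */
	unsigned batching;                   /* requests dispatched in this batch */
	unsigned pending[KYBER_NUM_DOMAINS]; /* stand-in for khd->rqs lists */
};

/* Stand-in for kyber_dispatch_cur_domain(): pop one queued request from
 * the current domain, if any. */
static bool dispatch_cur_domain(struct khd_model *khd)
{
	if (khd->pending[khd->cur_domain]) {
		khd->pending[khd->cur_domain]--;
		khd->batching++;
		return true;
	}
	return false;
}

/* Mirrors the loop in kyber_dispatch_request(): stay on the current domain
 * while its quota lasts, then round-robin once through the other domains. */
static bool dispatch_request(struct khd_model *khd)
{
	unsigned i;

	if (khd->batching < batch_size[khd->cur_domain] &&
	    dispatch_cur_domain(khd))
		return true;

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		khd->batching = 0;
		if (khd->cur_domain == KYBER_NUM_DOMAINS - 1)
			khd->cur_domain = 0;
		else
			khd->cur_domain++;
		if (dispatch_cur_domain(khd))
			return true;
	}
	return false;                        /* all domains empty */
}

int main(void)
{
	struct khd_model khd = { .pending = { 20, 3, 1, 0 } };

	while (dispatch_request(&khd))
		printf("dispatched from domain %u (batch %u)\n",
		       khd.cur_domain, khd.batching);
	return 0;
}

The batch quota lets consecutive requests from one domain dispatch together, while the bounded round-robin keeps any single domain from starving the others; this is the fairness-versus-throughput trade the fragments above encode.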