bfqg 138 block/bfq-cgroup.c static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
bfqg 141 block/bfq-cgroup.c struct bfqg_stats *stats = &bfqg->stats;
bfqg 145 block/bfq-cgroup.c if (bfqg == curr_bfqg)
bfqg 166 block/bfq-cgroup.c void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
bfqg 168 block/bfq-cgroup.c bfq_stat_add(&bfqg->stats.dequeue, 1);
bfqg 171 block/bfq-cgroup.c void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
bfqg 173 block/bfq-cgroup.c struct bfqg_stats *stats = &bfqg->stats;
bfqg 190 block/bfq-cgroup.c void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
bfqg 192 block/bfq-cgroup.c struct bfqg_stats *stats = &bfqg->stats;
bfqg 204 block/bfq-cgroup.c void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
bfqg 206 block/bfq-cgroup.c struct bfqg_stats *stats = &bfqg->stats;
bfqg 212 block/bfq-cgroup.c void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
bfqg 214 block/bfq-cgroup.c struct bfqg_stats *stats = &bfqg->stats;
bfqg 222 block/bfq-cgroup.c void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
bfqg 225 block/bfq-cgroup.c blkg_rwstat_add(&bfqg->stats.queued, op, 1);
bfqg 226 block/bfq-cgroup.c bfqg_stats_end_empty_time(&bfqg->stats);
bfqg 227 block/bfq-cgroup.c if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
bfqg 228 block/bfq-cgroup.c bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
bfqg 231 block/bfq-cgroup.c void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
bfqg 233 block/bfq-cgroup.c blkg_rwstat_add(&bfqg->stats.queued, op, -1);
bfqg 236 block/bfq-cgroup.c void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
bfqg 238 block/bfq-cgroup.c blkg_rwstat_add(&bfqg->stats.merged, op, 1);
bfqg 241 block/bfq-cgroup.c void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
bfqg 244 block/bfq-cgroup.c struct bfqg_stats *stats = &bfqg->stats;
bfqg 257 block/bfq-cgroup.c void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
bfqg 259 block/bfq-cgroup.c void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
bfqg 260 block/bfq-cgroup.c void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
bfqg 261 block/bfq-cgroup.c void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
bfqg 263 block/bfq-cgroup.c void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
bfqg 264 block/bfq-cgroup.c void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
bfqg 265 block/bfq-cgroup.c void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
bfqg 266 block/bfq-cgroup.c void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
bfqg 267 block/bfq-cgroup.c void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }
bfqg 284 block/bfq-cgroup.c struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
bfqg 286 block/bfq-cgroup.c return pd_to_blkg(&bfqg->pd);
bfqg 301 block/bfq-cgroup.c static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
bfqg 303 block/bfq-cgroup.c struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;
bfqg 322 block/bfq-cgroup.c static void bfqg_get(struct bfq_group *bfqg)
bfqg 324 block/bfq-cgroup.c bfqg->ref++;
bfqg 327 block/bfq-cgroup.c static void bfqg_put(struct bfq_group *bfqg)
bfqg 329 block/bfq-cgroup.c bfqg->ref--;
bfqg 331 block/bfq-cgroup.c if (bfqg->ref == 0)
bfqg 332 block/bfq-cgroup.c kfree(bfqg);
bfqg 335 block/bfq-cgroup.c void bfqg_and_blkg_get(struct bfq_group *bfqg)
bfqg 338 block/bfq-cgroup.c bfqg_get(bfqg);
bfqg 340 block/bfq-cgroup.c blkg_get(bfqg_to_blkg(bfqg));
bfqg 343 block/bfq-cgroup.c void bfqg_and_blkg_put(struct bfq_group *bfqg)
bfqg 345 block/bfq-cgroup.c blkg_put(bfqg_to_blkg(bfqg));
bfqg 347 block/bfq-cgroup.c bfqg_put(bfqg);
bfqg 395 block/bfq-cgroup.c static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
bfqg 399 block/bfq-cgroup.c if (!bfqg) /* root_group */
bfqg 402 block/bfq-cgroup.c parent = bfqg_parent(bfqg);
bfqg 404 block/bfq-cgroup.c lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);
bfqg 409 block/bfq-cgroup.c bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
bfqg 410 block/bfq-cgroup.c bfqg_stats_reset(&bfqg->stats);
bfqg 413 block/bfq-cgroup.c void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
bfqg 426 block/bfq-cgroup.c bfqg_and_blkg_get(bfqg);
bfqg 428 block/bfq-cgroup.c entity->parent = bfqg->my_entity; /* NULL for root group */
bfqg 429 block/bfq-cgroup.c entity->sched_data = &bfqg->sched_data;
bfqg 507 block/bfq-cgroup.c struct bfq_group *bfqg;
bfqg 509 block/bfq-cgroup.c bfqg = kzalloc_node(sizeof(*bfqg), gfp, q->node);
bfqg 510 block/bfq-cgroup.c if (!bfqg)
bfqg 513 block/bfq-cgroup.c if (bfqg_stats_init(&bfqg->stats, gfp)) {
bfqg 514 block/bfq-cgroup.c kfree(bfqg);
bfqg 519 block/bfq-cgroup.c bfqg_get(bfqg);
bfqg 520 block/bfq-cgroup.c return &bfqg->pd;
bfqg 526 block/bfq-cgroup.c struct bfq_group *bfqg = blkg_to_bfqg(blkg);
bfqg 528 block/bfq-cgroup.c struct bfq_entity *entity = &bfqg->entity;
bfqg 532 block/bfq-cgroup.c entity->my_sched_data = &bfqg->sched_data;
bfqg 533 block/bfq-cgroup.c bfqg->my_entity = entity; /*
bfqg 537 block/bfq-cgroup.c bfqg->bfqd = bfqd;
bfqg 538 block/bfq-cgroup.c bfqg->active_entities = 0;
bfqg 539 block/bfq-cgroup.c bfqg->rq_pos_tree = RB_ROOT;
bfqg 544 block/bfq-cgroup.c struct bfq_group *bfqg = pd_to_bfqg(pd);
bfqg 546 block/bfq-cgroup.c bfqg_stats_exit(&bfqg->stats);
bfqg 547 block/bfq-cgroup.c bfqg_put(bfqg);
bfqg 552 block/bfq-cgroup.c struct bfq_group *bfqg = pd_to_bfqg(pd);
bfqg 554 block/bfq-cgroup.c bfqg_stats_reset(&bfqg->stats);
bfqg 557 block/bfq-cgroup.c static void bfq_group_set_parent(struct bfq_group *bfqg,
bfqg 562 block/bfq-cgroup.c entity = &bfqg->entity;
bfqg 581 block/bfq-cgroup.c struct bfq_group *bfqg, *parent;
bfqg 584 block/bfq-cgroup.c bfqg = bfq_lookup_bfqg(bfqd, blkcg);
bfqg 586 block/bfq-cgroup.c if (unlikely(!bfqg))
bfqg 594 block/bfq-cgroup.c entity = &bfqg->entity;
bfqg 606 block/bfq-cgroup.c return bfqg;
bfqg 624 block/bfq-cgroup.c struct bfq_group *bfqg)
bfqg 650 block/bfq-cgroup.c entity->parent = bfqg->my_entity;
bfqg 651 block/bfq-cgroup.c entity->sched_data = &bfqg->sched_data;
bfqg 653 block/bfq-cgroup.c bfqg_and_blkg_get(bfqg);
bfqg 687 block/bfq-cgroup.c struct bfq_group *bfqg;
bfqg 690 block/bfq-cgroup.c bfqg = bfq_find_set_group(bfqd, blkcg);
bfqg 692 block/bfq-cgroup.c if (unlikely(!bfqg))
bfqg 693 block/bfq-cgroup.c bfqg = bfqd->root_group;
bfqg 698 block/bfq-cgroup.c if (entity->sched_data != &bfqg->sched_data) {
bfqg 706 block/bfq-cgroup.c if (entity->sched_data != &bfqg->sched_data)
bfqg 707 block/bfq-cgroup.c bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
bfqg 710 block/bfq-cgroup.c return bfqg;
bfqg 716 block/bfq-cgroup.c struct bfq_group *bfqg = NULL;
bfqg 729 block/bfq-cgroup.c bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
bfqg 780 block/bfq-cgroup.c blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
bfqg 834 block/bfq-cgroup.c struct bfq_group *bfqg,
bfqg 844 block/bfq-cgroup.c if (bfqg->sched_data.in_service_entity)
bfqg 846 block/bfq-cgroup.c bfqg->sched_data.in_service_entity,
bfqg 861 block/bfq-cgroup.c struct bfq_group *bfqg = pd_to_bfqg(pd);
bfqg 862 block/bfq-cgroup.c struct bfq_data *bfqd = bfqg->bfqd;
bfqg 863 block/bfq-cgroup.c struct bfq_entity *entity = bfqg->my_entity;
bfqg 877 block/bfq-cgroup.c st = bfqg->sched_data.service_tree + i;
bfqg 891 block/bfq-cgroup.c bfq_reparent_active_queues(bfqd, bfqg, st, i);
bfqg 910 block/bfq-cgroup.c bfq_put_async_queues(bfqd, bfqg);
bfqg 919 block/bfq-cgroup.c bfqg_stats_xfer_dead(bfqg);
bfqg 927 block/bfq-cgroup.c struct bfq_group *bfqg = blkg_to_bfqg(blkg);
bfqg 929 block/bfq-cgroup.c bfq_end_wr_async_queues(bfqd, bfqg);
bfqg 951 block/bfq-cgroup.c struct bfq_group *bfqg = pd_to_bfqg(pd);
bfqg 953 block/bfq-cgroup.c if (!bfqg->entity.dev_weight)
bfqg 955 block/bfq-cgroup.c return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
bfqg 969 block/bfq-cgroup.c static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
bfqg 973 block/bfq-cgroup.c bfqg->entity.dev_weight = dev_weight;
bfqg 980 block/bfq-cgroup.c if ((unsigned short)weight != bfqg->entity.new_weight) {
bfqg 981 block/bfq-cgroup.c bfqg->entity.new_weight = (unsigned short)weight;
bfqg 998 block/bfq-cgroup.c bfqg->entity.prio_changed = 1;
bfqg 1018 block/bfq-cgroup.c struct bfq_group *bfqg = blkg_to_bfqg(blkg);
bfqg 1020 block/bfq-cgroup.c if (bfqg)
bfqg 1021 block/bfq-cgroup.c bfq_group_set_weight(bfqg, val, 0);
bfqg 1035 block/bfq-cgroup.c struct bfq_group *bfqg;
bfqg 1054 block/bfq-cgroup.c bfqg = blkg_to_bfqg(ctx.blkg);
bfqg 1058 block/bfq-cgroup.c bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
bfqg 1189 block/bfq-cgroup.c struct bfq_group *bfqg = pd_to_bfqg(pd);
bfqg 1190 block/bfq-cgroup.c u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
bfqg 1194 block/bfq-cgroup.c v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
bfqg 1377 block/bfq-cgroup.c struct bfq_group *bfqg) {}
bfqg 1379 block/bfq-cgroup.c void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
bfqg 1389 block/bfq-cgroup.c entity->sched_data = &bfqg->sched_data;
bfqg 1409 block/bfq-cgroup.c void bfqg_and_blkg_get(struct bfq_group *bfqg) {}
bfqg 1411 block/bfq-cgroup.c void bfqg_and_blkg_put(struct bfq_group *bfqg) {}
bfqg 1415 block/bfq-cgroup.c struct bfq_group *bfqg;
bfqg 1418 block/bfq-cgroup.c bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
bfqg 1419 block/bfq-cgroup.c if (!bfqg)
bfqg 1423 block/bfq-cgroup.c bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
bfqg 1425 block/bfq-cgroup.c return bfqg;
bfqg 2368 block/bfq-iosched.c struct bfq_group *bfqg)
bfqg 2374 block/bfq-iosched.c if (bfqg->async_bfqq[i][j])
bfqg 2375 block/bfq-iosched.c bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
bfqg 2376 block/bfq-iosched.c if (bfqg->async_idle_bfqq)
bfqg 2377 block/bfq-iosched.c bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
bfqg 4776 block/bfq-iosched.c struct bfq_group *bfqg = bfqq_group(bfqq);
bfqg 4778 block/bfq-iosched.c bfqg_stats_update_avg_queue_size(bfqg);
bfqg 4779 block/bfq-iosched.c bfqg_stats_set_start_empty_time(bfqg);
bfqg 4780 block/bfq-iosched.c bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
bfqg 4827 block/bfq-iosched.c struct bfq_group *bfqg = bfqq_group(bfqq);
bfqg 4900 block/bfq-iosched.c bfqg_and_blkg_put(bfqg);
bfqg 5106 block/bfq-iosched.c struct bfq_group *bfqg,
bfqg 5111 block/bfq-iosched.c return &bfqg->async_bfqq[0][ioprio];
bfqg 5116 block/bfq-iosched.c return &bfqg->async_bfqq[1][ioprio];
bfqg 5118 block/bfq-iosched.c return &bfqg->async_idle_bfqq;
bfqg 5132 block/bfq-iosched.c struct bfq_group *bfqg;
bfqg 5136 block/bfq-iosched.c bfqg = bfq_find_set_group(bfqd, __bio_blkcg(bio));
bfqg 5137 block/bfq-iosched.c if (!bfqg) {
bfqg 5143 block/bfq-iosched.c async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
bfqg 5157 block/bfq-iosched.c bfq_init_entity(&bfqq->entity, bfqg);
bfqg 6304 block/bfq-iosched.c void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
bfqg 6310 block/bfq-iosched.c __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
bfqg 6312 block/bfq-iosched.c __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
bfqg 952 block/bfq-iosched.h void bfq_end_wr_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
bfqg 955 block/bfq-iosched.h void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
bfqg 961 block/bfq-iosched.h void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
bfqg 963 block/bfq-iosched.h void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op);
bfqg 964 block/bfq-iosched.h void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op);
bfqg 965 block/bfq-iosched.h void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
bfqg 967 block/bfq-iosched.h void bfqg_stats_update_dequeue(struct bfq_group *bfqg);
bfqg 968 block/bfq-iosched.h void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg);
bfqg 969 block/bfq-iosched.h void bfqg_stats_update_idle_time(struct bfq_group *bfqg);
bfqg 970 block/bfq-iosched.h void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg);
bfqg 971 block/bfq-iosched.h void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg);
bfqg 973 block/bfq-iosched.h struct bfq_group *bfqg);
bfqg 975 block/bfq-iosched.h void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg);
bfqg 980 block/bfq-iosched.h struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
bfqg 983 block/bfq-iosched.h void bfqg_and_blkg_get(struct bfq_group *bfqg);
bfqg 984 block/bfq-iosched.h void bfqg_and_blkg_put(struct bfq_group *bfqg);
bfqg 1075 block/bfq-iosched.h #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
bfqg 1077 block/bfq-iosched.h bfqg_to_blkg(bfqg)->blkcg, fmt, ##args); \
bfqg 1089 block/bfq-iosched.h #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
bfqg 165 block/bfq-wf2q.c struct bfq_group *bfqg;
bfqg 171 block/bfq-wf2q.c bfqg = container_of(group_sd, struct bfq_group, sched_data);
bfqg 177 block/bfq-wf2q.c bfqg_entity = bfqg->my_entity;
bfqg 209 block/bfq-wf2q.c struct bfq_group *bfqg;
bfqg 214 block/bfq-wf2q.c bfqg = container_of(entity, struct bfq_group, entity);
bfqg 228 block/bfq-wf2q.c if (bfqg->active_entities == 1)
bfqg 482 block/bfq-wf2q.c struct bfq_group *bfqg = NULL;
bfqg 497 block/bfq-wf2q.c bfqg = container_of(sd, struct bfq_group, sched_data);
bfqg 498 block/bfq-wf2q.c bfqd = (struct bfq_data *)bfqg->bfqd;
bfqg 503 block/bfq-wf2q.c if (bfqg != bfqd->root_group)
bfqg 504 block/bfq-wf2q.c bfqg->active_entities++;
bfqg 586 block/bfq-wf2q.c struct bfq_group *bfqg = NULL;
bfqg 598 block/bfq-wf2q.c bfqg = container_of(sd, struct bfq_group, sched_data);
bfqg 599 block/bfq-wf2q.c bfqd = (struct bfq_data *)bfqg->bfqd;
bfqg 604 block/bfq-wf2q.c if (bfqg != bfqd->root_group)
bfqg 605 block/bfq-wf2q.c bfqg->active_entities--;
bfqg 742 block/bfq-wf2q.c struct bfq_group *bfqg;
bfqg 750 block/bfq-wf2q.c bfqg = container_of(sd, struct bfq_group, sched_data);
bfqg 751 block/bfq-wf2q.c bfqd = (struct bfq_data *)bfqg->bfqd;
bfqg 1018 block/bfq-wf2q.c struct bfq_group *bfqg =
bfqg 1020 block/bfq-wf2q.c struct bfq_data *bfqd = bfqg->bfqd;
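
Read together, the entries for lines 322-347 of block/bfq-cgroup.c above describe the two-level reference counting on a bfq_group: a plain local counter (bfqg_get/bfqg_put) plus a paired reference on the owning blkcg_gq (bfqg_and_blkg_get/bfqg_and_blkg_put). The sketch below only reassembles how those indexed lines fit together; it is reconstructed from this listing rather than copied verbatim from the kernel tree, and the surrounding locking, comments, and declarations are assumed context, not shown here.

static void bfqg_get(struct bfq_group *bfqg)
{
	bfqg->ref++;	/* plain (non-atomic) counter; serialization is assumed
			 * to be provided by the callers */
}

static void bfqg_put(struct bfq_group *bfqg)
{
	bfqg->ref--;	/* drop one local reference ... */

	if (bfqg->ref == 0)
		kfree(bfqg);	/* ... and free the group on the last put */
}

void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
	bfqg_get(bfqg);			/* reference on the bfq_group itself */

	blkg_get(bfqg_to_blkg(bfqg));	/* plus one on the owning blkcg_gq */
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
	blkg_put(bfqg_to_blkg(bfqg));

	bfqg_put(bfqg);
}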