Lines matching refs: tg

55 	struct throtl_grp	*tg;		/* tg this qnode belongs to */  member
169 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg) in tg_to_blkg() argument
171 return pd_to_blkg(&tg->pd); in tg_to_blkg()
198 struct throtl_grp *tg = sq_to_tg(sq); in sq_to_td() local
200 if (tg) in sq_to_td()
201 return tg->td; in sq_to_td()
233 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg) in throtl_qnode_init() argument
237 qn->tg = tg; in throtl_qnode_init()
256 blkg_get(tg_to_blkg(qn->tg)); in throtl_qnode_add_bio()
306 *tg_to_put = qn->tg; in throtl_pop_queued()
308 blkg_put(tg_to_blkg(qn->tg)); in throtl_pop_queued()
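
The get/put pairing above is easy to misread in listing form: throtl_qnode_add_bio() takes a blkg reference only when the first bio lands on an empty qnode, and throtl_pop_queued() drops it (or hands it back through *tg_to_put) once the qnode empties. A minimal user-space sketch of that pairing, with a plain refcount standing in for the blkg reference and hypothetical struct names:

        #include <assert.h>
        #include <stdio.h>

        /* user-space model of the qnode/blkg reference pairing: the qnode
         * pins one reference on its group for as long as it holds bios */
        struct group { int refcnt; };
        struct qnode { struct group *grp; int nr_bios; };

        static void qnode_add_bio(struct qnode *qn)
        {
                if (qn->nr_bios++ == 0)     /* first bio: pin the group */
                        qn->grp->refcnt++;  /* models blkg_get() */
        }

        static void qnode_pop_bio(struct qnode *qn)
        {
                assert(qn->nr_bios > 0);
                if (--qn->nr_bios == 0)     /* last bio: unpin the group */
                        qn->grp->refcnt--;  /* models blkg_put() */
        }

        int main(void)
        {
                struct group g = { .refcnt = 1 };
                struct qnode qn = { .grp = &g, .nr_bios = 0 };

                qnode_add_bio(&qn);
                qnode_add_bio(&qn);         /* second bio takes no extra ref */
                printf("refcnt while queued: %d\n", g.refcnt); /* 2 */
                qnode_pop_bio(&qn);
                qnode_pop_bio(&qn);
                printf("refcnt when empty:  %d\n", g.refcnt);  /* 1 */
                return 0;
        }
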
328 struct throtl_grp *tg; in throtl_pd_alloc() local
331 tg = kzalloc_node(sizeof(*tg), gfp, node); in throtl_pd_alloc()
332 if (!tg) in throtl_pd_alloc()
335 throtl_service_queue_init(&tg->service_queue); in throtl_pd_alloc()
338 throtl_qnode_init(&tg->qnode_on_self[rw], tg); in throtl_pd_alloc()
339 throtl_qnode_init(&tg->qnode_on_parent[rw], tg); in throtl_pd_alloc()
342 RB_CLEAR_NODE(&tg->rb_node); in throtl_pd_alloc()
343 tg->bps[READ] = -1; in throtl_pd_alloc()
344 tg->bps[WRITE] = -1; in throtl_pd_alloc()
345 tg->iops[READ] = -1; in throtl_pd_alloc()
346 tg->iops[WRITE] = -1; in throtl_pd_alloc()
348 return &tg->pd; in throtl_pd_alloc()
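
Note the -1 initializers: bps[] is u64 and iops[] is unsigned int, so -1 stores as the maximum value and acts as a "no limit configured" sentinel. A tiny sketch of the idiom:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                /* bps is u64 in struct throtl_grp; assigning -1 yields
                 * UINT64_MAX, which the code treats as "not configured" */
                uint64_t bps = (uint64_t)-1;

                printf("sentinel:   %llu\n", (unsigned long long)bps);
                printf("unlimited?  %s\n", bps == (uint64_t)-1 ? "yes" : "no");
                return 0;
        }
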
353 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_init() local
354 struct blkcg_gq *blkg = tg_to_blkg(tg); in throtl_pd_init()
356 struct throtl_service_queue *sq = &tg->service_queue; in throtl_pd_init()
374 tg->td = td; in throtl_pd_init()
382 static void tg_update_has_rules(struct throtl_grp *tg) in tg_update_has_rules() argument
384 struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq); in tg_update_has_rules()
388 tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) || in tg_update_has_rules()
389 (tg->bps[rw] != -1 || tg->iops[rw] != -1); in tg_update_has_rules()
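
tg_update_has_rules() makes has_rules[rw] true when the group itself sets a bps or iops limit for that direction, or when any ancestor already has rules. A hedged stand-alone sketch of that propagation (struct grp and NO_LIMIT are illustrative names):

        #include <stdbool.h>
        #include <stddef.h>
        #include <stdint.h>
        #include <stdio.h>

        #define NO_LIMIT ((uint64_t)-1)

        struct grp {
                struct grp *parent;
                uint64_t bps, iops;
                bool has_rules;
        };

        /* mirror of the tg_update_has_rules() logic for one direction:
         * a group "has rules" if it or any ancestor sets a limit */
        static void update_has_rules(struct grp *g)
        {
                g->has_rules = (g->parent && g->parent->has_rules) ||
                               g->bps != NO_LIMIT || g->iops != NO_LIMIT;
        }

        int main(void)
        {
                struct grp root = { NULL,  NO_LIMIT, NO_LIMIT, false };
                struct grp mid  = { &root, 1048576,  NO_LIMIT, false }; /* 1 MiB/s */
                struct grp leaf = { &mid,  NO_LIMIT, NO_LIMIT, false };

                update_has_rules(&root);
                update_has_rules(&mid);   /* own bps limit -> true */
                update_has_rules(&leaf);  /* inherits from mid -> true */
                printf("%d %d %d\n", root.has_rules, mid.has_rules, leaf.has_rules);
                return 0;
        }
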
403 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_free() local
405 del_timer_sync(&tg->service_queue.pending_timer); in throtl_pd_free()
406 kfree(tg); in throtl_pd_free()
442 struct throtl_grp *tg; in update_min_dispatch_time() local
444 tg = throtl_rb_first(parent_sq); in update_min_dispatch_time()
445 if (!tg) in update_min_dispatch_time()
448 parent_sq->first_pending_disptime = tg->disptime; in update_min_dispatch_time()
451 static void tg_service_queue_add(struct throtl_grp *tg) in tg_service_queue_add() argument
453 struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq; in tg_service_queue_add()
457 unsigned long key = tg->disptime; in tg_service_queue_add()
473 parent_sq->first_pending = &tg->rb_node; in tg_service_queue_add()
475 rb_link_node(&tg->rb_node, parent, node); in tg_service_queue_add()
476 rb_insert_color(&tg->rb_node, &parent_sq->pending_tree); in tg_service_queue_add()
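
The pending tree is keyed on tg->disptime, a jiffies value, so the insertion walk must compare keys with the wraparound-safe time_before() idiom rather than plain <. A runnable sketch of why:

        #include <stdio.h>

        /* the classic jiffies idiom: signed subtraction stays correct
         * across the unsigned wraparound, which plain '<' gets wrong */
        static int time_before(unsigned long a, unsigned long b)
        {
                return (long)(a - b) < 0;
        }

        int main(void)
        {
                unsigned long pre_wrap  = (unsigned long)-10; /* about to wrap */
                unsigned long post_wrap = 5;                  /* just wrapped */

                printf("naive: %d\n", pre_wrap < post_wrap);             /* 0: wrong */
                printf("safe:  %d\n", time_before(pre_wrap, post_wrap)); /* 1: right */
                return 0;
        }
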
479 static void __throtl_enqueue_tg(struct throtl_grp *tg) in __throtl_enqueue_tg() argument
481 tg_service_queue_add(tg); in __throtl_enqueue_tg()
482 tg->flags |= THROTL_TG_PENDING; in __throtl_enqueue_tg()
483 tg->service_queue.parent_sq->nr_pending++; in __throtl_enqueue_tg()
486 static void throtl_enqueue_tg(struct throtl_grp *tg) in throtl_enqueue_tg() argument
488 if (!(tg->flags & THROTL_TG_PENDING)) in throtl_enqueue_tg()
489 __throtl_enqueue_tg(tg); in throtl_enqueue_tg()
492 static void __throtl_dequeue_tg(struct throtl_grp *tg) in __throtl_dequeue_tg() argument
494 throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq); in __throtl_dequeue_tg()
495 tg->flags &= ~THROTL_TG_PENDING; in __throtl_dequeue_tg()
498 static void throtl_dequeue_tg(struct throtl_grp *tg) in throtl_dequeue_tg() argument
500 if (tg->flags & THROTL_TG_PENDING) in throtl_dequeue_tg()
501 __throtl_dequeue_tg(tg); in throtl_dequeue_tg()
550 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg, in throtl_start_new_slice_with_credit() argument
553 tg->bytes_disp[rw] = 0; in throtl_start_new_slice_with_credit()
554 tg->io_disp[rw] = 0; in throtl_start_new_slice_with_credit()
562 if (time_after_eq(start, tg->slice_start[rw])) in throtl_start_new_slice_with_credit()
563 tg->slice_start[rw] = start; in throtl_start_new_slice_with_credit()
565 tg->slice_end[rw] = jiffies + throtl_slice; in throtl_start_new_slice_with_credit()
566 throtl_log(&tg->service_queue, in throtl_start_new_slice_with_credit()
568 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice_with_credit()
569 tg->slice_end[rw], jiffies); in throtl_start_new_slice_with_credit()
572 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw) in throtl_start_new_slice() argument
574 tg->bytes_disp[rw] = 0; in throtl_start_new_slice()
575 tg->io_disp[rw] = 0; in throtl_start_new_slice()
576 tg->slice_start[rw] = jiffies; in throtl_start_new_slice()
577 tg->slice_end[rw] = jiffies + throtl_slice; in throtl_start_new_slice()
578 throtl_log(&tg->service_queue, in throtl_start_new_slice()
580 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice()
581 tg->slice_end[rw], jiffies); in throtl_start_new_slice()
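
A slice is just the window [slice_start, slice_end] with slice_end = jiffies + throtl_slice, and throtl_slice_used() reports the slice expired once jiffies leaves that window. A sketch assuming HZ = 1000 and the 100 ms default slice, with a wraparound-tolerant stand-in for time_in_range():

        #include <stdio.h>

        #define HZ           1000u
        #define THROTL_SLICE (HZ / 10)  /* assumed 100 ms default slice */

        static unsigned long jiffies;

        struct slice { unsigned long start, end; };

        static void start_new_slice(struct slice *s)
        {
                s->start = jiffies;
                s->end   = jiffies + THROTL_SLICE;
        }

        /* mirrors throtl_slice_used(): the slice is still live while
         * jiffies sits inside [start, end] */
        static int slice_used(const struct slice *s)
        {
                return !(jiffies - s->start <= s->end - s->start);
        }

        int main(void)
        {
                struct slice s;

                jiffies = 5000;
                start_new_slice(&s);
                jiffies += 50;  printf("used after  50ms? %d\n", slice_used(&s)); /* 0 */
                jiffies += 200; printf("used after 250ms? %d\n", slice_used(&s)); /* 1 */
                return 0;
        }
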
584 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw, in throtl_set_slice_end() argument
587 tg->slice_end[rw] = roundup(jiffy_end, throtl_slice); in throtl_set_slice_end()
590 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw, in throtl_extend_slice() argument
593 tg->slice_end[rw] = roundup(jiffy_end, throtl_slice); in throtl_extend_slice()
594 throtl_log(&tg->service_queue, in throtl_extend_slice()
596 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_extend_slice()
597 tg->slice_end[rw], jiffies); in throtl_extend_slice()
601 static bool throtl_slice_used(struct throtl_grp *tg, bool rw) in throtl_slice_used() argument
603 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw])) in throtl_slice_used()
610 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw) in throtl_trim_slice() argument
615 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw])); in throtl_trim_slice()
622 if (throtl_slice_used(tg, rw)) in throtl_trim_slice()
633 throtl_set_slice_end(tg, rw, jiffies + throtl_slice); in throtl_trim_slice()
635 time_elapsed = jiffies - tg->slice_start[rw]; in throtl_trim_slice()
641 tmp = tg->bps[rw] * throtl_slice * nr_slices; in throtl_trim_slice()
645 io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ; in throtl_trim_slice()
650 if (tg->bytes_disp[rw] >= bytes_trim) in throtl_trim_slice()
651 tg->bytes_disp[rw] -= bytes_trim; in throtl_trim_slice()
653 tg->bytes_disp[rw] = 0; in throtl_trim_slice()
655 if (tg->io_disp[rw] >= io_trim) in throtl_trim_slice()
656 tg->io_disp[rw] -= io_trim; in throtl_trim_slice()
658 tg->io_disp[rw] = 0; in throtl_trim_slice()
660 tg->slice_start[rw] += nr_slices * throtl_slice; in throtl_trim_slice()
662 throtl_log(&tg->service_queue, in throtl_trim_slice()
665 tg->slice_start[rw], tg->slice_end[rw], jiffies); in throtl_trim_slice()
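
The trim arithmetic converts whole elapsed slices back into credit: bytes_trim = bps * throtl_slice * nr_slices / HZ (and likewise for io_trim), the dispatched counters drop by that much without going negative, and slice_start advances. A worked example under the same assumed HZ and slice length:

        #include <stdint.h>
        #include <stdio.h>

        #define HZ           1000u
        #define THROTL_SLICE 100u   /* assumed 100 ms slice, i.e. HZ/10 */

        int main(void)
        {
                uint64_t bps        = 1024 * 1024;  /* 1 MiB/s limit */
                unsigned long start = 1000, now = 1350;
                uint64_t bytes_disp = 500000;

                /* whole slices elapsed since slice_start */
                unsigned long nr_slices = (now - start) / THROTL_SLICE;  /* 3 */

                /* credit represented by those slices, as in throtl_trim_slice() */
                uint64_t bytes_trim = bps * THROTL_SLICE * nr_slices / HZ;

                bytes_disp = bytes_disp >= bytes_trim ? bytes_disp - bytes_trim : 0;
                start += nr_slices * THROTL_SLICE;

                printf("trimmed %llu bytes, disp now %llu, slice_start %lu\n",
                       (unsigned long long)bytes_trim,
                       (unsigned long long)bytes_disp, start);
                return 0;
        }
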
668 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio, in tg_with_in_iops_limit() argument
676 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; in tg_with_in_iops_limit()
691 tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd; in tg_with_in_iops_limit()
699 if (tg->io_disp[rw] + 1 <= io_allowed) { in tg_with_in_iops_limit()
706 jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1; in tg_with_in_iops_limit()
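
The iops check computes the budget io_allowed = iops * elapsed / HZ and, when one more I/O would exceed it, derives the wait from when that I/O first fits. A worked sketch (ignoring the slice rounding the kernel applies to the elapsed time):

        #include <stdint.h>
        #include <stdio.h>

        #define HZ 1000u

        int main(void)
        {
                uint32_t iops         = 100;  /* limit: 100 io/s */
                uint32_t io_disp      = 25;   /* already dispatched this slice */
                unsigned long elapsed = 200;  /* jiffies since slice_start */

                /* budget so far, as in tg_with_in_iops_limit() */
                uint64_t io_allowed = (uint64_t)iops * elapsed / HZ;    /* 20 */

                if (io_disp + 1 <= io_allowed) {
                        printf("dispatch now\n");
                } else {
                        /* when does the (io_disp + 1)-th io fit the budget? */
                        unsigned long jiffy_wait =
                                ((uint64_t)(io_disp + 1) * HZ) / iops + 1;
                        if (jiffy_wait > elapsed)
                                jiffy_wait -= elapsed;
                        else
                                jiffy_wait = 1;
                        printf("wait %lu jiffies\n", jiffy_wait);       /* 61 */
                }
                return 0;
        }
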
718 static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, in tg_with_in_bps_limit() argument
725 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; in tg_with_in_bps_limit()
733 tmp = tg->bps[rw] * jiffy_elapsed_rnd; in tg_with_in_bps_limit()
737 if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) { in tg_with_in_bps_limit()
744 extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed; in tg_with_in_bps_limit()
745 jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]); in tg_with_in_bps_limit()
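
The byte budget is symmetric: bytes_allowed = bps * elapsed / HZ, and any excess converts straight back into jiffies of wait via extra_bytes * HZ / bps. A worked sketch:

        #include <stdint.h>
        #include <stdio.h>

        #define HZ 1000u

        int main(void)
        {
                uint64_t bps          = 1024 * 1024;  /* 1 MiB/s */
                uint64_t bytes_disp   = 300000;
                uint32_t bio_size     = 131072;       /* 128 KiB bio */
                unsigned long elapsed = 250;          /* jiffies since slice_start */

                uint64_t bytes_allowed = bps * elapsed / HZ;   /* 262144 */

                if (bytes_disp + bio_size <= bytes_allowed) {
                        printf("dispatch now\n");
                } else {
                        /* extra bytes convert straight back into jiffies,
                         * as in tg_with_in_bps_limit() */
                        uint64_t extra = bytes_disp + bio_size - bytes_allowed;
                        unsigned long jiffy_wait = extra * HZ / bps;
                        if (!jiffy_wait)
                                jiffy_wait = 1;
                        printf("wait %lu jiffies\n", jiffy_wait);  /* 161 */
                }
                return 0;
        }
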
764 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, in tg_may_dispatch() argument
776 BUG_ON(tg->service_queue.nr_queued[rw] && in tg_may_dispatch()
777 bio != throtl_peek_queued(&tg->service_queue.queued[rw])); in tg_may_dispatch()
780 if (tg->bps[rw] == -1 && tg->iops[rw] == -1) { in tg_may_dispatch()
791 if (throtl_slice_used(tg, rw)) in tg_may_dispatch()
792 throtl_start_new_slice(tg, rw); in tg_may_dispatch()
794 if (time_before(tg->slice_end[rw], jiffies + throtl_slice)) in tg_may_dispatch()
795 throtl_extend_slice(tg, rw, jiffies + throtl_slice); in tg_may_dispatch()
798 if (tg_with_in_bps_limit(tg, bio, &bps_wait) && in tg_may_dispatch()
799 tg_with_in_iops_limit(tg, bio, &iops_wait)) { in tg_may_dispatch()
810 if (time_before(tg->slice_end[rw], jiffies + max_wait)) in tg_may_dispatch()
811 throtl_extend_slice(tg, rw, jiffies + max_wait); in tg_may_dispatch()
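
tg_may_dispatch() combines the two: the bio may go only if both checks pass, otherwise the group must wait for the later of the two deadlines and the slice is extended to cover that wait. In miniature:

        #include <stdio.h>

        /* sketch of the decision at the end of tg_may_dispatch(): a bio goes
         * now only if both budgets say yes; otherwise wait for the later one */
        static unsigned long max_wait(unsigned long bps_wait,
                                      unsigned long iops_wait)
        {
                return bps_wait > iops_wait ? bps_wait : iops_wait;
        }

        int main(void)
        {
                unsigned long bps_wait = 161, iops_wait = 61; /* from the sketches above */

                if (!bps_wait && !iops_wait)
                        printf("dispatch now\n");
                else
                        printf("earliest dispatch in %lu jiffies\n",
                               max_wait(bps_wait, iops_wait));
                return 0;
        }
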
816 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) in throtl_charge_bio() argument
821 tg->bytes_disp[rw] += bio->bi_iter.bi_size; in throtl_charge_bio()
822 tg->io_disp[rw]++; in throtl_charge_bio()
844 struct throtl_grp *tg) in throtl_add_bio_tg() argument
846 struct throtl_service_queue *sq = &tg->service_queue; in throtl_add_bio_tg()
850 qn = &tg->qnode_on_self[rw]; in throtl_add_bio_tg()
859 tg->flags |= THROTL_TG_WAS_EMPTY; in throtl_add_bio_tg()
864 throtl_enqueue_tg(tg); in throtl_add_bio_tg()
867 static void tg_update_disptime(struct throtl_grp *tg) in tg_update_disptime() argument
869 struct throtl_service_queue *sq = &tg->service_queue; in tg_update_disptime()
874 tg_may_dispatch(tg, bio, &read_wait); in tg_update_disptime()
877 tg_may_dispatch(tg, bio, &write_wait); in tg_update_disptime()
883 throtl_dequeue_tg(tg); in tg_update_disptime()
884 tg->disptime = disptime; in tg_update_disptime()
885 throtl_enqueue_tg(tg); in tg_update_disptime()
888 tg->flags &= ~THROTL_TG_WAS_EMPTY; in tg_update_disptime()
901 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw) in tg_dispatch_one_bio() argument
903 struct throtl_service_queue *sq = &tg->service_queue; in tg_dispatch_one_bio()
918 throtl_charge_bio(tg, bio); in tg_dispatch_one_bio()
928 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg); in tg_dispatch_one_bio()
929 start_parent_slice_with_credit(tg, parent_tg, rw); in tg_dispatch_one_bio()
931 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw], in tg_dispatch_one_bio()
933 BUG_ON(tg->td->nr_queued[rw] <= 0); in tg_dispatch_one_bio()
934 tg->td->nr_queued[rw]--; in tg_dispatch_one_bio()
937 throtl_trim_slice(tg, rw); in tg_dispatch_one_bio()
943 static int throtl_dispatch_tg(struct throtl_grp *tg) in throtl_dispatch_tg() argument
945 struct throtl_service_queue *sq = &tg->service_queue; in throtl_dispatch_tg()
954 tg_may_dispatch(tg, bio, NULL)) { in throtl_dispatch_tg()
956 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in throtl_dispatch_tg()
964 tg_may_dispatch(tg, bio, NULL)) { in throtl_dispatch_tg()
966 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in throtl_dispatch_tg()
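
throtl_dispatch_tg() caps the work done per pass and biases it toward reads; in this era of the code the quantum was 8 bios split 75/25 between reads and writes (an assumption worth verifying against the exact tree). The split, spelled out:

        #include <stdio.h>

        #define THROTL_GRP_QUANTUM 8  /* assumed per-group dispatch quantum */

        int main(void)
        {
                /* the 75/25 read/write split used by throtl_dispatch_tg() */
                unsigned int max_nr_reads  = THROTL_GRP_QUANTUM * 3 / 4;        /* 6 */
                unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads; /* 2 */

                printf("per pass: up to %u reads, then up to %u writes\n",
                       max_nr_reads, max_nr_writes);
                return 0;
        }
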
981 struct throtl_grp *tg = throtl_rb_first(parent_sq); in throtl_select_dispatch() local
982 struct throtl_service_queue *sq = &tg->service_queue; in throtl_select_dispatch()
984 if (!tg) in throtl_select_dispatch()
987 if (time_before(jiffies, tg->disptime)) in throtl_select_dispatch()
990 throtl_dequeue_tg(tg); in throtl_select_dispatch()
992 nr_disp += throtl_dispatch_tg(tg); in throtl_select_dispatch()
995 tg_update_disptime(tg); in throtl_select_dispatch()
1022 struct throtl_grp *tg = sq_to_tg(sq); in throtl_pending_timer_fn() local
1059 if (tg->flags & THROTL_TG_WAS_EMPTY) { in throtl_pending_timer_fn()
1060 tg_update_disptime(tg); in throtl_pending_timer_fn()
1064 tg = sq_to_tg(sq); in throtl_pending_timer_fn()
1114 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_conf_u64() local
1115 u64 v = *(u64 *)((void *)tg + off); in tg_prfill_conf_u64()
1125 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_conf_uint() local
1126 unsigned int v = *(unsigned int *)((void *)tg + off); in tg_prfill_conf_uint()
1147 static void tg_conf_updated(struct throtl_grp *tg) in tg_conf_updated() argument
1149 struct throtl_service_queue *sq = &tg->service_queue; in tg_conf_updated()
1153 throtl_log(&tg->service_queue, in tg_conf_updated()
1155 tg->bps[READ], tg->bps[WRITE], in tg_conf_updated()
1156 tg->iops[READ], tg->iops[WRITE]); in tg_conf_updated()
1165 blkg_for_each_descendant_pre(blkg, pos_css, tg_to_blkg(tg)) in tg_conf_updated()
1176 throtl_start_new_slice(tg, 0); in tg_conf_updated()
1177 throtl_start_new_slice(tg, 1); in tg_conf_updated()
1179 if (tg->flags & THROTL_TG_PENDING) { in tg_conf_updated()
1180 tg_update_disptime(tg); in tg_conf_updated()
1190 struct throtl_grp *tg; in tg_set_conf() local
1204 tg = blkg_to_tg(ctx.blkg); in tg_set_conf()
1207 *(u64 *)((void *)tg + of_cft(of)->private) = v; in tg_set_conf()
1209 *(unsigned int *)((void *)tg + of_cft(of)->private) = v; in tg_set_conf()
1211 tg_conf_updated(tg); in tg_set_conf()
1271 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_max() local
1277 if (tg->bps[READ] == -1 && tg->bps[WRITE] == -1 && in tg_prfill_max()
1278 tg->iops[READ] == -1 && tg->iops[WRITE] == -1) in tg_prfill_max()
1281 if (tg->bps[READ] != -1) in tg_prfill_max()
1282 snprintf(bufs[0], sizeof(bufs[0]), "%llu", tg->bps[READ]); in tg_prfill_max()
1283 if (tg->bps[WRITE] != -1) in tg_prfill_max()
1284 snprintf(bufs[1], sizeof(bufs[1]), "%llu", tg->bps[WRITE]); in tg_prfill_max()
1285 if (tg->iops[READ] != -1) in tg_prfill_max()
1286 snprintf(bufs[2], sizeof(bufs[2]), "%u", tg->iops[READ]); in tg_prfill_max()
1287 if (tg->iops[WRITE] != -1) in tg_prfill_max()
1288 snprintf(bufs[3], sizeof(bufs[3]), "%u", tg->iops[WRITE]); in tg_prfill_max()
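
tg_prfill_max() prints nothing at all when every limit is unset, and otherwise substitutes each configured value for the default "max" string; -1 is again the sentinel. A user-space model of that formatting (the rbps/wbps/riops/wiops token names match the cgroup v2 io.max interface):

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        #define NO_LIMIT ((uint64_t)-1)

        int main(void)
        {
                /* model of tg_prfill_max(): unset limits print as "max" */
                uint64_t bps[2]  = { 1048576, NO_LIMIT };   /* READ, WRITE */
                uint32_t iops[2] = { (uint32_t)-1, 120 };

                char bufs[4][21];
                strcpy(bufs[0], "max"); strcpy(bufs[1], "max");
                strcpy(bufs[2], "max"); strcpy(bufs[3], "max");

                if (bps[0] != NO_LIMIT)
                        snprintf(bufs[0], sizeof(bufs[0]), "%llu",
                                 (unsigned long long)bps[0]);
                if (bps[1] != NO_LIMIT)
                        snprintf(bufs[1], sizeof(bufs[1]), "%llu",
                                 (unsigned long long)bps[1]);
                if (iops[0] != (uint32_t)-1)
                        snprintf(bufs[2], sizeof(bufs[2]), "%u", iops[0]);
                if (iops[1] != (uint32_t)-1)
                        snprintf(bufs[3], sizeof(bufs[3]), "%u", iops[1]);

                printf("rbps=%s wbps=%s riops=%s wiops=%s\n",
                       bufs[0], bufs[1], bufs[2], bufs[3]);
                return 0;
        }
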
1307 struct throtl_grp *tg; in tg_set_max() local
1315 tg = blkg_to_tg(ctx.blkg); in tg_set_max()
1317 v[0] = tg->bps[READ]; in tg_set_max()
1318 v[1] = tg->bps[WRITE]; in tg_set_max()
1319 v[2] = tg->iops[READ]; in tg_set_max()
1320 v[3] = tg->iops[WRITE]; in tg_set_max()
1357 tg->bps[READ] = v[0]; in tg_set_max()
1358 tg->bps[WRITE] = v[1]; in tg_set_max()
1359 tg->iops[READ] = v[2]; in tg_set_max()
1360 tg->iops[WRITE] = v[3]; in tg_set_max()
1362 tg_conf_updated(tg); in tg_set_max()
1400 struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg); in blk_throtl_bio() local
1408 if ((bio->bi_rw & REQ_THROTTLED) || !tg->has_rules[rw]) in blk_throtl_bio()
1416 sq = &tg->service_queue; in blk_throtl_bio()
1424 if (!tg_may_dispatch(tg, bio, NULL)) in blk_throtl_bio()
1428 throtl_charge_bio(tg, bio); in blk_throtl_bio()
1441 throtl_trim_slice(tg, rw); in blk_throtl_bio()
1448 qn = &tg->qnode_on_parent[rw]; in blk_throtl_bio()
1450 tg = sq_to_tg(sq); in blk_throtl_bio()
1451 if (!tg) in blk_throtl_bio()
1458 tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw], in blk_throtl_bio()
1459 tg->io_disp[rw], tg->iops[rw], in blk_throtl_bio()
1463 tg->td->nr_queued[rw]++; in blk_throtl_bio()
1464 throtl_add_bio_tg(bio, qn, tg); in blk_throtl_bio()
1473 if (tg->flags & THROTL_TG_WAS_EMPTY) { in blk_throtl_bio()
1474 tg_update_disptime(tg); in blk_throtl_bio()
1475 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true); in blk_throtl_bio()
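
The hot path of blk_throtl_bio() climbs the hierarchy: each group that can absorb the bio is charged and the walk moves to its parent; the first group that cannot queues the bio on its service queue. A hedged sketch with an illustrative byte budget standing in for the full bps/iops slice machinery:

        #include <stddef.h>
        #include <stdint.h>
        #include <stdio.h>

        struct grp {
                const char *name;
                struct grp *parent;
                uint64_t budget;   /* stand-in for the bps/iops slice budget */
        };

        /* sketch of the climb loop in blk_throtl_bio(): charge every level
         * that has budget; queue at the first level that does not */
        static void throttle(struct grp *g, uint64_t bio_bytes)
        {
                while (g) {
                        if (g->budget < bio_bytes) {
                                printf("queued at %s\n", g->name);
                                return;
                        }
                        g->budget -= bio_bytes;   /* models throtl_charge_bio() */
                        g = g->parent;            /* climb toward the root */
                }
                printf("dispatched\n");
        }

        int main(void)
        {
                struct grp root = { "root", NULL,  1 << 20 };
                struct grp leaf = { "leaf", &root, 4096 };

                throttle(&leaf, 4096);   /* fits everywhere: dispatched */
                throttle(&leaf, 4096);   /* leaf budget exhausted: queued */
                return 0;
        }
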
1498 struct throtl_grp *tg; in tg_drain_bios() local
1500 while ((tg = throtl_rb_first(parent_sq))) { in tg_drain_bios()
1501 struct throtl_service_queue *sq = &tg->service_queue; in tg_drain_bios()
1504 throtl_dequeue_tg(tg); in tg_drain_bios()
1507 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in tg_drain_bios()
1509 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in tg_drain_bios()