Lines matching refs:tg (cross-reference hits in the Linux block-layer throttling code, block/blk-throttle.c)

55 	struct throtl_grp	*tg;		/* tg this qnode belongs to */  member
190 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg) in tg_to_blkg() argument
192 return pd_to_blkg(&tg->pd); in tg_to_blkg()
224 struct throtl_grp *tg = sq_to_tg(sq); in sq_to_td() local
226 if (tg) in sq_to_td()
227 return tg->td; in sq_to_td()
293 struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list, in tg_stats_alloc_fn() local
296 swap(tg->stats_cpu, stats_cpu); in tg_stats_alloc_fn()
297 list_del_init(&tg->stats_alloc_node); in tg_stats_alloc_fn()
306 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg) in throtl_qnode_init() argument
310 qn->tg = tg; in throtl_qnode_init()
329 blkg_get(tg_to_blkg(qn->tg)); in throtl_qnode_add_bio()
379 *tg_to_put = qn->tg; in throtl_pop_queued()
381 blkg_put(tg_to_blkg(qn->tg)); in throtl_pop_queued()
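
The three hits above trace a qnode's lifetime: throtl_qnode_init() (line 306) records the owning group, throtl_qnode_add_bio() (line 329) pins that group's blkg while the qnode sits on a queued list, and throtl_pop_queued() (line 379) either hands the reference to the caller via tg_to_put or drops it directly once the qnode drains. A minimal standalone sketch of that pairing, with a plain counter standing in for the blkg refcount:

    /* Userspace sketch only: ref_get()/ref_put() stand in for
     * blkg_get()/blkg_put(), nr_bios for the qnode's bio list. */
    struct grp { int refcnt; };

    static void ref_get(struct grp *g) { g->refcnt++; }
    static void ref_put(struct grp *g) { g->refcnt--; }

    struct qnode {
            struct grp *grp;        /* owning group, pinned while queued */
            int nr_bios;
    };

    static void qnode_add_bio(struct qnode *qn)
    {
            if (qn->nr_bios++ == 0)         /* first bio pins the group */
                    ref_get(qn->grp);
    }

    static void qnode_pop_bio(struct qnode *qn, struct grp **grp_to_put)
    {
            if (--qn->nr_bios == 0) {       /* last bio unpins it, */
                    if (grp_to_put)
                            *grp_to_put = qn->grp;  /* via the caller */
                    else
                            ref_put(qn->grp);       /* or directly */
            }
    }
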
408 struct throtl_grp *tg = blkg_to_tg(blkg); in throtl_pd_init() local
432 throtl_service_queue_init(&tg->service_queue, parent_sq); in throtl_pd_init()
435 throtl_qnode_init(&tg->qnode_on_self[rw], tg); in throtl_pd_init()
436 throtl_qnode_init(&tg->qnode_on_parent[rw], tg); in throtl_pd_init()
439 RB_CLEAR_NODE(&tg->rb_node); in throtl_pd_init()
440 tg->td = td; in throtl_pd_init()
442 tg->bps[READ] = -1; in throtl_pd_init()
443 tg->bps[WRITE] = -1; in throtl_pd_init()
444 tg->iops[READ] = -1; in throtl_pd_init()
445 tg->iops[WRITE] = -1; in throtl_pd_init()
453 list_add(&tg->stats_alloc_node, &tg_stats_alloc_list); in throtl_pd_init()
463 static void tg_update_has_rules(struct throtl_grp *tg) in tg_update_has_rules() argument
465 struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq); in tg_update_has_rules()
469 tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) || in tg_update_has_rules()
470 (tg->bps[rw] != -1 || tg->iops[rw] != -1); in tg_update_has_rules()
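
tg_update_has_rules() builds on the -1 sentinel seeded by throtl_pd_init() (lines 442-445): assigned to an unsigned field, -1 becomes the maximum value and means "no limit". A group has rules for a direction if any ancestor does, or if it sets a bps or iops limit itself, which lets blk_throtl_bio() (line 1503) skip the whole slow path with one flag test. A standalone sketch of that invariant, with simplified types:

    #include <stdbool.h>

    struct grp {
            struct grp *parent;
            long long bps[2], iops[2];      /* -1 == no limit */
            bool has_rules[2];
    };

    static void update_has_rules(struct grp *g)
    {
            for (int rw = 0; rw < 2; rw++)
                    g->has_rules[rw] =
                            (g->parent && g->parent->has_rules[rw]) ||
                            g->bps[rw] != -1 || g->iops[rw] != -1;
    }
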
484 struct throtl_grp *tg = blkg_to_tg(blkg); in throtl_pd_exit() local
488 list_del_init(&tg->stats_alloc_node); in throtl_pd_exit()
491 free_percpu(tg->stats_cpu); in throtl_pd_exit()
493 throtl_service_queue_exit(&tg->service_queue); in throtl_pd_exit()
498 struct throtl_grp *tg = blkg_to_tg(blkg); in throtl_pd_reset_stats() local
501 if (tg->stats_cpu == NULL) in throtl_pd_reset_stats()
505 struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu); in throtl_pd_reset_stats()
529 struct throtl_grp *tg = NULL; in throtl_lookup_create_tg() local
536 tg = td_root_tg(td); in throtl_lookup_create_tg()
544 tg = blkg_to_tg(blkg); in throtl_lookup_create_tg()
546 tg = td_root_tg(td); in throtl_lookup_create_tg()
549 return tg; in throtl_lookup_create_tg()
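
The hits at lines 536-546 are the two fallback paths of throtl_lookup_create_tg(): the root cgroup maps straight to the preallocated root group, and if on-demand blkg creation fails while the queue is still alive the bio is charged to the root group rather than failed. The surrounding shape, reconstructed for this kernel era (not verbatim):

    if (blkcg == &blkcg_root) {
            tg = td_root_tg(td);            /* common case, no lookup */
    } else {
            struct blkcg_gq *blkg = blkg_lookup_create(blkcg, q);

            if (!IS_ERR(blkg))
                    tg = blkg_to_tg(blkg);
            else if (!blk_queue_dying(q))
                    tg = td_root_tg(td);    /* creation failed: fall back */
    }
    return tg;
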
585 struct throtl_grp *tg; in update_min_dispatch_time() local
587 tg = throtl_rb_first(parent_sq); in update_min_dispatch_time()
588 if (!tg) in update_min_dispatch_time()
591 parent_sq->first_pending_disptime = tg->disptime; in update_min_dispatch_time()
594 static void tg_service_queue_add(struct throtl_grp *tg) in tg_service_queue_add() argument
596 struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq; in tg_service_queue_add()
600 unsigned long key = tg->disptime; in tg_service_queue_add()
616 parent_sq->first_pending = &tg->rb_node; in tg_service_queue_add()
618 rb_link_node(&tg->rb_node, parent, node); in tg_service_queue_add()
619 rb_insert_color(&tg->rb_node, &parent_sq->pending_tree); in tg_service_queue_add()
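
Lines 600-619 bracket a standard rbtree insertion keyed by the group's disptime. The elided walk between them looks roughly like this (reconstructed, not verbatim); it also explains the first_pending update at line 616, since the cached leftmost node only changes when the walk never went right:

    struct rb_node **node = &parent_sq->pending_tree.rb_node;
    struct rb_node *parent = NULL;
    struct throtl_grp *__tg;
    bool leftmost = true;

    while (*node) {
            parent = *node;
            __tg = rb_entry(parent, struct throtl_grp, rb_node);

            if (time_before(key, __tg->disptime)) {
                    node = &parent->rb_left;
            } else {
                    node = &parent->rb_right;
                    leftmost = false;       /* went right at least once */
            }
    }

    if (leftmost)
            parent_sq->first_pending = &tg->rb_node;
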
622 static void __throtl_enqueue_tg(struct throtl_grp *tg) in __throtl_enqueue_tg() argument
624 tg_service_queue_add(tg); in __throtl_enqueue_tg()
625 tg->flags |= THROTL_TG_PENDING; in __throtl_enqueue_tg()
626 tg->service_queue.parent_sq->nr_pending++; in __throtl_enqueue_tg()
629 static void throtl_enqueue_tg(struct throtl_grp *tg) in throtl_enqueue_tg() argument
631 if (!(tg->flags & THROTL_TG_PENDING)) in throtl_enqueue_tg()
632 __throtl_enqueue_tg(tg); in throtl_enqueue_tg()
635 static void __throtl_dequeue_tg(struct throtl_grp *tg) in __throtl_dequeue_tg() argument
637 throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq); in __throtl_dequeue_tg()
638 tg->flags &= ~THROTL_TG_PENDING; in __throtl_dequeue_tg()
641 static void throtl_dequeue_tg(struct throtl_grp *tg) in throtl_dequeue_tg() argument
643 if (tg->flags & THROTL_TG_PENDING) in throtl_dequeue_tg()
644 __throtl_dequeue_tg(tg); in throtl_dequeue_tg()
693 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg, in throtl_start_new_slice_with_credit() argument
696 tg->bytes_disp[rw] = 0; in throtl_start_new_slice_with_credit()
697 tg->io_disp[rw] = 0; in throtl_start_new_slice_with_credit()
705 if (time_after_eq(start, tg->slice_start[rw])) in throtl_start_new_slice_with_credit()
706 tg->slice_start[rw] = start; in throtl_start_new_slice_with_credit()
708 tg->slice_end[rw] = jiffies + throtl_slice; in throtl_start_new_slice_with_credit()
709 throtl_log(&tg->service_queue, in throtl_start_new_slice_with_credit()
711 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice_with_credit()
712 tg->slice_end[rw], jiffies); in throtl_start_new_slice_with_credit()
715 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw) in throtl_start_new_slice() argument
717 tg->bytes_disp[rw] = 0; in throtl_start_new_slice()
718 tg->io_disp[rw] = 0; in throtl_start_new_slice()
719 tg->slice_start[rw] = jiffies; in throtl_start_new_slice()
720 tg->slice_end[rw] = jiffies + throtl_slice; in throtl_start_new_slice()
721 throtl_log(&tg->service_queue, in throtl_start_new_slice()
723 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice()
724 tg->slice_end[rw], jiffies); in throtl_start_new_slice()
727 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw, in throtl_set_slice_end() argument
730 tg->slice_end[rw] = roundup(jiffy_end, throtl_slice); in throtl_set_slice_end()
733 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw, in throtl_extend_slice() argument
736 tg->slice_end[rw] = roundup(jiffy_end, throtl_slice); in throtl_extend_slice()
737 throtl_log(&tg->service_queue, in throtl_extend_slice()
739 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_extend_slice()
740 tg->slice_end[rw], jiffies); in throtl_extend_slice()
744 static bool throtl_slice_used(struct throtl_grp *tg, bool rw) in throtl_slice_used() argument
746 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw])) in throtl_slice_used()
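
The slice helpers at lines 693-746 all manipulate the same per-direction state: a [slice_start, slice_end] window in jiffies plus the bytes_disp/io_disp budget consumed inside it. A condensed standalone model of that state (simplified types; the inclusive range test mirrors time_in_range(), jiffies wraparound omitted):

    #include <stdbool.h>

    struct slice {
            unsigned long start, end;       /* jiffies */
            unsigned long long bytes_disp;  /* bytes consumed this slice */
            unsigned int io_disp;           /* ios consumed this slice */
    };

    static bool slice_used(const struct slice *s, unsigned long now)
    {
            return !(now >= s->start && now <= s->end);
    }

    static void slice_start_new(struct slice *s, unsigned long now,
                                unsigned long slice_len)
    {
            s->bytes_disp = 0;
            s->io_disp = 0;
            s->start = now;
            s->end = now + slice_len;
    }
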
753 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw) in throtl_trim_slice() argument
758 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw])); in throtl_trim_slice()
765 if (throtl_slice_used(tg, rw)) in throtl_trim_slice()
776 throtl_set_slice_end(tg, rw, jiffies + throtl_slice); in throtl_trim_slice()
778 time_elapsed = jiffies - tg->slice_start[rw]; in throtl_trim_slice()
784 tmp = tg->bps[rw] * throtl_slice * nr_slices; in throtl_trim_slice()
788 io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ; in throtl_trim_slice()
793 if (tg->bytes_disp[rw] >= bytes_trim) in throtl_trim_slice()
794 tg->bytes_disp[rw] -= bytes_trim; in throtl_trim_slice()
796 tg->bytes_disp[rw] = 0; in throtl_trim_slice()
798 if (tg->io_disp[rw] >= io_trim) in throtl_trim_slice()
799 tg->io_disp[rw] -= io_trim; in throtl_trim_slice()
801 tg->io_disp[rw] = 0; in throtl_trim_slice()
803 tg->slice_start[rw] += nr_slices * throtl_slice; in throtl_trim_slice()
805 throtl_log(&tg->service_queue, in throtl_trim_slice()
808 tg->slice_start[rw], tg->slice_end[rw], jiffies); in throtl_trim_slice()
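
The trim math at lines 778-803 converts whole elapsed slices back into budget so a long-idle group cannot bank unlimited credit. A worked example, assuming HZ = 1000 and throtl_slice = 100 jiffies (100 ms), with 250 jiffies elapsed, bps = 1 MiB/s and iops = 100:

    nr_slices  = time_elapsed / throtl_slice = 250 / 100   = 2
    bytes_trim = bps * throtl_slice * nr_slices / HZ
               = 1048576 * 100 * 2 / 1000                  = 209715
    io_trim    = iops * throtl_slice * nr_slices / HZ
               = 100 * 100 * 2 / 1000                      = 20

bytes_disp and io_disp shrink by those amounts, clamped at zero (lines 793-801), and slice_start advances by two whole slices (line 803).
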
811 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio, in tg_with_in_iops_limit() argument
819 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; in tg_with_in_iops_limit()
834 tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd; in tg_with_in_iops_limit()
842 if (tg->io_disp[rw] + 1 <= io_allowed) { in tg_with_in_iops_limit()
849 jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1; in tg_with_in_iops_limit()
861 static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, in tg_with_in_bps_limit() argument
868 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; in tg_with_in_bps_limit()
876 tmp = tg->bps[rw] * jiffy_elapsed_rnd; in tg_with_in_bps_limit()
880 if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) { in tg_with_in_bps_limit()
887 extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed; in tg_with_in_bps_limit()
888 jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]); in tg_with_in_bps_limit()
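
tg_with_in_bps_limit() rounds the elapsed slice time up to a slice boundary (jiffy_elapsed_rnd), computes the byte allowance for that rounded window, and on overrun reports how long the bio must wait. Worked numbers, assuming HZ = 1000, throtl_slice = 100 jiffies, bps = 1 MiB/s, 30 jiffies into the slice, bytes_disp = 90000 and a 64 KiB bio:

    jiffy_elapsed_rnd = roundup(30, 100)               = 100
    bytes_allowed     = 1048576 * 100 / 1000           = 104857
    extra_bytes       = 90000 + 65536 - 104857         = 50679
    jiffy_wait        = 50679 * 1000 / 1048576         = 48
    *wait             = 48 + (100 - 30)                = 118 jiffies

The (jiffy_elapsed_rnd - jiffy_elapsed) term stretches the wait to the end of the rounded-up window, since the allowance was computed for that full window.
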
907 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, in tg_may_dispatch() argument
919 BUG_ON(tg->service_queue.nr_queued[rw] && in tg_may_dispatch()
920 bio != throtl_peek_queued(&tg->service_queue.queued[rw])); in tg_may_dispatch()
923 if (tg->bps[rw] == -1 && tg->iops[rw] == -1) { in tg_may_dispatch()
934 if (throtl_slice_used(tg, rw)) in tg_may_dispatch()
935 throtl_start_new_slice(tg, rw); in tg_may_dispatch()
937 if (time_before(tg->slice_end[rw], jiffies + throtl_slice)) in tg_may_dispatch()
938 throtl_extend_slice(tg, rw, jiffies + throtl_slice); in tg_may_dispatch()
941 if (tg_with_in_bps_limit(tg, bio, &bps_wait) && in tg_may_dispatch()
942 tg_with_in_iops_limit(tg, bio, &iops_wait)) { in tg_may_dispatch()
953 if (time_before(tg->slice_end[rw], jiffies + max_wait)) in tg_may_dispatch()
954 throtl_extend_slice(tg, rw, jiffies + max_wait); in tg_may_dispatch()
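
On the failure path of tg_may_dispatch() the two waits are combined and the slice is stretched to cover the verdict, so the budget still refers to the same slice when the bio finally becomes eligible. The lines adjoining the hits above, reconstructed (not verbatim):

    max_wait = max(bps_wait, iops_wait);

    if (wait)
            *wait = max_wait;

    if (time_before(tg->slice_end[rw], jiffies + max_wait))
            throtl_extend_slice(tg, rw, jiffies + max_wait);
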
962 struct throtl_grp *tg = blkg_to_tg(blkg); in throtl_update_dispatch_stats() local
967 if (tg->stats_cpu == NULL) in throtl_update_dispatch_stats()
977 stats_cpu = this_cpu_ptr(tg->stats_cpu); in throtl_update_dispatch_stats()
985 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) in throtl_charge_bio() argument
990 tg->bytes_disp[rw] += bio->bi_iter.bi_size; in throtl_charge_bio()
991 tg->io_disp[rw]++; in throtl_charge_bio()
1006 throtl_update_dispatch_stats(tg_to_blkg(tg), in throtl_charge_bio()
1021 struct throtl_grp *tg) in throtl_add_bio_tg() argument
1023 struct throtl_service_queue *sq = &tg->service_queue; in throtl_add_bio_tg()
1027 qn = &tg->qnode_on_self[rw]; in throtl_add_bio_tg()
1036 tg->flags |= THROTL_TG_WAS_EMPTY; in throtl_add_bio_tg()
1041 throtl_enqueue_tg(tg); in throtl_add_bio_tg()
1044 static void tg_update_disptime(struct throtl_grp *tg) in tg_update_disptime() argument
1046 struct throtl_service_queue *sq = &tg->service_queue; in tg_update_disptime()
1051 tg_may_dispatch(tg, bio, &read_wait); in tg_update_disptime()
1054 tg_may_dispatch(tg, bio, &write_wait); in tg_update_disptime()
1060 throtl_dequeue_tg(tg); in tg_update_disptime()
1061 tg->disptime = disptime; in tg_update_disptime()
1062 throtl_enqueue_tg(tg); in tg_update_disptime()
1065 tg->flags &= ~THROTL_TG_WAS_EMPTY; in tg_update_disptime()
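
tg_update_disptime() re-keys the group in its parent's pending tree: it asks tg_may_dispatch() how long the head bio of each direction must wait, takes the smaller wait, and requeues the group at jiffies + min_wait. The body around the hits above, reconstructed (not verbatim):

    unsigned long read_wait = -1, write_wait = -1, min_wait, disptime;

    if ((bio = throtl_peek_queued(&sq->queued[READ])))
            tg_may_dispatch(tg, bio, &read_wait);

    if ((bio = throtl_peek_queued(&sq->queued[WRITE])))
            tg_may_dispatch(tg, bio, &write_wait);

    min_wait = min(read_wait, write_wait);
    disptime = jiffies + min_wait;

    /* dequeue and re-enqueue rather than update in place: disptime is
     * the rbtree key, so it must not change while the node is linked */
    throtl_dequeue_tg(tg);
    tg->disptime = disptime;
    throtl_enqueue_tg(tg);
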
1078 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw) in tg_dispatch_one_bio() argument
1080 struct throtl_service_queue *sq = &tg->service_queue; in tg_dispatch_one_bio()
1095 throtl_charge_bio(tg, bio); in tg_dispatch_one_bio()
1105 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg); in tg_dispatch_one_bio()
1106 start_parent_slice_with_credit(tg, parent_tg, rw); in tg_dispatch_one_bio()
1108 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw], in tg_dispatch_one_bio()
1110 BUG_ON(tg->td->nr_queued[rw] <= 0); in tg_dispatch_one_bio()
1111 tg->td->nr_queued[rw]--; in tg_dispatch_one_bio()
1114 throtl_trim_slice(tg, rw); in tg_dispatch_one_bio()
1120 static int throtl_dispatch_tg(struct throtl_grp *tg) in throtl_dispatch_tg() argument
1122 struct throtl_service_queue *sq = &tg->service_queue; in throtl_dispatch_tg()
1131 tg_may_dispatch(tg, bio, NULL)) { in throtl_dispatch_tg()
1133 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in throtl_dispatch_tg()
1141 tg_may_dispatch(tg, bio, NULL)) { in throtl_dispatch_tg()
1143 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in throtl_dispatch_tg()
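
throtl_dispatch_tg() drains a bounded batch per pass so one group cannot monopolize the dispatch loop; in this era the per-group quantum was split roughly 3:1 in favour of reads (max_nr_reads = throtl_grp_quantum * 3 / 4). The read-side loop around the hits above, reconstructed (not verbatim):

    unsigned int nr_reads = 0, max_nr_reads = throtl_grp_quantum * 3 / 4;

    while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
           tg_may_dispatch(tg, bio, NULL)) {

            tg_dispatch_one_bio(tg, bio_data_dir(bio));
            nr_reads++;

            if (nr_reads >= max_nr_reads)
                    break;
    }
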
1158 struct throtl_grp *tg = throtl_rb_first(parent_sq); in throtl_select_dispatch() local
1159 struct throtl_service_queue *sq = &tg->service_queue; in throtl_select_dispatch()
1161 if (!tg) in throtl_select_dispatch()
1164 if (time_before(jiffies, tg->disptime)) in throtl_select_dispatch()
1167 throtl_dequeue_tg(tg); in throtl_select_dispatch()
1169 nr_disp += throtl_dispatch_tg(tg); in throtl_select_dispatch()
1172 tg_update_disptime(tg); in throtl_select_dispatch()
1199 struct throtl_grp *tg = sq_to_tg(sq); in throtl_pending_timer_fn() local
1236 if (tg->flags & THROTL_TG_WAS_EMPTY) { in throtl_pending_timer_fn()
1237 tg_update_disptime(tg); in throtl_pending_timer_fn()
1241 tg = sq_to_tg(sq); in throtl_pending_timer_fn()
1291 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_cpu_rwstat() local
1295 if (tg->stats_cpu == NULL) in tg_prfill_cpu_rwstat()
1299 struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu); in tg_prfill_cpu_rwstat()
1319 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_conf_u64() local
1320 u64 v = *(u64 *)((void *)tg + off); in tg_prfill_conf_u64()
1330 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_conf_uint() local
1331 unsigned int v = *(unsigned int *)((void *)tg + off); in tg_prfill_conf_uint()
1357 struct throtl_grp *tg; in tg_set_conf() local
1367 tg = blkg_to_tg(ctx.blkg); in tg_set_conf()
1368 sq = &tg->service_queue; in tg_set_conf()
1374 *(u64 *)((void *)tg + of_cft(of)->private) = ctx.v; in tg_set_conf()
1376 *(unsigned int *)((void *)tg + of_cft(of)->private) = ctx.v; in tg_set_conf()
1378 throtl_log(&tg->service_queue, in tg_set_conf()
1380 tg->bps[READ], tg->bps[WRITE], in tg_set_conf()
1381 tg->iops[READ], tg->iops[WRITE]); in tg_set_conf()
1401 throtl_start_new_slice(tg, 0); in tg_set_conf()
1402 throtl_start_new_slice(tg, 1); in tg_set_conf()
1404 if (tg->flags & THROTL_TG_PENDING) { in tg_set_conf()
1405 tg_update_disptime(tg); in tg_set_conf()
1484 struct throtl_grp *tg; in blk_throtl_bio() local
1501 tg = throtl_lookup_tg(td, blkcg); in blk_throtl_bio()
1502 if (tg) { in blk_throtl_bio()
1503 if (!tg->has_rules[rw]) { in blk_throtl_bio()
1504 throtl_update_dispatch_stats(tg_to_blkg(tg), in blk_throtl_bio()
1515 tg = throtl_lookup_create_tg(td, blkcg); in blk_throtl_bio()
1516 if (unlikely(!tg)) in blk_throtl_bio()
1519 sq = &tg->service_queue; in blk_throtl_bio()
1527 if (!tg_may_dispatch(tg, bio, NULL)) in blk_throtl_bio()
1531 throtl_charge_bio(tg, bio); in blk_throtl_bio()
1544 throtl_trim_slice(tg, rw); in blk_throtl_bio()
1551 qn = &tg->qnode_on_parent[rw]; in blk_throtl_bio()
1553 tg = sq_to_tg(sq); in blk_throtl_bio()
1554 if (!tg) in blk_throtl_bio()
1561 tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw], in blk_throtl_bio()
1562 tg->io_disp[rw], tg->iops[rw], in blk_throtl_bio()
1566 tg->td->nr_queued[rw]++; in blk_throtl_bio()
1567 throtl_add_bio_tg(bio, qn, tg); in blk_throtl_bio()
1576 if (tg->flags & THROTL_TG_WAS_EMPTY) { in blk_throtl_bio()
1577 tg_update_disptime(tg); in blk_throtl_bio()
1578 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true); in blk_throtl_bio()
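
Taken together, the blk_throtl_bio() hits from line 1484 onward outline the entry point's whole shape: a fast path for groups with no rules anywhere in their ancestry, then a climb that charges every level whose limits the bio fits within and queues it at the first level that refuses. A condensed, non-verbatim rendering of that control flow:

    if (tg && !tg->has_rules[rw])
            goto out;                       /* fast path: nothing throttles */

    /* slow path, under the queue lock */
    while (true) {
            if (!tg_may_dispatch(tg, bio, NULL))
                    break;                  /* over limit: queue at this level */

            throtl_charge_bio(tg, bio);     /* within limit: charge and climb */
            throtl_trim_slice(tg, rw);

            qn = &tg->qnode_on_parent[rw];
            sq = sq->parent_sq;
            tg = sq_to_tg(sq);
            if (!tg)
                    goto out;               /* cleared every level: issue bio */
    }

    /* queue the bio here and, if the group just went non-empty, compute
     * its disptime and kick the parent's dispatch timer */
    tg->td->nr_queued[rw]++;
    throtl_add_bio_tg(bio, qn, tg);

    if (tg->flags & THROTL_TG_WAS_EMPTY) {
            tg_update_disptime(tg);
            throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
    }
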
1603 struct throtl_grp *tg; in tg_drain_bios() local
1605 while ((tg = throtl_rb_first(parent_sq))) { in tg_drain_bios()
1606 struct throtl_service_queue *sq = &tg->service_queue; in tg_drain_bios()
1609 throtl_dequeue_tg(tg); in tg_drain_bios()
1612 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in tg_drain_bios()
1614 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in tg_drain_bios()