Lines matching references to rw (identifier search, block/blk-throttle.c)
329 int rw; in throtl_pd_alloc() local
337 for (rw = READ; rw <= WRITE; rw++) { in throtl_pd_alloc()
338 throtl_qnode_init(&tg->qnode_on_self[rw], tg); in throtl_pd_alloc()
339 throtl_qnode_init(&tg->qnode_on_parent[rw], tg); in throtl_pd_alloc()
385 int rw; in tg_update_has_rules() local
387 for (rw = READ; rw <= WRITE; rw++) in tg_update_has_rules()
388 tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) || in tg_update_has_rules()
389 (tg->bps[rw] != -1 || tg->iops[rw] != -1); in tg_update_has_rules()
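
The tg_update_has_rules() hits above derive a per-direction has_rules flag: a group is throttled for READ or WRITE if it, or any ancestor, sets a bps or iops limit for that direction, with -1 meaning "no limit". A minimal userspace sketch of that rule; the struct and the plain parent pointer are invented stand-ins for the real blkcg hierarchy:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { READ = 0, WRITE = 1 };

struct grp {
    uint64_t bps[2];         /* -1: no byte-rate limit for that direction */
    unsigned int iops[2];    /* -1: no io-rate limit for that direction */
    bool has_rules[2];
    struct grp *parent;      /* NULL at the root */
};

static void update_has_rules(struct grp *g)
{
    for (int rw = READ; rw <= WRITE; rw++)
        g->has_rules[rw] = (g->parent && g->parent->has_rules[rw]) ||
                           (g->bps[rw] != (uint64_t)-1 ||
                            g->iops[rw] != (unsigned int)-1);
}

int main(void)
{
    struct grp parent = { .bps = { -1, -1 }, .iops = { -1, 100 }, .parent = NULL };
    struct grp child  = { .bps = { -1, -1 }, .iops = { -1, -1 }, .parent = &parent };

    update_has_rules(&parent);
    update_has_rules(&child);
    /* The child inherits the WRITE rule from its parent. */
    printf("child has_rules: R=%d W=%d\n", child.has_rules[READ], child.has_rules[WRITE]);
    return 0;
}
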
551 bool rw, unsigned long start) in throtl_start_new_slice_with_credit() argument
553 tg->bytes_disp[rw] = 0; in throtl_start_new_slice_with_credit()
554 tg->io_disp[rw] = 0; in throtl_start_new_slice_with_credit()
562 if (time_after_eq(start, tg->slice_start[rw])) in throtl_start_new_slice_with_credit()
563 tg->slice_start[rw] = start; in throtl_start_new_slice_with_credit()
565 tg->slice_end[rw] = jiffies + throtl_slice; in throtl_start_new_slice_with_credit()
568 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice_with_credit()
569 tg->slice_end[rw], jiffies); in throtl_start_new_slice_with_credit()
572 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw) in throtl_start_new_slice() argument
574 tg->bytes_disp[rw] = 0; in throtl_start_new_slice()
575 tg->io_disp[rw] = 0; in throtl_start_new_slice()
576 tg->slice_start[rw] = jiffies; in throtl_start_new_slice()
577 tg->slice_end[rw] = jiffies + throtl_slice; in throtl_start_new_slice()
580 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice()
581 tg->slice_end[rw], jiffies); in throtl_start_new_slice()
584 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw, in throtl_set_slice_end() argument
587 tg->slice_end[rw] = roundup(jiffy_end, throtl_slice); in throtl_set_slice_end()
590 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw, in throtl_extend_slice() argument
593 tg->slice_end[rw] = roundup(jiffy_end, throtl_slice); in throtl_extend_slice()
596 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_extend_slice()
597 tg->slice_end[rw], jiffies); in throtl_extend_slice()
601 static bool throtl_slice_used(struct throtl_grp *tg, bool rw) in throtl_slice_used() argument
603 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw])) in throtl_slice_used()
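
throtl_start_new_slice(), throtl_extend_slice() and throtl_slice_used() do the slice bookkeeping: starting a slice zeroes the per-direction dispatch counters and opens a window of one throtl_slice, extending it rounds slice_end up to a slice boundary, and a slice counts as used once jiffies has left its window (the _with_credit variant additionally lets the caller backdate slice_start, e.g. to a child's slice_start). A compact sketch of those three operations, assuming a plain counter in place of jiffies and an invented slice length:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HZ 100
#define THROTL_SLICE (HZ / 10)   /* assumed slice length, in "jiffies" */

enum { READ = 0, WRITE = 1 };

struct slice {
    uint64_t bytes_disp[2];
    unsigned int io_disp[2];
    unsigned long slice_start[2];
    unsigned long slice_end[2];
};

static unsigned long roundup_to(unsigned long v, unsigned long step)
{
    return ((v + step - 1) / step) * step;
}

/* Forget what was dispatched and open a fresh window starting at "now". */
static void start_new_slice(struct slice *s, int rw, unsigned long now)
{
    s->bytes_disp[rw] = 0;
    s->io_disp[rw] = 0;
    s->slice_start[rw] = now;
    s->slice_end[rw] = now + THROTL_SLICE;
}

/* Push slice_end out to cover "jiffy_end", rounded up to a slice boundary. */
static void extend_slice(struct slice *s, int rw, unsigned long jiffy_end)
{
    s->slice_end[rw] = roundup_to(jiffy_end, THROTL_SLICE);
}

/* A slice is "used" once the clock has left its window. */
static bool slice_used(const struct slice *s, int rw, unsigned long now)
{
    return now < s->slice_start[rw] || now > s->slice_end[rw];
}

int main(void)
{
    struct slice s = { 0 };
    unsigned long now = 1000;

    start_new_slice(&s, READ, now);
    extend_slice(&s, READ, now + 25);
    printf("window [%lu, %lu], used now? %d, used later? %d\n",
           s.slice_start[READ], s.slice_end[READ],
           slice_used(&s, READ, now), slice_used(&s, READ, now + 200));
    return 0;
}
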
610 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw) in throtl_trim_slice() argument
615 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw])); in throtl_trim_slice()
622 if (throtl_slice_used(tg, rw)) in throtl_trim_slice()
633 throtl_set_slice_end(tg, rw, jiffies + throtl_slice); in throtl_trim_slice()
635 time_elapsed = jiffies - tg->slice_start[rw]; in throtl_trim_slice()
641 tmp = tg->bps[rw] * throtl_slice * nr_slices; in throtl_trim_slice()
645 io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ; in throtl_trim_slice()
650 if (tg->bytes_disp[rw] >= bytes_trim) in throtl_trim_slice()
651 tg->bytes_disp[rw] -= bytes_trim; in throtl_trim_slice()
653 tg->bytes_disp[rw] = 0; in throtl_trim_slice()
655 if (tg->io_disp[rw] >= io_trim) in throtl_trim_slice()
656 tg->io_disp[rw] -= io_trim; in throtl_trim_slice()
658 tg->io_disp[rw] = 0; in throtl_trim_slice()
660 tg->slice_start[rw] += nr_slices * throtl_slice; in throtl_trim_slice()
664 rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim, in throtl_trim_slice()
665 tg->slice_start[rw], tg->slice_end[rw], jiffies); in throtl_trim_slice()
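
throtl_trim_slice() keeps a group from banking unlimited credit: for every whole slice that has elapsed it subtracts what the configured rate would have allowed (bps * throtl_slice * nr_slices / HZ bytes, and the iops equivalent) from the dispatch counters, clamping at zero, and advances slice_start by the trimmed slices. A hedged sketch of just that arithmetic; the function signature and constants are illustrative:

#include <stdint.h>
#include <stdio.h>

#define HZ 100
#define THROTL_SLICE (HZ / 10)

/* Trim whole elapsed slices out of the current window.  bytes_disp/io_disp
 * and slice_start are updated in place; bps/iops are the configured limits.
 * All names are illustrative; only the arithmetic mirrors the listing. */
static void trim_slice(uint64_t *bytes_disp, unsigned int *io_disp,
                       unsigned long *slice_start, unsigned long now,
                       uint64_t bps, unsigned int iops)
{
    unsigned long time_elapsed = now - *slice_start;
    unsigned long nr_slices = time_elapsed / THROTL_SLICE;
    if (!nr_slices)
        return;

    uint64_t bytes_trim = bps * THROTL_SLICE * nr_slices / HZ;
    unsigned long io_trim = (uint64_t)iops * THROTL_SLICE * nr_slices / HZ;

    *bytes_disp = (*bytes_disp >= bytes_trim) ? *bytes_disp - bytes_trim : 0;
    *io_disp = (*io_disp >= io_trim) ? *io_disp - io_trim : 0;
    *slice_start += nr_slices * THROTL_SLICE;
}

int main(void)
{
    uint64_t bytes = 3 * 1024 * 1024;   /* dispatched so far */
    unsigned int ios = 300;
    unsigned long start = 1000;

    /* 2.5 slices later, at 1 MiB/s and 100 iops */
    trim_slice(&bytes, &ios, &start, 1025, 1024 * 1024, 100);
    printf("bytes=%llu ios=%u start=%lu\n",
           (unsigned long long)bytes, ios, start);
    return 0;
}
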
671 bool rw = bio_data_dir(bio); in tg_with_in_iops_limit() local
676 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; in tg_with_in_iops_limit()
691 tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd; in tg_with_in_iops_limit()
699 if (tg->io_disp[rw] + 1 <= io_allowed) { in tg_with_in_iops_limit()
706 jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1; in tg_with_in_iops_limit()
721 bool rw = bio_data_dir(bio); in tg_with_in_bps_limit() local
725 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; in tg_with_in_bps_limit()
733 tmp = tg->bps[rw] * jiffy_elapsed_rnd; in tg_with_in_bps_limit()
737 if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) { in tg_with_in_bps_limit()
744 extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed; in tg_with_in_bps_limit()
745 jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]); in tg_with_in_bps_limit()
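
tg_with_in_iops_limit() and tg_with_in_bps_limit() ask the same question for the two limit types: given how long the current slice has run, how many ios/bytes were allowed so far, and if this bio does not fit, how long must it wait? A simplified single-direction sketch of the bps side (the iops side is analogous, counting ios instead of bytes); the elapsed-time rounding and overflow care of the real code are omitted:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HZ 100

/* Return true if "bio_size" more bytes fit into what "bps" allows since
 * slice_start; otherwise compute a wait time (in jiffies) in *wait. */
static bool within_bps_limit(uint64_t bytes_disp, uint64_t bio_size,
                             uint64_t bps, unsigned long slice_start,
                             unsigned long now, unsigned long *wait)
{
    unsigned long jiffy_elapsed = now - slice_start;
    uint64_t bytes_allowed = bps * jiffy_elapsed / HZ;

    if (bytes_disp + bio_size <= bytes_allowed) {
        *wait = 0;
        return true;
    }

    /* Not enough budget: wait for the extra bytes, rounding the
     * truncating division up by one jiffy so we never return zero. */
    uint64_t extra_bytes = bytes_disp + bio_size - bytes_allowed;
    *wait = (unsigned long)(extra_bytes * HZ / bps) + 1;
    return false;
}

int main(void)
{
    unsigned long wait;
    /* 1 MiB/s limit, 600 KiB already dispatched, 512 KiB bio, 0.5 s in */
    bool ok = within_bps_limit(600 * 1024, 512 * 1024, 1024 * 1024,
                               1000, 1050, &wait);
    printf("ok=%d wait=%lu jiffies\n", ok, wait);
    return 0;
}
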
767 bool rw = bio_data_dir(bio); in tg_may_dispatch() local
776 BUG_ON(tg->service_queue.nr_queued[rw] && in tg_may_dispatch()
777 bio != throtl_peek_queued(&tg->service_queue.queued[rw])); in tg_may_dispatch()
780 if (tg->bps[rw] == -1 && tg->iops[rw] == -1) { in tg_may_dispatch()
791 if (throtl_slice_used(tg, rw)) in tg_may_dispatch()
792 throtl_start_new_slice(tg, rw); in tg_may_dispatch()
794 if (time_before(tg->slice_end[rw], jiffies + throtl_slice)) in tg_may_dispatch()
795 throtl_extend_slice(tg, rw, jiffies + throtl_slice); in tg_may_dispatch()
810 if (time_before(tg->slice_end[rw], jiffies + max_wait)) in tg_may_dispatch()
811 throtl_extend_slice(tg, rw, jiffies + max_wait); in tg_may_dispatch()
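
tg_may_dispatch() ties those checks together: with no limit in either form the bio can go at once; otherwise the slice is renewed or extended, both limit checks run, and the caller gets the larger of the two waits, with slice_end stretched to cover it. A schematic sketch of that decision only; the per-limit waits are passed in as plain numbers to keep it small, so this is a shape illustration rather than a working throttle:

#include <stdbool.h>
#include <stdio.h>

#define NO_LIMIT ((unsigned long)-1)

static bool may_dispatch(unsigned long bps_limit, unsigned long iops_limit,
                         unsigned long bps_wait, unsigned long iops_wait,
                         unsigned long *slice_end, unsigned long now,
                         unsigned long *wait)
{
    /* No limit in this direction: dispatch immediately. */
    if (bps_limit == NO_LIMIT && iops_limit == NO_LIMIT) {
        *wait = 0;
        return true;
    }

    if (bps_wait == 0 && iops_wait == 0) {
        *wait = 0;
        return true;
    }

    /* Must wait for whichever limit is the more restrictive one,
     * and make sure the slice window covers that wait. */
    *wait = bps_wait > iops_wait ? bps_wait : iops_wait;
    if (*slice_end < now + *wait)
        *slice_end = now + *wait;
    return false;
}

int main(void)
{
    unsigned long wait, slice_end = 1010;
    bool ok = may_dispatch(1024 * 1024, NO_LIMIT, 30, 0, &slice_end, 1000, &wait);
    printf("ok=%d wait=%lu slice_end=%lu\n", ok, wait, slice_end);
    return 0;
}
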
818 bool rw = bio_data_dir(bio); in throtl_charge_bio() local
821 tg->bytes_disp[rw] += bio->bi_iter.bi_size; in throtl_charge_bio()
822 tg->io_disp[rw]++; in throtl_charge_bio()
847 bool rw = bio_data_dir(bio); in throtl_add_bio_tg() local
850 qn = &tg->qnode_on_self[rw]; in throtl_add_bio_tg()
858 if (!sq->nr_queued[rw]) in throtl_add_bio_tg()
861 throtl_qnode_add_bio(bio, qn, &sq->queued[rw]); in throtl_add_bio_tg()
863 sq->nr_queued[rw]++; in throtl_add_bio_tg()
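
When the bio cannot go yet it is charged and parked: throtl_charge_bio() adds bi_size to bytes_disp and bumps io_disp, and throtl_add_bio_tg() appends the bio to the group's READ or WRITE list (through a qnode, so bios from different children can later be handed to the parent fairly) and increments nr_queued. A bare-bones FIFO version of that queueing, without the qnode round-robin machinery:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

enum { READ = 0, WRITE = 1 };

struct bio_node {
    uint64_t size;
    struct bio_node *next;
};

struct service_queue {
    struct bio_node *queued[2];   /* per-direction FIFO head */
    struct bio_node *tail[2];
    unsigned int nr_queued[2];
};

static void add_bio(struct service_queue *sq, int rw, uint64_t size)
{
    struct bio_node *b = calloc(1, sizeof(*b));
    b->size = size;
    if (sq->tail[rw])
        sq->tail[rw]->next = b;
    else
        sq->queued[rw] = b;
    sq->tail[rw] = b;
    sq->nr_queued[rw]++;
}

static struct bio_node *pop_bio(struct service_queue *sq, int rw)
{
    struct bio_node *b = sq->queued[rw];
    if (!b)
        return NULL;
    sq->queued[rw] = b->next;
    if (!sq->queued[rw])
        sq->tail[rw] = NULL;
    sq->nr_queued[rw]--;
    return b;
}

int main(void)
{
    struct service_queue sq = { 0 };
    add_bio(&sq, WRITE, 4096);
    add_bio(&sq, WRITE, 8192);
    struct bio_node *b = pop_bio(&sq, WRITE);
    printf("popped %llu bytes, %u still queued\n",
           (unsigned long long)b->size, sq.nr_queued[WRITE]);
    free(b);
    return 0;
}
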
892 struct throtl_grp *parent_tg, bool rw) in start_parent_slice_with_credit() argument
894 if (throtl_slice_used(parent_tg, rw)) { in start_parent_slice_with_credit()
895 throtl_start_new_slice_with_credit(parent_tg, rw, in start_parent_slice_with_credit()
896 child_tg->slice_start[rw]); in start_parent_slice_with_credit()
901 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw) in tg_dispatch_one_bio() argument
915 bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put); in tg_dispatch_one_bio()
916 sq->nr_queued[rw]--; in tg_dispatch_one_bio()
928 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg); in tg_dispatch_one_bio()
929 start_parent_slice_with_credit(tg, parent_tg, rw); in tg_dispatch_one_bio()
931 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw], in tg_dispatch_one_bio()
932 &parent_sq->queued[rw]); in tg_dispatch_one_bio()
933 BUG_ON(tg->td->nr_queued[rw] <= 0); in tg_dispatch_one_bio()
934 tg->td->nr_queued[rw]--; in tg_dispatch_one_bio()
937 throtl_trim_slice(tg, rw); in tg_dispatch_one_bio()
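
tg_dispatch_one_bio() moves one queued bio up a level: it is popped from this group's queue, charged to this group, then either queued on the parent via qnode_on_parent (starting the parent's slice with credit from the child's slice_start) or, at the top, placed on the topmost service queue for actual issue, after which the child's slice is trimmed. A toy two-level model of that climb; the qnode, refcounting and trimming details are left out:

#include <stdint.h>
#include <stdio.h>

enum { READ = 0, WRITE = 1 };

#define MAXQ 16

/* Toy hierarchy: each group has a tiny per-direction list of pending
 * "bios" (just sizes here) plus the counters it charges them to. */
struct group {
    uint64_t pending[2][MAXQ];
    unsigned int nr_queued[2];
    uint64_t bytes_disp[2];
    unsigned int io_disp[2];
    struct group *parent;   /* NULL at the top of the hierarchy */
};

/* Dispatch the oldest bio of one direction: charge it to this group, then
 * either requeue it on the parent (still throttled there) or "issue" it. */
static void dispatch_one(struct group *g, int rw)
{
    if (!g->nr_queued[rw])
        return;

    uint64_t size = g->pending[rw][0];
    for (unsigned int i = 1; i < g->nr_queued[rw]; i++)
        g->pending[rw][i - 1] = g->pending[rw][i];
    g->nr_queued[rw]--;

    g->bytes_disp[rw] += size;
    g->io_disp[rw]++;

    if (g->parent) {
        struct group *p = g->parent;
        p->pending[rw][p->nr_queued[rw]++] = size;
    } else {
        printf("issuing %llu-byte %s bio to the device\n",
               (unsigned long long)size, rw == READ ? "READ" : "WRITE");
    }
}

int main(void)
{
    struct group root = { .parent = NULL };
    struct group child = { .parent = &root };

    child.pending[WRITE][child.nr_queued[WRITE]++] = 4096;
    dispatch_one(&child, WRITE);   /* climbs to root's queue */
    dispatch_one(&root, WRITE);    /* actually issued */
    return 0;
}
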
1093 int rw; in blk_throtl_dispatch_work_fn() local
1098 for (rw = READ; rw <= WRITE; rw++) in blk_throtl_dispatch_work_fn()
1099 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL))) in blk_throtl_dispatch_work_fn()
1402 bool rw = bio_data_dir(bio); in blk_throtl_bio() local
1408 if ((bio->bi_rw & REQ_THROTTLED) || !tg->has_rules[rw]) in blk_throtl_bio()
1420 if (sq->nr_queued[rw]) in blk_throtl_bio()
1441 throtl_trim_slice(tg, rw); in blk_throtl_bio()
1448 qn = &tg->qnode_on_parent[rw]; in blk_throtl_bio()
1457 rw == READ ? 'R' : 'W', in blk_throtl_bio()
1458 tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw], in blk_throtl_bio()
1459 tg->io_disp[rw], tg->iops[rw], in blk_throtl_bio()
1463 tg->td->nr_queued[rw]++; in blk_throtl_bio()
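
blk_throtl_bio() is the submission-time entry point: bios already flagged REQ_THROTTLED, or belonging to a group with no rules in this direction, pass straight through; otherwise the code walks up the hierarchy and, at each level where nothing is queued ahead and tg_may_dispatch() agrees, charges the bio, trims the slice and climbs, queueing it at the first level that refuses. A sketch of that walk with an assumed may_dispatch() predicate standing in for the slice/limit logic:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { READ = 0, WRITE = 1 };

struct group {
    bool has_rules[2];
    unsigned int nr_queued[2];   /* bios already waiting here */
    uint64_t bytes_disp[2];
    unsigned int io_disp[2];
    uint64_t budget[2];          /* stand-in for the slice/limit check */
    struct group *parent;
};

/* Stand-in for tg_may_dispatch(): does this bio fit the group's budget? */
static bool may_dispatch(struct group *g, int rw, uint64_t size)
{
    return g->bytes_disp[rw] + size <= g->budget[rw];
}

/* Returns the group the bio must be queued on, or NULL if it can go now. */
static struct group *throttle_bio(struct group *g, int rw, uint64_t size)
{
    if (!g->has_rules[rw])
        return NULL;             /* nothing to enforce */

    while (g) {
        if (g->nr_queued[rw] || !may_dispatch(g, rw, size)) {
            g->nr_queued[rw]++;  /* queue here and wait */
            return g;
        }
        /* Within limits at this level: charge and climb. */
        g->bytes_disp[rw] += size;
        g->io_disp[rw]++;
        g = g->parent;
    }
    return NULL;                 /* cleared every level */
}

int main(void)
{
    struct group root  = { .has_rules = { false, true },
                           .budget = { 0, 4096 }, .parent = NULL };
    struct group child = { .has_rules = { false, true },
                           .budget = { 0, 65536 }, .parent = &root };

    struct group *where = throttle_bio(&child, WRITE, 8192);
    printf("bio %s\n", where ? "queued (throttled)" : "dispatched");
    return 0;
}
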
1526 int rw; in blk_throtl_drain() local
1547 for (rw = READ; rw <= WRITE; rw++) in blk_throtl_drain()
1548 while ((bio = throtl_pop_queued(&td->service_queue.queued[rw], in blk_throtl_drain()
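
blk_throtl_dispatch_work_fn() and blk_throtl_drain() both end in the same per-direction loop: pop everything queued on the top-level service queue for READ, then WRITE, and issue each bio. A last small sketch of that drain loop over both directions:

#include <stdint.h>
#include <stdio.h>

enum { READ = 0, WRITE = 1 };

#define MAXQ 8

struct service_queue {
    uint64_t queued[2][MAXQ];   /* pending bio sizes per direction */
    unsigned int nr_queued[2];
};

/* Pop the oldest entry of one direction, or 0 if the list is empty. */
static uint64_t pop_queued(struct service_queue *sq, int rw)
{
    if (!sq->nr_queued[rw])
        return 0;
    uint64_t size = sq->queued[rw][0];
    for (unsigned int i = 1; i < sq->nr_queued[rw]; i++)
        sq->queued[rw][i - 1] = sq->queued[rw][i];
    sq->nr_queued[rw]--;
    return size;
}

int main(void)
{
    struct service_queue sq = { .nr_queued = { 1, 2 },
                                .queued = { { 4096 }, { 8192, 512 } } };
    uint64_t size;

    /* Drain: READ first, then WRITE, issuing everything that was queued. */
    for (int rw = READ; rw <= WRITE; rw++)
        while ((size = pop_queued(&sq, rw)) != 0)
            printf("issue %s bio of %llu bytes\n",
                   rw == READ ? "READ" : "WRITE",
                   (unsigned long long)size);
    return 0;
}
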