Lines Matching refs:bio (block/blk-throttle.c)
250 static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn, in throtl_qnode_add_bio() argument
253 bio_list_add(&qn->bios, bio); in throtl_qnode_add_bio()
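
throtl_qnode_add_bio() appends the bio to the qnode's bio list (line 253) and, in the kernel, links the qnode onto the service queue's queued list when it receives its first bio. Below is a minimal userspace model of the underlying bio_list append; struct bio here is a stand-in carrying only the bi_next link, and everything else about the real structure is omitted.

#include <stddef.h>

struct bio {
        struct bio *bi_next;
};

struct bio_list {
        struct bio *head;
        struct bio *tail;
};

static void bio_list_add(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = NULL;                    /* new tail terminates the list */
        if (bl->tail)
                bl->tail->bi_next = bio;        /* append after the old tail */
        else
                bl->head = bio;                 /* list was empty */
        bl->tail = bio;                         /* O(1) thanks to the tail pointer */
}
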
264 static struct bio *throtl_peek_queued(struct list_head *queued) in throtl_peek_queued()
267 struct bio *bio; in throtl_peek_queued() local
272 bio = bio_list_peek(&qn->bios); in throtl_peek_queued()
273 WARN_ON_ONCE(!bio); in throtl_peek_queued()
274 return bio; in throtl_peek_queued()
291 static struct bio *throtl_pop_queued(struct list_head *queued, in throtl_pop_queued()
295 struct bio *bio; in throtl_pop_queued() local
300 bio = bio_list_pop(&qn->bios); in throtl_pop_queued()
301 WARN_ON_ONCE(!bio); in throtl_pop_queued()
313 return bio; in throtl_pop_queued()
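
throtl_peek_queued() returns the head bio of the first qnode without removing it, and throtl_pop_queued() removes it (in the kernel the pop also rotates the qnode to the tail of the queued list, round-robining dispatch between submitters, and may hand back a throtl_grp reference to put). The WARN_ON_ONCE() on lines 273 and 301 assert the invariant that a qnode still linked on the queued list is never empty. A sketch of peek and pop on the same minimal list:

/* struct bio / struct bio_list as in the previous sketch. */
struct bio { struct bio *bi_next; };
struct bio_list { struct bio *head, *tail; };

static struct bio *bio_list_peek(struct bio_list *bl)
{
        return bl->head;                        /* NULL when empty */
}

static struct bio *bio_list_pop(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        if (bio) {
                bl->head = bio->bi_next;
                if (!bl->head)
                        bl->tail = NULL;        /* popped the last entry */
                bio->bi_next = NULL;
        }
        return bio;
}
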
668 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio, in tg_with_in_iops_limit() argument
671 bool rw = bio_data_dir(bio); in tg_with_in_iops_limit()
718 static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, in tg_with_in_bps_limit() argument
721 bool rw = bio_data_dir(bio); in tg_with_in_bps_limit()
737 if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) { in tg_with_in_bps_limit()
744 extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed; in tg_with_in_bps_limit()
764 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, in tg_may_dispatch() argument
767 bool rw = bio_data_dir(bio); in tg_may_dispatch()
777 bio != throtl_peek_queued(&tg->service_queue.queued[rw])); in tg_may_dispatch()
798 if (tg_with_in_bps_limit(tg, bio, &bps_wait) && in tg_may_dispatch()
799 tg_with_in_iops_limit(tg, bio, &iops_wait)) { in tg_may_dispatch()
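
The two limit checks feed tg_may_dispatch(): lines 737 and 744 show the byte-budget arithmetic (dispatch if bytes_disp plus the bio's size fits bytes_allowed, otherwise wait extra_bytes/bps), and lines 798-799 show that a bio may go only when both the bps and the iops check pass. A sketch of that arithmetic, with the kernel's jiffies and slice extension simplified to plain seconds; the iops formula here is the count-based analogue and an assumption, not the kernel's exact rounding:

#include <stdbool.h>
#include <stdint.h>

/* Seconds a bio of bio_size bytes must wait before it fits the byte
 * budget; 0 means it fits now.  Mirrors bytes_allowed/extra_bytes at
 * lines 737 and 744 above. */
static double bps_wait_sec(uint64_t bps, uint64_t bytes_disp,
                           uint32_t bio_size, double elapsed_sec)
{
        uint64_t bytes_allowed = (uint64_t)(bps * elapsed_sec);

        if (bytes_disp + bio_size <= bytes_allowed)
                return 0;
        /* extra_bytes / bps = seconds until the budget catches up */
        return (double)(bytes_disp + bio_size - bytes_allowed) / (double)bps;
}

/* Count-based twin for the iops limit; one more I/O must fit. */
static double iops_wait_sec(uint32_t iops, uint32_t io_disp,
                            double elapsed_sec)
{
        uint32_t io_allowed = (uint32_t)(iops * elapsed_sec);

        if (io_disp + 1 <= io_allowed)
                return 0;
        return (double)(io_disp + 1 - io_allowed) / (double)iops;
}

/* tg_may_dispatch()'s combination rule: go only if BOTH budgets have
 * room; otherwise sleep for the LONGER of the two waits, since the bio
 * cannot be issued until both limits would admit it. */
static bool may_dispatch(uint64_t bps, uint32_t iops,
                         uint64_t bytes_disp, uint32_t io_disp,
                         uint32_t bio_size, double elapsed_sec,
                         double *wait_sec)
{
        double bw = bps_wait_sec(bps, bytes_disp, bio_size, elapsed_sec);
        double iw = iops_wait_sec(iops, io_disp, elapsed_sec);

        if (bw == 0 && iw == 0) {
                if (wait_sec)
                        *wait_sec = 0;
                return true;
        }
        if (wait_sec)
                *wait_sec = bw > iw ? bw : iw;
        return false;
}
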
816 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) in throtl_charge_bio() argument
818 bool rw = bio_data_dir(bio); in throtl_charge_bio()
821 tg->bytes_disp[rw] += bio->bi_iter.bi_size; in throtl_charge_bio()
830 if (!(bio->bi_rw & REQ_THROTTLED)) in throtl_charge_bio()
831 bio->bi_rw |= REQ_THROTTLED; in throtl_charge_bio()
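
throtl_charge_bio() bumps the per-direction byte counter for the current slice (line 821; the kernel also counts one more I/O for the iops side) and sets REQ_THROTTLED in bio->bi_rw so the bio is not throttled a second time when it re-enters the issue path. A stand-in sketch, with struct bio reduced to the two fields this path touches:

#include <stdint.h>

#define REQ_THROTTLED (1u << 0)         /* placeholder bit; the real flag
                                           lives in the bi_rw flag word */

struct bio {
        uint32_t bi_rw;                 /* request flags */
        uint32_t bi_size;               /* stand-in for bio->bi_iter.bi_size */
};

struct tg_counters {
        uint64_t bytes_disp[2];         /* per direction: READ=0, WRITE=1 */
        uint32_t io_disp[2];
};

static void charge_bio(struct tg_counters *tg, struct bio *bio, int rw)
{
        tg->bytes_disp[rw] += bio->bi_size;     /* bytes used this slice */
        tg->io_disp[rw]++;                      /* ...and one more I/O */

        /* Idempotent mark: a resubmitted bio skips throttling next time. */
        bio->bi_rw |= REQ_THROTTLED;
}
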
843 static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn, in throtl_add_bio_tg() argument
847 bool rw = bio_data_dir(bio); in throtl_add_bio_tg()
861 throtl_qnode_add_bio(bio, qn, &sq->queued[rw]); in throtl_add_bio_tg()
871 struct bio *bio; in tg_update_disptime() local
873 if ((bio = throtl_peek_queued(&sq->queued[READ]))) in tg_update_disptime()
874 tg_may_dispatch(tg, bio, &read_wait); in tg_update_disptime()
876 if ((bio = throtl_peek_queued(&sq->queued[WRITE]))) in tg_update_disptime()
877 tg_may_dispatch(tg, bio, &write_wait); in tg_update_disptime()
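
When a bio cannot go, throtl_add_bio_tg() parks it on the group's service queue for its direction (line 861), and tg_update_disptime() then peeks the head READ and head WRITE bios, asks tg_may_dispatch() for each one's wait, and arms the dispatch timer for the smaller of the two. A sketch of that minimum-wait choice; the names and the negative "nothing queued" convention are illustrative, the kernel keeps jiffies in tg->disptime:

struct pending_waits {
        double read_wait;       /* < 0 when no READ bio is queued */
        double write_wait;      /* < 0 when no WRITE bio is queued */
};

/* Pick the earlier of the two per-direction waits; that is when the
 * dispatch timer should fire next.  Returns -1 if nothing is queued. */
static double next_dispatch_delay(const struct pending_waits *p)
{
        double wait = -1;

        if (p->read_wait >= 0)
                wait = p->read_wait;
        if (p->write_wait >= 0 && (wait < 0 || p->write_wait < wait))
                wait = p->write_wait;
        return wait;
}
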
907 struct bio *bio; in tg_dispatch_one_bio() local
915 bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put); in tg_dispatch_one_bio()
918 throtl_charge_bio(tg, bio); in tg_dispatch_one_bio()
928 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg); in tg_dispatch_one_bio()
931 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw], in tg_dispatch_one_bio()
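
tg_dispatch_one_bio() pops the head bio (line 915), charges this group (line 918), and then either re-queues the bio on the parent group's queue (line 928, so a bio pays at every level of the hierarchy on its way up) or, at the top level, queues it for the work function to actually issue (line 931). A simplified model of that climb; the kernel's qnode_on_parent bookkeeping and pending-state updates are omitted:

/* Stand-ins from the first sketch; bodies are shown there. */
struct bio { struct bio *bi_next; };
struct bio_list { struct bio *head, *tail; };
static void bio_list_add(struct bio_list *bl, struct bio *bio);
static struct bio *bio_list_pop(struct bio_list *bl);

struct group {
        struct group *parent;           /* NULL at the hierarchy root */
        struct bio_list queued[2];      /* pending bios, READ=0 / WRITE=1 */
};

static void dispatch_one(struct group *g, int rw, struct bio_list *top_out)
{
        struct bio *bio = bio_list_pop(&g->queued[rw]);

        /* The kernel charges THIS group here (throtl_charge_bio), so a
         * bio pays at every level it crosses on the way up. */

        if (g->parent)
                bio_list_add(&g->parent->queued[rw], bio);      /* climb */
        else
                bio_list_add(top_out, bio);     /* top level: ready to issue */
}
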
949 struct bio *bio; in throtl_dispatch_tg() local
953 while ((bio = throtl_peek_queued(&sq->queued[READ])) && in throtl_dispatch_tg()
954 tg_may_dispatch(tg, bio, NULL)) { in throtl_dispatch_tg()
956 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in throtl_dispatch_tg()
963 while ((bio = throtl_peek_queued(&sq->queued[WRITE])) && in throtl_dispatch_tg()
964 tg_may_dispatch(tg, bio, NULL)) { in throtl_dispatch_tg()
966 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in throtl_dispatch_tg()
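
throtl_dispatch_tg() drains the READ queue first and then the WRITE queue (lines 953-966), each while the head bio is still within limits, and the kernel also caps how many bios of each direction move per round so neither starves the other. A sketch with those caps as plain constants (the kernel derives 6 reads / 2 writes from a quantum of 8) and the limit check elided:

/* Stand-ins from the earlier sketches; bodies are shown there. */
struct bio { struct bio *bi_next; };
struct bio_list { struct bio *head, *tail; };
struct group { struct group *parent; struct bio_list queued[2]; };
static struct bio *bio_list_peek(struct bio_list *bl);
static void dispatch_one(struct group *g, int rw, struct bio_list *out);

enum { MAX_READS_PER_ROUND = 6, MAX_WRITES_PER_ROUND = 2 };

static int dispatch_round(struct group *g, struct bio_list *out)
{
        int nr = 0, n;

        /* READ queue first, while the head bio stays within limits
         * (the may_dispatch() check is elided here). */
        for (n = 0; n < MAX_READS_PER_ROUND; n++) {
                if (!bio_list_peek(&g->queued[0]))
                        break;
                dispatch_one(g, 0, out);
                nr++;
        }
        for (n = 0; n < MAX_WRITES_PER_ROUND; n++) {
                if (!bio_list_peek(&g->queued[1]))
                        break;
                dispatch_one(g, 1, out);
                nr++;
        }
        return nr;              /* bios moved this round */
}
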
1091 struct bio *bio; in blk_throtl_dispatch_work_fn() local
1099 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL))) in blk_throtl_dispatch_work_fn()
1100 bio_list_add(&bio_list_on_stack, bio); in blk_throtl_dispatch_work_fn()
1105 while((bio = bio_list_pop(&bio_list_on_stack))) in blk_throtl_dispatch_work_fn()
1106 generic_make_request(bio); in blk_throtl_dispatch_work_fn()
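
blk_throtl_dispatch_work_fn() drains in two phases: pop everything dispatched onto a bio list on the worker's stack while holding the queue lock (lines 1099-1100), then drop the lock and issue each bio with generic_make_request() (lines 1105-1106), so submission never recurses into the throttle code under the lock. A userspace sketch of the pattern, with a pthread mutex standing in for the queue lock:

#include <pthread.h>

/* Stand-ins from the earlier sketches; bodies are shown there. */
struct bio { struct bio *bi_next; };
struct bio_list { struct bio *head, *tail; };
static void bio_list_add(struct bio_list *bl, struct bio *bio);
static struct bio *bio_list_pop(struct bio_list *bl);
static void issue(struct bio *bio);     /* generic_make_request() stand-in */

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void dispatch_work(struct bio_list *queued)
{
        struct bio_list on_stack = { NULL, NULL };
        struct bio *bio;

        /* Phase 1: move everything onto a private list under the lock... */
        pthread_mutex_lock(&queue_lock);
        while ((bio = bio_list_pop(queued)))
                bio_list_add(&on_stack, bio);
        pthread_mutex_unlock(&queue_lock);

        /* Phase 2: ...then submit with the lock dropped, keeping the
         * lock hold time short. */
        while ((bio = bio_list_pop(&on_stack)))
                issue(bio);
}
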
1397 struct bio *bio) in blk_throtl_bio() argument
1402 bool rw = bio_data_dir(bio); in blk_throtl_bio()
1408 if ((bio->bi_rw & REQ_THROTTLED) || !tg->has_rules[rw]) in blk_throtl_bio()
1424 if (!tg_may_dispatch(tg, bio, NULL)) in blk_throtl_bio()
1428 throtl_charge_bio(tg, bio); in blk_throtl_bio()
1458 tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw], in blk_throtl_bio()
1462 bio_associate_current(bio); in blk_throtl_bio()
1464 throtl_add_bio_tg(bio, qn, tg); in blk_throtl_bio()
1487 bio->bi_rw &= ~REQ_THROTTLED; in blk_throtl_bio()
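
blk_throtl_bio() is the entry point: a bio already marked REQ_THROTTLED, or one whose group has no rules for its direction, passes straight through (line 1408); a bio within limits is charged and passes (lines 1424 and 1428); anything else is associated with the current task and queued (lines 1462 and 1464). Because several throttle instances can stack on one issue path, the flag is stripped again from any bio being issued (line 1487). A boiled-down sketch of that ladder; within_limits(), charge() and enqueue() are hypothetical stubs for the kernel helpers:

#include <stdbool.h>
#include <stdint.h>

#define REQ_THROTTLED (1u << 0)         /* placeholder bit, as before */

struct bio { uint32_t bi_rw; };

/* Hypothetical stubs standing in for tg_may_dispatch(),
 * throtl_charge_bio() and throtl_add_bio_tg(). */
static bool within_limits(struct bio *bio, int rw) { (void)bio; (void)rw; return true; }
static void charge(struct bio *bio, int rw) { (void)bio; (void)rw; }
static void enqueue(struct bio *bio, int rw) { (void)bio; (void)rw; }

static bool throttle_bio(struct bio *bio, int rw, bool has_rules)
{
        bool throttled = false;

        /* Already throttled once on this issue path, or no rules for
         * this direction: pass straight through. */
        if ((bio->bi_rw & REQ_THROTTLED) || !has_rules)
                goto out;

        if (within_limits(bio, rw)) {
                charge(bio, rw);        /* within budget: issue now */
                goto out;
        }

        enqueue(bio, rw);               /* over budget: park it */
        throttled = true;
out:
        /* Don't let the marker leak out on a bio being issued. */
        if (!throttled)
                bio->bi_rw &= ~REQ_THROTTLED;
        return throttled;
}
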
1502 struct bio *bio; in tg_drain_bios() local
1506 while ((bio = throtl_peek_queued(&sq->queued[READ]))) in tg_drain_bios()
1507 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in tg_drain_bios()
1508 while ((bio = throtl_peek_queued(&sq->queued[WRITE]))) in tg_drain_bios()
1509 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in tg_drain_bios()
1525 struct bio *bio; in blk_throtl_drain() local
1548 while ((bio = throtl_pop_queued(&td->service_queue.queued[rw], in blk_throtl_drain()
1550 generic_make_request(bio); in blk_throtl_drain()
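
On queue teardown, tg_drain_bios() pushes every bio queued on one group up through tg_dispatch_one_bio() regardless of limits (lines 1506-1509), and blk_throtl_drain() then pops whatever reached the top-level service queue and reissues it (lines 1548-1550); the queue is going away, so delaying I/O would serve no one. A sketch, assuming groups[] is ordered children before parents, which is what the kernel's post-order cgroup walk guarantees:

/* Stand-ins from the earlier sketches; bodies are shown there. */
struct bio { struct bio *bi_next; };
struct bio_list { struct bio *head, *tail; };
struct group { struct group *parent; struct bio_list queued[2]; };
static struct bio *bio_list_peek(struct bio_list *bl);
static struct bio *bio_list_pop(struct bio_list *bl);
static void dispatch_one(struct group *g, int rw, struct bio_list *out);
static void issue(struct bio *bio);     /* generic_make_request() stand-in */

static void drain_all(struct group **groups, int nr_groups,
                      struct bio_list top[2])
{
        struct bio *bio;
        int i, rw;

        /* Flush every group, children first, so bios climb to the top. */
        for (i = 0; i < nr_groups; i++)
                for (rw = 0; rw < 2; rw++)
                        while (bio_list_peek(&groups[i]->queued[rw]))
                                dispatch_one(groups[i], rw, &top[rw]);

        /* Issue everything that reached the top; limits are ignored. */
        for (rw = 0; rw < 2; rw++)
                while ((bio = bio_list_pop(&top[rw])))
                        issue(bio);
}
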