Lines Matching refs:rl

Each entry below shows a source line number, the matching line, and the enclosing function; "argument" marks rl as a parameter of that function and "local" marks it as a local variable. Together the hits trace a request_list through its lifecycle: congestion marking, queue drain and teardown, initialization, and request allocation and freeing.

66 static void blk_clear_congested(struct request_list *rl, int sync)  in blk_clear_congested()  argument
69 clear_wb_congested(rl->blkg->wb_congested, sync); in blk_clear_congested()
75 if (rl == &rl->q->root_rl) in blk_clear_congested()
76 clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync); in blk_clear_congested()
80 static void blk_set_congested(struct request_list *rl, int sync) in blk_set_congested() argument
83 set_wb_congested(rl->blkg->wb_congested, sync); in blk_set_congested()
86 if (rl == &rl->q->root_rl) in blk_set_congested()
87 set_wb_congested(rl->q->backing_dev_info.wb.congested, sync); in blk_set_congested()
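
The two helpers above are the hysteresis hooks for writeback congestion: depending on how the kernel is configured, blk_set_congested()/blk_clear_congested() either flip the congestion state of the request list's own cgroup (rl->blkg->wb_congested) or, for the queue's root_rl only, the device-wide state in q->backing_dev_info.wb.congested. The sketch below is a minimal userspace model of that dispatch; every name in it (rl_model, queue_model, wb_state, ...) is invented for illustration, a single setter with an on/off flag covers both helpers, and a runtime NULL check stands in for what the listing shows as two alternative branches.

    /*
     * Userspace model of the blk_set_congested()/blk_clear_congested()
     * dispatch above.  All names are invented; a runtime check on group_wb
     * stands in for what the kernel decides by configuration.
     */
    #include <stdbool.h>

    struct wb_state { bool congested[2]; };      /* indexed by sync/async */

    struct rl_model;
    struct queue_model {
        struct wb_state dev_wb;                  /* ~ q->backing_dev_info.wb */
        struct rl_model *root_rl;                /* ~ &q->root_rl */
    };

    struct rl_model {
        struct queue_model *q;
        struct wb_state *group_wb;               /* ~ rl->blkg->wb_congested, or NULL */
    };

    void model_set_congested(struct rl_model *rl, int sync, bool on)
    {
        if (rl->group_wb) {                      /* per-cgroup list: flag its own state */
            rl->group_wb->congested[sync] = on;
            return;
        }
        if (rl == rl->q->root_rl)                /* root list: flag the whole device */
            rl->q->dev_wb.congested[sync] = on;
    }
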
459 struct request_list *rl; in __blk_drain_queue() local
461 blk_queue_for_each_rl(rl, q) in __blk_drain_queue()
462 for (i = 0; i < ARRAY_SIZE(rl->wait); i++) in __blk_drain_queue()
463 wake_up_all(&rl->wait[i]); in __blk_drain_queue()
523 struct request_list *rl; in blk_set_queue_dying() local
525 blk_queue_for_each_rl(rl, q) { in blk_set_queue_dying()
526 if (rl->rq_pool) { in blk_set_queue_dying()
527 wake_up(&rl->wait[BLK_RW_SYNC]); in blk_set_queue_dying()
528 wake_up(&rl->wait[BLK_RW_ASYNC]); in blk_set_queue_dying()
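
Both __blk_drain_queue() and blk_set_queue_dying() walk every request list attached to the queue (blk_queue_for_each_rl() in the source) and wake whatever is sleeping on rl->wait[BLK_RW_SYNC] and rl->wait[BLK_RW_ASYNC], so tasks blocked in the allocation path re-check the queue state. A rough userspace analogue follows, with pthread condition variables standing in for the kernel waitqueues and all names invented.

    /*
     * Rough analogue of the wake-everything loops above; pthread_cond_t
     * stands in for the kernel waitqueue and every name is invented.
     */
    #include <pthread.h>

    enum { RW_SYNC = 0, RW_ASYNC = 1 };

    struct rl_model {
        pthread_cond_t wait[2];                  /* ~ rl->wait[] */
        struct rl_model *next;                   /* queue keeps its lists chained */
    };

    struct queue_model {
        pthread_mutex_t lock;                    /* ~ q->queue_lock */
        struct rl_model *rls;
        int dying;
    };

    void model_wake_all_waiters(struct queue_model *q)
    {
        struct rl_model *rl;

        pthread_mutex_lock(&q->lock);
        q->dying = 1;                            /* blk_set_queue_dying() also flags the queue */
        for (rl = q->rls; rl; rl = rl->next) {   /* ~ blk_queue_for_each_rl() */
            pthread_cond_broadcast(&rl->wait[RW_SYNC]);
            pthread_cond_broadcast(&rl->wait[RW_ASYNC]);
        }
        pthread_mutex_unlock(&q->lock);
    }
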
615 int blk_init_rl(struct request_list *rl, struct request_queue *q, in blk_init_rl() argument
618 if (unlikely(rl->rq_pool)) in blk_init_rl()
621 rl->q = q; in blk_init_rl()
622 rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; in blk_init_rl()
623 rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; in blk_init_rl()
624 init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); in blk_init_rl()
625 init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); in blk_init_rl()
627 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, alloc_request_struct, in blk_init_rl()
631 if (!rl->rq_pool) in blk_init_rl()
637 void blk_exit_rl(struct request_list *rl) in blk_exit_rl() argument
639 if (rl->rq_pool) in blk_exit_rl()
640 mempool_destroy(rl->rq_pool); in blk_exit_rl()
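
blk_init_rl() ties a request list to its queue, zeroes the per-direction counters and starvation flags, initializes both waitqueues, and backs the list with a mempool of at least BLKDEV_MIN_RQ requests (the mempool_create_node() line is truncated by the listing); blk_exit_rl() destroys that pool. Below is a condensed model of the same init/exit pairing, with calloc()/free() standing in for the mempool and every name invented.

    /*
     * Condensed model of the blk_init_rl()/blk_exit_rl() pairing.  calloc()
     * stands in for the mempool; every name is invented for illustration.
     */
    #include <pthread.h>
    #include <stdlib.h>

    enum { RW_SYNC = 0, RW_ASYNC = 1 };
    #define MODEL_MIN_RQ 4                       /* ~ BLKDEV_MIN_RQ */

    struct queue_model;
    struct request_model { int is_sync; };

    struct rl_model {
        struct queue_model *q;
        int count[2];
        int starved[2];
        pthread_cond_t wait[2];
        struct request_model *rq_pool;           /* NULL until init succeeds */
    };

    int model_init_rl(struct rl_model *rl, struct queue_model *q)
    {
        if (rl->rq_pool)                         /* already initialized */
            return -1;

        rl->q = q;
        rl->count[RW_SYNC] = rl->count[RW_ASYNC] = 0;
        rl->starved[RW_SYNC] = rl->starved[RW_ASYNC] = 0;
        pthread_cond_init(&rl->wait[RW_SYNC], NULL);
        pthread_cond_init(&rl->wait[RW_ASYNC], NULL);

        rl->rq_pool = calloc(MODEL_MIN_RQ, sizeof(*rl->rq_pool));
        return rl->rq_pool ? 0 : -1;             /* the kernel returns -ENOMEM here */
    }

    void model_exit_rl(struct rl_model *rl)
    {
        free(rl->rq_pool);                       /* ~ mempool_destroy() */
        rl->rq_pool = NULL;
    }
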
890 static inline void blk_free_request(struct request_list *rl, struct request *rq) in blk_free_request() argument
893 elv_put_request(rl->q, rq); in blk_free_request()
898 mempool_free(rq, rl->rq_pool); in blk_free_request()
935 static void __freed_request(struct request_list *rl, int sync) in __freed_request() argument
937 struct request_queue *q = rl->q; in __freed_request()
939 if (rl->count[sync] < queue_congestion_off_threshold(q)) in __freed_request()
940 blk_clear_congested(rl, sync); in __freed_request()
942 if (rl->count[sync] + 1 <= q->nr_requests) { in __freed_request()
943 if (waitqueue_active(&rl->wait[sync])) in __freed_request()
944 wake_up(&rl->wait[sync]); in __freed_request()
946 blk_clear_rl_full(rl, sync); in __freed_request()
954 static void freed_request(struct request_list *rl, unsigned int flags) in freed_request() argument
956 struct request_queue *q = rl->q; in freed_request()
960 rl->count[sync]--; in freed_request()
964 __freed_request(rl, sync); in freed_request()
966 if (unlikely(rl->starved[sync ^ 1])) in freed_request()
967 __freed_request(rl, sync ^ 1); in freed_request()
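
__freed_request() is the bookkeeping that runs once a request has gone back to the pool: when the count drops below the congestion-off threshold the list is marked uncongested, and once count + 1 fits within q->nr_requests any sleeper is woken and the "full" flag is cleared. freed_request() decrements the count for the direction that just freed and, if the opposite direction had recorded starvation, gives it the same treatment. A compact model of that threshold/wake/starvation sequence follows (invented names; plain flags replace the waitqueue and the writeback-congestion state).

    /*
     * Model of the __freed_request()/freed_request() bookkeeping.  Invented
     * names; plain flags replace the waitqueue and writeback-congestion state.
     */
    #include <stdbool.h>

    struct rl_model {
        int count[2];
        int starved[2];
        bool full[2];
        bool congested[2];
        bool wake_pending[2];                    /* ~ wake_up(&rl->wait[sync]) */
        int nr_requests;                         /* ~ q->nr_requests */
        int congestion_off_threshold;            /* ~ queue_congestion_off_threshold(q) */
    };

    static void model_freed_request_one(struct rl_model *rl, int sync)
    {
        if (rl->count[sync] < rl->congestion_off_threshold)
            rl->congested[sync] = false;         /* ~ blk_clear_congested() */

        if (rl->count[sync] + 1 <= rl->nr_requests) {
            rl->wake_pending[sync] = true;       /* ~ wake_up() if waitqueue_active() */
            rl->full[sync] = false;              /* ~ blk_clear_rl_full() */
        }
    }

    void model_freed_request(struct rl_model *rl, int sync)
    {
        rl->count[sync]--;
        model_freed_request_one(rl, sync);

        /* the other direction stalled with an empty list: nudge it too */
        if (rl->starved[sync ^ 1])
            model_freed_request_one(rl, sync ^ 1);
    }
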
972 struct request_list *rl; in blk_update_nr_requests() local
981 blk_queue_for_each_rl(rl, q) { in blk_update_nr_requests()
982 if (rl->count[BLK_RW_SYNC] >= on_thresh) in blk_update_nr_requests()
983 blk_set_congested(rl, BLK_RW_SYNC); in blk_update_nr_requests()
984 else if (rl->count[BLK_RW_SYNC] < off_thresh) in blk_update_nr_requests()
985 blk_clear_congested(rl, BLK_RW_SYNC); in blk_update_nr_requests()
987 if (rl->count[BLK_RW_ASYNC] >= on_thresh) in blk_update_nr_requests()
988 blk_set_congested(rl, BLK_RW_ASYNC); in blk_update_nr_requests()
989 else if (rl->count[BLK_RW_ASYNC] < off_thresh) in blk_update_nr_requests()
990 blk_clear_congested(rl, BLK_RW_ASYNC); in blk_update_nr_requests()
992 if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { in blk_update_nr_requests()
993 blk_set_rl_full(rl, BLK_RW_SYNC); in blk_update_nr_requests()
995 blk_clear_rl_full(rl, BLK_RW_SYNC); in blk_update_nr_requests()
996 wake_up(&rl->wait[BLK_RW_SYNC]); in blk_update_nr_requests()
999 if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { in blk_update_nr_requests()
1000 blk_set_rl_full(rl, BLK_RW_ASYNC); in blk_update_nr_requests()
1002 blk_clear_rl_full(rl, BLK_RW_ASYNC); in blk_update_nr_requests()
1003 wake_up(&rl->wait[BLK_RW_ASYNC]); in blk_update_nr_requests()
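
blk_update_nr_requests() resynchronizes every request list after q->nr_requests changes (typically via the nr_requests sysfs attribute): each direction is re-tested against the congestion on/off thresholds and against the new limit, and a wake-up is issued whenever a direction stops being full so sleepers can retry with the larger budget. The sketch below models that re-evaluation for a single list; in the kernel the same body runs under blk_queue_for_each_rl() for every list on the queue. Names are invented and flags replace the waitqueues.

    /*
     * Model of the re-evaluation blk_update_nr_requests() applies to one
     * request list; the kernel runs this for every list on the queue.
     * Invented names; flags replace the waitqueues.
     */
    #include <stdbool.h>

    enum { RW_SYNC = 0, RW_ASYNC = 1 };

    struct rl_model {
        int count[2];
        bool congested[2];
        bool full[2];
        bool wake_pending[2];                    /* ~ wake_up(&rl->wait[i]) */
    };

    void model_update_one_rl(struct rl_model *rl, int nr_requests,
                             int on_thresh, int off_thresh)
    {
        int i;

        for (i = RW_SYNC; i <= RW_ASYNC; i++) {
            if (rl->count[i] >= on_thresh)
                rl->congested[i] = true;
            else if (rl->count[i] < off_thresh)
                rl->congested[i] = false;

            if (rl->count[i] >= nr_requests) {
                rl->full[i] = true;
            } else {
                rl->full[i] = false;
                rl->wake_pending[i] = true;      /* let sleepers retry with the new budget */
            }
        }
    }
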
1060 static struct request *__get_request(struct request_list *rl, int rw_flags, in __get_request() argument
1063 struct request_queue *q = rl->q; in __get_request()
1078 if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { in __get_request()
1079 if (rl->count[is_sync]+1 >= q->nr_requests) { in __get_request()
1086 if (!blk_rl_full(rl, is_sync)) { in __get_request()
1088 blk_set_rl_full(rl, is_sync); in __get_request()
1101 blk_set_congested(rl, is_sync); in __get_request()
1109 if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) in __get_request()
1113 rl->count[is_sync]++; in __get_request()
1114 rl->starved[is_sync] = 0; in __get_request()
1138 rq = mempool_alloc(rl->rq_pool, gfp_mask); in __get_request()
1143 blk_rq_set_rl(rq, rl); in __get_request()
1203 freed_request(rl, rw_flags); in __get_request()
1213 if (unlikely(rl->count[is_sync] == 0)) in __get_request()
1214 rl->starved[is_sync] = 1; in __get_request()
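
__get_request() applies the admission policy before it ever touches the mempool: approaching q->nr_requests marks the list full and congested, reaching one and a half times the limit fails the allocation outright, and a successful attempt charges rl->count[is_sync] and clears the starved flag. If the allocation itself fails, the charge is returned (freed_request() in the source) and, when this direction has nothing left in flight, rl->starved[is_sync] is set so a later free in the other direction wakes it. The sketch below models only the checks visible in the listing, with invented names, malloc() in place of mempool_alloc(), and the failure-path bookkeeping folded inline.

    /*
     * Model of the admission checks __get_request() makes before and after
     * the mempool allocation.  Only the checks visible in the listing are
     * modelled; names are invented and malloc() stands in for mempool_alloc().
     */
    #include <stdbool.h>
    #include <stdlib.h>

    struct request_model { int is_sync; };

    struct rl_model {
        int count[2];
        int starved[2];
        bool full[2];
        bool congested[2];
        int nr_requests;                         /* ~ q->nr_requests */
        int congestion_on_threshold;             /* ~ queue_congestion_on_threshold(q) */
    };

    struct request_model *model_get_request_fast(struct rl_model *rl, int is_sync)
    {
        struct request_model *rq;

        if (rl->count[is_sync] + 1 >= rl->congestion_on_threshold) {
            if (rl->count[is_sync] + 1 >= rl->nr_requests)
                rl->full[is_sync] = true;        /* ~ blk_set_rl_full() */
            rl->congested[is_sync] = true;       /* ~ blk_set_congested() */
        }

        /* hard cap: past 150% of nr_requests the allocation simply fails */
        if (rl->count[is_sync] >= 3 * rl->nr_requests / 2)
            return NULL;

        rl->count[is_sync]++;
        rl->starved[is_sync] = 0;

        rq = malloc(sizeof(*rq));                /* ~ mempool_alloc(rl->rq_pool, ...) */
        if (!rq) {
            /* failure path: undo the charge (freed_request() in the kernel)
             * and record starvation if nothing of ours is left in flight */
            rl->count[is_sync]--;
            if (rl->count[is_sync] == 0)
                rl->starved[is_sync] = 1;
            return NULL;
        }
        rq->is_sync = is_sync;                   /* the kernel also records rl on rq via blk_rq_set_rl() */
        return rq;
    }
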
1237 struct request_list *rl; in get_request() local
1240 rl = blk_get_rl(q, bio); /* transferred to @rq on success */ in get_request()
1242 rq = __get_request(rl, rw_flags, bio, gfp_mask); in get_request()
1247 blk_put_rl(rl); in get_request()
1252 prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, in get_request()
1268 finish_wait(&rl->wait[is_sync], &wait); in get_request()
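
get_request() wraps that fast path in a reference-and-retry loop: blk_get_rl() picks the right list for the bio and takes a reference that travels with the returned request, and if __get_request() comes back empty the task does an exclusive wait on rl->wait[is_sync] until freed_request() wakes it, then retries. Below is a loose userspace analogue using a mutex/condition-variable pair instead of prepare_to_wait_exclusive()/finish_wait(); all names are invented.

    /*
     * Loose analogue of the get_request() retry loop.  A mutex/condvar pair
     * stands in for prepare_to_wait_exclusive()/io_schedule()/finish_wait();
     * all names are invented.
     */
    #include <pthread.h>
    #include <stdlib.h>

    struct request_model { int is_sync; };

    struct rl_model {
        pthread_mutex_t lock;
        pthread_cond_t wait[2];                  /* ~ rl->wait[] */
        int free_slots[2];
    };

    /* stand-in for __get_request(): NULL means "exhausted, caller may wait" */
    static struct request_model *model_try_get(struct rl_model *rl, int is_sync)
    {
        struct request_model *rq;

        if (rl->free_slots[is_sync] <= 0)
            return NULL;
        rq = malloc(sizeof(*rq));
        if (!rq)
            return NULL;
        rl->free_slots[is_sync]--;
        rq->is_sync = is_sync;
        return rq;
    }

    struct request_model *model_get_request(struct rl_model *rl, int is_sync,
                                            int may_sleep)
    {
        struct request_model *rq;

        pthread_mutex_lock(&rl->lock);
        while (!(rq = model_try_get(rl, is_sync))) {
            if (!may_sleep)                      /* caller cannot sleep: fail instead */
                break;
            /* sleep until the free path signals this direction, then retry */
            pthread_cond_wait(&rl->wait[is_sync], &rl->lock);
        }
        pthread_mutex_unlock(&rl->lock);
        return rq;                               /* the rl reference travels with rq */
    }
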
1484 struct request_list *rl = blk_rq_rl(req); in __blk_put_request() local
1489 blk_free_request(rl, req); in __blk_put_request()
1490 freed_request(rl, flags); in __blk_put_request()
1491 blk_put_rl(rl); in __blk_put_request()
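
__blk_put_request() is the mirror image of the allocation path: look up the list the request was charged to (blk_rq_rl()), hand the memory back (blk_free_request(), which ends in mempool_free()), run the freed_request() bookkeeping so waiters and congestion state catch up, and finally drop the reference taken by get_request() with blk_put_rl(). A short model of that ordering follows, with the wake-up and congestion details elided and all names invented.

    /*
     * Model of the __blk_put_request() ordering: free the memory, update the
     * list's accounting, drop the list reference.  Invented names; wake-up and
     * congestion details from freed_request() are elided.
     */
    #include <stdlib.h>

    struct rl_model { int count[2]; int refcount; };
    struct request_model { struct rl_model *rl; int is_sync; };

    static void model_freed_request(struct rl_model *rl, int is_sync)
    {
        rl->count[is_sync]--;                    /* wake-ups / congestion clearing elided */
    }

    void model_put_request(struct request_model *rq)
    {
        struct rl_model *rl = rq->rl;            /* ~ blk_rq_rl(req) */
        int is_sync = rq->is_sync;

        free(rq);                                /* ~ blk_free_request() -> mempool_free() */
        model_freed_request(rl, is_sync);
        rl->refcount--;                          /* ~ blk_put_rl(rl) */
    }
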