Lines Matching refs:rl

419 struct request_list *rl; in __blk_drain_queue() local
421 blk_queue_for_each_rl(rl, q) in __blk_drain_queue()
422 for (i = 0; i < ARRAY_SIZE(rl->wait); i++) in __blk_drain_queue()
423 wake_up_all(&rl->wait[i]); in __blk_drain_queue()
483 struct request_list *rl; in blk_set_queue_dying() local
485 blk_queue_for_each_rl(rl, q) { in blk_set_queue_dying()
486 if (rl->rq_pool) { in blk_set_queue_dying()
487 wake_up(&rl->wait[BLK_RW_SYNC]); in blk_set_queue_dying()
488 wake_up(&rl->wait[BLK_RW_ASYNC]); in blk_set_queue_dying()
574 int blk_init_rl(struct request_list *rl, struct request_queue *q, in blk_init_rl() argument
577 if (unlikely(rl->rq_pool)) in blk_init_rl()
580 rl->q = q; in blk_init_rl()
581 rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; in blk_init_rl()
582 rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; in blk_init_rl()
583 init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); in blk_init_rl()
584 init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); in blk_init_rl()
586 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, alloc_request_struct, in blk_init_rl()
590 if (!rl->rq_pool) in blk_init_rl()
596 void blk_exit_rl(struct request_list *rl) in blk_exit_rl() argument
598 if (rl->rq_pool) in blk_exit_rl()
599 mempool_destroy(rl->rq_pool); in blk_exit_rl()
799 static inline void blk_free_request(struct request_list *rl, struct request *rq) in blk_free_request() argument
802 elv_put_request(rl->q, rq); in blk_free_request()
807 mempool_free(rq, rl->rq_pool); in blk_free_request()
844 static void __freed_request(struct request_list *rl, int sync) in __freed_request() argument
846 struct request_queue *q = rl->q; in __freed_request()
852 if (rl == &q->root_rl && in __freed_request()
853 rl->count[sync] < queue_congestion_off_threshold(q)) in __freed_request()
856 if (rl->count[sync] + 1 <= q->nr_requests) { in __freed_request()
857 if (waitqueue_active(&rl->wait[sync])) in __freed_request()
858 wake_up(&rl->wait[sync]); in __freed_request()
860 blk_clear_rl_full(rl, sync); in __freed_request()
868 static void freed_request(struct request_list *rl, unsigned int flags) in freed_request() argument
870 struct request_queue *q = rl->q; in freed_request()
874 rl->count[sync]--; in freed_request()
878 __freed_request(rl, sync); in freed_request()
880 if (unlikely(rl->starved[sync ^ 1])) in freed_request()
881 __freed_request(rl, sync ^ 1); in freed_request()
886 struct request_list *rl; in blk_update_nr_requests() local
893 rl = &q->root_rl; in blk_update_nr_requests()
895 if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q)) in blk_update_nr_requests()
897 else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q)) in blk_update_nr_requests()
900 if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q)) in blk_update_nr_requests()
902 else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q)) in blk_update_nr_requests()
905 blk_queue_for_each_rl(rl, q) { in blk_update_nr_requests()
906 if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { in blk_update_nr_requests()
907 blk_set_rl_full(rl, BLK_RW_SYNC); in blk_update_nr_requests()
909 blk_clear_rl_full(rl, BLK_RW_SYNC); in blk_update_nr_requests()
910 wake_up(&rl->wait[BLK_RW_SYNC]); in blk_update_nr_requests()
913 if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { in blk_update_nr_requests()
914 blk_set_rl_full(rl, BLK_RW_ASYNC); in blk_update_nr_requests()
916 blk_clear_rl_full(rl, BLK_RW_ASYNC); in blk_update_nr_requests()
917 wake_up(&rl->wait[BLK_RW_ASYNC]); in blk_update_nr_requests()
974 static struct request *__get_request(struct request_list *rl, int rw_flags, in __get_request() argument
977 struct request_queue *q = rl->q; in __get_request()
992 if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { in __get_request()
993 if (rl->count[is_sync]+1 >= q->nr_requests) { in __get_request()
1000 if (!blk_rl_full(rl, is_sync)) { in __get_request()
1002 blk_set_rl_full(rl, is_sync); in __get_request()
1019 if (rl == &q->root_rl) in __get_request()
1028 if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) in __get_request()
1032 rl->count[is_sync]++; in __get_request()
1033 rl->starved[is_sync] = 0; in __get_request()
1057 rq = mempool_alloc(rl->rq_pool, gfp_mask); in __get_request()
1062 blk_rq_set_rl(rq, rl); in __get_request()
1122 freed_request(rl, rw_flags); in __get_request()
1132 if (unlikely(rl->count[is_sync] == 0)) in __get_request()
1133 rl->starved[is_sync] = 1; in __get_request()
1156 struct request_list *rl; in get_request() local
1159 rl = blk_get_rl(q, bio); /* transferred to @rq on success */ in get_request()
1161 rq = __get_request(rl, rw_flags, bio, gfp_mask); in get_request()
1166 blk_put_rl(rl); in get_request()
1171 prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, in get_request()
1187 finish_wait(&rl->wait[is_sync], &wait); in get_request()
1403 struct request_list *rl = blk_rq_rl(req); in __blk_put_request() local
1408 blk_free_request(rl, req); in __blk_put_request()
1409 freed_request(rl, flags); in __blk_put_request()
1410 blk_put_rl(rl); in __blk_put_request()
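
Taken together, these references trace the lifetime of a struct request_list through the legacy request path: blk_init_rl()/blk_exit_rl() set up and tear down the mempool and wait queues, __get_request()/freed_request() maintain the per-direction count[] and starved[] accounting, blk_update_nr_requests() re-evaluates the full/congested state against q->nr_requests, and __blk_put_request() releases the request back through blk_free_request() and blk_put_rl(). The fragment below is a minimal, single-threaded userspace sketch of just the counting and starvation-wakeup pattern, not kernel code: the names request_list_sketch, sketch_get_request and sketch_freed_request are invented for illustration, the 3 * nr_requests / 2 hard limit mirrors the check at line 1028, and wait queues, mempools, congestion thresholds and the elevator are deliberately omitted.

/*
 * Purely illustrative sketch -- not the kernel implementation.
 * Models only the count[]/starved[] bookkeeping that __get_request()
 * and freed_request() perform on a struct request_list.
 */
#include <stdbool.h>
#include <stdio.h>

enum { BLK_RW_SYNC = 0, BLK_RW_ASYNC = 1 };

struct request_list_sketch {            /* hypothetical stand-in */
	unsigned int count[2];          /* requests in flight per direction */
	unsigned int starved[2];        /* direction that failed to allocate */
	unsigned int nr_requests;       /* soft limit, like q->nr_requests */
};

/* Rough analogue of __get_request(): refuse past 3/2 of the soft limit. */
static bool sketch_get_request(struct request_list_sketch *rl, int is_sync)
{
	if (rl->count[is_sync] >= 3 * rl->nr_requests / 2)
		return false;           /* real code would sleep or fail here */
	rl->count[is_sync]++;
	rl->starved[is_sync] = 0;       /* mirrors rl->starved[is_sync] = 0 */
	return true;
}

/* Rough analogue of freed_request()/__freed_request(). */
static void sketch_freed_request(struct request_list_sketch *rl, int is_sync)
{
	rl->count[is_sync]--;
	if (rl->count[is_sync] + 1 <= rl->nr_requests)
		printf("would wake a sleeper on wait[%d]\n", is_sync);
	if (rl->starved[is_sync ^ 1])   /* the peer direction was starved */
		printf("would also wake direction %d\n", is_sync ^ 1);
}

int main(void)
{
	struct request_list_sketch rl = { .nr_requests = 2 };

	while (sketch_get_request(&rl, BLK_RW_SYNC))
		;                       /* exhaust the sync side */
	/* Pretend sync is starved; the kernel sets this in __get_request()
	 * when an allocation fails with no requests in flight (lines 1132-1133). */
	rl.starved[BLK_RW_SYNC] = 1;

	sketch_get_request(&rl, BLK_RW_ASYNC);   /* async still has room */
	sketch_freed_request(&rl, BLK_RW_ASYNC); /* frees one, notices sync starvation */
	sketch_freed_request(&rl, BLK_RW_SYNC);

	printf("sync=%u async=%u\n", rl.count[BLK_RW_SYNC], rl.count[BLK_RW_ASYNC]);
	return 0;
}

The count[] and starved[] arrays are indexed per direction (BLK_RW_SYNC vs BLK_RW_ASYNC) so that a direction which ran dry can be woken as soon as the other direction frees a request, which is exactly the rl->starved[sync ^ 1] check at lines 880-881.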