Lines Matching refs:rq

97 void blk_rq_init(struct request_queue *q, struct request *rq)  in blk_rq_init()  argument
99 memset(rq, 0, sizeof(*rq)); in blk_rq_init()
101 INIT_LIST_HEAD(&rq->queuelist); in blk_rq_init()
102 INIT_LIST_HEAD(&rq->timeout_list); in blk_rq_init()
103 rq->cpu = -1; in blk_rq_init()
104 rq->q = q; in blk_rq_init()
105 rq->__sector = (sector_t) -1; in blk_rq_init()
106 INIT_HLIST_NODE(&rq->hash); in blk_rq_init()
107 RB_CLEAR_NODE(&rq->rb_node); in blk_rq_init()
108 rq->cmd = rq->__cmd; in blk_rq_init()
109 rq->cmd_len = BLK_MAX_CDB; in blk_rq_init()
110 rq->tag = -1; in blk_rq_init()
111 rq->start_time = jiffies; in blk_rq_init()
112 set_start_time_ns(rq); in blk_rq_init()
113 rq->part = NULL; in blk_rq_init()
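
These matches show blk_rq_init() resetting a request to known defaults: empty list heads, no tag, no sector, the inline CDB buffer selected, start time stamped. A driver that recycles a private, embedded request for internal commands would reinitialise it the same way; the sketch below is only an illustration of that calling pattern, and my_dev / my_prepare_internal_rq are hypothetical names.

    #include <linux/blkdev.h>

    /* Hypothetical driver-private device that reuses one embedded request
     * for internal commands instead of allocating from the request pool. */
    struct my_dev {
            struct request_queue *queue;
            struct request internal_rq;
    };

    static void my_prepare_internal_rq(struct my_dev *dev)
    {
            /* Reset list heads, tag, sector and the inline CDB buffer to
             * the same defaults the core applies to pool-allocated requests. */
            blk_rq_init(dev->queue, &dev->internal_rq);
    }
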
117 static void req_bio_endio(struct request *rq, struct bio *bio, in req_bio_endio() argument
125 if (unlikely(rq->cmd_flags & REQ_QUIET)) in req_bio_endio()
131 if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) in req_bio_endio()
135 void blk_dump_rq_flags(struct request *rq, char *msg) in blk_dump_rq_flags() argument
140 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, in blk_dump_rq_flags()
141 (unsigned long long) rq->cmd_flags); in blk_dump_rq_flags()
144 (unsigned long long)blk_rq_pos(rq), in blk_dump_rq_flags()
145 blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); in blk_dump_rq_flags()
147 rq->bio, rq->biotail, blk_rq_bytes(rq)); in blk_dump_rq_flags()
149 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { in blk_dump_rq_flags()
152 printk("%02x ", rq->cmd[bit]); in blk_dump_rq_flags()
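
blk_dump_rq_flags() prints the disk name, command type and flags, position, size, and, for REQ_TYPE_BLOCK_PC requests, the raw CDB bytes. Drivers typically call it just before failing a request they cannot handle; a minimal sketch of that pattern, with my_reject_request being a hypothetical helper:

    #include <linux/blkdev.h>

    /* Hypothetical error path: dump the request's position, length, flags
     * and (for BLOCK_PC requests) its CDB bytes, then fail it with -EIO.
     * Assumes the queue lock is held, as it is inside a legacy request_fn. */
    static void my_reject_request(struct request *rq)
    {
            blk_dump_rq_flags(rq, "my_driver: rejecting request");
            __blk_end_request_all(rq, -EIO);
    }
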
799 static inline void blk_free_request(struct request_list *rl, struct request *rq) in blk_free_request() argument
801 if (rq->cmd_flags & REQ_ELVPRIV) { in blk_free_request()
802 elv_put_request(rl->q, rq); in blk_free_request()
803 if (rq->elv.icq) in blk_free_request()
804 put_io_context(rq->elv.icq->ioc); in blk_free_request()
807 mempool_free(rq, rl->rq_pool); in blk_free_request()
978 struct request *rq; in __get_request() local
1057 rq = mempool_alloc(rl->rq_pool, gfp_mask); in __get_request()
1058 if (!rq) in __get_request()
1061 blk_rq_init(q, rq); in __get_request()
1062 blk_rq_set_rl(rq, rl); in __get_request()
1063 rq->cmd_flags = rw_flags | REQ_ALLOCED; in __get_request()
1074 rq->elv.icq = icq; in __get_request()
1075 if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) in __get_request()
1093 return rq; in __get_request()
1105 rq->cmd_flags &= ~REQ_ELVPRIV; in __get_request()
1106 rq->elv.icq = NULL; in __get_request()
1157 struct request *rq; in get_request() local
1161 rq = __get_request(rl, rw_flags, bio, gfp_mask); in get_request()
1162 if (!IS_ERR(rq)) in get_request()
1163 return rq; in get_request()
1167 return rq; in get_request()
1195 struct request *rq; in blk_old_get_request() local
1203 rq = get_request(q, rw, NULL, gfp_mask); in blk_old_get_request()
1204 if (IS_ERR(rq)) in blk_old_get_request()
1208 return rq; in blk_old_get_request()
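
__get_request() allocates from the per-queue mempool, initialises the request and attaches elevator data; get_request() and blk_old_get_request() wrap it and report failure as an ERR_PTR value rather than NULL (visible in the IS_ERR checks above). From a driver's point of view this surfaces as blk_get_request(); a minimal sketch of that calling convention, with q assumed to come from the driver:

    #include <linux/blkdev.h>
    #include <linux/err.h>

    static int my_alloc_request(struct request_queue *q)
    {
            struct request *rq;

            /* May sleep with GFP_KERNEL; failure is returned as ERR_PTR(),
             * not NULL, so it must be checked with IS_ERR(). */
            rq = blk_get_request(q, READ, GFP_KERNEL);
            if (IS_ERR(rq))
                    return PTR_ERR(rq);

            /* ... fill in and issue the request ... */

            blk_put_request(rq);
            return 0;
    }
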
1254 struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); in blk_make_request() local
1256 if (IS_ERR(rq)) in blk_make_request()
1257 return rq; in blk_make_request()
1259 blk_rq_set_block_pc(rq); in blk_make_request()
1266 ret = blk_rq_append_bio(q, rq, bounce_bio); in blk_make_request()
1268 blk_put_request(rq); in blk_make_request()
1273 return rq; in blk_make_request()
1282 void blk_rq_set_block_pc(struct request *rq) in blk_rq_set_block_pc() argument
1284 rq->cmd_type = REQ_TYPE_BLOCK_PC; in blk_rq_set_block_pc()
1285 rq->__data_len = 0; in blk_rq_set_block_pc()
1286 rq->__sector = (sector_t) -1; in blk_rq_set_block_pc()
1287 rq->bio = rq->biotail = NULL; in blk_rq_set_block_pc()
1288 memset(rq->__cmd, 0, sizeof(rq->__cmd)); in blk_rq_set_block_pc()
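
blk_make_request() builds a request around a caller-supplied bio chain, and blk_rq_set_block_pc() marks a request as a SCSI passthrough (REQ_TYPE_BLOCK_PC) with no data and a zeroed CDB. The usual pattern is to allocate a request, mark it BLOCK_PC, fill in the CDB and execute it synchronously. A hedged sketch of that, assuming a SCSI-backed queue and disk; my_test_unit_ready is hypothetical:

    #include <linux/blkdev.h>
    #include <linux/err.h>
    #include <scsi/scsi.h>

    static int my_test_unit_ready(struct request_queue *q, struct gendisk *disk)
    {
            struct request *rq;
            int err;

            rq = blk_get_request(q, READ, GFP_KERNEL);
            if (IS_ERR(rq))
                    return PTR_ERR(rq);

            blk_rq_set_block_pc(rq);        /* REQ_TYPE_BLOCK_PC, zeroed CDB */
            rq->cmd[0] = TEST_UNIT_READY;   /* 6-byte CDB, no data transfer */
            rq->cmd_len = 6;
            rq->timeout = 30 * HZ;

            err = blk_execute_rq(q, disk, rq, 0);   /* waits for completion */

            blk_put_request(rq);
            return err;
    }
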
1302 void blk_requeue_request(struct request_queue *q, struct request *rq) in blk_requeue_request() argument
1304 blk_delete_timer(rq); in blk_requeue_request()
1305 blk_clear_rq_complete(rq); in blk_requeue_request()
1306 trace_block_rq_requeue(q, rq); in blk_requeue_request()
1308 if (rq->cmd_flags & REQ_QUEUED) in blk_requeue_request()
1309 blk_queue_end_tag(q, rq); in blk_requeue_request()
1311 BUG_ON(blk_queued_rq(rq)); in blk_requeue_request()
1313 elv_requeue_request(q, rq); in blk_requeue_request()
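
blk_requeue_request() undoes the start of a request (timeout timer, tag, completion state) and hands it back to the elevator, so it is meant for requests that have been fetched but not partially completed. A typical use is a legacy request_fn backing off when the hardware has no room; a minimal sketch, with my_hw_busy hypothetical:

    #include <linux/blkdev.h>

    static bool my_hw_busy(void);           /* hypothetical hardware check */

    static void my_request_fn(struct request_queue *q)
    {
            struct request *rq;

            while ((rq = blk_fetch_request(q)) != NULL) {
                    if (my_hw_busy()) {
                            /* Hand the started request back to the elevator
                             * and retry shortly; q->queue_lock is already
                             * held inside a request_fn. */
                            blk_requeue_request(q, rq);
                            blk_delay_queue(q, 10);         /* milliseconds */
                            return;
                    }
                    /* ... issue rq to the hardware ... */
            }
    }
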
1317 static void add_acct_request(struct request_queue *q, struct request *rq, in add_acct_request() argument
1320 blk_account_io_start(rq, true); in add_acct_request()
1321 __elv_add_request(q, rq, where); in add_acct_request()
1368 static void blk_pm_put_request(struct request *rq) in blk_pm_put_request() argument
1370 if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending) in blk_pm_put_request()
1371 pm_runtime_mark_last_busy(rq->q->dev); in blk_pm_put_request()
1374 static inline void blk_pm_put_request(struct request *rq) {} in blk_pm_put_request() argument
1444 void blk_add_request_payload(struct request *rq, struct page *page, in blk_add_request_payload() argument
1447 struct bio *bio = rq->bio; in blk_add_request_payload()
1457 rq->__data_len = rq->resid_len = len; in blk_add_request_payload()
1458 rq->nr_phys_segments = 1; in blk_add_request_payload()
1531 struct request *rq; in blk_attempt_plug_merge() local
1545 list_for_each_entry_reverse(rq, plug_list, queuelist) { in blk_attempt_plug_merge()
1548 if (rq->q == q) in blk_attempt_plug_merge()
1551 if (rq->q != q || !blk_rq_merge_ok(rq, bio)) in blk_attempt_plug_merge()
1554 el_ret = blk_try_merge(rq, bio); in blk_attempt_plug_merge()
1556 ret = bio_attempt_back_merge(q, rq, bio); in blk_attempt_plug_merge()
1560 ret = bio_attempt_front_merge(q, rq, bio); in blk_attempt_plug_merge()
2024 int blk_rq_check_limits(struct request_queue *q, struct request *rq) in blk_rq_check_limits() argument
2026 if (!rq_mergeable(rq)) in blk_rq_check_limits()
2029 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) { in blk_rq_check_limits()
2040 blk_recalc_rq_segments(rq); in blk_rq_check_limits()
2041 if (rq->nr_phys_segments > queue_max_segments(q)) { in blk_rq_check_limits()
2055 int blk_insert_cloned_request(struct request_queue *q, struct request *rq) in blk_insert_cloned_request() argument
2060 if (blk_rq_check_limits(q, rq)) in blk_insert_cloned_request()
2063 if (rq->rq_disk && in blk_insert_cloned_request()
2064 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq))) in blk_insert_cloned_request()
2069 blk_account_io_start(rq, true); in blk_insert_cloned_request()
2070 blk_mq_insert_request(rq, false, true, false); in blk_insert_cloned_request()
2084 BUG_ON(blk_queued_rq(rq)); in blk_insert_cloned_request()
2086 if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA)) in blk_insert_cloned_request()
2089 add_acct_request(q, rq, where); in blk_insert_cloned_request()
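
blk_rq_check_limits() re-validates a request's size and segment count against the target queue, and blk_insert_cloned_request() is the entry point request-stacking drivers (such as request-based dm) use to feed an already-prepared clone into a lower device's queue without the block layer touching the data. A hedged sketch of just the dispatch step, assuming the clone was prepared elsewhere; my_dispatch_clone is hypothetical:

    #include <linux/blkdev.h>

    static int my_dispatch_clone(struct request_queue *bottom_q,
                                 struct request *clone)
    {
            /* Re-validates the clone against bottom_q's limits, starts the
             * I/O accounting and queues it (FLUSH/FUA take the flush path). */
            return blk_insert_cloned_request(bottom_q, clone);
    }
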
2114 unsigned int blk_rq_err_bytes(const struct request *rq) in blk_rq_err_bytes() argument
2116 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; in blk_rq_err_bytes()
2120 if (!(rq->cmd_flags & REQ_MIXED_MERGE)) in blk_rq_err_bytes()
2121 return blk_rq_bytes(rq); in blk_rq_err_bytes()
2130 for (bio = rq->bio; bio; bio = bio->bi_next) { in blk_rq_err_bytes()
2137 BUG_ON(blk_rq_bytes(rq) && !bytes); in blk_rq_err_bytes()
2188 struct request *rq) in blk_pm_peek_request() argument
2191 (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM)))) in blk_pm_peek_request()
2194 return rq; in blk_pm_peek_request()
2198 struct request *rq) in blk_pm_peek_request() argument
2200 return rq; in blk_pm_peek_request()
2204 void blk_account_io_start(struct request *rq, bool new_io) in blk_account_io_start() argument
2207 int rw = rq_data_dir(rq); in blk_account_io_start()
2210 if (!blk_do_io_stat(rq)) in blk_account_io_start()
2216 part = rq->part; in blk_account_io_start()
2219 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); in blk_account_io_start()
2229 part = &rq->rq_disk->part0; in blk_account_io_start()
2234 rq->part = part; in blk_account_io_start()
2258 struct request *rq; in blk_peek_request() local
2261 while ((rq = __elv_next_request(q)) != NULL) { in blk_peek_request()
2263 rq = blk_pm_peek_request(q, rq); in blk_peek_request()
2264 if (!rq) in blk_peek_request()
2267 if (!(rq->cmd_flags & REQ_STARTED)) { in blk_peek_request()
2273 if (rq->cmd_flags & REQ_SORTED) in blk_peek_request()
2274 elv_activate_rq(q, rq); in blk_peek_request()
2281 rq->cmd_flags |= REQ_STARTED; in blk_peek_request()
2282 trace_block_rq_issue(q, rq); in blk_peek_request()
2285 if (!q->boundary_rq || q->boundary_rq == rq) { in blk_peek_request()
2286 q->end_sector = rq_end_sector(rq); in blk_peek_request()
2290 if (rq->cmd_flags & REQ_DONTPREP) in blk_peek_request()
2293 if (q->dma_drain_size && blk_rq_bytes(rq)) { in blk_peek_request()
2300 rq->nr_phys_segments++; in blk_peek_request()
2306 ret = q->prep_rq_fn(q, rq); in blk_peek_request()
2316 if (q->dma_drain_size && blk_rq_bytes(rq) && in blk_peek_request()
2317 !(rq->cmd_flags & REQ_DONTPREP)) { in blk_peek_request()
2322 --rq->nr_phys_segments; in blk_peek_request()
2325 rq = NULL; in blk_peek_request()
2328 rq->cmd_flags |= REQ_QUIET; in blk_peek_request()
2333 blk_start_request(rq); in blk_peek_request()
2334 __blk_end_request_all(rq, -EIO); in blk_peek_request()
2341 return rq; in blk_peek_request()
2345 void blk_dequeue_request(struct request *rq) in blk_dequeue_request() argument
2347 struct request_queue *q = rq->q; in blk_dequeue_request()
2349 BUG_ON(list_empty(&rq->queuelist)); in blk_dequeue_request()
2350 BUG_ON(ELV_ON_HASH(rq)); in blk_dequeue_request()
2352 list_del_init(&rq->queuelist); in blk_dequeue_request()
2359 if (blk_account_rq(rq)) { in blk_dequeue_request()
2360 q->in_flight[rq_is_sync(rq)]++; in blk_dequeue_request()
2361 set_io_start_time_ns(rq); in blk_dequeue_request()
2413 struct request *rq; in blk_fetch_request() local
2415 rq = blk_peek_request(q); in blk_fetch_request()
2416 if (rq) in blk_fetch_request()
2417 blk_start_request(rq); in blk_fetch_request()
2418 return rq; in blk_fetch_request()
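
blk_peek_request() returns the next request without dequeuing it (running the prep_rq_fn, adding the DMA drain segment, and failing requests the prep function kills), blk_dequeue_request() removes it and starts in-flight accounting, and blk_fetch_request() combines peek and start. The classic single-queue request_fn loop built on these, completing one chunk at a time; my_xfer is a hypothetical per-chunk transfer:

    #include <linux/blkdev.h>

    static int my_xfer(struct request *rq);         /* hypothetical per-chunk I/O */

    static void my_request_fn(struct request_queue *q)
    {
            struct request *rq;

            /* Called with q->queue_lock held; blk_fetch_request() is
             * blk_peek_request() + blk_start_request() in one step. */
            while ((rq = blk_fetch_request(q)) != NULL) {
                    bool more;

                    do {
                            int err = my_xfer(rq);  /* current chunk only */

                            if (err) {
                                    __blk_end_request_all(rq, err);
                                    break;
                            }
                            /* true while the request still has bytes left */
                            more = __blk_end_request_cur(rq, 0);
                    } while (more);
            }
    }
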
2559 static bool blk_update_bidi_request(struct request *rq, int error, in blk_update_bidi_request() argument
2563 if (blk_update_request(rq, error, nr_bytes)) in blk_update_bidi_request()
2567 if (unlikely(blk_bidi_rq(rq)) && in blk_update_bidi_request()
2568 blk_update_request(rq->next_rq, error, bidi_bytes)) in blk_update_bidi_request()
2571 if (blk_queue_add_random(rq->q)) in blk_update_bidi_request()
2572 add_disk_randomness(rq->rq_disk); in blk_update_bidi_request()
2645 static bool blk_end_bidi_request(struct request *rq, int error, in blk_end_bidi_request() argument
2648 struct request_queue *q = rq->q; in blk_end_bidi_request()
2651 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) in blk_end_bidi_request()
2655 blk_finish_request(rq, error); in blk_end_bidi_request()
2676 bool __blk_end_bidi_request(struct request *rq, int error, in __blk_end_bidi_request() argument
2679 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) in __blk_end_bidi_request()
2682 blk_finish_request(rq, error); in __blk_end_bidi_request()
2701 bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes) in blk_end_request() argument
2703 return blk_end_bidi_request(rq, error, nr_bytes, 0); in blk_end_request()
2715 void blk_end_request_all(struct request *rq, int error) in blk_end_request_all() argument
2720 if (unlikely(blk_bidi_rq(rq))) in blk_end_request_all()
2721 bidi_bytes = blk_rq_bytes(rq->next_rq); in blk_end_request_all()
2723 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); in blk_end_request_all()
2740 bool blk_end_request_cur(struct request *rq, int error) in blk_end_request_cur() argument
2742 return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); in blk_end_request_cur()
2758 bool blk_end_request_err(struct request *rq, int error) in blk_end_request_err() argument
2761 return blk_end_request(rq, error, blk_rq_err_bytes(rq)); in blk_end_request_err()
2778 bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) in __blk_end_request() argument
2780 return __blk_end_bidi_request(rq, error, nr_bytes, 0); in __blk_end_request()
2792 void __blk_end_request_all(struct request *rq, int error) in __blk_end_request_all() argument
2797 if (unlikely(blk_bidi_rq(rq))) in __blk_end_request_all()
2798 bidi_bytes = blk_rq_bytes(rq->next_rq); in __blk_end_request_all()
2800 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); in __blk_end_request_all()
2818 bool __blk_end_request_cur(struct request *rq, int error) in __blk_end_request_cur() argument
2820 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); in __blk_end_request_cur()
2837 bool __blk_end_request_err(struct request *rq, int error) in __blk_end_request_err() argument
2840 return __blk_end_request(rq, error, blk_rq_err_bytes(rq)); in __blk_end_request_err()
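
The *_end_request helpers all funnel into blk_update_request() plus blk_finish_request(): the blk_end_* variants take the queue lock themselves, the __blk_end_* variants assume the caller already holds it, and the _all/_cur/_err suffixes choose how many bytes to complete. A hedged sketch of a completion path driven from an interrupt handler, assuming dev->lock was also passed to blk_init_queue() as the queue lock; my_dev and my_irq_handler are hypothetical:

    #include <linux/blkdev.h>
    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    struct my_dev {
            spinlock_t lock;                /* also used as the queue lock */
            struct request *cur_rq;         /* request currently on the wire */
    };

    static irqreturn_t my_irq_handler(int irq, void *data)
    {
            struct my_dev *dev = data;
            unsigned int done = 4096;       /* bytes the hardware reported done */
            unsigned long flags;

            spin_lock_irqsave(&dev->lock, flags);
            /* Queue lock held, so use the __blk_end_* variant; it returns
             * true while the request still has bytes outstanding. */
            if (!__blk_end_request(dev->cur_rq, 0, done))
                    dev->cur_rq = NULL;     /* fully completed and freed */
            spin_unlock_irqrestore(&dev->lock, flags);

            return IRQ_HANDLED;
    }
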
2844 void blk_rq_bio_prep(struct request_queue *q, struct request *rq, in blk_rq_bio_prep() argument
2848 rq->cmd_flags |= bio->bi_rw & REQ_WRITE; in blk_rq_bio_prep()
2851 rq->nr_phys_segments = bio_phys_segments(q, bio); in blk_rq_bio_prep()
2853 rq->__data_len = bio->bi_iter.bi_size; in blk_rq_bio_prep()
2854 rq->bio = rq->biotail = bio; in blk_rq_bio_prep()
2857 rq->rq_disk = bio->bi_bdev->bd_disk; in blk_rq_bio_prep()
2868 void rq_flush_dcache_pages(struct request *rq) in rq_flush_dcache_pages() argument
2873 rq_for_each_segment(bvec, rq, iter) in rq_flush_dcache_pages()
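
rq_flush_dcache_pages() walks every segment of a request and flushes the D-cache for its pages; on architectures that do not need it, it compiles to a no-op. A PIO driver that fills a read request's pages by CPU copy typically calls it before completing the request, as sketched below with a hypothetical my_pio_read:

    #include <linux/blkdev.h>

    static void my_pio_read(struct request *rq);    /* hypothetical CPU copy-in */

    static void my_finish_pio_read(struct request *rq)
    {
            my_pio_read(rq);

            /* The data was written by the CPU rather than DMA, so flush the
             * D-cache of every page in the request before completing it. */
            rq_flush_dcache_pages(rq);
            __blk_end_request_all(rq, 0);
    }
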
2914 void blk_rq_unprep_clone(struct request *rq) in blk_rq_unprep_clone() argument
2918 while ((bio = rq->bio) != NULL) { in blk_rq_unprep_clone()
2919 rq->bio = bio->bi_next; in blk_rq_unprep_clone()
2961 int blk_rq_prep_clone(struct request *rq, struct request *rq_src, in blk_rq_prep_clone() argument
2979 if (rq->bio) { in blk_rq_prep_clone()
2980 rq->biotail->bi_next = bio; in blk_rq_prep_clone()
2981 rq->biotail = bio; in blk_rq_prep_clone()
2983 rq->bio = rq->biotail = bio; in blk_rq_prep_clone()
2986 __blk_rq_prep_clone(rq, rq_src); in blk_rq_prep_clone()
2993 blk_rq_unprep_clone(rq); in blk_rq_prep_clone()
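
blk_rq_prep_clone() clones rq_src's bios (through the supplied bio_set and an optional per-bio constructor) onto rq and copies the request attributes, while blk_rq_unprep_clone() frees those cloned bios again; this is the pairing request-based dm relies on when mapping a request to a lower device. A hedged sketch of that pairing, with the clone allocation assumed to happen elsewhere and the dispatch step as shown earlier; my_clone_and_dispatch is hypothetical:

    #include <linux/blkdev.h>
    #include <linux/bio.h>

    /* 'bs' is a driver-owned bio_set; no per-bio constructor is needed here. */
    static int my_clone_and_dispatch(struct request *clone, struct request *rq_src,
                                     struct bio_set *bs,
                                     struct request_queue *bottom_q)
    {
            int ret;

            ret = blk_rq_prep_clone(clone, rq_src, bs, GFP_ATOMIC, NULL, NULL);
            if (ret)
                    return ret;

            ret = blk_insert_cloned_request(bottom_q, clone);
            if (ret)
                    blk_rq_unprep_clone(clone);     /* free the cloned bios */

            return ret;
    }
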
3129 struct request *rq; in blk_flush_plug_list() local
3154 rq = list_entry_rq(list.next); in blk_flush_plug_list()
3155 list_del_init(&rq->queuelist); in blk_flush_plug_list()
3156 BUG_ON(!rq->q); in blk_flush_plug_list()
3157 if (rq->q != q) { in blk_flush_plug_list()
3163 q = rq->q; in blk_flush_plug_list()
3172 __blk_end_request_all(rq, -ENODEV); in blk_flush_plug_list()
3179 if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) in blk_flush_plug_list()
3180 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); in blk_flush_plug_list()
3182 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); in blk_flush_plug_list()
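
blk_flush_plug_list() drains a task's plug list: it sorts the held requests, takes each destination queue's lock once, and inserts FLUSH/FUA requests and ordinary requests at the appropriate elevator positions. The plug itself comes from the submitter; a minimal sketch of that side, which is what makes blk_attempt_plug_merge() (above) and this flush path run. It assumes an array of already-built write bios; my_submit_batch is hypothetical:

    #include <linux/blkdev.h>
    #include <linux/bio.h>

    static void my_submit_batch(struct bio **bios, int nr)
    {
            struct blk_plug plug;
            int i;

            /* While the plug is active, submitted bios are merged into
             * per-task plugged requests instead of hitting the queue. */
            blk_start_plug(&plug);

            for (i = 0; i < nr; i++)
                    submit_bio(WRITE, bios[i]);

            /* Drops the plug and calls blk_flush_plug_list() to push the
             * batched requests into their queues in one go. */
            blk_finish_plug(&plug);
    }
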