Lines matching refs:rq in block/blk-core.c. Each entry gives the source line number, the matching line, and the enclosing function; "argument" or "local" marks the lines where rq is declared.
122 void blk_rq_init(struct request_queue *q, struct request *rq) in blk_rq_init() argument
124 memset(rq, 0, sizeof(*rq)); in blk_rq_init()
126 INIT_LIST_HEAD(&rq->queuelist); in blk_rq_init()
127 INIT_LIST_HEAD(&rq->timeout_list); in blk_rq_init()
128 rq->cpu = -1; in blk_rq_init()
129 rq->q = q; in blk_rq_init()
130 rq->__sector = (sector_t) -1; in blk_rq_init()
131 INIT_HLIST_NODE(&rq->hash); in blk_rq_init()
132 RB_CLEAR_NODE(&rq->rb_node); in blk_rq_init()
133 rq->cmd = rq->__cmd; in blk_rq_init()
134 rq->cmd_len = BLK_MAX_CDB; in blk_rq_init()
135 rq->tag = -1; in blk_rq_init()
136 rq->start_time = jiffies; in blk_rq_init()
137 set_start_time_ns(rq); in blk_rq_init()
138 rq->part = NULL; in blk_rq_init()
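blk_rq_init() puts a request into a known-clean state: zeroed, empty queuelist/hash nodes, no tag, cmd pointing at the inline __cmd buffer, and start_time stamped. It is exported, so code that embeds a struct request outside the mempool path (the flush machinery is one in-tree user) calls it before first use. A minimal sketch, assuming a driver-owned embedded request:

    /* Sketch: re-initialising a driver-owned, embedded request before
     * reuse.  "my_dev", "queue" and "special_rq" are hypothetical fields. */
    static void mydrv_reset_special_rq(struct my_dev *dev)
    {
            struct request *rq = &dev->special_rq;

            blk_rq_init(dev->queue, rq);    /* zero it; reset list/hash nodes, tag, inline CDB */
            rq->special = dev;              /* recovered later when the request is processed */
    }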
142 static void req_bio_endio(struct request *rq, struct bio *bio, in req_bio_endio() argument
148 if (unlikely(rq->cmd_flags & REQ_QUIET)) in req_bio_endio()
154 if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) in req_bio_endio()
158 void blk_dump_rq_flags(struct request *rq, char *msg) in blk_dump_rq_flags() argument
163 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, in blk_dump_rq_flags()
164 (unsigned long long) rq->cmd_flags); in blk_dump_rq_flags()
167 (unsigned long long)blk_rq_pos(rq), in blk_dump_rq_flags()
168 blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); in blk_dump_rq_flags()
170 rq->bio, rq->biotail, blk_rq_bytes(rq)); in blk_dump_rq_flags()
172 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { in blk_dump_rq_flags()
175 printk("%02x ", rq->cmd[bit]); in blk_dump_rq_flags()
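blk_dump_rq_flags() is a debugging aid: it prints the owning disk, cmd_type and cmd_flags, the position and byte counts, the bio chain pointers, and, for BLOCK_PC requests, the raw CDB bytes. A hedged example of a typical call site, a driver logging a request it is about to fail:

    /* Sketch: logging a request the driver cannot handle before failing it.
     * The "mydrv:" prefix is illustrative. */
    static void mydrv_reject(struct request *rq)
    {
            blk_dump_rq_flags(rq, "mydrv: unsupported request");
            __blk_end_request_all(rq, -EIO);        /* caller holds the queue lock */
    }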
890 static inline void blk_free_request(struct request_list *rl, struct request *rq) in blk_free_request() argument
892 if (rq->cmd_flags & REQ_ELVPRIV) { in blk_free_request()
893 elv_put_request(rl->q, rq); in blk_free_request()
894 if (rq->elv.icq) in blk_free_request()
895 put_io_context(rq->elv.icq->ioc); in blk_free_request()
898 mempool_free(rq, rl->rq_pool); in blk_free_request()
1064 struct request *rq; in __get_request() local
1138 rq = mempool_alloc(rl->rq_pool, gfp_mask); in __get_request()
1139 if (!rq) in __get_request()
1142 blk_rq_init(q, rq); in __get_request()
1143 blk_rq_set_rl(rq, rl); in __get_request()
1144 rq->cmd_flags = rw_flags | REQ_ALLOCED; in __get_request()
1155 rq->elv.icq = icq; in __get_request()
1156 if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) in __get_request()
1174 return rq; in __get_request()
1186 rq->cmd_flags &= ~REQ_ELVPRIV; in __get_request()
1187 rq->elv.icq = NULL; in __get_request()
1238 struct request *rq; in get_request() local
1242 rq = __get_request(rl, rw_flags, bio, gfp_mask); in get_request()
1243 if (!IS_ERR(rq)) in get_request()
1244 return rq; in get_request()
1248 return rq; in get_request()
1276 struct request *rq; in blk_old_get_request() local
1284 rq = get_request(q, rw, NULL, gfp_mask); in blk_old_get_request()
1285 if (IS_ERR(rq)) in blk_old_get_request()
1289 return rq; in blk_old_get_request()
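get_request() and blk_old_get_request() report failure with an ERR_PTR(), never NULL, as the IS_ERR() checks above show; with GFP_KERNEL the allocation may sleep until a request frees up, while GFP_ATOMIC fails fast with -ENOMEM. A hedged fragment of the caller-side contract through blk_get_request():

    /* Sketch: allocating a request outside the normal bio path. */
    rq = blk_get_request(q, WRITE, GFP_KERNEL);     /* may sleep for a free request */
    if (IS_ERR(rq))
            return PTR_ERR(rq);                     /* allocation failed or the queue is dying */
    /* ... fill in the request, execute it ... */
    blk_put_request(rq);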
1335 struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); in blk_make_request() local
1337 if (IS_ERR(rq)) in blk_make_request()
1338 return rq; in blk_make_request()
1340 blk_rq_set_block_pc(rq); in blk_make_request()
1347 ret = blk_rq_append_bio(q, rq, bounce_bio); in blk_make_request()
1349 blk_put_request(rq); in blk_make_request()
1354 return rq; in blk_make_request()
1363 void blk_rq_set_block_pc(struct request *rq) in blk_rq_set_block_pc() argument
1365 rq->cmd_type = REQ_TYPE_BLOCK_PC; in blk_rq_set_block_pc()
1366 rq->__data_len = 0; in blk_rq_set_block_pc()
1367 rq->__sector = (sector_t) -1; in blk_rq_set_block_pc()
1368 rq->bio = rq->biotail = NULL; in blk_rq_set_block_pc()
1369 memset(rq->__cmd, 0, sizeof(rq->__cmd)); in blk_rq_set_block_pc()
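blk_rq_set_block_pc() flips a freshly allocated request to REQ_TYPE_BLOCK_PC and clears its data length, sector, bio pointers, and inline CDB; the caller still supplies the CDB, its length, the timeout, an optional sense buffer, and any data mapping. A hedged sketch of the usual passthrough pattern (the TEST UNIT READY command is only an illustration):

    #include <linux/blkdev.h>
    #include <scsi/scsi.h>          /* TEST_UNIT_READY */
    #include <scsi/scsi_cmnd.h>     /* SCSI_SENSE_BUFFERSIZE */

    /* Sketch: issuing a SCSI TEST UNIT READY as a BLOCK_PC request and
     * waiting for it; command-status decoding is omitted. */
    static int mydrv_test_unit_ready(struct request_queue *q)
    {
            unsigned char sense[SCSI_SENSE_BUFFERSIZE];
            struct request *rq;
            int err;

            rq = blk_get_request(q, READ, GFP_KERNEL);
            if (IS_ERR(rq))
                    return PTR_ERR(rq);

            blk_rq_set_block_pc(rq);            /* REQ_TYPE_BLOCK_PC, empty bio list, zeroed CDB */
            rq->cmd[0] = TEST_UNIT_READY;       /* 6-byte CDB, remaining bytes stay zero */
            rq->cmd_len = 6;
            rq->sense = sense;
            rq->sense_len = 0;
            rq->timeout = 30 * HZ;

            blk_execute_rq(q, NULL, rq, 0);     /* insert and wait for completion */
            err = rq->errors ? -EIO : 0;
            blk_put_request(rq);
            return err;
    }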
1383 void blk_requeue_request(struct request_queue *q, struct request *rq) in blk_requeue_request() argument
1385 blk_delete_timer(rq); in blk_requeue_request()
1386 blk_clear_rq_complete(rq); in blk_requeue_request()
1387 trace_block_rq_requeue(q, rq); in blk_requeue_request()
1389 if (rq->cmd_flags & REQ_QUEUED) in blk_requeue_request()
1390 blk_queue_end_tag(q, rq); in blk_requeue_request()
1392 BUG_ON(blk_queued_rq(rq)); in blk_requeue_request()
1394 elv_requeue_request(q, rq); in blk_requeue_request()
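blk_requeue_request() unwinds a started request: it deletes the timeout timer, clears the completion marker, drops the tag if one was assigned, and hands the request back to the elevator. A hedged fragment of how a legacy request_fn backs off on a temporary resource shortage:

    /* Sketch: backing off inside ->request_fn() when the hardware has no
     * free slot.  rq was just returned by blk_fetch_request(); the queue
     * lock is held, and "mydrv_hw_slot_available" is hypothetical. */
    if (!mydrv_hw_slot_available(dev)) {
            blk_requeue_request(q, rq);     /* stop the timer, drop the tag, back to the elevator */
            blk_delay_queue(q, 10);         /* re-run the queue in ~10 ms */
            return;
    }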
1398 static void add_acct_request(struct request_queue *q, struct request *rq, in add_acct_request() argument
1401 blk_account_io_start(rq, true); in add_acct_request()
1402 __elv_add_request(q, rq, where); in add_acct_request()
1449 static void blk_pm_put_request(struct request *rq) in blk_pm_put_request() argument
1451 if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending) in blk_pm_put_request()
1452 pm_runtime_mark_last_busy(rq->q->dev); in blk_pm_put_request()
1455 static inline void blk_pm_put_request(struct request *rq) {} in blk_pm_put_request() argument
1525 void blk_add_request_payload(struct request *rq, struct page *page, in blk_add_request_payload() argument
1528 struct bio *bio = rq->bio; in blk_add_request_payload()
1538 rq->__data_len = rq->resid_len = len; in blk_add_request_payload()
1539 rq->nr_phys_segments = 1; in blk_add_request_payload()
1616 struct request *rq; in blk_attempt_plug_merge() local
1630 list_for_each_entry_reverse(rq, plug_list, queuelist) { in blk_attempt_plug_merge()
1633 if (rq->q == q) { in blk_attempt_plug_merge()
1641 *same_queue_rq = rq; in blk_attempt_plug_merge()
1644 if (rq->q != q || !blk_rq_merge_ok(rq, bio)) in blk_attempt_plug_merge()
1647 el_ret = blk_try_merge(rq, bio); in blk_attempt_plug_merge()
1649 ret = bio_attempt_back_merge(q, rq, bio); in blk_attempt_plug_merge()
1653 ret = bio_attempt_front_merge(q, rq, bio); in blk_attempt_plug_merge()
1665 struct request *rq; in blk_plug_queued_count() local
1678 list_for_each_entry(rq, plug_list, queuelist) { in blk_plug_queued_count()
1679 if (rq->q == q) in blk_plug_queued_count()
2150 struct request *rq) in blk_cloned_rq_check_limits() argument
2152 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) { in blk_cloned_rq_check_limits()
2163 blk_recalc_rq_segments(rq); in blk_cloned_rq_check_limits()
2164 if (rq->nr_phys_segments > queue_max_segments(q)) { in blk_cloned_rq_check_limits()
2177 int blk_insert_cloned_request(struct request_queue *q, struct request *rq) in blk_insert_cloned_request() argument
2182 if (blk_cloned_rq_check_limits(q, rq)) in blk_insert_cloned_request()
2185 if (rq->rq_disk && in blk_insert_cloned_request()
2186 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq))) in blk_insert_cloned_request()
2191 blk_account_io_start(rq, true); in blk_insert_cloned_request()
2192 blk_mq_insert_request(rq, false, true, false); in blk_insert_cloned_request()
2206 BUG_ON(blk_queued_rq(rq)); in blk_insert_cloned_request()
2208 if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA)) in blk_insert_cloned_request()
2211 add_acct_request(q, rq, where); in blk_insert_cloned_request()
2236 unsigned int blk_rq_err_bytes(const struct request *rq) in blk_rq_err_bytes() argument
2238 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; in blk_rq_err_bytes()
2242 if (!(rq->cmd_flags & REQ_MIXED_MERGE)) in blk_rq_err_bytes()
2243 return blk_rq_bytes(rq); in blk_rq_err_bytes()
2252 for (bio = rq->bio; bio; bio = bio->bi_next) { in blk_rq_err_bytes()
2259 BUG_ON(blk_rq_bytes(rq) && !bytes); in blk_rq_err_bytes()
2310 struct request *rq) in blk_pm_peek_request() argument
2313 (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM)))) in blk_pm_peek_request()
2316 return rq; in blk_pm_peek_request()
2320 struct request *rq) in blk_pm_peek_request() argument
2322 return rq; in blk_pm_peek_request()
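The blk_pm_* hooks tie dispatch to runtime PM: blk_pm_put_request() marks the device last-busy when its queue drains, and blk_pm_peek_request() holds back everything except REQ_PM requests while the device is not RPM_ACTIVE. A driver opts in by registering the queue with its struct device; a hedged sketch of that probe-time setup, assuming "q" and "dev" are the driver's queue and device:

    blk_pm_runtime_init(q, dev);                    /* let the block core drive pm_runtime_*() */
    pm_runtime_set_autosuspend_delay(dev, 5000);    /* suspend ~5 s after the queue drains */
    pm_runtime_use_autosuspend(dev);
    pm_runtime_allow(dev);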
2326 void blk_account_io_start(struct request *rq, bool new_io) in blk_account_io_start() argument
2329 int rw = rq_data_dir(rq); in blk_account_io_start()
2332 if (!blk_do_io_stat(rq)) in blk_account_io_start()
2338 part = rq->part; in blk_account_io_start()
2341 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); in blk_account_io_start()
2351 part = &rq->rq_disk->part0; in blk_account_io_start()
2356 rq->part = part; in blk_account_io_start()
2380 struct request *rq; in blk_peek_request() local
2383 while ((rq = __elv_next_request(q)) != NULL) { in blk_peek_request()
2385 rq = blk_pm_peek_request(q, rq); in blk_peek_request()
2386 if (!rq) in blk_peek_request()
2389 if (!(rq->cmd_flags & REQ_STARTED)) { in blk_peek_request()
2395 if (rq->cmd_flags & REQ_SORTED) in blk_peek_request()
2396 elv_activate_rq(q, rq); in blk_peek_request()
2403 rq->cmd_flags |= REQ_STARTED; in blk_peek_request()
2404 trace_block_rq_issue(q, rq); in blk_peek_request()
2407 if (!q->boundary_rq || q->boundary_rq == rq) { in blk_peek_request()
2408 q->end_sector = rq_end_sector(rq); in blk_peek_request()
2412 if (rq->cmd_flags & REQ_DONTPREP) in blk_peek_request()
2415 if (q->dma_drain_size && blk_rq_bytes(rq)) { in blk_peek_request()
2422 rq->nr_phys_segments++; in blk_peek_request()
2428 ret = q->prep_rq_fn(q, rq); in blk_peek_request()
2438 if (q->dma_drain_size && blk_rq_bytes(rq) && in blk_peek_request()
2439 !(rq->cmd_flags & REQ_DONTPREP)) { in blk_peek_request()
2444 --rq->nr_phys_segments; in blk_peek_request()
2447 rq = NULL; in blk_peek_request()
2450 rq->cmd_flags |= REQ_QUIET; in blk_peek_request()
2455 blk_start_request(rq); in blk_peek_request()
2456 __blk_end_request_all(rq, -EIO); in blk_peek_request()
2463 return rq; in blk_peek_request()
2467 void blk_dequeue_request(struct request *rq) in blk_dequeue_request() argument
2469 struct request_queue *q = rq->q; in blk_dequeue_request()
2471 BUG_ON(list_empty(&rq->queuelist)); in blk_dequeue_request()
2472 BUG_ON(ELV_ON_HASH(rq)); in blk_dequeue_request()
2474 list_del_init(&rq->queuelist); in blk_dequeue_request()
2481 if (blk_account_rq(rq)) { in blk_dequeue_request()
2482 q->in_flight[rq_is_sync(rq)]++; in blk_dequeue_request()
2483 set_io_start_time_ns(rq); in blk_dequeue_request()
2535 struct request *rq; in blk_fetch_request() local
2537 rq = blk_peek_request(q); in blk_fetch_request()
2538 if (rq) in blk_fetch_request()
2539 blk_start_request(rq); in blk_fetch_request()
2540 return rq; in blk_fetch_request()
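blk_peek_request() returns the next dispatchable request (applying the PM gate and ->prep_rq_fn) without removing it, blk_dequeue_request() takes it off the queue and starts in-flight accounting, and blk_fetch_request() combines peek and start. The canonical legacy driver loop, as a hedged sketch:

    /* Sketch: a classic single-queue ->request_fn().  It runs with
     * q->queue_lock held; "mydrv_issue" is a hypothetical helper that
     * programs the hardware and completes the request later. */
    static void mydrv_request_fn(struct request_queue *q)
    {
            struct request *rq;

            while ((rq = blk_fetch_request(q)) != NULL) {   /* peek + start + dequeue */
                    if (rq->cmd_type != REQ_TYPE_FS) {
                            __blk_end_request_all(rq, -EIO);
                            continue;
                    }
                    mydrv_issue(rq);
            }
    }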
2681 static bool blk_update_bidi_request(struct request *rq, int error, in blk_update_bidi_request() argument
2685 if (blk_update_request(rq, error, nr_bytes)) in blk_update_bidi_request()
2689 if (unlikely(blk_bidi_rq(rq)) && in blk_update_bidi_request()
2690 blk_update_request(rq->next_rq, error, bidi_bytes)) in blk_update_bidi_request()
2693 if (blk_queue_add_random(rq->q)) in blk_update_bidi_request()
2694 add_disk_randomness(rq->rq_disk); in blk_update_bidi_request()
2767 static bool blk_end_bidi_request(struct request *rq, int error, in blk_end_bidi_request() argument
2770 struct request_queue *q = rq->q; in blk_end_bidi_request()
2773 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) in blk_end_bidi_request()
2777 blk_finish_request(rq, error); in blk_end_bidi_request()
2798 bool __blk_end_bidi_request(struct request *rq, int error, in __blk_end_bidi_request() argument
2801 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) in __blk_end_bidi_request()
2804 blk_finish_request(rq, error); in __blk_end_bidi_request()
2823 bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes) in blk_end_request() argument
2825 return blk_end_bidi_request(rq, error, nr_bytes, 0); in blk_end_request()
2837 void blk_end_request_all(struct request *rq, int error) in blk_end_request_all() argument
2842 if (unlikely(blk_bidi_rq(rq))) in blk_end_request_all()
2843 bidi_bytes = blk_rq_bytes(rq->next_rq); in blk_end_request_all()
2845 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); in blk_end_request_all()
2862 bool blk_end_request_cur(struct request *rq, int error) in blk_end_request_cur() argument
2864 return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); in blk_end_request_cur()
2880 bool blk_end_request_err(struct request *rq, int error) in blk_end_request_err() argument
2883 return blk_end_request(rq, error, blk_rq_err_bytes(rq)); in blk_end_request_err()
2900 bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) in __blk_end_request() argument
2902 return __blk_end_bidi_request(rq, error, nr_bytes, 0); in __blk_end_request()
2914 void __blk_end_request_all(struct request *rq, int error) in __blk_end_request_all() argument
2919 if (unlikely(blk_bidi_rq(rq))) in __blk_end_request_all()
2920 bidi_bytes = blk_rq_bytes(rq->next_rq); in __blk_end_request_all()
2922 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); in __blk_end_request_all()
2940 bool __blk_end_request_cur(struct request *rq, int error) in __blk_end_request_cur() argument
2942 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); in __blk_end_request_cur()
2959 bool __blk_end_request_err(struct request *rq, int error) in __blk_end_request_err() argument
2962 return __blk_end_request(rq, error, blk_rq_err_bytes(rq)); in __blk_end_request_err()
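The blk_end_request*() family retires part or all of a request: the *_all variants complete everything outstanding, *_cur completes just the current chunk, *_err completes up to the next failfast boundary, and the __-prefixed versions expect the caller to already hold the queue lock. A hedged sketch of chunked completion from a driver's completion path:

    /* Sketch: per-chunk completion.  blk_end_request() takes the queue
     * lock itself; use the __blk_end_*() variants only when it is already
     * held.  "mydrv_issue_next_chunk" is hypothetical. */
    static void mydrv_complete_chunk(struct request *rq, unsigned int bytes, int error)
    {
            if (!blk_end_request(rq, error, bytes))
                    return;                         /* all bytes done, request is finished */
            mydrv_issue_next_chunk(rq);             /* more of rq remains to be transferred */
    }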
2966 void blk_rq_bio_prep(struct request_queue *q, struct request *rq, in blk_rq_bio_prep() argument
2970 rq->cmd_flags |= bio->bi_rw & REQ_WRITE; in blk_rq_bio_prep()
2973 rq->nr_phys_segments = bio_phys_segments(q, bio); in blk_rq_bio_prep()
2975 rq->__data_len = bio->bi_iter.bi_size; in blk_rq_bio_prep()
2976 rq->bio = rq->biotail = bio; in blk_rq_bio_prep()
2979 rq->rq_disk = bio->bi_bdev->bd_disk; in blk_rq_bio_prep()
2990 void rq_flush_dcache_pages(struct request *rq) in rq_flush_dcache_pages() argument
2995 rq_for_each_segment(bvec, rq, iter) in rq_flush_dcache_pages()
3036 void blk_rq_unprep_clone(struct request *rq) in blk_rq_unprep_clone() argument
3040 while ((bio = rq->bio) != NULL) { in blk_rq_unprep_clone()
3041 rq->bio = bio->bi_next; in blk_rq_unprep_clone()
3083 int blk_rq_prep_clone(struct request *rq, struct request *rq_src, in blk_rq_prep_clone() argument
3101 if (rq->bio) { in blk_rq_prep_clone()
3102 rq->biotail->bi_next = bio; in blk_rq_prep_clone()
3103 rq->biotail = bio; in blk_rq_prep_clone()
3105 rq->bio = rq->biotail = bio; in blk_rq_prep_clone()
3108 __blk_rq_prep_clone(rq, rq_src); in blk_rq_prep_clone()
3115 blk_rq_unprep_clone(rq); in blk_rq_prep_clone()
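blk_rq_prep_clone() copies rq_src's attributes and clones its bio chain onto rq (optionally through a bio_set and a per-bio callback), blk_rq_unprep_clone() frees those clone bios again, and blk_insert_cloned_request() checks the clone against the destination queue's limits before queueing it, the pattern used by request-based device-mapper. A hedged sketch:

    /* Sketch: dispatching a clone onto another queue, dm-multipath style.
     * "clone" must have been allocated against the destination queue;
     * a NULL bio_set and NULL bio_ctr callback are allowed. */
    static int mydrv_dispatch_clone(struct request *clone, struct request *rq,
                                    struct bio_set *bs)
    {
            int ret;

            ret = blk_rq_prep_clone(clone, rq, bs, GFP_ATOMIC, NULL, NULL);
            if (ret)
                    return ret;

            ret = blk_insert_cloned_request(clone->q, clone);
            if (ret)
                    blk_rq_unprep_clone(clone);     /* free the cloned bios again */
            return ret;
    }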
3250 struct request *rq; in blk_flush_plug_list() local
3275 rq = list_entry_rq(list.next); in blk_flush_plug_list()
3276 list_del_init(&rq->queuelist); in blk_flush_plug_list()
3277 BUG_ON(!rq->q); in blk_flush_plug_list()
3278 if (rq->q != q) { in blk_flush_plug_list()
3284 q = rq->q; in blk_flush_plug_list()
3293 __blk_end_request_all(rq, -ENODEV); in blk_flush_plug_list()
3300 if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) in blk_flush_plug_list()
3301 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); in blk_flush_plug_list()
3303 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); in blk_flush_plug_list()
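blk_flush_plug_list() is what empties a task's plug: it sorts the plugged requests, batches work per destination queue under that queue's lock, routes FLUSH/FUA requests through the flush machinery, and finally runs the queues. Submitters only see this through blk_start_plug()/blk_finish_plug(); a hedged sketch:

    /* Sketch: batching submissions under a plug so blk_flush_plug_list()
     * can merge and insert them with fewer lock round-trips.  "bios" and
     * "nr_bios" are hypothetical; submit_bio() is the two-argument form
     * used by kernels of this vintage. */
    static void mydrv_submit_batch(struct bio **bios, int nr_bios)
    {
            struct blk_plug plug;
            int i;

            blk_start_plug(&plug);                  /* per-task plug on current->plug */
            for (i = 0; i < nr_bios; i++)
                    submit_bio(WRITE, bios[i]);     /* requests collect on the plug list */
            blk_finish_plug(&plug);                 /* -> blk_flush_plug_list() drains them */
    }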