Lines Matching refs:rq

183 			       struct request *rq, unsigned int rw_flags)  in blk_mq_rq_ctx_init()  argument
188 INIT_LIST_HEAD(&rq->queuelist); in blk_mq_rq_ctx_init()
190 rq->q = q; in blk_mq_rq_ctx_init()
191 rq->mq_ctx = ctx; in blk_mq_rq_ctx_init()
192 rq->cmd_flags |= rw_flags; in blk_mq_rq_ctx_init()
194 rq->cpu = -1; in blk_mq_rq_ctx_init()
195 INIT_HLIST_NODE(&rq->hash); in blk_mq_rq_ctx_init()
196 RB_CLEAR_NODE(&rq->rb_node); in blk_mq_rq_ctx_init()
197 rq->rq_disk = NULL; in blk_mq_rq_ctx_init()
198 rq->part = NULL; in blk_mq_rq_ctx_init()
199 rq->start_time = jiffies; in blk_mq_rq_ctx_init()
201 rq->rl = NULL; in blk_mq_rq_ctx_init()
202 set_start_time_ns(rq); in blk_mq_rq_ctx_init()
203 rq->io_start_time_ns = 0; in blk_mq_rq_ctx_init()
205 rq->nr_phys_segments = 0; in blk_mq_rq_ctx_init()
207 rq->nr_integrity_segments = 0; in blk_mq_rq_ctx_init()
209 rq->special = NULL; in blk_mq_rq_ctx_init()
211 rq->errors = 0; in blk_mq_rq_ctx_init()
213 rq->cmd = rq->__cmd; in blk_mq_rq_ctx_init()
215 rq->extra_len = 0; in blk_mq_rq_ctx_init()
216 rq->sense_len = 0; in blk_mq_rq_ctx_init()
217 rq->resid_len = 0; in blk_mq_rq_ctx_init()
218 rq->sense = NULL; in blk_mq_rq_ctx_init()
220 INIT_LIST_HEAD(&rq->timeout_list); in blk_mq_rq_ctx_init()
221 rq->timeout = 0; in blk_mq_rq_ctx_init()
223 rq->end_io = NULL; in blk_mq_rq_ctx_init()
224 rq->end_io_data = NULL; in blk_mq_rq_ctx_init()
225 rq->next_rq = NULL; in blk_mq_rq_ctx_init()
233 struct request *rq; in __blk_mq_alloc_request() local
238 rq = data->hctx->tags->rqs[tag]; in __blk_mq_alloc_request()
241 rq->cmd_flags = REQ_MQ_INFLIGHT; in __blk_mq_alloc_request()
245 rq->tag = tag; in __blk_mq_alloc_request()
246 blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw); in __blk_mq_alloc_request()
247 return rq; in __blk_mq_alloc_request()
258 struct request *rq; in blk_mq_alloc_request() local
271 rq = __blk_mq_alloc_request(&alloc_data, rw); in blk_mq_alloc_request()
272 if (!rq && (gfp & __GFP_WAIT)) { in blk_mq_alloc_request()
280 rq = __blk_mq_alloc_request(&alloc_data, rw); in blk_mq_alloc_request()
284 if (!rq) { in blk_mq_alloc_request()
288 return rq; in blk_mq_alloc_request()
293 struct blk_mq_ctx *ctx, struct request *rq) in __blk_mq_free_request() argument
295 const int tag = rq->tag; in __blk_mq_free_request()
296 struct request_queue *q = rq->q; in __blk_mq_free_request()
298 if (rq->cmd_flags & REQ_MQ_INFLIGHT) in __blk_mq_free_request()
300 rq->cmd_flags = 0; in __blk_mq_free_request()
302 clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); in __blk_mq_free_request()
307 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq) in blk_mq_free_hctx_request() argument
309 struct blk_mq_ctx *ctx = rq->mq_ctx; in blk_mq_free_hctx_request()
311 ctx->rq_completed[rq_is_sync(rq)]++; in blk_mq_free_hctx_request()
312 __blk_mq_free_request(hctx, ctx, rq); in blk_mq_free_hctx_request()
317 void blk_mq_free_request(struct request *rq) in blk_mq_free_request() argument
320 struct request_queue *q = rq->q; in blk_mq_free_request()
322 hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu); in blk_mq_free_request()
323 blk_mq_free_hctx_request(hctx, rq); in blk_mq_free_request()
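
A driver-side view of the allocation and free paths listed above: blk_mq_alloc_request() pulls a tag, runs blk_mq_rq_ctx_init() on the request behind it, and blk_mq_free_request() maps the request back to its hardware queue before releasing the tag. A minimal sketch under the assumptions of this kernel era (four-argument blk_mq_alloc_request() taking rw, gfp flags and a reserved-tag flag, and an error-pointer return on failure, neither fully shown above); example_get_internal_rq() and example_put_internal_rq() are illustrative names, not kernel APIs.

#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical helper: borrow a request for a driver-internal command. */
static struct request *example_get_internal_rq(struct request_queue *q)
{
        struct request *rq;

        /* GFP_KERNEL includes __GFP_WAIT, so the allocator above may run
         * the hardware queue and retry before giving up. */
        rq = blk_mq_alloc_request(q, READ, GFP_KERNEL, false);
        if (IS_ERR(rq))                 /* error pointer on failure */
                return NULL;

        /* ... fill in driver-specific fields and issue the command ... */
        return rq;
}

static void example_put_internal_rq(struct request *rq)
{
        /* Looks up the hctx from rq->mq_ctx->cpu and returns the tag. */
        blk_mq_free_request(rq);
}
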
327 inline void __blk_mq_end_request(struct request *rq, int error) in __blk_mq_end_request() argument
329 blk_account_io_done(rq); in __blk_mq_end_request()
331 if (rq->end_io) { in __blk_mq_end_request()
332 rq->end_io(rq, error); in __blk_mq_end_request()
334 if (unlikely(blk_bidi_rq(rq))) in __blk_mq_end_request()
335 blk_mq_free_request(rq->next_rq); in __blk_mq_end_request()
336 blk_mq_free_request(rq); in __blk_mq_end_request()
341 void blk_mq_end_request(struct request *rq, int error) in blk_mq_end_request() argument
343 if (blk_update_request(rq, error, blk_rq_bytes(rq))) in blk_mq_end_request()
345 __blk_mq_end_request(rq, error); in blk_mq_end_request()
351 struct request *rq = data; in __blk_mq_complete_request_remote() local
353 rq->q->softirq_done_fn(rq); in __blk_mq_complete_request_remote()
356 static void blk_mq_ipi_complete_request(struct request *rq) in blk_mq_ipi_complete_request() argument
358 struct blk_mq_ctx *ctx = rq->mq_ctx; in blk_mq_ipi_complete_request()
362 if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) { in blk_mq_ipi_complete_request()
363 rq->q->softirq_done_fn(rq); in blk_mq_ipi_complete_request()
368 if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags)) in blk_mq_ipi_complete_request()
372 rq->csd.func = __blk_mq_complete_request_remote; in blk_mq_ipi_complete_request()
373 rq->csd.info = rq; in blk_mq_ipi_complete_request()
374 rq->csd.flags = 0; in blk_mq_ipi_complete_request()
375 smp_call_function_single_async(ctx->cpu, &rq->csd); in blk_mq_ipi_complete_request()
377 rq->q->softirq_done_fn(rq); in blk_mq_ipi_complete_request()
382 void __blk_mq_complete_request(struct request *rq) in __blk_mq_complete_request() argument
384 struct request_queue *q = rq->q; in __blk_mq_complete_request()
387 blk_mq_end_request(rq, rq->errors); in __blk_mq_complete_request()
389 blk_mq_ipi_complete_request(rq); in __blk_mq_complete_request()
400 void blk_mq_complete_request(struct request *rq) in blk_mq_complete_request() argument
402 struct request_queue *q = rq->q; in blk_mq_complete_request()
406 if (!blk_mark_rq_complete(rq)) in blk_mq_complete_request()
407 __blk_mq_complete_request(rq); in blk_mq_complete_request()
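
Read together, the completion helpers above split the work in two: blk_mq_complete_request() is the half a driver may call from hard-irq context, and it routes the rest to the queue's softirq_done_fn, bouncing it over IPI back to the submitting CPU when QUEUE_FLAG_SAME_COMP asks for that; blk_mq_end_request() then does the byte accounting and frees the request. A hedged sketch of that split; the example_* names, the rq->errors usage, and the fact that blk-mq normally wires softirq_done_fn to the driver's blk_mq_ops .complete callback are not among the matched lines and are assumptions here.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Runs as the queue's softirq_done_fn once blk_mq_complete_request() has
 * steered the completion to the right CPU.  blk_update_request() inside
 * blk_mq_end_request() retires all of blk_rq_bytes(rq) in one go. */
static void example_complete(struct request *rq)
{
        blk_mq_end_request(rq, rq->errors);
}

/* Called from the driver's interrupt handler. */
static void example_hw_irq(struct request *rq, int error)
{
        rq->errors = error;
        blk_mq_complete_request(rq);
}
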
411 int blk_mq_request_started(struct request *rq) in blk_mq_request_started() argument
413 return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags); in blk_mq_request_started()
417 void blk_mq_start_request(struct request *rq) in blk_mq_start_request() argument
419 struct request_queue *q = rq->q; in blk_mq_start_request()
421 trace_block_rq_issue(q, rq); in blk_mq_start_request()
423 rq->resid_len = blk_rq_bytes(rq); in blk_mq_start_request()
424 if (unlikely(blk_bidi_rq(rq))) in blk_mq_start_request()
425 rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq); in blk_mq_start_request()
427 blk_add_timer(rq); in blk_mq_start_request()
441 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) in blk_mq_start_request()
442 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags); in blk_mq_start_request()
443 if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) in blk_mq_start_request()
444 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); in blk_mq_start_request()
446 if (q->dma_drain_size && blk_rq_bytes(rq)) { in blk_mq_start_request()
452 rq->nr_phys_segments++; in blk_mq_start_request()
457 static void __blk_mq_requeue_request(struct request *rq) in __blk_mq_requeue_request() argument
459 struct request_queue *q = rq->q; in __blk_mq_requeue_request()
461 trace_block_rq_requeue(q, rq); in __blk_mq_requeue_request()
463 if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) { in __blk_mq_requeue_request()
464 if (q->dma_drain_size && blk_rq_bytes(rq)) in __blk_mq_requeue_request()
465 rq->nr_phys_segments--; in __blk_mq_requeue_request()
469 void blk_mq_requeue_request(struct request *rq) in blk_mq_requeue_request() argument
471 __blk_mq_requeue_request(rq); in blk_mq_requeue_request()
473 BUG_ON(blk_queued_rq(rq)); in blk_mq_requeue_request()
474 blk_mq_add_to_requeue_list(rq, true); in blk_mq_requeue_request()
483 struct request *rq, *next; in blk_mq_requeue_work() local
490 list_for_each_entry_safe(rq, next, &rq_list, queuelist) { in blk_mq_requeue_work()
491 if (!(rq->cmd_flags & REQ_SOFTBARRIER)) in blk_mq_requeue_work()
494 rq->cmd_flags &= ~REQ_SOFTBARRIER; in blk_mq_requeue_work()
495 list_del_init(&rq->queuelist); in blk_mq_requeue_work()
496 blk_mq_insert_request(rq, true, false, false); in blk_mq_requeue_work()
500 rq = list_entry(rq_list.next, struct request, queuelist); in blk_mq_requeue_work()
501 list_del_init(&rq->queuelist); in blk_mq_requeue_work()
502 blk_mq_insert_request(rq, false, false, false); in blk_mq_requeue_work()
512 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head) in blk_mq_add_to_requeue_list() argument
514 struct request_queue *q = rq->q; in blk_mq_add_to_requeue_list()
521 BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER); in blk_mq_add_to_requeue_list()
525 rq->cmd_flags |= REQ_SOFTBARRIER; in blk_mq_add_to_requeue_list()
526 list_add(&rq->queuelist, &q->requeue_list); in blk_mq_add_to_requeue_list()
528 list_add_tail(&rq->queuelist, &q->requeue_list); in blk_mq_add_to_requeue_list()
556 struct request *rq; in blk_mq_abort_requeue_list() local
558 rq = list_first_entry(&rq_list, struct request, queuelist); in blk_mq_abort_requeue_list()
559 list_del_init(&rq->queuelist); in blk_mq_abort_requeue_list()
560 rq->errors = -EIO; in blk_mq_abort_requeue_list()
561 blk_mq_end_request(rq, rq->errors); in blk_mq_abort_requeue_list()
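
The requeue machinery above is used in two halves from a driver's point of view: blk_mq_requeue_request() un-starts a request (dropping the drain segment again if one was added) and parks it at the head of q->requeue_list with REQ_SOFTBARRIER set, and the requeue work later walks that list and reinserts everything through blk_mq_insert_request(). A sketch of the usual pattern for a transient "device busy" condition; blk_mq_kick_requeue_list() is this era's helper for scheduling the requeue work and is an assumption here, since it does not appear among the matched lines.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical handler for a completion that says "try again later". */
static void example_retry_later(struct request *rq)
{
        struct request_queue *q = rq->q;

        /* Clears REQ_ATOM_STARTED and adds rq to q->requeue_list. */
        blk_mq_requeue_request(rq);

        /* In this era the caller kicks the requeue work explicitly. */
        blk_mq_kick_requeue_list(q);
}
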
566 static inline bool is_flush_request(struct request *rq, in is_flush_request() argument
569 return ((rq->cmd_flags & REQ_FLUSH_SEQ) && in is_flush_request()
575 struct request *rq = tags->rqs[tag]; in blk_mq_tag_to_rq() local
577 struct blk_flush_queue *fq = blk_get_flush_queue(rq->q, rq->mq_ctx); in blk_mq_tag_to_rq()
579 if (!is_flush_request(rq, fq, tag)) in blk_mq_tag_to_rq()
580 return rq; in blk_mq_tag_to_rq()
628 struct request *rq, void *priv, bool reserved) in blk_mq_check_expired() argument
632 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) { in blk_mq_check_expired()
637 if (unlikely(blk_queue_dying(rq->q))) { in blk_mq_check_expired()
638 rq->errors = -EIO; in blk_mq_check_expired()
639 blk_mq_complete_request(rq); in blk_mq_check_expired()
643 if (rq->cmd_flags & REQ_NO_TIMEOUT) in blk_mq_check_expired()
646 if (time_after_eq(jiffies, rq->deadline)) { in blk_mq_check_expired()
647 if (!blk_mark_rq_complete(rq)) in blk_mq_check_expired()
648 blk_mq_rq_timed_out(rq, reserved); in blk_mq_check_expired()
649 } else if (!data->next_set || time_after(data->next, rq->deadline)) { in blk_mq_check_expired()
650 data->next = rq->deadline; in blk_mq_check_expired()
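
blk_mq_check_expired() above is the per-request half of the timeout scan: it skips requests that never started, fails everything with -EIO when the queue is dying, honours REQ_NO_TIMEOUT, and hands anything past rq->deadline to blk_mq_rq_timed_out(), which ends up in the driver's blk_mq_ops .timeout callback. A hedged sketch of such a callback, assuming the two-argument timeout signature and BLK_EH_* return codes of this era (not shown in the matched lines); example_abort_cmd() is a stand-in for whatever hardware abort the driver would really issue.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical hardware abort; always "succeeds" in this sketch. */
static bool example_abort_cmd(struct request *rq)
{
        return true;
}

static enum blk_eh_timer_return example_timeout(struct request *rq,
                                                bool reserved)
{
        if (example_abort_cmd(rq)) {
                rq->errors = -EIO;
                return BLK_EH_HANDLED;  /* core completes the request */
        }

        /* Abort still pending: re-arm the timer and wait a bit longer. */
        return BLK_EH_RESET_TIMER;
}
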
696 struct request *rq; in blk_mq_attempt_merge() local
699 list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) { in blk_mq_attempt_merge()
705 if (!blk_rq_merge_ok(rq, bio)) in blk_mq_attempt_merge()
708 el_ret = blk_try_merge(rq, bio); in blk_mq_attempt_merge()
710 if (bio_attempt_back_merge(q, rq, bio)) { in blk_mq_attempt_merge()
716 if (bio_attempt_front_merge(q, rq, bio)) { in blk_mq_attempt_merge()
770 struct request *rq; in __blk_mq_run_hw_queue() local
813 rq = list_first_entry(&rq_list, struct request, queuelist); in __blk_mq_run_hw_queue()
814 list_del_init(&rq->queuelist); in __blk_mq_run_hw_queue()
816 bd.rq = rq; in __blk_mq_run_hw_queue()
826 list_add(&rq->queuelist, &rq_list); in __blk_mq_run_hw_queue()
827 __blk_mq_requeue_request(rq); in __blk_mq_run_hw_queue()
832 rq->errors = -EIO; in __blk_mq_run_hw_queue()
833 blk_mq_end_request(rq, rq->errors); in __blk_mq_run_hw_queue()
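
The __blk_mq_run_hw_queue() lines above show the dispatch contract: each request is popped off the collected list, wrapped in a blk_mq_queue_data and passed to the driver's .queue_rq; a busy return puts the request back on the list and un-starts it via __blk_mq_requeue_request(), while an error return fails it with -EIO. A minimal .queue_rq of the kind that loop expects, assuming the three BLK_MQ_RQ_QUEUE_* return codes of this era; example_issue() is a made-up hardware submit.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical hardware submit: 0 on success, -EBUSY if no slot is free. */
static int example_issue(struct request *rq)
{
        return 0;
}

static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
                            const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;
        int ret;

        blk_mq_start_request(rq);       /* arms the timeout, see above */

        ret = example_issue(rq);
        if (ret == -EBUSY)
                return BLK_MQ_RQ_QUEUE_BUSY;    /* dispatch loop requeues rq */
        if (ret)
                return BLK_MQ_RQ_QUEUE_ERROR;   /* dispatch loop fails rq with -EIO */

        return BLK_MQ_RQ_QUEUE_OK;
}
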
1019 struct request *rq, bool at_head) in __blk_mq_insert_request() argument
1021 struct blk_mq_ctx *ctx = rq->mq_ctx; in __blk_mq_insert_request()
1023 trace_block_rq_insert(hctx->queue, rq); in __blk_mq_insert_request()
1026 list_add(&rq->queuelist, &ctx->rq_list); in __blk_mq_insert_request()
1028 list_add_tail(&rq->queuelist, &ctx->rq_list); in __blk_mq_insert_request()
1033 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue, in blk_mq_insert_request() argument
1036 struct request_queue *q = rq->q; in blk_mq_insert_request()
1038 struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx; in blk_mq_insert_request()
1042 rq->mq_ctx = ctx = current_ctx; in blk_mq_insert_request()
1047 __blk_mq_insert_request(hctx, rq, at_head); in blk_mq_insert_request()
1080 struct request *rq; in blk_mq_insert_requests() local
1082 rq = list_first_entry(list, struct request, queuelist); in blk_mq_insert_requests()
1083 list_del_init(&rq->queuelist); in blk_mq_insert_requests()
1084 rq->mq_ctx = ctx; in blk_mq_insert_requests()
1085 __blk_mq_insert_request(hctx, rq, false); in blk_mq_insert_requests()
1107 struct request *rq; in blk_mq_flush_plug_list() local
1121 rq = list_entry_rq(list.next); in blk_mq_flush_plug_list()
1122 list_del_init(&rq->queuelist); in blk_mq_flush_plug_list()
1123 BUG_ON(!rq->q); in blk_mq_flush_plug_list()
1124 if (rq->mq_ctx != this_ctx) { in blk_mq_flush_plug_list()
1131 this_ctx = rq->mq_ctx; in blk_mq_flush_plug_list()
1132 this_q = rq->q; in blk_mq_flush_plug_list()
1137 list_add_tail(&rq->queuelist, &ctx_list); in blk_mq_flush_plug_list()
1150 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio) in blk_mq_bio_to_request() argument
1152 init_request_from_bio(rq, bio); in blk_mq_bio_to_request()
1154 if (blk_do_io_stat(rq)) in blk_mq_bio_to_request()
1155 blk_account_io_start(rq, 1); in blk_mq_bio_to_request()
1166 struct request *rq, struct bio *bio) in blk_mq_merge_queue_io() argument
1169 blk_mq_bio_to_request(rq, bio); in blk_mq_merge_queue_io()
1172 __blk_mq_insert_request(hctx, rq, false); in blk_mq_merge_queue_io()
1180 blk_mq_bio_to_request(rq, bio); in blk_mq_merge_queue_io()
1185 __blk_mq_free_request(hctx, ctx, rq); in blk_mq_merge_queue_io()
1201 struct request *rq; in blk_mq_map_request() local
1219 rq = __blk_mq_alloc_request(&alloc_data, rw); in blk_mq_map_request()
1220 if (unlikely(!rq)) { in blk_mq_map_request()
1229 rq = __blk_mq_alloc_request(&alloc_data, rw); in blk_mq_map_request()
1237 return rq; in blk_mq_map_request()
1250 struct request *rq; in blk_mq_make_request() local
1259 rq = blk_mq_map_request(q, bio, &data); in blk_mq_make_request()
1260 if (unlikely(!rq)) in blk_mq_make_request()
1264 blk_mq_bio_to_request(rq, bio); in blk_mq_make_request()
1265 blk_insert_flush(rq); in blk_mq_make_request()
1276 .rq = rq, in blk_mq_make_request()
1282 blk_mq_bio_to_request(rq, bio); in blk_mq_make_request()
1293 __blk_mq_requeue_request(rq); in blk_mq_make_request()
1296 rq->errors = -EIO; in blk_mq_make_request()
1297 blk_mq_end_request(rq, rq->errors); in blk_mq_make_request()
1303 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) { in blk_mq_make_request()
1327 struct request *rq; in blk_sq_make_request() local
1346 rq = blk_mq_map_request(q, bio, &data); in blk_sq_make_request()
1347 if (unlikely(!rq)) in blk_sq_make_request()
1351 blk_mq_bio_to_request(rq, bio); in blk_sq_make_request()
1352 blk_insert_flush(rq); in blk_sq_make_request()
1365 blk_mq_bio_to_request(rq, bio); in blk_sq_make_request()
1372 list_add_tail(&rq->queuelist, &plug->mq_list); in blk_sq_make_request()
1378 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) { in blk_sq_make_request()
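
blk_sq_make_request() above appends requests to plug->mq_list when the caller is plugged, and blk_mq_flush_plug_list() later groups that list by software queue (ctx) and inserts the requests in batches. From the submitter's side this is driven by the ordinary plugging API; a short sketch, with the bio-building step elided because it is not part of the lines matched here and example_plugged_submit() being an illustrative name.

#include <linux/blkdev.h>

static void example_plugged_submit(void)
{
        struct blk_plug plug;

        blk_start_plug(&plug);
        /* ... build and submit bios; blk_sq_make_request() queues the
         *     resulting requests on plug->mq_list instead of dispatching
         *     them one by one ... */
        blk_finish_plug(&plug);         /* drains via blk_mq_flush_plug_list() */
}
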
1570 struct request *rq; in blk_mq_hctx_cpu_offline() local
1572 rq = list_first_entry(&tmp, struct request, queuelist); in blk_mq_hctx_cpu_offline()
1573 rq->mq_ctx = ctx; in blk_mq_hctx_cpu_offline()
1574 list_move_tail(&rq->queuelist, &ctx->rq_list); in blk_mq_hctx_cpu_offline()