/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched/sysctl.h>

#include "blk.h"

/*
 * for max sense size
 */
#include <scsi/scsi_cmnd.h>

/**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
 * @error: end I/O status of the request
 *
 * Used as the rq_end_io_fn for synchronous execution: wakes up the
 * task sleeping in blk_execute_rq() via the completion stashed in
 * @rq->end_io_data.
 */
static void blk_end_sync_rq(struct request *rq, int error)
{
	struct completion *waiting = rq->end_io_data;

	/* detach the completion before signalling it */
	rq->end_io_data = NULL;

	/*
	 * complete last, if this is a stack request the process (and thus
	 * the rq pointer) could be invalid right after this complete()
	 */
	complete(waiting);
}

/**
 * blk_execute_rq_nowait - insert a request into queue for execution
 * @q:		queue to insert the request in
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:	insert request at head or tail of queue
 * @done:	I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
			   struct request *rq, int at_head,
			   rq_end_io_fn *done)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
	bool is_pm_resume;

	/* must be called from process context; we take queue_lock with irqs on */
	WARN_ON(irqs_disabled());
	/* this path is for passthrough requests only, never normal FS I/O */
	WARN_ON(rq->cmd_type == REQ_TYPE_FS);

	rq->rq_disk = bd_disk;
	rq->end_io = done;

	/*
	 * don't check dying flag for MQ because the request won't
	 * be resued after dying flag is set
	 */
	if (q->mq_ops) {
		blk_mq_insert_request(rq, at_head, true, false);
		return;
	}

	/*
	 * need to check this before __blk_run_queue(), because rq can
	 * be freed before that returns.
	 */
	is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;

	spin_lock_irq(q->queue_lock);

	if (unlikely(blk_queue_dying(q))) {
		/* fail the request immediately; @done runs via end_request */
		rq->cmd_flags |= REQ_QUIET;
		rq->errors = -ENXIO;
		__blk_end_request_all(rq, rq->errors);
		spin_unlock_irq(q->queue_lock);
		return;
	}

	__elv_add_request(q, rq, where);
	__blk_run_queue(q);
	/* the queue is stopped so it won't be run */
	if (is_pm_resume)
		__blk_run_queue_uncond(q);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);

/**
 * blk_execute_rq - insert a request into queue for execution
 * @q:		queue to insert the request in
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:	insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 *
 * Return: 0 on success, -EIO if the request completed with errors.
 */
int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
		   struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	char sense[SCSI_SENSE_BUFFERSIZE];
	int err = 0;
	unsigned long hang_check;

	/*
	 * Provide a scratch sense buffer if the caller didn't supply one.
	 * It lives on our stack, which is safe only because we wait for
	 * completion below before returning.
	 */
	if (!rq->sense) {
		memset(sense, 0, sizeof(sense));
		rq->sense = sense;
		rq->sense_len = 0;
	}

	rq->end_io_data = &wait;
	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		/* wake up periodically (at half the hang-check interval) so the
		 * hung-task detector doesn't flag us while legitimately waiting */
		while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
	else
		wait_for_completion_io(&wait);

	if (rq->errors)
		err = -EIO;

	/* undo the stack-buffer substitution before rq outlives this frame */
	if (rq->sense == sense) {
		rq->sense = NULL;
		rq->sense_len = 0;
	}

	return err;
}
EXPORT_SYMBOL(blk_execute_rq);