/*
 * Functions for the synchronous and asynchronous execution of
 * fully-prepared block layer requests
 */
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <linux/bio.h>
7#include <linux/blkdev.h>
8#include <linux/blk-mq.h>
9#include <linux/sched/sysctl.h>
10
11#include "blk.h"
12
13/*
14 * for max sense size
15 */
16#include <scsi/scsi_cmnd.h>
17
18/**
19 * blk_end_sync_rq - executes a completion event on a request
20 * @rq: request to complete
21 * @error: end I/O status of the request
22 */
23static void blk_end_sync_rq(struct request *rq, int error)
24{
25	struct completion *waiting = rq->end_io_data;
26
27	rq->end_io_data = NULL;
28
29	/*
30	 * complete last, if this is a stack request the process (and thus
31	 * the rq pointer) could be invalid right after this complete()
32	 */
33	complete(waiting);
34}
35
36/**
37 * blk_execute_rq_nowait - insert a request into queue for execution
38 * @q:		queue to insert the request in
39 * @bd_disk:	matching gendisk
40 * @rq:		request to insert
41 * @at_head:    insert request at head or tail of queue
42 * @done:	I/O completion handler
43 *
44 * Description:
45 *    Insert a fully prepared request at the back of the I/O scheduler queue
46 *    for execution.  Don't wait for completion.
47 *
48 * Note:
49 *    This function will invoke @done directly if the queue is dead.
50 */
/**
 * blk_execute_rq_nowait - insert a request into queue for execution
 * @q:		queue to insert the request in
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:    insert request at head or tail of queue
 * @done:	I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead
 *    (via __blk_end_request_all(), which runs rq->end_io).
 */
void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
			   struct request *rq, int at_head,
			   rq_end_io_fn *done)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;

	/* must be called from process context: we may sleep on queue_lock */
	WARN_ON(irqs_disabled());
	/* only passthrough (non-filesystem) requests may be executed here */
	WARN_ON(rq->cmd_type == REQ_TYPE_FS);

	rq->rq_disk = bd_disk;
	rq->end_io = done;

	/*
	 * don't check dying flag for MQ because the request won't
	 * be reused after dying flag is set
	 */
	if (q->mq_ops) {
		blk_mq_insert_request(rq, at_head, true, false);
		return;
	}

	spin_lock_irq(q->queue_lock);

	if (unlikely(blk_queue_dying(q))) {
		/* suppress error logging; fail the request immediately */
		rq->cmd_flags |= REQ_QUIET;
		rq->errors = -ENXIO;
		/* completes the request and invokes rq->end_io (@done) */
		__blk_end_request_all(rq, rq->errors);
		spin_unlock_irq(q->queue_lock);
		return;
	}

	__elv_add_request(q, rq, where);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
87
88/**
89 * blk_execute_rq - insert a request into queue for execution
90 * @q:		queue to insert the request in
91 * @bd_disk:	matching gendisk
92 * @rq:		request to insert
93 * @at_head:    insert request at head or tail of queue
94 *
95 * Description:
96 *    Insert a fully prepared request at the back of the I/O scheduler queue
97 *    for execution and wait for completion.
98 */
99int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
100		   struct request *rq, int at_head)
101{
102	DECLARE_COMPLETION_ONSTACK(wait);
103	char sense[SCSI_SENSE_BUFFERSIZE];
104	int err = 0;
105	unsigned long hang_check;
106
107	if (!rq->sense) {
108		memset(sense, 0, sizeof(sense));
109		rq->sense = sense;
110		rq->sense_len = 0;
111	}
112
113	rq->end_io_data = &wait;
114	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
115
116	/* Prevent hang_check timer from firing at us during very long I/O */
117	hang_check = sysctl_hung_task_timeout_secs;
118	if (hang_check)
119		while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
120	else
121		wait_for_completion_io(&wait);
122
123	if (rq->errors)
124		err = -EIO;
125
126	if (rq->sense == sense)	{
127		rq->sense = NULL;
128		rq->sense_len = 0;
129	}
130
131	return err;
132}
133EXPORT_SYMBOL(blk_execute_rq);
134