/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_iter.bi_size;
	}
	return 0;
}

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio *bio;
	int unaligned = 0;
	struct iov_iter i;
	struct iovec iov;

	if (!iter || !iter->count)
		return -EINVAL;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;

		if (!iov.iov_len)
			return -EINVAL;

		/*
		 * Keep going so we check length of all segments
		 */
		if (uaddr & queue_dma_alignment(q))
			unaligned = 1;
	}

	if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio->bi_flags |= (1 << BIO_NULL_MAPPED);

	if (bio->bi_iter.bi_size != iter->count) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
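
/*
 * Illustrative sketch only, not used by the block layer itself: a caller
 * mapping a multi-segment user buffer into a block-PC request through an
 * iov_iter.  The iovec array is assumed to have been copied in from
 * userspace already, and request setup/submission is elided; the function
 * name and its arguments are hypothetical.
 */
static int __maybe_unused example_map_user_vec(struct request_queue *q,
					       struct request *rq,
					       const struct iovec *uvec,
					       unsigned long nr_segs,
					       size_t len)
{
	struct iov_iter iter;

	/* direction follows the request's data direction */
	iov_iter_init(&iter, rq_data_dir(rq), uvec, nr_segs, len);

	/* no rq_map_data: pages are pinned or bounce-copied as needed */
	return blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
}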

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a request previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
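
/*
 * Illustrative sketch only, not part of the block layer API: the usual
 * pairing of blk_rq_map_user() with blk_rq_unmap_user() around a
 * synchronous block-PC request.  Command setup (rq->cmd, rq->cmd_len,
 * sense buffer, timeout) and error handling of the command itself are
 * assumed to be done by the caller; "disk" and the function name are
 * hypothetical.
 */
static int __maybe_unused example_map_execute_unmap(struct request_queue *q,
						    struct gendisk *disk,
						    struct request *rq,
						    void __user *ubuf,
						    unsigned long len)
{
	struct bio *bio;
	int ret;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		return ret;

	/*
	 * Remember the original bio: completion may change rq->bio, and
	 * blk_rq_unmap_user() needs the bio produced by the mapping.
	 */
	bio = rq->bio;
	blk_execute_rq(q, disk, rq, 0);

	return blk_rq_unmap_user(bio);
}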

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
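
/*
 * Illustrative sketch only: attaching kernel buffers to a request with
 * blk_rq_map_kern().  Per the description above it may be called more
 * than once to append several buffers to the same request; the buffer
 * parameters and the function name here are hypothetical.
 */
static int __maybe_unused example_map_kern_buffers(struct request_queue *q,
						   struct request *rq,
						   void *hdr, unsigned int hdr_len,
						   void *data, unsigned int data_len)
{
	int ret;

	ret = blk_rq_map_kern(q, rq, hdr, hdr_len, GFP_NOIO);
	if (ret)
		return ret;

	/* append a second buffer to the same request */
	return blk_rq_map_kern(q, rq, data, data_len, GFP_NOIO);
}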