This source file includes the following definitions:
- iomap_dio_iopoll
- iomap_dio_submit_bio
- iomap_dio_complete
- iomap_dio_complete_work
- iomap_dio_set_error
- iomap_dio_bio_end_io
- iomap_dio_zero
- iomap_dio_bio_actor
- iomap_dio_hole_actor
- iomap_dio_inline_actor
- iomap_dio_actor
- iomap_dio_rw
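
/*
 * Direct I/O support for iomap based filesystems.
 */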
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>

#include "../internal.h"

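/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */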
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	const struct iomap_dio_ops *dops;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;
	bool			wait_for_completion;

	union {
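		/* used during submission and for synchronous completion: */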
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

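		/* used for aio completion: */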
		struct {
			struct work_struct	work;
		} aio;
	};
};

int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
{
	struct request_queue *q = READ_ONCE(kiocb->private);

	if (!q)
		return 0;
	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin);
}
EXPORT_SYMBOL_GPL(iomap_dio_iopoll);

static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap,
		struct bio *bio)
{
	atomic_inc(&dio->ref);

	if (dio->iocb->ki_flags & IOCB_HIPRI)
		bio_set_polled(bio, dio->iocb);

	dio->submit.last_queue = bdev_get_queue(iomap->bdev);
	dio->submit.cookie = submit_bio(bio);
}

static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	const struct iomap_dio_ops *dops = dio->dops;
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret = dio->error;

	if (dops && dops->end_io)
		ret = dops->end_io(iocb, dio->size, ret, dio->flags);

	if (likely(!ret)) {
		ret = dio->size;
		/* check for a short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

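	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the
	 * source of the write was an mmap'ed region of the file we're writing.
	 * Either one is a pretty crazy thing to do, so we don't support it
	 * 100%.  If this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be done after dio->end_io(),
	 * as some filesystems convert unwritten extents to real allocations
	 * in end_io() when necessary, otherwise a racing buffered read would
	 * cache stale data in the page cache.
	 */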
	if (!dio->error &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;
		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}

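	/*
	 * If this is a DSYNC write, make sure we push it to stable storage
	 * now that we've written data.
	 */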
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
}

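/*
 * Set an error in the dio if none is set yet.  The cmpxchg() only stores
 * the new error if dio->error is still zero, so the first error recorded
 * by racing bio completions wins.
 */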
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (dio->wait_for_completion) {
			struct task_struct *waiter = dio->submit.waiter;
			WRITE_ONCE(dio->submit.waiter, NULL);
			blk_wake_io_task(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

static void
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	int flags = REQ_SYNC | REQ_IDLE;
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, iomap->bdev);
	bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
	iomap_dio_submit_bio(dio, iomap, bio);
}

static loff_t
iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret = 0;
	size_t copied = 0;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}

	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else if (iomap->type == IOMAP_MAPPED) {
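		/*
		 * Use a FUA write if we need datasync semantics, this is a
		 * pure data IO that doesn't require any metadata updates
		 * (including after IO completion such as unwritten extent
		 * conversion) and the underlying device supports FUA.  This
		 * allows us to avoid cache flushes on IO completion.
		 */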
		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_FUA) &&
		    blk_queue_fua(bdev_get_queue(iomap->bdev)))
			use_fua = true;
	}

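	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */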
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			return 0;
		}

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio_set_dev(bio, iomap->bdev);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_write_hint = dio->iocb->ki_hint;
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			/*
			 * We have to stop part way through an IO.  We must
			 * fall through to the sub-block tail zeroing here,
			 * otherwise this short IO may expose stale data in
			 * the tail of the block we haven't written data to.
			 */
			bio_put(bio);
			goto zero_tail;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
			if (use_fua)
				bio->bi_opf |= REQ_FUA;
			else
				dio->flags &= ~IOMAP_DIO_WRITE_FUA;
			task_io_account_write(n);
		} else {
			bio->bi_opf = REQ_OP_READ;
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		iov_iter_advance(dio->submit.iter, n);

		dio->size += n;
		pos += n;
		copied += n;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
		iomap_dio_submit_bio(dio, iomap, bio);
	} while (nr_pages);

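	/*
	 * We need to zeroout the tail of a sub-block write if the extent type
	 * requires zeroing or the write extends beyond EOF.  If we don't zero
	 * the block tail in the latter case, we can expose stale data via
	 * mmap reads of the EOF block.
	 */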
zero_tail:
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}
	if (copied)
		return copied;
	return ret;
}

static loff_t
iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
{
	length = iov_iter_zero(length, dio->submit.iter);
	dio->size += length;
	return length;
}

static loff_t
iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	struct iov_iter *iter = dio->submit.iter;
	size_t copied;

	BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = inode->i_size;

		if (pos > size)
			memset(iomap->inline_data + size, 0, pos - size);
		copied = copy_from_iter(iomap->inline_data + pos, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(inode, pos + copied);
			mark_inode_dirty(inode);
		}
	} else {
		copied = copy_to_iter(iomap->inline_data + pos, length, iter);
	}
	dio->size += copied;
	return copied;
}

static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_dio *dio = data;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_actor(length, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_actor(length, dio);
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_MAPPED:
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_INLINE:
		return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}
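
/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the
 * IO is being issued as AIO or not.  This allows us to optimise pure data
 * writes to use REQ_FUA rather than requiring generic_write_sync() to issue
 * a cache flush post write.  This is slightly tricky because a single
 * request here can be mapped into multiple disjoint IOs and only a subset of
 * the IOs issued may be pure data writes.  In that case we still need to do
 * a full data sync completion.
 */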
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, start = pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	bool wait_for_completion = is_sync_kiocb(iocb);
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->dops = dops;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	dio->submit.waiter = current;
	dio->submit.cookie = BLK_QC_T_NONE;
	dio->submit.last_queue = NULL;

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter_is_iovec(iter) && iov_iter_rw(iter) == READ)
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		/* for data sync or sync, we need sync completion processing */
		if (iocb->ki_flags & IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_NEED_SYNC;
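
		/*
		 * For datasync only writes we optimistically try using FUA
		 * for this IO.  Any non-FUA write that occurs will clear this
		 * flag, hence we know before completion whether a cache flush
		 * is necessary.
		 */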
		if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_WRITE_FUA;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_has_page(mapping, start, end)) {
			ret = -EAGAIN;
			goto out_free_dio;
		}
		flags |= IOMAP_NOWAIT;
	}

	ret = filemap_write_and_wait_range(mapping, start, end);
	if (ret)
		goto out_free_dio;

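	/*
	 * Try to invalidate cache pages for the range we're direct writing.
	 * If this invalidation fails, tough, the write will still work, but
	 * racing two incompatible write paths is a pretty crazy thing to do,
	 * so we don't support it 100%.
	 */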
	ret = invalidate_inode_pages2_range(mapping,
			start >> PAGE_SHIFT, end >> PAGE_SHIFT);
	if (ret)
		dio_warn_stale_pagecache(iocb->ki_filp);
	ret = 0;

	if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
	    !inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			goto out_free_dio;
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK) {
				wait_for_completion = true;
				ret = 0;
			}
			break;
		}
		pos += ret;

		if (iov_iter_rw(iter) == READ && pos >= dio->i_size) {
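			/*
			 * We only report that we've read data up to i_size.
			 * Revert iter to a state corresponding to that as
			 * some callers (such as the splice code) rely on it.
			 */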
			iov_iter_revert(iter, pos - dio->i_size);
			break;
		}
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

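	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on IO completion.  Clear the sync flag for this case.
	 */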
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie);
	WRITE_ONCE(iocb->private, dio->submit.last_queue);

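	/*
	 * We are about to drop our additional submission reference, which
	 * might be the last reference to the dio.  There are three different
	 * ways we can progress here:
	 *
	 *  (a) If this is the last reference we will always complete and
	 *	free the dio ourselves.
	 *  (b) If this is not the last reference, and we serve an
	 *	asynchronous iocb, we must never touch the dio after the
	 *	decrement, the I/O completion handler will complete and free
	 *	it.
	 *  (c) If this is not the last reference and we serve a synchronous
	 *	iocb, the I/O completion handler will wake us up on the drop
	 *	of the final reference, and we will complete and free it here
	 *	after we got woken by the I/O completion handler.
	 */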
	dio->wait_for_completion = wait_for_completion;
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion)
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_poll(dio->submit.last_queue,
					 dio->submit.cookie, true))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return iomap_dio_complete(dio);

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
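
/*
 * Usage sketch (not part of the original file): a filesystem wires this up
 * by calling iomap_dio_rw() from its ->read_iter/->write_iter methods with
 * the inode's i_rwsem held (iomap_dio_rw() asserts this), passing its own
 * iomap_ops and an optional iomap_dio_ops for end_io processing.  The names
 * "example_dio_read" and "example_iomap_ops" below are hypothetical
 * placeholders, not APIs defined here; see xfs_file_dio_aio_read() for a
 * real caller.
 *
 *	static ssize_t example_dio_read(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		if (!iov_iter_count(to))
 *			return 0;
 *
 *		inode_lock_shared(inode);
 *		ret = iomap_dio_rw(iocb, to, &example_iomap_ops, NULL);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */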