This source file includes the following definitions:
- dio_pages_present
- dio_refill_pages
- dio_get_page
- dio_warn_stale_pagecache
- dio_complete
- dio_aio_complete_work
- dio_bio_end_aio
- dio_bio_end_io
- dio_end_io
- dio_bio_alloc
- dio_bio_submit
- dio_cleanup
- dio_await_one
- dio_bio_complete
- dio_await_completion
- dio_bio_reap
- sb_init_dio_done_wq
- dio_set_defer_completion
- get_more_blocks
- dio_new_bio
- dio_bio_add_page
- dio_send_cur_page
- submit_page_section
- dio_zero_block
- do_direct_IO
- drop_refcount
- do_blockdev_direct_IO
- __blockdev_direct_IO
- dio_init

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/rwsem.h>
#include <linux/uio.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>

/*
 * How many pages to map in one call to iov_iter_get_pages().  This
 * determines the size of the pages[] array in struct dio below.
 */
#define DIO_PAGES	64

/*
 * Flags for dio_complete().
 */
#define DIO_COMPLETE_ASYNC		0x01	/* This is async IO */
#define DIO_COMPLETE_INVALIDATE	0x02	/* Can invalidate pages */

/*
 * This code generally works in units of "dio blocks".  A dio block is
 * somewhere between the hard sector size and the filesystem block size: it
 * is the unit in which the user's I/O is expressed (blkbits), and blkfactor
 * converts between dio blocks and filesystem blocks.
 *
 * dio_submit holds the state of an in-progress direct I/O submission.  It
 * lives on the stack of the submitting task, so it needs no locking.
 */
struct dio_submit {
	struct bio *bio;		/* bio under assembly */
	unsigned blkbits;		/* doesn't change */
	unsigned blkfactor;		/* When we're using an alignment which
					   is finer than the filesystem's soft
					   blocksize, this specifies how much
					   finer.  blkfactor=2 means 1/4-block
					   alignment.  Does not change */
	unsigned start_zero_done;	/* flag: sub-blocksize zeroing has
					   been performed at the start of a
					   write */
	int pages_in_io;		/* approximate total IO pages */
	sector_t block_in_file;		/* Current offset into the underlying
					   file in dio_block units. */
	unsigned blocks_available;	/* At block_in_file.  changes */
	int reap_counter;		/* rate limit reaping */
	sector_t final_block_in_request;/* doesn't change */
	int boundary;			/* prev block is at a boundary */
	get_block_t *get_block;		/* block mapping function */
	dio_submit_t *submit_io;	/* IO submission function */

	loff_t logical_offset_in_bio;	/* current first logical block in bio */
	sector_t final_block_in_bio;	/* current final block in bio + 1 */
	sector_t next_block_for_io;	/* next block to be put under IO,
					   in dio_blocks units */

	/*
	 * Deferred addition of a page to the dio.  These variables are
	 * private to dio_send_cur_page(), submit_page_section() and
	 * dio_bio_add_page().
	 */
	struct page *cur_page;		/* The page */
	unsigned cur_page_offset;	/* Offset into it, in bytes */
	unsigned cur_page_len;		/* Nr of bytes at cur_page_offset */
	sector_t cur_page_block;	/* Where it starts */
	loff_t cur_page_fs_offset;	/* Offset in file */

	struct iov_iter *iter;
	/*
	 * Page queue.  These variables belong to dio_refill_pages() and
	 * dio_get_page().
	 */
	unsigned head;			/* next page to process */
	unsigned tail;			/* last valid page + 1 */
	size_t from, to;
};

/* dio state communicated between the submission path and end_io */
struct dio {
	int flags;			/* doesn't change */
	int op;
	int op_flags;
	blk_qc_t bio_cookie;
	struct gendisk *bio_disk;
	struct inode *inode;
	loff_t i_size;			/* i_size when submitted */
	dio_iodone_t *end_io;		/* IO completion function */

	void *private;			/* copy from map_bh.b_private */

	/* BIO completion state */
	spinlock_t bio_lock;		/* protects BIO fields below */
	int page_errors;		/* err from iov_iter_get_pages() */
	int is_async;			/* is IO async ? */
	bool defer_completion;		/* defer AIO completion to workqueue? */
	bool should_dirty;		/* if pages should be dirtied */
	int io_error;			/* IO error in completion path */
	unsigned long refcount;		/* direct_io_worker() and bios */
	struct bio *bio_list;		/* singly linked via bi_private */
	struct task_struct *waiter;	/* waiting task (NULL if none) */

	/* AIO related stuff */
	struct kiocb *iocb;		/* kiocb */
	ssize_t result;			/* IO result */

	/*
	 * pages[] (and any fields placed after it) are not zeroed out at
	 * allocation time.  Don't add new fields after pages[] unless you
	 * wish that they not be zeroed.
	 */
	union {
		struct page *pages[DIO_PAGES];	/* page buffer */
		struct work_struct complete_work;/* deferred AIO completion */
	};
} ____cacheline_aligned_in_smp;

static struct kmem_cache *dio_cache __read_mostly;

/*
 * How many pages are in the queue?
 */
static inline unsigned dio_pages_present(struct dio_submit *sdio)
{
	return sdio->tail - sdio->head;
}

/*
 * Go grab and pin some userspace pages.  Typically we'll get 64 at a time.
 */
static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
{
	ssize_t ret;

	ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
				&sdio->from);

	if (ret < 0 && sdio->blocks_available && (dio->op == REQ_OP_WRITE)) {
		struct page *page = ZERO_PAGE(0);
		/*
		 * A memory fault, but the filesystem has some outstanding
		 * mapped blocks.  We need to use those blocks up to avoid
		 * leaking stale data in the file.
		 */
		if (dio->page_errors == 0)
			dio->page_errors = ret;
		get_page(page);
		dio->pages[0] = page;
		sdio->head = 0;
		sdio->tail = 1;
		sdio->from = 0;
		sdio->to = PAGE_SIZE;
		return 0;
	}

	if (ret >= 0) {
		iov_iter_advance(sdio->iter, ret);
		ret += sdio->from;
		sdio->head = 0;
		sdio->tail = (ret + PAGE_SIZE - 1) / PAGE_SIZE;
		sdio->to = ((ret - 1) & (PAGE_SIZE - 1)) + 1;
		return 0;
	}
	return ret;
}

/*
 * Get another userspace page.  Returns an ERR_PTR on error.  Pages are
 * buffered inside the dio so that we can call iov_iter_get_pages() against
 * a decent number of pages, less frequently.  This provides nicer use of
 * the L1 cache.
 */
static inline struct page *dio_get_page(struct dio *dio,
					struct dio_submit *sdio)
{
	if (dio_pages_present(sdio) == 0) {
		int ret;

		ret = dio_refill_pages(dio, sdio);
		if (ret)
			return ERR_PTR(ret);
		BUG_ON(dio_pages_present(sdio) == 0);
	}
	return dio->pages[sdio->head];
}

/*
 * Warn about a page cache invalidation failure during a direct I/O write.
 */
void dio_warn_stale_pagecache(struct file *filp)
{
	static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
	char pathname[128];
	struct inode *inode = file_inode(filp);
	char *path;

	errseq_set(&inode->i_mapping->wb_err, -EIO);
	if (__ratelimit(&_rs)) {
		path = file_path(filp, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		pr_crit("Page cache invalidation failure on direct I/O.  Possible data corruption due to collision with buffered I/O!\n");
		pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
			current->comm);
	}
}

/*
 * dio_complete() - called when all DIO BIO I/O has been completed
 *
 * This drops i_dio_count, lets interested parties know that a DIO operation
 * has completed, and calculates the resulting return code for the operation.
 */
static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
{
	loff_t offset = dio->iocb->ki_pos;
	ssize_t transferred = 0;
	int err;

	/*
	 * AIO submission can race with bio completion to get here while
	 * expecting to have the last io completed by bio completion.
	 * In that case -EIOCBQUEUED is in fact not an error we want
	 * to preserve through this call.
	 */
	if (ret == -EIOCBQUEUED)
		ret = 0;

	if (dio->result) {
		transferred = dio->result;

		/* Check for short read case */
		if ((dio->op == REQ_OP_READ) &&
		    ((offset + transferred) > dio->i_size))
			transferred = dio->i_size - offset;

		/* ignore EFAULT if some IO has been done */
		if (unlikely(ret == -EFAULT) && transferred)
			ret = 0;
	}

	if (ret == 0)
		ret = dio->page_errors;
	if (ret == 0)
		ret = dio->io_error;
	if (ret == 0)
		ret = transferred;

	if (dio->end_io) {
		err = dio->end_io(dio->iocb, offset, ret, dio->private);
		if (err)
			ret = err;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the
	 * source of the write was an mmap'ed region of the file we're
	 * writing.  Either one is a pretty crazy thing to do, so we don't
	 * support it 100%.  If this invalidation fails, tough, the write
	 * still worked...
	 *
	 * And this page cache invalidation has to be after dio->end_io(), as
	 * some filesystems convert unwritten extents to real allocations in
	 * end_io() when necessary, otherwise a racing buffer read would cache
	 * clean pages.
	 */
	if (flags & DIO_COMPLETE_INVALIDATE &&
	    ret > 0 && dio->op == REQ_OP_WRITE &&
	    dio->inode->i_mapping->nrpages) {
		err = invalidate_inode_pages2_range(dio->inode->i_mapping,
					offset >> PAGE_SHIFT,
					(offset + ret - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(dio->iocb->ki_filp);
	}

	inode_dio_end(dio->inode);

	if (flags & DIO_COMPLETE_ASYNC) {
		/*
		 * generic_write_sync expects ki_pos to have been updated
		 * already, but the submission path only does this for
		 * synchronous I/O.
		 */
		dio->iocb->ki_pos += transferred;

		if (ret > 0 && dio->op == REQ_OP_WRITE)
			ret = generic_write_sync(dio->iocb, ret);
		dio->iocb->ki_complete(dio->iocb, ret, 0);
	}

	kmem_cache_free(dio_cache, dio);
	return ret;
}

static void dio_aio_complete_work(struct work_struct *work)
{
	struct dio *dio = container_of(work, struct dio, complete_work);

	dio_complete(dio, 0, DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE);
}

static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);

/*
 * Asynchronous IO callback.
 */
static void dio_bio_end_aio(struct bio *bio)
{
	struct dio *dio = bio->bi_private;
	unsigned long remaining;
	unsigned long flags;
	bool defer_completion = false;

	/* cleanup the bio */
	dio_bio_complete(dio, bio);

	spin_lock_irqsave(&dio->bio_lock, flags);
	remaining = --dio->refcount;
	if (remaining == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (remaining == 0) {
		/*
		 * Defer completion when defer_completion is set or
		 * when the inode has pages mapped and this is AIO write.
		 * We need to invalidate those pages because there is a
		 * chance they contain stale data in the case buffered IO
		 * went in between AIO submission and completion into the
		 * same region.
		 */
		if (dio->result)
			defer_completion = dio->defer_completion ||
					   (dio->op == REQ_OP_WRITE &&
					    dio->inode->i_mapping->nrpages);
		if (defer_completion) {
			INIT_WORK(&dio->complete_work, dio_aio_complete_work);
			queue_work(dio->inode->i_sb->s_dio_done_wq,
				   &dio->complete_work);
		} else {
			dio_complete(dio, 0, DIO_COMPLETE_ASYNC);
		}
	}
}

/*
 * The BIO completion handler simply queues the BIO up for the process-context
 * handler.
 *
 * During I/O bi_private points at the dio.  After I/O, bi_private is used to
 * implement a singly-linked list of completed BIOs, at dio->bio_list.
 */
static void dio_bio_end_io(struct bio *bio)
{
	struct dio *dio = bio->bi_private;
	unsigned long flags;

	spin_lock_irqsave(&dio->bio_lock, flags);
	bio->bi_private = dio->bio_list;
	dio->bio_list = bio;
	if (--dio->refcount == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);
}

/**
 * dio_end_io - handle the end io action for the given bio
 * @bio: The direct io bio thats being completed
 *
 * This is meant to be called by any filesystem that uses their own
 * dio_submit_t so that the DIO specific endio actions are dealt with
 * after the filesystem has done its completion work.
 */
void dio_end_io(struct bio *bio)
{
	struct dio *dio = bio->bi_private;

	if (dio->is_async)
		dio_bio_end_aio(bio);
	else
		dio_bio_end_io(bio);
}
EXPORT_SYMBOL_GPL(dio_end_io);

static inline void
dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
	      struct block_device *bdev,
	      sector_t first_sector, int nr_vecs)
{
	struct bio *bio;

	/*
	 * bio_alloc() is guaranteed to return a bio when allowed to sleep and
	 * we request a valid number of vectors.
	 */
	bio = bio_alloc(GFP_KERNEL, nr_vecs);

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = first_sector;
	bio_set_op_attrs(bio, dio->op, dio->op_flags);
	if (dio->is_async)
		bio->bi_end_io = dio_bio_end_aio;
	else
		bio->bi_end_io = dio_bio_end_io;

	bio->bi_write_hint = dio->iocb->ki_hint;

	sdio->bio = bio;
	sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
}

/*
 * In the AIO read case we speculatively dirty the pages before starting IO.
 * During IO completion, any of these pages which happen to have been written
 * back will be redirtied by bio_check_pages_dirty().
 *
 * bios hold a dio reference between submit_bio and ->end_io.
 */
static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
{
	struct bio *bio = sdio->bio;
	unsigned long flags;

	bio->bi_private = dio;

	spin_lock_irqsave(&dio->bio_lock, flags);
	dio->refcount++;
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
		bio_set_pages_dirty(bio);

	dio->bio_disk = bio->bi_disk;

	if (sdio->submit_io) {
		sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
		dio->bio_cookie = BLK_QC_T_NONE;
	} else
		dio->bio_cookie = submit_bio(bio);

	sdio->bio = NULL;
	sdio->boundary = 0;
	sdio->logical_offset_in_bio = 0;
}

/*
 * Release any resources in case of a failure
 */
static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
{
	while (sdio->head < sdio->tail)
		put_page(dio->pages[sdio->head++]);
}

/*
 * Wait for the next BIO to complete.  Remove it and return it.  NULL is
 * returned once all BIOs have been completed.  This must only be called once
 * all bios have been issued so that dio->refcount can only decrease.  This
 * requires that the caller hold a reference on the dio.
 */
static struct bio *dio_await_one(struct dio *dio)
{
	unsigned long flags;
	struct bio *bio = NULL;

	spin_lock_irqsave(&dio->bio_lock, flags);

	/*
	 * Wait as long as the list is empty and there are bios in flight.  bio
	 * completion drops the count, maybe adds to the list, and wakes while
	 * holding the bio_lock so we don't need set_current_state()'s barrier
	 * and can call it after testing our condition.
	 */
	while (dio->refcount > 1 && dio->bio_list == NULL) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		dio->waiter = current;
		spin_unlock_irqrestore(&dio->bio_lock, flags);
		if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
		    !blk_poll(dio->bio_disk->queue, dio->bio_cookie, true))
			io_schedule();
		/* wake up sets us TASK_RUNNING */
		spin_lock_irqsave(&dio->bio_lock, flags);
		dio->waiter = NULL;
	}
	if (dio->bio_list) {
		bio = dio->bio_list;
		dio->bio_list = bio->bi_private;
	}
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return bio;
}

/*
 * Process one completed BIO.  No locks are held.
 */
static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
{
	blk_status_t err = bio->bi_status;
	bool should_dirty = dio->op == REQ_OP_READ && dio->should_dirty;

	if (err) {
		if (err == BLK_STS_AGAIN && (bio->bi_opf & REQ_NOWAIT))
			dio->io_error = -EAGAIN;
		else
			dio->io_error = -EIO;
	}

	if (dio->is_async && should_dirty) {
		bio_check_pages_dirty(bio);	/* transfers ownership */
	} else {
		bio_release_pages(bio, should_dirty);
		bio_put(bio);
	}
	return err;
}

/*
 * Wait on and process all in-flight BIOs.  This must only be called once
 * all bios have been issued so that the refcount can only decrease.
 * This just waits for all bios to make it through dio_bio_complete.  IO
 * errors are propagated through dio->io_error and should be propagated via
 * dio_complete().
 */
static void dio_await_completion(struct dio *dio)
{
	struct bio *bio;
	do {
		bio = dio_await_one(dio);
		if (bio)
			dio_bio_complete(dio, bio);
	} while (bio);
}

/*
 * A really large O_DIRECT read or write can generate a lot of BIOs.  So
 * to keep the memory consumption sane we periodically reap any completed BIOs
 * during the BIO generation phase.
 *
 * This also helps to limit the peak amount of pinned userspace memory.
 */
static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
{
	int ret = 0;

	if (sdio->reap_counter++ >= 64) {
		while (dio->bio_list) {
			unsigned long flags;
			struct bio *bio;
			int ret2;

			spin_lock_irqsave(&dio->bio_lock, flags);
			bio = dio->bio_list;
			dio->bio_list = bio->bi_private;
			spin_unlock_irqrestore(&dio->bio_lock, flags);
			ret2 = blk_status_to_errno(dio_bio_complete(dio, bio));
			if (ret == 0)
				ret = ret2;
		}
		sdio->reap_counter = 0;
	}
	return ret;
}

/*
 * Create workqueue for deferred direct IO completions.  We allocate the
 * workqueue when it's first needed.  This avoids creating workqueue for
 * filesystems that don't need it and also allows us to create the workqueue
 * late enough so that we can include s_id in the name of the workqueue.
 */
int sb_init_dio_done_wq(struct super_block *sb)
{
	struct workqueue_struct *old;
	struct workqueue_struct *wq = alloc_workqueue("dio/%s",
						      WQ_MEM_RECLAIM, 0,
						      sb->s_id);
	if (!wq)
		return -ENOMEM;
	/*
	 * This has to be atomic as more DIOs can race to create the workqueue
	 */
	old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
	/* Someone created workqueue before us? Free ours... */
	if (old)
		destroy_workqueue(wq);
	return 0;
}
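
/*
 * The cmpxchg() above implements one-time, lock-free initialization: every
 * racing caller allocates its own workqueue, exactly one wins the swap from
 * NULL, and the losers free theirs.  A minimal sketch of the same pattern
 * for an arbitrary shared pointer follows (illustrative only, not part of
 * this file; 'my_obj' and its helpers are hypothetical):
 */
#if 0	/* example only */
static struct my_obj *shared;

static int shared_init_once(void)
{
	struct my_obj *obj = my_obj_alloc();
	struct my_obj *old;

	if (!obj)
		return -ENOMEM;
	old = cmpxchg(&shared, NULL, obj);	/* publish if still NULL */
	if (old)
		my_obj_free(obj);		/* lost the race */
	return 0;
}
#endif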

static int dio_set_defer_completion(struct dio *dio)
{
	struct super_block *sb = dio->inode->i_sb;

	if (dio->defer_completion)
		return 0;
	dio->defer_completion = true;
	if (!sb->s_dio_done_wq)
		return sb_init_dio_done_wq(sb);
	return 0;
}

/*
 * Call into the fs to map some more disk blocks.  We record the current number
 * of available blocks at sdio->blocks_available.  These are in units of the
 * fs blocksize, i_blocksize(inode).
 *
 * The fs is allowed to map lots of blocks at once.  If it wants to do that,
 * it uses the passed inode-relative block number as the file offset, as usual.
 *
 * get_block() is passed the number of i_blkbits-sized blocks which direct_io
 * has remaining to do.  The fs should not map more than this number of blocks.
 *
 * If the fs has mapped a lot of blocks, it should populate bh->b_size to
 * indicate how much contiguous disk space has been made available at
 * bh->b_blocknr.
 *
 * If *any* of the mapped blocks are new, then the fs must set buffer_new().
 */
static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
			   struct buffer_head *map_bh)
{
	int ret;
	sector_t fs_startblk;	/* Into file, in filesystem-sized blocks */
	sector_t fs_endblk;	/* Into file, in filesystem-sized blocks */
	unsigned long fs_count;	/* Number of filesystem-sized blocks */
	int create;
	unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor;
	loff_t i_size;

	/*
	 * If there was a memory error and we've overwritten all the
	 * mapped blocks then we can now return that memory error
	 */
	ret = dio->page_errors;
	if (ret == 0) {
		BUG_ON(sdio->block_in_file >= sdio->final_block_in_request);
		fs_startblk = sdio->block_in_file >> sdio->blkfactor;
		fs_endblk = (sdio->final_block_in_request - 1) >>
					sdio->blkfactor;
		fs_count = fs_endblk - fs_startblk + 1;

		map_bh->b_state = 0;
		map_bh->b_size = fs_count << i_blkbits;

		/*
		 * For writes that could fill holes inside i_size on a
		 * DIO_SKIP_HOLES filesystem we forbid block creations: only
		 * overwrites are permitted.  We will return early to the
		 * caller once we see an unmapped buffer head returned, and
		 * the caller will fall back to buffered I/O.
		 *
		 * Otherwise the decision is left to the get_blocks method,
		 * which may decide to handle it or also return an unmapped
		 * buffer head.
		 */
		create = dio->op == REQ_OP_WRITE;
		if (dio->flags & DIO_SKIP_HOLES) {
			i_size = i_size_read(dio->inode);
			if (i_size && fs_startblk <= (i_size - 1) >> i_blkbits)
				create = 0;
		}

		ret = (*sdio->get_block)(dio->inode, fs_startblk,
						map_bh, create);

		/* Store for completion */
		dio->private = map_bh->b_private;

		if (ret == 0 && buffer_defer_completion(map_bh))
			ret = dio_set_defer_completion(dio);
	}
	return ret;
}

/*
 * There is no bio.  Make one now.
 */
static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
		sector_t start_sector, struct buffer_head *map_bh)
{
	sector_t sector;
	int ret, nr_pages;

	ret = dio_bio_reap(dio, sdio);
	if (ret)
		goto out;
	sector = start_sector << (sdio->blkbits - 9);
	nr_pages = min(sdio->pages_in_io, BIO_MAX_PAGES);
	BUG_ON(nr_pages <= 0);
	dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
	sdio->boundary = 0;
out:
	return ret;
}

/*
 * Attempt to put the current chunk of 'cur_page' into the current BIO.  If
 * that was successful then update final_block_in_bio and take a ref against
 * the just-added page.
 *
 * Return zero on success.  Non-zero means the caller needs to start a new BIO.
 */
static inline int dio_bio_add_page(struct dio_submit *sdio)
{
	int ret;

	ret = bio_add_page(sdio->bio, sdio->cur_page,
			sdio->cur_page_len, sdio->cur_page_offset);
	if (ret == sdio->cur_page_len) {
		/*
		 * Decrement count only, if we are done with this page
		 */
		if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
			sdio->pages_in_io--;
		get_page(sdio->cur_page);
		sdio->final_block_in_bio = sdio->cur_page_block +
			(sdio->cur_page_len >> sdio->blkbits);
		ret = 0;
	} else {
		ret = 1;
	}
	return ret;
}

/*
 * Put cur_page under IO.  The section of cur_page which is described by
 * cur_page_offset,cur_page_len is put into a BIO.  The section of cur_page
 * starts on-disk at cur_page_block.
 *
 * We take a ref against the page here (on behalf of its presence in the bio).
 *
 * The caller of this function is responsible for removing cur_page from the
 * dio, and for dropping the refcount which came from that presence.
 */
static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
		struct buffer_head *map_bh)
{
	int ret = 0;

	if (sdio->bio) {
		loff_t cur_offset = sdio->cur_page_fs_offset;
		loff_t bio_next_offset = sdio->logical_offset_in_bio +
			sdio->bio->bi_iter.bi_size;

		/*
		 * See whether this new request is contiguous with the old.
		 *
		 * Btrfs cannot handle having logically non-contiguous requests
		 * submitted.  For example if you have
		 *
		 * Logical:  [0-4095][HOLE][8192-12287]
		 * Physical: [0-4095]      [4096-8191]
		 *
		 * We cannot submit those pages together as one BIO.  So if our
		 * current logical offset in the file does not equal what would
		 * be the next logical offset in the bio, submit the bio we
		 * have.
		 */
		if (sdio->final_block_in_bio != sdio->cur_page_block ||
		    cur_offset != bio_next_offset)
			dio_bio_submit(dio, sdio);
	}

	if (sdio->bio == NULL) {
		ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
		if (ret)
			goto out;
	}

	if (dio_bio_add_page(sdio) != 0) {
		dio_bio_submit(dio, sdio);
		ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
		if (ret == 0) {
			ret = dio_bio_add_page(sdio);
			BUG_ON(ret != 0);
		}
	}
out:
	return ret;
}

/*
 * An autonomous function to put a chunk of a page under deferred IO.
 *
 * The caller doesn't actually know (or care) whether this piece of page is in
 * a BIO, or is under IO or whatever.  We just take care of all possible
 * situations here.  The separation between the logic of do_direct_IO() and
 * that of submit_page_section() is important for clarity.  Please don't break.
 *
 * The chunk of page starts on-disk at blocknr.
 *
 * We perform deferred IO, by recording the last-submitted page inside our
 * private part of the dio structure.  If possible, we just expand the IO
 * across that page here.
 *
 * If that doesn't work out then we put the old page into the bio and add this
 * page to the dio instead.
 */
static inline int
submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
		    unsigned offset, unsigned len, sector_t blocknr,
		    struct buffer_head *map_bh)
{
	int ret = 0;

	if (dio->op == REQ_OP_WRITE) {
		/*
		 * Read accounting is performed in submit_bio()
		 */
		task_io_account_write(len);
	}

	/*
	 * Can we just grow the current page's presence in the dio?
	 */
	if (sdio->cur_page == page &&
	    sdio->cur_page_offset + sdio->cur_page_len == offset &&
	    sdio->cur_page_block +
	    (sdio->cur_page_len >> sdio->blkbits) == blocknr) {
		sdio->cur_page_len += len;
		goto out;
	}

	/*
	 * If there's a deferred page already there then send it.
	 */
	if (sdio->cur_page) {
		ret = dio_send_cur_page(dio, sdio, map_bh);
		put_page(sdio->cur_page);
		sdio->cur_page = NULL;
		if (ret)
			return ret;
	}

	get_page(page);		/* It is in dio */
	sdio->cur_page = page;
	sdio->cur_page_offset = offset;
	sdio->cur_page_len = len;
	sdio->cur_page_block = blocknr;
	sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits;
out:
	/*
	 * If sdio->boundary then we want to schedule the IO now to
	 * avoid metadata seeks.
	 */
	if (sdio->boundary) {
		ret = dio_send_cur_page(dio, sdio, map_bh);
		if (sdio->bio)
			dio_bio_submit(dio, sdio);
		put_page(sdio->cur_page);
		sdio->cur_page = NULL;
	}
	return ret;
}

/*
 * If we are not writing the entire block and get_block() allocated
 * the block for us, we need to fill in the unused portion of the
 * block with zeros.  This happens only if user-buffer, fileoffset or
 * io length is not filesystem block-size multiple.
 *
 * `end' is zero if we're doing the start of the IO, 1 at the end of the
 * IO.
 */
static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio,
		int end, struct buffer_head *map_bh)
{
	unsigned dio_blocks_per_fs_block;
	unsigned this_chunk_blocks;	/* In dio_blocks */
	unsigned this_chunk_bytes;
	struct page *page;

	sdio->start_zero_done = 1;
	if (!sdio->blkfactor || !buffer_new(map_bh))
		return;

	dio_blocks_per_fs_block = 1 << sdio->blkfactor;
	this_chunk_blocks = sdio->block_in_file & (dio_blocks_per_fs_block - 1);

	if (!this_chunk_blocks)
		return;

	/*
	 * We need to zero out part of an fs block.  It is either at the
	 * beginning or the end of the fs block.
	 */
	if (end)
		this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks;

	this_chunk_bytes = this_chunk_blocks << sdio->blkbits;

	page = ZERO_PAGE(0);
	if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes,
				sdio->next_block_for_io, map_bh))
		return;

	sdio->next_block_for_io += this_chunk_blocks;
}

/*
 * Walk the user pages, and the file, mapping blocks to disk and generating
 * a sequence of (page,offset,len,block) mappings.  These mappings are injected
 * into submit_page_section(), which takes care of the next stage of submission.
 *
 * Direct IO against a blockdev is different from a file.  Because we can
 * happily perform page-sized but 512-byte aligned IOs.  It is important that
 * blockdev IO be able to have fine alignment and large sizes.
 *
 * So what we do is to permit the ->get_block function to populate bh.b_size
 * with the size of IO which is permitted at this offset and this i_blkbits.
 */
static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
			struct buffer_head *map_bh)
{
	const unsigned blkbits = sdio->blkbits;
	const unsigned i_blkbits = blkbits + sdio->blkfactor;
	int ret = 0;

	while (sdio->block_in_file < sdio->final_block_in_request) {
		struct page *page;
		size_t from, to;

		page = dio_get_page(dio, sdio);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
		from = sdio->head ? 0 : sdio->from;
		to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
		sdio->head++;

		while (from < to) {
			unsigned this_chunk_bytes;	/* # of bytes mapped */
			unsigned this_chunk_blocks;	/* # of blocks */
			unsigned u;

			if (sdio->blocks_available == 0) {
				/*
				 * Need to go and map some more disk
				 */
				unsigned long blkmask;
				unsigned long dio_remainder;

				ret = get_more_blocks(dio, sdio, map_bh);
				if (ret) {
					put_page(page);
					goto out;
				}
				if (!buffer_mapped(map_bh))
					goto do_holes;

				sdio->blocks_available =
						map_bh->b_size >> blkbits;
				sdio->next_block_for_io =
					map_bh->b_blocknr << sdio->blkfactor;
				if (buffer_new(map_bh)) {
					clean_bdev_aliases(
						map_bh->b_bdev,
						map_bh->b_blocknr,
						map_bh->b_size >> i_blkbits);
				}

				if (!sdio->blkfactor)
					goto do_holes;

				blkmask = (1 << sdio->blkfactor) - 1;
				dio_remainder = (sdio->block_in_file & blkmask);

				/*
				 * If we are at the start of IO and that IO
				 * starts partway into a fs-block,
				 * dio_remainder will be non-zero.  If the IO
				 * is a read then we can simply advance the IO
				 * cursor to the first block which is to be
				 * read.  But if the IO is a write and the
				 * block was newly allocated we cannot do that;
				 * the start of the fs block must be zeroed out
				 * on-disk
				 */
				if (!buffer_new(map_bh))
					sdio->next_block_for_io += dio_remainder;
				sdio->blocks_available -= dio_remainder;
			}
do_holes:
			/* Handle holes */
			if (!buffer_mapped(map_bh)) {
				loff_t i_size_aligned;

				/* Writes into a hole fall back to buffered IO */
				if (dio->op == REQ_OP_WRITE) {
					put_page(page);
					return -ENOTBLK;
				}

				/*
				 * Be sure to account for a partial block as the
				 * last block in the file
				 */
				i_size_aligned = ALIGN(i_size_read(dio->inode),
							1 << blkbits);
				if (sdio->block_in_file >=
						i_size_aligned >> blkbits) {
					/* We hit eof */
					put_page(page);
					goto out;
				}
				zero_user(page, from, 1 << blkbits);
				sdio->block_in_file++;
				from += 1 << blkbits;
				dio->result += 1 << blkbits;
				goto next_block;
			}

			/*
			 * If we're performing IO which has an alignment which
			 * is finer than the underlying fs, go check to see if
			 * we must zero out the start of this block.
			 */
			if (unlikely(sdio->blkfactor && !sdio->start_zero_done))
				dio_zero_block(dio, sdio, 0, map_bh);

			/*
			 * Work out, in this_chunk_blocks, how much disk we
			 * can add to this page
			 */
			this_chunk_blocks = sdio->blocks_available;
			u = (to - from) >> blkbits;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			u = sdio->final_block_in_request - sdio->block_in_file;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			this_chunk_bytes = this_chunk_blocks << blkbits;
			BUG_ON(this_chunk_bytes == 0);

			if (this_chunk_blocks == sdio->blocks_available)
				sdio->boundary = buffer_boundary(map_bh);
			ret = submit_page_section(dio, sdio, page,
						  from,
						  this_chunk_bytes,
						  sdio->next_block_for_io,
						  map_bh);
			if (ret) {
				put_page(page);
				goto out;
			}
			sdio->next_block_for_io += this_chunk_blocks;

			sdio->block_in_file += this_chunk_blocks;
			from += this_chunk_bytes;
			dio->result += this_chunk_bytes;
			sdio->blocks_available -= this_chunk_blocks;
next_block:
			BUG_ON(sdio->block_in_file > sdio->final_block_in_request);
			if (sdio->block_in_file == sdio->final_block_in_request)
				break;
		}

		/* Drop the ref which was taken when the page was pinned */
		put_page(page);
	}
out:
	return ret;
}

static inline int drop_refcount(struct dio *dio)
{
	int ret2;
	unsigned long flags;

	/*
	 * Sync will always be dropping the final ref and completing the
	 * operation.  AIO can if it was a broken operation described above or
	 * in fact if all the bios race to complete before we get here.  In
	 * that case dio_complete() translates the EIOCBQUEUED into the proper
	 * return code that the caller will hand to ->complete().
	 *
	 * This is managed by the bio_lock instead of being an atomic_t so that
	 * completion paths can drop their ref and use the remaining count to
	 * judge whether to wake the process or not.
	 */
	spin_lock_irqsave(&dio->bio_lock, flags);
	ret2 = --dio->refcount;
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return ret2;
}

/*
 * This is a library function for use by filesystem drivers.
 *
 * The locking rules are governed by the flags parameter:
 *  - if the flags value contains DIO_LOCKING we use a fancy locking
 *    scheme for dumb filesystems.
 *    For writes this function is called under i_mutex and returns with
 *    i_mutex held, for reads, i_mutex is not held on entry, but it is
 *    taken and dropped again before returning.
 *  - if the flags value does NOT contain DIO_LOCKING we don't use any
 *    internal locking but rather rely on the filesystem to synchronize
 *    direct I/O reads/writes versus each other and truncate.
 *
 * To help with locking against truncate we incremented the i_dio_count
 * counter before starting direct I/O, and decrement it once we are done.
 * Truncate can wait for it to reach zero to provide exclusion.  It is
 * expected that the filesystem provides exclusion between new direct I/O
 * and truncates.  For DIO_LOCKING filesystems this is done by i_mutex,
 * but other filesystems need to take care of this on their own.
 *
 * NOTE: if you pass "sdio" to anything by pointer make sure that function
 * is always inlined.  Otherwise gcc is unable to split the structure into
 * individual fields and will generate much worse code.  This is important
 * for the whole file.
 */
static inline ssize_t
do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
		      struct block_device *bdev, struct iov_iter *iter,
		      get_block_t get_block, dio_iodone_t end_io,
		      dio_submit_t submit_io, int flags)
{
	unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
	unsigned blkbits = i_blkbits;
	unsigned blocksize_mask = (1 << blkbits) - 1;
	ssize_t retval = -EINVAL;
	const size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	const loff_t end = offset + count;
	struct dio *dio;
	struct dio_submit sdio = { 0, };
	struct buffer_head map_bh = { 0, };
	struct blk_plug plug;
	unsigned long align = offset | iov_iter_alignment(iter);

	/*
	 * Avoid references to bdev if not absolutely needed to give
	 * the early prefetch in the caller enough time.
	 */
	if (align & blocksize_mask) {
		if (bdev)
			blkbits = blksize_bits(bdev_logical_block_size(bdev));
		blocksize_mask = (1 << blkbits) - 1;
		if (align & blocksize_mask)
			goto out;
	}

	/* watch out for a 0 len io from a tricksy fs */
	if (iov_iter_rw(iter) == READ && !count)
		return 0;

	dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
	retval = -ENOMEM;
	if (!dio)
		goto out;
	/*
	 * Believe it or not, zeroing out the page array caused a .5%
	 * performance regression in a database benchmark.  So, we take
	 * care to only zero out what's needed.
	 */
	memset(dio, 0, offsetof(struct dio, pages));

	dio->flags = flags;
	if (dio->flags & DIO_LOCKING) {
		if (iov_iter_rw(iter) == READ) {
			struct address_space *mapping =
					iocb->ki_filp->f_mapping;

			/* will be released at I/O completion */
			inode_lock(inode);

			retval = filemap_write_and_wait_range(mapping, offset,
							      end - 1);
			if (retval) {
				inode_unlock(inode);
				kmem_cache_free(dio_cache, dio);
				goto out;
			}
		}
	}

	/* Once we sampled i_size check for reads beyond EOF */
	dio->i_size = i_size_read(inode);
	if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
		if (dio->flags & DIO_LOCKING)
			inode_unlock(inode);
		kmem_cache_free(dio_cache, dio);
		retval = 0;
		goto out;
	}

	/*
	 * For file extending writes updating i_size before data writeouts
	 * complete can expose uninitialized blocks in dumb filesystems.
	 * In that case we need to wait for I/O completion even if asked
	 * for an asynchronous write.
	 */
	if (is_sync_kiocb(iocb))
		dio->is_async = false;
	else if (iov_iter_rw(iter) == WRITE && end > i_size_read(inode))
		dio->is_async = false;
	else
		dio->is_async = true;

	dio->inode = inode;
	if (iov_iter_rw(iter) == WRITE) {
		dio->op = REQ_OP_WRITE;
		dio->op_flags = REQ_SYNC | REQ_IDLE;
		if (iocb->ki_flags & IOCB_NOWAIT)
			dio->op_flags |= REQ_NOWAIT;
	} else {
		dio->op = REQ_OP_READ;
	}
	if (iocb->ki_flags & IOCB_HIPRI)
		dio->op_flags |= REQ_HIPRI;

	/*
	 * For AIO O_(D)SYNC writes we need to defer completions to a workqueue
	 * so that we can call ->fsync.
	 */
	if (dio->is_async && iov_iter_rw(iter) == WRITE) {
		retval = 0;
		if (iocb->ki_flags & IOCB_DSYNC)
			retval = dio_set_defer_completion(dio);
		else if (!dio->inode->i_sb->s_dio_done_wq) {
			/*
			 * In case of AIO write racing with buffered read we
			 * need to defer completion.  We can't decide this now,
			 * however the workqueue needs to be initialized here.
			 */
			retval = sb_init_dio_done_wq(dio->inode->i_sb);
		}
		if (retval) {
			/*
			 * We grab i_mutex only for reads so we don't have
			 * to release it here
			 */
			kmem_cache_free(dio_cache, dio);
			goto out;
		}
	}

	/*
	 * Will be decremented at I/O completion time.
	 */
	inode_dio_begin(inode);

	retval = 0;
	sdio.blkbits = blkbits;
	sdio.blkfactor = i_blkbits - blkbits;
	sdio.block_in_file = offset >> blkbits;

	sdio.get_block = get_block;
	dio->end_io = end_io;
	sdio.submit_io = submit_io;
	sdio.final_block_in_bio = -1;
	sdio.next_block_for_io = -1;

	dio->iocb = iocb;

	spin_lock_init(&dio->bio_lock);
	dio->refcount = 1;

	dio->should_dirty = iter_is_iovec(iter) && iov_iter_rw(iter) == READ;
	sdio.iter = iter;
	sdio.final_block_in_request = end >> blkbits;

	/*
	 * In case of non-aligned buffers, we may need 2 more
	 * pages since we need to zero out first and last block.
	 */
	if (unlikely(sdio.blkfactor))
		sdio.pages_in_io = 2;

	sdio.pages_in_io += iov_iter_npages(iter, INT_MAX);

	blk_start_plug(&plug);

	retval = do_direct_IO(dio, &sdio, &map_bh);
	if (retval)
		dio_cleanup(dio, &sdio);

	if (retval == -ENOTBLK) {
		/*
		 * The remaining part of the request will be
		 * handled by buffered I/O when we return
		 */
		retval = 0;
	}
	/*
	 * There may be some unwritten disk at the end of a part-written
	 * fs-block-sized block.  Go zero that now.
	 */
	dio_zero_block(dio, &sdio, 1, &map_bh);

	if (sdio.cur_page) {
		ssize_t ret2;

		ret2 = dio_send_cur_page(dio, &sdio, &map_bh);
		if (retval == 0)
			retval = ret2;
		put_page(sdio.cur_page);
		sdio.cur_page = NULL;
	}
	if (sdio.bio)
		dio_bio_submit(dio, &sdio);

	blk_finish_plug(&plug);

	/*
	 * It is possible that, we return short IO due to end of file.
	 * In that case, we need to release all the pages we got hold on.
	 */
	dio_cleanup(dio, &sdio);

	/*
	 * All block lookups have been performed.  For READ requests
	 * we can let i_mutex go now that its achieved its purpose
	 * of protecting us from looking up uninitialized blocks.
	 */
	if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
		inode_unlock(dio->inode);

	/*
	 * The only time we want to leave bios in flight is when a successful
	 * partial aio read or full aio write have been setup.  In that case
	 * bio completion will call aio_complete.  The only time it's safe to
	 * call aio_complete is when we return -EIOCBQUEUED, so we key on that.
	 * This had *better* be the only place that raises -EIOCBQUEUED.
	 */
	BUG_ON(retval == -EIOCBQUEUED);
	if (dio->is_async && retval == 0 && dio->result &&
	    (iov_iter_rw(iter) == READ || dio->result == count))
		retval = -EIOCBQUEUED;
	else
		dio_await_completion(dio);

	if (drop_refcount(dio) == 0) {
		retval = dio_complete(dio, retval, DIO_COMPLETE_INVALIDATE);
	} else
		BUG_ON(retval != -EIOCBQUEUED);

out:
	return retval;
}

ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
			     struct block_device *bdev, struct iov_iter *iter,
			     get_block_t get_block,
			     dio_iodone_t end_io, dio_submit_t submit_io,
			     int flags)
{
	/*
	 * The block device state is needed in the end to finally
	 * submit everything.  Since it's likely to be cache cold
	 * prefetch it here as first thing to hide some of the
	 * latency.
	 *
	 * Attempt to prefetch the pieces we likely need later.
	 */
	prefetch(&bdev->bd_disk->part_tbl);
	prefetch(bdev->bd_queue);
	prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);

	return do_blockdev_direct_IO(iocb, inode, bdev, iter, get_block,
				     end_io, submit_io, flags);
}
EXPORT_SYMBOL(__blockdev_direct_IO);
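
/*
 * Usage sketch (illustrative only, not part of this file): a filesystem
 * typically reaches __blockdev_direct_IO() from its ->direct_IO
 * address_space operation through the blockdev_direct_IO() wrapper in
 * <linux/fs.h>, which supplies DIO_LOCKING | DIO_SKIP_HOLES and the
 * inode's block device.  The filesystem name and helpers below
 * (example_get_block, example_write_failed) are hypothetical.
 */
#if 0	/* example only */
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create);
static void example_write_failed(struct address_space *mapping, loff_t to);

static ssize_t example_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);	/* sample before submission */
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	ret = blockdev_direct_IO(iocb, inode, iter, example_get_block);
	/* On a failed write, trim any blocks allocated past the old EOF */
	if (ret < 0 && iov_iter_rw(iter) == WRITE)
		example_write_failed(inode->i_mapping, offset + count);
	return ret;
}
#endif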

static __init int dio_init(void)
{
	dio_cache = KMEM_CACHE(dio, SLAB_PANIC);
	return 0;
}
module_init(dio_init)