This source file includes the following definitions:
- mpage_end_io
- mpage_bio_submit
- mpage_alloc
- map_buffer_to_page
- do_mpage_readpage
- mpage_readpages
- mpage_readpage
- clean_buffers
- clean_page_buffers
- __mpage_writepage
- mpage_writepages
- mpage_writepage
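
These routines are meant to be wired into a filesystem's address_space_operations together with the filesystem's own get_block callback. A minimal sketch of that wiring, loosely modeled on what ext2 does, is shown below; example_get_block and the example_* wrappers are hypothetical names used for illustration, not symbols defined in this file:

/* the filesystem's block mapper, assumed to be defined elsewhere */
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create);

static int example_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, example_get_block);
}

static int example_readpages(struct file *file, struct address_space *mapping,
			     struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, example_get_block);
}

static int example_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, example_get_block);
}

static const struct address_space_operations example_aops = {
	.readpage	= example_readpage,
	.readpages	= example_readpages,
	.writepages	= example_writepages,
	/* .writepage typically falls back to block_write_full_page() */
};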
/*
 * fs/mpage.c
 *
 * Library routines for reading and writing multiple pagecache pages per
 * BIO.  A filesystem supplies its get_block callback and these helpers
 * build the largest contiguous BIOs they can, falling back to the
 * one-block-at-a-time buffer_head paths when a page cannot be handled
 * that way.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>
#include "internal.h"

/*
 * I/O completion handler for multipage BIOs.
 *
 * Every page in the bio is completed via page_endio(), which unlocks the
 * page after a read or ends writeback after a write, propagating any error
 * carried in the bio's status.
 */
static void mpage_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;
		page_endio(page, bio_op(bio),
			   blk_status_to_errno(bio->bi_status));
	}

	bio_put(bio);
}

static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
{
	bio->bi_end_io = mpage_end_io;
	bio_set_op_attrs(bio, op, op_flags);
	guard_bio_eod(bio);
	submit_bio(bio);
	return NULL;
}

static struct bio *
mpage_alloc(struct block_device *bdev,
		sector_t first_sector, int nr_vecs,
		gfp_t gfp_flags)
{
	struct bio *bio;

	/* Restrict the given (page cache) mask for slab allocations */
	gfp_flags &= GFP_KERNEL;
	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = first_sector;
	}
	return bio;
}

/*
 * Support function for mpage_readpages().  The filesystem's get_block may
 * hand back an already up-to-date buffer.  Copy that buffer's state into
 * the matching buffer of the page so that the fallback readpage does not
 * have to repeat the get_block call.
 *
 * When the block size equals the page size and the buffer is up to date,
 * the page is simply marked up to date and no buffers are attached at all.
 */
static void
map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bh, *head;
	int block = 0;

	if (!page_has_buffers(page)) {
		/*
		 * A single page-sized block that is already up to date
		 * covers the whole page: mark the page up to date and
		 * avoid attaching buffers altogether.
		 */
		if (inode->i_blkbits == PAGE_SHIFT &&
		    buffer_uptodate(bh)) {
			SetPageUptodate(page);
			return;
		}
		create_empty_buffers(page, i_blocksize(inode), 0);
	}
	head = page_buffers(page);
	page_bh = head;
	do {
		if (block == page_block) {
			page_bh->b_state = bh->b_state;
			page_bh->b_bdev = bh->b_bdev;
			page_bh->b_blocknr = bh->b_blocknr;
			break;
		}
		page_bh = page_bh->b_this_page;
		block++;
	} while (page_bh != head);
}

struct mpage_readpage_args {
	struct bio *bio;
	struct page *page;
	unsigned int nr_pages;
	bool is_readahead;
	sector_t last_block_in_bio;
	struct buffer_head map_bh;
	unsigned long first_logical_block;
	get_block_t *get_block;
};

/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructing the largest possible bios, submitting them for
 * I/O whenever the blocks are not contiguous on disk.
 *
 * map_bh and first_logical_block carry mapping state between calls, so a
 * single get_block call that maps several blocks can also serve the
 * following pages.
 */
static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
{
	struct page *page = args->page;
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	struct buffer_head *map_bh = &args->map_bh;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	int length;
	int fully_mapped = 1;
	int op_flags;
	unsigned nblocks;
	unsigned relative_block;
	gfp_t gfp;

	if (args->is_readahead) {
		op_flags = REQ_RAHEAD;
		gfp = readahead_gfp_mask(page->mapping);
	} else {
		op_flags = 0;
		gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
	}

	if (page_has_buffers(page))
		goto confused;

	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
	last_block = block_in_file + args->nr_pages * blocks_per_page;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;
	page_block = 0;

	/*
	 * Map blocks using the result from the previous get_block call first.
	 */
	nblocks = map_bh->b_size >> blkbits;
	if (buffer_mapped(map_bh) &&
			block_in_file > args->first_logical_block &&
			block_in_file < (args->first_logical_block + nblocks)) {
		unsigned map_offset = block_in_file - args->first_logical_block;
		unsigned last = nblocks - map_offset;

		for (relative_block = 0; ; relative_block++) {
			if (relative_block == last) {
				clear_buffer_mapped(map_bh);
				break;
			}
			if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr + map_offset +
						relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	/*
	 * Then do more get_block calls until we are done with this page.
	 */
	map_bh->b_page = page;
	while (page_block < blocks_per_page) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;

		if (block_in_file < last_block) {
			map_bh->b_size = (last_block-block_in_file) << blkbits;
			if (args->get_block(inode, block_in_file, map_bh, 0))
				goto confused;
			args->first_logical_block = block_in_file;
		}

		if (!buffer_mapped(map_bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			page_block++;
			block_in_file++;
			continue;
		}

		/*
		 * Some filesystems copy data into the page during the
		 * get_block call and return an uptodate buffer.  In that
		 * case, don't read the block again: map_buffer_to_page
		 * copies the collected state into the page's buffers so
		 * the fallback readpage does not repeat the get_block call.
		 */
		if (buffer_uptodate(map_bh)) {
			map_buffer_to_page(page, map_bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
			goto confused;
		nblocks = map_bh->b_size >> blkbits;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == nblocks) {
				clear_buffer_mapped(map_bh);
				break;
			} else if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr+relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	if (first_hole != blocks_per_page) {
		zero_user_segment(page, first_hole << blkbits, PAGE_SIZE);
		if (first_hole == 0) {
			SetPageUptodate(page);
			unlock_page(page);
			goto out;
		}
	} else if (fully_mapped) {
		SetPageMappedToDisk(page);
	}

	if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
	    cleancache_get_page(page) == 0) {
		SetPageUptodate(page);
		goto confused;
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (args->bio && (args->last_block_in_bio != blocks[0] - 1))
		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);

alloc_new:
	if (args->bio == NULL) {
		if (first_hole == blocks_per_page) {
			if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
								page))
				goto out;
		}
		args->bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
					min_t(int, args->nr_pages,
					      BIO_MAX_PAGES),
					gfp);
		if (args->bio == NULL)
			goto confused;
	}

	length = first_hole << blkbits;
	if (bio_add_page(args->bio, page, length, 0) < length) {
		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
		goto alloc_new;
	}

	relative_block = block_in_file - args->first_logical_block;
	nblocks = map_bh->b_size >> blkbits;
	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
	    (first_hole != blocks_per_page))
		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
	else
		args->last_block_in_bio = blocks[blocks_per_page - 1];
out:
	return args->bio;

confused:
	if (args->bio)
		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
	if (!PageUptodate(page))
		block_read_full_page(page, args->get_block);
	else
		unlock_page(page);
	goto out;
}
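
/*
 * A minimal sketch of the get_block contract that do_mpage_readpage() and
 * __mpage_writepage() rely on: map_bh->b_size is used both to request and
 * to report how many bytes a single call maps.  example_get_block() and the
 * extent-lookup helper example_find_extent() are hypothetical names used
 * for illustration only, not functions defined in this file.
 */
#if 0	/* illustrative only */
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	loff_t requested = bh_result->b_size;	/* bytes the caller wants mapped */
	sector_t disk_block;
	unsigned long contig;

	/* hypothetical lookup: blocks mapped contiguously starting at iblock */
	contig = example_find_extent(inode, iblock, &disk_block, create);
	if (!contig)
		return create ? -ENOSPC : 0;	/* leave bh unmapped for a hole */

	map_bh(bh_result, inode->i_sb, disk_block);
	/* report how much of the request this single call covered */
	bh_result->b_size = min_t(loff_t, requested,
				  (loff_t)contig << inode->i_blkbits);
	/*
	 * A real implementation would also set_buffer_new() on freshly
	 * allocated blocks and set_buffer_boundary() at extent boundaries,
	 * which the code in this file inspects.
	 */
	return 0;
}
#endif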

/**
 * mpage_readpages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @nr_pages: The number of pages at *@pages
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building
 * and emitting large BIOs.  Pages whose blocks are contiguous on disk are
 * batched into a single bio; a discontiguity, a hole, or a buffer that
 * get_block has already brought up to date makes the page fall back to
 * block_read_full_page().  Issuing the requests in file order this way also
 * keeps them in the correct order for the disk.
 */
int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
				unsigned nr_pages, get_block_t get_block)
{
	struct mpage_readpage_args args = {
		.get_block = get_block,
		.is_readahead = true,
	};
	unsigned page_idx;

	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = lru_to_page(pages);

		prefetchw(&page->flags);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index,
					readahead_gfp_mask(mapping))) {
			args.page = page;
			args.nr_pages = nr_pages - page_idx;
			args.bio = do_mpage_readpage(&args);
		}
		put_page(page);
	}
	BUG_ON(!list_empty(pages));
	if (args.bio)
		mpage_bio_submit(REQ_OP_READ, REQ_RAHEAD, args.bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpages);

/*
 * Single-page counterpart of mpage_readpages(), used for ->readpage.
 */
int mpage_readpage(struct page *page, get_block_t get_block)
{
	struct mpage_readpage_args args = {
		.page = page,
		.nr_pages = 1,
		.get_block = get_block,
	};

	args.bio = do_mpage_readpage(&args);
	if (args.bio)
		mpage_bio_submit(REQ_OP_READ, 0, args.bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpage);

/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping.  We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO.  Otherwise fall back to the mapping's writepage().
 *
 * This code wants an estimate of how many pages are still to be written,
 * so it could allocate a suitably-sized BIO; for now it simply allocates
 * full-size (BIO_MAX_PAGES) BIOs.
 */
struct mpage_data {
	struct bio *bio;
	sector_t last_block_in_bio;
	get_block_t *get_block;
	unsigned use_writepage;
};

/*
 * We have our BIO, so we can now mark the buffers clean.  Make
 * sure to only clean buffers which we know we'll be writing.
 */
static void clean_buffers(struct page *page, unsigned first_unmapped)
{
	unsigned buffer_counter = 0;
	struct buffer_head *bh, *head;
	if (!page_has_buffers(page))
		return;
	head = page_buffers(page);
	bh = head;

	do {
		if (buffer_counter++ == first_unmapped)
			break;
		clear_buffer_dirty(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * We cannot drop the buffers if the page is not uptodate: a
	 * concurrent readpage would fail to serialize with them and could
	 * read from disk before the data reaches the platter.
	 */
	if (buffer_heads_over_limit && PageUptodate(page))
		try_to_free_buffers(page);
}

/*
 * For situations where we want to clean all buffers attached to a page.
 * We don't need to calculate how many buffers are attached to the page,
 * we just need to specify a number larger than the maximum number of buffers.
 */
void clean_page_buffers(struct page *page)
{
	clean_buffers(page, ~0U);
}

static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
		      void *data)
{
	struct mpage_data *mpd = data;
	struct bio *bio = mpd->bio;
	struct address_space *mapping = page->mapping;
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	unsigned long end_index;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	sector_t last_block;
	sector_t block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_unmapped = blocks_per_page;
	struct block_device *bdev = NULL;
	int boundary = 0;
	sector_t boundary_block = 0;
	struct block_device *boundary_bdev = NULL;
	int length;
	struct buffer_head map_bh;
	loff_t i_size = i_size_read(inode);
	int ret = 0;
	int op_flags = wbc_to_write_flags(wbc);

	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		/* If they're all mapped and dirty, do it */
		page_block = 0;
		do {
			BUG_ON(buffer_locked(bh));
			if (!buffer_mapped(bh)) {
				/*
				 * Unmapped dirty buffers are created by
				 * __set_page_dirty_buffers -> mmapped data.
				 */
				if (buffer_dirty(bh))
					goto confused;
				if (first_unmapped == blocks_per_page)
					first_unmapped = page_block;
				continue;
			}

			if (first_unmapped != blocks_per_page)
				goto confused;	/* hole -> non-hole */

			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
				goto confused;
			if (page_block) {
				if (bh->b_blocknr != blocks[page_block-1] + 1)
					goto confused;
			}
			blocks[page_block++] = bh->b_blocknr;
			boundary = buffer_boundary(bh);
			if (boundary) {
				boundary_block = bh->b_blocknr;
				boundary_bdev = bh->b_bdev;
			}
			bdev = bh->b_bdev;
		} while ((bh = bh->b_this_page) != head);

		if (first_unmapped)
			goto page_is_mapped;

		/*
		 * Page has buffers, but they are all unmapped (and clean,
		 * or we would have bailed out above).  There is nothing we
		 * can map here ourselves, so fall back to the confused path.
		 */
		goto confused;
	}

	/*
	 * The page has no buffers: map it to disk
	 */
	BUG_ON(!PageUptodate(page));
	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
	last_block = (i_size - 1) >> blkbits;
	map_bh.b_page = page;
	for (page_block = 0; page_block < blocks_per_page; ) {

		map_bh.b_state = 0;
		map_bh.b_size = 1 << blkbits;
		if (mpd->get_block(inode, block_in_file, &map_bh, 1))
			goto confused;
		if (buffer_new(&map_bh))
			clean_bdev_bh_alias(&map_bh);
		if (buffer_boundary(&map_bh)) {
			boundary_block = map_bh.b_blocknr;
			boundary_bdev = map_bh.b_bdev;
		}
		if (page_block) {
			if (map_bh.b_blocknr != blocks[page_block-1] + 1)
				goto confused;
		}
		blocks[page_block++] = map_bh.b_blocknr;
		boundary = buffer_boundary(&map_bh);
		bdev = map_bh.b_bdev;
		if (block_in_file == last_block)
			break;
		block_in_file++;
	}
	BUG_ON(page_block == 0);

	first_unmapped = page_block;

page_is_mapped:
	end_index = i_size >> PAGE_SHIFT;
	if (page->index >= end_index) {
		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a
		 * file that is not a multiple of the page size, the
		 * remaining memory is zeroed when mapped, and writes to
		 * that region are not written out to the file."
		 */
		unsigned offset = i_size & (PAGE_SIZE - 1);

		if (page->index > end_index || !offset)
			goto confused;
		zero_user_segment(page, offset, PAGE_SIZE);
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && mpd->last_block_in_bio != blocks[0] - 1)
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);

alloc_new:
	if (bio == NULL) {
		if (first_unmapped == blocks_per_page) {
			if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
								page, wbc))
				goto out;
		}
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH);
		if (bio == NULL)
			goto confused;

		wbc_init_bio(wbc, bio);
		bio->bi_write_hint = inode->i_write_hint;
	}

	/*
	 * Must try to add the page before marking the buffers clean or the
	 * confused fail path above (OOM) will be very confused when it
	 * finds all the buffers marked clean (it would not write anything).
	 */
	wbc_account_cgroup_owner(wbc, page, PAGE_SIZE);
	length = first_unmapped << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
		goto alloc_new;
	}

	clean_buffers(page, first_unmapped);

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);
	if (boundary || (first_unmapped != blocks_per_page)) {
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
		if (boundary_block) {
			write_boundary_block(boundary_bdev,
					boundary_block, 1 << blkbits);
		}
	} else {
		mpd->last_block_in_bio = blocks[blocks_per_page - 1];
	}
	goto out;

confused:
	if (bio)
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);

	if (mpd->use_writepage) {
		ret = mapping->a_ops->writepage(page, wbc);
	} else {
		ret = -EAGAIN;
		goto out;
	}
	/*
	 * The caller has a ref on the inode, so *mapping is stable
	 */
	mapping_set_error(mapping, ret);
out:
	mpd->bio = bio;
	return ret;
}

/**
 * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *             If this is NULL then use a_ops->writepage.  Otherwise, go
 *             direct-to-BIO.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * If a page is already under I/O, generic_writepages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().
 * fsync() and msync() need to guarantee that all the data which was dirty
 * at the time the call was made gets new I/O started against it.  If
 * wbc->sync_mode is WB_SYNC_ALL then we were called for data integrity and
 * must wait for existing I/O to complete.
 */
int
mpage_writepages(struct address_space *mapping,
		struct writeback_control *wbc, get_block_t get_block)
{
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);

	if (!get_block)
		ret = generic_writepages(mapping, wbc);
	else {
		struct mpage_data mpd = {
			.bio = NULL,
			.last_block_in_bio = 0,
			.get_block = get_block,
			.use_writepage = 1,
		};

		ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
		if (mpd.bio) {
			int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
					REQ_SYNC : 0);
			mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
		}
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(mpage_writepages);

int mpage_writepage(struct page *page, get_block_t get_block,
	struct writeback_control *wbc)
{
	struct mpage_data mpd = {
		.bio = NULL,
		.last_block_in_bio = 0,
		.get_block = get_block,
		.use_writepage = 0,
	};
	int ret = __mpage_writepage(page, wbc, &mpd);
	if (mpd.bio) {
		int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
				REQ_SYNC : 0);
		mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
	}
	return ret;
}
EXPORT_SYMBOL(mpage_writepage);