This source file includes the following definitions:
- read_block
- do_readpage
- release_new_page_budget
- release_existing_page_budget
- write_begin_slow
- allocate_budget
- ubifs_write_begin
- cancel_budget
- ubifs_write_end
- populate_page
- ubifs_do_bulk_read
- ubifs_bulk_read
- ubifs_readpage
- do_writepage
- ubifs_writepage
- do_attr_changes
- do_truncation
- do_setattr
- ubifs_setattr
- ubifs_invalidatepage
- ubifs_fsync
- mctime_update_needed
- ubifs_update_time
- update_mctime
- ubifs_write_iter
- ubifs_set_page_dirty
- ubifs_migrate_page
- ubifs_releasepage
- ubifs_vm_page_mkwrite
- ubifs_file_mmap
- ubifs_get_link

#include "ubifs.h"
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/migrate.h>

static int read_block(struct inode *inode, void *addr, unsigned int block,
                      struct ubifs_data_node *dn)
{
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        int err, len, out_len;
        union ubifs_key key;
        unsigned int dlen;

        data_key_init(c, &key, inode->i_ino, block);
        err = ubifs_tnc_lookup(c, &key, dn);
        if (err) {
                if (err == -ENOENT)
                        /* Not found, so it must be a hole */
                        memset(addr, 0, UBIFS_BLOCK_SIZE);
                return err;
        }

        ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >
                     ubifs_inode(inode)->creat_sqnum);
        len = le32_to_cpu(dn->size);
        if (len <= 0 || len > UBIFS_BLOCK_SIZE)
                goto dump;

        dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;

        if (ubifs_crypt_is_encrypted(inode)) {
                err = ubifs_decrypt(inode, dn, &dlen, block);
                if (err)
                        goto dump;
        }

        out_len = UBIFS_BLOCK_SIZE;
        err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
                               le16_to_cpu(dn->compr_type));
        if (err || len != out_len)
                goto dump;

        /*
         * The data length may be less than a full block, even for blocks
         * that are not the last in the file (e.g., as a result of making a
         * hole and appending data). Ensure the remainder is zeroed out.
         */
        if (len < UBIFS_BLOCK_SIZE)
                memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

        return 0;

dump:
        ubifs_err(c, "bad data node (block %u, inode %lu)",
                  block, inode->i_ino);
        ubifs_dump_node(c, dn);
        return -EINVAL;
}

static int do_readpage(struct page *page)
{
        void *addr;
        int err = 0, i;
        unsigned int block, beyond;
        struct ubifs_data_node *dn;
        struct inode *inode = page->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        loff_t i_size = i_size_read(inode);

        dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
                inode->i_ino, page->index, i_size, page->flags);
        ubifs_assert(c, !PageChecked(page));
        ubifs_assert(c, !PagePrivate(page));

        addr = kmap(page);

        block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
        beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
        if (block >= beyond) {
                /* Reading beyond inode */
                SetPageChecked(page);
                memset(addr, 0, PAGE_SIZE);
                goto out;
        }

        dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
        if (!dn) {
                err = -ENOMEM;
                goto error;
        }

        i = 0;
        while (1) {
                int ret;

                if (block >= beyond) {
                        /* Reading beyond inode */
                        err = -ENOENT;
                        memset(addr, 0, UBIFS_BLOCK_SIZE);
                } else {
                        ret = read_block(inode, addr, block, dn);
                        if (ret) {
                                err = ret;
                                if (err != -ENOENT)
                                        break;
                        } else if (block + 1 == beyond) {
                                int dlen = le32_to_cpu(dn->size);
                                int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);

                                if (ilen && ilen < dlen)
                                        memset(addr + ilen, 0, dlen - ilen);
                        }
                }
                if (++i >= UBIFS_BLOCKS_PER_PAGE)
                        break;
                block += 1;
                addr += UBIFS_BLOCK_SIZE;
        }
        if (err) {
                if (err == -ENOENT) {
                        /* Not found, so it must be a hole */
                        SetPageChecked(page);
                        dbg_gen("hole");
                        goto out_free;
                }
                ubifs_err(c, "cannot read page %lu of inode %lu, error %d",
                          page->index, inode->i_ino, err);
                goto error;
        }

out_free:
        kfree(dn);
out:
        SetPageUptodate(page);
        ClearPageError(page);
        flush_dcache_page(page);
        kunmap(page);
        return 0;

error:
        kfree(dn);
        ClearPageUptodate(page);
        SetPageError(page);
        flush_dcache_page(page);
        kunmap(page);
        return err;
}
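
/**
 * release_new_page_budget - release the budget of a new page.
 * @c: UBIFS file-system description object
 *
 * A helper which releases the budget corresponding to one new page of data
 * (description follows the code below).
 */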
static void release_new_page_budget(struct ubifs_info *c)
{
        struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };

        ubifs_release_budget(c, &req);
}
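
/**
 * release_existing_page_budget - release the budget of an existing page.
 * @c: UBIFS file-system description object
 *
 * A helper which releases the budget corresponding to changing one page of
 * data which already exists on the flash media (a dirtied-page budget).
 */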
static void release_existing_page_budget(struct ubifs_info *c)
{
        struct ubifs_budget_req req = { .dd_growth = c->bi.page_budget };

        ubifs_release_budget(c, &req);
}
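
/**
 * write_begin_slow - slow path of 'ubifs_write_begin()'.
 * @mapping: address space of the page
 * @pos: position in the file to start writing to
 * @len: length of the write
 * @pagep: the locked page is returned here
 * @flags: page allocation flags
 *
 * A rough description, inferred from the code below: this is the fall-back
 * path taken when the fast path of 'ubifs_write_begin()' cannot budget the
 * operation without forcing write-back. Budgeting is done before the page is
 * locked, so write-back cannot deadlock on it. Returns %0 on success and a
 * negative error code on failure.
 */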
static int write_begin_slow(struct address_space *mapping,
                            loff_t pos, unsigned len, struct page **pagep,
                            unsigned flags)
{
        struct inode *inode = mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        pgoff_t index = pos >> PAGE_SHIFT;
        struct ubifs_budget_req req = { .new_page = 1 };
        int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
        struct page *page;

        dbg_gen("ino %lu, pos %llu, len %u, i_size %lld",
                inode->i_ino, pos, len, inode->i_size);

        /*
         * On the slow path we have to budget before locking the page,
         * because budgeting may force write-back, which would wait on locked
         * pages and deadlock if we had the page locked. At this point we do
         * not know anything about the page, so assume it is a new page
         * written to a hole.
         */
        if (appending)
                /* We are appending data, budget for inode change */
                req.dirtied_ino = 1;

        err = ubifs_budget_space(c, &req);
        if (unlikely(err))
                return err;

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (unlikely(!page)) {
                ubifs_release_budget(c, &req);
                return -ENOMEM;
        }

        if (!PageUptodate(page)) {
                if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE)
                        SetPageChecked(page);
                else {
                        err = do_readpage(page);
                        if (err) {
                                unlock_page(page);
                                put_page(page);
                                ubifs_release_budget(c, &req);
                                return err;
                        }
                }

                SetPageUptodate(page);
                ClearPageError(page);
        }

        if (PagePrivate(page))
                /*
                 * The page is dirty, which means it was budgeted twice:
                 *   o first time the budget was allocated by the task which
                 *     made the page dirty and set the PG_private flag;
                 *   o and then we budgeted for it for the second time at the
                 *     very beginning of this function.
                 *
                 * So release the page budget we allocated.
                 */
                release_new_page_budget(c);
        else if (!PageChecked(page))
                /*
                 * We are changing a page which already exists on the media.
                 * This means that the page budget allocated at the very
                 * beginning of this function was wrong. Change it to the
                 * budget of a dirtied page.
                 */
                ubifs_convert_page_budget(c);

        if (appending) {
                struct ubifs_inode *ui = ubifs_inode(inode);

                /*
                 * 'ubifs_write_end()' is optimized from the fast-path part
                 * of 'ubifs_write_begin()' and expects @ui_mutex to be
                 * locked if we are appending pages. So keep it locked, and
                 * release the inode-dirtying budget if the inode is already
                 * dirty.
                 */
                mutex_lock(&ui->ui_mutex);
                if (ui->dirty)
                        /*
                         * The inode is dirty already, so we may free the
                         * budget we allocated.
                         */
                        ubifs_release_dirty_inode_budget(c, ui);
        }

        *pagep = page;
        return 0;
}
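
/**
 * allocate_budget - allocate budget for 'ubifs_write_begin()'.
 * @c: UBIFS file-system description object
 * @page: page to allocate budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This is a helper function for 'ubifs_write_begin()' which allocates budget
 * for the operation. Note, the request is made with the @fast flag set, so
 * budgeting fails with %-ENOSPC instead of forcing write-back. Returns zero
 * on success and %-ENOSPC on failure.
 */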
static int allocate_budget(struct ubifs_info *c, struct page *page,
                           struct ubifs_inode *ui, int appending)
{
        struct ubifs_budget_req req = { .fast = 1 };

        if (PagePrivate(page)) {
                if (!appending)
                        /*
                         * The page is dirty and we are not appending, which
                         * means no budget is needed at all.
                         */
                        return 0;

                mutex_lock(&ui->ui_mutex);
                if (ui->dirty)
                        /*
                         * The page is dirty and we are appending, so the
                         * inode has to be marked as dirty. However, it is
                         * already dirty, so we do not need any budget. We
                         * may return, but @ui->ui_mutex has to be left
                         * locked because we should release it in
                         * 'ubifs_write_end()'.
                         */
                        return 0;

                /*
                 * The page is dirty, we are appending, the inode is clean,
                 * so we need to budget the inode change.
                 */
                req.dirtied_ino = 1;
        } else {
                if (PageChecked(page))
                        /*
                         * The page corresponds to a hole and does not exist
                         * on the media, so changing it makes the amount of
                         * indexing information larger, and we have to budget
                         * for a new page.
                         */
                        req.new_page = 1;
                else
                        /*
                         * Not a hole, the change will not add any new
                         * indexing information, budget for page change.
                         */
                        req.dirtied_page = 1;

                if (appending) {
                        mutex_lock(&ui->ui_mutex);
                        if (!ui->dirty)
                                /*
                                 * The inode is clean but we will have to
                                 * mark it as dirty because we are appending.
                                 * This needs a budget.
                                 */
                                req.dirtied_ino = 1;
                }
        }

        return ubifs_budget_space(c, &req);
}
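
/*
 * A summary of the logic below: this function is called when a page of data
 * is going to be written. Since the page will not necessarily go to the
 * flash straight away, UBIFS has to reserve space on the media for it, which
 * is done by means of budgeting.
 *
 * This is the hot-path of the file-system, so it is split into a fast and a
 * slow part. The fast path (below) budgets with the @fast flag, which does
 * not force write-back; if that fails with %-ENOSPC, everything is unlocked
 * and released and we fall back to 'write_begin_slow()'.
 *
 * The budgeting cases, as implemented in 'allocate_budget()':
 *   o a new page is appended - budget for a new page and, unless the inode
 *     is already dirty, for changing the inode;
 *   o an existing clean page is changed - budget for a new page if it is a
 *     hole (PG_checked), otherwise for a dirtied page, which is cheaper
 *     because it adds no new indexing information;
 *   o an existing dirty page is changed - no budget is needed, it was
 *     acquired when the page was first dirtied.
 */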
static int ubifs_write_begin(struct file *file, struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned flags,
                             struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct ubifs_inode *ui = ubifs_inode(inode);
        pgoff_t index = pos >> PAGE_SHIFT;
        int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
        int skipped_read = 0;
        struct page *page;

        ubifs_assert(c, ubifs_inode(inode)->ui_size == inode->i_size);
        ubifs_assert(c, !c->ro_media && !c->ro_mount);

        if (unlikely(c->ro_error))
                return -EROFS;

        /* Try out the fast-path part first */
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (unlikely(!page))
                return -ENOMEM;

        if (!PageUptodate(page)) {
                /* The page is not loaded from the flash */
                if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE) {
                        /*
                         * We change the whole page so no need to load it.
                         * But we do not know whether this page exists on the
                         * media or not, so we assume the latter because it
                         * requires larger budget. The assumption is that it
                         * is better to budget a bit more than to read the
                         * page from the media.
                         */
                        SetPageChecked(page);
                        skipped_read = 1;
                } else {
                        err = do_readpage(page);
                        if (err) {
                                unlock_page(page);
                                put_page(page);
                                return err;
                        }
                }

                SetPageUptodate(page);
                ClearPageError(page);
        }

        err = allocate_budget(c, page, ui, appending);
        if (unlikely(err)) {
                ubifs_assert(c, err == -ENOSPC);
                /*
                 * If we skipped reading the page because we were going to
                 * write all of it, then it is not up to date.
                 */
                if (skipped_read) {
                        ClearPageChecked(page);
                        ClearPageUptodate(page);
                }
                /*
                 * Budgeting failed which means it would have to force
                 * write-back but didn't, because we set the @fast flag in
                 * the request. Write-back cannot be done now, while we have
                 * the page locked, because it would deadlock. Unlock and
                 * free everything and fall back to the slow path.
                 */
                if (appending) {
                        ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
                        mutex_unlock(&ui->ui_mutex);
                }
                unlock_page(page);
                put_page(page);

                return write_begin_slow(mapping, pos, len, pagep, flags);
        }

        /*
         * Whee, we acquired budgeting quickly - without involving
         * garbage-collection, committing or forcing write-back. We return
         * with @ui->ui_mutex locked if we are appending pages, and release
         * it otherwise. This is an optimization (slightly hacky though).
         */
        *pagep = page;
        return 0;
}
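
/**
 * cancel_budget - cancel budget.
 * @c: UBIFS file-system description object
 * @page: page to cancel budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This function releases the budget allocated by 'ubifs_write_begin()',
 * including the inode-dirtying part when appending, and unlocks
 * @ui->ui_mutex which the fast path left locked in that case.
 */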
static void cancel_budget(struct ubifs_info *c, struct page *page,
                          struct ubifs_inode *ui, int appending)
{
        if (appending) {
                if (!ui->dirty)
                        ubifs_release_dirty_inode_budget(c, ui);
                mutex_unlock(&ui->ui_mutex);
        }
        if (!PagePrivate(page)) {
                if (PageChecked(page))
                        release_new_page_budget(c);
                else
                        release_existing_page_budget(c);
        }
}
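
/*
 * A brief description, inferred from the code below: 'ubifs_write_end()'
 * completes the write started by 'ubifs_write_begin()'. If VFS copied less
 * than it promised, the budget is cancelled and the page is read back so
 * that the whole operation is repeated. Otherwise the page is marked dirty
 * and, when appending, @i_size is updated and @ui->ui_mutex (taken by the
 * fast path) is released.
 */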
static int ubifs_write_end(struct file *file, struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned copied,
                           struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        struct ubifs_inode *ui = ubifs_inode(inode);
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        loff_t end_pos = pos + len;
        int appending = !!(end_pos > inode->i_size);

        dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
                inode->i_ino, pos, page->index, len, copied, inode->i_size);

        if (unlikely(copied < len && len == PAGE_SIZE)) {
                /*
                 * VFS copied less data to the page than it intended and
                 * declared in its '->write_begin()' call via the @len
                 * argument. If the page was not up-to-date,
                 * 'ubifs_write_begin()' skipped reading it, so the page may
                 * contain garbage outside of the copied area. Cancel the
                 * budget and read the page back in.
                 */
                dbg_gen("copied %d instead of %d, read page and repeat",
                        copied, len);
                cancel_budget(c, page, ui, appending);
                ClearPageChecked(page);

                /*
                 * Return 0 to force VFS to repeat the whole operation, or
                 * the error code if 'do_readpage()' fails.
                 */
                copied = do_readpage(page);
                goto out;
        }

        if (!PagePrivate(page)) {
                SetPagePrivate(page);
                atomic_long_inc(&c->dirty_pg_cnt);
                __set_page_dirty_nobuffers(page);
        }

        if (appending) {
                i_size_write(inode, end_pos);
                ui->ui_size = end_pos;
                /*
                 * Note, we do not set @I_DIRTY_PAGES (which means that the
                 * inode has dirty pages), this has been done in
                 * '__set_page_dirty_nobuffers()'.
                 */
                __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
                ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
                mutex_unlock(&ui->ui_mutex);
        }

out:
        unlock_page(page);
        put_page(page);
        return copied;
}
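
/**
 * populate_page - copy data nodes into a page for bulk-read.
 * @c: UBIFS file-system description object
 * @page: page to populate
 * @bu: bulk-read information
 * @n: next zbranch slot
 *
 * Returns %0 on success and a negative error code on failure.
 */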
static int populate_page(struct ubifs_info *c, struct page *page,
                         struct bu_info *bu, int *n)
{
        int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
        struct inode *inode = page->mapping->host;
        loff_t i_size = i_size_read(inode);
        unsigned int page_block;
        void *addr, *zaddr;
        pgoff_t end_index;

        dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
                inode->i_ino, page->index, i_size, page->flags);

        addr = zaddr = kmap(page);

        end_index = (i_size - 1) >> PAGE_SHIFT;
        if (!i_size || page->index > end_index) {
                hole = 1;
                memset(addr, 0, PAGE_SIZE);
                goto out_hole;
        }

        page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
        while (1) {
                int err, len, out_len, dlen;

                if (nn >= bu->cnt) {
                        hole = 1;
                        memset(addr, 0, UBIFS_BLOCK_SIZE);
                } else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
                        struct ubifs_data_node *dn;

                        dn = bu->buf + (bu->zbranch[nn].offs - offs);

                        ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >
                                     ubifs_inode(inode)->creat_sqnum);

                        len = le32_to_cpu(dn->size);
                        if (len <= 0 || len > UBIFS_BLOCK_SIZE)
                                goto out_err;

                        dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
                        out_len = UBIFS_BLOCK_SIZE;

                        if (ubifs_crypt_is_encrypted(inode)) {
                                err = ubifs_decrypt(inode, dn, &dlen,
                                                    page_block);
                                if (err)
                                        goto out_err;
                        }

                        err = ubifs_decompress(c, &dn->data, dlen, addr,
                                               &out_len,
                                               le16_to_cpu(dn->compr_type));
                        if (err || len != out_len)
                                goto out_err;

                        if (len < UBIFS_BLOCK_SIZE)
                                memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

                        nn += 1;
                        read = (i << UBIFS_BLOCK_SHIFT) + len;
                } else if (key_block(c, &bu->zbranch[nn].key) < page_block) {
                        nn += 1;
                        continue;
                } else {
                        hole = 1;
                        memset(addr, 0, UBIFS_BLOCK_SIZE);
                }
                if (++i >= UBIFS_BLOCKS_PER_PAGE)
                        break;
                addr += UBIFS_BLOCK_SIZE;
                page_block += 1;
        }

        if (end_index == page->index) {
                int len = i_size & (PAGE_SIZE - 1);

                if (len && len < read)
                        memset(zaddr + len, 0, read - len);
        }

out_hole:
        if (hole) {
                SetPageChecked(page);
                dbg_gen("hole");
        }

        SetPageUptodate(page);
        ClearPageError(page);
        flush_dcache_page(page);
        kunmap(page);
        *n = nn;
        return 0;

out_err:
        ClearPageUptodate(page);
        SetPageError(page);
        flush_dcache_page(page);
        kunmap(page);
        ubifs_err(c, "bad data node (block %u, inode %lu)",
                  page_block, inode->i_ino);
        return -EINVAL;
}
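
/**
 * ubifs_do_bulk_read - do bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read information
 * @page1: first page to read
 *
 * Returns %1 if the bulk-read is done, otherwise %0 is returned.
 */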
static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
                              struct page *page1)
{
        pgoff_t offset = page1->index, end_index;
        struct address_space *mapping = page1->mapping;
        struct inode *inode = mapping->host;
        struct ubifs_inode *ui = ubifs_inode(inode);
        int err, page_idx, page_cnt, ret = 0, n = 0;
        int allocate = bu->buf ? 0 : 1;
        loff_t isize;
        gfp_t ra_gfp_mask = readahead_gfp_mask(mapping) & ~__GFP_FS;

        err = ubifs_tnc_get_bu_keys(c, bu);
        if (err)
                goto out_warn;

        if (bu->eof) {
                /* Turn off bulk-read at the end of the file */
                ui->read_in_a_row = 1;
                ui->bulk_read = 0;
        }

        page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
        if (!page_cnt) {
                /*
                 * This happens when there are multiple blocks per page and
                 * the blocks for the first page we are looking for are not
                 * together. If all the pages were like this, bulk-read would
                 * make things slower, so turn it off for a while.
                 */
                goto out_bu_off;
        }

        if (bu->cnt) {
                if (allocate) {
                        /*
                         * Allocate a bulk-read buffer depending on how many
                         * data nodes we are going to read.
                         */
                        bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
                                      bu->zbranch[bu->cnt - 1].len -
                                      bu->zbranch[0].offs;
                        ubifs_assert(c, bu->buf_len > 0);
                        ubifs_assert(c, bu->buf_len <= c->leb_size);
                        bu->buf = kmalloc(bu->buf_len,
                                          GFP_NOFS | __GFP_NOWARN);
                        if (!bu->buf)
                                goto out_bu_off;
                }

                err = ubifs_tnc_bulk_read(c, bu);
                if (err)
                        goto out_warn;
        }

        err = populate_page(c, page1, bu, &n);
        if (err)
                goto out_warn;

        unlock_page(page1);
        ret = 1;

        isize = i_size_read(inode);
        if (isize == 0)
                goto out_free;
        end_index = ((isize - 1) >> PAGE_SHIFT);

        for (page_idx = 1; page_idx < page_cnt; page_idx++) {
                pgoff_t page_offset = offset + page_idx;
                struct page *page;

                if (page_offset > end_index)
                        break;
                page = pagecache_get_page(mapping, page_offset,
                                 FGP_LOCK|FGP_ACCESSED|FGP_CREAT|FGP_NOWAIT,
                                 ra_gfp_mask);
                if (!page)
                        break;
                if (!PageUptodate(page))
                        err = populate_page(c, page, bu, &n);
                unlock_page(page);
                put_page(page);
                if (err)
                        break;
        }

        ui->last_page_read = offset + page_idx - 1;

out_free:
        if (allocate)
                kfree(bu->buf);
        return ret;

out_warn:
        ubifs_warn(c, "ignoring error %d and skipping bulk-read", err);
        goto out_free;

out_bu_off:
        ui->read_in_a_row = ui->bulk_read = 0;
        goto out_free;
}
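
/**
 * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
 * @page: page from which to start bulk-read.
 *
 * Some flash media are capable of reading sequentially at faster rates. The
 * UBIFS bulk-read code takes advantage of this by reading in one go
 * consecutive data nodes that are also located consecutively in the same
 * LEB. Returns %1 if a bulk-read is done and %0 otherwise.
 */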
static int ubifs_bulk_read(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct ubifs_inode *ui = ubifs_inode(inode);
        pgoff_t index = page->index, last_page_read = ui->last_page_read;
        struct bu_info *bu;
        int err = 0, allocated = 0;

        ui->last_page_read = index;
        if (!c->bulk_read)
                return 0;

        /*
         * Bulk-read is protected by @ui->ui_mutex, but it is an
         * optimization, so don't bother if we cannot lock the mutex.
         */
        if (!mutex_trylock(&ui->ui_mutex))
                return 0;

        if (index != last_page_read + 1) {
                /* Turn off bulk-read if we stop reading sequentially */
                ui->read_in_a_row = 1;
                if (ui->bulk_read)
                        ui->bulk_read = 0;
                goto out_unlock;
        }

        if (!ui->bulk_read) {
                ui->read_in_a_row += 1;
                if (ui->read_in_a_row < 3)
                        goto out_unlock;
                /* Three reads in a row, so switch on bulk-read */
                ui->bulk_read = 1;
        }

        /*
         * If possible, try to use the pre-allocated bulk-read information,
         * which is protected by @c->bu_mutex.
         */
        if (mutex_trylock(&c->bu_mutex))
                bu = &c->bu;
        else {
                bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
                if (!bu)
                        goto out_unlock;

                bu->buf = NULL;
                allocated = 1;
        }

        bu->buf_len = c->max_bu_buf_len;
        data_key_init(c, &bu->key, inode->i_ino,
                      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
        err = ubifs_do_bulk_read(c, bu, page);

        if (!allocated)
                mutex_unlock(&c->bu_mutex);
        else
                kfree(bu);

out_unlock:
        mutex_unlock(&ui->ui_mutex);
        return err;
}

static int ubifs_readpage(struct file *file, struct page *page)
{
        if (ubifs_bulk_read(page))
                return 0;
        do_readpage(page);
        unlock_page(page);
        return 0;
}

static int do_writepage(struct page *page, int len)
{
        int err = 0, i, blen;
        unsigned int block;
        void *addr;
        union ubifs_key key;
        struct inode *inode = page->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;

#ifdef UBIFS_DEBUG
        struct ubifs_inode *ui = ubifs_inode(inode);
        spin_lock(&ui->ui_lock);
        ubifs_assert(c, page->index <= ui->synced_i_size >> PAGE_SHIFT);
        spin_unlock(&ui->ui_lock);
#endif

        /* Update radix tree tags */
        set_page_writeback(page);

        addr = kmap(page);
        block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
        i = 0;
        while (len) {
                blen = min_t(int, len, UBIFS_BLOCK_SIZE);
                data_key_init(c, &key, inode->i_ino, block);
                err = ubifs_jnl_write_data(c, inode, &key, addr, blen);
                if (err)
                        break;
                if (++i >= UBIFS_BLOCKS_PER_PAGE)
                        break;
                block += 1;
                addr += blen;
                len -= blen;
        }
        if (err) {
                SetPageError(page);
                ubifs_err(c, "cannot write page %lu of inode %lu, error %d",
                          page->index, inode->i_ino, err);
                ubifs_ro_mode(c, err);
        }

        ubifs_assert(c, PagePrivate(page));
        if (PageChecked(page))
                release_new_page_budget(c);
        else
                release_existing_page_budget(c);

        atomic_long_dec(&c->dirty_pg_cnt);
        ClearPagePrivate(page);
        ClearPageChecked(page);

        kunmap(page);
        unlock_page(page);
        end_page_writeback(page);
        return err;
}
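
/*
 * A condensed note, inferred from the code below: when writing back dirty
 * inodes, VFS writes the data pages first and the inode itself last. If a
 * page beyond @ui->synced_i_size reached the flash before the inode, an
 * unclean reboot could leave data nodes beyond the on-media inode size. To
 * avoid that, 'ubifs_writepage()' first writes the inode whenever the page
 * being written lies beyond the synchronized size. The page that straddles
 * @i_size also has its tail zeroed out, because the page may be mmapped and
 * data beyond the file size must not reach the media.
 */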
static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct ubifs_inode *ui = ubifs_inode(inode);
        loff_t i_size = i_size_read(inode), synced_i_size;
        pgoff_t end_index = i_size >> PAGE_SHIFT;
        int err, len = i_size & (PAGE_SIZE - 1);
        void *kaddr;

        dbg_gen("ino %lu, pg %lu, pg flags %#lx",
                inode->i_ino, page->index, page->flags);
        ubifs_assert(c, PagePrivate(page));

        /* Is the page fully outside @i_size? (truncate in progress) */
        if (page->index > end_index || (page->index == end_index && !len)) {
                err = 0;
                goto out_unlock;
        }

        spin_lock(&ui->ui_lock);
        synced_i_size = ui->synced_i_size;
        spin_unlock(&ui->ui_lock);

        /* Is the page fully inside @i_size? */
        if (page->index < end_index) {
                if (page->index >= synced_i_size >> PAGE_SHIFT) {
                        err = inode->i_sb->s_op->write_inode(inode, NULL);
                        if (err)
                                goto out_unlock;
                        /*
                         * The inode has been written, but the write-buffer
                         * has not been synchronized, so in case of an
                         * unclean reboot we may end up with some pages
                         * beyond the inode size, but they would be in the
                         * journal (because commit flushes write-buffers) and
                         * recovery would deal with this.
                         */
                }
                return do_writepage(page, PAGE_SIZE);
        }

        /*
         * The page straddles @i_size. It must be zeroed out on each and
         * every writepage invocation because it may be mmapped. "A file is
         * mapped in multiples of the page size. For a file that is not a
         * multiple of the page size, the remaining memory is zeroed when
         * mapped, and writes to that region are not written out to the
         * file."
         */
        kaddr = kmap_atomic(page);
        memset(kaddr + len, 0, PAGE_SIZE - len);
        flush_dcache_page(page);
        kunmap_atomic(kaddr);

        if (i_size > synced_i_size) {
                err = inode->i_sb->s_op->write_inode(inode, NULL);
                if (err)
                        goto out_unlock;
        }

        return do_writepage(page, len);

out_unlock:
        unlock_page(page);
        return err;
}
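
/**
 * do_attr_changes - change inode attributes.
 * @inode: inode to change attributes for
 * @attr: describes attributes to change
 */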
static void do_attr_changes(struct inode *inode, const struct iattr *attr)
{
        if (attr->ia_valid & ATTR_UID)
                inode->i_uid = attr->ia_uid;
        if (attr->ia_valid & ATTR_GID)
                inode->i_gid = attr->ia_gid;
        if (attr->ia_valid & ATTR_ATIME)
                inode->i_atime = attr->ia_atime;
        if (attr->ia_valid & ATTR_MTIME)
                inode->i_mtime = attr->ia_mtime;
        if (attr->ia_valid & ATTR_CTIME)
                inode->i_ctime = attr->ia_ctime;
        if (attr->ia_valid & ATTR_MODE) {
                umode_t mode = attr->ia_mode;

                if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
                        mode &= ~S_ISGID;
                inode->i_mode = mode;
        }
}
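
/**
 * do_truncation - truncate an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call when the inode is
 * truncated to a smaller size. Returns zero in case of success and a
 * negative error code in case of failure.
 */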
static int do_truncation(struct ubifs_info *c, struct inode *inode,
                         const struct iattr *attr)
{
        int err;
        struct ubifs_budget_req req;
        loff_t old_size = inode->i_size, new_size = attr->ia_size;
        int offset = new_size & (UBIFS_BLOCK_SIZE - 1), budgeted = 1;
        struct ubifs_inode *ui = ubifs_inode(inode);

        dbg_gen("ino %lu, size %lld -> %lld", inode->i_ino, old_size, new_size);
        memset(&req, 0, sizeof(struct ubifs_budget_req));

        /*
         * If this is truncation to a smaller size, and we do not truncate
         * on a block boundary, budget for changing one data block, because
         * the last block will be re-written.
         */
        if (new_size & (UBIFS_BLOCK_SIZE - 1))
                req.dirtied_page = 1;

        req.dirtied_ino = 1;
        /* A funny way to budget for truncation node */
        req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ;
        err = ubifs_budget_space(c, &req);
        if (err) {
                /*
                 * Treat truncations to zero as deletion and always allow
                 * them, just like we do for '->unlink()'.
                 */
                if (new_size || err != -ENOSPC)
                        return err;
                budgeted = 0;
        }

        truncate_setsize(inode, new_size);

        if (offset) {
                pgoff_t index = new_size >> PAGE_SHIFT;
                struct page *page;

                page = find_lock_page(inode->i_mapping, index);
                if (page) {
                        if (PageDirty(page)) {
                                /*
                                 * 'ubifs_jnl_truncate()' will try to
                                 * truncate the last data node, but it
                                 * contains out-of-date data because the page
                                 * is dirty. Write the page now, so that
                                 * 'ubifs_jnl_truncate()' will see an already
                                 * truncated (and up to date) data node.
                                 */
                                ubifs_assert(c, PagePrivate(page));

                                clear_page_dirty_for_io(page);
                                if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
                                        offset = new_size &
                                                 (PAGE_SIZE - 1);
                                err = do_writepage(page, offset);
                                put_page(page);
                                if (err)
                                        goto out_budg;
                                /*
                                 * We could now tell 'ubifs_jnl_truncate()'
                                 * not to read the last block.
                                 */
                        } else {
                                /*
                                 * We could 'kmap()' the page and pass the
                                 * data to 'ubifs_jnl_truncate()' to save it
                                 * from having to read it.
                                 */
                                unlock_page(page);
                                put_page(page);
                        }
                }
        }

        mutex_lock(&ui->ui_mutex);
        ui->ui_size = inode->i_size;
        /* Truncation changes inode [mc]time */
        inode->i_mtime = inode->i_ctime = current_time(inode);
        /* Other attributes may be changed at the same time as well */
        do_attr_changes(inode, attr);
        err = ubifs_jnl_truncate(c, inode, old_size, new_size);
        mutex_unlock(&ui->ui_mutex);

out_budg:
        if (budgeted)
                ubifs_release_budget(c, &req);
        else {
                c->bi.nospace = c->bi.nospace_rp = 0;
                smp_wmb();
        }
        return err;
}
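
/**
 * do_setattr - change inode attributes.
 * @c: UBIFS file-system description object
 * @inode: inode to change attributes for
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call for all cases except
 * truncations to smaller size. Returns zero in case of success and a
 * negative error code in case of failure.
 */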
static int do_setattr(struct ubifs_info *c, struct inode *inode,
                      const struct iattr *attr)
{
        int err, release;
        loff_t new_size = attr->ia_size;
        struct ubifs_inode *ui = ubifs_inode(inode);
        struct ubifs_budget_req req = { .dirtied_ino = 1,
                                .dirtied_ino_d = ALIGN(ui->data_len, 8) };

        err = ubifs_budget_space(c, &req);
        if (err)
                return err;

        if (attr->ia_valid & ATTR_SIZE) {
                dbg_gen("size %lld -> %lld", inode->i_size, new_size);
                truncate_setsize(inode, new_size);
        }

        mutex_lock(&ui->ui_mutex);
        if (attr->ia_valid & ATTR_SIZE) {
                /* Truncation changes inode [mc]time */
                inode->i_mtime = inode->i_ctime = current_time(inode);
                /* 'truncate_setsize()' changed @i_size, update @ui_size */
                ui->ui_size = inode->i_size;
        }

        do_attr_changes(inode, attr);

        release = ui->dirty;
        if (attr->ia_valid & ATTR_SIZE)
                /*
                 * Inode length changed, so we have to make sure
                 * @I_DIRTY_DATASYNC is set.
                 */
                __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
        else
                mark_inode_dirty_sync(inode);
        mutex_unlock(&ui->ui_mutex);

        if (release)
                ubifs_release_budget(c, &req);
        if (IS_SYNC(inode))
                err = inode->i_sb->s_op->write_inode(inode, NULL);
        return err;
}

int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
{
        int err;
        struct inode *inode = d_inode(dentry);
        struct ubifs_info *c = inode->i_sb->s_fs_info;

        dbg_gen("ino %lu, mode %#x, ia_valid %#x",
                inode->i_ino, inode->i_mode, attr->ia_valid);
        err = setattr_prepare(dentry, attr);
        if (err)
                return err;

        err = dbg_check_synced_i_size(c, inode);
        if (err)
                return err;

        err = fscrypt_prepare_setattr(dentry, attr);
        if (err)
                return err;

        if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
                /* Truncation to a smaller size */
                err = do_truncation(c, inode, attr);
        else
                err = do_setattr(c, inode, attr);

        return err;
}

static void ubifs_invalidatepage(struct page *page, unsigned int offset,
                                 unsigned int length)
{
        struct inode *inode = page->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;

        ubifs_assert(c, PagePrivate(page));
        if (offset || length < PAGE_SIZE)
                /* Partial page remains dirty */
                return;

        if (PageChecked(page))
                release_new_page_budget(c);
        else
                release_existing_page_budget(c);

        atomic_long_dec(&c->dirty_pg_cnt);
        ClearPagePrivate(page);
        ClearPageChecked(page);
}
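
/**
 * ubifs_fsync - fsync operation.
 * @file: file to synchronize
 * @start: start of the range
 * @end: end of the range
 * @datasync: data synchronization or not
 *
 * A brief description, following the code below: write back the dirty pages
 * of the range, then the inode itself (unless this is a 'datasync' call and
 * the inode carries no unsynchronized size change), and finally synchronize
 * the write-buffers which may still contain nodes belonging to this inode.
 */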
int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = file->f_mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        int err;

        dbg_gen("syncing inode %lu", inode->i_ino);

        if (c->ro_mount)
                /*
                 * For some really strange reason VFS does not filter out
                 * 'fsync()' for R/O mounted file-systems as per 2.6.39.
                 */
                return 0;

        err = file_write_and_wait_range(file, start, end);
        if (err)
                return err;
        inode_lock(inode);

        /* Synchronize the inode unless this is a 'datasync()' call. */
        if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
                err = inode->i_sb->s_op->write_inode(inode, NULL);
                if (err)
                        goto out;
        }

        /*
         * Nodes related to this inode may still sit in a write-buffer.
         * Flush them.
         */
        err = ubifs_sync_wbufs_by_inode(c, inode);
out:
        inode_unlock(inode);
        return err;
}
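
/**
 * mctime_update_needed - check if mtime or ctime update is needed.
 * @inode: the inode to do the check for
 * @now: current time
 *
 * This helper function checks if the inode mtime/ctime should be updated or
 * not. If current values of the time-stamps are within the UBIFS inode time
 * granularity, they are not updated. This is an optimization.
 */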
static inline int mctime_update_needed(const struct inode *inode,
                                       const struct timespec64 *now)
{
        if (!timespec64_equal(&inode->i_mtime, now) ||
            !timespec64_equal(&inode->i_ctime, now))
                return 1;
        return 0;
}
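
/**
 * ubifs_update_time - update time of inode.
 * @inode: inode to update
 * @time: timespec structure to hold the current time value
 * @flags: time updating control flag determines updating
 *	   which time fields of @inode
 *
 * This function updates time of the inode.
 */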
int ubifs_update_time(struct inode *inode, struct timespec64 *time,
                      int flags)
{
        struct ubifs_inode *ui = ubifs_inode(inode);
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct ubifs_budget_req req = { .dirtied_ino = 1,
                        .dirtied_ino_d = ALIGN(ui->data_len, 8) };
        int err, release;

        if (!IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT))
                return generic_update_time(inode, time, flags);

        err = ubifs_budget_space(c, &req);
        if (err)
                return err;

        mutex_lock(&ui->ui_mutex);
        if (flags & S_ATIME)
                inode->i_atime = *time;
        if (flags & S_CTIME)
                inode->i_ctime = *time;
        if (flags & S_MTIME)
                inode->i_mtime = *time;

        release = ui->dirty;
        __mark_inode_dirty(inode, I_DIRTY_SYNC);
        mutex_unlock(&ui->ui_mutex);
        if (release)
                ubifs_release_budget(c, &req);
        return 0;
}
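
/**
 * update_mctime - update mtime and ctime of an inode.
 * @inode: inode to update
 *
 * This function updates mtime and ctime of the inode if it is not equivalent
 * to current time. Returns zero in case of success and a negative error code
 * in case of failure.
 */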
static int update_mctime(struct inode *inode)
{
        struct timespec64 now = current_time(inode);
        struct ubifs_inode *ui = ubifs_inode(inode);
        struct ubifs_info *c = inode->i_sb->s_fs_info;

        if (mctime_update_needed(inode, &now)) {
                int err, release;
                struct ubifs_budget_req req = { .dirtied_ino = 1,
                                .dirtied_ino_d = ALIGN(ui->data_len, 8) };

                err = ubifs_budget_space(c, &req);
                if (err)
                        return err;

                mutex_lock(&ui->ui_mutex);
                inode->i_mtime = inode->i_ctime = current_time(inode);
                release = ui->dirty;
                mark_inode_dirty_sync(inode);
                mutex_unlock(&ui->ui_mutex);
                if (release)
                        ubifs_release_budget(c, &req);
        }

        return 0;
}

static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        int err = update_mctime(file_inode(iocb->ki_filp));

        if (err)
                return err;

        return generic_file_write_iter(iocb, from);
}

static int ubifs_set_page_dirty(struct page *page)
{
        int ret;
        struct inode *inode = page->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;

        ret = __set_page_dirty_nobuffers(page);
        /*
         * An attempt to dirty a page without budgeting for it - should not
         * happen.
         */
        ubifs_assert(c, ret == 0);
        return ret;
}

#ifdef CONFIG_MIGRATION
static int ubifs_migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page, enum migrate_mode mode)
{
        int rc;

        rc = migrate_page_move_mapping(mapping, newpage, page, 0);
        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;

        if (PagePrivate(page)) {
                ClearPagePrivate(page);
                SetPagePrivate(newpage);
        }

        if (mode != MIGRATE_SYNC_NO_COPY)
                migrate_page_copy(newpage, page);
        else
                migrate_page_states(newpage, page);
        return MIGRATEPAGE_SUCCESS;
}
#endif

static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
{
        struct inode *inode = page->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;

        /*
         * An attempt to release a dirty page without budgeting for it -
         * should not happen.
         */
        if (PageWriteback(page))
                return 0;
        ubifs_assert(c, PagePrivate(page));
        ubifs_assert(c, 0);
        ClearPagePrivate(page);
        ClearPageChecked(page);
        return 1;
}
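
/*
 * An mmap()ed file has taken a write protection fault and is being made
 * writable. UBIFS must ensure the page is budgeted for.
 */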
static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct timespec64 now = current_time(inode);
        struct ubifs_budget_req req = { .new_page = 1 };
        int err, update_time;

        dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index,
                i_size_read(inode));
        ubifs_assert(c, !c->ro_media && !c->ro_mount);

        if (unlikely(c->ro_error))
                return VM_FAULT_SIGBUS;

        /*
         * We have not locked @page so far, so we may budget for changing the
         * page. Note, we cannot do this after we locked the page, because
         * budgeting may cause write-back which would cause deadlock.
         *
         * At the moment we do not know whether the page is dirty or not, so
         * we assume that it is not and budget for a new page. We could look
         * at the @PG_private flag and figure this out, but we may race with
         * write-back and the page state may change by the time we lock it,
         * so this would need additional care. We do not bother with this at
         * the moment, although it might be a good idea to do. Instead, we
         * allocate budget for a new page and amend it later on if the page
         * was in fact dirty.
         *
         * The budgeting-related logic of this function is similar to what we
         * do in 'ubifs_write_begin()' and 'ubifs_write_end()'. Glance there
         * for more comments.
         */
        update_time = mctime_update_needed(inode, &now);
        if (update_time)
                /*
                 * We have to change inode time stamp which requires extra
                 * budgeting.
                 */
                req.dirtied_ino = 1;

        err = ubifs_budget_space(c, &req);
        if (unlikely(err)) {
                if (err == -ENOSPC)
                        ubifs_warn(c, "out of space for mmapped file (inode number %lu)",
                                   inode->i_ino);
                return VM_FAULT_SIGBUS;
        }

        lock_page(page);
        if (unlikely(page->mapping != inode->i_mapping ||
                     page_offset(page) > i_size_read(inode))) {
                /* Page got truncated out from underneath us */
                goto sigbus;
        }

        if (PagePrivate(page))
                release_new_page_budget(c);
        else {
                if (!PageChecked(page))
                        ubifs_convert_page_budget(c);
                SetPagePrivate(page);
                atomic_long_inc(&c->dirty_pg_cnt);
                __set_page_dirty_nobuffers(page);
        }

        if (update_time) {
                int release;
                struct ubifs_inode *ui = ubifs_inode(inode);

                mutex_lock(&ui->ui_mutex);
                inode->i_mtime = inode->i_ctime = current_time(inode);
                release = ui->dirty;
                mark_inode_dirty_sync(inode);
                mutex_unlock(&ui->ui_mutex);
                if (release)
                        ubifs_release_dirty_inode_budget(c, ui);
        }

        wait_for_stable_page(page);
        return VM_FAULT_LOCKED;

sigbus:
        unlock_page(page);
        ubifs_release_budget(c, &req);
        return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct ubifs_file_vm_ops = {
        .fault        = filemap_fault,
        .map_pages    = filemap_map_pages,
        .page_mkwrite = ubifs_vm_page_mkwrite,
};

static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        int err;

        err = generic_file_mmap(file, vma);
        if (err)
                return err;
        vma->vm_ops = &ubifs_file_vm_ops;

        if (IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT))
                file_accessed(file);

        return 0;
}

static const char *ubifs_get_link(struct dentry *dentry,
                                  struct inode *inode,
                                  struct delayed_call *done)
{
        struct ubifs_inode *ui = ubifs_inode(inode);

        if (!IS_ENCRYPTED(inode))
                return ui->data;

        if (!dentry)
                return ERR_PTR(-ECHILD);

        return fscrypt_get_symlink(inode, ui->data, ui->data_len, done);
}

const struct address_space_operations ubifs_file_address_operations = {
        .readpage       = ubifs_readpage,
        .writepage      = ubifs_writepage,
        .write_begin    = ubifs_write_begin,
        .write_end      = ubifs_write_end,
        .invalidatepage = ubifs_invalidatepage,
        .set_page_dirty = ubifs_set_page_dirty,
#ifdef CONFIG_MIGRATION
        .migratepage    = ubifs_migrate_page,
#endif
        .releasepage    = ubifs_releasepage,
};

const struct inode_operations ubifs_file_inode_operations = {
        .setattr     = ubifs_setattr,
        .getattr     = ubifs_getattr,
#ifdef CONFIG_UBIFS_FS_XATTR
        .listxattr   = ubifs_listxattr,
#endif
        .update_time = ubifs_update_time,
};

const struct inode_operations ubifs_symlink_inode_operations = {
        .get_link    = ubifs_get_link,
        .setattr     = ubifs_setattr,
        .getattr     = ubifs_getattr,
#ifdef CONFIG_UBIFS_FS_XATTR
        .listxattr   = ubifs_listxattr,
#endif
        .update_time = ubifs_update_time,
};

const struct file_operations ubifs_file_operations = {
        .llseek         = generic_file_llseek,
        .read_iter      = generic_file_read_iter,
        .write_iter     = ubifs_write_iter,
        .mmap           = ubifs_file_mmap,
        .fsync          = ubifs_fsync,
        .unlocked_ioctl = ubifs_ioctl,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .open           = fscrypt_file_open,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ubifs_compat_ioctl,
#endif
};