This source file includes the following definitions:
- ext2_inode_is_fast_symlink
- ext2_write_failed
- ext2_evict_inode
- add_chain
- verify_chain
- ext2_block_to_path
- ext2_get_branch
- ext2_find_near
- ext2_find_goal
- ext2_blks_to_allocate
- ext2_alloc_blocks
- ext2_alloc_branch
- ext2_splice_branch
- ext2_get_blocks
- ext2_get_block
- ext2_iomap_begin
- ext2_iomap_end
- ext2_fiemap
- ext2_writepage
- ext2_readpage
- ext2_readpages
- ext2_write_begin
- ext2_write_end
- ext2_nobh_write_begin
- ext2_nobh_writepage
- ext2_bmap
- ext2_direct_IO
- ext2_writepages
- ext2_dax_writepages
- all_zeroes
- ext2_find_shared
- ext2_free_data
- ext2_free_branches
- __ext2_truncate_blocks
- ext2_truncate_blocks
- ext2_setsize
- ext2_get_inode
- ext2_set_inode_flags
- ext2_set_file_ops
- ext2_iget
- __ext2_write_inode
- ext2_write_inode
- ext2_getattr
- ext2_setattr
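/*
 * linux/fs/ext2/inode.c
 *
 * ext2 inode handling: logical-to-physical block mapping and allocation,
 * truncation, and reading and writing of on-disk inodes.
 */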
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/fiemap.h>
#include <linux/iomap.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include "ext2.h"
#include "acl.h"
#include "xattr.h"

static int __ext2_write_inode(struct inode *inode, int do_sync);
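/*
 * Test whether an inode is a fast symlink: the target is stored in the
 * inode's i_data area itself, so the only blocks charged to the inode
 * (if any) belong to its extended attribute block.
 */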
static inline int ext2_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT2_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) &&
		inode->i_blocks - ea_blocks == 0);
}

static void ext2_truncate_blocks(struct inode *inode, loff_t offset);
static void ext2_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		ext2_truncate_blocks(inode, inode->i_size);
	}
}
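/*
 * Called at the last iput() if i_nlink is zero: write the inode out with
 * dtime set, then release its blocks, xattr block and the on-disk inode.
 */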
void ext2_evict_inode(struct inode *inode)
{
	struct ext2_block_alloc_info *rsv;
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode)) {
		want_delete = 1;
		dquot_initialize(inode);
	} else {
		dquot_drop(inode);
	}

	truncate_inode_pages_final(&inode->i_data);

	if (want_delete) {
		sb_start_intwrite(inode->i_sb);

		EXT2_I(inode)->i_dtime = ktime_get_real_seconds();
		mark_inode_dirty(inode);
		__ext2_write_inode(inode, inode_needs_sync(inode));

		inode->i_size = 0;
		if (inode->i_blocks)
			ext2_truncate_blocks(inode, 0);
		ext2_xattr_delete_inode(inode);
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	ext2_discard_reservation(inode);
	rsv = EXT2_I(inode)->i_block_alloc_info;
	EXT2_I(inode)->i_block_alloc_info = NULL;
	if (unlikely(rsv))
		kfree(rsv);

	if (want_delete) {
		ext2_free_inode(inode);
		sb_end_intwrite(inode->i_sb);
	}
}

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;
static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}
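/*
 * ext2_block_to_path - parse a logical block number into an array of offsets
 *
 * ext2 stores the location of a file's data in a tree of block pointers
 * anchored in the inode: twelve direct pointers followed by one indirect,
 * one double-indirect and one triple-indirect pointer.  This helper turns
 * @i_block into a path through that tree: the return value is the depth
 * of the path (0 on error) and offsets[n] is the index to follow at level
 * n.  When @boundary is non-NULL it is set to the number of pointer slots
 * left before the current pointer block runs out, which callers use to
 * bound contiguous allocations.
 */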
static int ext2_block_to_path(struct inode *inode,
			long i_block, int offsets[4], int *boundary)
{
	int ptrs = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT2_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext2_msg(inode->i_sb, KERN_WARNING,
			"warning: %s: block < 0", __func__);
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT2_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT2_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT2_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext2_msg(inode->i_sb, KERN_WARNING,
			"warning: %s: block is too big", __func__);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));

	return n;
}
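/*
 * ext2_get_branch - read the chain of indirect blocks leading to a block
 *
 * Walks the path computed by ext2_block_to_path(), filling @chain with a
 * (pointer, key, buffer_head) triple per level.  Returns NULL if the full
 * depth was traversed (the block exists), or a pointer to the last
 * complete element if a hole was hit.  On failure *@err is set: -EIO for
 * a read error, -EAGAIN if a concurrent truncate changed the chain under
 * us (detected with verify_chain() under i_meta_lock).
 */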
static Indirect *ext2_get_branch(struct inode *inode,
				 int depth,
				 int *offsets,
				 Indirect chain[4],
				 int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;

	add_chain(chain, NULL, EXT2_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		read_lock(&EXT2_I(inode)->i_meta_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
		read_unlock(&EXT2_I(inode)->i_meta_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&EXT2_I(inode)->i_meta_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
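/*
 * ext2_find_near - pick an allocation goal with good locality
 *
 * Prefers, in order: the nearest preceding allocated pointer in the same
 * pointer block, the indirect block itself, and finally a spot in the
 * inode's block group offset by a PID-derived "colour" so that parallel
 * writers spread out instead of contending for the same blocks.
 */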
static ext2_fsblk_t ext2_find_near(struct inode *inode, Indirect *ind)
{
	struct ext2_inode_info *ei = EXT2_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext2_fsblk_t bg_start;
	ext2_fsblk_t colour;

	for (p = ind->p - 1; p >= start; p--)
		if (*p)
			return le32_to_cpu(*p);

	if (ind->bh)
		return ind->bh->b_blocknr;

	bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group);
	colour = (current->pid % 16) *
			(EXT2_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour;
}
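/*
 * ext2_find_goal - choose a goal block for a new allocation
 *
 * If this allocation continues the last one recorded in the per-inode
 * reservation info, aim for the block right after the previous physical
 * block; otherwise fall back to the locality heuristic in
 * ext2_find_near().
 */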
static inline ext2_fsblk_t ext2_find_goal(struct inode *inode, long block,
					  Indirect *partial)
{
	struct ext2_block_alloc_info *block_i;

	block_i = EXT2_I(inode)->i_block_alloc_info;

	if (block_i && (block == block_i->last_alloc_logical_block + 1)
	    && (block_i->last_alloc_physical_block != 0)) {
		return block_i->last_alloc_physical_block + 1;
	}

	return ext2_find_near(inode, partial);
}
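/*
 * ext2_blks_to_allocate - count how many blocks to allocate in one go
 *
 * @k is the number of missing indirect levels.  If any indirect blocks
 * are still needed (k > 0) we may map at most @blks direct blocks, capped
 * at the boundary of the (new, hence empty) pointer block.  Otherwise we
 * count contiguous empty slots after branch[0].p, again stopping at the
 * boundary.
 */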
static int
ext2_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
		      int blocks_to_boundary)
{
	unsigned long count = 0;

	if (k > 0) {
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary
		&& le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
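/*
 * ext2_alloc_blocks - allocate the blocks needed for a branch
 *
 * Tries to allocate @indirect_blks metadata blocks plus up to @blks data
 * blocks near @goal.  ext2_new_blocks() may return fewer blocks than
 * requested, so we loop until the indirect blocks plus at least one data
 * block are covered.  The new block numbers are returned in @new_blocks[];
 * the return value is the number of data blocks obtained, which start at
 * new_blocks[indirect_blks] and are physically contiguous.  On error the
 * already-allocated blocks are freed and *@err is set.
 */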
static int ext2_alloc_blocks(struct inode *inode,
			ext2_fsblk_t goal, int indirect_blks, int blks,
			ext2_fsblk_t new_blocks[4], int *err)
{
	int target, i;
	unsigned long count = 0;
	int index = 0;
	ext2_fsblk_t current_block = 0;
	int ret = 0;

	target = blks + indirect_blks;

	while (1) {
		count = target;

		current_block = ext2_new_blocks(inode, goal, &count, err);
		if (*err)
			goto failed_out;

		target -= count;

		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}

		if (count > 0)
			break;
	}

	new_blocks[index] = current_block;

	ret = count;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	if (index)
		mark_inode_dirty(inode);
	return ret;
}
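/*
 * ext2_alloc_branch - allocate and set up a chain of blocks
 *
 * Allocates the missing indirect blocks plus the data blocks, then links
 * each new indirect block to the next: every buffer is zeroed, the key of
 * the following level is stored at the right offset, and the last
 * indirect block receives the whole run of contiguous data block numbers.
 * The branch is complete but still unattached to the inode when we
 * return; ext2_splice_branch() performs the final, visible link.  On
 * failure all newly allocated blocks are released again.
 */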
static int ext2_alloc_branch(struct inode *inode,
			int indirect_blks, int *blks, ext2_fsblk_t goal,
			int *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext2_fsblk_t new_blocks[4];
	ext2_fsblk_t current_block;

	num = ext2_alloc_blocks(inode, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);

	for (n = 1; n <= indirect_blks; n++) {
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		branch[n].bh = bh;
		lock_buffer(bh);
		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);

		if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
			sync_dirty_buffer(bh);
	}
	*blks = num;
	return err;

failed:
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < indirect_blks; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	ext2_free_blocks(inode, new_blocks[i], num);
	return err;
}
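/*
 * ext2_splice_branch - splice the allocated branch onto the inode
 *
 * Stores the key of the new branch into the slot it hangs off (in the
 * inode's i_data or in a parent indirect block), fills in the remaining
 * direct block pointers next to it when no new indirect blocks were
 * needed (num == 0), updates the reservation window hints and marks the
 * owner dirty.  This store is what makes the new blocks visible to
 * readers.
 */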
static void ext2_splice_branch(struct inode *inode,
			long block, Indirect *where, int num, int blks)
{
	int i;
	struct ext2_block_alloc_info *block_i;
	ext2_fsblk_t current_block;

	block_i = EXT2_I(inode)->i_block_alloc_info;

	*where->p = where->key;

	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	if (block_i) {
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
				le32_to_cpu(where[num].key) + blks - 1;
	}

	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
}
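/*
 * ext2_get_blocks - map a run of logical blocks to disk blocks
 *
 * The core mapping routine behind ext2_get_block() and the iomap hooks.
 * First it walks the existing indirect chain; if the block is already
 * mapped it simply extends the run over physically contiguous neighbours.
 * On a hole with @create set, it re-validates the chain under
 * truncate_mutex (a truncate may have raced with us), picks a goal,
 * allocates the missing indirect blocks plus data blocks, and splices
 * the branch in.  For DAX inodes the new blocks are zeroed first, since
 * they become user-visible as soon as they are mapped.  Returns the
 * number of mapped blocks (results in *@bno, *@new, *@boundary) or a
 * negative error.
 */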
static int ext2_get_blocks(struct inode *inode,
			   sector_t iblock, unsigned long maxblocks,
			   u32 *bno, bool *new, bool *boundary,
			   int create)
{
	int err;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext2_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext2_inode_info *ei = EXT2_I(inode);
	int count = 0;
	ext2_fsblk_t first_block = 0;

	BUG_ON(maxblocks == 0);

	depth = ext2_block_to_path(inode, iblock, offsets, &blocks_to_boundary);

	if (depth == 0)
		return -EIO;

	partial = ext2_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* map more blocks while they are physically contiguous */
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext2_fsblk_t blk;

			if (!verify_chain(chain, chain + depth - 1)) {
				/*
				 * The indirect block may have been removed
				 * by a truncate while we were reading it:
				 * forget what we have and reread the chain.
				 */
				err = -EAGAIN;
				count = 0;
				partial = chain + depth - 1;
				break;
			}
			blk = le32_to_cpu(*(chain[depth-1].p + count));
			if (blk == first_block + count)
				count++;
			else
				break;
		}
		if (err != -EAGAIN)
			goto got_it;
	}

	/* A hole: nothing to do for a plain lookup or after a read error */
	if (!create || err == -EIO)
		goto cleanup;

	mutex_lock(&ei->truncate_mutex);
	/*
	 * Re-validate under truncate_mutex: another task may have truncated
	 * the branch or allocated the block since our unlocked walk above.
	 * Holding the mutex guarantees the chain stays ours until we splice
	 * the new branch into the tree.
	 */
	if (err == -EAGAIN || !verify_chain(chain, partial)) {
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
		partial = ext2_get_branch(inode, depth, offsets, chain, &err);
		if (!partial) {
			count++;
			mutex_unlock(&ei->truncate_mutex);
			goto got_it;
		}

		if (err) {
			mutex_unlock(&ei->truncate_mutex);
			goto cleanup;
		}
	}

	/* Set up reservation window info for regular files */
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext2_init_block_alloc_info(inode);

	goal = ext2_find_goal(inode, iblock, partial);

	/* the number of indirect blocks missing from the branch */
	indirect_blks = (chain + depth) - partial - 1;

	/* how many data blocks to allocate for this branch */
	count = ext2_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);

	err = ext2_alloc_branch(inode, indirect_blks, &count, goal,
				offsets + (partial - chain), partial);

	if (err) {
		mutex_unlock(&ei->truncate_mutex);
		goto cleanup;
	}

	if (IS_DAX(inode)) {
		/* Drop any stale buffer_head aliases of the new blocks. */
		clean_bdev_aliases(inode->i_sb->s_bdev,
				   le32_to_cpu(chain[depth-1].key),
				   count);
		/*
		 * DAX blocks become visible to userspace as soon as they
		 * are mapped, so they must be zeroed before we return.
		 */
		err = sb_issue_zeroout(inode->i_sb,
				le32_to_cpu(chain[depth-1].key), count,
				GFP_NOFS);
		if (err) {
			mutex_unlock(&ei->truncate_mutex);
			goto cleanup;
		}
	}
	*new = true;

	ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
	mutex_unlock(&ei->truncate_mutex);
got_it:
	if (count > blocks_to_boundary)
		*boundary = true;
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;
cleanup:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	if (err > 0)
		*bno = le32_to_cpu(chain[depth-1].key);
	return err;
}
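/*
 * ext2_get_block is the classic buffer_head-based entry point used by
 * the generic page cache and direct I/O paths.
 */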
int ext2_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh_result, int create)
{
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
	bool new = false, boundary = false;
	u32 bno;
	int ret;

	ret = ext2_get_blocks(inode, iblock, max_blocks, &bno, &new, &boundary,
			create);
	if (ret <= 0)
		return ret;

	map_bh(bh_result, inode->i_sb, bno);
	bh_result->b_size = (ret << inode->i_blkbits);
	if (new)
		set_buffer_new(bh_result);
	if (boundary)
		set_buffer_boundary(bh_result);
	return 0;
}
#ifdef CONFIG_FS_DAX
static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned flags, struct iomap *iomap)
{
	unsigned int blkbits = inode->i_blkbits;
	unsigned long first_block = offset >> blkbits;
	unsigned long max_blocks = (length + (1 << blkbits) - 1) >> blkbits;
	struct ext2_sb_info *sbi = EXT2_SB(inode->i_sb);
	bool new = false, boundary = false;
	u32 bno;
	int ret;

	ret = ext2_get_blocks(inode, first_block, max_blocks,
			&bno, &new, &boundary, flags & IOMAP_WRITE);
	if (ret < 0)
		return ret;

	iomap->flags = 0;
	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = (u64)first_block << blkbits;
	iomap->dax_dev = sbi->s_daxdev;

	if (ret == 0) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->length = 1 << blkbits;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = (u64)bno << blkbits;
		iomap->length = (u64)ret << blkbits;
		iomap->flags |= IOMAP_F_MERGED;
	}

	if (new)
		iomap->flags |= IOMAP_F_NEW;
	return 0;
}

static int
ext2_iomap_end(struct inode *inode, loff_t offset, loff_t length,
		ssize_t written, unsigned flags, struct iomap *iomap)
{
	if (iomap->type == IOMAP_MAPPED &&
	    written < length &&
	    (flags & IOMAP_WRITE))
		ext2_write_failed(inode->i_mapping, offset + length);
	return 0;
}

const struct iomap_ops ext2_iomap_ops = {
	.iomap_begin		= ext2_iomap_begin,
	.iomap_end		= ext2_iomap_end,
};
#else
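/* Define empty ops for the !CONFIG_FS_DAX case to avoid compile errors. */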
const struct iomap_ops ext2_iomap_ops;
#endif /* CONFIG_FS_DAX */
int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo, start, len,
				    ext2_get_block);
}

static int ext2_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, ext2_get_block, wbc);
}

static int ext2_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, ext2_get_block);
}

static int
ext2_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, ext2_get_block);
}

static int
ext2_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep,
				ext2_get_block);
	if (ret < 0)
		ext2_write_failed(mapping, pos + len);
	return ret;
}

static int ext2_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (ret < len)
		ext2_write_failed(mapping, pos + len);
	return ret;
}

static int
ext2_nobh_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	int ret;

	ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
			       ext2_get_block);
	if (ret < 0)
		ext2_write_failed(mapping, pos + len);
	return ret;
}

static int ext2_nobh_writepage(struct page *page,
			struct writeback_control *wbc)
{
	return nobh_writepage(page, ext2_get_block, wbc);
}

static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, ext2_get_block);
}

static ssize_t
ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	ret = blockdev_direct_IO(iocb, inode, iter, ext2_get_block);
	if (ret < 0 && iov_iter_rw(iter) == WRITE)
		ext2_write_failed(mapping, offset + count);
	return ret;
}

static int
ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, ext2_get_block);
}

static int
ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	return dax_writeback_mapping_range(mapping,
			mapping->host->i_sb->s_bdev, wbc);
}

const struct address_space_operations ext2_aops = {
	.readpage		= ext2_readpage,
	.readpages		= ext2_readpages,
	.writepage		= ext2_writepage,
	.write_begin		= ext2_write_begin,
	.write_end		= ext2_write_end,
	.bmap			= ext2_bmap,
	.direct_IO		= ext2_direct_IO,
	.writepages		= ext2_writepages,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

const struct address_space_operations ext2_nobh_aops = {
	.readpage		= ext2_readpage,
	.readpages		= ext2_readpages,
	.writepage		= ext2_nobh_writepage,
	.write_begin		= ext2_nobh_write_begin,
	.write_end		= nobh_write_end,
	.bmap			= ext2_bmap,
	.direct_IO		= ext2_direct_IO,
	.writepages		= ext2_writepages,
	.migratepage		= buffer_migrate_page,
	.error_remove_page	= generic_error_remove_page,
};

static const struct address_space_operations ext2_dax_aops = {
	.writepages		= ext2_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
};
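/*
 * all_zeroes - check that a range of block pointers is empty
 */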
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}
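/*
 * ext2_find_shared - find the indirect blocks for partial truncation
 *
 * When the truncation point falls in the middle of the tree, the top of
 * the branch being removed may still be shared with blocks that survive.
 * This helper walks down to the deepest indirect block that must be kept
 * and detaches the doomed subtree under i_meta_lock: the key of its top
 * is copied into *@top and zeroed in place, so concurrent readers can no
 * longer reach it.  Returns the partial chain for the caller to free.
 */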
static Indirect *ext2_find_shared(struct inode *inode,
				int depth,
				int offsets[4],
				Indirect chain[4],
				__le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext2_get_branch(inode, k, offsets, chain, &err);
	if (!partial)
		partial = chain + k-1;
	/*
	 * The branch may have grown a continuation since we last looked at
	 * it; in that case everything here survives and the (new) top is
	 * not ours to free.
	 */
	write_lock(&EXT2_I(inode)->i_meta_lock);
	if (!partial->key && *partial->p) {
		write_unlock(&EXT2_I(inode)->i_meta_lock);
		goto no_top;
	}
	for (p = partial; p > chain && all_zeroes((__le32*)p->bh->b_data, p->p); p--)
		;
	/*
	 * p now points at the last block that must survive.  Detach the
	 * rest of the branch before unlocking; when the doomed part hangs
	 * off the bottom of the surviving chain (and not off the inode
	 * itself), just decrementing p->p is enough to hand it over to the
	 * caller's sweep.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&EXT2_I(inode)->i_meta_lock);

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
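/*
 * ext2_free_data - free a list of data blocks
 *
 * Zeroes the pointers in [@p, @q) and frees the referenced blocks,
 * batching physically contiguous runs into single ext2_free_blocks()
 * calls.
 */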
static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
{
	unsigned long block_to_free = 0, count = 0;
	unsigned long nr;

	for ( ; p < q ; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			*p = 0;
			/* accumulate blocks to free if they're contiguous */
			if (count == 0)
				goto free_this;
			else if (block_to_free == nr - count)
				count++;
			else {
				ext2_free_blocks(inode, block_to_free, count);
				mark_inode_dirty(inode);
			free_this:
				block_to_free = nr;
				count = 1;
			}
		}
	}
	if (count > 0) {
		ext2_free_blocks(inode, block_to_free, count);
		mark_inode_dirty(inode);
	}
}
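/*
 * ext2_free_branches - free an array of branches
 *
 * Recursively frees the subtrees referenced by the pointers in [@p, @q):
 * for @depth > 0 each pointer names an indirect block whose children are
 * freed first, then the block itself; at depth 0 this degenerates to
 * ext2_free_data().
 */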
static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
{
	struct buffer_head *bh;
	unsigned long nr;

	if (depth--) {
		int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
		for ( ; p < q ; p++) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;
			*p = 0;
			bh = sb_bread(inode->i_sb, nr);
			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				ext2_error(inode->i_sb, "ext2_free_branches",
					"Read failure, inode=%ld, block=%ld",
					inode->i_ino, nr);
				continue;
			}
			ext2_free_branches(inode,
					   (__le32*)bh->b_data,
					   (__le32*)bh->b_data + addr_per_block,
					   depth);
			bforget(bh);
			ext2_free_blocks(inode, nr, 1);
			mark_inode_dirty(inode);
		}
	} else
		ext2_free_data(inode, p, q);
}
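/*
 * __ext2_truncate_blocks - release all blocks beyond the new size
 *
 * Frees whole indirect subtrees past the truncation point and, via
 * ext2_find_shared(), carefully detaches the partially truncated branch.
 * Callers must hold dax_sem on DAX builds and must have trimmed the page
 * cache already; truncate_mutex serializes us against block allocation
 * in ext2_get_blocks().
 */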
static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
{
	__le32 *i_data = EXT2_I(inode)->i_data;
	struct ext2_inode_info *ei = EXT2_I(inode);
	int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n;
	long iblock;
	unsigned blocksize;
	blocksize = inode->i_sb->s_blocksize;
	iblock = (offset + blocksize - 1) >> EXT2_BLOCK_SIZE_BITS(inode->i_sb);

#ifdef CONFIG_FS_DAX
	WARN_ON(!rwsem_is_locked(&ei->dax_sem));
#endif

	n = ext2_block_to_path(inode, iblock, offsets, NULL);
	if (n == 0)
		return;

	/*
	 * From here we block out all ext2_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	mutex_lock(&ei->truncate_mutex);

	if (n == 1) {
		ext2_free_data(inode, i_data + offsets[0],
					i_data + EXT2_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext2_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of the shared branch (already detached) */
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mark_buffer_dirty_inode(partial->bh, inode);
		ext2_free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext2_free_branches(inode,
				   partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		mark_buffer_dirty_inode(partial->bh, inode);
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT2_IND_BLOCK];
		if (nr) {
			i_data[EXT2_IND_BLOCK] = 0;
			mark_inode_dirty(inode);
			ext2_free_branches(inode, &nr, &nr+1, 1);
		}
		/* fall through */
	case EXT2_IND_BLOCK:
		nr = i_data[EXT2_DIND_BLOCK];
		if (nr) {
			i_data[EXT2_DIND_BLOCK] = 0;
			mark_inode_dirty(inode);
			ext2_free_branches(inode, &nr, &nr+1, 2);
		}
		/* fall through */
	case EXT2_DIND_BLOCK:
		nr = i_data[EXT2_TIND_BLOCK];
		if (nr) {
			i_data[EXT2_TIND_BLOCK] = 0;
			mark_inode_dirty(inode);
			ext2_free_branches(inode, &nr, &nr+1, 3);
		}
		/* fall through */
	case EXT2_TIND_BLOCK:
		;
	}

	ext2_discard_reservation(inode);

	mutex_unlock(&ei->truncate_mutex);
}
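/*
 * ext2_truncate_blocks - locking wrapper around __ext2_truncate_blocks();
 * takes dax_sem so DAX faults cannot race with the block teardown.
 */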
static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
{
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)))
		return;
	if (ext2_inode_is_fast_symlink(inode))
		return;

	dax_sem_down_write(EXT2_I(inode));
	__ext2_truncate_blocks(inode, offset);
	dax_sem_up_write(EXT2_I(inode));
}
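/*
 * ext2_setsize - change the file size
 *
 * Zeroes the partial tail block (via iomap for DAX, the nobh or
 * buffer_head paths otherwise), updates i_size with truncate_setsize()
 * and releases the now-unused blocks.
 */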
static int ext2_setsize(struct inode *inode, loff_t newsize)
{
	int error;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (ext2_inode_is_fast_symlink(inode))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	inode_dio_wait(inode);

	if (IS_DAX(inode)) {
		error = iomap_zero_range(inode, newsize,
					 PAGE_ALIGN(newsize) - newsize, NULL,
					 &ext2_iomap_ops);
	} else if (test_opt(inode->i_sb, NOBH))
		error = nobh_truncate_page(inode->i_mapping,
				newsize, ext2_get_block);
	else
		error = block_truncate_page(inode->i_mapping,
				newsize, ext2_get_block);
	if (error)
		return error;

	dax_sem_down_write(EXT2_I(inode));
	truncate_setsize(inode, newsize);
	__ext2_truncate_blocks(inode, newsize);
	dax_sem_up_write(EXT2_I(inode));

	inode->i_mtime = inode->i_ctime = current_time(inode);
	if (inode_needs_sync(inode)) {
		sync_mapping_buffers(inode->i_mapping);
		sync_inode_metadata(inode, 1);
	} else {
		mark_inode_dirty(inode);
	}

	return 0;
}
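/*
 * ext2_get_inode - map an inode number to its on-disk location
 *
 * Validates @ino, finds its group descriptor, reads the inode table
 * block that contains it and returns a pointer into that buffer (*@p
 * holds the buffer_head the caller must release).
 */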
static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino,
					struct buffer_head **p)
{
	struct buffer_head *bh;
	unsigned long block_group;
	unsigned long block;
	unsigned long offset;
	struct ext2_group_desc *gdp;

	*p = NULL;
	if ((ino != EXT2_ROOT_INO && ino < EXT2_FIRST_INO(sb)) ||
	    ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
		goto Einval;

	block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
	gdp = ext2_get_group_desc(sb, block_group, NULL);
	if (!gdp)
		goto Egdp;
	/*
	 * Figure out the offset within the block group inode table.
	 */
	offset = ((ino - 1) % EXT2_INODES_PER_GROUP(sb)) * EXT2_INODE_SIZE(sb);
	block = le32_to_cpu(gdp->bg_inode_table) +
		(offset >> EXT2_BLOCK_SIZE_BITS(sb));
	if (!(bh = sb_bread(sb, block)))
		goto Eio;

	*p = bh;
	offset &= (EXT2_BLOCK_SIZE(sb) - 1);
	return (struct ext2_inode *) (bh->b_data + offset);

Einval:
	ext2_error(sb, "ext2_get_inode", "bad inode number: %lu",
		   (unsigned long) ino);
	return ERR_PTR(-EINVAL);
Eio:
	ext2_error(sb, "ext2_get_inode",
		   "unable to read inode block - inode=%lu, block=%lu",
		   (unsigned long) ino, block);
Egdp:
	return ERR_PTR(-EIO);
}
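/*
 * Propagate flags from the on-disk inode (i_flags) to the VFS inode.
 */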
void ext2_set_inode_flags(struct inode *inode)
{
	unsigned int flags = EXT2_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
				S_DIRSYNC | S_DAX);
	if (flags & EXT2_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & EXT2_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & EXT2_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & EXT2_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & EXT2_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode))
		inode->i_flags |= S_DAX;
}

void ext2_set_file_ops(struct inode *inode)
{
	inode->i_op = &ext2_file_inode_operations;
	inode->i_fop = &ext2_file_operations;
	if (IS_DAX(inode))
		inode->i_mapping->a_ops = &ext2_dax_aops;
	else if (test_opt(inode->i_sb, NOBH))
		inode->i_mapping->a_ops = &ext2_nobh_aops;
	else
		inode->i_mapping->a_ops = &ext2_aops;
}
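/*
 * ext2_iget - read an inode from disk and set up the in-core inode
 *
 * Returns the existing in-core inode if it is already cached; otherwise
 * reads the raw inode, copies its fields into the VFS inode, validates
 * them (link count, dtime, xattr block, size) and installs the right
 * inode/file/address_space operations for the file type.
 */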
struct inode *ext2_iget(struct super_block *sb, unsigned long ino)
{
	struct ext2_inode_info *ei;
	struct buffer_head *bh = NULL;
	struct ext2_inode *raw_inode;
	struct inode *inode;
	long ret = -EIO;
	int n;
	uid_t i_uid;
	gid_t i_gid;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ei = EXT2_I(inode);
	ei->i_block_alloc_info = NULL;

	raw_inode = ext2_get_inode(inode->i_sb, ino, &bh);
	if (IS_ERR(raw_inode)) {
		ret = PTR_ERR(raw_inode);
		goto bad_inode;
	}

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	i_uid_write(inode, i_uid);
	i_gid_write(inode, i_gid);
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le32_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
	inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/*
	 * Check whether the inode is still live: NFS may ask for inodes
	 * that have been deleted, so apply the same test e2fsck uses (no
	 * links and either a cleared mode or a set dtime mean the inode
	 * is stale).
	 */
	if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) {
		/* this inode is deleted */
		ret = -ESTALE;
		goto bad_inode;
	}
	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	ext2_set_inode_flags(inode);
	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
	ei->i_frag_no = raw_inode->i_frag;
	ei->i_frag_size = raw_inode->i_fsize;
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ei->i_dir_acl = 0;

	if (ei->i_file_acl &&
	    !ext2_data_block_valid(EXT2_SB(sb), ei->i_file_acl, 1)) {
		ext2_error(sb, "ext2_iget", "bad extended attribute block %u",
			   ei->i_file_acl);
		ret = -EFSCORRUPTED;
		goto bad_inode;
	}

	if (S_ISREG(inode->i_mode))
		inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
	else
		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
	if (i_size_read(inode) < 0) {
		ret = -EFSCORRUPTED;
		goto bad_inode;
	}
	ei->i_dtime = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_state = 0;
	ei->i_block_group = (ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
	ei->i_dir_start_lookup = 0;

	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (n = 0; n < EXT2_N_BLOCKS; n++)
		ei->i_data[n] = raw_inode->i_block[n];

	if (S_ISREG(inode->i_mode)) {
		ext2_set_file_ops(inode);
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext2_dir_inode_operations;
		inode->i_fop = &ext2_dir_operations;
		if (test_opt(inode->i_sb, NOBH))
			inode->i_mapping->a_ops = &ext2_nobh_aops;
		else
			inode->i_mapping->a_ops = &ext2_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext2_inode_is_fast_symlink(inode)) {
			inode->i_link = (char *)ei->i_data;
			inode->i_op = &ext2_fast_symlink_inode_operations;
			nd_terminate_link(ei->i_data, inode->i_size,
					  sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext2_symlink_inode_operations;
			inode_nohighmem(inode);
			if (test_opt(inode->i_sb, NOBH))
				inode->i_mapping->a_ops = &ext2_nobh_aops;
			else
				inode->i_mapping->a_ops = &ext2_aops;
		}
	} else {
		inode->i_op = &ext2_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	}
	brelse(bh);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	brelse(bh);
	iget_failed(inode);
	return ERR_PTR(ret);
}
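/*
 * __ext2_write_inode - copy the in-core inode back to its on-disk buffer
 *
 * Mirrors ext2_iget(): fields are byte-swapped into the raw inode and
 * the buffer is marked dirty; with @do_sync the buffer is also written
 * out synchronously and the result checked.
 */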
static int __ext2_write_inode(struct inode *inode, int do_sync)
{
	struct ext2_inode_info *ei = EXT2_I(inode);
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	uid_t uid = i_uid_read(inode);
	gid_t gid = i_gid_read(inode);
	struct buffer_head *bh;
	struct ext2_inode *raw_inode = ext2_get_inode(sb, ino, &bh);
	int n;
	int err = 0;

	if (IS_ERR(raw_inode))
		return -EIO;

	/*
	 * For fields not tracked in the in-memory inode, initialise them
	 * to zero for new inodes.
	 */
	if (ei->i_state & EXT2_STATE_NEW)
		memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if (!(test_opt(sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
		/*
		 * Deleted inodes keep zeroed high uid/gid halves so that
		 * old kernels can re-use them without stale upper bits.
		 */
		if (!ei->i_dtime) {
			raw_inode->i_uid_high = cpu_to_le16(high_16_bits(uid));
			raw_inode->i_gid_high = cpu_to_le16(high_16_bits(gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(uid));
		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le32(inode->i_size);
	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);

	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
	raw_inode->i_frag = ei->i_frag_no;
	raw_inode->i_fsize = ei->i_frag_size;
	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
	if (!S_ISREG(inode->i_mode))
		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
	else {
		raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32);
		if (inode->i_size > 0x7fffffffULL) {
			if (!EXT2_HAS_RO_COMPAT_FEATURE(sb,
					EXT2_FEATURE_RO_COMPAT_LARGE_FILE) ||
			    EXT2_SB(sb)->s_es->s_rev_level ==
					cpu_to_le32(EXT2_GOOD_OLD_REV)) {
				/*
				 * If this is the first large file created,
				 * add the required feature flag to the
				 * superblock.
				 */
				spin_lock(&EXT2_SB(sb)->s_lock);
				ext2_update_dynamic_rev(sb);
				EXT2_SET_RO_COMPAT_FEATURE(sb,
					EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
				spin_unlock(&EXT2_SB(sb)->s_lock);
				ext2_sync_super(sb, EXT2_SB(sb)->s_es, 1);
			}
		}
	}

	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else for (n = 0; n < EXT2_N_BLOCKS; n++)
		raw_inode->i_block[n] = ei->i_data[n];
	mark_buffer_dirty(bh);
	if (do_sync) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk("IO error syncing ext2 inode [%s:%08lx]\n",
				sb->s_id, (unsigned long) ino);
			err = -EIO;
		}
	}
	ei->i_state &= ~EXT2_STATE_NEW;
	brelse(bh);
	return err;
}
int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return __ext2_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}
int ext2_getattr(const struct path *path, struct kstat *stat,
		u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ext2_inode_info *ei = EXT2_I(inode);
	unsigned int flags;

	flags = ei->i_flags & EXT2_FL_USER_VISIBLE;
	if (flags & EXT2_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & EXT2_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & EXT2_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & EXT2_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	stat->attributes_mask |= (STATX_ATTR_APPEND |
			STATX_ATTR_COMPRESSED |
			STATX_ATTR_ENCRYPTED |
			STATX_ATTR_IMMUTABLE |
			STATX_ATTR_NODUMP);

	generic_fillattr(inode, stat);
	return 0;
}
int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	error = setattr_prepare(dentry, iattr);
	if (error)
		return error;

	if (is_quota_modification(inode, iattr)) {
		error = dquot_initialize(inode);
		if (error)
			return error;
	}
	if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
	    (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
		error = dquot_transfer(inode, iattr);
		if (error)
			return error;
	}
	if (iattr->ia_valid & ATTR_SIZE && iattr->ia_size != inode->i_size) {
		error = ext2_setsize(inode, iattr->ia_size);
		if (error)
			return error;
	}
	setattr_copy(inode, iattr);
	if (iattr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(inode, inode->i_mode);
	mark_inode_dirty(inode);

	return error;
}