This source file includes the following definitions:
- add_chain
- ext4_block_to_path
- ext4_get_branch
- ext4_find_near
- ext4_find_goal
- ext4_blks_to_allocate
- ext4_alloc_branch
- ext4_splice_branch
- ext4_ind_map_blocks
- ext4_ind_calc_metadata_amount
- ext4_ind_trans_blocks
- try_to_extend_transaction
- all_zeroes
- ext4_find_shared
- ext4_clear_blocks
- ext4_free_data
- ext4_free_branches
- ext4_ind_truncate
- ext4_ind_remove_space
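/*
 * linux/fs/ext4/indirect.c
 *
 * Indirect block mapping and truncation for ext4 inodes that do not
 * use extents, i.e. the classic ext2/ext3-style tree of direct,
 * indirect, double-indirect and triple-indirect blocks.
 */
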
#include "ext4_jbd2.h"
#include "truncate.h"
#include <linux/dax.h>
#include <linux/uio.h>

#include <trace/events/ext4.h>

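/*
 * An Indirect records one step of the chain walked by ext4_get_branch():
 * @p points at a block-number slot inside the inode or an indirect block,
 * @key caches the value read from that slot, and @bh pins the buffer that
 * @p points into (NULL when @p points into the inode itself).
 */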
typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

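/*
 * ext4_block_to_path - parse the block number into an array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: logical block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set to the number of pointer slots remaining after the
 *	returned block in its pointer array
 *
 * Translates a logical block number into its path through the block tree:
 * the first EXT4_NDIR_BLOCKS blocks sit directly in the inode, followed by
 * one singly-, one doubly- and one triply-indirect subtree.  For example,
 * with 4 KiB blocks (1024 pointers per indirect block), logical block 12
 * yields the depth-2 path {EXT4_IND_BLOCK, 0}.
 *
 * Returns the depth of the path (1..4), or 0 (after warning) if @i_block
 * exceeds the maximum the tree can address.
 */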
static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}

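/*
 * ext4_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Fills the array of triples <key, p, bh> and returns %NULL if everything
 * went OK, or a pointer to the last filled (incomplete) triple otherwise.
 * Upon return, chain[i].key holds the little-endian block number of the
 * (i+1)-th block in the chain, chain[i].p the address it was read from
 * (inside the inode for i == 0, inside bh->b_data for i > 0), and
 * chain[i].bh the buffer_head hosting it (NULL for i == 0).
 *
 * The walk stops when it hits a zero pointer, i.e. a hole (pointer to the
 * last triple returned, *@err == 0), or on an I/O or validation failure
 * (ditto, *@err < 0), or when all @depth levels were read successfully
 * (returns %NULL, *@err == 0).
 */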
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;
	int ret = -EIO;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh)) {
			ret = -ENOMEM;
			goto failure;
		}

		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = ret;
no_block:
	return p;
}

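/*
 * ext4_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * Returns a preferred place for a block (the goal): the nearest already
 * allocated block to the left in the same pointer array, failing that the
 * indirect block containing the array, failing that a goal derived from
 * the inode's block group.
 */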
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put
	 * it into the same cylinder group then.
	 */
	return ext4_inode_to_goal_block(inode);
}

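/*
 * ext4_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @partial: pointer to the last triple within a chain
 *
 * Returns the preferred place for block allocation, clamped to the
 * maximum physical block addressable by an indirect-mapped file.
 */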
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	goal = ext4_find_near(inode, partial);
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}

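/*
 * ext4_blks_to_allocate - look up the block map and count the number
 * of direct blocks that need to be allocated for the given branch.
 *
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped
 * @blocks_to_boundary: the offset in the indirect block
 *
 * Returns the total number of direct blocks to allocate for this branch.
 */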
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) have not been allocated
	 * yet, so it is clear that no blocks on that path are allocated.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}

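/*
 * ext4_alloc_branch - allocate and set up a chain of blocks
 * @handle: handle for this transaction
 * @ar: structure describing the allocation request
 * @indirect_blks: number of allocated indirect blocks
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * Allocates @indirect_blks indirect blocks plus the data block(s), links
 * them together into a branch (each new indirect block stores the number
 * of the next one, the deepest one stores the data block numbers) and
 * returns 0.  On failure everything allocated so far is freed and -errno
 * is returned.  The new branch is not yet visible from the inode; it is
 * spliced in separately by ext4_splice_branch().
 */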
static int ext4_alloc_branch(handle_t *handle,
			     struct ext4_allocation_request *ar,
			     int indirect_blks, ext4_lblk_t *offsets,
			     Indirect *branch)
{
	struct buffer_head *bh;
	ext4_fsblk_t b, new_blocks[4];
	__le32 *p;
	int i, j, err, len = 1;

	for (i = 0; i <= indirect_blks; i++) {
		if (i == indirect_blks) {
			new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
		} else
			ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
					ar->inode, ar->goal,
					ar->flags & EXT4_MB_DELALLOC_RESERVED,
					NULL, &err);
		if (err) {
			i--;
			goto failed;
		}
		branch[i].key = cpu_to_le32(new_blocks[i]);
		if (i == 0)
			continue;

		bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, bh->b_size);
		p = branch[i].p = (__le32 *) bh->b_data + offsets[i];
		b = new_blocks[i];

		if (i == indirect_blks)
			len = ar->len;
		for (j = 0; j < len; j++)
			*p++ = cpu_to_le32(b++);

		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, bh);
		if (err)
			goto failed;
	}
	return 0;
failed:
	for (; i >= 0; i--) {
		/*
		 * We want to ext4_forget() only freshly allocated indirect
		 * blocks.  Buffer for new_blocks[i] is at branch[i+1].bh
		 * (buffer at branch[0].bh is the indirect block / inode
		 * already existing before ext4_alloc_branch() was called).
		 */
		if (i > 0 && i != indirect_blks && branch[i].bh)
			ext4_forget(handle, 1, ar->inode, branch[i].bh,
				    branch[i].bh->b_blocknr);
		ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
				 (i == indirect_blks) ? ar->len : 1, 0);
	}
	return err;
}

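/*
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @handle: handle for this transaction
 * @ar: structure describing the allocation request
 * @where: location of missing link
 * @num: number of indirect blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed
 * in the inode (->i_blocks, etc.).  In case of success we end up with the
 * full chain to the new block and return 0.
 */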
static int ext4_splice_branch(handle_t *handle,
			      struct ext4_allocation_request *ar,
			      Indirect *where, int num)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to more just
	 * allocated direct blocks.
	 */
	if (num == 0 && ar->len > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < ar->len; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		ext4_mark_inode_dirty(handle, ar->inode);
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key),
			 ar->len, 0);

	return err;
}

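/*
 * The ext4_ind_map_blocks() function handles non-extents inodes
 * (i.e., using the traditional indirect/double-indirect i_data
 * scheme) for ext4_map_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf.  So let's do it before attaching
 * anything to the tree, set linkage between the newborn blocks, write them
 * if sync is required, recheck the path, free and repeat if check fails,
 * otherwise set the last missing link (that will protect us from any
 * truncate-generated removals - all blocks on the path are immune now) and
 * possibly force the write on the parent block.  That has a nice additional
 * property: no special recovery from the failed allocations is needed - we
 * simply release blocks and do not touch anything reachable from the inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * The ext4_ind_map_blocks() function should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks.
 */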
int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			int flags)
{
	struct ext4_allocation_request ar;
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
	J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* map more blocks */
		while (count < map->m_len && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup failed */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		unsigned epb = inode->i_sb->s_blocksize / sizeof(u32);
		int i;

		/*
		 * Count the number of blocks in the subtree under 'partial'.
		 * At each level we count the number of complete empty
		 * subtrees beyond the current offset and then descend into
		 * the subtree only partially beyond the current offset.
		 */
		count = 0;
		for (i = partial - chain + 1; i < depth; i++)
			count = count * epb + (epb - offsets[i] - 1);
		count++;
		/* Fill in size of a hole we found */
		map->m_pblk = 0;
		map->m_len = min_t(unsigned int, map->m_len, count);
		goto cleanup;
	}

	/* Failed read of indirect block */
	if (err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	if (ext4_has_feature_bigalloc(inode->i_sb)) {
		EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
				 "non-extent mapped inodes with bigalloc");
		return -EFSCORRUPTED;
	}

	/* Set up for the direct block allocation */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.logical = map->m_lblk;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ar.flags |= EXT4_MB_DELALLOC_RESERVED;
	if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
		ar.flags |= EXT4_MB_USE_RESERVED;

	ar.goal = ext4_find_goal(inode, map->m_lblk, partial);

	/* the number of blocks needed to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	ar.len = ext4_blks_to_allocate(partial, indirect_blks,
				       map->m_len, blocks_to_boundary);

	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, &ar, indirect_blks,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case.  --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, &ar, partial, indirect_blks);
	if (err)
		goto cleanup;

	map->m_flags |= EXT4_MAP_NEW;

	ext4_update_inode_fsync_trans(handle, inode, 1);
	count = ar.len;
got_it:
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = le32_to_cpu(chain[depth-1].key);
	map->m_len = count;
	if (count > blocks_to_boundary)
		map->m_flags |= EXT4_MAP_BOUNDARY;
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
out:
	trace_ext4_ind_map_blocks_exit(inode, flags, map, err);
	return err;
}

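/*
 * Calculate the number of metadata blocks that need to be reserved to
 * allocate a new block at @lblock in a non-extent-based file.
 */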
int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
	int blk_bits;

	if (lblock < EXT4_NDIR_BLOCKS)
		return 0;

	lblock -= EXT4_NDIR_BLOCKS;

	if (ei->i_da_metadata_calc_len &&
	    (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
		ei->i_da_metadata_calc_len++;
		return 0;
	}
	ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
	ei->i_da_metadata_calc_len = 1;
	blk_bits = order_base_2(lblock);
	return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
}

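/*
 * Calculate the number of indirect blocks touched by mapping @nrblocks
 * logically contiguous blocks.
 */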
int ext4_ind_trans_blocks(struct inode *inode, int nrblocks)
{
	/*
	 * With N contiguous data blocks, we need at most
	 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
	 * 2 dindirect blocks, and 1 tindirect block
	 */
	return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
}

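/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * Try to extend this transaction for the purposes of truncation.  If it
 * can be extended, fail if we can't.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted, we return 1.
 */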
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle))
		return 0;
	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	if (!ext4_journal_extend(handle, ext4_blocks_for_truncate(inode)))
		return 0;
	return 1;
}

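/*
 * Returns 1 if the range of little-endian words [p, q) contains only
 * zeroes, 0 otherwise.
 */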
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

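/*
 * ext4_find_shared - find the indirect blocks for partial truncation.
 * @inode: inode in question
 * @depth: depth of the affected branch
 * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
 * @chain: place to store the pointers to partial indirect blocks
 * @top: place to put the (detached) top of the branch
 *
 * This is a helper function used by ext4_truncate().
 *
 * When we do truncate() we may have to clean the ends of several
 * indirect blocks but leave the blocks themselves alive.  A block is
 * partially truncated if some data below the new i_size is referred
 * from it (and it is on the path to the first completely truncated
 * data block).  We have to free the top of that path along with
 * everything to the right of the path.  Since no allocation past the
 * truncation point is possible until ext4_truncate() finishes, we may
 * safely do the latter, but the top of the branch may require special
 * attention - pageout below the truncation point might try to
 * populate it.
 *
 * Return value is the pointer to the last filled element of @chain.
 */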
static Indirect *ext4_find_shared(struct inode *inode, int depth,
				  ext4_lblk_t offsets[4], Indirect chain[4],
				  __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);

	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		goto no_top;
	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive.  The rest of our
	 * branch should be detached before unlocking.  However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext4.  Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

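/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 *
 * Return 0 on success, 1 on invalid block range
 * and < 0 on fatal error.
 */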
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
			     struct buffer_head *bh,
			     ext4_fsblk_t block_to_free,
			     unsigned long count, __le32 *first,
			     __le32 *last)
{
	__le32 *p;
	int	flags = EXT4_FREE_BLOCKS_VALIDATED;
	int	err;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
	    ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
		flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA;
	else if (ext4_should_journal_data(inode))
		flags |= EXT4_FREE_BLOCKS_FORGET;

	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
				   count)) {
		EXT4_ERROR_INODE(inode, "attempt to clear invalid "
				 "blocks %llu len %lu",
				 (unsigned long long) block_to_free, count);
		return 1;
	}

	if (try_to_extend_transaction(handle, inode)) {
		if (bh) {
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			err = ext4_handle_dirty_metadata(handle, inode, bh);
			if (unlikely(err))
				goto out_err;
		}
		err = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(err))
			goto out_err;
		err = ext4_truncate_restart_trans(handle, inode,
					ext4_blocks_for_truncate(inode));
		if (unlikely(err))
			goto out_err;
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			err = ext4_journal_get_write_access(handle, bh);
			if (unlikely(err))
				goto out_err;
		}
	}

	for (p = first; p < last; p++)
		*p = 0;

	ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
	return 0;
out_err:
	ext4_std_error(inode->i_sb, err);
	return err;
}

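/*
 * ext4_free_data - free a list of data blocks
 * @handle:	handle for this transaction
 * @inode:	inode we are dealing with
 * @this_bh:	indirect buffer_head which contains *@first and *@last
 * @first:	array of block numbers
 * @last:	points immediately past the end of array
 *
 * We are freeing all blocks referred from that array (numbers are stored
 * as little-endian 32-bit) and updating @inode->i_blocks appropriately.
 *
 * We accumulate contiguous runs of blocks to free.  Conveniently, if these
 * blocks are contiguous then releasing them at one time will only affect
 * one or two bitmap blocks (+ group descriptor(s) and superblock) and we
 * won't actually use a lot of journal space.
 *
 * @this_bh will be %NULL if @first and @last point into the inode itself.
 */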
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
	unsigned long count = 0;	   /* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;	   /* Pointer into inode/ind
					      corresponding to
					      block_to_free */
	ext4_fsblk_t nr;		   /* Current block # */
	__le32 *p;			   /* Pointer into inode/ind
					      for current block */
	int err = 0;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				err = ext4_clear_blocks(handle, inode, this_bh,
						        block_to_free, count,
						        block_to_free_p, p);
				if (err)
					break;
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	if (!err && count > 0)
		err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
					count, block_to_free_p, p);
	if (err < 0)
		/* fatal error */
		return;

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at this
		 * point.  However, if the data is corrupted and an indirect
		 * block pointed to itself, it would have been detached when
		 * the block got cleared.  Check for this instead of OOPSing.
		 */
		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
			ext4_handle_dirty_metadata(handle, inode, this_bh);
		else
			EXT4_ERROR_INODE(inode,
					 "circular indirect block detected at "
					 "block %llu",
				(unsigned long long) this_bh->b_blocknr);
	}
}

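/*
 * ext4_free_branches - free an array of branches
 * @handle: JBD handle for this transaction
 * @inode:	inode we are dealing with
 * @parent_bh:	the buffer_head which contains *@first and *@last
 * @first:	array of block numbers
 * @last:	pointer immediately past the end of array
 * @depth:	depth of the branches to free
 *
 * We are freeing all blocks referred from these branches (numbers are
 * stored as little-endian 32-bit) and updating @inode->i_blocks
 * appropriately.
 */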
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						   nr, 1)) {
				EXT4_ERROR_INODE(inode,
						 "invalid indirect mapped "
						 "block %lu (level %d)",
						 (unsigned long) nr, depth);
				break;
			}

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				EXT4_ERROR_INODE_BLOCK(inode, nr,
						       "Read failure");
				continue;
			}

			/* This zaps the entire block.  Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					(__le32 *) bh->b_data,
					(__le32 *) bh->b_data + addr_per_block,
					depth);
			brelse(bh);

			/*
			 * Everything below this pointer has been
			 * released.  Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the updating of the
			 * bitmap block which owns it.  So make some room in
			 * the journal.
			 *
			 * We zero the parent pointer *after* freeing its
			 * pointee in the bitmaps, so if extend_transaction()
			 * for some reason fails to put the bitmap changes and
			 * the release into the same transaction, recovery
			 * will merely complain about releasing a free block,
			 * rather than leaking blocks.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext4_mark_inode_dirty(handle, inode);
				ext4_truncate_restart_trans(handle, inode,
					    ext4_blocks_for_truncate(inode));
			}

			/*
			 * This is an indirect block: forget it in the
			 * journal and flag it as metadata, so that a
			 * journal replay cannot write stale, freed
			 * contents back to disk.
			 */
			ext4_free_blocks(handle, inode, NULL, nr, 1,
					 EXT4_FREE_BLOCKS_METADATA|
					 EXT4_FREE_BLOCKS_FORGET);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
								   parent_bh)){
					*p = 0;
					BUFFER_TRACE(parent_bh,
					"call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree: free data blocks */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}

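/*
 * ext4_ind_truncate - truncate an indirect-mapped inode down to i_size,
 * freeing all data and indirect blocks past the new end of file and
 * restarting the transaction as needed along the way.
 */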
void ext4_ind_truncate(handle_t *handle, struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n = 0;
	ext4_lblk_t last_block, max_block;
	unsigned blocksize = inode->i_sb->s_blocksize;

	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (last_block != max_block) {
		n = ext4_block_to_path(inode, last_block, offsets, NULL);
		if (n == 0)
			return;
	}

	ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to propagate
	 * the new, shorter inode size (held for now in i_size) into the
	 * on-disk inode.  We do this via i_disksize, which is the value which
	 * ext4 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	if (last_block == max_block) {
		/*
		 * It is unnecessary to free any data blocks if last_block is
		 * equal to the indirect block limit.
		 */
		return;
	} else if (n == 1) {		/* direct blocks */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop.  No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_TIND_BLOCK:
		;
	}
}

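/*
 * ext4_ind_remove_space - remove space from the range
 * @handle: JBD handle for this transaction
 * @inode:	inode we are dealing with
 * @start:	First block to remove
 * @end:	One block after the last block to remove (exclusive)
 *
 * Free the blocks in the defined range (end is exclusive endpoint of
 * range).  This is used by ext4_punch_hole().
 */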
int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
			  ext4_lblk_t start, ext4_lblk_t end)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4], offsets2[4];
	Indirect chain[4], chain2[4];
	Indirect *partial, *partial2;
	Indirect *p = NULL, *p2 = NULL;
	ext4_lblk_t max_block;
	__le32 nr = 0, nr2 = 0;
	int n = 0, n2 = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;

	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	if (end >= max_block)
		end = max_block;
	if ((start >= end) || (start > max_block))
		return 0;

	n = ext4_block_to_path(inode, start, offsets, NULL);
	n2 = ext4_block_to_path(inode, end, offsets2, NULL);

	BUG_ON(n > n2);

	if ((n == 1) && (n == n2)) {
		/* We're punching only within direct block range */
		ext4_free_data(handle, inode, NULL, i_data + offsets[0],
			       i_data + offsets2[0]);
		return 0;
	} else if (n2 > n) {
		/*
		 * Start and end are at different levels, so we're going to
		 * free the partial block at start, and the partial block at
		 * the end of the range.  If there are some levels in between
		 * then the do_indirects label will take care of that.
		 */

		if (n == 1) {
			/*
			 * Start is at the direct block level; free
			 * everything to the end of the level.
			 */
			ext4_free_data(handle, inode, NULL, i_data + offsets[0],
				       i_data + EXT4_NDIR_BLOCKS);
			goto end_range;
		}


		partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
		if (nr) {
			if (partial == chain) {
				/* Shared branch grows from the inode */
				ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
			}
		}

		/*
		 * Clear the ends of indirect blocks on the shared branch
		 * at the start of the range
		 */
		while (partial > chain) {
			ext4_free_branches(handle, inode, partial->bh,
				partial->p + 1,
				(__le32 *)partial->bh->b_data+addr_per_block,
				(chain+n-1) - partial);
			partial--;
		}

end_range:
		partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
		if (nr2) {
			if (partial2 == chain2) {
				/*
				 * Remember, end is exclusive so here we're at
				 * the start of the next level we're not going
				 * to free.  Everything was covered by the
				 * start of the range.
				 */
				goto do_indirects;
			}
		} else {
			/*
			 * ext4_find_shared returns an Indirect structure
			 * which points to the last element which should not
			 * be removed by truncate.  But this is the end of
			 * the range in punch_hole, so we need to point to
			 * the next element.
			 */
			partial2->p++;
		}

		/*
		 * Clear the ends of indirect blocks on the shared branch
		 * at the end of the range
		 */
		while (partial2 > chain2) {
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			partial2--;
		}
		goto do_indirects;
	}

	/* Punch happened within the same level (n == n2) */
	partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
	partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);

	/* Free top, but only if partial2 isn't its subtree. */
	if (nr) {
		int level = min(partial - chain, partial2 - chain2);
		int i;
		int subtree = 1;

		for (i = 0; i <= level; i++) {
			if (offsets[i] != offsets2[i]) {
				subtree = 0;
				break;
			}
		}

		if (!subtree) {
			if (partial == chain) {
				/* Shared branch grows from the inode */
				ext4_free_branches(handle, inode, NULL,
						   &nr, &nr+1,
						   (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
						   partial->p,
						   partial->p+1,
						   (chain+n-1) - partial);
			}
		}
	}

	if (!nr2) {
		/*
		 * ext4_find_shared returns an Indirect structure which
		 * points to the last element which should not be removed
		 * by truncate.  But this is the end of the range in
		 * punch_hole, so we need to point to the next element.
		 */
		partial2->p++;
	}

	while (partial > chain || partial2 > chain2) {
		int depth = (chain+n-1) - partial;
		int depth2 = (chain2+n2-1) - partial2;

		if (partial > chain && partial2 > chain2 &&
		    partial->bh->b_blocknr == partial2->bh->b_blocknr) {
			/*
			 * We've converged on the same block.  Clear the
			 * range, then we're done.
			 */
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p + 1,
					   partial2->p,
					   (chain+n-1) - partial);
			goto cleanup;
		}

		/*
		 * The start and end partial branches may not be at the same
		 * level even though the punch happened within one level.  So,
		 * we give them a chance to arrive at the same level, then
		 * walk them in step with each other until we converge on the
		 * same block.
		 */
		if (partial > chain && depth <= depth2) {
			ext4_free_branches(handle, inode, partial->bh,
				partial->p + 1,
				(__le32 *)partial->bh->b_data+addr_per_block,
				(chain+n-1) - partial);
			partial--;
		}
		if (partial2 > chain2 && depth2 <= depth) {
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			partial2--;
		}
	}

cleanup:
	while (p && p > chain) {
		BUFFER_TRACE(p->bh, "call brelse");
		brelse(p->bh);
		p--;
	}
	while (p2 && p2 > chain2) {
		BUFFER_TRACE(p2->bh, "call brelse");
		brelse(p2->bh);
		p2--;
	}
	return 0;

do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_IND_BLOCK:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_DIND_BLOCK:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_TIND_BLOCK:
		;
	}
	goto cleanup;
}