This source file includes the following definitions:
- ext4_extent_block_csum
- ext4_extent_block_csum_verify
- ext4_extent_block_csum_set
- ext4_ext_truncate_extend_restart
- ext4_ext_get_access
- __ext4_ext_dirty
- ext4_ext_find_goal
- ext4_ext_new_meta_block
- ext4_ext_space_block
- ext4_ext_space_block_idx
- ext4_ext_space_root
- ext4_ext_space_root_idx
- ext4_force_split_extent_at
- ext4_ext_calc_metadata_amount
- ext4_ext_max_entries
- ext4_valid_extent
- ext4_valid_extent_idx
- ext4_valid_extent_entries
- __ext4_ext_check
- ext4_ext_check_inode
- ext4_cache_extents
- __read_extent_tree_block
- ext4_ext_precache
- ext4_ext_show_path
- ext4_ext_show_leaf
- ext4_ext_show_move
- ext4_ext_drop_refs
- ext4_ext_binsearch_idx
- ext4_ext_binsearch
- ext4_ext_tree_init
- ext4_find_extent
- ext4_ext_insert_index
- ext4_ext_split
- ext4_ext_grow_indepth
- ext4_ext_create_new_leaf
- ext4_ext_search_left
- ext4_ext_search_right
- ext4_ext_next_allocated_block
- ext4_ext_next_leaf_block
- ext4_ext_correct_indexes
- ext4_can_extents_be_merged
- ext4_ext_try_to_merge_right
- ext4_ext_try_to_merge_up
- ext4_ext_try_to_merge
- ext4_ext_check_overlap
- ext4_ext_insert_extent
- ext4_fill_fiemap_extents
- ext4_fill_es_cache_info
- ext4_ext_determine_hole
- ext4_ext_put_gap_in_cache
- ext4_ext_rm_idx
- ext4_ext_calc_credits_for_single_extent
- ext4_ext_index_trans_blocks
- get_default_free_blocks_flags
- ext4_rereserve_cluster
- ext4_remove_blocks
- ext4_ext_rm_leaf
- ext4_ext_more_to_rm
- ext4_ext_remove_space
- ext4_ext_init
- ext4_ext_release
- ext4_zeroout_es
- ext4_ext_zeroout
- ext4_split_extent_at
- ext4_split_extent
- ext4_ext_convert_to_initialized
- ext4_split_convert_extents
- ext4_convert_unwritten_extents_endio
- check_eofblocks_fl
- convert_initialized_extent
- ext4_ext_handle_unwritten_extents
- get_implied_cluster_alloc
- ext4_ext_map_blocks
- ext4_ext_truncate
- ext4_alloc_file_blocks
- ext4_zero_range
- ext4_fallocate
- ext4_convert_unwritten_extents
- ext4_find_delayed_extent
- ext4_xattr_fiemap
- _ext4_fiemap
- ext4_fiemap
- ext4_get_es_cache
- ext4_access_path
- ext4_ext_shift_path_extents
- ext4_ext_shift_extents
- ext4_collapse_range
- ext4_insert_range
- ext4_swap_extents
- ext4_clu_mapped
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
4 * Written by Alex Tomas <alex@clusterfs.com>
5 *
6 * Architecture independence:
7 * Copyright (c) 2005, Bull S.A.
8 * Written by Pierre Peiffer <pierre.peiffer@bull.net>
9 */
10
11 /*
12 * Extents support for EXT4
13 *
14 * TODO:
15 * - ext4*_error() should be used in some situations
16 * - analyze all BUG()/BUG_ON(), use -EIO where appropriate
17 * - smart tree reduction
18 */
19
20 #include <linux/fs.h>
21 #include <linux/time.h>
22 #include <linux/jbd2.h>
23 #include <linux/highuid.h>
24 #include <linux/pagemap.h>
25 #include <linux/quotaops.h>
26 #include <linux/string.h>
27 #include <linux/slab.h>
28 #include <linux/uaccess.h>
29 #include <linux/fiemap.h>
30 #include <linux/backing-dev.h>
31 #include "ext4_jbd2.h"
32 #include "ext4_extents.h"
33 #include "xattr.h"
34
35 #include <trace/events/ext4.h>
36
37 /*
38 * used by extent splitting.
39 */
40 #define EXT4_EXT_MAY_ZEROOUT 0x1  /* safe to zeroout if split fails */
41
42 #define EXT4_EXT_MARK_UNWRIT1 0x2  /* mark first half unwritten */
43 #define EXT4_EXT_MARK_UNWRIT2 0x4  /* mark second half unwritten */
44
45 #define EXT4_EXT_DATA_VALID1 0x8  /* first half contains valid data */
46 #define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */
47
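/*
 * Extent tree blocks carry an ext4_extent_tail checksum when the
 * metadata_csum feature is enabled.  The helpers below compute that
 * checksum over the block header and entries (seeded with the inode's
 * checksum seed), and verify or set it on read/write.
 */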
48 static __le32 ext4_extent_block_csum(struct inode *inode,
49 struct ext4_extent_header *eh)
50 {
51 struct ext4_inode_info *ei = EXT4_I(inode);
52 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
53 __u32 csum;
54
55 csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
56 EXT4_EXTENT_TAIL_OFFSET(eh));
57 return cpu_to_le32(csum);
58 }
59
60 static int ext4_extent_block_csum_verify(struct inode *inode,
61 struct ext4_extent_header *eh)
62 {
63 struct ext4_extent_tail *et;
64
65 if (!ext4_has_metadata_csum(inode->i_sb))
66 return 1;
67
68 et = find_ext4_extent_tail(eh);
69 if (et->et_checksum != ext4_extent_block_csum(inode, eh))
70 return 0;
71 return 1;
72 }
73
74 static void ext4_extent_block_csum_set(struct inode *inode,
75 struct ext4_extent_header *eh)
76 {
77 struct ext4_extent_tail *et;
78
79 if (!ext4_has_metadata_csum(inode->i_sb))
80 return;
81
82 et = find_ext4_extent_tail(eh);
83 et->et_checksum = ext4_extent_block_csum(inode, eh);
84 }
85
86 static int ext4_split_extent(handle_t *handle,
87 struct inode *inode,
88 struct ext4_ext_path **ppath,
89 struct ext4_map_blocks *map,
90 int split_flag,
91 int flags);
92
93 static int ext4_split_extent_at(handle_t *handle,
94 struct inode *inode,
95 struct ext4_ext_path **ppath,
96 ext4_lblk_t split,
97 int split_flag,
98 int flags);
99
100 static int ext4_find_delayed_extent(struct inode *inode,
101 struct extent_status *newes);
102
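/*
 * Make sure 'handle' has at least 'needed' credits: try to extend the
 * current transaction, and if that is not possible, restart it and
 * return -EAGAIN so the caller re-validates its extent path.
 */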
103 static int ext4_ext_truncate_extend_restart(handle_t *handle,
104 struct inode *inode,
105 int needed)
106 {
107 int err;
108
109 if (!ext4_handle_valid(handle))
110 return 0;
111 if (handle->h_buffer_credits >= needed)
112 return 0;
113 /*
114 * If we need to extend the journal get a few extra blocks
115 * while we're at it for efficiency's sake.
116 */
117 needed += 3;
118 err = ext4_journal_extend(handle, needed - handle->h_buffer_credits);
119 if (err <= 0)
120 return err;
121 err = ext4_truncate_restart_trans(handle, inode, needed);
122 if (err == 0)
123 err = -EAGAIN;
124
125 return err;
126 }
127
128 /*
129 * could return:
130 * - EROFS
131 * - ENOMEM
132 */
133 static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
134 struct ext4_ext_path *path)
135 {
136 if (path->p_bh) {
137 /* path points to block */
138 BUFFER_TRACE(path->p_bh, "get_write_access");
139 return ext4_journal_get_write_access(handle, path->p_bh);
140 }
141
142 /* path points to leaf/index in inode body */
143 return 0;
144 }
145
146 /*
147 * could return:
148 * - EROFS
149 * - ENOMEM
150 * - EIO
151 */
152 int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
153 struct inode *inode, struct ext4_ext_path *path)
154 {
155 int err;
156
157 WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
158 if (path->p_bh) {
159 ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
160 /* path points to block */
161 err = __ext4_handle_dirty_metadata(where, line, handle,
162 inode, path->p_bh);
163 } else {
164 /* path points to leaf/index in inode body */
165 err = ext4_mark_inode_dirty(handle, inode);
166 }
167 return err;
168 }
169
170 static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
171 struct ext4_ext_path *path,
172 ext4_lblk_t block)
173 {
174 if (path) {
175 int depth = path->p_depth;
176 struct ext4_extent *ex;
177
178
179 /*
180 * Try to predict block placement assuming that we are
181 * filling in a file which will eventually be
182 * non-sparse --- i.e., in the case of libbfd writing
183 * an ELF object sections out-of-order but in a way
184 * that eventually results in a contiguous object or
185 * executable file, or some database extending a table
186 * space file.  However, this is actually somewhat
187 * non-ideal if we are writing a sparse file such as
188 * qemu or KVM writing a raw image file that is going
189 * to stay fairly sparse, since it will end up
190 * fragmenting the file system's free space.  Maybe we
191 * should have some heuristics or "school of thought"
192 * about how to place a file in a sparse directory or
193 * hole-y file.
194 */
195 ex = path[depth].p_ext;
196 if (ex) {
197 ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
198 ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
199
200 if (block > ext_block)
201 return ext_pblk + (block - ext_block);
202 else
203 return ext_pblk - (ext_block - block);
204 }
205
206
207
208 if (path[depth].p_bh)
209 return path[depth].p_bh->b_blocknr;
210 }
211
212
213 return ext4_inode_to_goal_block(inode);
214 }
215
216 /*
217 * Allocation for a meta data block
218 */
219 static ext4_fsblk_t
220 ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
221 struct ext4_ext_path *path,
222 struct ext4_extent *ex, int *err, unsigned int flags)
223 {
224 ext4_fsblk_t goal, newblock;
225
226 goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
227 newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
228 NULL, err);
229 return newblock;
230 }
231
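/*
 * The ext4_ext_space_*() helpers return how many extent or index
 * entries fit in a full tree block, or in the in-inode root held in
 * i_data.  With AGGRESSIVE_TEST defined, the limits are shrunk to
 * force deep trees for testing.
 */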
232 static inline int ext4_ext_space_block(struct inode *inode, int check)
233 {
234 int size;
235
236 size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
237 / sizeof(struct ext4_extent);
238 #ifdef AGGRESSIVE_TEST
239 if (!check && size > 6)
240 size = 6;
241 #endif
242 return size;
243 }
244
245 static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
246 {
247 int size;
248
249 size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
250 / sizeof(struct ext4_extent_idx);
251 #ifdef AGGRESSIVE_TEST
252 if (!check && size > 5)
253 size = 5;
254 #endif
255 return size;
256 }
257
258 static inline int ext4_ext_space_root(struct inode *inode, int check)
259 {
260 int size;
261
262 size = sizeof(EXT4_I(inode)->i_data);
263 size -= sizeof(struct ext4_extent_header);
264 size /= sizeof(struct ext4_extent);
265 #ifdef AGGRESSIVE_TEST
266 if (!check && size > 3)
267 size = 3;
268 #endif
269 return size;
270 }
271
272 static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
273 {
274 int size;
275
276 size = sizeof(EXT4_I(inode)->i_data);
277 size -= sizeof(struct ext4_extent_header);
278 size /= sizeof(struct ext4_extent_idx);
279 #ifdef AGGRESSIVE_TEST
280 if (!check && size > 4)
281 size = 4;
282 #endif
283 return size;
284 }
285
286 static inline int
287 ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
288 struct ext4_ext_path **ppath, ext4_lblk_t lblk,
289 int nofail)
290 {
291 struct ext4_ext_path *path = *ppath;
292 int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
293
294 return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
295 EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
296 EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO |
297 (nofail ? EXT4_GET_BLOCKS_METADATA_NOFAIL:0));
298 }
299
300 /*
301 * Calculate the number of metadata blocks needed
302 * to allocate @lblock.
303 * Worst case is one block per extent.
304 */
305 int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
306 {
307 struct ext4_inode_info *ei = EXT4_I(inode);
308 int idxs;
309
310 idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
311 / sizeof(struct ext4_extent_idx));
312
313 /*
314 * If the new delayed allocation block is contiguous with the
315 * previous da block, it can share index blocks with the
316 * previous block, so we only need to allocate a new index
317 * block every idxs leaf blocks: at idxs**2 blocks we need one
318 * index block, at idxs**3 blocks we need the second index
319 * block, and so on up to the maximum number of metadata blocks.
320 */
321 if (ei->i_da_metadata_calc_len &&
322 ei->i_da_metadata_calc_last_lblock+1 == lblock) {
323 int num = 0;
324
325 if ((ei->i_da_metadata_calc_len % idxs) == 0)
326 num++;
327 if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
328 num++;
329 if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
330 num++;
331 ei->i_da_metadata_calc_len = 0;
332 } else
333 ei->i_da_metadata_calc_len++;
334 ei->i_da_metadata_calc_last_lblock++;
335 return num;
336 }
337
338 /*
339 * In the worst case we need a new set of index blocks at
340 * every level of the inode's extent tree.
341 */
342 ei->i_da_metadata_calc_len = 1;
343 ei->i_da_metadata_calc_last_lblock = lblock;
344 return ext_depth(inode) + 1;
345 }
346
347 static int
348 ext4_ext_max_entries(struct inode *inode, int depth)
349 {
350 int max;
351
352 if (depth == ext_depth(inode)) {
353 if (depth == 0)
354 max = ext4_ext_space_root(inode, 1);
355 else
356 max = ext4_ext_space_root_idx(inode, 1);
357 } else {
358 if (depth == 0)
359 max = ext4_ext_space_block(inode, 1);
360 else
361 max = ext4_ext_space_block_idx(inode, 1);
362 }
363
364 return max;
365 }
366
367 static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
368 {
369 ext4_fsblk_t block = ext4_ext_pblock(ext);
370 int len = ext4_ext_get_actual_len(ext);
371 ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
372
373 /*
374 * We allow neither:
375 * - zero length
376 * - overflow/wrap-around
377 */
378 if (lblock + len <= lblock)
379 return 0;
380 return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
381 }
382
383 static int ext4_valid_extent_idx(struct inode *inode,
384 struct ext4_extent_idx *ext_idx)
385 {
386 ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
387
388 return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
389 }
390
391 static int ext4_valid_extent_entries(struct inode *inode,
392 struct ext4_extent_header *eh,
393 int depth)
394 {
395 unsigned short entries;
396 if (eh->eh_entries == 0)
397 return 1;
398
399 entries = le16_to_cpu(eh->eh_entries);
400
401 if (depth == 0) {
402 /* leaf entries */
403 struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
404 struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
405 ext4_fsblk_t pblock = 0;
406 ext4_lblk_t lblock = 0;
407 ext4_lblk_t prev = 0;
408 int len = 0;
409 while (entries) {
410 if (!ext4_valid_extent(inode, ext))
411 return 0;
412
413 /* Check for overlapping extents */
414 lblock = le32_to_cpu(ext->ee_block);
415 len = ext4_ext_get_actual_len(ext);
416 if ((lblock <= prev) && prev) {
417 pblock = ext4_ext_pblock(ext);
418 es->s_last_error_block = cpu_to_le64(pblock);
419 return 0;
420 }
421 ext++;
422 entries--;
423 prev = lblock + len - 1;
424 }
425 } else {
426 struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
427 while (entries) {
428 if (!ext4_valid_extent_idx(inode, ext_idx))
429 return 0;
430 ext_idx++;
431 entries--;
432 }
433 }
434 return 1;
435 }
436
437 static int __ext4_ext_check(const char *function, unsigned int line,
438 struct inode *inode, struct ext4_extent_header *eh,
439 int depth, ext4_fsblk_t pblk)
440 {
441 const char *error_msg;
442 int max = 0, err = -EFSCORRUPTED;
443
444 if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
445 error_msg = "invalid magic";
446 goto corrupted;
447 }
448 if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
449 error_msg = "unexpected eh_depth";
450 goto corrupted;
451 }
452 if (unlikely(eh->eh_max == 0)) {
453 error_msg = "invalid eh_max";
454 goto corrupted;
455 }
456 max = ext4_ext_max_entries(inode, depth);
457 if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
458 error_msg = "too large eh_max";
459 goto corrupted;
460 }
461 if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
462 error_msg = "invalid eh_entries";
463 goto corrupted;
464 }
465 if (!ext4_valid_extent_entries(inode, eh, depth)) {
466 error_msg = "invalid extent entries";
467 goto corrupted;
468 }
469 if (unlikely(depth > 32)) {
470 error_msg = "too large eh_depth";
471 goto corrupted;
472 }
473 /* Verify checksum on non-root extent tree nodes */
474 if (ext_depth(inode) != depth &&
475 !ext4_extent_block_csum_verify(inode, eh)) {
476 error_msg = "extent tree corrupted";
477 err = -EFSBADCRC;
478 goto corrupted;
479 }
480 return 0;
481
482 corrupted:
483 ext4_error_inode(inode, function, line, 0,
484 "pblk %llu bad header/extent: %s - magic %x, "
485 "entries %u, max %u(%u), depth %u(%u)",
486 (unsigned long long) pblk, error_msg,
487 le16_to_cpu(eh->eh_magic),
488 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
489 max, le16_to_cpu(eh->eh_depth), depth);
490 return err;
491 }
492
493 #define ext4_ext_check(inode, eh, depth, pblk) \
494 __ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))
495
496 int ext4_ext_check_inode(struct inode *inode)
497 {
498 return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
499 }
500
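/*
 * Pre-load the extent status cache with all extents of a leaf block
 * that was just read, including the holes between them.
 */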
501 static void ext4_cache_extents(struct inode *inode,
502 struct ext4_extent_header *eh)
503 {
504 struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
505 ext4_lblk_t prev = 0;
506 int i;
507
508 for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
509 unsigned int status = EXTENT_STATUS_WRITTEN;
510 ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
511 int len = ext4_ext_get_actual_len(ex);
512
513 if (prev && (prev != lblk))
514 ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
515 EXTENT_STATUS_HOLE);
516
517 if (ext4_ext_is_unwritten(ex))
518 status = EXTENT_STATUS_UNWRITTEN;
519 ext4_es_cache_extent(inode, lblk, len,
520 ext4_ext_pblock(ex), status);
521 prev = lblk + len;
522 }
523 }
524
525 static struct buffer_head *
526 __read_extent_tree_block(const char *function, unsigned int line,
527 struct inode *inode, ext4_fsblk_t pblk, int depth,
528 int flags)
529 {
530 struct buffer_head *bh;
531 int err;
532
533 bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS);
534 if (unlikely(!bh))
535 return ERR_PTR(-ENOMEM);
536
537 if (!bh_uptodate_or_lock(bh)) {
538 trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
539 err = bh_submit_read(bh);
540 if (err < 0)
541 goto errout;
542 }
543 if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
544 return bh;
545 if (!ext4_has_feature_journal(inode->i_sb) ||
546 (inode->i_ino !=
547 le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) {
548 err = __ext4_ext_check(function, line, inode,
549 ext_block_hdr(bh), depth, pblk);
550 if (err)
551 goto errout;
552 }
553 set_buffer_verified(bh);
554 /*
555 * If this is a leaf block, cache all of its entries
556 */
557 if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
558 struct ext4_extent_header *eh = ext_block_hdr(bh);
559 ext4_cache_extents(inode, eh);
560 }
561 return bh;
562 errout:
563 put_bh(bh);
564 return ERR_PTR(err);
565
566 }
567
568 #define read_extent_tree_block(inode, pblk, depth, flags) \
569 __read_extent_tree_block(__func__, __LINE__, (inode), (pblk), \
570 (depth), (flags))
571
572
573 /*
574 * Cache a file's extent information in the extent status tree.
575 */
576 int ext4_ext_precache(struct inode *inode)
577 {
578 struct ext4_inode_info *ei = EXT4_I(inode);
579 struct ext4_ext_path *path = NULL;
580 struct buffer_head *bh;
581 int i = 0, depth, ret = 0;
582
583 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
584 return 0;
585
586 down_read(&ei->i_data_sem);
587 depth = ext_depth(inode);
588
589 path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
590 GFP_NOFS);
591 if (path == NULL) {
592 up_read(&ei->i_data_sem);
593 return -ENOMEM;
594 }
595
596 /* Don't cache anything if there are no external extent blocks */
597 if (depth == 0)
598 goto out;
599 path[0].p_hdr = ext_inode_hdr(inode);
600 ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
601 if (ret)
602 goto out;
603 path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
604 while (i >= 0) {
605 /*
606 * If this is a leaf block or we've reached the end of
607 * the index block, go up
608 */
609 if ((i == depth) ||
610 path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
611 brelse(path[i].p_bh);
612 path[i].p_bh = NULL;
613 i--;
614 continue;
615 }
616 bh = read_extent_tree_block(inode,
617 ext4_idx_pblock(path[i].p_idx++),
618 depth - i - 1,
619 EXT4_EX_FORCE_CACHE);
620 if (IS_ERR(bh)) {
621 ret = PTR_ERR(bh);
622 break;
623 }
624 i++;
625 path[i].p_bh = bh;
626 path[i].p_hdr = ext_block_hdr(bh);
627 path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
628 }
629 ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
630 out:
631 up_read(&ei->i_data_sem);
632 ext4_ext_drop_refs(path);
633 kfree(path);
634 return ret;
635 }
636
637 #ifdef EXT_DEBUG
638 static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
639 {
640 int k, l = path->p_depth;
641
642 ext_debug("path:");
643 for (k = 0; k <= l; k++, path++) {
644 if (path->p_idx) {
645 ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block),
646 ext4_idx_pblock(path->p_idx));
647 } else if (path->p_ext) {
648 ext_debug(" %d:[%d]%d:%llu ",
649 le32_to_cpu(path->p_ext->ee_block),
650 ext4_ext_is_unwritten(path->p_ext),
651 ext4_ext_get_actual_len(path->p_ext),
652 ext4_ext_pblock(path->p_ext));
653 } else
654 ext_debug(" []");
655 }
656 ext_debug("\n");
657 }
658
659 static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
660 {
661 int depth = ext_depth(inode);
662 struct ext4_extent_header *eh;
663 struct ext4_extent *ex;
664 int i;
665
666 if (!path)
667 return;
668
669 eh = path[depth].p_hdr;
670 ex = EXT_FIRST_EXTENT(eh);
671
672 ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);
673
674 for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
675 ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
676 ext4_ext_is_unwritten(ex),
677 ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
678 }
679 ext_debug("\n");
680 }
681
682 static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
683 ext4_fsblk_t newblock, int level)
684 {
685 int depth = ext_depth(inode);
686 struct ext4_extent *ex;
687
688 if (depth != level) {
689 struct ext4_extent_idx *idx;
690 idx = path[level].p_idx;
691 while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
692 ext_debug("%d: move %d:%llu in new index %llu\n", level,
693 le32_to_cpu(idx->ei_block),
694 ext4_idx_pblock(idx),
695 newblock);
696 idx++;
697 }
698
699 return;
700 }
701
702 ex = path[depth].p_ext;
703 while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
704 ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
705 le32_to_cpu(ex->ee_block),
706 ext4_ext_pblock(ex),
707 ext4_ext_is_unwritten(ex),
708 ext4_ext_get_actual_len(ex),
709 newblock);
710 ex++;
711 }
712 }
713
714 #else
715 #define ext4_ext_show_path(inode, path)
716 #define ext4_ext_show_leaf(inode, path)
717 #define ext4_ext_show_move(inode, path, newblock, level)
718 #endif
719
720 void ext4_ext_drop_refs(struct ext4_ext_path *path)
721 {
722 int depth, i;
723
724 if (!path)
725 return;
726 depth = path->p_depth;
727 for (i = 0; i <= depth; i++, path++)
728 if (path->p_bh) {
729 brelse(path->p_bh);
730 path->p_bh = NULL;
731 }
732 }
733
734 /*
735 * ext4_ext_binsearch_idx:
736 * binary search for the closest index of the given block
737 * the header must be checked before calling this
738 */
739 static void
740 ext4_ext_binsearch_idx(struct inode *inode,
741 struct ext4_ext_path *path, ext4_lblk_t block)
742 {
743 struct ext4_extent_header *eh = path->p_hdr;
744 struct ext4_extent_idx *r, *l, *m;
745
746
747 ext_debug("binsearch for %u(idx): ", block);
748
749 l = EXT_FIRST_INDEX(eh) + 1;
750 r = EXT_LAST_INDEX(eh);
751 while (l <= r) {
752 m = l + (r - l) / 2;
753 if (block < le32_to_cpu(m->ei_block))
754 r = m - 1;
755 else
756 l = m + 1;
757 ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
758 m, le32_to_cpu(m->ei_block),
759 r, le32_to_cpu(r->ei_block));
760 }
761
762 path->p_idx = l - 1;
763 ext_debug(" -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
764 ext4_idx_pblock(path->p_idx));
765
766 #ifdef CHECK_BINSEARCH
767 {
768 struct ext4_extent_idx *chix, *ix;
769 int k;
770
771 chix = ix = EXT_FIRST_INDEX(eh);
772 for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
773 if (k != 0 &&
774 le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
775 printk(KERN_DEBUG "k=%d, ix=0x%p, "
776 "first=0x%p\n", k,
777 ix, EXT_FIRST_INDEX(eh));
778 printk(KERN_DEBUG "%u <= %u\n",
779 le32_to_cpu(ix->ei_block),
780 le32_to_cpu(ix[-1].ei_block));
781 }
782 BUG_ON(k && le32_to_cpu(ix->ei_block)
783 <= le32_to_cpu(ix[-1].ei_block));
784 if (block < le32_to_cpu(ix->ei_block))
785 break;
786 chix = ix;
787 }
788 BUG_ON(chix != path->p_idx);
789 }
790 #endif
791
792 }
793
794 /*
795 * ext4_ext_binsearch:
796 * binary search for closest extent of the given block
797 * the header must be checked before calling this
798 */
799 static void
800 ext4_ext_binsearch(struct inode *inode,
801 struct ext4_ext_path *path, ext4_lblk_t block)
802 {
803 struct ext4_extent_header *eh = path->p_hdr;
804 struct ext4_extent *r, *l, *m;
805
806 if (eh->eh_entries == 0) {
807 /*
808 * this leaf is empty:
809 * we get such a leaf in split/add case
810 */
811 return;
812 }
813
814 ext_debug("binsearch for %u: ", block);
815
816 l = EXT_FIRST_EXTENT(eh) + 1;
817 r = EXT_LAST_EXTENT(eh);
818
819 while (l <= r) {
820 m = l + (r - l) / 2;
821 if (block < le32_to_cpu(m->ee_block))
822 r = m - 1;
823 else
824 l = m + 1;
825 ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
826 m, le32_to_cpu(m->ee_block),
827 r, le32_to_cpu(r->ee_block));
828 }
829
830 path->p_ext = l - 1;
831 ext_debug(" -> %d:%llu:[%d]%d ",
832 le32_to_cpu(path->p_ext->ee_block),
833 ext4_ext_pblock(path->p_ext),
834 ext4_ext_is_unwritten(path->p_ext),
835 ext4_ext_get_actual_len(path->p_ext));
836
837 #ifdef CHECK_BINSEARCH
838 {
839 struct ext4_extent *chex, *ex;
840 int k;
841
842 chex = ex = EXT_FIRST_EXTENT(eh);
843 for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
844 BUG_ON(k && le32_to_cpu(ex->ee_block)
845 <= le32_to_cpu(ex[-1].ee_block));
846 if (block < le32_to_cpu(ex->ee_block))
847 break;
848 chex = ex;
849 }
850 BUG_ON(chex != path->p_ext);
851 }
852 #endif
853
854 }
855
856 int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
857 {
858 struct ext4_extent_header *eh;
859
860 eh = ext_inode_hdr(inode);
861 eh->eh_depth = 0;
862 eh->eh_entries = 0;
863 eh->eh_magic = EXT4_EXT_MAGIC;
864 eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
865 ext4_mark_inode_dirty(handle, inode);
866 return 0;
867 }
868
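/*
 * ext4_find_extent() walks the tree from the root down to the leaf
 * covering @block and records the (header, index, extent) visited at
 * each level.  The caller owns the returned path and must release it
 * with ext4_ext_drop_refs() and kfree().
 */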
869 struct ext4_ext_path *
870 ext4_find_extent(struct inode *inode, ext4_lblk_t block,
871 struct ext4_ext_path **orig_path, int flags)
872 {
873 struct ext4_extent_header *eh;
874 struct buffer_head *bh;
875 struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
876 short int depth, i, ppos = 0;
877 int ret;
878
879 eh = ext_inode_hdr(inode);
880 depth = ext_depth(inode);
881 if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
882 EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
883 depth);
884 ret = -EFSCORRUPTED;
885 goto err;
886 }
887
888 if (path) {
889 ext4_ext_drop_refs(path);
890 if (depth > path[0].p_maxdepth) {
891 kfree(path);
892 *orig_path = path = NULL;
893 }
894 }
895 if (!path) {
896 /* account possible depth increase */
897 path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
898 GFP_NOFS);
899 if (unlikely(!path))
900 return ERR_PTR(-ENOMEM);
901 path[0].p_maxdepth = depth + 1;
902 }
903 path[0].p_hdr = eh;
904 path[0].p_bh = NULL;
905
906 i = depth;
907 if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
908 ext4_cache_extents(inode, eh);
909
910 while (i) {
911 ext_debug("depth %d: num %d, max %d\n",
912 ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
913
914 ext4_ext_binsearch_idx(inode, path + ppos, block);
915 path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
916 path[ppos].p_depth = i;
917 path[ppos].p_ext = NULL;
918
919 bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
920 flags);
921 if (IS_ERR(bh)) {
922 ret = PTR_ERR(bh);
923 goto err;
924 }
925
926 eh = ext_block_hdr(bh);
927 ppos++;
928 path[ppos].p_bh = bh;
929 path[ppos].p_hdr = eh;
930 }
931
932 path[ppos].p_depth = i;
933 path[ppos].p_ext = NULL;
934 path[ppos].p_idx = NULL;
935
936 /* find extent */
937 ext4_ext_binsearch(inode, path + ppos, block);
938 /* if not an empty leaf */
939 if (path[ppos].p_ext)
940 path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
941
942 ext4_ext_show_path(inode, path);
943
944 return path;
945
946 err:
947 ext4_ext_drop_refs(path);
948 kfree(path);
949 if (orig_path)
950 *orig_path = NULL;
951 return ERR_PTR(ret);
952 }
953
954 /*
955 * ext4_ext_insert_index:
956 * insert new index [@logical;@ptr] into the block at @curp;
957 * check where to insert: before @curp or after @curp
958 */
959 static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
960 struct ext4_ext_path *curp,
961 int logical, ext4_fsblk_t ptr)
962 {
963 struct ext4_extent_idx *ix;
964 int len, err;
965
966 err = ext4_ext_get_access(handle, inode, curp);
967 if (err)
968 return err;
969
970 if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
971 EXT4_ERROR_INODE(inode,
972 "logical %d == ei_block %d!",
973 logical, le32_to_cpu(curp->p_idx->ei_block));
974 return -EFSCORRUPTED;
975 }
976
977 if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
978 >= le16_to_cpu(curp->p_hdr->eh_max))) {
979 EXT4_ERROR_INODE(inode,
980 "eh_entries %d >= eh_max %d!",
981 le16_to_cpu(curp->p_hdr->eh_entries),
982 le16_to_cpu(curp->p_hdr->eh_max));
983 return -EFSCORRUPTED;
984 }
985
986 if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
987 /* insert after */
988 ext_debug("insert new index %d after: %llu\n", logical, ptr);
989 ix = curp->p_idx + 1;
990 } else {
991 /* insert before */
992 ext_debug("insert new index %d before: %llu\n", logical, ptr);
993 ix = curp->p_idx;
994 }
995
996 len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
997 BUG_ON(len < 0);
998 if (len > 0) {
999 ext_debug("insert new index %d: "
1000 "move %d indices from 0x%p to 0x%p\n",
1001 logical, len, ix, ix + 1);
1002 memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
1003 }
1004
1005 if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
1006 EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
1007 return -EFSCORRUPTED;
1008 }
1009
1010 ix->ei_block = cpu_to_le32(logical);
1011 ext4_idx_store_pblock(ix, ptr);
1012 le16_add_cpu(&curp->p_hdr->eh_entries, 1);
1013
1014 if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
1015 EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
1016 return -EFSCORRUPTED;
1017 }
1018
1019 err = ext4_ext_dirty(handle, inode, curp);
1020 ext4_std_error(inode->i_sb, err);
1021
1022 return err;
1023 }
1024
1025 /*
1026 * ext4_ext_split:
1027 * inserts new subtree into the path, using free index entry
1028 * at depth @at:
1029 * - allocates all needed blocks (new leaf and all intermediate index blocks)
1030 * - makes decision where to split
1031 * - moves remaining extents and index entries (right to the split point)
1032 * into the newly allocated blocks
1033 * - initializes subtree
1034 */
1035 static int ext4_ext_split(handle_t *handle, struct inode *inode,
1036 unsigned int flags,
1037 struct ext4_ext_path *path,
1038 struct ext4_extent *newext, int at)
1039 {
1040 struct buffer_head *bh = NULL;
1041 int depth = ext_depth(inode);
1042 struct ext4_extent_header *neh;
1043 struct ext4_extent_idx *fidx;
1044 int i = at, k, m, a;
1045 ext4_fsblk_t newblock, oldblock;
1046 __le32 border;
1047 ext4_fsblk_t *ablocks = NULL;
1048 int err = 0;
1049 size_t ext_size = 0;
1050
1051
1052 /* make decision: where to split? */
1053 /* FIXME: now decision is simplest: at current extent */
1054 /* if current leaf will be split, then we should use
1055 * border from split point */
1056 if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
1057 EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
1058 return -EFSCORRUPTED;
1059 }
1060 if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
1061 border = path[depth].p_ext[1].ee_block;
1062 ext_debug("leaf will be split."
1063 " next leaf starts at %d\n",
1064 le32_to_cpu(border));
1065 } else {
1066 border = newext->ee_block;
1067 ext_debug("leaf will be added."
1068 " next leaf starts at %d\n",
1069 le32_to_cpu(border));
1070 }
1071
1072 /*
1073 * If error occurs, then we break processing
1074 * and mark filesystem read-only. index won't
1075 * be inserted and tree will be in consistent
1076 * state. Next mount will repair buffers too.
1077 */
1078
1079 /*
1080 * Get array to track all allocated blocks.
1081 * We need this to handle errors and free blocks
1082 * upon them.
1083 */
1084 ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), GFP_NOFS);
1085 if (!ablocks)
1086 return -ENOMEM;
1087
1088 /* allocate all needed blocks */
1089 ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
1090 for (a = 0; a < depth - at; a++) {
1091 newblock = ext4_ext_new_meta_block(handle, inode, path,
1092 newext, &err, flags);
1093 if (newblock == 0)
1094 goto cleanup;
1095 ablocks[a] = newblock;
1096 }
1097
1098 /* initialize new leaf */
1099 newblock = ablocks[--a];
1100 if (unlikely(newblock == 0)) {
1101 EXT4_ERROR_INODE(inode, "newblock == 0!");
1102 err = -EFSCORRUPTED;
1103 goto cleanup;
1104 }
1105 bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
1106 if (unlikely(!bh)) {
1107 err = -ENOMEM;
1108 goto cleanup;
1109 }
1110 lock_buffer(bh);
1111
1112 err = ext4_journal_get_create_access(handle, bh);
1113 if (err)
1114 goto cleanup;
1115
1116 neh = ext_block_hdr(bh);
1117 neh->eh_entries = 0;
1118 neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
1119 neh->eh_magic = EXT4_EXT_MAGIC;
1120 neh->eh_depth = 0;
1121
1122 /* move remainder of path[depth] to the new leaf */
1123 if (unlikely(path[depth].p_hdr->eh_entries !=
1124 path[depth].p_hdr->eh_max)) {
1125 EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
1126 path[depth].p_hdr->eh_entries,
1127 path[depth].p_hdr->eh_max);
1128 err = -EFSCORRUPTED;
1129 goto cleanup;
1130 }
1131 /* start copy from next extent */
1132 m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
1133 ext4_ext_show_move(inode, path, newblock, depth);
1134 if (m) {
1135 struct ext4_extent *ex;
1136 ex = EXT_FIRST_EXTENT(neh);
1137 memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
1138 le16_add_cpu(&neh->eh_entries, m);
1139 }
1140
1141 /* zero out unused area in the extent block */
1142 ext_size = sizeof(struct ext4_extent_header) +
1143 sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
1144 memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
1145 ext4_extent_block_csum_set(inode, neh);
1146 set_buffer_uptodate(bh);
1147 unlock_buffer(bh);
1148
1149 err = ext4_handle_dirty_metadata(handle, inode, bh);
1150 if (err)
1151 goto cleanup;
1152 brelse(bh);
1153 bh = NULL;
1154
1155 /* correct old leaf */
1156 if (m) {
1157 err = ext4_ext_get_access(handle, inode, path + depth);
1158 if (err)
1159 goto cleanup;
1160 le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
1161 err = ext4_ext_dirty(handle, inode, path + depth);
1162 if (err)
1163 goto cleanup;
1164
1165 }
1166
1167 /* create intermediate indexes */
1168 k = depth - at - 1;
1169 if (unlikely(k < 0)) {
1170 EXT4_ERROR_INODE(inode, "k %d < 0!", k);
1171 err = -EFSCORRUPTED;
1172 goto cleanup;
1173 }
1174 if (k)
1175 ext_debug("create %d intermediate indices\n", k);
1176 /* insert new index into current index block */
1177 /* current depth stored in i var */
1178 i = depth - 1;
1179 while (k--) {
1180 oldblock = newblock;
1181 newblock = ablocks[--a];
1182 bh = sb_getblk(inode->i_sb, newblock);
1183 if (unlikely(!bh)) {
1184 err = -ENOMEM;
1185 goto cleanup;
1186 }
1187 lock_buffer(bh);
1188
1189 err = ext4_journal_get_create_access(handle, bh);
1190 if (err)
1191 goto cleanup;
1192
1193 neh = ext_block_hdr(bh);
1194 neh->eh_entries = cpu_to_le16(1);
1195 neh->eh_magic = EXT4_EXT_MAGIC;
1196 neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1197 neh->eh_depth = cpu_to_le16(depth - i);
1198 fidx = EXT_FIRST_INDEX(neh);
1199 fidx->ei_block = border;
1200 ext4_idx_store_pblock(fidx, oldblock);
1201
1202 ext_debug("int.index at %d (block %llu): %u -> %llu\n",
1203 i, newblock, le32_to_cpu(border), oldblock);
1204
1205 /* move remainder of path[i] to the new index block */
1206 if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
1207 EXT_LAST_INDEX(path[i].p_hdr))) {
1208 EXT4_ERROR_INODE(inode,
1209 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
1210 le32_to_cpu(path[i].p_ext->ee_block));
1211 err = -EFSCORRUPTED;
1212 goto cleanup;
1213 }
1214 /* start copy indexes */
1215 m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
1216 ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
1217 EXT_MAX_INDEX(path[i].p_hdr));
1218 ext4_ext_show_move(inode, path, newblock, i);
1219 if (m) {
1220 memmove(++fidx, path[i].p_idx,
1221 sizeof(struct ext4_extent_idx) * m);
1222 le16_add_cpu(&neh->eh_entries, m);
1223 }
1224
1225 ext_size = sizeof(struct ext4_extent_header) +
1226 (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
1227 memset(bh->b_data + ext_size, 0,
1228 inode->i_sb->s_blocksize - ext_size);
1229 ext4_extent_block_csum_set(inode, neh);
1230 set_buffer_uptodate(bh);
1231 unlock_buffer(bh);
1232
1233 err = ext4_handle_dirty_metadata(handle, inode, bh);
1234 if (err)
1235 goto cleanup;
1236 brelse(bh);
1237 bh = NULL;
1238
1239 /* correct old index */
1240 if (m) {
1241 err = ext4_ext_get_access(handle, inode, path + i);
1242 if (err)
1243 goto cleanup;
1244 le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
1245 err = ext4_ext_dirty(handle, inode, path + i);
1246 if (err)
1247 goto cleanup;
1248 }
1249
1250 i--;
1251 }
1252
1253 /* insert new index */
1254 err = ext4_ext_insert_index(handle, inode, path + at,
1255 le32_to_cpu(border), newblock);
1256
1257 cleanup:
1258 if (bh) {
1259 if (buffer_locked(bh))
1260 unlock_buffer(bh);
1261 brelse(bh);
1262 }
1263
1264 if (err) {
1265 /* free all allocated blocks in error case */
1266 for (i = 0; i < depth; i++) {
1267 if (!ablocks[i])
1268 continue;
1269 ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
1270 EXT4_FREE_BLOCKS_METADATA);
1271 }
1272 }
1273 kfree(ablocks);
1274
1275 return err;
1276 }
1277
1278 /*
1279 * ext4_ext_grow_indepth:
1280 * implements tree growing procedure:
1281 * - allocates new block
1282 * - moves top-level data (index block or leaf) into the new block
1283 * - initializes new top-level, creating index that points to the
1284 * just created block
1285 */
1286 static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
1287 unsigned int flags)
1288 {
1289 struct ext4_extent_header *neh;
1290 struct buffer_head *bh;
1291 ext4_fsblk_t newblock, goal = 0;
1292 struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
1293 int err = 0;
1294 size_t ext_size = 0;
1295
1296 /* Try to prepend new index to old one */
1297 if (ext_depth(inode))
1298 goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
1299 if (goal > le32_to_cpu(es->s_first_data_block)) {
1300 flags |= EXT4_MB_HINT_TRY_GOAL;
1301 goal--;
1302 } else
1303 goal = ext4_inode_to_goal_block(inode);
1304 newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
1305 NULL, &err);
1306 if (newblock == 0)
1307 return err;
1308
1309 bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
1310 if (unlikely(!bh))
1311 return -ENOMEM;
1312 lock_buffer(bh);
1313
1314 err = ext4_journal_get_create_access(handle, bh);
1315 if (err) {
1316 unlock_buffer(bh);
1317 goto out;
1318 }
1319
1320 ext_size = sizeof(EXT4_I(inode)->i_data);
1321 /* move top-level index/leaf into new block */
1322 memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
1323 /* zero out unused area in the extent block */
1324 memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
1325
1326 /* set size of new block */
1327 neh = ext_block_hdr(bh);
1328 /* old root could have indexes or leaves
1329 * so calculate eh_max right way */
1330 if (ext_depth(inode))
1331 neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1332 else
1333 neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
1334 neh->eh_magic = EXT4_EXT_MAGIC;
1335 ext4_extent_block_csum_set(inode, neh);
1336 set_buffer_uptodate(bh);
1337 unlock_buffer(bh);
1338
1339 err = ext4_handle_dirty_metadata(handle, inode, bh);
1340 if (err)
1341 goto out;
1342
1343 /* Update top-level index: num,max,pointer */
1344 neh = ext_inode_hdr(inode);
1345 neh->eh_entries = cpu_to_le16(1);
1346 ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
1347 if (neh->eh_depth == 0) {
1348 /* Root extent block becomes index block */
1349 neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
1350 EXT_FIRST_INDEX(neh)->ei_block =
1351 EXT_FIRST_EXTENT(neh)->ee_block;
1352 }
1353 ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
1354 le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
1355 le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
1356 ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
1357
1358 le16_add_cpu(&neh->eh_depth, 1);
1359 ext4_mark_inode_dirty(handle, inode);
1360 out:
1361 brelse(bh);
1362
1363 return err;
1364 }
1365
1366 /*
1367 * ext4_ext_create_new_leaf:
1368 * finds empty index and adds new leaf.
1369 * if no free index is found, then it requests in-depth growing.
1370 */
1371 static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
1372 unsigned int mb_flags,
1373 unsigned int gb_flags,
1374 struct ext4_ext_path **ppath,
1375 struct ext4_extent *newext)
1376 {
1377 struct ext4_ext_path *path = *ppath;
1378 struct ext4_ext_path *curp;
1379 int depth, i, err = 0;
1380
1381 repeat:
1382 i = depth = ext_depth(inode);
1383
1384 /* walk up to the tree and look for free index entry */
1385 curp = path + depth;
1386 while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
1387 i--;
1388 curp--;
1389 }
1390
1391 /* we use already allocated block for index block,
1392 * so subsequent data blocks should be contiguous */
1393 if (EXT_HAS_FREE_INDEX(curp)) {
1394 /* if we found index with free entry, then use that
1395 * entry: create all needed subtree and add new leaf */
1396 err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
1397 if (err)
1398 goto out;
1399
1400 /* refill path */
1401 path = ext4_find_extent(inode,
1402 (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1403 ppath, gb_flags);
1404 if (IS_ERR(path))
1405 err = PTR_ERR(path);
1406 } else {
1407 /* tree is full, time to grow in depth */
1408 err = ext4_ext_grow_indepth(handle, inode, mb_flags);
1409 if (err)
1410 goto out;
1411
1412 /* refill path */
1413 path = ext4_find_extent(inode,
1414 (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1415 ppath, gb_flags);
1416 if (IS_ERR(path)) {
1417 err = PTR_ERR(path);
1418 goto out;
1419 }
1420
1421 /*
1422 * only first (depth 0 -> 1) produces free space;
1423 * in all other cases we have to split the grown tree
1424 */
1425 depth = ext_depth(inode);
1426 if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
1427 /* now we need to split */
1428 goto repeat;
1429 }
1430 }
1431
1432 out:
1433 return err;
1434 }
1435
1436 /*
1437 * search the closest allocated block to the left for *logical
1438 * and returns it at @logical + its physical address at @phys.
1439 * if *logical is the smallest allocated block, the function
1440 * returns 0 at @phys.
1441 * return value contains 0 (success) or error code
1442 */
1443 static int ext4_ext_search_left(struct inode *inode,
1444 struct ext4_ext_path *path,
1445 ext4_lblk_t *logical, ext4_fsblk_t *phys)
1446 {
1447 struct ext4_extent_idx *ix;
1448 struct ext4_extent *ex;
1449 int depth, ee_len;
1450
1451 if (unlikely(path == NULL)) {
1452 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1453 return -EFSCORRUPTED;
1454 }
1455 depth = path->p_depth;
1456 *phys = 0;
1457
1458 if (depth == 0 && path->p_ext == NULL)
1459 return 0;
1460
1461 /* usually extent in the path covers blocks smaller
1462 * than *logical, but it can be that extent is the
1463 * first one in the file */
1464
1465 ex = path[depth].p_ext;
1466 ee_len = ext4_ext_get_actual_len(ex);
1467 if (*logical < le32_to_cpu(ex->ee_block)) {
1468 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1469 EXT4_ERROR_INODE(inode,
1470 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
1471 *logical, le32_to_cpu(ex->ee_block));
1472 return -EFSCORRUPTED;
1473 }
1474 while (--depth >= 0) {
1475 ix = path[depth].p_idx;
1476 if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1477 EXT4_ERROR_INODE(inode,
1478 "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
1479 ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
1480 EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
1481 le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
1482 depth);
1483 return -EFSCORRUPTED;
1484 }
1485 }
1486 return 0;
1487 }
1488
1489 if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1490 EXT4_ERROR_INODE(inode,
1491 "logical %d < ee_block %d + ee_len %d!",
1492 *logical, le32_to_cpu(ex->ee_block), ee_len);
1493 return -EFSCORRUPTED;
1494 }
1495
1496 *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
1497 *phys = ext4_ext_pblock(ex) + ee_len - 1;
1498 return 0;
1499 }
1500
1501 /*
1502 * search the closest allocated block to the right for *logical
1503 * and returns it at @logical + its physical address at @phys.
1504 * if *logical is the largest allocated block, the function
1505 * returns 0 at @phys.
1506 * return value contains 0 (success) or error code
1507 */
1508 static int ext4_ext_search_right(struct inode *inode,
1509 struct ext4_ext_path *path,
1510 ext4_lblk_t *logical, ext4_fsblk_t *phys,
1511 struct ext4_extent **ret_ex)
1512 {
1513 struct buffer_head *bh = NULL;
1514 struct ext4_extent_header *eh;
1515 struct ext4_extent_idx *ix;
1516 struct ext4_extent *ex;
1517 ext4_fsblk_t block;
1518 int depth;
1519 int ee_len;
1520
1521 if (unlikely(path == NULL)) {
1522 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1523 return -EFSCORRUPTED;
1524 }
1525 depth = path->p_depth;
1526 *phys = 0;
1527
1528 if (depth == 0 && path->p_ext == NULL)
1529 return 0;
1530
1531 /* usually extent in the path covers blocks smaller
1532 * than *logical, but it can be that extent is the
1533 * first one in the file */
1534
1535 ex = path[depth].p_ext;
1536 ee_len = ext4_ext_get_actual_len(ex);
1537 if (*logical < le32_to_cpu(ex->ee_block)) {
1538 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1539 EXT4_ERROR_INODE(inode,
1540 "first_extent(path[%d].p_hdr) != ex",
1541 depth);
1542 return -EFSCORRUPTED;
1543 }
1544 while (--depth >= 0) {
1545 ix = path[depth].p_idx;
1546 if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1547 EXT4_ERROR_INODE(inode,
1548 "ix != EXT_FIRST_INDEX *logical %d!",
1549 *logical);
1550 return -EFSCORRUPTED;
1551 }
1552 }
1553 goto found_extent;
1554 }
1555
1556 if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1557 EXT4_ERROR_INODE(inode,
1558 "logical %d < ee_block %d + ee_len %d!",
1559 *logical, le32_to_cpu(ex->ee_block), ee_len);
1560 return -EFSCORRUPTED;
1561 }
1562
1563 if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
1564 /* next allocated block in this leaf */
1565 ex++;
1566 goto found_extent;
1567 }
1568
1569 /* go up and search for index to the right */
1570 while (--depth >= 0) {
1571 ix = path[depth].p_idx;
1572 if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
1573 goto got_index;
1574 }
1575
1576 /* we've gone up to the root and found no index to the right */
1577 return 0;
1578
1579 got_index:
1580 /* we've found index to the right, let's
1581 * follow it and find the closest allocated
1582 * block to the right */
1583 ix++;
1584 block = ext4_idx_pblock(ix);
1585 while (++depth < path->p_depth) {
1586 /* subtract from p_depth to get proper eh_depth */
1587 bh = read_extent_tree_block(inode, block,
1588 path->p_depth - depth, 0);
1589 if (IS_ERR(bh))
1590 return PTR_ERR(bh);
1591 eh = ext_block_hdr(bh);
1592 ix = EXT_FIRST_INDEX(eh);
1593 block = ext4_idx_pblock(ix);
1594 put_bh(bh);
1595 }
1596
1597 bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0);
1598 if (IS_ERR(bh))
1599 return PTR_ERR(bh);
1600 eh = ext_block_hdr(bh);
1601 ex = EXT_FIRST_EXTENT(eh);
1602 found_extent:
1603 *logical = le32_to_cpu(ex->ee_block);
1604 *phys = ext4_ext_pblock(ex);
1605 *ret_ex = ex;
1606 if (bh)
1607 put_bh(bh);
1608 return 0;
1609 }
1610
1611 /*
1612 * ext4_ext_next_allocated_block:
1613 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
1614 * NOTE: it considers block number from index entry as
1615 * allocated block. Thus, index entries have to be checked
1616 * by caller.
1617 */
1618 ext4_lblk_t
1619 ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1620 {
1621 int depth;
1622
1623 BUG_ON(path == NULL);
1624 depth = path->p_depth;
1625
1626 if (depth == 0 && path->p_ext == NULL)
1627 return EXT_MAX_BLOCKS;
1628
1629 while (depth >= 0) {
1630 if (depth == path->p_depth) {
1631 /* leaf */
1632 if (path[depth].p_ext &&
1633 path[depth].p_ext !=
1634 EXT_LAST_EXTENT(path[depth].p_hdr))
1635 return le32_to_cpu(path[depth].p_ext[1].ee_block);
1636 } else {
1637 /* index */
1638 if (path[depth].p_idx !=
1639 EXT_LAST_INDEX(path[depth].p_hdr))
1640 return le32_to_cpu(path[depth].p_idx[1].ei_block);
1641 }
1642 depth--;
1643 }
1644
1645 return EXT_MAX_BLOCKS;
1646 }
1647
1648 /*
1649 * ext4_ext_next_leaf_block:
1650 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
1651 */
1652 static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
1653 {
1654 int depth;
1655
1656 BUG_ON(path == NULL);
1657 depth = path->p_depth;
1658
1659 /* zero-tree has no leaf blocks at all */
1660 if (depth == 0)
1661 return EXT_MAX_BLOCKS;
1662
1663 /* go to index block */
1664 depth--;
1665
1666 while (depth >= 0) {
1667 if (path[depth].p_idx !=
1668 EXT_LAST_INDEX(path[depth].p_hdr))
1669 return (ext4_lblk_t)
1670 le32_to_cpu(path[depth].p_idx[1].ei_block);
1671 depth--;
1672 }
1673
1674 return EXT_MAX_BLOCKS;
1675 }
1676
1677 /*
1678 * ext4_ext_correct_indexes:
1679 * if leaf gets modified and modified extent is first in the leaf,
1680 * then we have to correct all indexes above.
1681 * TODO: do we need to correct tree in all cases?
1682 */
1683 static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1684 struct ext4_ext_path *path)
1685 {
1686 struct ext4_extent_header *eh;
1687 int depth = ext_depth(inode);
1688 struct ext4_extent *ex;
1689 __le32 border;
1690 int k, err = 0;
1691
1692 eh = path[depth].p_hdr;
1693 ex = path[depth].p_ext;
1694
1695 if (unlikely(ex == NULL || eh == NULL)) {
1696 EXT4_ERROR_INODE(inode,
1697 "ex %p == NULL or eh %p == NULL", ex, eh);
1698 return -EFSCORRUPTED;
1699 }
1700
1701 if (depth == 0) {
1702 /* there is no tree at all */
1703 return 0;
1704 }
1705
1706 if (ex != EXT_FIRST_EXTENT(eh)) {
1707 /* we correct tree if first leaf got modified only */
1708 return 0;
1709 }
1710
1711 /*
1712 * TODO: we need correction if border is smaller than current one
1713 */
1714 k = depth - 1;
1715 border = path[depth].p_ext->ee_block;
1716 err = ext4_ext_get_access(handle, inode, path + k);
1717 if (err)
1718 return err;
1719 path[k].p_idx->ei_block = border;
1720 err = ext4_ext_dirty(handle, inode, path + k);
1721 if (err)
1722 return err;
1723
1724 while (k--) {
1725 /* change all left-side indexes */
1726 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1727 break;
1728 err = ext4_ext_get_access(handle, inode, path + k);
1729 if (err)
1730 break;
1731 path[k].p_idx->ei_block = border;
1732 err = ext4_ext_dirty(handle, inode, path + k);
1733 if (err)
1734 break;
1735 }
1736
1737 return err;
1738 }
1739
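/*
 * Two extents may be merged only if they have the same
 * written/unwritten state, are logically and physically contiguous,
 * and their combined length does not overflow the per-extent limit.
 */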
1740 int
1741 ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
1742 struct ext4_extent *ex2)
1743 {
1744 unsigned short ext1_ee_len, ext2_ee_len;
1745
1746 if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
1747 return 0;
1748
1749 ext1_ee_len = ext4_ext_get_actual_len(ex1);
1750 ext2_ee_len = ext4_ext_get_actual_len(ex2);
1751
1752 if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
1753 le32_to_cpu(ex2->ee_block))
1754 return 0;
1755
1756 /*
1757 * To allow future support for preallocated extents to be added
1758 * as an RO_COMPAT feature, refuse to merge to extents if
1759 * this can result in the top bit of ee_len being set.
1760 */
1761 if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
1762 return 0;
1763
1764 /*
1765 * The check for IO to unwritten extent is somewhat racy as we
1766 * increment i_unwritten / set EXT4_STATE_DIO_UNWRITTEN only after
1767 * dropping i_data_sem. But reserved blocks should save us in that case.
1768 */
1769 if (ext4_ext_is_unwritten(ex1) &&
1770 (ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN) ||
1771 atomic_read(&EXT4_I(inode)->i_unwritten) ||
1772 (ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)))
1773 return 0;
1774 #ifdef AGGRESSIVE_TEST
1775 if (ext1_ee_len >= 4)
1776 return 0;
1777 #endif
1778
1779 if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
1780 return 1;
1781 return 0;
1782 }
1783
1784 /*
1785 * This function tries to merge the "ex" extent to the next extent in the
1786 * tree. It always tries to merge towards right. If you want to merge
1787 * towards left, pass "ex - 1" as argument instead of "ex".
1788 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
1789 * 1 if they got merged.
1790 */
1791 static int ext4_ext_try_to_merge_right(struct inode *inode,
1792 struct ext4_ext_path *path,
1793 struct ext4_extent *ex)
1794 {
1795 struct ext4_extent_header *eh;
1796 unsigned int depth, len;
1797 int merge_done = 0, unwritten;
1798
1799 depth = ext_depth(inode);
1800 BUG_ON(path[depth].p_hdr == NULL);
1801 eh = path[depth].p_hdr;
1802
1803 while (ex < EXT_LAST_EXTENT(eh)) {
1804 if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
1805 break;
1806
1807 unwritten = ext4_ext_is_unwritten(ex);
1808 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1809 + ext4_ext_get_actual_len(ex + 1));
1810 if (unwritten)
1811 ext4_ext_mark_unwritten(ex);
1812
1813 if (ex + 1 < EXT_LAST_EXTENT(eh)) {
1814 len = (EXT_LAST_EXTENT(eh) - ex - 1)
1815 * sizeof(struct ext4_extent);
1816 memmove(ex + 1, ex + 2, len);
1817 }
1818 le16_add_cpu(&eh->eh_entries, -1);
1819 merge_done = 1;
1820 WARN_ON(eh->eh_entries == 0);
1821 if (!eh->eh_entries)
1822 EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
1823 }
1824
1825 return merge_done;
1826 }
1827
1828 /*
1829 * This function does a very simple check to see if we can collapse
1830 * an extent tree with a single extent tree leaf block into the inode.
1831 */
1832 static void ext4_ext_try_to_merge_up(handle_t *handle,
1833 struct inode *inode,
1834 struct ext4_ext_path *path)
1835 {
1836 size_t s;
1837 unsigned max_root = ext4_ext_space_root(inode, 0);
1838 ext4_fsblk_t blk;
1839
1840 if ((path[0].p_depth != 1) ||
1841 (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
1842 (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
1843 return;
1844
1845 /*
1846 * We need to modify the block allocation bitmap and the block
1847 * group descriptor to release the extent tree block. If we
1848 * can't get the journal credits, give up.
1849 */
1850 if (ext4_journal_extend(handle, 2))
1851 return;
1852
1853 /*
1854 * Copy the extent data up to the inode
1855 */
1856 blk = ext4_idx_pblock(path[0].p_idx);
1857 s = le16_to_cpu(path[1].p_hdr->eh_entries) *
1858 sizeof(struct ext4_extent_idx);
1859 s += sizeof(struct ext4_extent_header);
1860
1861 path[1].p_maxdepth = path[0].p_maxdepth;
1862 memcpy(path[0].p_hdr, path[1].p_hdr, s);
1863 path[0].p_depth = 0;
1864 path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
1865 (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
1866 path[0].p_hdr->eh_max = cpu_to_le16(max_root);
1867
1868 brelse(path[1].p_bh);
1869 ext4_free_blocks(handle, inode, NULL, blk, 1,
1870 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
1871 }
1872
1873 /*
1874 * This function tries to merge the @ex extent to neighbours in the tree.
1875 * return 1 if merge left else 0.
1876 */
1877 static void ext4_ext_try_to_merge(handle_t *handle,
1878 struct inode *inode,
1879 struct ext4_ext_path *path,
1880 struct ext4_extent *ex) {
1881 struct ext4_extent_header *eh;
1882 unsigned int depth;
1883 int merge_done = 0;
1884
1885 depth = ext_depth(inode);
1886 BUG_ON(path[depth].p_hdr == NULL);
1887 eh = path[depth].p_hdr;
1888
1889 if (ex > EXT_FIRST_EXTENT(eh))
1890 merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1891
1892 if (!merge_done)
1893 (void) ext4_ext_try_to_merge_right(inode, path, ex);
1894
1895 ext4_ext_try_to_merge_up(handle, inode, path);
1896 }
1897
1898 /*
1899 * check if a portion of the "newext" extent overlaps with an
1900 * existing extent.
1901 *
1902 * If there is an overlap discovered, it updates the length of the newext
1903 * such that there will be no overlap, and then returns 1.
1904 * If there is no overlap found, it returns 0.
1905 */
1906 static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
1907 struct inode *inode,
1908 struct ext4_extent *newext,
1909 struct ext4_ext_path *path)
1910 {
1911 ext4_lblk_t b1, b2;
1912 unsigned int depth, len1;
1913 unsigned int ret = 0;
1914
1915 b1 = le32_to_cpu(newext->ee_block);
1916 len1 = ext4_ext_get_actual_len(newext);
1917 depth = ext_depth(inode);
1918 if (!path[depth].p_ext)
1919 goto out;
1920 b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
1921
1922 /*
1923 * get the next allocated block if the extent in the path
1924 * is before the requested block(s)
1925 */
1926 if (b2 < b1) {
1927 b2 = ext4_ext_next_allocated_block(path);
1928 if (b2 == EXT_MAX_BLOCKS)
1929 goto out;
1930 b2 = EXT4_LBLK_CMASK(sbi, b2);
1931 }
1932
1933 /* check for wrap through zero on extent logical start block */
1934 if (b1 + len1 < b1) {
1935 len1 = EXT_MAX_BLOCKS - b1;
1936 newext->ee_len = cpu_to_le16(len1);
1937 ret = 1;
1938 }
1939
1940 /* check for overlap */
1941 if (b1 + len1 > b2) {
1942 newext->ee_len = cpu_to_le16(b2 - b1);
1943 ret = 1;
1944 }
1945 out:
1946 return ret;
1947 }
1948
1949 /*
1950 * ext4_ext_insert_extent:
1951 * tries to merge requested extent into the existing extent or
1952 * inserts requested extent as new one into the tree,
1953 * creating new leaf in no-space case
1954 */
1955 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1956 struct ext4_ext_path **ppath,
1957 struct ext4_extent *newext, int gb_flags)
1958 {
1959 struct ext4_ext_path *path = *ppath;
1960 struct ext4_extent_header *eh;
1961 struct ext4_extent *ex, *fex;
1962 struct ext4_extent *nearex;
1963 struct ext4_ext_path *npath = NULL;
1964 int depth, len, err;
1965 ext4_lblk_t next;
1966 int mb_flags = 0, unwritten;
1967
1968 if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
1969 mb_flags |= EXT4_MB_DELALLOC_RESERVED;
1970 if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1971 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
1972 return -EFSCORRUPTED;
1973 }
1974 depth = ext_depth(inode);
1975 ex = path[depth].p_ext;
1976 eh = path[depth].p_hdr;
1977 if (unlikely(path[depth].p_hdr == NULL)) {
1978 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1979 return -EFSCORRUPTED;
1980 }
1981
1982 /* try to insert block into found extent and return */
1983 if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {
1984
1985 /*
1986 * Try to see whether we should rather test the extent on
1987 * right from ex, or from the left of ex. This is because
1988 * ext4_find_extent() can return either extent on the
1989 * left, or on the right from the searched position. This
1990 * will make merging more effective.
1991 */
1992 if (ex < EXT_LAST_EXTENT(eh) &&
1993 (le32_to_cpu(ex->ee_block) +
1994 ext4_ext_get_actual_len(ex) <
1995 le32_to_cpu(newext->ee_block))) {
1996 ex += 1;
1997 goto prepend;
1998 } else if ((ex > EXT_FIRST_EXTENT(eh)) &&
1999 (le32_to_cpu(newext->ee_block) +
2000 ext4_ext_get_actual_len(newext) <
2001 le32_to_cpu(ex->ee_block)))
2002 ex -= 1;
2003
2004 /* Try to append newex to the ex */
2005 if (ext4_can_extents_be_merged(inode, ex, newext)) {
2006 ext_debug("append [%d]%d block to %u:[%d]%d"
2007 "(from %llu)\n",
2008 ext4_ext_is_unwritten(newext),
2009 ext4_ext_get_actual_len(newext),
2010 le32_to_cpu(ex->ee_block),
2011 ext4_ext_is_unwritten(ex),
2012 ext4_ext_get_actual_len(ex),
2013 ext4_ext_pblock(ex));
2014 err = ext4_ext_get_access(handle, inode,
2015 path + depth);
2016 if (err)
2017 return err;
2018 unwritten = ext4_ext_is_unwritten(ex);
2019 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
2020 + ext4_ext_get_actual_len(newext));
2021 if (unwritten)
2022 ext4_ext_mark_unwritten(ex);
2023 eh = path[depth].p_hdr;
2024 nearex = ex;
2025 goto merge;
2026 }
2027
2028 prepend:
2029 /* Try to prepend newex to the ex */
2030 if (ext4_can_extents_be_merged(inode, newext, ex)) {
2031 ext_debug("prepend %u[%d]%d block to %u:[%d]%d"
2032 "(from %llu)\n",
2033 le32_to_cpu(newext->ee_block),
2034 ext4_ext_is_unwritten(newext),
2035 ext4_ext_get_actual_len(newext),
2036 le32_to_cpu(ex->ee_block),
2037 ext4_ext_is_unwritten(ex),
2038 ext4_ext_get_actual_len(ex),
2039 ext4_ext_pblock(ex));
2040 err = ext4_ext_get_access(handle, inode,
2041 path + depth);
2042 if (err)
2043 return err;
2044
2045 unwritten = ext4_ext_is_unwritten(ex);
2046 ex->ee_block = newext->ee_block;
2047 ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
2048 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
2049 + ext4_ext_get_actual_len(newext));
2050 if (unwritten)
2051 ext4_ext_mark_unwritten(ex);
2052 eh = path[depth].p_hdr;
2053 nearex = ex;
2054 goto merge;
2055 }
2056 }
2057
2058 depth = ext_depth(inode);
2059 eh = path[depth].p_hdr;
2060 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
2061 goto has_space;
2062
2063 /* probably next leaf has space for us? */
2064 fex = EXT_LAST_EXTENT(eh);
2065 next = EXT_MAX_BLOCKS;
2066 if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
2067 next = ext4_ext_next_leaf_block(path);
2068 if (next != EXT_MAX_BLOCKS) {
2069 ext_debug("next leaf block - %u\n", next);
2070 BUG_ON(npath != NULL);
2071 npath = ext4_find_extent(inode, next, NULL, 0);
2072 if (IS_ERR(npath))
2073 return PTR_ERR(npath);
2074 BUG_ON(npath->p_depth != path->p_depth);
2075 eh = npath[depth].p_hdr;
2076 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
2077 ext_debug("next leaf isn't full(%d)\n",
2078 le16_to_cpu(eh->eh_entries));
2079 path = npath;
2080 goto has_space;
2081 }
2082 ext_debug("next leaf has no free space(%d,%d)\n",
2083 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
2084 }
2085
2086 /*
2087 * There is no free space in the found leaf.
2088 * We're gonna add a new leaf in the tree.
2089 */
2090 if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
2091 mb_flags |= EXT4_MB_USE_RESERVED;
2092 err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
2093 ppath, newext);
2094 if (err)
2095 goto cleanup;
2096 depth = ext_depth(inode);
2097 eh = path[depth].p_hdr;
2098
2099 has_space:
2100 nearex = path[depth].p_ext;
2101
2102 err = ext4_ext_get_access(handle, inode, path + depth);
2103 if (err)
2104 goto cleanup;
2105
2106 if (!nearex) {
2107 /* there is no extent in this leaf, create first one */
2108 ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
2109 le32_to_cpu(newext->ee_block),
2110 ext4_ext_pblock(newext),
2111 ext4_ext_is_unwritten(newext),
2112 ext4_ext_get_actual_len(newext));
2113 nearex = EXT_FIRST_EXTENT(eh);
2114 } else {
2115 if (le32_to_cpu(newext->ee_block)
2116 > le32_to_cpu(nearex->ee_block)) {
2117 /* Insert after */
2118 ext_debug("insert %u:%llu:[%d]%d before: "
2119 "nearest %p\n",
2120 le32_to_cpu(newext->ee_block),
2121 ext4_ext_pblock(newext),
2122 ext4_ext_is_unwritten(newext),
2123 ext4_ext_get_actual_len(newext),
2124 nearex);
2125 nearex++;
2126 } else {
2127 /* Insert before */
2128 BUG_ON(newext->ee_block == nearex->ee_block);
2129 ext_debug("insert %u:%llu:[%d]%d after: "
2130 "nearest %p\n",
2131 le32_to_cpu(newext->ee_block),
2132 ext4_ext_pblock(newext),
2133 ext4_ext_is_unwritten(newext),
2134 ext4_ext_get_actual_len(newext),
2135 nearex);
2136 }
2137 len = EXT_LAST_EXTENT(eh) - nearex + 1;
2138 if (len > 0) {
2139 ext_debug("insert %u:%llu:[%d]%d: "
2140 "move %d extents from 0x%p to 0x%p\n",
2141 le32_to_cpu(newext->ee_block),
2142 ext4_ext_pblock(newext),
2143 ext4_ext_is_unwritten(newext),
2144 ext4_ext_get_actual_len(newext),
2145 len, nearex, nearex + 1);
2146 memmove(nearex + 1, nearex,
2147 len * sizeof(struct ext4_extent));
2148 }
2149 }
2150
2151 le16_add_cpu(&eh->eh_entries, 1);
2152 path[depth].p_ext = nearex;
2153 nearex->ee_block = newext->ee_block;
2154 ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
2155 nearex->ee_len = newext->ee_len;
2156
2157 merge:
2158 /* try to merge extents */
2159 if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
2160 ext4_ext_try_to_merge(handle, inode, path, nearex);
2161
2162
2163 /* time to correct all indexes above */
2164 err = ext4_ext_correct_indexes(handle, inode, path);
2165 if (err)
2166 goto cleanup;
2167
2168 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
2169
2170 cleanup:
2171 ext4_ext_drop_refs(npath);
2172 kfree(npath);
2173 return err;
2174 }
2175
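/*
 * Walk the extent tree for the range [block, block + num) and report
 * each mapped extent, hole, and delayed extent to fiemap via
 * fiemap_fill_next_extent().
 */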
2176 static int ext4_fill_fiemap_extents(struct inode *inode,
2177 ext4_lblk_t block, ext4_lblk_t num,
2178 struct fiemap_extent_info *fieinfo)
2179 {
2180 struct ext4_ext_path *path = NULL;
2181 struct ext4_extent *ex;
2182 struct extent_status es;
2183 ext4_lblk_t next, next_del, start = 0, end = 0;
2184 ext4_lblk_t last = block + num;
2185 int exists, depth = 0, err = 0;
2186 unsigned int flags = 0;
2187 unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
2188
2189 while (block < last && block != EXT_MAX_BLOCKS) {
2190 num = last - block;
2191 /* find extent for this block */
2192 down_read(&EXT4_I(inode)->i_data_sem);
2193
2194 path = ext4_find_extent(inode, block, &path, 0);
2195 if (IS_ERR(path)) {
2196 up_read(&EXT4_I(inode)->i_data_sem);
2197 err = PTR_ERR(path);
2198 path = NULL;
2199 break;
2200 }
2201
2202 depth = ext_depth(inode);
2203 if (unlikely(path[depth].p_hdr == NULL)) {
2204 up_read(&EXT4_I(inode)->i_data_sem);
2205 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2206 err = -EFSCORRUPTED;
2207 break;
2208 }
2209 ex = path[depth].p_ext;
2210 next = ext4_ext_next_allocated_block(path);
2211
2212 flags = 0;
2213 exists = 0;
2214 if (!ex) {
2215 /* there is no extent yet, so try to allocate
2216 * all requested space */
2217 start = block;
2218 end = block + num;
2219 } else if (le32_to_cpu(ex->ee_block) > block) {
2220 /* need to allocate space before found extent */
2221 start = block;
2222 end = le32_to_cpu(ex->ee_block);
2223 if (block + num < end)
2224 end = block + num;
2225 } else if (block >= le32_to_cpu(ex->ee_block)
2226 + ext4_ext_get_actual_len(ex)) {
2227 /* need to allocate space after found extent */
2228 start = block;
2229 end = block + num;
2230 if (end >= next)
2231 end = next;
2232 } else if (block >= le32_to_cpu(ex->ee_block)) {
2233 /*
2234 * some part of requested space is covered
2235 * by found extent
2236 */
2237 start = block;
2238 end = le32_to_cpu(ex->ee_block)
2239 + ext4_ext_get_actual_len(ex);
2240 if (block + num < end)
2241 end = block + num;
2242 exists = 1;
2243 } else {
2244 BUG();
2245 }
2246 BUG_ON(end <= start);
2247
2248 if (!exists) {
2249 es.es_lblk = start;
2250 es.es_len = end - start;
2251 es.es_pblk = 0;
2252 } else {
2253 es.es_lblk = le32_to_cpu(ex->ee_block);
2254 es.es_len = ext4_ext_get_actual_len(ex);
2255 es.es_pblk = ext4_ext_pblock(ex);
2256 if (ext4_ext_is_unwritten(ex))
2257 flags |= FIEMAP_EXTENT_UNWRITTEN;
2258 }
2259
2260 /*
2261 * Find delayed extent and update es accordingly. We call
2262 * it even in !exists case to find out whether es is the
2263 * last existing extent or not.
2264 */
2265 next_del = ext4_find_delayed_extent(inode, &es);
2266 if (!exists && next_del) {
2267 exists = 1;
2268 flags |= (FIEMAP_EXTENT_DELALLOC |
2269 FIEMAP_EXTENT_UNKNOWN);
2270 }
2271 up_read(&EXT4_I(inode)->i_data_sem);
2272
2273 if (unlikely(es.es_len == 0)) {
2274 EXT4_ERROR_INODE(inode, "es.es_len == 0");
2275 err = -EFSCORRUPTED;
2276 break;
2277 }
2278
2279 /*
2280 * This is possible iff next == next_del == EXT_MAX_BLOCKS.
2281 * We need to check next == EXT_MAX_BLOCKS because an extent
2282 * can carry both unwritten and delayed status: when a
2283 * delayed-allocated extent is later allocated by fallocate,
2284 * the extent status tree tracks both states in a single
2285 * extent.
2286 *
2287 * So we could return an unwritten and delayed extent, and
2288 * its block is equal to 'next'.
2289 */
2290 if (next == next_del && next == EXT_MAX_BLOCKS) {
2291 flags |= FIEMAP_EXTENT_LAST;
2292 if (unlikely(next_del != EXT_MAX_BLOCKS ||
2293 next != EXT_MAX_BLOCKS)) {
2294 EXT4_ERROR_INODE(inode,
2295 "next extent == %u, next "
2296 "delalloc extent = %u",
2297 next, next_del);
2298 err = -EFSCORRUPTED;
2299 break;
2300 }
2301 }
2302
2303 if (exists) {
2304 err = fiemap_fill_next_extent(fieinfo,
2305 (__u64)es.es_lblk << blksize_bits,
2306 (__u64)es.es_pblk << blksize_bits,
2307 (__u64)es.es_len << blksize_bits,
2308 flags);
2309 if (err < 0)
2310 break;
2311 if (err == 1) {
2312 err = 0;
2313 break;
2314 }
2315 }
2316
2317 block = es.es_lblk + es.es_len;
2318 }
2319
2320 ext4_ext_drop_refs(path);
2321 kfree(path);
2322 return err;
2323 }
2324
2325 static int ext4_fill_es_cache_info(struct inode *inode,
2326 ext4_lblk_t block, ext4_lblk_t num,
2327 struct fiemap_extent_info *fieinfo)
2328 {
2329 ext4_lblk_t next, end = block + num - 1;
2330 struct extent_status es;
2331 unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
2332 unsigned int flags;
2333 int err;
2334
2335 while (block <= end) {
2336 next = 0;
2337 flags = 0;
2338 if (!ext4_es_lookup_extent(inode, block, &next, &es))
2339 break;
2340 if (ext4_es_is_unwritten(&es))
2341 flags |= FIEMAP_EXTENT_UNWRITTEN;
2342 if (ext4_es_is_delayed(&es))
2343 flags |= (FIEMAP_EXTENT_DELALLOC |
2344 FIEMAP_EXTENT_UNKNOWN);
2345 if (ext4_es_is_hole(&es))
2346 flags |= EXT4_FIEMAP_EXTENT_HOLE;
2347 if (next == 0)
2348 flags |= FIEMAP_EXTENT_LAST;
2349 if (flags & (FIEMAP_EXTENT_DELALLOC|
2350 EXT4_FIEMAP_EXTENT_HOLE))
2351 es.es_pblk = 0;
2352 else
2353 es.es_pblk = ext4_es_pblock(&es);
2354 err = fiemap_fill_next_extent(fieinfo,
2355 (__u64)es.es_lblk << blksize_bits,
2356 (__u64)es.es_pblk << blksize_bits,
2357 (__u64)es.es_len << blksize_bits,
2358 flags);
2359 if (next == 0)
2360 break;
2361 block = next;
2362 if (err < 0)
2363 return err;
2364 if (err == 1)
2365 return 0;
2366 }
2367 return 0;
2368 }
2369
2370
2371 /*
2372 * ext4_ext_determine_hole - determine hole around given block
2373 * @inode:	inode we lookup in
2374 * @path:	path in extent tree to @lblk
2375 * @lblk:	pointer to logical block around which we want to determine hole
2376 *
2377 * Determine hole length (and start if easily possible) around the given
2378 * logical block. We don't try too hard to find the beginning of the hole,
2379 * but if @path actually points to the extent before @lblk, we provide it.
2380 *
2381 * The function returns the length of a hole starting at @lblk. We update
2382 * @lblk to the beginning of the hole if we managed to find it.
2383 */
2384 static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
2385 struct ext4_ext_path *path,
2386 ext4_lblk_t *lblk)
2387 {
2388 int depth = ext_depth(inode);
2389 struct ext4_extent *ex;
2390 ext4_lblk_t len;
2391
2392 ex = path[depth].p_ext;
2393 if (ex == NULL) {
2394 /* there is no extent yet, so the gap is [0; EXT_MAX_BLOCKS) */
2395 *lblk = 0;
2396 len = EXT_MAX_BLOCKS;
2397 } else if (*lblk < le32_to_cpu(ex->ee_block)) {
2398 len = le32_to_cpu(ex->ee_block) - *lblk;
2399 } else if (*lblk >= le32_to_cpu(ex->ee_block)
2400 + ext4_ext_get_actual_len(ex)) {
2401 ext4_lblk_t next;
2402
2403 *lblk = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
2404 next = ext4_ext_next_allocated_block(path);
2405 BUG_ON(next == *lblk);
2406 len = next - *lblk;
2407 } else {
2408 BUG();
2409 }
2410 return len;
2411 }
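/*
 * Worked example (hypothetical numbers): with a single extent mapping
 * logical blocks [100, 110), a lookup with *lblk == 50 takes the second
 * branch and returns len == 100 - 50 == 50 (the hole [50, 100)); a
 * lookup with *lblk == 115 takes the third branch, so *lblk is moved to
 * 110 and, if the next allocated block is 200, len == 200 - 110 == 90.
 */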
2412
2413
2414 /*
2415 * ext4_ext_put_gap_in_cache: calculate the boundaries of the gap
2416 * that the requested block fits into, and cache this gap.
2417 */
2418 static void
2419 ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start,
2420 ext4_lblk_t hole_len)
2421 {
2422 struct extent_status es;
2423
2424 ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
2425 hole_start + hole_len - 1, &es);
2426 if (es.es_len) {
2427 /* Is there a delayed extent containing lblock? */
2428 if (es.es_lblk <= hole_start)
2429 return;
2430 hole_len = min(es.es_lblk - hole_start, hole_len);
2431 }
2432 ext_debug(" -> %u:%u\n", hole_start, hole_len);
2433 ext4_es_insert_extent(inode, hole_start, hole_len, ~0,
2434 EXTENT_STATUS_HOLE);
2435 }
2436
2437
2438 /*
2439 * ext4_ext_rm_idx: removes the index entry from the index block.
2440 */
2441 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2442 struct ext4_ext_path *path, int depth)
2443 {
2444 int err;
2445 ext4_fsblk_t leaf;
2446
2447 /* free index block */
2448 depth--;
2449 path = path + depth;
2450 leaf = ext4_idx_pblock(path->p_idx);
2451 if (unlikely(path->p_hdr->eh_entries == 0)) {
2452 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2453 return -EFSCORRUPTED;
2454 }
2455 err = ext4_ext_get_access(handle, inode, path);
2456 if (err)
2457 return err;
2458
2459 if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
2460 int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
2461 len *= sizeof(struct ext4_extent_idx);
2462 memmove(path->p_idx, path->p_idx + 1, len);
2463 }
2464
2465 le16_add_cpu(&path->p_hdr->eh_entries, -1);
2466 err = ext4_ext_dirty(handle, inode, path);
2467 if (err)
2468 return err;
2469 ext_debug("index is empty, remove it, free block %llu\n", leaf);
2470 trace_ext4_ext_rm_idx(inode, leaf);
2471
2472 ext4_free_blocks(handle, inode, NULL, leaf, 1,
2473 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2474
2475 while (--depth >= 0) {
2476 if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
2477 break;
2478 path--;
2479 err = ext4_ext_get_access(handle, inode, path);
2480 if (err)
2481 break;
2482 path->p_idx->ei_block = (path+1)->p_idx->ei_block;
2483 err = ext4_ext_dirty(handle, inode, path);
2484 if (err)
2485 break;
2486 }
2487 return err;
2488 }
2489
2490
2491 /*
2492 * ext4_ext_calc_credits_for_single_extent:
2493 * This routine returns the maximum number of credits needed to insert
2494 * one extent into the extent tree. When passing an actual path, the
2495 * caller should calculate credits under i_data_sem.
2496 */
2497 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2498 struct ext4_ext_path *path)
2499 {
2500 if (path) {
2501 int depth = ext_depth(inode);
2502 int ret = 0;
2503
2504 /* probably there is space in the leaf? */
2505 if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2506 < le16_to_cpu(path[depth].p_hdr->eh_max)) {
2507 /*
2508 * There is some space in the leaf, so there is no
2509 * need to account for the leaf block credit.
2510 *
2511 * Bitmap and block group descriptor blocks
2512 * and other metadata blocks still need to be
2513 * accounted for.
2514 */
2515
2516 /* 1 bitmap, 1 block group descriptor */
2517 ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
2518 return ret;
2519 }
2520 }
2521
2522 return ext4_chunk_trans_blocks(inode, nrblocks);
2523 }
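/*
 * Sketch of the fast path above: when the leaf still has a free slot,
 * inserting one extent dirties no index blocks, so only a block bitmap
 * and a block group descriptor (the "2") plus the usual metadata
 * overhead estimated by EXT4_META_TRANS_BLOCKS() need to be charged.
 */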
2524
2525 /*
2526 * How many index/leaf blocks need to be changed/allocated to add
2527 * @extents extents?
2528 *
2529 * If we add a single extent, then in the worst case, each tree
2530 * level index/leaf needs to be changed in case of the tree split.
2531 *
2532 * If more extents are inserted, they could cause the whole tree
2533 * to split more than once, but this is really rare. */
2534 int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
2535 {
2536 int index;
2537 int depth;
2538
2539 /* If we are converting inline data, only one block is needed here. */
2540 if (ext4_has_inline_data(inode))
2541 return 1;
2542
2543 depth = ext_depth(inode);
2544
2545 if (extents <= 1)
2546 index = depth * 2;
2547 else
2548 index = depth * 3;
2549
2550 return index;
2551 }
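/*
 * Worked example (hypothetical depth): for a tree of depth 2, adding a
 * single extent is charged index == 2 * 2 == 4 blocks (one index and
 * one leaf per level may change on a split); for multiple extents the
 * estimate grows to 2 * 3 == 6 to allow for the rare repeated split.
 */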
2552
2553 static inline int get_default_free_blocks_flags(struct inode *inode)
2554 {
2555 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
2556 ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
2557 return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
2558 else if (ext4_should_journal_data(inode))
2559 return EXT4_FREE_BLOCKS_FORGET;
2560 return 0;
2561 }
2562
2563 /*
2564 * ext4_rereserve_cluster - increment the reserved cluster count when
2565 *                          freeing a cluster with a pending reservation
2566 *
2567 * @inode - file containing the cluster
2568 * @lblk - logical block in cluster to be reserved
2569 *
2570 * Increments the reserved cluster count and adjusts quota in a bigalloc
2571 * file system when freeing a partial cluster containing at least one
2572 * delayed and unwritten block.  A partial cluster meeting that
2573 * requirement will have a pending reservation.  If so, the
2574 * RERESERVE_CLUSTER flag is used when calling ext4_free_blocks() to
2575 * defer reserved and allocated space accounting to a subsequent call
2576 * to this function.
2577 */
2578 static void ext4_rereserve_cluster(struct inode *inode, ext4_lblk_t lblk)
2579 {
2580 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2581 struct ext4_inode_info *ei = EXT4_I(inode);
2582
2583 dquot_reclaim_block(inode, EXT4_C2B(sbi, 1));
2584
2585 spin_lock(&ei->i_block_reservation_lock);
2586 ei->i_reserved_data_blocks++;
2587 percpu_counter_add(&sbi->s_dirtyclusters_counter, 1);
2588 spin_unlock(&ei->i_block_reservation_lock);
2589
2590 percpu_counter_add(&sbi->s_freeclusters_counter, 1);
2591 ext4_remove_pending(inode, lblk);
2592 }
2593
2594 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2595 struct ext4_extent *ex,
2596 struct partial_cluster *partial,
2597 ext4_lblk_t from, ext4_lblk_t to)
2598 {
2599 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2600 unsigned short ee_len = ext4_ext_get_actual_len(ex);
2601 ext4_fsblk_t last_pblk, pblk;
2602 ext4_lblk_t num;
2603 int flags;
2604
2605 /* only extent tail removal is allowed */
2606 if (from < le32_to_cpu(ex->ee_block) ||
2607 to != le32_to_cpu(ex->ee_block) + ee_len - 1) {
2608 ext4_error(sbi->s_sb,
2609 "strange request: removal(2) %u-%u from %u:%u",
2610 from, to, le32_to_cpu(ex->ee_block), ee_len);
2611 return 0;
2612 }
2613
2614 #ifdef EXTENTS_STATS
2615 spin_lock(&sbi->s_ext_stats_lock);
2616 sbi->s_ext_blocks += ee_len;
2617 sbi->s_ext_extents++;
2618 if (ee_len < sbi->s_ext_min)
2619 sbi->s_ext_min = ee_len;
2620 if (ee_len > sbi->s_ext_max)
2621 sbi->s_ext_max = ee_len;
2622 if (ext_depth(inode) > sbi->s_depth_max)
2623 sbi->s_depth_max = ext_depth(inode);
2624 spin_unlock(&sbi->s_ext_stats_lock);
2625 #endif
2626
2627 trace_ext4_remove_blocks(inode, ex, from, to, partial);
2628
2629 /*
2630 * if we have a partial cluster, and it's different from the
2631 * cluster of the last block in the extent, we free it
2632 */
2633 last_pblk = ext4_ext_pblock(ex) + ee_len - 1;
2634
2635 if (partial->state != initial &&
2636 partial->pclu != EXT4_B2C(sbi, last_pblk)) {
2637 if (partial->state == tofree) {
2638 flags = get_default_free_blocks_flags(inode);
2639 if (ext4_is_pending(inode, partial->lblk))
2640 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
2641 ext4_free_blocks(handle, inode, NULL,
2642 EXT4_C2B(sbi, partial->pclu),
2643 sbi->s_cluster_ratio, flags);
2644 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
2645 ext4_rereserve_cluster(inode, partial->lblk);
2646 }
2647 partial->state = initial;
2648 }
2649
2650 num = le32_to_cpu(ex->ee_block) + ee_len - from;
2651 pblk = ext4_ext_pblock(ex) + ee_len - num;
2652
2653
2654 /*
2655 * Free the partial cluster at the end of the extent (if any), unless
2656 * it's in use by another extent (partial_cluster state is nofree).
2657 * If it exists here, it's shared with the extent's last block.
2658 */
2659 flags = get_default_free_blocks_flags(inode);
2660
2661 /* partial, left end cluster aligned, right end unaligned */
2662 if ((EXT4_LBLK_COFF(sbi, to) != sbi->s_cluster_ratio - 1) &&
2663 (EXT4_LBLK_CMASK(sbi, to) >= from) &&
2664 (partial->state != nofree)) {
2665 if (ext4_is_pending(inode, to))
2666 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
2667 ext4_free_blocks(handle, inode, NULL,
2668 EXT4_PBLK_CMASK(sbi, last_pblk),
2669 sbi->s_cluster_ratio, flags);
2670 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
2671 ext4_rereserve_cluster(inode, to);
2672 partial->state = initial;
2673 flags = get_default_free_blocks_flags(inode);
2674 }
2675
2676 flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
2677
2678 /*
2679 * For bigalloc file systems, we never free a partial cluster
2680 * at the beginning of the extent.  Instead, we check to see if we
2681 * need to free it on a subsequent iteration of the loop or at the
2682 * end of the truncate operation.
2683 */
2684 flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
2685 ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
2686
2687 /* reset the partial cluster if we've freed past it */
2688 if (partial->state != initial && partial->pclu != EXT4_B2C(sbi, pblk))
2689 partial->state = initial;
2690
2691 /*
2692 * If we've freed the entire extent but its beginning is not left
2693 * cluster aligned and is not marked as ineligible for freeing, we
2694 * record the partial cluster at the beginning of the extent.  It
2695 * wasn't freed by the preceding ext4_free_blocks() call, and we
2696 * need to look farther to the left to determine if it's to be freed
2697 * (not shared with another extent).  Else, reset the partial
2698 * cluster - we're either done freeing or the beginning of the
2699 * extent is left cluster aligned.
2700 */
2701 if (EXT4_LBLK_COFF(sbi, from) && num == ee_len) {
2702 if (partial->state == initial) {
2703 partial->pclu = EXT4_B2C(sbi, pblk);
2704 partial->lblk = from;
2705 partial->state = tofree;
2706 }
2707 } else {
2708 partial->state = initial;
2709 }
2710
2711 return 0;
2712 }
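/*
 * Bigalloc example (hypothetical numbers, cluster ratio 4): removing
 * all of an extent mapping blocks [10, 16) at pblk 50 gives num == 6
 * and pblk == 50.  Since EXT4_LBLK_COFF(sbi, 10) == 2, the extent does
 * not start on a cluster boundary, so cluster EXT4_B2C(sbi, 50) == 12
 * is recorded above as a tofree partial cluster instead of being freed
 * here; it is freed only once we know no extent to its left shares it.
 */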
2713
2714
2715 /*
2716 * ext4_ext_rm_leaf() removes the extents associated with the
2717 * blocks appearing between "start" and "end".  Both "start"
2718 * and "end" must appear in the same extent or EIO is returned.
2719 *
2720 * @handle:  The journal handle
2721 * @inode:   The file's inode
2722 * @path:    The path to the leaf
2723 * @partial: The cluster which we'll have to free if all extents
2724 *           in the leaf are freed; in nofree state, we'll skip
2725 *           freeing it when removing blocks
2726 * @start:   The first block to remove
2727 * @end:     The last block to remove
2728 */
2729 static int
2730 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2731 struct ext4_ext_path *path,
2732 struct partial_cluster *partial,
2733 ext4_lblk_t start, ext4_lblk_t end)
2734 {
2735 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2736 int err = 0, correct_index = 0;
2737 int depth = ext_depth(inode), credits;
2738 struct ext4_extent_header *eh;
2739 ext4_lblk_t a, b;
2740 unsigned num;
2741 ext4_lblk_t ex_ee_block;
2742 unsigned short ex_ee_len;
2743 unsigned unwritten = 0;
2744 struct ext4_extent *ex;
2745 ext4_fsblk_t pblk;
2746
2747 /* the header must be checked already in ext4_ext_remove_space() */
2748 ext_debug("truncate since %u in leaf to %u\n", start, end);
2749 if (!path[depth].p_hdr)
2750 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2751 eh = path[depth].p_hdr;
2752 if (unlikely(path[depth].p_hdr == NULL)) {
2753 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2754 return -EFSCORRUPTED;
2755 }
2756
2757 ex = path[depth].p_ext;
2758 if (!ex)
2759 ex = EXT_LAST_EXTENT(eh);
2760
2761 ex_ee_block = le32_to_cpu(ex->ee_block);
2762 ex_ee_len = ext4_ext_get_actual_len(ex);
2763
2764 trace_ext4_ext_rm_leaf(inode, start, ex, partial);
2765
2766 while (ex >= EXT_FIRST_EXTENT(eh) &&
2767 ex_ee_block + ex_ee_len > start) {
2768
2769 if (ext4_ext_is_unwritten(ex))
2770 unwritten = 1;
2771 else
2772 unwritten = 0;
2773
2774 ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
2775 unwritten, ex_ee_len);
2776 path[depth].p_ext = ex;
2777
2778 a = ex_ee_block > start ? ex_ee_block : start;
2779 b = ex_ee_block+ex_ee_len - 1 < end ?
2780 ex_ee_block+ex_ee_len - 1 : end;
2781
2782 ext_debug(" border %u:%u\n", a, b);
2783
2784 /* If this extent is beyond the end of the hole, skip it */
2785 if (end < ex_ee_block) {
2786 /*
2787 * We're going to skip this extent and move to another,
2788 * so note that its first cluster is in use to avoid
2789 * freeing it when removing blocks.  Eventually, the
2790 * right edge of the truncated/punched region will
2791 * be just to the left.
2792 */
2793 if (sbi->s_cluster_ratio > 1) {
2794 pblk = ext4_ext_pblock(ex);
2795 partial->pclu = EXT4_B2C(sbi, pblk);
2796 partial->state = nofree;
2797 }
2798 ex--;
2799 ex_ee_block = le32_to_cpu(ex->ee_block);
2800 ex_ee_len = ext4_ext_get_actual_len(ex);
2801 continue;
2802 } else if (b != ex_ee_block + ex_ee_len - 1) {
2803 EXT4_ERROR_INODE(inode,
2804 "can not handle truncate %u:%u "
2805 "on extent %u:%u",
2806 start, end, ex_ee_block,
2807 ex_ee_block + ex_ee_len - 1);
2808 err = -EFSCORRUPTED;
2809 goto out;
2810 } else if (a != ex_ee_block) {
2811 /* remove tail of the extent */
2812 num = a - ex_ee_block;
2813 } else {
2814 /* remove whole extent: excellent! */
2815 num = 0;
2816 }
2817
2818 /*
2819 * 3 for leaf, sb, and inode plus 2 (bitmap and group
2820 * descriptor) for each block group; assume two block
2821 * groups plus ex_ee_len/blocks_per_group for the worst case
2822 */
2823 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2824 if (ex == EXT_FIRST_EXTENT(eh)) {
2825 correct_index = 1;
2826 credits += (ext_depth(inode)) + 1;
2827 }
2828 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2829
2830 err = ext4_ext_truncate_extend_restart(handle, inode, credits);
2831 if (err)
2832 goto out;
2833
2834 err = ext4_ext_get_access(handle, inode, path + depth);
2835 if (err)
2836 goto out;
2837
2838 err = ext4_remove_blocks(handle, inode, ex, partial, a, b);
2839 if (err)
2840 goto out;
2841
2842 if (num == 0)
2843 /* this extent is removed; mark the slot entirely unused */
2844 ext4_ext_store_pblock(ex, 0);
2845
2846 ex->ee_len = cpu_to_le16(num);
2847
2848 /*
2849 * Do not mark unwritten if all the blocks in the extent have been removed.
2850 */
2851 if (unwritten && num)
2852 ext4_ext_mark_unwritten(ex);
2853
2854 /* If the extent was completely released,
2855 * we need to remove it from the leaf
2856 */
2857 if (num == 0) {
2858 if (end != EXT_MAX_BLOCKS - 1) {
2859 /*
2860 * For hole punching, we need to scoot all the
2861 * extents up when an extent is removed so that
2862 * we don't have blank extents in the middle
2863 */
2864 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2865 sizeof(struct ext4_extent));
2866
2867 /* Now get rid of the one at the end */
2868 memset(EXT_LAST_EXTENT(eh), 0,
2869 sizeof(struct ext4_extent));
2870 }
2871 le16_add_cpu(&eh->eh_entries, -1);
2872 }
2873
2874 err = ext4_ext_dirty(handle, inode, path + depth);
2875 if (err)
2876 goto out;
2877
2878 ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
2879 ext4_ext_pblock(ex));
2880 ex--;
2881 ex_ee_block = le32_to_cpu(ex->ee_block);
2882 ex_ee_len = ext4_ext_get_actual_len(ex);
2883 }
2884
2885 if (correct_index && eh->eh_entries)
2886 err = ext4_ext_correct_indexes(handle, inode, path);
2887
2888 /*
2889 * If there's a partial cluster and at least one extent remains in
2890 * the leaf, free the partial cluster if it isn't shared with the
2891 * current extent.  If it is shared with the current extent
2892 * we reset the partial cluster because we've reached the start of the
2893 * truncated/punched region and we're done removing blocks.
2894 */
2895 if (partial->state == tofree && ex >= EXT_FIRST_EXTENT(eh)) {
2896 pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
2897 if (partial->pclu != EXT4_B2C(sbi, pblk)) {
2898 int flags = get_default_free_blocks_flags(inode);
2899
2900 if (ext4_is_pending(inode, partial->lblk))
2901 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
2902 ext4_free_blocks(handle, inode, NULL,
2903 EXT4_C2B(sbi, partial->pclu),
2904 sbi->s_cluster_ratio, flags);
2905 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
2906 ext4_rereserve_cluster(inode, partial->lblk);
2907 }
2908 partial->state = initial;
2909 }
2910
2911 /* if this leaf is free, then we should
2912 * remove it from the index block above */
2913 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2914 err = ext4_ext_rm_idx(handle, inode, path, depth);
2915
2916 out:
2917 return err;
2918 }
2919
2920
2921 /*
2922 * ext4_ext_more_to_rm: returns 1 if the current index still has entries to free
2923 */
2924 static int
2925 ext4_ext_more_to_rm(struct ext4_ext_path *path)
2926 {
2927 BUG_ON(path->p_idx == NULL);
2928
2929 if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2930 return 0;
2931
2932 /*
2933 * if truncate on a deeper level happened, it wasn't partial,
2934 * so we have to consider the current index for truncation
2935 */
2936 if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2937 return 0;
2938 return 1;
2939 }
2940
2941 int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
2942 ext4_lblk_t end)
2943 {
2944 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2945 int depth = ext_depth(inode);
2946 struct ext4_ext_path *path = NULL;
2947 struct partial_cluster partial;
2948 handle_t *handle;
2949 int i = 0, err = 0;
2950
2951 partial.pclu = 0;
2952 partial.lblk = 0;
2953 partial.state = initial;
2954
2955 ext_debug("truncate since %u to %u\n", start, end);
2956
2957 /* probably the first extent we're gonna free will be the last in its block */
2958 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1);
2959 if (IS_ERR(handle))
2960 return PTR_ERR(handle);
2961
2962 again:
2963 trace_ext4_ext_remove_space(inode, start, end, depth);
2964
2965 /*
2966 * Check if we are removing extents inside the extent tree. If that
2967 * is the case, we are going to punch a hole inside the extent tree
2968 * so we have to check whether we need to split the extent covering
2969 * the last block to remove so we can easily remove the part of it
2970 * in ext4_ext_rm_leaf().
2971 */
2972 if (end < EXT_MAX_BLOCKS - 1) {
2973 struct ext4_extent *ex;
2974 ext4_lblk_t ee_block, ex_end, lblk;
2975 ext4_fsblk_t pblk;
2976
2977 /* find the extent for, or the closest extent to, this block */
2978 path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE);
2979 if (IS_ERR(path)) {
2980 ext4_journal_stop(handle);
2981 return PTR_ERR(path);
2982 }
2983 depth = ext_depth(inode);
2984 /* a leaf may be missing only if the inode has no blocks at all */
2985 ex = path[depth].p_ext;
2986 if (!ex) {
2987 if (depth) {
2988 EXT4_ERROR_INODE(inode,
2989 "path[%d].p_hdr == NULL",
2990 depth);
2991 err = -EFSCORRUPTED;
2992 }
2993 goto out;
2994 }
2995
2996 ee_block = le32_to_cpu(ex->ee_block);
2997 ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1;
2998
2999 /*
3000 * See if the last block is inside the extent; if so, split
3001 * the extent at the 'end' block so we can easily remove the
3002 * tail of the first part of the split extent in
3003 * ext4_ext_rm_leaf().
3004 */
3005 if (end >= ee_block && end < ex_end) {
3006 /*
3007 * If we're going to split the extent, note that
3008 * the cluster containing the block after 'end' is
3009 * in use to avoid freeing it when removing blocks.
3010 */
3011
3012 if (sbi->s_cluster_ratio > 1) {
3013 pblk = ext4_ext_pblock(ex) + end - ee_block + 1;
3014 partial.pclu = EXT4_B2C(sbi, pblk);
3015 partial.state = nofree;
3016 }
3017
3018 /*
3019 * Split the extent in two so that 'end' is the last block in the
3020 * first new extent.  Also, we should not fail removing space due
3021 * to ENOSPC, so try to use a reserved block if that happens.
3022 */
3023
3024 err = ext4_force_split_extent_at(handle, inode, &path,
3025 end + 1, 1);
3026 if (err < 0)
3027 goto out;
3028
3029 } else if (sbi->s_cluster_ratio > 1 && end >= ex_end &&
3030 partial.state == initial) {
3031 /*
3032 * If we're punching, there's an extent to the right.
3033 * If the partial cluster hasn't been set, set it to
3034 * that extent's first cluster and its state to nofree
3035 * so it won't be freed should it contain blocks to be
3036 * removed. If it's already set (tofree/nofree), we're
3037 * retrying and keep the original partial cluster info
3038 * so a cluster marked tofree as a result of earlier
3039 * extent removal is not lost.
3040 */
3041 lblk = ex_end + 1;
3042 err = ext4_ext_search_right(inode, path, &lblk, &pblk,
3043 &ex);
3044 if (err)
3045 goto out;
3046 if (pblk) {
3047 partial.pclu = EXT4_B2C(sbi, pblk);
3048 partial.state = nofree;
3049 }
3050 }
3051 }
3052
3053 /* We start scanning from the right side, freeing all the blocks
3054 * after i_size and walking into the tree depth-wise.
3055 */
3056 depth = ext_depth(inode);
3057 if (path) {
3058 int k = i = depth;
3059 while (--k > 0)
3060 path[k].p_block =
3061 le16_to_cpu(path[k].p_hdr->eh_entries)+1;
3062 } else {
3063 path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
3064 GFP_NOFS);
3065 if (path == NULL) {
3066 ext4_journal_stop(handle);
3067 return -ENOMEM;
3068 }
3069 path[0].p_maxdepth = path[0].p_depth = depth;
3070 path[0].p_hdr = ext_inode_hdr(inode);
3071 i = 0;
3072
3073 if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
3074 err = -EFSCORRUPTED;
3075 goto out;
3076 }
3077 }
3078 err = 0;
3079
3080 while (i >= 0 && err == 0) {
3081 if (i == depth) {
3082 /* this is a leaf block */
3083 err = ext4_ext_rm_leaf(handle, inode, path,
3084 &partial, start, end);
3085 /* root level has p_bh == NULL, brelse() eats this */
3086 brelse(path[i].p_bh);
3087 path[i].p_bh = NULL;
3088 i--;
3089 continue;
3090 }
3091
3092 /* this is an index block */
3093 if (!path[i].p_hdr) {
3094 ext_debug("initialize header\n");
3095 path[i].p_hdr = ext_block_hdr(path[i].p_bh);
3096 }
3097
3098 if (!path[i].p_idx) {
3099 /* this level hasn't been touched yet */
3100 path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
3101 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
3102 ext_debug("init index ptr: hdr 0x%p, num %d\n",
3103 path[i].p_hdr,
3104 le16_to_cpu(path[i].p_hdr->eh_entries));
3105 } else {
3106 /* we were already here, see the next index */
3107 path[i].p_idx--;
3108 }
3109
3110 ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
3111 i, EXT_FIRST_INDEX(path[i].p_hdr),
3112 path[i].p_idx);
3113 if (ext4_ext_more_to_rm(path + i)) {
3114 struct buffer_head *bh;
3115 /* go to the next level */
3116 ext_debug("move to level %d (block %llu)\n",
3117 i + 1, ext4_idx_pblock(path[i].p_idx));
3118 memset(path + i + 1, 0, sizeof(*path));
3119 bh = read_extent_tree_block(inode,
3120 ext4_idx_pblock(path[i].p_idx), depth - i - 1,
3121 EXT4_EX_NOCACHE);
3122 if (IS_ERR(bh)) {
3123 /* should we reset i_size? */
3124 err = PTR_ERR(bh);
3125 break;
3126 }
3127 /* Yield here to deal with large extent trees.
3128 * Should be a no-op if we did IO above. */
3129 cond_resched();
3130 if (WARN_ON(i + 1 > depth)) {
3131 err = -EFSCORRUPTED;
3132 break;
3133 }
3134 path[i + 1].p_bh = bh;
3135
3136 /* save the actual number of indexes, since this
3137 * number is changed at the next iteration */
3138 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
3139 i++;
3140 } else {
3141 /* we finished processing this index, go up */
3142 if (path[i].p_hdr->eh_entries == 0 && i > 0) {
3143 /* index is empty, remove it;
3144 * the handle was already prepared
3145 * when the leaf was truncated */
3146 err = ext4_ext_rm_idx(handle, inode, path, i);
3147 }
3148 /* root level has p_bh == NULL, brelse() eats this */
3149 brelse(path[i].p_bh);
3150 path[i].p_bh = NULL;
3151 i--;
3152 ext_debug("return to level %d\n", i);
3153 }
3154 }
3155
3156 trace_ext4_ext_remove_space_done(inode, start, end, depth, &partial,
3157 path->p_hdr->eh_entries);
3158
3159 /*
3160 * If there's a partial cluster and we have removed the first extent
3161 * in the file, then we also need to free the partial cluster.
3162 */
3163 if (partial.state == tofree && err == 0) {
3164 int flags = get_default_free_blocks_flags(inode);
3165
3166 if (ext4_is_pending(inode, partial.lblk))
3167 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
3168 ext4_free_blocks(handle, inode, NULL,
3169 EXT4_C2B(sbi, partial.pclu),
3170 sbi->s_cluster_ratio, flags);
3171 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
3172 ext4_rereserve_cluster(inode, partial.lblk);
3173 partial.state = initial;
3174 }
3175
3176 /* TODO: flexible tree reduction should be here */
3177 if (path->p_hdr->eh_entries == 0) {
3178 /*
3179 * truncate to zero freed the whole tree,
3180 * so we need to correct eh_depth
3181 */
3182 err = ext4_ext_get_access(handle, inode, path);
3183 if (err == 0) {
3184 ext_inode_hdr(inode)->eh_depth = 0;
3185 ext_inode_hdr(inode)->eh_max =
3186 cpu_to_le16(ext4_ext_space_root(inode, 0));
3187 err = ext4_ext_dirty(handle, inode, path);
3188 }
3189 }
3190 out:
3191 ext4_ext_drop_refs(path);
3192 kfree(path);
3193 path = NULL;
3194 if (err == -EAGAIN)
3195 goto again;
3196 ext4_journal_stop(handle);
3197
3198 return err;
3199 }
3200
3201
3202 /* called at mount time */
3203
3204 void ext4_ext_init(struct super_block *sb)
3205 {
3206 /*
3207 * possible initialization would be here
3208 */
3209
3210 if (ext4_has_feature_extents(sb)) {
3211 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
3212 printk(KERN_INFO "EXT4-fs: file extents enabled"
3213 #ifdef AGGRESSIVE_TEST
3214 ", aggressive tests"
3215 #endif
3216 #ifdef CHECK_BINSEARCH
3217 ", check binsearch"
3218 #endif
3219 #ifdef EXTENTS_STATS
3220 ", stats"
3221 #endif
3222 "\n");
3223 #endif
3224 #ifdef EXTENTS_STATS
3225 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
3226 EXT4_SB(sb)->s_ext_min = 1 << 30;
3227 EXT4_SB(sb)->s_ext_max = 0;
3228 #endif
3229 }
3230 }
3231
3232
3233 /* called at umount time */
3234
3235 void ext4_ext_release(struct super_block *sb)
3236 {
3237 if (!ext4_has_feature_extents(sb))
3238 return;
3239
3240 #ifdef EXTENTS_STATS
3241 if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
3242 struct ext4_sb_info *sbi = EXT4_SB(sb);
3243 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
3244 sbi->s_ext_blocks, sbi->s_ext_extents,
3245 sbi->s_ext_blocks / sbi->s_ext_extents);
3246 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
3247 sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
3248 }
3249 #endif
3250 }
3251
3252 static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
3253 {
3254 ext4_lblk_t ee_block;
3255 ext4_fsblk_t ee_pblock;
3256 unsigned int ee_len;
3257
3258 ee_block = le32_to_cpu(ex->ee_block);
3259 ee_len = ext4_ext_get_actual_len(ex);
3260 ee_pblock = ext4_ext_pblock(ex);
3261
3262 if (ee_len == 0)
3263 return 0;
3264
3265 return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
3266 EXTENT_STATUS_WRITTEN);
3267 }
3268
3269 /* FIXME: we need to try to merge to the left or right after zero-out */
3270 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
3271 {
3272 ext4_fsblk_t ee_pblock;
3273 unsigned int ee_len;
3274
3275 ee_len = ext4_ext_get_actual_len(ex);
3276 ee_pblock = ext4_ext_pblock(ex);
3277 return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock,
3278 ee_len);
3279 }
3280
3281 /*
3282 * ext4_split_extent_at() splits an extent at the given block.
3283 *
3284 * @handle: the journal handle
3285 * @inode: the file inode
3286 * @path: the path to the extent
3287 * @split: the logical block where the extent is split
3288 * @split_flag: indicates whether the extent may be zeroed out if the
3289 *		split fails, and the states (initialized or unwritten)
3290 *		of the resulting extents
3291 * @flags: flags used to insert the new extent into the extent tree
3292 *
3293 * Splits extent [a, b] into two extents [a, @split) and [@split, b],
3294 * the states of which are determined by @split_flag.
3295 *
3296 * There are two cases:
3297 *  a> the extent is split into two extents.
3298 *  b> no split is needed, and we just mark the extent.
3299 */
3300
3301
3302 static int ext4_split_extent_at(handle_t *handle,
3303 struct inode *inode,
3304 struct ext4_ext_path **ppath,
3305 ext4_lblk_t split,
3306 int split_flag,
3307 int flags)
3308 {
3309 struct ext4_ext_path *path = *ppath;
3310 ext4_fsblk_t newblock;
3311 ext4_lblk_t ee_block;
3312 struct ext4_extent *ex, newex, orig_ex, zero_ex;
3313 struct ext4_extent *ex2 = NULL;
3314 unsigned int ee_len, depth;
3315 int err = 0;
3316
3317 BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
3318 (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
3319
3320 ext_debug("ext4_split_extent_at: inode %lu, logical "
3321 "block %llu\n", inode->i_ino, (unsigned long long)split);
3322
3323 ext4_ext_show_leaf(inode, path);
3324
3325 depth = ext_depth(inode);
3326 ex = path[depth].p_ext;
3327 ee_block = le32_to_cpu(ex->ee_block);
3328 ee_len = ext4_ext_get_actual_len(ex);
3329 newblock = split - ee_block + ext4_ext_pblock(ex);
3330
3331 BUG_ON(split < ee_block || split >= (ee_block + ee_len));
3332 BUG_ON(!ext4_ext_is_unwritten(ex) &&
3333 split_flag & (EXT4_EXT_MAY_ZEROOUT |
3334 EXT4_EXT_MARK_UNWRIT1 |
3335 EXT4_EXT_MARK_UNWRIT2));
3336
3337 err = ext4_ext_get_access(handle, inode, path + depth);
3338 if (err)
3339 goto out;
3340
3341 if (split == ee_block) {
3342 /*
3343 * case b: block @split is the block that the extent begins with;
3344 * then we just change the state of the extent, and splitting
3345 * is not needed.
3346 */
3347 if (split_flag & EXT4_EXT_MARK_UNWRIT2)
3348 ext4_ext_mark_unwritten(ex);
3349 else
3350 ext4_ext_mark_initialized(ex);
3351
3352 if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
3353 ext4_ext_try_to_merge(handle, inode, path, ex);
3354
3355 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3356 goto out;
3357 }
3358
3359 /* case a */
3360 memcpy(&orig_ex, ex, sizeof(orig_ex));
3361 ex->ee_len = cpu_to_le16(split - ee_block);
3362 if (split_flag & EXT4_EXT_MARK_UNWRIT1)
3363 ext4_ext_mark_unwritten(ex);
3364
3365 /*
3366 * path may lead to a new leaf, not to the original leaf any more,
3367 * after ext4_ext_insert_extent() returns
3368 */
3369 err = ext4_ext_dirty(handle, inode, path + depth);
3370 if (err)
3371 goto fix_extent_len;
3372
3373 ex2 = &newex;
3374 ex2->ee_block = cpu_to_le32(split);
3375 ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block));
3376 ext4_ext_store_pblock(ex2, newblock);
3377 if (split_flag & EXT4_EXT_MARK_UNWRIT2)
3378 ext4_ext_mark_unwritten(ex2);
3379
3380 err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
3381 if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3382 if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
3383 if (split_flag & EXT4_EXT_DATA_VALID1) {
3384 err = ext4_ext_zeroout(inode, ex2);
3385 zero_ex.ee_block = ex2->ee_block;
3386 zero_ex.ee_len = cpu_to_le16(
3387 ext4_ext_get_actual_len(ex2));
3388 ext4_ext_store_pblock(&zero_ex,
3389 ext4_ext_pblock(ex2));
3390 } else {
3391 err = ext4_ext_zeroout(inode, ex);
3392 zero_ex.ee_block = ex->ee_block;
3393 zero_ex.ee_len = cpu_to_le16(
3394 ext4_ext_get_actual_len(ex));
3395 ext4_ext_store_pblock(&zero_ex,
3396 ext4_ext_pblock(ex));
3397 }
3398 } else {
3399 err = ext4_ext_zeroout(inode, &orig_ex);
3400 zero_ex.ee_block = orig_ex.ee_block;
3401 zero_ex.ee_len = cpu_to_le16(
3402 ext4_ext_get_actual_len(&orig_ex));
3403 ext4_ext_store_pblock(&zero_ex,
3404 ext4_ext_pblock(&orig_ex));
3405 }
3406
3407 if (err)
3408 goto fix_extent_len;
3409
3410 ex->ee_len = cpu_to_le16(ee_len);
3411 ext4_ext_try_to_merge(handle, inode, path, ex);
3412 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3413 if (err)
3414 goto fix_extent_len;
3415
3416 /* update the extent status tree */
3417 err = ext4_zeroout_es(inode, &zero_ex);
3418
3419 goto out;
3420 } else if (err)
3421 goto fix_extent_len;
3422
3423 out:
3424 ext4_ext_show_leaf(inode, path);
3425 return err;
3426
3427 fix_extent_len:
3428 ex->ee_len = orig_ex.ee_len;
3429 ext4_ext_dirty(handle, inode, path + path->p_depth);
3430 return err;
3431 }
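/*
 * Example of case a (hypothetical numbers): splitting an unwritten
 * extent mapping [0, 10) at pblk 200 with split == 4 shrinks the
 * original extent to [0, 4) at pblk 200 and inserts a second extent
 * [4, 10) at pblk 204, matching the newblock arithmetic above
 * (newblock == split - ee_block + ext4_ext_pblock(ex) == 204).
 */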
3432
3433 /*
3434 * ext4_split_extent() splits an extent and marks the extent which is
3435 * covered by @map as indicated by @split_flag.
3436 *
3437 * It may result in splitting the extent into multiple extents (up to three).
3438 * There are three possibilities:
3439 *   a> There is no split required.
3440 *   b> Splits into two extents: the split is at either end of the extent.
3441 *   c> Splits into three extents: someone is splitting in the middle of
3442 *      the extent.
3443 */
3444 static int ext4_split_extent(handle_t *handle,
3445 struct inode *inode,
3446 struct ext4_ext_path **ppath,
3447 struct ext4_map_blocks *map,
3448 int split_flag,
3449 int flags)
3450 {
3451 struct ext4_ext_path *path = *ppath;
3452 ext4_lblk_t ee_block;
3453 struct ext4_extent *ex;
3454 unsigned int ee_len, depth;
3455 int err = 0;
3456 int unwritten;
3457 int split_flag1, flags1;
3458 int allocated = map->m_len;
3459
3460 depth = ext_depth(inode);
3461 ex = path[depth].p_ext;
3462 ee_block = le32_to_cpu(ex->ee_block);
3463 ee_len = ext4_ext_get_actual_len(ex);
3464 unwritten = ext4_ext_is_unwritten(ex);
3465
3466 if (map->m_lblk + map->m_len < ee_block + ee_len) {
3467 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
3468 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
3469 if (unwritten)
3470 split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
3471 EXT4_EXT_MARK_UNWRIT2;
3472 if (split_flag & EXT4_EXT_DATA_VALID2)
3473 split_flag1 |= EXT4_EXT_DATA_VALID1;
3474 err = ext4_split_extent_at(handle, inode, ppath,
3475 map->m_lblk + map->m_len, split_flag1, flags1);
3476 if (err)
3477 goto out;
3478 } else {
3479 allocated = ee_len - (map->m_lblk - ee_block);
3480 }
3481
3482 /* An updated path is required because the preceding
3483 * ext4_split_extent_at() may have split the original leaf
3484 * or zeroed out the extent. */
3485 path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3486 if (IS_ERR(path))
3487 return PTR_ERR(path);
3488 depth = ext_depth(inode);
3489 ex = path[depth].p_ext;
3490 if (!ex) {
3491 EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3492 (unsigned long) map->m_lblk);
3493 return -EFSCORRUPTED;
3494 }
3495 unwritten = ext4_ext_is_unwritten(ex);
3496 split_flag1 = 0;
3497
3498 if (map->m_lblk >= ee_block) {
3499 split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
3500 if (unwritten) {
3501 split_flag1 |= EXT4_EXT_MARK_UNWRIT1;
3502 split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
3503 EXT4_EXT_MARK_UNWRIT2);
3504 }
3505 err = ext4_split_extent_at(handle, inode, ppath,
3506 map->m_lblk, split_flag1, flags);
3507 if (err)
3508 goto out;
3509 }
3510
3511 ext4_ext_show_leaf(inode, path);
3512 out:
3513 return err ? err : allocated;
3514 }
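/*
 * A middle-of-extent split (case c) is carried out as two case-b
 * splits: the first ext4_split_extent_at() call above cuts at
 * map->m_lblk + map->m_len, the second cuts at map->m_lblk, leaving
 * three extents in total.
 */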
3515
3516 /*
3517 * This function is called by ext4_ext_map_blocks() if someone tries to
3518 * write to an unwritten extent. It may result in splitting the unwritten
3519 * extent into multiple extents (up to three - one initialized and two
3520 * unwritten).
3521 * There are three possibilities:
3522 *   a> There is no split required: the entire extent should be initialized.
3523 *   b> Splits into two extents: the write is at either end of the extent.
3524 *   c> Splits into three extents: someone is writing in the middle of
3525 *      the extent.
3526 *
3527 * Pre-conditions:
3528 *  - The extent pointed to by 'path' is unwritten.
3529 *  - The extent pointed to by 'path' contains a superset
3530 *    (map->m_lblk, map->m_lblk + map->m_len) of the extent to split.
3531 *
3532 * Post-conditions on success:
3533 *  - the returned value is the number of blocks beyond map->m_lblk
3534 *    that are allocated and initialized; it is guaranteed to be >= map->m_len.
3535 */
3536 static int ext4_ext_convert_to_initialized(handle_t *handle,
3537 struct inode *inode,
3538 struct ext4_map_blocks *map,
3539 struct ext4_ext_path **ppath,
3540 int flags)
3541 {
3542 struct ext4_ext_path *path = *ppath;
3543 struct ext4_sb_info *sbi;
3544 struct ext4_extent_header *eh;
3545 struct ext4_map_blocks split_map;
3546 struct ext4_extent zero_ex1, zero_ex2;
3547 struct ext4_extent *ex, *abut_ex;
3548 ext4_lblk_t ee_block, eof_block;
3549 unsigned int ee_len, depth, map_len = map->m_len;
3550 int allocated = 0, max_zeroout = 0;
3551 int err = 0;
3552 int split_flag = EXT4_EXT_DATA_VALID2;
3553
3554 ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical "
3555 "block %llu, max_blocks %u\n", inode->i_ino,
3556 (unsigned long long)map->m_lblk, map_len);
3557
3558 sbi = EXT4_SB(inode->i_sb);
3559 eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
3560 >> inode->i_sb->s_blocksize_bits;
3561 if (eof_block < map->m_lblk + map_len)
3562 eof_block = map->m_lblk + map_len;
3563
3564 depth = ext_depth(inode);
3565 eh = path[depth].p_hdr;
3566 ex = path[depth].p_ext;
3567 ee_block = le32_to_cpu(ex->ee_block);
3568 ee_len = ext4_ext_get_actual_len(ex);
3569 zero_ex1.ee_len = 0;
3570 zero_ex2.ee_len = 0;
3571
3572 trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
3573
3574 /* Pre-conditions */
3575 BUG_ON(!ext4_ext_is_unwritten(ex));
3576 BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
3577
3578 /*
3579 * Attempt to transfer newly initialized blocks from the currently
3580 * unwritten extent to its neighbor.  This is much cheaper than an
3581 * insertion followed by a merge, as those involve costly memmove()
3582 * calls.  Transferring to the left is the common case in steady state
3583 * for workloads doing sequential writes to the region of the extent
3584 * (i.e. a fallocated region gets written sequentially) and for
3585 * appending writes.
3586 *
3587 * Limitations of the current logic:
3588 *  - L1: we do not deal with writes covering the whole extent;
3589 *    this would require removing the extent if the transfer
3590 *    is possible.
3591 *  - L2: we only attempt to merge with an extent stored in the
3592 *    same extent tree node. */
3593 if ((map->m_lblk == ee_block) &&
3594 /* See if we can merge left */
3595 (map_len < ee_len) &&
3596 (ex > EXT_FIRST_EXTENT(eh))) {
3597 ext4_lblk_t prev_lblk;
3598 ext4_fsblk_t prev_pblk, ee_pblk;
3599 unsigned int prev_len;
3600
3601 abut_ex = ex - 1;
3602 prev_lblk = le32_to_cpu(abut_ex->ee_block);
3603 prev_len = ext4_ext_get_actual_len(abut_ex);
3604 prev_pblk = ext4_ext_pblock(abut_ex);
3605 ee_pblk = ext4_ext_pblock(ex);
3606
3607 /*
3608 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3609 * upon these conditions:
3610 * - C1: abut_ex is initialized,
3611 * - C2: abut_ex is logically abutting ex,
3612 * - C3: abut_ex is physically abutting ex,
3613 * - C4: abut_ex can receive the additional blocks without
3614 *   overflowing the (initialized) length limit.
3615 */
3616 if ((!ext4_ext_is_unwritten(abut_ex)) &&
3617 ((prev_lblk + prev_len) == ee_block) &&
3618 ((prev_pblk + prev_len) == ee_pblk) &&
3619 (prev_len < (EXT_INIT_MAX_LEN - map_len))) {
3620 err = ext4_ext_get_access(handle, inode, path + depth);
3621 if (err)
3622 goto out;
3623
3624 trace_ext4_ext_convert_to_initialized_fastpath(inode,
3625 map, ex, abut_ex);
3626
3627 /* Shift the start of ex by 'map_len' blocks */
3628 ex->ee_block = cpu_to_le32(ee_block + map_len);
3629 ext4_ext_store_pblock(ex, ee_pblk + map_len);
3630 ex->ee_len = cpu_to_le16(ee_len - map_len);
3631 ext4_ext_mark_unwritten(ex);
3632
3633 /* Extend abut_ex by 'map_len' blocks */
3634 abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
3635
3636 /* Result: number of initialized blocks past m_lblk */
3637 allocated = map_len;
3638 }
3639 } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
3640 (map_len < ee_len) &&
3641 ex < EXT_LAST_EXTENT(eh)) {
3642 /* See if we can merge right */
3643 ext4_lblk_t next_lblk;
3644 ext4_fsblk_t next_pblk, ee_pblk;
3645 unsigned int next_len;
3646
3647 abut_ex = ex + 1;
3648 next_lblk = le32_to_cpu(abut_ex->ee_block);
3649 next_len = ext4_ext_get_actual_len(abut_ex);
3650 next_pblk = ext4_ext_pblock(abut_ex);
3651 ee_pblk = ext4_ext_pblock(ex);
3652
3653 /*
3654 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3655 * upon these conditions:
3656 * - C1: abut_ex is initialized,
3657 * - C2: abut_ex is logically abutting ex,
3658 * - C3: abut_ex is physically abutting ex,
3659 * - C4: abut_ex can receive the additional blocks without
3660 *   overflowing the (initialized) length limit.
3661 */
3662 if ((!ext4_ext_is_unwritten(abut_ex)) &&
3663 ((map->m_lblk + map_len) == next_lblk) &&
3664 ((ee_pblk + ee_len) == next_pblk) &&
3665 (next_len < (EXT_INIT_MAX_LEN - map_len))) {
3666 err = ext4_ext_get_access(handle, inode, path + depth);
3667 if (err)
3668 goto out;
3669
3670 trace_ext4_ext_convert_to_initialized_fastpath(inode,
3671 map, ex, abut_ex);
3672
3673 /* Shift the start of abut_ex by 'map_len' blocks */
3674 abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
3675 ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
3676 ex->ee_len = cpu_to_le16(ee_len - map_len);
3677 ext4_ext_mark_unwritten(ex);
3678
3679 /* Extend abut_ex by 'map_len' blocks */
3680 abut_ex->ee_len = cpu_to_le16(next_len + map_len);
3681
3682 /* Result: number of initialized blocks past m_lblk */
3683 allocated = map_len;
3684 }
3685 }
3686 if (allocated) {
3687 /* Mark the block containing both extents as dirty */
3688 ext4_ext_dirty(handle, inode, path + depth);
3689
3690 /* Update path to point to the right extent */
3691 path[depth].p_ext = abut_ex;
3692 goto out;
3693 } else
3694 allocated = ee_len - (map->m_lblk - ee_block);
3695
3696 WARN_ON(map->m_lblk < ee_block);
3697
3698 /* It is safe to convert the extent to initialized via explicit
3699 * zeroout only if the extent is fully inside i_size or new_size.
3700 */
3701 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3702
3703 if (EXT4_EXT_MAY_ZEROOUT & split_flag)
3704 max_zeroout = sbi->s_extent_max_zeroout_kb >>
3705 (inode->i_sb->s_blocksize_bits - 10);
3706
3707 if (IS_ENCRYPTED(inode))
3708 max_zeroout = 0;
3709 /*
3710 * five cases:
3711 * 1. split the extent into three extents.
3712 * 2. split the extent into two extents, zeroing out the head of the
3713 *    first extent.
3714 * 3. split the extent into two extents, zeroing out the tail of the
3715 *    second extent.
3716 * 4. split the extent into two extents without zeroout.
3717 * 5. no splitting needed, just possibly zero out the head and / or
3718 *    the tail of the extent.
3719 */
3720
3721 split_map.m_lblk = map->m_lblk;
3722 split_map.m_len = map->m_len;
3723
3724 if (max_zeroout && (allocated > split_map.m_len)) {
3725 if (allocated <= max_zeroout) {
3726 /* case 3 or 5 */
3727 zero_ex1.ee_block =
3728 cpu_to_le32(split_map.m_lblk +
3729 split_map.m_len);
3730 zero_ex1.ee_len =
3731 cpu_to_le16(allocated - split_map.m_len);
3732 ext4_ext_store_pblock(&zero_ex1,
3733 ext4_ext_pblock(ex) + split_map.m_lblk +
3734 split_map.m_len - ee_block);
3735 err = ext4_ext_zeroout(inode, &zero_ex1);
3736 if (err)
3737 goto out;
3738 split_map.m_len = allocated;
3739 }
3740 if (split_map.m_lblk - ee_block + split_map.m_len <
3741 max_zeroout) {
3742 /* case 2 or 5 */
3743 if (split_map.m_lblk != ee_block) {
3744 zero_ex2.ee_block = ex->ee_block;
3745 zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
3746 ee_block);
3747 ext4_ext_store_pblock(&zero_ex2,
3748 ext4_ext_pblock(ex));
3749 err = ext4_ext_zeroout(inode, &zero_ex2);
3750 if (err)
3751 goto out;
3752 }
3753
3754 split_map.m_len += split_map.m_lblk - ee_block;
3755 split_map.m_lblk = ee_block;
3756 allocated = map->m_len;
3757 }
3758 }
3759
3760 err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
3761 flags);
3762 if (err > 0)
3763 err = 0;
3764 out:
3765 /* If we have gotten a failure, don't zero out the status tree */
3766 if (!err) {
3767 err = ext4_zeroout_es(inode, &zero_ex1);
3768 if (!err)
3769 err = ext4_zeroout_es(inode, &zero_ex2);
3770 }
3771 return err ? err : allocated;
3772 }
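/*
 * Note on units (a sketch with assumed values): s_extent_max_zeroout_kb
 * is expressed in KiB, so the shift by (s_blocksize_bits - 10) above
 * converts it to blocks; e.g. 32 KiB with 4 KiB blocks allows
 * max_zeroout == 32 >> 2 == 8 blocks.
 */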
3773
3774 /*
3775 * This function is called by ext4_ext_map_blocks() from
3776 * ext4_get_blocks_dio_write() when DIO writes
3777 * to an unwritten extent.
3778 *
3779 * Writing to an unwritten extent may result in splitting the unwritten
3780 * extent into multiple initialized/unwritten extents (up to three).
3781 * There are three possibilities:
3782 *   a> There is no split required: the entire extent should be unwritten.
3783 *   b> Splits into two extents: the write is at either end of the extent.
3784 *   c> Splits into three extents: someone is writing in the middle of
3785 *      the extent.
3786 *
3787 * This works the same way in the case of initialized -> unwritten conversion.
3788 *
3789 * One or more index blocks may be needed if the extent tree grows after
3790 * the unwritten extent split.  To prevent ENOSPC from occurring at IO
3791 * completion, we need to split the unwritten extent before the IO is
3792 * submitted.  The unwritten extent to be written will be split into
3793 * three unwritten extents (at most).  After IO completes, the part
3794 * being filled will be converted to initialized by the end_io callback
3795 * via ext4_convert_unwritten_extents().
3796 *
3797 * Returns the size of the unwritten extent to be written on success. */
3798 static int ext4_split_convert_extents(handle_t *handle,
3799 struct inode *inode,
3800 struct ext4_map_blocks *map,
3801 struct ext4_ext_path **ppath,
3802 int flags)
3803 {
3804 struct ext4_ext_path *path = *ppath;
3805 ext4_lblk_t eof_block;
3806 ext4_lblk_t ee_block;
3807 struct ext4_extent *ex;
3808 unsigned int ee_len;
3809 int split_flag = 0, depth;
3810
3811 ext_debug("%s: inode %lu, logical block %llu, max_blocks %u\n",
3812 __func__, inode->i_ino,
3813 (unsigned long long)map->m_lblk, map->m_len);
3814
3815 eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
3816 >> inode->i_sb->s_blocksize_bits;
3817 if (eof_block < map->m_lblk + map->m_len)
3818 eof_block = map->m_lblk + map->m_len;
3819
3820 /* It is safe to convert the extent to unwritten via explicit
3821 * zeroout only if the extent is fully inside i_size or new_size.
3822 */
3823 depth = ext_depth(inode);
3824 ex = path[depth].p_ext;
3825 ee_block = le32_to_cpu(ex->ee_block);
3826 ee_len = ext4_ext_get_actual_len(ex);
3827
3828 /* Convert to unwritten */
3829 if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
3830 split_flag |= EXT4_EXT_DATA_VALID1;
3831 /* Convert to initialized */
3832 } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
3833 split_flag |= ee_block + ee_len <= eof_block ?
3834 EXT4_EXT_MAY_ZEROOUT : 0;
3835 split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
3836 }
3837 flags |= EXT4_GET_BLOCKS_PRE_IO;
3838 return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
3839 }
3840
3841 static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3842 struct inode *inode,
3843 struct ext4_map_blocks *map,
3844 struct ext4_ext_path **ppath)
3845 {
3846 struct ext4_ext_path *path = *ppath;
3847 struct ext4_extent *ex;
3848 ext4_lblk_t ee_block;
3849 unsigned int ee_len;
3850 int depth;
3851 int err = 0;
3852
3853 depth = ext_depth(inode);
3854 ex = path[depth].p_ext;
3855 ee_block = le32_to_cpu(ex->ee_block);
3856 ee_len = ext4_ext_get_actual_len(ex);
3857
3858 ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical "
3859 "block %llu, max_blocks %u\n", inode->i_ino,
3860 (unsigned long long)ee_block, ee_len);
3861
3862 /* If the extent is larger than requested, it is a clear sign that we
3863 * still have some extent state machine issues left; so a split is
3864 * still required.
3865 * TODO: once all related issues are fixed, this situation should be
3866 * illegal.
3867 */
3868 if (ee_block != map->m_lblk || ee_len > map->m_len) {
3869 #ifdef CONFIG_EXT4_DEBUG
3870 ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu,"
3871 " len %u; IO logical block %llu, len %u",
3872 inode->i_ino, (unsigned long long)ee_block, ee_len,
3873 (unsigned long long)map->m_lblk, map->m_len);
3874 #endif
3875 err = ext4_split_convert_extents(handle, inode, map, ppath,
3876 EXT4_GET_BLOCKS_CONVERT);
3877 if (err < 0)
3878 return err;
3879 path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3880 if (IS_ERR(path))
3881 return PTR_ERR(path);
3882 depth = ext_depth(inode);
3883 ex = path[depth].p_ext;
3884 }
3885
3886 err = ext4_ext_get_access(handle, inode, path + depth);
3887 if (err)
3888 goto out;
3889 /* first mark the extent as initialized */
3890 ext4_ext_mark_initialized(ex);
3891
3892 /* note: ext4_ext_correct_indexes() isn't needed here because
3893 * the borders are not changed
3894 */
3895 ext4_ext_try_to_merge(handle, inode, path, ex);
3896
3897 /* Mark the modified extent as dirty */
3898 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3899 out:
3900 ext4_ext_show_leaf(inode, path);
3901 return err;
3902 }
3903
3904 /*
3905 * Handle the EOFBLOCKS_FL flag, clearing it if necessary.
3906 */
3907 static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
3908 ext4_lblk_t lblk,
3909 struct ext4_ext_path *path,
3910 unsigned int len)
3911 {
3912 int i, depth;
3913 struct ext4_extent_header *eh;
3914 struct ext4_extent *last_ex;
3915
3916 if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
3917 return 0;
3918
3919 depth = ext_depth(inode);
3920 eh = path[depth].p_hdr;
3921
3922 /*
3923 * We're going to remove EOFBLOCKS_FL entirely in the future, so we
3924 * do not care for this case anymore. Simply remove the flag
3925 * if there are no extents.
3926 */
3927 if (unlikely(!eh->eh_entries))
3928 goto out;
3929 last_ex = EXT_LAST_EXTENT(eh);
3930
3931 /*
3932 * We should clear the EOFBLOCKS_FL flag if we are writing the
3933 * last block in the last extent in the file.  We test this by
3934 * first checking to see if the caller to ext4_ext_map_blocks()
3935 * was interested in the last block (or a block beyond the last
3936 * block) in the current extent.  If this turns out to be false,
3937 * we can bail out from this function immediately.
3938 */
3939 if (lblk + len < le32_to_cpu(last_ex->ee_block) +
3940 ext4_ext_get_actual_len(last_ex))
3941 return 0;
3942
3943 /* If the caller does appear to be planning to write at or
3944 * beyond the end of the current extent, we then test to see
3945 * if the current extent is the last extent in the file, by
3946 * checking to make sure it was reached via the rightmost node
3947 * at each level of the tree.
3948 */
3949 for (i = depth-1; i >= 0; i--)
3950 if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
3951 return 0;
3952 out:
3953 ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3954 return ext4_mark_inode_dirty(handle, inode);
3955 }
3956
3957 static int
3958 convert_initialized_extent(handle_t *handle, struct inode *inode,
3959 struct ext4_map_blocks *map,
3960 struct ext4_ext_path **ppath,
3961 unsigned int allocated)
3962 {
3963 struct ext4_ext_path *path = *ppath;
3964 struct ext4_extent *ex;
3965 ext4_lblk_t ee_block;
3966 unsigned int ee_len;
3967 int depth;
3968 int err = 0;
3969
3970 /*
3971 * Make sure that the extent is no bigger than we support with
3972 * unwritten extents.
3973 */
3974 if (map->m_len > EXT_UNWRITTEN_MAX_LEN)
3975 map->m_len = EXT_UNWRITTEN_MAX_LEN / 2;
3976
3977 depth = ext_depth(inode);
3978 ex = path[depth].p_ext;
3979 ee_block = le32_to_cpu(ex->ee_block);
3980 ee_len = ext4_ext_get_actual_len(ex);
3981
3982 ext_debug("%s: inode %lu, logical "
3983 "block %llu, max_blocks %u\n", __func__, inode->i_ino,
3984 (unsigned long long)ee_block, ee_len);
3985
3986 if (ee_block != map->m_lblk || ee_len > map->m_len) {
3987 err = ext4_split_convert_extents(handle, inode, map, ppath,
3988 EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
3989 if (err < 0)
3990 return err;
3991 path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3992 if (IS_ERR(path))
3993 return PTR_ERR(path);
3994 depth = ext_depth(inode);
3995 ex = path[depth].p_ext;
3996 if (!ex) {
3997 EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3998 (unsigned long) map->m_lblk);
3999 return -EFSCORRUPTED;
4000 }
4001 }
4002
4003 err = ext4_ext_get_access(handle, inode, path + depth);
4004 if (err)
4005 return err;
4006
4007 ext4_ext_mark_unwritten(ex);
4008
4009 /* note: ext4_ext_correct_indexes() isn't needed here because
4010 * the borders are not changed
4011 */
4012 ext4_ext_try_to_merge(handle, inode, path, ex);
4013
4014 /* Mark the modified extent as dirty */
4015 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
4016 if (err)
4017 return err;
4018 ext4_ext_show_leaf(inode, path);
4019
4020 ext4_update_inode_fsync_trans(handle, inode, 1);
4021 err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len);
4022 if (err)
4023 return err;
4024 map->m_flags |= EXT4_MAP_UNWRITTEN;
4025 if (allocated > map->m_len)
4026 allocated = map->m_len;
4027 map->m_len = allocated;
4028 return allocated;
4029 }
4030
4031 static int
4032 ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
4033 struct ext4_map_blocks *map,
4034 struct ext4_ext_path **ppath, int flags,
4035 unsigned int allocated, ext4_fsblk_t newblock)
4036 {
4037 struct ext4_ext_path *path = *ppath;
4038 int ret = 0;
4039 int err = 0;
4040
4041 ext_debug("ext4_ext_handle_unwritten_extents: inode %lu, logical "
4042 "block %llu, max_blocks %u, flags %x, allocated %u\n",
4043 inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
4044 flags, allocated);
4045 ext4_ext_show_leaf(inode, path);
4046
4047 /*
4048 * When writing into unwritten space, we should not fail to
4049 * allocate metadata blocks for the new extent block if needed.
4050 */
4051 flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
4052
4053 trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
4054 allocated, newblock);
4055
4056 /* get_block() before submitting the IO, split the extent */
4057 if (flags & EXT4_GET_BLOCKS_PRE_IO) {
4058 ret = ext4_split_convert_extents(handle, inode, map, ppath,
4059 flags | EXT4_GET_BLOCKS_CONVERT);
4060 if (ret <= 0)
4061 goto out;
4062 map->m_flags |= EXT4_MAP_UNWRITTEN;
4063 goto out;
4064 }
4065
4066 if (flags & EXT4_GET_BLOCKS_CONVERT) {
4067 if (flags & EXT4_GET_BLOCKS_ZERO) {
4068 if (allocated > map->m_len)
4069 allocated = map->m_len;
4070 err = ext4_issue_zeroout(inode, map->m_lblk, newblock,
4071 allocated);
4072 if (err < 0)
4073 goto out2;
4074 }
4075 ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
4076 ppath);
4077 if (ret >= 0) {
4078 ext4_update_inode_fsync_trans(handle, inode, 1);
4079 err = check_eofblocks_fl(handle, inode, map->m_lblk,
4080 path, map->m_len);
4081 } else
4082 err = ret;
4083 map->m_flags |= EXT4_MAP_MAPPED;
4084 map->m_pblk = newblock;
4085 if (allocated > map->m_len)
4086 allocated = map->m_len;
4087 map->m_len = allocated;
4088 goto out2;
4089 }
4090
4091 /* buffered IO case */
4092 /* repeat fallocate creation request:
4093 * we already have an unwritten extent
4094 */
4095 if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
4096 map->m_flags |= EXT4_MAP_UNWRITTEN;
4097 goto map_out;
4098 }
4099
4100 /* buffered READ or buffered WRITE (delayed allocation) */
4101 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
4102 /*
4103 * We have blocks reserved already.  We return
4104 * allocated blocks so that delalloc won't do
4105 * block reservation for us.  But the buffer
4106 * head will be unmapped so that a read from
4107 * the block returns 0s.
4108 */
4109 map->m_flags |= EXT4_MAP_UNWRITTEN;
4110 goto out1;
4111 }
4112
4113 /* buffered write, writepage time, convert */
4114 ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
4115 if (ret >= 0)
4116 ext4_update_inode_fsync_trans(handle, inode, 1);
4117 out:
4118 if (ret <= 0) {
4119 err = ret;
4120 goto out2;
4121 } else
4122 allocated = ret;
4123 map->m_flags |= EXT4_MAP_NEW;
4124 if (allocated > map->m_len)
4125 allocated = map->m_len;
4126 map->m_len = allocated;
4127
4128 map_out:
4129 map->m_flags |= EXT4_MAP_MAPPED;
4130 if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
4131 err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
4132 map->m_len);
4133 if (err < 0)
4134 goto out2;
4135 }
4136 out1:
4137 if (allocated > map->m_len)
4138 allocated = map->m_len;
4139 ext4_ext_show_leaf(inode, path);
4140 map->m_pblk = newblock;
4141 map->m_len = allocated;
4142 out2:
4143 return err ? err : allocated;
4144 }
4145
4146 /*
4147 * get_implied_cluster_alloc - check to see if the requested
4148 *				allocation is implied by a previous cluster
4149 *				allocation
4150 *
4151 * @sb	The filesystem superblock structure
4152 * @map	The requested lblk->pblk mapping
4153 * @ex	The extent structure which might contain an implied
4154 *	cluster allocation
4155 * @path	The path to the extent tree leaf
4156 *
4157 * This function is called by ext4_ext_map_blocks() after we failed to
4158 * find blocks that were already in the inode's extent tree.  Hence,
4159 * we know that the beginning of the requested region cannot overlap
4160 * the extent from the inode's extent tree.  There are three cases we
4161 * want to catch.  The first is this case:
4162 *
4163 *		 |--- cluster # N--|
4164 *    |--- extent ---|	|---- requested region ---|
4165 *			|==========|
4166 *
4167 * The second case that we need to test for is this one:
4168 *
4169 *   |--------- cluster # N ----------------|
4170 *	   |--- requested region --|   |------- extent ----|
4171 *	   |=======================|
4172 *
4173 * The third case is when the requested region lies between two extents
4174 * within the same cluster:
4175 *          |------------- cluster # N-------------|
4176 * |----- ex -----|                  |---- ex_right ----|
4177 *  |------ requested region ------|
4178 *              |================|
4179 *
4180 * In each of the above cases we set map->m_pblk and map->m_len to
4181 * describe the part of cluster #N (the "|====|" region) that backs
4182 * the requested region, since that cluster is already allocated,
4183 * and return 1.  Otherwise we return 0 and the caller falls through
4184 * to a normal block/cluster allocation.
4185 */
4186
4187 static int get_implied_cluster_alloc(struct super_block *sb,
4188 struct ext4_map_blocks *map,
4189 struct ext4_extent *ex,
4190 struct ext4_ext_path *path)
4191 {
4192 struct ext4_sb_info *sbi = EXT4_SB(sb);
4193 ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4194 ext4_lblk_t ex_cluster_start, ex_cluster_end;
4195 ext4_lblk_t rr_cluster_start;
4196 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4197 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4198 unsigned short ee_len = ext4_ext_get_actual_len(ex);
4199
4200 /* The extent passed in that we are trying to match */
4201 ex_cluster_start = EXT4_B2C(sbi, ee_block);
4202 ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
4203
4204 /* The requested region passed into ext4_map_blocks() */
4205 rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
4206
4207 if ((rr_cluster_start == ex_cluster_end) ||
4208 (rr_cluster_start == ex_cluster_start)) {
4209 if (rr_cluster_start == ex_cluster_end)
4210 ee_start += ee_len - 1;
4211 map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
4212 map->m_len = min(map->m_len,
4213 (unsigned) sbi->s_cluster_ratio - c_offset);
4214
4215 /*
4216 * Check for and handle this case:
4217 *
4218 *   |--------- cluster # N-------------|
4219 *		       |------- extent ----|
4220 *	   |--- requested region ---|
4221 *	   |===========|
4222 */
4223 if (map->m_lblk < ee_block)
4224 map->m_len = min(map->m_len, ee_block - map->m_lblk);
4225
4226 /*
4227 * Check for the case where there is already another allocated
4228 * block to the right of 'ex' but before the end of the cluster.
4229 *
4230 *          |------------- cluster # N-------------|
4231 * |----- ex -----|                  |---- ex_right ----|
4232 *  |------ requested region ------|
4233 *  |================|
4234 */
4235 if (map->m_lblk > ee_block) {
4236 ext4_lblk_t next = ext4_ext_next_allocated_block(path);
4237 map->m_len = min(map->m_len, next - map->m_lblk);
4238 }
4239
4240 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
4241 return 1;
4242 }
4243
4244 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
4245 return 0;
4246 }
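/*
 * Worked example (hypothetical numbers, cluster ratio 16): an extent
 * maps blocks [4, 12) to pblks [100, 108), all inside physical cluster
 * 6 (pblks 96..111).  A request at m_lblk == 12 lands in the same
 * logical cluster as the extent's end (12 >> 4 == 0), so m_pblk becomes
 * EXT4_PBLK_CMASK(sbi, 107) + (12 & 15) == 96 + 12 == 108 and m_len is
 * capped at 16 - 12 == 4 blocks (and by the next allocated block).
 */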
4247
4248 /*
4249 * Block allocation/map/preallocation routine for extents-based files
4250 *
4251 * Needs to be called with
4252 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
4253 * blocks (i.e., create is zero), otherwise
4254 * down_write(&EXT4_I(inode)->i_data_sem)
4255 *
4256 * return > 0, number of blocks already mapped/allocated
4257 *          if create == 0 and these are pre-allocated blocks
4258 *          	the buffer head is unmapped
4259 *          otherwise blocks are mapped
4260 *
4261 * return = 0, if plain lookup failed (blocks have not been allocated)
4262 *          the buffer head is unmapped
4263 *
4264 * return < 0, error case
4265 */
4266
4267 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4268 struct ext4_map_blocks *map, int flags)
4269 {
4270 struct ext4_ext_path *path = NULL;
4271 struct ext4_extent newex, *ex, *ex2;
4272 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4273 ext4_fsblk_t newblock = 0;
4274 int free_on_err = 0, err = 0, depth, ret;
4275 unsigned int allocated = 0, offset = 0;
4276 unsigned int allocated_clusters = 0;
4277 struct ext4_allocation_request ar;
4278 ext4_lblk_t cluster_offset;
4279 bool map_from_cluster = false;
4280
4281 ext_debug("blocks %u/%u requested for inode %lu\n",
4282 map->m_lblk, map->m_len, inode->i_ino);
4283 trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
4284
4285 /* find extent for this block */
4286 path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
4287 if (IS_ERR(path)) {
4288 err = PTR_ERR(path);
4289 path = NULL;
4290 goto out2;
4291 }
4292
4293 depth = ext_depth(inode);
4294
4295 /*
4296 * A consistent leaf must not be empty; this situation is possible,
4297 * though, _during_ tree modification, and that's why the assert
4298 * can't be put in ext4_find_extent()
4299 */
4300 if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
4301 EXT4_ERROR_INODE(inode, "bad extent address "
4302 "lblock: %lu, depth: %d pblock %lld",
4303 (unsigned long) map->m_lblk, depth,
4304 path[depth].p_block);
4305 err = -EFSCORRUPTED;
4306 goto out2;
4307 }
4308
4309 ex = path[depth].p_ext;
4310 if (ex) {
4311 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4312 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4313 unsigned short ee_len;
4314
4315 /*
4316 * unwritten extents are treated as holes, except that
4317 * we split out initialized portions during a write.
4318 */
4319
4320 ee_len = ext4_ext_get_actual_len(ex);
4321
4322 trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
4323
4324 /* if the found extent covers the block, simply return it */
4325 if (in_range(map->m_lblk, ee_block, ee_len)) {
4326 newblock = map->m_lblk - ee_block + ee_start;
4327 /* number of remaining blocks in the extent */
4328 allocated = ee_len - (map->m_lblk - ee_block);
4329 ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
4330 ee_block, ee_len, newblock);
4331
4332 /*
4333 * If the extent is initialized, check whether the
4334 * caller wants to convert it to unwritten.
4335 */
4336 if ((!ext4_ext_is_unwritten(ex)) &&
4337 (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
4338 allocated = convert_initialized_extent(
4339 handle, inode, map, &path,
4340 allocated);
4341 goto out2;
4342 } else if (!ext4_ext_is_unwritten(ex))
4343 goto out;
4344
4345 ret = ext4_ext_handle_unwritten_extents(
4346 handle, inode, map, &path, flags,
4347 allocated, newblock);
4348 if (ret < 0)
4349 err = ret;
4350 else
4351 allocated = ret;
4352 goto out2;
4353 }
4354 }
4355
4356 /*
4357 * requested block isn't allocated yet;
4358 * we can't create blocks if the create flag is zero
4359 */
4360 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
4361 ext4_lblk_t hole_start, hole_len;
4362
4363 hole_start = map->m_lblk;
4364 hole_len = ext4_ext_determine_hole(inode, path, &hole_start);
4365
4366 /* put the just-found gap into the cache to speed up
4367 * subsequent requests
4368 */
4369 ext4_ext_put_gap_in_cache(inode, hole_start, hole_len);
4370
4371 /* Update hole_len to reflect the hole size after map->m_lblk */
4372 if (hole_start != map->m_lblk)
4373 hole_len -= map->m_lblk - hole_start;
4374 map->m_pblk = 0;
4375 map->m_len = min_t(unsigned int, map->m_len, hole_len);
4376
4377 goto out2;
4378 }
4379
4380 /*
4381 * Okay, we need to do block allocation.
4382 */
4383 newex.ee_block = cpu_to_le32(map->m_lblk);
4384 cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4385
4386 /*
4387 * If we are doing bigalloc, check to see if the extent returned
4388 * by ext4_find_extent() implies a cluster we can use.
4389 */
4390 if (cluster_offset && ex &&
4391 get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
4392 ar.len = allocated = map->m_len;
4393 newblock = map->m_pblk;
4394 map_from_cluster = true;
4395 goto got_allocated_blocks;
4396 }
4397
4398 /* find neighbouring allocated blocks */
4399 ar.lleft = map->m_lblk;
4400 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
4401 if (err)
4402 goto out2;
4403 ar.lright = map->m_lblk;
4404 ex2 = NULL;
4405 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4406 if (err)
4407 goto out2;
4408
4409 /* Check if the extent found after searching to the right implies a
4410 * cluster we can use. */
4411 if ((sbi->s_cluster_ratio > 1) && ex2 &&
4412 get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
4413 ar.len = allocated = map->m_len;
4414 newblock = map->m_pblk;
4415 map_from_cluster = true;
4416 goto got_allocated_blocks;
4417 }
4418
4419 /*
4420 * See if the request is beyond the maximum number of blocks we can
4421 * have in a single extent. For an initialized extent this limit is
4422 * EXT_INIT_MAX_LEN and for an unwritten extent this limit is
4423 * EXT_UNWRITTEN_MAX_LEN.
4424 */
4425 if (map->m_len > EXT_INIT_MAX_LEN &&
4426 !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
4427 map->m_len = EXT_INIT_MAX_LEN;
4428 else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
4429 (flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
4430 map->m_len = EXT_UNWRITTEN_MAX_LEN;
4431
4432 /* Check if we can really insert an (m_lblk)::(m_lblk + m_len) extent */
4433 newex.ee_len = cpu_to_le16(map->m_len);
4434 err = ext4_ext_check_overlap(sbi, inode, &newex, path);
4435 if (err)
4436 allocated = ext4_ext_get_actual_len(&newex);
4437 else
4438 allocated = map->m_len;
4439
4440 /* allocate new block */
4441 ar.inode = inode;
4442 ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4443 ar.logical = map->m_lblk;
4444
4445 /*
4446 * We calculate the offset from the beginning of the cluster
4447 * for the logical block number, since when we allocate a
4448 * physical cluster, the physical block should start at the
4449 * same offset from the beginning of the cluster.  This is needed
4450 * so that future calls to get_implied_cluster_alloc() work correctly.
4451 */
4452 offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4453 ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
4454 ar.goal -= offset;
4455 ar.logical -= offset;
4456 if (S_ISREG(inode->i_mode))
4457 ar.flags = EXT4_MB_HINT_DATA;
4458 else
4459 /* disable in-core preallocation for non-regular files */
4460 ar.flags = 0;
4461 if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4462 ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4463 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
4464 ar.flags |= EXT4_MB_DELALLOC_RESERVED;
4465 if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
4466 ar.flags |= EXT4_MB_USE_RESERVED;
4467 newblock = ext4_mb_new_blocks(handle, &ar, &err);
4468 if (!newblock)
4469 goto out2;
4470 ext_debug("allocate new block: goal %llu, found %llu/%u\n",
4471 ar.goal, newblock, allocated);
4472 free_on_err = 1;
4473 allocated_clusters = ar.len;
4474 ar.len = EXT4_C2B(sbi, ar.len) - offset;
4475 if (ar.len > allocated)
4476 ar.len = allocated;
4477
4478 got_allocated_blocks:
4479 /* try to insert the new extent into the found leaf and return */
4480 ext4_ext_store_pblock(&newex, newblock + offset);
4481 newex.ee_len = cpu_to_le16(ar.len);
4482
4483 if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT){
4484 ext4_ext_mark_unwritten(&newex);
4485 map->m_flags |= EXT4_MAP_UNWRITTEN;
4486 }
4487
4488 err = 0;
4489 if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
4490 err = check_eofblocks_fl(handle, inode, map->m_lblk,
4491 path, ar.len);
4492 if (!err)
4493 err = ext4_ext_insert_extent(handle, inode, &path,
4494 &newex, flags);
4495
4496 if (err && free_on_err) {
4497 int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
4498 EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
4499 /* free the data blocks we just allocated */
4500 /* not a good idea to call discard here directly,
4501 * but otherwise we'd need to call it on every free() */
4502 ext4_discard_preallocations(inode);
4503 ext4_free_blocks(handle, inode, NULL, newblock,
4504 EXT4_C2B(sbi, allocated_clusters), fb_flags);
4505 goto out2;
4506 }
4507
4508 /* the previous routine could use the block we allocated */
4509 newblock = ext4_ext_pblock(&newex);
4510 allocated = ext4_ext_get_actual_len(&newex);
4511 if (allocated > map->m_len)
4512 allocated = map->m_len;
4513 map->m_flags |= EXT4_MAP_NEW;
4514
4515 /*
4516 * Reduce the reserved cluster count to reflect successful deferred
4517 * allocation of delayed allocated clusters or direct allocation of
4518 * clusters discovered to be delayed allocated.  Once allocated, a
4519 * cluster is not included in the reserved count.
4520 */
4521 if (test_opt(inode->i_sb, DELALLOC) && !map_from_cluster) {
4522 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
4523 /*
4524 * When allocating delayed allocated clusters, simply
4525 * reduce the reserved cluster count and claim quota
4526 */
4527 ext4_da_update_reserve_space(inode, allocated_clusters,
4528 1);
4529 } else {
4530 ext4_lblk_t lblk, len;
4531 unsigned int n;
4532
4533 /*
4534 * When allocating non-delayed allocated clusters
4535 * (from fallocate, filemap, DIO, or clusters
4536 * allocated when delalloc has been disabled by
4537 * ext4_nonda_switch), reduce the reserved cluster
4538 * count by the number of allocated clusters that
4539 * have previously been delayed allocated.  Quota
4540 * has been claimed by ext4_mb_new_blocks() above,
4541 * so release the quota reservations made for any
4542 * previously delayed allocated clusters.
4543 */
4544 lblk = EXT4_LBLK_CMASK(sbi, map->m_lblk);
4545 len = allocated_clusters << sbi->s_cluster_bits;
4546 n = ext4_es_delayed_clu(inode, lblk, len);
4547 if (n > 0)
4548 ext4_da_update_reserve_space(inode, (int) n, 0);
4549 }
4550 }
4551
4552 /*
4553 * Cache the extent and update the transaction to commit on fdatasync
4554 * only when it is _not_ an unwritten extent.
4555 */
4556 if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
4557 ext4_update_inode_fsync_trans(handle, inode, 1);
4558 else
4559 ext4_update_inode_fsync_trans(handle, inode, 0);
4560 out:
4561 if (allocated > map->m_len)
4562 allocated = map->m_len;
4563 ext4_ext_show_leaf(inode, path);
4564 map->m_flags |= EXT4_MAP_MAPPED;
4565 map->m_pblk = newblock;
4566 map->m_len = allocated;
4567 out2:
4568 ext4_ext_drop_refs(path);
4569 kfree(path);
4570
4571 trace_ext4_ext_map_blocks_exit(inode, flags, map,
4572 err ? err : allocated);
4573 return err ? err : allocated;
4574 }
4575
4576 int ext4_ext_truncate(handle_t *handle, struct inode *inode)
4577 {
4578 struct super_block *sb = inode->i_sb;
4579 ext4_lblk_t last_block;
4580 int err = 0;
4581
4582 /*
4583 * TODO: optimization is possible here.
4584 * Probably we need not scan at all,
4585 * because page truncation is enough.
4586 */
4587
4588 /* we have to know where to truncate from in the crash case */
4589 EXT4_I(inode)->i_disksize = inode->i_size;
4590 err = ext4_mark_inode_dirty(handle, inode);
4591 if (err)
4592 return err;
4593
4594 last_block = (inode->i_size + sb->s_blocksize - 1)
4595 >> EXT4_BLOCK_SIZE_BITS(sb);
4596 retry:
4597 err = ext4_es_remove_extent(inode, last_block,
4598 EXT_MAX_BLOCKS - last_block);
4599 if (err == -ENOMEM) {
4600 cond_resched();
4601 congestion_wait(BLK_RW_ASYNC, HZ/50);
4602 goto retry;
4603 }
4604 if (err)
4605 return err;
4606 return ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
4607 }
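/*
 * Example (hypothetical sizes): with 4 KiB blocks and i_size == 10000
 * bytes, last_block == (10000 + 4095) >> 12 == 3, so blocks 3 and above
 * are dropped from the extent status tree and then the extent tree.
 */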
4608
4609 static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
4610 ext4_lblk_t len, loff_t new_size,
4611 int flags)
4612 {
4613 struct inode *inode = file_inode(file);
4614 handle_t *handle;
4615 int ret = 0;
4616 int ret2 = 0;
4617 int retries = 0;
4618 int depth = 0;
4619 struct ext4_map_blocks map;
4620 unsigned int credits;
4621 loff_t epos;
4622
4623 BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS));
4624 map.m_lblk = offset;
4625 map.m_len = len;
4626 /*
4627 * Don't normalize the request if it can fit in one extent so
4628 * that it doesn't get unnecessarily split into multiple
4629 * extents.
4630 */
4631 if (len <= EXT_UNWRITTEN_MAX_LEN)
4632 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
4633
4634 /*
4635 * credits to insert 1 extent into the extent tree
4636 */
4637 credits = ext4_chunk_trans_blocks(inode, len);
4638 depth = ext_depth(inode);
4639
4640 retry:
4641 while (ret >= 0 && len) {
4642 /*
4643 * Recalculate credits when the extent tree depth changes.
4644 */
4645 if (depth != ext_depth(inode)) {
4646 credits = ext4_chunk_trans_blocks(inode, len);
4647 depth = ext_depth(inode);
4648 }
4649
4650 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
4651 credits);
4652 if (IS_ERR(handle)) {
4653 ret = PTR_ERR(handle);
4654 break;
4655 }
4656 ret = ext4_map_blocks(handle, inode, &map, flags);
4657 if (ret <= 0) {
4658 ext4_debug("inode #%lu: block %u: len %u: "
4659 "ext4_ext_map_blocks returned %d",
4660 inode->i_ino, map.m_lblk,
4661 map.m_len, ret);
4662 ext4_mark_inode_dirty(handle, inode);
4663 ret2 = ext4_journal_stop(handle);
4664 break;
4665 }
4666 map.m_lblk += ret;
4667 map.m_len = len = len - ret;
4668 epos = (loff_t)map.m_lblk << inode->i_blkbits;
4669 inode->i_ctime = current_time(inode);
4670 if (new_size) {
4671 if (epos > new_size)
4672 epos = new_size;
4673 if (ext4_update_inode_size(inode, epos) & 0x1)
4674 inode->i_mtime = inode->i_ctime;
4675 } else {
4676 if (epos > inode->i_size)
4677 ext4_set_inode_flag(inode,
4678 EXT4_INODE_EOFBLOCKS);
4679 }
4680 ext4_mark_inode_dirty(handle, inode);
4681 ext4_update_inode_fsync_trans(handle, inode, 1);
4682 ret2 = ext4_journal_stop(handle);
4683 if (ret2)
4684 break;
4685 }
4686 if (ret == -ENOSPC &&
4687 ext4_should_retry_alloc(inode->i_sb, &retries)) {
4688 ret = 0;
4689 goto retry;
4690 }
4691
4692 return ret > 0 ? ret2 : ret;
4693 }
4694
4695 static long ext4_zero_range(struct file *file, loff_t offset,
4696 loff_t len, int mode)
4697 {
4698 struct inode *inode = file_inode(file);
4699 handle_t *handle = NULL;
4700 unsigned int max_blocks;
4701 loff_t new_size = 0;
4702 int ret = 0;
4703 int flags;
4704 int credits;
4705 int partial_begin, partial_end;
4706 loff_t start, end;
4707 ext4_lblk_t lblk;
4708 unsigned int blkbits = inode->i_blkbits;
4709
4710 trace_ext4_zero_range(inode, offset, len, mode);
4711
4712 if (!S_ISREG(inode->i_mode))
4713 return -EINVAL;
4714
4715 /* Call ext4_force_commit to flush all data in case of data=journal. */
4716 if (ext4_should_journal_data(inode)) {
4717 ret = ext4_force_commit(inode->i_sb);
4718 if (ret)
4719 return ret;
4720 }
4721
4722 /*
4723 * Round up offset. This is not fallocate, we need to zero out
4724 * blocks, so convert the interior block-aligned part of the range
4725 * to unwritten and possibly manually zero out the unaligned parts
4726 * of the range.
4727 */
4728 start = round_up(offset, 1 << blkbits);
4729 end = round_down((offset + len), 1 << blkbits);
4730
4731 if (start < offset || end > offset + len)
4732 return -EINVAL;
4733 partial_begin = offset & ((1 << blkbits) - 1);
4734 partial_end = (offset + len) & ((1 << blkbits) - 1);
4735
4736 lblk = start >> blkbits;
4737 max_blocks = (end >> blkbits);
4738 if (max_blocks < lblk)
4739 max_blocks = 0;
4740 else
4741 max_blocks -= lblk;
4742
4743 inode_lock(inode);
4744
4745 /*
4746 * Indirect files do not support unwritten extents
4747 */
4748 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4749 ret = -EOPNOTSUPP;
4750 goto out_mutex;
4751 }
4752
4753 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4754 (offset + len > i_size_read(inode) ||
4755 offset + len > EXT4_I(inode)->i_disksize)) {
4756 new_size = offset + len;
4757 ret = inode_newsize_ok(inode, new_size);
4758 if (ret)
4759 goto out_mutex;
4760 }
4761
4762 flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
4763 if (mode & FALLOC_FL_KEEP_SIZE)
4764 flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4765
4766 /* Wait all existing dio workers, newcomers will block on i_mutex */
4767 inode_dio_wait(inode);
4768
4769 /* Preallocate the range including the unaligned edges */
4770 if (partial_begin || partial_end) {
4771 ret = ext4_alloc_file_blocks(file,
4772 round_down(offset, 1 << blkbits) >> blkbits,
4773 (round_up((offset + len), 1 << blkbits) -
4774 round_down(offset, 1 << blkbits)) >> blkbits,
4775 new_size, flags);
4776 if (ret)
4777 goto out_mutex;
4778
4779 }
4780
4781 /* Zero range excluding the unaligned edges */
4782 if (max_blocks > 0) {
4783 flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
4784 EXT4_EX_NOCACHE);
4785
4786 /*
4787 * Prevent page faults from reinstantiating pages we have
4788 * released from page cache.
4789 */
4790 down_write(&EXT4_I(inode)->i_mmap_sem);
4791
4792 ret = ext4_break_layouts(inode);
4793 if (ret) {
4794 up_write(&EXT4_I(inode)->i_mmap_sem);
4795 goto out_mutex;
4796 }
4797
4798 ret = ext4_update_disksize_before_punch(inode, offset, len);
4799 if (ret) {
4800 up_write(&EXT4_I(inode)->i_mmap_sem);
4801 goto out_mutex;
4802 }
4803
4804 truncate_pagecache_range(inode, start, end - 1);
4805 inode->i_mtime = inode->i_ctime = current_time(inode);
4806
4807 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
4808 flags);
4809 up_write(&EXT4_I(inode)->i_mmap_sem);
4810 if (ret)
4811 goto out_mutex;
4812 }
4813 if (!partial_begin && !partial_end)
4814 goto out_mutex;
4815
4816 /*
4817 * In the worst case we have to write out two nonadjacent unwritten
4818 * blocks and update the inode
4819 */
4820 credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1;
4821 if (ext4_should_journal_data(inode))
4822 credits += 2;
4823 handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
4824 if (IS_ERR(handle)) {
4825 ret = PTR_ERR(handle);
4826 ext4_std_error(inode->i_sb, ret);
4827 goto out_mutex;
4828 }
4829
4830 inode->i_mtime = inode->i_ctime = current_time(inode);
4831 if (new_size) {
4832 ext4_update_inode_size(inode, new_size);
4833 } else {
4834 /*
4835 * Mark that we allocate beyond EOF so the subsequent truncate
4836 * can proceed even if the new size is the same as i_size.
4837 */
4838 if ((offset + len) > i_size_read(inode))
4839 ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4840 }
4841 ext4_mark_inode_dirty(handle, inode);
4842
4843 /* Zero out partial block at the edges of the range */
4844 ret = ext4_zero_partial_blocks(handle, inode, offset, len);
4845 if (ret >= 0)
4846 ext4_update_inode_fsync_trans(handle, inode, 1);
4847
4848 if (file->f_flags & O_SYNC)
4849 ext4_handle_sync(handle);
4850
4851 ext4_journal_stop(handle);
4852 out_mutex:
4853 inode_unlock(inode);
4854 return ret;
4855 }
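/*
 * Editorial note: an illustrative userspace sketch (not part of this
 * file) of how ext4_zero_range() is reached. FALLOC_FL_ZERO_RANGE is
 * dispatched from ext4_fallocate() below; partial blocks at the edges
 * are zeroed out, interior aligned blocks become unwritten extents.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	// Zero bytes [4096, 1 MiB) of an ext4 file without changing
 *	// i_size; returns 0 on success, -1 with errno on failure.
 *	int zero_middle(int fd)
 *	{
 *		return fallocate(fd,
 *				 FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
 *				 4096, (1 << 20) - 4096);
 *	}
 */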
4856
4857 /*
4858 * Preallocate space for a file. This implements ext4's fallocate file
4859 * operation, which gets called from sys_fallocate.
4860 * For block-mapped files, posix_fallocate() should fall back to the
4861 * method of writing zeroes to the required new blocks (the same
4862 * behavior expected of filesystems that do not support fallocate()).
4863 */
4864 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4865 {
4866 struct inode *inode = file_inode(file);
4867 loff_t new_size = 0;
4868 unsigned int max_blocks;
4869 int ret = 0;
4870 int flags;
4871 ext4_lblk_t lblk;
4872 unsigned int blkbits = inode->i_blkbits;
4873
4874 /*
4875 * Encrypted inodes can't handle collapse range or insert
4876 * range since we would need to re-encrypt blocks with a
4877 * different IV or XTS tweak (which are based on the logical
4878 * block number).
4879 *
4880 * XXX It's not clear why zero range isn't working, but we're
4881 * blocking all the FL_ flags except FALLOC_FL_KEEP_SIZE &
4882 * FALLOC_FL_PUNCH_HOLE.
4883 */
4884 if (IS_ENCRYPTED(inode) &&
4885 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE |
4886 FALLOC_FL_ZERO_RANGE)))
4887 return -EOPNOTSUPP;
4888
4889 /* Return error if mode is not supported */
4890 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
4891 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
4892 FALLOC_FL_INSERT_RANGE))
4893 return -EOPNOTSUPP;
4894
4895 if (mode & FALLOC_FL_PUNCH_HOLE)
4896 return ext4_punch_hole(inode, offset, len);
4897
4898 ret = ext4_convert_inline_data(inode);
4899 if (ret)
4900 return ret;
4901
4902 if (mode & FALLOC_FL_COLLAPSE_RANGE)
4903 return ext4_collapse_range(inode, offset, len);
4904
4905 if (mode & FALLOC_FL_INSERT_RANGE)
4906 return ext4_insert_range(inode, offset, len);
4907
4908 if (mode & FALLOC_FL_ZERO_RANGE)
4909 return ext4_zero_range(file, offset, len, mode);
4910
4911 trace_ext4_fallocate_enter(inode, offset, len, mode);
4912 lblk = offset >> blkbits;
4913
4914 max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
4915 flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
4916 if (mode & FALLOC_FL_KEEP_SIZE)
4917 flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4918
4919 inode_lock(inode);
4920
4921 /*
4922 * We only support preallocation for extent-based files
4923 */
4924 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4925 ret = -EOPNOTSUPP;
4926 goto out;
4927 }
4928
4929 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4930 (offset + len > i_size_read(inode) ||
4931 offset + len > EXT4_I(inode)->i_disksize)) {
4932 new_size = offset + len;
4933 ret = inode_newsize_ok(inode, new_size);
4934 if (ret)
4935 goto out;
4936 }
4937
4938 /* Wait all existing dio workers, newcomers will block on i_mutex */
4939 inode_dio_wait(inode);
4940
4941 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
4942 if (ret)
4943 goto out;
4944
4945 if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
4946 ret = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
4947 EXT4_I(inode)->i_sync_tid);
4948 }
4949 out:
4950 inode_unlock(inode);
4951 trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
4952 return ret;
4953 }
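/*
 * Editorial note: illustrative userspace usage (not part of this
 * file). Mode 0 preallocates unwritten extents and extends i_size;
 * FALLOC_FL_KEEP_SIZE preallocates past EOF without changing i_size,
 * which is the EXT4_INODE_EOFBLOCKS case handled above.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	int prealloc(int fd)
 *	{
 *		if (fallocate(fd, 0, 0, 16 << 20))	// extend to 16 MiB
 *			return -1;
 *		// reserve another 16 MiB past EOF, i_size unchanged
 *		return fallocate(fd, FALLOC_FL_KEEP_SIZE,
 *				 16 << 20, 16 << 20);
 *	}
 */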
4954
4955 /*
4956 * This function converts a range of blocks to written extents.
4957 * The caller passes the start offset and the size; all unwritten
4958 * extents within this range will be converted to written extents.
4959 *
4960 * It is called from the direct I/O end_io callback function to
4961 * convert the fallocated extents after the I/O has completed.
4962 *
4963 * Returns 0 on success.
4964 */
4965 int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
4966 loff_t offset, ssize_t len)
4967 {
4968 unsigned int max_blocks;
4969 int ret = 0;
4970 int ret2 = 0;
4971 struct ext4_map_blocks map;
4972 unsigned int credits, blkbits = inode->i_blkbits;
4973
4974 map.m_lblk = offset >> blkbits;
4975 max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
4976
4977 /*
4978 * This is somewhat ugly but the idea is clear: when the transaction
4979 * is reserved, everything goes into it. Otherwise we rather start
4980 * several smaller transactions for conversion of each extent separately.
4981 */
4982 if (handle) {
4983 handle = ext4_journal_start_reserved(handle,
4984 EXT4_HT_EXT_CONVERT);
4985 if (IS_ERR(handle))
4986 return PTR_ERR(handle);
4987 credits = 0;
4988 } else {
4989 /*
4990 * credits to insert 1 extent into extent tree
4991 */
4992 credits = ext4_chunk_trans_blocks(inode, max_blocks);
4993 }
4994 while (ret >= 0 && ret < max_blocks) {
4995 map.m_lblk += ret;
4996 map.m_len = (max_blocks -= ret);
4997 if (credits) {
4998 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
4999 credits);
5000 if (IS_ERR(handle)) {
5001 ret = PTR_ERR(handle);
5002 break;
5003 }
5004 }
5005 ret = ext4_map_blocks(handle, inode, &map,
5006 EXT4_GET_BLOCKS_IO_CONVERT_EXT);
5007 if (ret <= 0)
5008 ext4_warning(inode->i_sb,
5009 "inode #%lu: block %u: len %u: "
5010 "ext4_ext_map_blocks returned %d",
5011 inode->i_ino, map.m_lblk,
5012 map.m_len, ret);
5013 ext4_mark_inode_dirty(handle, inode);
5014 if (credits)
5015 ret2 = ext4_journal_stop(handle);
5016 if (ret <= 0 || ret2)
5017 break;
5018 }
5019 if (!credits)
5020 ret2 = ext4_journal_stop(handle);
5021 return ret > 0 ? ret2 : ret;
5022 }
5023
5024 /*
5025 * If newes is not an existing extent (newes->es_pblk equals zero),
5026 * find the delayed extent at the start of newes, update newes
5027 * accordingly, and return the start of the next delayed extent.
5028 *
5029 * If newes is an existing extent (newes->es_pblk is not zero),
5030 * return the start of the next delayed extent, or EXT_MAX_BLOCKS
5031 * if no delayed extent is found; newes is left unmodified.
5032 */
5033 static int ext4_find_delayed_extent(struct inode *inode,
5034 struct extent_status *newes)
5035 {
5036 struct extent_status es;
5037 ext4_lblk_t block, next_del;
5038
5039 if (newes->es_pblk == 0) {
5040 ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
5041 newes->es_lblk,
5042 newes->es_lblk + newes->es_len - 1,
5043 &es);
5044
5045 /*
5046 * The range is unwritten on disk; if no delayed extent
5047 * covers it either, the whole range is a hole.
5048 */
5049 if (es.es_len == 0)
5050 /* A hole found. */
5051 return 0;
5052
5053 if (es.es_lblk > newes->es_lblk) {
5054 /* A hole found. */
5055 newes->es_len = min(es.es_lblk - newes->es_lblk,
5056 newes->es_len);
5057 return 0;
5058 }
5059
5060 newes->es_len = es.es_lblk + es.es_len - newes->es_lblk;
5061 }
5062
5063 block = newes->es_lblk + newes->es_len;
5064 ext4_es_find_extent_range(inode, &ext4_es_is_delayed, block,
5065 EXT_MAX_BLOCKS, &es);
5066 if (es.es_len == 0)
5067 next_del = EXT_MAX_BLOCKS;
5068 else
5069 next_del = es.es_lblk;
5070
5071 return next_del;
5072 }
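/*
 * Editorial note, a worked example of the logic above: suppose the
 * caller passes newes = {es_lblk = 10, es_len = 20, es_pblk = 0} and
 * the extent status tree holds a delayed extent {es_lblk = 15,
 * es_len = 5}. Since es.es_lblk (15) > newes->es_lblk (10), blocks
 * [10, 15) are a hole: newes->es_len becomes min(15 - 10, 20) = 5 and
 * 0 is returned. Had the delayed extent started at block 10 instead,
 * newes would be adjusted to cover it and the start of the next
 * delayed extent (or EXT_MAX_BLOCKS) would be returned.
 */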
5073
5074 static int ext4_xattr_fiemap(struct inode *inode,
5075 struct fiemap_extent_info *fieinfo)
5076 {
5077 __u64 physical = 0;
5078 __u64 length;
5079 __u32 flags = FIEMAP_EXTENT_LAST;
5080 int blockbits = inode->i_sb->s_blocksize_bits;
5081 int error = 0;
5082
5083 /* in-inode? */
5084 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
5085 struct ext4_iloc iloc;
5086 int offset;
5087
5088 error = ext4_get_inode_loc(inode, &iloc);
5089 if (error)
5090 return error;
5091 physical = (__u64)iloc.bh->b_blocknr << blockbits;
5092 offset = EXT4_GOOD_OLD_INODE_SIZE +
5093 EXT4_I(inode)->i_extra_isize;
5094 physical += offset;
5095 length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
5096 flags |= FIEMAP_EXTENT_DATA_INLINE;
5097 brelse(iloc.bh);
5098 } else { /* external block */
5099 physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
5100 length = inode->i_sb->s_blocksize;
5101 }
5102
5103 if (physical)
5104 error = fiemap_fill_next_extent(fieinfo, 0, physical,
5105 length, flags);
5106 return (error < 0 ? error : 0);
5107 }
5108
5109 static int _ext4_fiemap(struct inode *inode,
5110 struct fiemap_extent_info *fieinfo,
5111 __u64 start, __u64 len,
5112 int (*fill)(struct inode *, ext4_lblk_t,
5113 ext4_lblk_t,
5114 struct fiemap_extent_info *))
5115 {
5116 ext4_lblk_t start_blk;
5117 u32 ext4_fiemap_flags = FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR;
5118
5119 int error = 0;
5120
5121 if (ext4_has_inline_data(inode)) {
5122 int has_inline = 1;
5123
5124 error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline,
5125 start, len);
5126
5127 if (has_inline)
5128 return error;
5129 }
5130
5131 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
5132 error = ext4_ext_precache(inode);
5133 if (error)
5134 return error;
5135 fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
5136 }
5137
5138 /* fallback to generic here if not in extents fmt */
5139 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
5140 fill == ext4_fill_fiemap_extents)
5141 return generic_block_fiemap(inode, fieinfo, start, len,
5142 ext4_get_block);
5143
5144 if (fill == ext4_fill_es_cache_info)
5145 ext4_fiemap_flags &= FIEMAP_FLAG_XATTR;
5146 if (fiemap_check_flags(fieinfo, ext4_fiemap_flags))
5147 return -EBADR;
5148
5149 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
5150 error = ext4_xattr_fiemap(inode, fieinfo);
5151 } else {
5152 ext4_lblk_t len_blks;
5153 __u64 last_blk;
5154
5155 start_blk = start >> inode->i_sb->s_blocksize_bits;
5156 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
5157 if (last_blk >= EXT_MAX_BLOCKS)
5158 last_blk = EXT_MAX_BLOCKS-1;
5159 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
5160
5161 /*
5162 * Walk the extent tree gathering extent information
5163 * and pushing extents back to the user.
5164 */
5165 error = fill(inode, start_blk, len_blks, fieinfo);
5166 }
5167 return error;
5168 }
5169
5170 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
5171 __u64 start, __u64 len)
5172 {
5173 return _ext4_fiemap(inode, fieinfo, start, len,
5174 ext4_fill_fiemap_extents);
5175 }
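/*
 * Editorial note: an illustrative userspace sketch (not part of this
 * file) of the FS_IOC_FIEMAP ioctl that ends up in ext4_fiemap().
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *	#include <linux/fiemap.h>
 *
 *	int dump_extents(int fd)
 *	{
 *		unsigned int i, n = 32;
 *		struct fiemap *fm = calloc(1, sizeof(*fm) +
 *					   n * sizeof(struct fiemap_extent));
 *
 *		if (!fm)
 *			return -1;
 *		fm->fm_start = 0;
 *		fm->fm_length = FIEMAP_MAX_OFFSET;	// whole file
 *		fm->fm_flags = FIEMAP_FLAG_SYNC;	// flush delalloc first
 *		fm->fm_extent_count = n;
 *		if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
 *			free(fm);
 *			return -1;
 *		}
 *		for (i = 0; i < fm->fm_mapped_extents; i++)
 *			printf("logical %llu physical %llu len %llu\n",
 *			       (unsigned long long)fm->fm_extents[i].fe_logical,
 *			       (unsigned long long)fm->fm_extents[i].fe_physical,
 *			       (unsigned long long)fm->fm_extents[i].fe_length);
 *		free(fm);
 *		return 0;
 *	}
 */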
5176
5177 int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
5178 __u64 start, __u64 len)
5179 {
5180 if (ext4_has_inline_data(inode)) {
5181 int has_inline;
5182
5183 down_read(&EXT4_I(inode)->xattr_sem);
5184 has_inline = ext4_has_inline_data(inode);
5185 up_read(&EXT4_I(inode)->xattr_sem);
5186 if (has_inline)
5187 return 0;
5188 }
5189
5190 return _ext4_fiemap(inode, fieinfo, start, len,
5191 ext4_fill_es_cache_info);
5192 }
5193
5194
5195 /*
5196 * ext4_access_path:
5197 * Function to access the path buffer for marking it dirty.
5198 * It also checks if there are sufficient credits left in the journal
5199 * handle to update the path.
5200 */
5201 static int
5202 ext4_access_path(handle_t *handle, struct inode *inode,
5203 struct ext4_ext_path *path)
5204 {
5205 int credits, err;
5206
5207 if (!ext4_handle_valid(handle))
5208 return 0;
5209
5210 /*
5211 * Check if we need to extend the journal credits:
5212 * 3 for leaf, sb, and inode plus 2 (bmap and group
5213 * descriptor) for each block group; assume two block
5214 * groups
5215 */
5216 if (handle->h_buffer_credits < 7) {
5217 credits = ext4_writepage_trans_blocks(inode);
5218 err = ext4_ext_truncate_extend_restart(handle, inode, credits);
5219 /* EAGAIN is success */
5220 if (err && err != -EAGAIN)
5221 return err;
5222 }
5223
5224 err = ext4_ext_get_access(handle, inode, path);
5225 return err;
5226 }
5227
5228 /*
5229 * ext4_ext_shift_path_extents:
5230 * Shift the extents of a path structure lying between path[depth].p_ext
5231 * and EXT_LAST_EXTENT(path[depth].p_hdr), by @shift blocks. @SHIFT tells
5232 * whether it is a right shift or a left shift operation.
5233 */
5234 static int
5235 ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
5236 struct inode *inode, handle_t *handle,
5237 enum SHIFT_DIRECTION SHIFT)
5238 {
5239 int depth, err = 0;
5240 struct ext4_extent *ex_start, *ex_last;
5241 bool update = false;
5242 depth = path->p_depth;
5243
5244 while (depth >= 0) {
5245 if (depth == path->p_depth) {
5246 ex_start = path[depth].p_ext;
5247 if (!ex_start)
5248 return -EFSCORRUPTED;
5249
5250 ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);
5251
5252 err = ext4_access_path(handle, inode, path + depth);
5253 if (err)
5254 goto out;
5255
5256 if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr))
5257 update = 1;
5258
5259 while (ex_start <= ex_last) {
5260 if (SHIFT == SHIFT_LEFT) {
5261 le32_add_cpu(&ex_start->ee_block,
5262 -shift);
5263 /* Try to merge to the left. */
5264 if ((ex_start >
5265 EXT_FIRST_EXTENT(path[depth].p_hdr))
5266 &&
5267 ext4_ext_try_to_merge_right(inode,
5268 path, ex_start - 1))
5269 ex_last--;
5270 else
5271 ex_start++;
5272 } else {
5273 le32_add_cpu(&ex_last->ee_block, shift);
5274 ext4_ext_try_to_merge_right(inode, path,
5275 ex_last);
5276 ex_last--;
5277 }
5278 }
5279 err = ext4_ext_dirty(handle, inode, path + depth);
5280 if (err)
5281 goto out;
5282
5283 if (--depth < 0 || !update)
5284 break;
5285 }
5286
5287 /* Update index too */
5288 err = ext4_access_path(handle, inode, path + depth);
5289 if (err)
5290 goto out;
5291
5292 if (SHIFT == SHIFT_LEFT)
5293 le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
5294 else
5295 le32_add_cpu(&path[depth].p_idx->ei_block, shift);
5296 err = ext4_ext_dirty(handle, inode, path + depth);
5297 if (err)
5298 goto out;
5299
5300 /* we are done if current index is not a starting index */
5301 if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
5302 break;
5303
5304 depth--;
5305 }
5306
5307 out:
5308 return err;
5309 }
5310
5311 /*
5312 * ext4_ext_shift_extents:
5313 * All the extents which lie in the range from @start to the last allocated
5314 * block for the @inode are shifted either towards left or right (depending
5315 * upon @SHIFT) by @shift blocks.
5316 * On success, 0 is returned, error otherwise.
5317 */
5318 static int
5319 ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
5320 ext4_lblk_t start, ext4_lblk_t shift,
5321 enum SHIFT_DIRECTION SHIFT)
5322 {
5323 struct ext4_ext_path *path;
5324 int ret = 0, depth;
5325 struct ext4_extent *extent;
5326 ext4_lblk_t stop, *iterator, ex_start, ex_end;
5327
5328 /* Let path point to the last extent */
5329 path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
5330 EXT4_EX_NOCACHE);
5331 if (IS_ERR(path))
5332 return PTR_ERR(path);
5333
5334 depth = path->p_depth;
5335 extent = path[depth].p_ext;
5336 if (!extent)
5337 goto out;
5338
5339 stop = le32_to_cpu(extent->ee_block);
5340
5341 /*
5342 * For left shifts, make sure the hole on the left is big enough to
5343 * accommodate the shift. For right shifts, make sure the last extent
5344 * won't be shifted beyond EXT_MAX_BLOCKS.
5345 */
5346 if (SHIFT == SHIFT_LEFT) {
5347 path = ext4_find_extent(inode, start - 1, &path,
5348 EXT4_EX_NOCACHE);
5349 if (IS_ERR(path))
5350 return PTR_ERR(path);
5351 depth = path->p_depth;
5352 extent = path[depth].p_ext;
5353 if (extent) {
5354 ex_start = le32_to_cpu(extent->ee_block);
5355 ex_end = le32_to_cpu(extent->ee_block) +
5356 ext4_ext_get_actual_len(extent);
5357 } else {
5358 ex_start = 0;
5359 ex_end = 0;
5360 }
5361
5362 if ((start == ex_start && shift > ex_start) ||
5363 (shift > start - ex_end)) {
5364 ret = -EINVAL;
5365 goto out;
5366 }
5367 } else {
5368 if (shift > EXT_MAX_BLOCKS -
5369 (stop + ext4_ext_get_actual_len(extent))) {
5370 ret = -EINVAL;
5371 goto out;
5372 }
5373 }
5374
5375 /*
5376 * In case of a left shift, the iterator points to start and is
5377 * increased till we reach stop. In case of a right shift, the
5378 * iterator points to stop and is decreased till we reach start.
5379 */
5380 if (SHIFT == SHIFT_LEFT)
5381 iterator = &start;
5382 else
5383 iterator = &stop;
5384
5385 /*
5386 * It's safe to start updating extents. Start and stop are unsigned,
5387 * so in case of a right shift, if an extent with block 0 is reached,
5388 * the iterator becomes NULL to indicate the end of the loop.
5389 */
5390 while (iterator && start <= stop) {
5391 path = ext4_find_extent(inode, *iterator, &path,
5392 EXT4_EX_NOCACHE);
5393 if (IS_ERR(path))
5394 return PTR_ERR(path);
5395 depth = path->p_depth;
5396 extent = path[depth].p_ext;
5397 if (!extent) {
5398 EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
5399 (unsigned long) *iterator);
5400 ret = -EFSCORRUPTED;
goto out;
5401 }
5402 if (SHIFT == SHIFT_LEFT && *iterator >
5403 le32_to_cpu(extent->ee_block)) {
5404 /* Hole, move to the next extent */
5405 if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
5406 path[depth].p_ext++;
5407 } else {
5408 *iterator = ext4_ext_next_allocated_block(path);
5409 continue;
5410 }
5411 }
5412
5413 if (SHIFT == SHIFT_LEFT) {
5414 extent = EXT_LAST_EXTENT(path[depth].p_hdr);
5415 *iterator = le32_to_cpu(extent->ee_block) +
5416 ext4_ext_get_actual_len(extent);
5417 } else {
5418 extent = EXT_FIRST_EXTENT(path[depth].p_hdr);
5419 if (le32_to_cpu(extent->ee_block) > 0)
5420 *iterator = le32_to_cpu(extent->ee_block) - 1;
5421 else
5422 /* Beginning is reached, end of the loop */
5423 iterator = NULL;
5424 /* Update path extent in case we need to stop */
5425 while (le32_to_cpu(extent->ee_block) < start)
5426 extent++;
5427 path[depth].p_ext = extent;
5428 }
5429 ret = ext4_ext_shift_path_extents(path, shift, inode,
5430 handle, SHIFT);
5431 if (ret)
5432 break;
5433 }
5434 out:
5435 ext4_ext_drop_refs(path);
5436 kfree(path);
5437 return ret;
5438 }
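/*
 * Editorial note, a worked example (illustrative, not from the
 * source): suppose a collapse of blocks [16, 24) has just removed that
 * range, and we now shift with start = 24 and shift = 8 (SHIFT_LEFT).
 * The extent to the left of start ends at ex_end <= 16, so the sanity
 * check "shift > start - ex_end" (8 > 24 - 16) does not trigger. The
 * loop then walks every leaf from block 24 upward and applies
 * le32_add_cpu(&ex->ee_block, -8), so an extent that covered logical
 * blocks [24, 40) afterwards covers [16, 32), and the indexes above it
 * are corrected the same way.
 */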
5439
5440 /*
5441 * ext4_collapse_range:
5442 * This implements the fallocate's collapse range functionality for ext4.
5443 * Returns 0 on success, error otherwise.
5444 */
5445 int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
5446 {
5447 struct super_block *sb = inode->i_sb;
5448 ext4_lblk_t punch_start, punch_stop;
5449 handle_t *handle;
5450 unsigned int credits;
5451 loff_t new_size, ioffset;
5452 int ret;
5453
5454 /*
5455 * We need to test this early because xfstests assumes that a
5456 * collapse range of (0, 1) will return EOPNOTSUPP if the file
5457 * system does not support collapse range.
5458 */
5459 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
5460 return -EOPNOTSUPP;
5461
5462 /* Collapse range works only on fs cluster size aligned regions. */
5463 if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
5464 len & (EXT4_CLUSTER_SIZE(sb) - 1))
5465 return -EINVAL;
5466
5467 if (!S_ISREG(inode->i_mode))
5468 return -EINVAL;
5469
5470 trace_ext4_collapse_range(inode, offset, len);
5471
5472 punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
5473 punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
5474
5475 /* Call ext4_force_commit to flush all data in case of data=journal. */
5476 if (ext4_should_journal_data(inode)) {
5477 ret = ext4_force_commit(inode->i_sb);
5478 if (ret)
5479 return ret;
5480 }
5481
5482 inode_lock(inode);
5483 /*
5484 * There is no need to overlap collapse range with EOF, in which case
5485 * it is effectively a truncate operation
5486 */
5487 if (offset + len >= i_size_read(inode)) {
5488 ret = -EINVAL;
5489 goto out_mutex;
5490 }
5491
5492 /* Currently just for extent based files */
5493 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
5494 ret = -EOPNOTSUPP;
5495 goto out_mutex;
5496 }
5497
5498 /* Wait for existing dio to complete */
5499 inode_dio_wait(inode);
5500
5501 /*
5502 * Prevent page faults from reinstantiating pages we have released
5503 * from page cache.
5504 */
5505 down_write(&EXT4_I(inode)->i_mmap_sem);
5506
5507 ret = ext4_break_layouts(inode);
5508 if (ret)
5509 goto out_mmap;
5510
5511 /*
5512 * Need to round down offset to be aligned with page size boundary
5513 * for page size > block size.
5514 */
5515 ioffset = round_down(offset, PAGE_SIZE);
5516 /*
5517 * Write tail of the last page before removed range since it will get
5518 * removed from the page cache below.
5519 */
5520 ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset);
5521 if (ret)
5522 goto out_mmap;
5523
5524 /*
5525 * Write data that will be shifted, to preserve it when discarding
5526 * page cache below; i_mmap_sem protects the pages from being dirtied.
5527 */
5528 ret = filemap_write_and_wait_range(inode->i_mapping, offset + len,
5529 LLONG_MAX);
5530 if (ret)
5531 goto out_mmap;
5532 truncate_pagecache(inode, ioffset);
5533
5534 credits = ext4_writepage_trans_blocks(inode);
5535 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
5536 if (IS_ERR(handle)) {
5537 ret = PTR_ERR(handle);
5538 goto out_mmap;
5539 }
5540
5541 down_write(&EXT4_I(inode)->i_data_sem);
5542 ext4_discard_preallocations(inode);
5543
5544 ret = ext4_es_remove_extent(inode, punch_start,
5545 EXT_MAX_BLOCKS - punch_start);
5546 if (ret) {
5547 up_write(&EXT4_I(inode)->i_data_sem);
5548 goto out_stop;
5549 }
5550
5551 ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
5552 if (ret) {
5553 up_write(&EXT4_I(inode)->i_data_sem);
5554 goto out_stop;
5555 }
5556 ext4_discard_preallocations(inode);
5557
5558 ret = ext4_ext_shift_extents(inode, handle, punch_stop,
5559 punch_stop - punch_start, SHIFT_LEFT);
5560 if (ret) {
5561 up_write(&EXT4_I(inode)->i_data_sem);
5562 goto out_stop;
5563 }
5564
5565 new_size = i_size_read(inode) - len;
5566 i_size_write(inode, new_size);
5567 EXT4_I(inode)->i_disksize = new_size;
5568
5569 up_write(&EXT4_I(inode)->i_data_sem);
5570 if (IS_SYNC(inode))
5571 ext4_handle_sync(handle);
5572 inode->i_mtime = inode->i_ctime = current_time(inode);
5573 ext4_mark_inode_dirty(handle, inode);
5574 ext4_update_inode_fsync_trans(handle, inode, 1);
5575
5576 out_stop:
5577 ext4_journal_stop(handle);
5578 out_mmap:
5579 up_write(&EXT4_I(inode)->i_mmap_sem);
5580 out_mutex:
5581 inode_unlock(inode);
5582 return ret;
5583 }
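/*
 * Editorial note: illustrative userspace usage (not part of this
 * file). Offset and length must be aligned to the filesystem cluster
 * size and the range must not reach EOF, per the checks above.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	// Delete bytes [1 MiB, 2 MiB) and shift the tail of the file
 *	// down; i_size shrinks by the collapsed length.
 *	int collapse_mib(int fd)
 *	{
 *		return fallocate(fd, FALLOC_FL_COLLAPSE_RANGE,
 *				 1 << 20, 1 << 20);
 *	}
 */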
5584
5585 /*
5586 * ext4_insert_range:
5587 * This function implements the FALLOC_FL_INSERT_RANGE flag of fallocate.
5588 * The data blocks starting from @offset to the EOF are shifted right
5589 * by @len to create a hole in the @inode. Inode size is increased by
5590 * @len bytes.
5591 * Returns 0 on success, error otherwise.
5592 */
5593 int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
5594 {
5595 struct super_block *sb = inode->i_sb;
5596 handle_t *handle;
5597 struct ext4_ext_path *path;
5598 struct ext4_extent *extent;
5599 ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0;
5600 unsigned int credits, ee_len;
5601 int ret = 0, depth, split_flag = 0;
5602 loff_t ioffset;
5603
5604 /*
5605 * We need to test this early because xfstests assumes that an
5606 * insert range of (0, 1) will return EOPNOTSUPP if the file
5607 * system does not support insert range.
5608 */
5609 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
5610 return -EOPNOTSUPP;
5611
5612 /* Insert range works only on fs cluster size aligned regions. */
5613 if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
5614 len & (EXT4_CLUSTER_SIZE(sb) - 1))
5615 return -EINVAL;
5616
5617 if (!S_ISREG(inode->i_mode))
5618 return -EOPNOTSUPP;
5619
5620 trace_ext4_insert_range(inode, offset, len);
5621
5622 offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb);
5623 len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb);
5624
5625 /* Call ext4_force_commit to flush all data in case of data=journal. */
5626 if (ext4_should_journal_data(inode)) {
5627 ret = ext4_force_commit(inode->i_sb);
5628 if (ret)
5629 return ret;
5630 }
5631
5632 inode_lock(inode);
5633 /* Currently just for extent based files */
5634 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
5635 ret = -EOPNOTSUPP;
5636 goto out_mutex;
5637 }
5638
5639 /* Check whether the maximum file size would be exceeded */
5640 if (inode->i_size + len > inode->i_sb->s_maxbytes) {
5641 ret = -EFBIG;
5642 goto out_mutex;
5643 }
5644
5645 /* Offset must be less than i_size */
5646 if (offset >= i_size_read(inode)) {
5647 ret = -EINVAL;
5648 goto out_mutex;
5649 }
5650
5651 /* Wait for existing dio to complete */
5652 inode_dio_wait(inode);
5653
5654 /*
5655 * Prevent page faults from reinstantiating pages we have released
5656 * from page cache.
5657 */
5658 down_write(&EXT4_I(inode)->i_mmap_sem);
5659
5660 ret = ext4_break_layouts(inode);
5661 if (ret)
5662 goto out_mmap;
5663
5664 /*
5665 * Need to round down to align start offset to page size boundary
5666 * for page size > block size.
5667 */
5668 ioffset = round_down(offset, PAGE_SIZE);
5669 /* Write out all dirty pages */
5670 ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
5671 LLONG_MAX);
5672 if (ret)
5673 goto out_mmap;
5674 truncate_pagecache(inode, ioffset);
5675
5676 credits = ext4_writepage_trans_blocks(inode);
5677 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
5678 if (IS_ERR(handle)) {
5679 ret = PTR_ERR(handle);
5680 goto out_mmap;
5681 }
5682
5683 /* Expand file to avoid data loss if there is error while shifting */
5684 inode->i_size += len;
5685 EXT4_I(inode)->i_disksize += len;
5686 inode->i_mtime = inode->i_ctime = current_time(inode);
5687 ret = ext4_mark_inode_dirty(handle, inode);
5688 if (ret)
5689 goto out_stop;
5690
5691 down_write(&EXT4_I(inode)->i_data_sem);
5692 ext4_discard_preallocations(inode);
5693
5694 path = ext4_find_extent(inode, offset_lblk, NULL, 0);
5695 if (IS_ERR(path)) {
ret = PTR_ERR(path);
5696 up_write(&EXT4_I(inode)->i_data_sem);
5697 goto out_stop;
5698 }
5699
5700 depth = ext_depth(inode);
5701 extent = path[depth].p_ext;
5702 if (extent) {
5703 ee_start_lblk = le32_to_cpu(extent->ee_block);
5704 ee_len = ext4_ext_get_actual_len(extent);
5705
5706 /*
5707 * If offset_lblk is not the starting block of extent, split
5708 * the extent @offset_lblk
5709 */
5710 if ((offset_lblk > ee_start_lblk) &&
5711 (offset_lblk < (ee_start_lblk + ee_len))) {
5712 if (ext4_ext_is_unwritten(extent))
5713 split_flag = EXT4_EXT_MARK_UNWRIT1 |
5714 EXT4_EXT_MARK_UNWRIT2;
5715 ret = ext4_split_extent_at(handle, inode, &path,
5716 offset_lblk, split_flag,
5717 EXT4_EX_NOCACHE |
5718 EXT4_GET_BLOCKS_PRE_IO |
5719 EXT4_GET_BLOCKS_METADATA_NOFAIL);
5720 }
5721
5722 ext4_ext_drop_refs(path);
5723 kfree(path);
5724 if (ret < 0) {
5725 up_write(&EXT4_I(inode)->i_data_sem);
5726 goto out_stop;
5727 }
5728 } else {
5729 ext4_ext_drop_refs(path);
5730 kfree(path);
5731 }
5732
5733 ret = ext4_es_remove_extent(inode, offset_lblk,
5734 EXT_MAX_BLOCKS - offset_lblk);
5735 if (ret) {
5736 up_write(&EXT4_I(inode)->i_data_sem);
5737 goto out_stop;
5738 }
5739
5740 /*
5741 * if offset_lblk lies in a hole which is at start of file, use
5742 * ee_start_lblk to shift extents
5743 */
5744 ret = ext4_ext_shift_extents(inode, handle,
5745 ee_start_lblk > offset_lblk ? ee_start_lblk : offset_lblk,
5746 len_lblk, SHIFT_RIGHT);
5747
5748 up_write(&EXT4_I(inode)->i_data_sem);
5749 if (IS_SYNC(inode))
5750 ext4_handle_sync(handle);
5751 if (ret >= 0)
5752 ext4_update_inode_fsync_trans(handle, inode, 1);
5753
5754 out_stop:
5755 ext4_journal_stop(handle);
5756 out_mmap:
5757 up_write(&EXT4_I(inode)->i_mmap_sem);
5758 out_mutex:
5759 inode_unlock(inode);
5760 return ret;
5761 }
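/*
 * Editorial note: illustrative userspace usage (not part of this
 * file). The offset must lie below i_size, offset and length must be
 * cluster aligned, and i_size grows by the inserted length.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	// Open a 1 MiB hole at offset 1 MiB, shifting existing data
 *	// right; the hole reads back as zeroes until written.
 *	int insert_mib(int fd)
 *	{
 *		return fallocate(fd, FALLOC_FL_INSERT_RANGE,
 *				 1 << 20, 1 << 20);
 *	}
 */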
5762
5763 /**
5764 * ext4_swap_extents() - Swap extents between two inodes
5765 * @handle: handle for this transaction
5766 * @inode1: First inode
5767 * @inode2: Second inode
5768 * @lblk1: Start block for first inode
5769 * @lblk2: Start block for second inode
5770 * @count: Number of blocks to swap
5771 * @unwritten: Mark second inode's extents as unwritten after swap
5772 * @erp: Pointer to save error value
5773 *
5774 * This helper routine does exactly what its name promises: it swaps
5775 * extents between two inodes. Page-cache locking consistency, bh
5776 * mapping consistency and copying of extent data are the caller's job.
5777 * Locking:
5778 * i_mutex is held for both inodes
5779 * i_data_sem is locked for write for both inodes
5780 * Assumptions:
5781 * All pages from the requested range are locked for both inodes
5782 */
5783 int
5784 ext4_swap_extents(handle_t *handle, struct inode *inode1,
5785 struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
5786 ext4_lblk_t count, int unwritten, int *erp)
5787 {
5788 struct ext4_ext_path *path1 = NULL;
5789 struct ext4_ext_path *path2 = NULL;
5790 int replaced_count = 0;
5791
5792 BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
5793 BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
5794 BUG_ON(!inode_is_locked(inode1));
5795 BUG_ON(!inode_is_locked(inode2));
5796
5797 *erp = ext4_es_remove_extent(inode1, lblk1, count);
5798 if (unlikely(*erp))
5799 return 0;
5800 *erp = ext4_es_remove_extent(inode2, lblk2, count);
5801 if (unlikely(*erp))
5802 return 0;
5803
5804 while (count) {
5805 struct ext4_extent *ex1, *ex2, tmp_ex;
5806 ext4_lblk_t e1_blk, e2_blk;
5807 int e1_len, e2_len, len;
5808 int split = 0;
5809
5810 path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
5811 if (IS_ERR(path1)) {
5812 *erp = PTR_ERR(path1);
5813 path1 = NULL;
5814 finish:
5815 count = 0;
5816 goto repeat;
5817 }
5818 path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
5819 if (IS_ERR(path2)) {
5820 *erp = PTR_ERR(path2);
5821 path2 = NULL;
5822 goto finish;
5823 }
5824 ex1 = path1[path1->p_depth].p_ext;
5825 ex2 = path2[path2->p_depth].p_ext;
5826
5827 if (unlikely(!ex2 || !ex1))
5828 goto finish;
5829
5830 e1_blk = le32_to_cpu(ex1->ee_block);
5831 e2_blk = le32_to_cpu(ex2->ee_block);
5832 e1_len = ext4_ext_get_actual_len(ex1);
5833 e2_len = ext4_ext_get_actual_len(ex2);
5834
5835 /* Do we have something to swap? */
5836 if (!in_range(lblk1, e1_blk, e1_len) ||
5837 !in_range(lblk2, e2_blk, e2_len)) {
5838 ext4_lblk_t next1, next2;
5839
5840 /* if hole after extent, then go to next extent */
5841 next1 = ext4_ext_next_allocated_block(path1);
5842 next2 = ext4_ext_next_allocated_block(path2);
5843 /* If hole before extent, then shift to that extent */
5844 if (e1_blk > lblk1)
5845 next1 = e1_blk;
5846 if (e2_blk > lblk2)
5847 next2 = e2_blk;
5848 /* Do we have something to swap */
5849 if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
5850 goto finish;
5851 /* Move to the rightmost boundary */
5852 len = next1 - lblk1;
5853 if (len < next2 - lblk2)
5854 len = next2 - lblk2;
5855 if (len > count)
5856 len = count;
5857 lblk1 += len;
5858 lblk2 += len;
5859 count -= len;
5860 goto repeat;
5861 }
5862
5863 /* Prepare left boundary */
5864 if (e1_blk < lblk1) {
5865 split = 1;
5866 *erp = ext4_force_split_extent_at(handle, inode1,
5867 &path1, lblk1, 0);
5868 if (unlikely(*erp))
5869 goto finish;
5870 }
5871 if (e2_blk < lblk2) {
5872 split = 1;
5873 *erp = ext4_force_split_extent_at(handle, inode2,
5874 &path2, lblk2, 0);
5875 if (unlikely(*erp))
5876 goto finish;
5877 }
5878 /* ext4_split_extent_at() may result in a leaf extent split,
5879 * the path must be revalidated. */
5880 if (split)
5881 goto repeat;
5882
5883 /* Prepare right boundary */
5884 len = count;
5885 if (len > e1_blk + e1_len - lblk1)
5886 len = e1_blk + e1_len - lblk1;
5887 if (len > e2_blk + e2_len - lblk2)
5888 len = e2_blk + e2_len - lblk2;
5889
5890 if (len != e1_len) {
5891 split = 1;
5892 *erp = ext4_force_split_extent_at(handle, inode1,
5893 &path1, lblk1 + len, 0);
5894 if (unlikely(*erp))
5895 goto finish;
5896 }
5897 if (len != e2_len) {
5898 split = 1;
5899 *erp = ext4_force_split_extent_at(handle, inode2,
5900 &path2, lblk2 + len, 0);
5901 if (*erp)
5902 goto finish;
5903 }
5904
5905 /* ext4_split_extent_at() may result in a leaf extent split,
5906 * the path must be revalidated. */
5907 goto repeat;
5908
5909 BUG_ON(e2_len != e1_len);
5910 *erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
5911 if (unlikely(*erp))
5912 goto finish;
5913 *erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
5914 if (unlikely(*erp))
5915 goto finish;
5916
5917 /* Both extents are fully inside boundaries. Swap it now */
5918 tmp_ex = *ex1;
5919 ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
5920 ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
5921 ex1->ee_len = cpu_to_le16(e2_len);
5922 ex2->ee_len = cpu_to_le16(e1_len);
5923 if (unwritten)
5924 ext4_ext_mark_unwritten(ex2);
5925 if (ext4_ext_is_unwritten(&tmp_ex))
5926 ext4_ext_mark_unwritten(ex1);
5927
5928 ext4_ext_try_to_merge(handle, inode2, path2, ex2);
5929 ext4_ext_try_to_merge(handle, inode1, path1, ex1);
5930 *erp = ext4_ext_dirty(handle, inode2, path2 +
5931 path2->p_depth);
5932 if (unlikely(*erp))
5933 goto finish;
5934 *erp = ext4_ext_dirty(handle, inode1, path1 +
5935 path1->p_depth);
5936
5937 /*
5938 * Looks scary, but the second inode already points to the new
5939 * blocks and was successfully dirtied; an error here can only be
5940 * a journal error, so the whole transaction will be aborted anyway.
5941 */
5942 if (unlikely(*erp))
5943 goto finish;
5944 lblk1 += len;
5945 lblk2 += len;
5946 replaced_count += len;
5947 count -= len;
5948
5949 repeat:
5950 ext4_ext_drop_refs(path1);
5951 kfree(path1);
5952 ext4_ext_drop_refs(path2);
5953 kfree(path2);
5954 path1 = path2 = NULL;
5955 }
5956 return replaced_count;
5957 }
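/*
 * Editorial note: the main in-kernel user of ext4_swap_extents() is
 * ext4_move_extents() (fs/ext4/move_extent.c), reached from the
 * EXT4_IOC_MOVE_EXT ioctl that e4defrag issues. An illustrative
 * userspace sketch follows; the struct layout mirrors the kernel's
 * struct move_extent, which callers are expected to define themselves.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/types.h>
 *
 *	struct move_extent {
 *		__u32 reserved;		// must be zero
 *		__u32 donor_fd;		// donor file descriptor
 *		__u64 orig_start;	// logical start of the orig file
 *		__u64 donor_start;	// logical start of the donor file
 *		__u64 len;		// number of blocks to move
 *		__u64 moved_len;	// out: blocks actually moved
 *	};
 *	#define EXT4_IOC_MOVE_EXT	_IOWR('f', 15, struct move_extent)
 *
 *	// Swap nblocks blocks at offset 0 of orig_fd with donor_fd.
 *	int defrag_swap(int orig_fd, int donor_fd, __u64 nblocks)
 *	{
 *		struct move_extent me = {
 *			.donor_fd = donor_fd,
 *			.len = nblocks,
 *		};
 *		return ioctl(orig_fd, EXT4_IOC_MOVE_EXT, &me);
 *	}
 */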
5958
5959 /*
5960 * ext4_clu_mapped - determine whether any block in a logical cluster has
5961 * been mapped to a physical cluster
5962 *
5963 * @inode - file containing the logical cluster
5964 * @lclu - logical cluster of interest
5965 *
5966 * Returns 1 if any block in the logical cluster is mapped, signifying
5967 * that a physical cluster has been allocated for it. Otherwise,
5968 * returns 0. Can also return negative error codes. Derived from
5969 * ext4_ext_map_blocks().
5970 */
5971 int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
5972 {
5973 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5974 struct ext4_ext_path *path;
5975 int depth, mapped = 0, err = 0;
5976 struct ext4_extent *extent;
5977 ext4_lblk_t first_lblk, first_lclu, last_lclu;
5978
5979 /* search for the extent closest to the first block in the cluster */
5980 path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
5981 if (IS_ERR(path)) {
5982 err = PTR_ERR(path);
5983 path = NULL;
5984 goto out;
5985 }
5986
5987 depth = ext_depth(inode);
5988
5989 /*
5990 * A consistent leaf must not be empty. This situation is possible,
5991 * though, _during_ tree modification, and it's why an assert can't
5992 * be put in ext4_find_extent().
5993 */
5994 if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
5995 EXT4_ERROR_INODE(inode,
5996 "bad extent address - lblock: %lu, depth: %d, pblock: %lld",
5997 (unsigned long) EXT4_C2B(sbi, lclu),
5998 depth, path[depth].p_block);
5999 err = -EFSCORRUPTED;
6000 goto out;
6001 }
6002
6003 extent = path[depth].p_ext;
6004
6005 /* can't be mapped if the extent tree is empty */
6006 if (extent == NULL)
6007 goto out;
6008
6009 first_lblk = le32_to_cpu(extent->ee_block);
6010 first_lclu = EXT4_B2C(sbi, first_lblk);
6011
6012 /*
6013 * Three possible outcomes at this point - found extent spanning
6014 * the target cluster, to the left of the target cluster, or to the
6015 * right of the target cluster. The first two cases are handled here.
6016 * The last case indicates the target cluster is not mapped.
6017 */
6018 if (lclu >= first_lclu) {
6019 last_lclu = EXT4_B2C(sbi, first_lblk +
6020 ext4_ext_get_actual_len(extent) - 1);
6021 if (lclu <= last_lclu) {
6022 mapped = 1;
6023 } else {
6024 first_lblk = ext4_ext_next_allocated_block(path);
6025 first_lclu = EXT4_B2C(sbi, first_lblk);
6026 if (lclu == first_lclu)
6027 mapped = 1;
6028 }
6029 }
6030
6031 out:
6032 ext4_ext_drop_refs(path);
6033 kfree(path);
6034
6035 return err ? err : mapped;
6036 }
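/*
 * Editorial note, a worked example of the cluster arithmetic above:
 * with bigalloc, EXT4_B2C()/EXT4_C2B() are shifts by s_cluster_bits.
 * For 4 KiB blocks grouped into 64 KiB clusters, s_cluster_bits == 4
 * (16 blocks per cluster):
 *
 *	lclu  = lblk >> 4;	// EXT4_B2C(sbi, lblk)
 *	lblk0 = lclu << 4;	// EXT4_C2B(sbi, lclu)
 *
 * An extent covering logical blocks [100, 130) spans clusters 6..8
 * (100 >> 4 == 6, 129 >> 4 == 8), so ext4_clu_mapped() returns 1 for
 * lclu 6, 7 and 8, and 0 for any other cluster not touched by another
 * extent.
 */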