This source file includes the following definitions:
- gfs2_unstuffer_page
- gfs2_unstuff_dinode
- find_metapath
- metapath_branch_start
- metaptr1
- metapointer
- metaend
- clone_metapath
- gfs2_metapath_ra
- __fillup_metapath
- lookup_metapath
- fillup_metapath
- metapath_to_block
- release_metapath
- gfs2_extent_length
- gfs2_hole_walker
- gfs2_hole_size
- gfs2_indirect_init
- gfs2_iomap_alloc
- gfs2_alloc_size
- gfs2_iomap_get
- gfs2_lblk_to_dblk
- gfs2_write_lock
- gfs2_write_unlock
- gfs2_iomap_page_prepare
- gfs2_iomap_page_done
- gfs2_iomap_begin_write
- gfs2_iomap_need_write_lock
- gfs2_iomap_begin
- gfs2_iomap_end
- gfs2_block_map
- gfs2_extent_map
- gfs2_block_zero_range
- gfs2_journaled_truncate
- trunc_start
- gfs2_iomap_get_alloc
- sweep_bh_for_rgrps
- mp_eq_to_hgt
- find_nonnull_ptr
- metapointer_range
- walk_done
- punch_hole
- trunc_end
- do_shrink
- gfs2_trim_blocks
- do_grow
- gfs2_setattr_size
- gfs2_truncatei_resume
- gfs2_file_dealloc
- gfs2_free_journal_extents
- gfs2_add_jextent
- gfs2_map_journal_extents
- gfs2_write_alloc_required
- stuffed_zero_range
- gfs2_journaled_truncate_range
- __gfs2_punch_hole
/*
 * GFS2 block map: lookup, allocation, truncation and deallocation of the
 * on-disk metadata tree that maps an inode's logical blocks to disk blocks.
 */
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/iomap.h>
#include <linux/ktime.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "log.h"
#include "super.h"
#include "trans.h"
#include "dir.h"
#include "util.h"
#include "aops.h"
#include "trace_gfs2.h"

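/*
 * A metapath describes a path through the inode's metadata tree:
 * mp_list[h] is the index of the pointer to follow in the buffer at
 * height h, and mp_bh[h] caches that buffer (mp_bh[0] is the dinode
 * buffer itself).  mp_fheight is the height the path was computed for;
 * mp_aheight says how many heights of buffers are actually filled in.
 */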
struct metapath {
	struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
	__u16 mp_list[GFS2_MAX_META_HEIGHT];
	int mp_fheight;
	int mp_aheight;
};

static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length);

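/**
 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
 * @ip: the inode
 * @dibh: the dinode buffer
 * @block: the block number that was allocated
 * @page: The (optional) page. This is looked up if @page is NULL
 *
 * Returns: errno
 */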
static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
			       u64 block, struct page *page)
{
	struct inode *inode = &ip->i_inode;
	struct buffer_head *bh;
	int release = 0;

	if (!page || page->index) {
		page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
		if (!page)
			return -ENOMEM;
		release = 1;
	}

	if (!PageUptodate(page)) {
		void *kaddr = kmap(page);
		u64 dsize = i_size_read(inode);

		if (dsize > gfs2_max_stuffed_size(ip))
			dsize = gfs2_max_stuffed_size(ip);

		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
		memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
		kunmap(page);

		SetPageUptodate(page);
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, BIT(inode->i_blkbits),
				     BIT(BH_Uptodate));

	bh = page_buffers(page);

	if (!buffer_mapped(bh))
		map_bh(bh, inode->i_sb, block);

	set_buffer_uptodate(bh);
	if (gfs2_is_jdata(ip))
		gfs2_trans_add_data(ip->i_gl, bh);
	else {
		mark_buffer_dirty(bh);
		gfs2_ordered_add_inode(ip);
	}

	if (release) {
		unlock_page(page);
		put_page(page);
	}

	return 0;
}

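/**
 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
 * @ip: The GFS2 inode to unstuff
 * @page: The (optional) page. This is looked up if @page is NULL
 *
 * This routine unstuffs a dinode and returns it to a "normal" state such
 * that the height can be grown in the traditional way.
 *
 * Returns: errno
 */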
int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *bh, *dibh;
	struct gfs2_dinode *di;
	u64 block = 0;
	int isdir = gfs2_is_dir(ip);
	int error;

	down_write(&ip->i_rw_mutex);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (i_size_read(&ip->i_inode)) {
		/* Get a free block, fill it with the stuffed data,
		   and write it out to disk */

		unsigned int n = 1;
		error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
		if (error)
			goto out_brelse;
		if (isdir) {
			gfs2_trans_remove_revoke(GFS2_SB(&ip->i_inode), block, 1);
			error = gfs2_dir_get_new_buffer(ip, block, &bh);
			if (error)
				goto out_brelse;
			gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
					      dibh, sizeof(struct gfs2_dinode));
			brelse(bh);
		} else {
			error = gfs2_unstuffer_page(ip, dibh, block, page);
			if (error)
				goto out_brelse;
		}
	}

	/*  Set up the pointer to the new block  */

	gfs2_trans_add_meta(ip->i_gl, dibh);
	di = (struct gfs2_dinode *)dibh->b_data;
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));

	if (i_size_read(&ip->i_inode)) {
		*(__be64 *)(di + 1) = cpu_to_be64(block);
		gfs2_add_inode_blocks(&ip->i_inode, 1);
		di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	}

	ip->i_height = 1;
	di->di_height = cpu_to_be16(1);

out_brelse:
	brelse(dibh);
out:
	up_write(&ip->i_rw_mutex);
	return error;
}

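/**
 * find_metapath - Find path through the metadata tree
 * @sdp: The superblock
 * @block: The disk block to look up
 * @mp: The metapath to return the result in
 * @height: The pre-calculated height of the metadata tree
 *
 * Builds the chain of indirect-pointer indices that leads from the dinode
 * down to @block: at each height, the index is @block modulo the number of
 * pointers per indirect block.
 *
 * Worked example (numbers chosen for illustration, not taken from this
 * file): with 4 KiB blocks an indirect block holds 509 pointers, so for a
 * height-3 tree, logical block 1000000 yields mp_list[0] = 3,
 * mp_list[1] = 437 and mp_list[2] = 324, since
 * 1000000 = (3 * 509 + 437) * 509 + 324.
 */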
static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
			  struct metapath *mp, unsigned int height)
{
	unsigned int i;

	mp->mp_fheight = height;
	for (i = height; i--;)
		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
}

static inline unsigned int metapath_branch_start(const struct metapath *mp)
{
	if (mp->mp_list[0] == 0)
		return 2;
	return 1;
}

/**
 * metaptr1 - Return the first possible metadata pointer in a metapath buffer
 * @height: The metadata height (0 = dinode)
 * @mp: The metapath
 */
static inline __be64 *metaptr1(unsigned int height, const struct metapath *mp)
{
	struct buffer_head *bh = mp->mp_bh[height];
	if (height == 0)
		return ((__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)));
	return ((__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header)));
}

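/**
 * metapointer - Return pointer to the metadata indicated by metapath
 * @height: The metadata height (0 = dinode)
 * @mp: The metapath
 *
 * Return a pointer to the block number of the next height of the metadata
 * tree given a buffer containing the pointer to the current height of the
 * metadata tree.
 */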
static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
{
	__be64 *p = metaptr1(height, mp);
	return p + mp->mp_list[height];
}

static inline const __be64 *metaend(unsigned int height, const struct metapath *mp)
{
	const struct buffer_head *bh = mp->mp_bh[height];
	return (const __be64 *)(bh->b_data + bh->b_size);
}

static void clone_metapath(struct metapath *clone, struct metapath *mp)
{
	unsigned int hgt;

	*clone = *mp;
	for (hgt = 0; hgt < mp->mp_aheight; hgt++)
		get_bh(clone->mp_bh[hgt]);
}

static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
{
	const __be64 *t;

	for (t = start; t < end; t++) {
		struct buffer_head *rabh;

		if (!*t)
			continue;

		rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
		if (trylock_buffer(rabh)) {
			if (!buffer_uptodate(rabh)) {
				rabh->b_end_io = end_buffer_read_sync;
				submit_bh(REQ_OP_READ,
					  REQ_RAHEAD | REQ_META | REQ_PRIO,
					  rabh);
				continue;
			}
			unlock_buffer(rabh);
		}
		brelse(rabh);
	}
}

static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp,
			     unsigned int x, unsigned int h)
{
	for (; x < h; x++) {
		__be64 *ptr = metapointer(x, mp);
		u64 dblock = be64_to_cpu(*ptr);
		int ret;

		if (!dblock)
			break;
		ret = gfs2_meta_indirect_buffer(ip, x + 1, dblock, &mp->mp_bh[x + 1]);
		if (ret)
			return ret;
	}
	mp->mp_aheight = x + 1;
	return 0;
}

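/**
 * lookup_metapath - Walk the metadata tree to a specific point
 * @ip: The inode
 * @mp: The metapath
 *
 * Assumes that the inode's dinode buffer has already been looked up and
 * hooked onto mp->mp_bh[0] and that the metapath has been initialised to
 * point to the location up to which the lookup should occur.  On exit,
 * mp->mp_aheight tells how many buffers could actually be filled in.
 *
 * Returns: error
 */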
static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
{
	return __fillup_metapath(ip, mp, 0, ip->i_height - 1);
}

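/**
 * fillup_metapath - fill up buffers for the metadata path to a specific height
 * @ip: The inode
 * @mp: The metapath
 * @h: The height to which it should be mapped
 *
 * Similar to lookup_metapath, but does lookups for a range of heights
 *
 * Returns: error or the number of buffers filled
 */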
static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
{
	unsigned int x = 0;
	int ret;

	if (h) {
		/* find the first buffer we need to look up. */
		for (x = h - 1; x > 0; x--) {
			if (mp->mp_bh[x])
				break;
		}
	}
	ret = __fillup_metapath(ip, mp, x, h);
	if (ret)
		return ret;
	return mp->mp_aheight - x - 1;
}

static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp)
{
	sector_t factor = 1, block = 0;
	int hgt;

	for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) {
		if (hgt < mp->mp_aheight)
			block += mp->mp_list[hgt] * factor;
		factor *= sdp->sd_inptrs;
	}
	return block;
}

static void release_metapath(struct metapath *mp)
{
	int i;

	for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
		if (mp->mp_bh[i] == NULL)
			break;
		brelse(mp->mp_bh[i]);
		mp->mp_bh[i] = NULL;
	}
}

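/**
 * gfs2_extent_length - Returns length of an extent of blocks
 * @bh: The metadata block
 * @ptr: Current position in @bh
 * @limit: Max extent length to return (not enforced by this version)
 * @eob: Set to 1 if we hit "end of block"
 *
 * Returns: The length of the extent (minimum of one block)
 */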
static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, size_t limit, int *eob)
{
	const __be64 *end = (__be64 *)(bh->b_data + bh->b_size);
	const __be64 *first = ptr;
	u64 d = be64_to_cpu(*ptr);

	*eob = 0;
	do {
		ptr++;
		if (ptr >= end)
			break;
		d++;
	} while(be64_to_cpu(*ptr) == d);
	if (ptr >= end)
		*eob = 1;
	return ptr - first;
}

enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE };

/*
 * gfs2_metadata_walker - walk an indirect block
 * @mp: Metapath to indirect block
 * @ptrs: Number of pointers to look at
 *
 * When returning WALK_FOLLOW, the walker must update @mp to point at the
 * right point of interest in the indirect block.
 */
typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp,
						   unsigned int ptrs);

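/*
 * gfs2_walk_metadata - walk a tree of metadata blocks
 * @inode: The inode
 * @mp: Starting point of walk
 * @max_len: Maximum number of blocks to walk
 * @walker: Called during the walk
 *
 * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len
 * or past the end of metadata, and a negative error code otherwise.
 */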
static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
		u64 max_len, gfs2_metadata_walker walker)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	u64 factor = 1;
	unsigned int hgt;
	int ret;

	/*
	 * The walk starts in the lowest allocated indirect block, which may be
	 * before the position indicated by @mp.  Adjust @max_len accordingly
	 * to avoid a short walk.
	 */
	for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) {
		max_len += mp->mp_list[hgt] * factor;
		mp->mp_list[hgt] = 0;
		factor *= sdp->sd_inptrs;
	}

	for (;;) {
		u16 start = mp->mp_list[hgt];
		enum walker_status status;
		unsigned int ptrs;
		u64 len;

		/* Walk indirect block. */
		ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start;
		len = ptrs * factor;
		if (len > max_len)
			ptrs = DIV_ROUND_UP_ULL(max_len, factor);
		status = walker(mp, ptrs);
		switch (status) {
		case WALK_STOP:
			return 1;
		case WALK_FOLLOW:
			BUG_ON(mp->mp_aheight == mp->mp_fheight);
			ptrs = mp->mp_list[hgt] - start;
			len = ptrs * factor;
			break;
		case WALK_CONTINUE:
			break;
		}
		if (len >= max_len)
			break;
		max_len -= len;
		if (status == WALK_FOLLOW)
			goto fill_up_metapath;

lower_metapath:
		/* Decrease height of metapath. */
		brelse(mp->mp_bh[hgt]);
		mp->mp_bh[hgt] = NULL;
		mp->mp_list[hgt] = 0;
		if (!hgt)
			break;
		hgt--;
		factor *= sdp->sd_inptrs;

		/* Advance in metadata tree. */
		(mp->mp_list[hgt])++;
		if (hgt) {
			if (mp->mp_list[hgt] >= sdp->sd_inptrs)
				goto lower_metapath;
		} else {
			if (mp->mp_list[hgt] >= sdp->sd_diptrs)
				break;
		}

fill_up_metapath:
		/* Increase height of metapath. */
		ret = fillup_metapath(ip, mp, ip->i_height - 1);
		if (ret < 0)
			return ret;
		hgt += ret;
		for (; ret; ret--)
			do_div(factor, sdp->sd_inptrs);
		mp->mp_aheight = hgt + 1;
	}
	return 0;
}

static enum walker_status gfs2_hole_walker(struct metapath *mp,
					   unsigned int ptrs)
{
	const __be64 *start, *ptr, *end;
	unsigned int hgt;

	hgt = mp->mp_aheight - 1;
	start = metapointer(hgt, mp);
	end = start + ptrs;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr) {
			mp->mp_list[hgt] += ptr - start;
			if (mp->mp_aheight == mp->mp_fheight)
				return WALK_STOP;
			return WALK_FOLLOW;
		}
	}
	return WALK_CONTINUE;
}

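/**
 * gfs2_hole_size - figure out the size of a hole
 * @inode: The inode
 * @lblock: The logical starting block number
 * @len: How far to look (in blocks)
 * @mp: The metapath at lblock
 * @iomap: The iomap to store the hole size in
 *
 * This function modifies @mp.
 *
 * Returns: errno on error
 */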
static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
			  struct metapath *mp, struct iomap *iomap)
{
	struct metapath clone;
	u64 hole_size;
	int ret;

	clone_metapath(&clone, mp);
	ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker);
	if (ret < 0)
		goto out;

	if (ret == 1)
		hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock;
	else
		hole_size = len;
	iomap->length = hole_size << inode->i_blkbits;
	ret = 0;

out:
	release_metapath(&clone);
	return ret;
}

static inline __be64 *gfs2_indirect_init(struct metapath *mp,
					 struct gfs2_glock *gl, unsigned int i,
					 unsigned offset, u64 bn)
{
	__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
			((i > 1) ? sizeof(struct gfs2_meta_header) :
				   sizeof(struct gfs2_dinode)));
	BUG_ON(i < 1);
	BUG_ON(mp->mp_bh[i] != NULL);
	mp->mp_bh[i] = gfs2_meta_new(gl, bn);
	gfs2_trans_add_meta(gl, mp->mp_bh[i]);
	gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
	gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
	ptr += offset;
	*ptr = cpu_to_be64(bn);
	return ptr;
}

enum alloc_state {
	ALLOC_DATA = 0,
	ALLOC_GROW_DEPTH = 1,
	ALLOC_GROW_HEIGHT = 2,
	/* ALLOC_UNSTUFF = 3,   TBD and rather complicated */
};

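/**
 * gfs2_iomap_alloc - Build a metadata tree of the requested height
 * @inode: The GFS2 inode
 * @iomap: The iomap structure
 * @mp: The metapath, with proper height information calculated
 *
 * In this routine we may have to alloc:
 *   i) Indirect blocks to grow the metadata tree height
 *  ii) Indirect blocks to fill in lower part of the metadata tree
 * iii) Data blocks
 *
 * This function is called after gfs2_iomap_get, which works out the total
 * number of blocks which we need via gfs2_alloc_size.  We then do the
 * actual allocation asking for an extent at a time (if enough contiguous
 * free blocks are available, there will only be one allocation request per
 * call) and use the state machine below to initialise the blocks in order.
 *
 * Returns: errno on error
 */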
static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
			    struct metapath *mp)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *dibh = mp->mp_bh[0];
	u64 bn;
	unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
	size_t dblks = iomap->length >> inode->i_blkbits;
	const unsigned end_of_metadata = mp->mp_fheight - 1;
	int ret;
	enum alloc_state state;
	__be64 *ptr;
	__be64 zero_bn = 0;

	BUG_ON(mp->mp_aheight < 1);
	BUG_ON(dibh == NULL);
	BUG_ON(dblks < 1);

	gfs2_trans_add_meta(ip->i_gl, dibh);

	down_write(&ip->i_rw_mutex);

	if (mp->mp_fheight == mp->mp_aheight) {
		/* Bottom indirect block exists */
		state = ALLOC_DATA;
	} else {
		/* Need to allocate indirect blocks */
		if (mp->mp_fheight == ip->i_height) {
			/* Writing into existing tree, extend tree down */
			iblks = mp->mp_fheight - mp->mp_aheight;
			state = ALLOC_GROW_DEPTH;
		} else {
			/* Building up tree height */
			state = ALLOC_GROW_HEIGHT;
			iblks = mp->mp_fheight - ip->i_height;
			branch_start = metapath_branch_start(mp);
			iblks += (mp->mp_fheight - branch_start);
		}
	}

	/* start of the second part of the function (state machine) */

	blks = dblks + iblks;
	i = mp->mp_aheight;
	do {
		n = blks - alloced;
		ret = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
		if (ret)
			goto out;
		alloced += n;
		if (state != ALLOC_DATA || gfs2_is_jdata(ip))
			gfs2_trans_remove_revoke(sdp, bn, n);
		switch (state) {
		/* Growing height of tree */
		case ALLOC_GROW_HEIGHT:
			if (i == 1) {
				ptr = (__be64 *)(dibh->b_data +
						 sizeof(struct gfs2_dinode));
				zero_bn = *ptr;
			}
			for (; i - 1 < mp->mp_fheight - ip->i_height && n > 0;
			     i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
			if (i - 1 == mp->mp_fheight - ip->i_height) {
				i--;
				gfs2_buffer_copy_tail(mp->mp_bh[i],
						sizeof(struct gfs2_meta_header),
						dibh, sizeof(struct gfs2_dinode));
				gfs2_buffer_clear_tail(dibh,
						sizeof(struct gfs2_dinode) +
						sizeof(__be64));
				ptr = (__be64 *)(mp->mp_bh[i]->b_data +
					sizeof(struct gfs2_meta_header));
				*ptr = zero_bn;
				state = ALLOC_GROW_DEPTH;
				for(i = branch_start; i < mp->mp_fheight; i++) {
					if (mp->mp_bh[i] == NULL)
						break;
					brelse(mp->mp_bh[i]);
					mp->mp_bh[i] = NULL;
				}
				i = branch_start;
			}
			if (n == 0)
				break;
		/* fall through - To branching from existing tree */
		case ALLOC_GROW_DEPTH:
			if (i > 1 && i < mp->mp_fheight)
				gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
			for (; i < mp->mp_fheight && n > 0; i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i,
						   mp->mp_list[i-1], bn++);
			if (i == mp->mp_fheight)
				state = ALLOC_DATA;
			if (n == 0)
				break;
		/* fall through - To tree complete, adding data blocks */
		case ALLOC_DATA:
			BUG_ON(n > dblks);
			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
			gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
			dblks = n;
			ptr = metapointer(end_of_metadata, mp);
			iomap->addr = bn << inode->i_blkbits;
			iomap->flags |= IOMAP_F_MERGED | IOMAP_F_NEW;
			while (n-- > 0)
				*ptr++ = cpu_to_be64(bn++);
			break;
		}
	} while (iomap->addr == IOMAP_NULL_ADDR);

	iomap->type = IOMAP_MAPPED;
	iomap->length = (u64)dblks << inode->i_blkbits;
	ip->i_height = mp->mp_fheight;
	gfs2_add_inode_blocks(&ip->i_inode, alloced);
	gfs2_dinode_out(ip, dibh->b_data);
out:
	up_write(&ip->i_rw_mutex);
	return ret;
}

#define IOMAP_F_GFS2_BOUNDARY IOMAP_F_PRIVATE

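/**
 * gfs2_alloc_size - Compute the maximum allocation size
 * @inode: The inode
 * @mp: The metapath
 * @size: Requested size in blocks
 *
 * Compute the maximum size of the next allocation at @mp.
 *
 * Returns: size in blocks
 */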
static u64 gfs2_alloc_size(struct inode *inode, struct metapath *mp, u64 size)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	const __be64 *first, *ptr, *end;

	/*
	 * For writes to stuffed files, this function is called twice via
	 * gfs2_iomap_get, before and after unstuffing. The size we return the
	 * first time needs to be large enough to get the reservation and
	 * allocation sizes right.  The size we return the second time must
	 * be exact or else gfs2_iomap_alloc won't do the right thing.
	 */

	if (gfs2_is_stuffed(ip) || mp->mp_fheight != mp->mp_aheight) {
		unsigned int maxsize = mp->mp_fheight > 1 ?
			sdp->sd_inptrs : sdp->sd_diptrs;
		maxsize -= mp->mp_list[mp->mp_fheight - 1];
		if (size > maxsize)
			size = maxsize;
		return size;
	}

	first = metapointer(ip->i_height - 1, mp);
	end = metaend(ip->i_height - 1, mp);
	if (end - first > size)
		end = first + size;
	for (ptr = first; ptr < end; ptr++) {
		if (*ptr)
			break;
	}
	return ptr - first;
}

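/**
 * gfs2_iomap_get - Map blocks from an inode to disk blocks
 * @inode: The inode
 * @pos: Starting position in bytes
 * @length: Length to map, in bytes
 * @flags: iomap flags
 * @iomap: The iomap structure
 * @mp: The metapath
 *
 * Returns: errno
 */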
static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
			  unsigned flags, struct iomap *iomap,
			  struct metapath *mp)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t size = i_size_read(inode);
	__be64 *ptr;
	sector_t lblock;
	sector_t lblock_stop;
	int ret;
	int eob;
	u64 len;
	struct buffer_head *dibh = NULL, *bh;
	u8 height;

	if (!length)
		return -EINVAL;

	down_read(&ip->i_rw_mutex);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (ret)
		goto unlock;
	mp->mp_bh[0] = dibh;

	if (gfs2_is_stuffed(ip)) {
		if (flags & IOMAP_WRITE) {
			loff_t max_size = gfs2_max_stuffed_size(ip);

			if (pos + length > max_size)
				goto unstuff;
			iomap->length = max_size;
		} else {
			if (pos >= size) {
				if (flags & IOMAP_REPORT) {
					ret = -ENOENT;
					goto unlock;
				} else {
					iomap->offset = pos;
					iomap->length = length;
					goto hole_found;
				}
			}
			iomap->length = size;
		}
		iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
			      sizeof(struct gfs2_dinode);
		iomap->type = IOMAP_INLINE;
		iomap->inline_data = dibh->b_data + sizeof(struct gfs2_dinode);
		goto out;
	}

unstuff:
	lblock = pos >> inode->i_blkbits;
	iomap->offset = lblock << inode->i_blkbits;
	lblock_stop = (pos + length - 1) >> inode->i_blkbits;
	len = lblock_stop - lblock + 1;
	iomap->length = len << inode->i_blkbits;

	height = ip->i_height;
	while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height])
		height++;
	find_metapath(sdp, lblock, mp, height);
	if (height > ip->i_height || gfs2_is_stuffed(ip))
		goto do_alloc;

	ret = lookup_metapath(ip, mp);
	if (ret)
		goto unlock;

	if (mp->mp_aheight != ip->i_height)
		goto do_alloc;

	ptr = metapointer(ip->i_height - 1, mp);
	if (*ptr == 0)
		goto do_alloc;

	bh = mp->mp_bh[ip->i_height - 1];
	len = gfs2_extent_length(bh, ptr, len, &eob);

	iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
	iomap->length = len << inode->i_blkbits;
	iomap->type = IOMAP_MAPPED;
	iomap->flags |= IOMAP_F_MERGED;
	if (eob)
		iomap->flags |= IOMAP_F_GFS2_BOUNDARY;

out:
	iomap->bdev = inode->i_sb->s_bdev;
unlock:
	up_read(&ip->i_rw_mutex);
	return ret;

do_alloc:
	if (flags & IOMAP_REPORT) {
		if (pos >= size)
			ret = -ENOENT;
		else if (height == ip->i_height)
			ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
		else
			iomap->length = size - pos;
	} else if (flags & IOMAP_WRITE) {
		u64 alloc_size;

		if (flags & IOMAP_DIRECT)
			goto out;	/* (see gfs2_file_direct_write) */

		len = gfs2_alloc_size(inode, mp, len);
		alloc_size = len << inode->i_blkbits;
		if (alloc_size < iomap->length)
			iomap->length = alloc_size;
	} else {
		if (pos < size && height == ip->i_height)
			ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
	}
hole_found:
	iomap->addr = IOMAP_NULL_ADDR;
	iomap->type = IOMAP_HOLE;
	goto out;
}

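/**
 * gfs2_lblk_to_dblk - convert logical block to disk block
 * @inode: the inode of the file we're mapping
 * @lblock: the block relative to the start of the file
 * @dblock: the returned dblock, if no error
 *
 * This function maps a single block from a file logical block (relative to
 * the start of the file) to a file system absolute block using iomap.
 *
 * Returns: the absolute file system block, or an error
 */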
int gfs2_lblk_to_dblk(struct inode *inode, u32 lblock, u64 *dblock)
{
	struct iomap iomap = { };
	struct metapath mp = { .mp_aheight = 1, };
	loff_t pos = (loff_t)lblock << inode->i_blkbits;
	int ret;

	ret = gfs2_iomap_get(inode, pos, i_blocksize(inode), 0, &iomap, &mp);
	release_metapath(&mp);
	if (ret == 0)
		*dblock = iomap.addr >> inode->i_blkbits;

	return ret;
}

static int gfs2_write_lock(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (error)
		goto out_uninit;
	if (&ip->i_inode == sdp->sd_rindex) {
		struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);

		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &m_ip->i_gh);
		if (error)
			goto out_unlock;
	}
	return 0;

out_unlock:
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}

static void gfs2_write_unlock(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (&ip->i_inode == sdp->sd_rindex) {
		struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);

		gfs2_glock_dq_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq_uninit(&ip->i_gh);
}

static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
				   unsigned len, struct iomap *iomap)
{
	unsigned int blockmask = i_blocksize(inode) - 1;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned int blocks;

	blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits;
	return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
}

static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
				 unsigned copied, struct page *page,
				 struct iomap *iomap)
{
	struct gfs2_trans *tr = current->journal_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (page && !gfs2_is_stuffed(ip))
		gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);

	if (tr->tr_num_buf_new)
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);

	gfs2_trans_end(sdp);
}

static const struct iomap_page_ops gfs2_iomap_page_ops = {
	.page_prepare = gfs2_iomap_page_prepare,
	.page_done = gfs2_iomap_page_done,
};

static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
				  loff_t length, unsigned flags,
				  struct iomap *iomap,
				  struct metapath *mp)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	bool unstuff;
	int ret;

	unstuff = gfs2_is_stuffed(ip) &&
		  pos + length > gfs2_max_stuffed_size(ip);

	if (unstuff || iomap->type == IOMAP_HOLE) {
		unsigned int data_blocks, ind_blocks;
		struct gfs2_alloc_parms ap = {};
		unsigned int rblocks;
		struct gfs2_trans *tr;

		gfs2_write_calc_reserv(ip, iomap->length, &data_blocks,
				       &ind_blocks);
		ap.target = data_blocks + ind_blocks;
		ret = gfs2_quota_lock_check(ip, &ap);
		if (ret)
			return ret;

		ret = gfs2_inplace_reserve(ip, &ap);
		if (ret)
			goto out_qunlock;

		rblocks = RES_DINODE + ind_blocks;
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks;
		if (ind_blocks || data_blocks)
			rblocks += RES_STATFS + RES_QUOTA;
		if (inode == sdp->sd_rindex)
			rblocks += 2 * RES_STATFS;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);

		ret = gfs2_trans_begin(sdp, rblocks,
				       iomap->length >> inode->i_blkbits);
		if (ret)
			goto out_trans_fail;

		if (unstuff) {
			ret = gfs2_unstuff_dinode(ip, NULL);
			if (ret)
				goto out_trans_end;
			release_metapath(mp);
			ret = gfs2_iomap_get(inode, iomap->offset,
					     iomap->length, flags, iomap, mp);
			if (ret)
				goto out_trans_end;
		}

		if (iomap->type == IOMAP_HOLE) {
			ret = gfs2_iomap_alloc(inode, iomap, mp);
			if (ret) {
				gfs2_trans_end(sdp);
				gfs2_inplace_release(ip);
				punch_hole(ip, iomap->offset, iomap->length);
				goto out_qunlock;
			}
		}

		tr = current->journal_info;
		if (tr->tr_num_buf_new)
			__mark_inode_dirty(inode, I_DIRTY_DATASYNC);

		gfs2_trans_end(sdp);
	}

	if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip))
		iomap->page_ops = &gfs2_iomap_page_ops;
	return 0;

out_trans_end:
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
	return ret;
}

static inline bool gfs2_iomap_need_write_lock(unsigned flags)
{
	return (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT);
}

static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
			    unsigned flags, struct iomap *iomap)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct metapath mp = { .mp_aheight = 1, };
	int ret;

	iomap->flags |= IOMAP_F_BUFFER_HEAD;

	trace_gfs2_iomap_start(ip, pos, length, flags);
	if (gfs2_iomap_need_write_lock(flags)) {
		ret = gfs2_write_lock(inode);
		if (ret)
			goto out;
	}

	ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
	if (ret)
		goto out_unlock;

	switch(flags & (IOMAP_WRITE | IOMAP_ZERO)) {
	case IOMAP_WRITE:
		if (flags & IOMAP_DIRECT) {
			/*
			 * Silently fall back to buffered I/O for stuffed files
			 * or if we've hit a hole (see gfs2_file_direct_write).
			 */
			if (iomap->type != IOMAP_MAPPED)
				ret = -ENOTBLK;
			goto out_unlock;
		}
		break;
	case IOMAP_ZERO:
		if (iomap->type == IOMAP_HOLE)
			goto out_unlock;
		break;
	default:
		goto out_unlock;
	}

	ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp);

out_unlock:
	if (ret && gfs2_iomap_need_write_lock(flags))
		gfs2_write_unlock(inode);
	release_metapath(&mp);
out:
	trace_gfs2_iomap_end(ip, iomap, ret);
	return ret;
}

static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
			  ssize_t written, unsigned flags, struct iomap *iomap)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	switch (flags & (IOMAP_WRITE | IOMAP_ZERO)) {
	case IOMAP_WRITE:
		if (flags & IOMAP_DIRECT)
			return 0;
		break;
	case IOMAP_ZERO:
		if (iomap->type == IOMAP_HOLE)
			return 0;
		break;
	default:
		return 0;
	}

	if (!gfs2_is_stuffed(ip))
		gfs2_ordered_add_inode(ip);

	if (inode == sdp->sd_rindex)
		adjust_fs_space(inode);

	gfs2_inplace_release(ip);

	if (length != written && (iomap->flags & IOMAP_F_NEW)) {
		/* Deallocate blocks that were just allocated. */
		loff_t blockmask = i_blocksize(inode) - 1;
		loff_t end = (pos + length) & ~blockmask;

		pos = (pos + written + blockmask) & ~blockmask;
		if (pos < end) {
			truncate_pagecache_range(inode, pos, end - 1);
			punch_hole(ip, pos, end - pos);
		}
	}

	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
		gfs2_quota_unlock(ip);

	if (unlikely(!written))
		goto out_unlock;

	if (iomap->flags & IOMAP_F_SIZE_CHANGED)
		mark_inode_dirty(inode);
	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);

out_unlock:
	if (gfs2_iomap_need_write_lock(flags))
		gfs2_write_unlock(inode);
	return 0;
}

const struct iomap_ops gfs2_iomap_ops = {
	.iomap_begin = gfs2_iomap_begin,
	.iomap_end = gfs2_iomap_end,
};

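/**
 * gfs2_block_map - Map one or more blocks of an inode to a disk block
 * @inode: The inode
 * @lblock: The logical block number
 * @bh_map: The bh to be mapped
 * @create: True if it's ok to alloc blocks to satify the request
 *
 * The size of the requested mapping is defined in bh_map->b_size.
 *
 * Clears buffer_mapped(bh_map) and leaves bh_map->b_size unchanged
 * when @lblock is not mapped.  Sets buffer_mapped(bh_map) and
 * bh_map->b_size to indicate the size of the mapping when @lblock and
 * successive blocks are mapped, up to the requested size.
 *
 * Sets buffer_boundary() if a read of metadata will be required
 * before the next block can be mapped. Sets buffer_new() if new
 * blocks were allocated.
 *
 * Returns: errno
 */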
int gfs2_block_map(struct inode *inode, sector_t lblock,
		   struct buffer_head *bh_map, int create)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	loff_t pos = (loff_t)lblock << inode->i_blkbits;
	loff_t length = bh_map->b_size;
	struct metapath mp = { .mp_aheight = 1, };
	struct iomap iomap = { };
	int ret;

	clear_buffer_mapped(bh_map);
	clear_buffer_new(bh_map);
	clear_buffer_boundary(bh_map);
	trace_gfs2_bmap(ip, bh_map, lblock, create, 1);

	if (create) {
		ret = gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, &iomap, &mp);
		if (!ret && iomap.type == IOMAP_HOLE)
			ret = gfs2_iomap_alloc(inode, &iomap, &mp);
		release_metapath(&mp);
	} else {
		ret = gfs2_iomap_get(inode, pos, length, 0, &iomap, &mp);
		release_metapath(&mp);
	}
	if (ret)
		goto out;

	if (iomap.length > bh_map->b_size) {
		iomap.length = bh_map->b_size;
		iomap.flags &= ~IOMAP_F_GFS2_BOUNDARY;
	}
	if (iomap.addr != IOMAP_NULL_ADDR)
		map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits);
	bh_map->b_size = iomap.length;
	if (iomap.flags & IOMAP_F_GFS2_BOUNDARY)
		set_buffer_boundary(bh_map);
	if (iomap.flags & IOMAP_F_NEW)
		set_buffer_new(bh_map);

out:
	trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
	return ret;
}

/*
 * Deprecated: do not use in new code
 */
int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
{
	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
	int ret;
	int create = *new;

	BUG_ON(!extlen);
	BUG_ON(!dblock);
	BUG_ON(!new);

	bh.b_size = BIT(inode->i_blkbits + (create ? 0 : 5));
	ret = gfs2_block_map(inode, lblock, &bh, create);
	*extlen = bh.b_size >> inode->i_blkbits;
	*dblock = bh.b_blocknr;
	if (buffer_new(&bh))
		*new = 1;
	else
		*new = 0;
	return ret;
}

static int gfs2_block_zero_range(struct inode *inode, loff_t from,
				 unsigned int length)
{
	return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops);
}

#define GFS2_JTRUNC_REVOKES 8192

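/**
 * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
 * @inode: The inode being truncated
 * @oldsize: The original (larger) size
 * @newsize: The new smaller size
 *
 * With jdata files, we have to journal a revoke for each block which is
 * truncated. As a result, we need to split this into separate transactions
 * if the number of pages being truncated gets too large.
 */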
static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
	u64 chunk;
	int error;

	while (oldsize != newsize) {
		struct gfs2_trans *tr;
		unsigned int offs;

		chunk = oldsize - newsize;
		if (chunk > max_chunk)
			chunk = max_chunk;

		offs = oldsize & ~PAGE_MASK;
		if (offs && chunk > PAGE_SIZE)
			chunk = offs + ((chunk - offs) & PAGE_MASK);

		truncate_pagecache(inode, oldsize - chunk);
		oldsize -= chunk;

		tr = current->journal_info;
		if (!test_bit(TR_TOUCHED, &tr->tr_flags))
			continue;

		gfs2_trans_end(sdp);
		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
		if (error)
			return error;
	}

	return 0;
}

static int trunc_start(struct inode *inode, u64 newsize)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *dibh = NULL;
	int journaled = gfs2_is_jdata(ip);
	u64 oldsize = inode->i_size;
	int error;

	if (journaled)
		error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
	else
		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
	} else {
		unsigned int blocksize = i_blocksize(inode);
		unsigned int offs = newsize & (blocksize - 1);
		if (offs) {
			error = gfs2_block_zero_range(inode, newsize,
						      blocksize - offs);
			if (error)
				goto out;
		}
		ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
	}

	i_size_write(inode, newsize);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
	gfs2_dinode_out(ip, dibh->b_data);

	if (journaled)
		error = gfs2_journaled_truncate(inode, oldsize, newsize);
	else
		truncate_pagecache(inode, newsize);

out:
	brelse(dibh);
	if (current->journal_info)
		gfs2_trans_end(sdp);
	return error;
}

int gfs2_iomap_get_alloc(struct inode *inode, loff_t pos, loff_t length,
			 struct iomap *iomap)
{
	struct metapath mp = { .mp_aheight = 1, };
	int ret;

	ret = gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, iomap, &mp);
	if (!ret && iomap->type == IOMAP_HOLE)
		ret = gfs2_iomap_alloc(inode, iomap, &mp);
	release_metapath(&mp);
	return ret;
}

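/**
 * sweep_bh_for_rgrps - find an rgrp in a meta buffer and free blocks therein
 * @ip: inode
 * @rd_gh: holder of resource group glock
 * @bh: buffer head to sweep
 * @start: starting point in bh
 * @end: end point in bh
 * @meta: true if bh points to metadata (rather than data)
 * @btotal: place to keep count of total blocks freed
 *
 * We sweep a metadata buffer (provided by the metapath) for blocks we need to
 * free, and free them all. However, we do it one rgrp at a time. If this
 * block has references to multiple rgrps, we break it into individual
 * transactions. This allows other processes to use the rgrps while we're
 * focused on a single one, for better concurrency / performance.
 * At every transaction boundary, we rewrite the inode into the journal.
 * That way the bitmaps are kept consistent with the inode and we can recover
 * if we're interrupted by power-outages or system crashes.
 *
 * Returns: 0, or return code if an error occurred.
 *          *btotal has the total number of blocks freed
 */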
static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
			      struct buffer_head *bh, __be64 *start, __be64 *end,
			      bool meta, u32 *btotal)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_trans *tr;
	__be64 *p;
	int blks_outside_rgrp;
	u64 bn, bstart, isize_blks;
	s64 blen;
	int ret = 0;
	bool buf_in_tr = false;

more_rgrps:
	rgd = NULL;
	if (gfs2_holder_initialized(rd_gh)) {
		rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
		gfs2_assert_withdraw(sdp,
			     gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
	}
	blks_outside_rgrp = 0;
	bstart = 0;
	blen = 0;

	for (p = start; p < end; p++) {
		if (!*p)
			continue;
		bn = be64_to_cpu(*p);

		if (rgd) {
			if (!rgrp_contains_block(rgd, bn)) {
				blks_outside_rgrp++;
				continue;
			}
		} else {
			rgd = gfs2_blk2rgrpd(sdp, bn, true);
			if (unlikely(!rgd)) {
				ret = -EIO;
				goto out;
			}
			ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
						 0, rd_gh);
			if (ret)
				goto out;

			/* Must be done with the rgrp glock held: */
			if (gfs2_rs_active(&ip->i_res) &&
			    rgd == ip->i_res.rs_rbm.rgd)
				gfs2_rs_deltree(&ip->i_res);
		}

		/* The size of our transactions will be unknown until we
		   actually process all the metadata blocks that relate to
		   the rgrp. So we estimate. We know it can't be more than
		   the dinode's i_blocks and we don't want to exceed the
		   journal flush threshold, sd_log_thresh2. */
		if (current->journal_info == NULL) {
			unsigned int jblocks_rqsted, revokes;

			jblocks_rqsted = rgd->rd_length + RES_DINODE +
				RES_INDIRECT;
			isize_blks = gfs2_get_inode_blocks(&ip->i_inode);
			if (isize_blks > atomic_read(&sdp->sd_log_thresh2))
				jblocks_rqsted +=
					atomic_read(&sdp->sd_log_thresh2);
			else
				jblocks_rqsted += isize_blks;
			revokes = jblocks_rqsted;
			if (meta)
				revokes += end - start;
			else if (ip->i_depth)
				revokes += sdp->sd_inptrs;
			ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
			if (ret)
				goto out_unlock;
			down_write(&ip->i_rw_mutex);
		}
		/* check if we will exceed the transaction blocks requested */
		tr = current->journal_info;
		if (tr->tr_num_buf_new + RES_STATFS +
		    RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) {
			/* We set blks_outside_rgrp to ensure the loop will
			   be repeated for the same rgrp, but with a new
			   transaction. */
			blks_outside_rgrp++;
			/* This next part is tricky. If the buffer was added
			   to the transaction, we've already set some block
			   pointers to 0, so we better follow through and free
			   them, or we will introduce corruption (so break).
			   This may be impossible, or at least rare, but I
			   decided to cover the case regardless.

			   If the buffer was not added to the transaction
			   (this call), doing so would exceed our transaction
			   size, so we need to end the transaction and start a
			   new one (so goto). */
			if (buf_in_tr)
				break;
			goto out_unlock;
		}

		gfs2_trans_add_meta(ip->i_gl, bh);
		buf_in_tr = true;
		*p = 0;
		if (bstart + blen == bn) {
			blen++;
			continue;
		}
		if (bstart) {
			__gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
			(*btotal) += blen;
			gfs2_add_inode_blocks(&ip->i_inode, -blen);
		}
		bstart = bn;
		blen = 1;
	}
	if (bstart) {
		__gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
		(*btotal) += blen;
		gfs2_add_inode_blocks(&ip->i_inode, -blen);
	}
out_unlock:
	if (!ret && blks_outside_rgrp) { /* If buffer still has non-zero blocks
					    outside the rgrp we just processed,
					    do it all over again. */
		if (current->journal_info) {
			struct buffer_head *dibh;

			ret = gfs2_meta_inode_buffer(ip, &dibh);
			if (ret)
				goto out;

			/* Every transaction boundary, we rewrite the dinode
			   to keep its di_blocks current in case of failure. */
			ip->i_inode.i_mtime = ip->i_inode.i_ctime =
				current_time(&ip->i_inode);
			gfs2_trans_add_meta(ip->i_gl, dibh);
			gfs2_dinode_out(ip, dibh->b_data);
			brelse(dibh);
			up_write(&ip->i_rw_mutex);
			gfs2_trans_end(sdp);
			buf_in_tr = false;
		}
		gfs2_glock_dq_uninit(rd_gh);
		cond_resched();
		goto more_rgrps;
	}
out:
	return ret;
}

static bool mp_eq_to_hgt(struct metapath *mp, __u16 *list, unsigned int h)
{
	if (memcmp(mp->mp_list, list, h * sizeof(mp->mp_list[0])))
		return false;
	return true;
}

/**
 * find_nonnull_ptr - find a non-null pointer given a metapath and height
 * @sdp: The superblock
 * @mp: starting metapath
 * @h: desired height to search
 * @end_list: See punch_hole().
 * @end_aligned: See punch_hole().
 *
 * Assumes the metapath is valid (with buffers) out to height h.
 * Returns: true if a non-null pointer was found in the metapath buffer
 *          false if all remaining pointers are NULL in the buffer
 */
static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp,
			     unsigned int h,
			     __u16 *end_list, unsigned int end_aligned)
{
	struct buffer_head *bh = mp->mp_bh[h];
	__be64 *first, *ptr, *end;

	first = metaptr1(h, mp);
	ptr = first + mp->mp_list[h];
	end = (__be64 *)(bh->b_data + bh->b_size);
	if (end_list && mp_eq_to_hgt(mp, end_list, h)) {
		bool keep_end = h < end_aligned;
		end = first + end_list[h] + keep_end;
	}

	while (ptr < end) {
		if (*ptr) {
			mp->mp_list[h] = ptr - first;
			h++;
			if (h < GFS2_MAX_META_HEIGHT)
				mp->mp_list[h] = 0;
			return true;
		}
		ptr++;
	}
	return false;
}

enum dealloc_states {
	DEALLOC_MP_FULL = 0,    /* Strip a metapath with all buffers read in */
	DEALLOC_MP_LOWER = 1,   /* lower the metapath strip height */
	DEALLOC_FILL_MP = 2,    /* Fill in the metapath to the given height. */
	DEALLOC_DONE = 3,       /* process complete */
};

static inline void
metapointer_range(struct metapath *mp, int height,
		  __u16 *start_list, unsigned int start_aligned,
		  __u16 *end_list, unsigned int end_aligned,
		  __be64 **start, __be64 **end)
{
	struct buffer_head *bh = mp->mp_bh[height];
	__be64 *first;

	first = metaptr1(height, mp);
	*start = first;
	if (mp_eq_to_hgt(mp, start_list, height)) {
		bool keep_start = height < start_aligned;
		*start = first + start_list[height] + keep_start;
	}
	*end = (__be64 *)(bh->b_data + bh->b_size);
	if (end_list && mp_eq_to_hgt(mp, end_list, height)) {
		bool keep_end = height < end_aligned;
		*end = first + end_list[height] + keep_end;
	}
}

static inline bool walk_done(struct gfs2_sbd *sdp,
			     struct metapath *mp, int height,
			     __u16 *end_list, unsigned int end_aligned)
{
	__u16 end;

	if (end_list) {
		bool keep_end = height < end_aligned;
		if (!mp_eq_to_hgt(mp, end_list, height))
			return false;
		end = end_list[height] + keep_end;
	} else
		end = (height > 0) ? sdp->sd_inptrs : sdp->sd_diptrs;
	return mp->mp_list[height] >= end;
}

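/**
 * punch_hole - deallocate blocks in a file
 * @ip: inode to truncate
 * @offset: the start of the hole
 * @length: the size of the hole (or 0 for truncate)
 *
 * Punch a hole into a file or truncate a file at a given position.  This
 * function operates in whole blocks (@offset and @length need to be aligned);
 * partially filled blocks must be cleared otherwise.
 *
 * This function works from the bottom up, and from the right to the left. In
 * other words, it strips off the highest layer (data) before stripping any of
 * the metadata. Doing it this way is best in case the operation is
 * interrupted by power failure, etc.  The dinode is rewritten in every
 * transaction to guarantee integrity.
 */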
static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	u64 maxsize = sdp->sd_heightsize[ip->i_height];
	struct metapath mp = {};
	struct buffer_head *dibh, *bh;
	struct gfs2_holder rd_gh;
	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
	u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
	__u16 start_list[GFS2_MAX_META_HEIGHT];
	__u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
	unsigned int start_aligned, uninitialized_var(end_aligned);
	unsigned int strip_h = ip->i_height - 1;
	u32 btotal = 0;
	int ret, state;
	int mp_h; /* metapath buffers are read in to this height */
	u64 prev_bnr = 0;
	__be64 *start, *end;

	if (offset >= maxsize) {
		/*
		 * The starting point lies beyond the allocated metadata;
		 * there are no blocks to deallocate.
		 */
		return 0;
	}

	/*
	 * The start position of the hole is defined by lblock, start_list, and
	 * start_aligned.  The end position of the hole is defined by lend,
	 * end_list, and end_aligned.
	 *
	 * start_aligned and end_aligned define down to which height the start
	 * and end positions are aligned to the metadata tree (i.e., the
	 * position is a multiple of the metadata granularity at the height
	 * above).  This determines at which heights additional meta pointers
	 * need to be preserved for the remaining data.
	 */

	if (length) {
		u64 end_offset = offset + length;
		u64 lend;

		/*
		 * Clip the end of the hole at the maximum file size for the
		 * given height; beyond that, the metadata tree holds no
		 * blocks, so there is nothing to deallocate.
		 */
		if (end_offset > maxsize)
			end_offset = maxsize;
		lend = end_offset >> bsize_shift;

		if (lblock >= lend)
			return 0;

		find_metapath(sdp, lend, &mp, ip->i_height);
		end_list = __end_list;
		memcpy(end_list, mp.mp_list, sizeof(mp.mp_list));

		for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
			if (end_list[mp_h])
				break;
		}
		end_aligned = mp_h;
	}

	find_metapath(sdp, lblock, &mp, ip->i_height);
	memcpy(start_list, mp.mp_list, sizeof(start_list));

	for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
		if (start_list[mp_h])
			break;
	}
	start_aligned = mp_h;

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (ret)
		return ret;

	mp.mp_bh[0] = dibh;
	ret = lookup_metapath(ip, &mp);
	if (ret)
		goto out_metapath;

	/* issue read-ahead on metadata */
	for (mp_h = 0; mp_h < mp.mp_aheight - 1; mp_h++) {
		metapointer_range(&mp, mp_h, start_list, start_aligned,
				  end_list, end_aligned, &start, &end);
		gfs2_metapath_ra(ip->i_gl, start, end);
	}

	if (mp.mp_aheight == ip->i_height)
		state = DEALLOC_MP_FULL; /* strip a full metapath */
	else
		state = DEALLOC_FILL_MP; /* deal with partial metapath */

	ret = gfs2_rindex_update(sdp);
	if (ret)
		goto out_metapath;

	ret = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (ret)
		goto out_metapath;
	gfs2_holder_mark_uninitialized(&rd_gh);

	mp_h = strip_h;

	while (state != DEALLOC_DONE) {
		switch (state) {
		/* Truncate a full metapath at the given strip height.
		 * Note that strip_h == mp_h in order to be in this state. */
		case DEALLOC_MP_FULL:
			bh = mp.mp_bh[mp_h];
			gfs2_assert_withdraw(sdp, bh);
			if (gfs2_assert_withdraw(sdp,
						 prev_bnr != bh->b_blocknr)) {
				fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u, "
					 "s_h:%u, mp_h:%u\n",
					 (unsigned long long)ip->i_no_addr,
					 prev_bnr, ip->i_height, strip_h, mp_h);
			}
			prev_bnr = bh->b_blocknr;

			if (gfs2_metatype_check(sdp, bh,
						(mp_h ? GFS2_METATYPE_IN :
							GFS2_METATYPE_DI))) {
				ret = -EIO;
				goto out;
			}

			/*
			 * Below, passing end_aligned as 0 gives us the
			 * metapointer range excluding the end point: the end
			 * point is the first metapath we must not deallocate!
			 */
			metapointer_range(&mp, mp_h, start_list, start_aligned,
					  end_list, 0 /* end_aligned */,
					  &start, &end);
			ret = sweep_bh_for_rgrps(ip, &rd_gh, mp.mp_bh[mp_h],
						 start, end,
						 mp_h != ip->i_height - 1,
						 &btotal);

			/* If we hit an error or just swept dinode buffer,
			   just exit. */
			if (ret || !mp_h) {
				state = DEALLOC_DONE;
				break;
			}
			state = DEALLOC_MP_LOWER;
			break;

		/* lower the metapath strip height */
		case DEALLOC_MP_LOWER:
			/* We're done with the current buffer, so release it,
			   unless it's the dinode buffer. Then back up to the
			   previous pointer. */
			if (mp_h) {
				brelse(mp.mp_bh[mp_h]);
				mp.mp_bh[mp_h] = NULL;
			}
			/* If we can't get any lower in height, we've stripped
			   off all we can. Next step is to back up and start
			   stripping the previous level of metadata. */
			if (mp_h == 0) {
				strip_h--;
				memcpy(mp.mp_list, start_list, sizeof(start_list));
				mp_h = strip_h;
				state = DEALLOC_FILL_MP;
				break;
			}
			mp.mp_list[mp_h] = 0;
			mp_h--; /* search one metadata height down */
			mp.mp_list[mp_h]++;
			if (walk_done(sdp, &mp, mp_h, end_list, end_aligned))
				break;
			/* Here we've found a part of the metapath that is not
			 * allocated. We need to search at that height for the
			 * next non-null pointer. */
			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) {
				state = DEALLOC_FILL_MP;
				mp_h++;
			}
			/* No more non-null pointers at this height; stay in
			   this state and back up further on the next pass. */
			break;

		/* Fill the metapath with buffers to the given height. */
		case DEALLOC_FILL_MP:
			/* Fill the buffers out to the current height. */
			ret = fillup_metapath(ip, &mp, mp_h);
			if (ret < 0)
				goto out;

			/* On the first pass, issue read-ahead on metadata. */
			if (mp.mp_aheight > 1 && strip_h == ip->i_height - 1) {
				unsigned int height = mp.mp_aheight - 1;

				/* No read-ahead for data blocks. */
				if (mp.mp_aheight - 1 == strip_h)
					height--;

				for (; height >= mp.mp_aheight - ret; height--) {
					metapointer_range(&mp, height,
							  start_list, start_aligned,
							  end_list, end_aligned,
							  &start, &end);
					gfs2_metapath_ra(ip->i_gl, start, end);
				}
			}

			/* If buffers found for the entire strip height */
			if (mp.mp_aheight - 1 == strip_h) {
				state = DEALLOC_MP_FULL;
				break;
			}
			if (mp.mp_aheight < ip->i_height) /* We have a partial height */
				mp_h = mp.mp_aheight - 1;

			/* If we find a non-null block pointer, crawl a bit
			   higher up in the metapath and try again, otherwise
			   we need to look lower for a new starting point. */
			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned))
				mp_h++;
			else
				state = DEALLOC_MP_LOWER;
			break;
		}
	}

	if (btotal) {
		if (current->journal_info == NULL) {
			ret = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS +
					       RES_QUOTA, 0);
			if (ret)
				goto out;
			down_write(&ip->i_rw_mutex);
		}
		gfs2_statfs_change(sdp, 0, +btotal, 0);
		gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
				  ip->i_inode.i_gid);
		ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
		gfs2_trans_add_meta(ip->i_gl, dibh);
		gfs2_dinode_out(ip, dibh->b_data);
		up_write(&ip->i_rw_mutex);
		gfs2_trans_end(sdp);
	}

out:
	if (gfs2_holder_initialized(&rd_gh))
		gfs2_glock_dq_uninit(&rd_gh);
	if (current->journal_info) {
		up_write(&ip->i_rw_mutex);
		gfs2_trans_end(sdp);
		cond_resched();
	}
	gfs2_quota_unhold(ip);
out_metapath:
	release_metapath(&mp);
	return ret;
}

static int trunc_end(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	down_write(&ip->i_rw_mutex);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (!i_size_read(&ip->i_inode)) {
		ip->i_height = 0;
		ip->i_goal = ip->i_no_addr;
		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
		gfs2_ordered_del_inode(ip);
	}
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
	ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;

	gfs2_trans_add_meta(ip->i_gl, dibh);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);

out:
	up_write(&ip->i_rw_mutex);
	gfs2_trans_end(sdp);
	return error;
}

/**
 * do_shrink - make a file smaller
 * @inode: the inode
 * @newsize: the size to make the file
 *
 * Called with an exclusive lock on @inode. The @newsize must
 * be equal to or smaller than the current inode size.
 *
 * Returns: errno
 */
static int do_shrink(struct inode *inode, u64 newsize)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	int error;

	error = trunc_start(inode, newsize);
	if (error < 0)
		return error;
	if (gfs2_is_stuffed(ip))
		return 0;

	error = punch_hole(ip, newsize, 0);
	if (error == 0)
		error = trunc_end(ip);

	return error;
}

void gfs2_trim_blocks(struct inode *inode)
{
	int ret;

	ret = do_shrink(inode, inode->i_size);
	WARN_ON(ret != 0);
}

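/**
 * do_grow - Touch and update inode size
 * @inode: The inode
 * @size: The new size
 *
 * This function updates the timestamps on the inode and may also increase
 * the size of the inode. This function must not be called with @size any
 * smaller than the current inode size.
 *
 * Although it is not strictly required to unstuff files here, earlier
 * versions of GFS2 had a bug in the stuffed file reading code which would
 * result in a buffer overrun if the size was larger than the max stuffed
 * file size. In order to prevent this from occurring, such files are
 * unstuffed, but in other cases we can just update the inode size directly.
 *
 * Returns: 0 on success, or -ve on error
 */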
static int do_grow(struct inode *inode, u64 size)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .target = 1, };
	struct buffer_head *dibh;
	int error;
	int unstuff = 0;

	if (gfs2_is_stuffed(ip) && size > gfs2_max_stuffed_size(ip)) {
		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			return error;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto do_grow_qunlock;
		unstuff = 1;
	}

	error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
				 (unstuff &&
				  gfs2_is_jdata(ip) ? RES_JDATA : 0) +
				 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
				  0 : RES_QUOTA), 0);
	if (error)
		goto do_grow_release;

	if (unstuff) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (error)
			goto do_end_trans;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto do_end_trans;

	truncate_setsize(inode, size);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
	gfs2_trans_add_meta(ip->i_gl, dibh);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);

do_end_trans:
	gfs2_trans_end(sdp);
do_grow_release:
	if (unstuff) {
		gfs2_inplace_release(ip);
do_grow_qunlock:
		gfs2_quota_unlock(ip);
	}
	return error;
}

/**
 * gfs2_setattr_size - make a file a given size
 * @inode: the inode
 * @newsize: the size to make the file
 *
 * The file size can grow, shrink, or stay the same size. This
 * is called holding i_rwsem and an exclusive glock on the inode
 * in question.
 *
 * Returns: errno
 */
int gfs2_setattr_size(struct inode *inode, u64 newsize)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	BUG_ON(!S_ISREG(inode->i_mode));

	ret = inode_newsize_ok(inode, newsize);
	if (ret)
		return ret;

	inode_dio_wait(inode);

	ret = gfs2_rsqa_alloc(ip);
	if (ret)
		goto out;

	if (newsize >= inode->i_size) {
		ret = do_grow(inode, newsize);
		goto out;
	}

	ret = do_shrink(inode, newsize);
out:
	gfs2_rsqa_delete(ip, NULL);
	return ret;
}

int gfs2_truncatei_resume(struct gfs2_inode *ip)
{
	int error;
	error = punch_hole(ip, i_size_read(&ip->i_inode), 0);
	if (!error)
		error = trunc_end(ip);
	return error;
}

int gfs2_file_dealloc(struct gfs2_inode *ip)
{
	return punch_hole(ip, 0, 0);
}

/**
 * gfs2_free_journal_extents - Free cached journal bmap info
 * @jd: The journal
 *
 */
void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
{
	struct gfs2_journal_extent *jext;

	while(!list_empty(&jd->extent_list)) {
		jext = list_entry(jd->extent_list.next, struct gfs2_journal_extent, list);
		list_del(&jext->list);
		kfree(jext);
	}
}

/**
 * gfs2_add_jextent - Add or merge a new extent to extent cache
 * @jd: The journal descriptor
 * @lblock: The logical block at start of new extent
 * @dblock: The physical block at start of new extent
 * @blocks: Size of extent in fs blocks
 *
 * Returns: 0 on success or -ENOMEM
 */
static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
{
	struct gfs2_journal_extent *jext;

	if (!list_empty(&jd->extent_list)) {
		jext = list_entry(jd->extent_list.prev, struct gfs2_journal_extent, list);
		if ((jext->dblock + jext->blocks) == dblock) {
			jext->blocks += blocks;
			return 0;
		}
	}

	jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS);
	if (jext == NULL)
		return -ENOMEM;
	jext->dblock = dblock;
	jext->lblock = lblock;
	jext->blocks = blocks;
	list_add_tail(&jext->list, &jd->extent_list);
	jd->nr_extents++;
	return 0;
}

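/**
 * gfs2_map_journal_extents - Cache journal bmap info
 * @sdp: The super block
 * @jd: The journal to map
 *
 * Create a reusable "extent" mapping from all logical
 * blocks to all physical blocks for the given journal.  This will save
 * us time when writing journal blocks.  Most journals will have only one
 * extent that maps all their logical blocks.  That's because gfs2.mkfs
 * allocates the blocks for the journals with sequential layout.
 *
 * Before this function is called, we must have already looked up the journal
 * inode and found it and cached its tree structure in the buffer cache.
 *
 * Returns: 0 on success, or error on failure
 */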
int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
{
	u64 lblock = 0;
	u64 lblock_stop;
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct buffer_head bh;
	unsigned int shift = sdp->sd_sb.sb_bsize_shift;
	u64 size;
	int rc;
	ktime_t start, end;

	start = ktime_get();
	lblock_stop = i_size_read(jd->jd_inode) >> shift;
	size = (lblock_stop - lblock) << shift;
	jd->nr_extents = 0;
	WARN_ON(!list_empty(&jd->extent_list));

	do {
		bh.b_state = 0;
		bh.b_blocknr = 0;
		bh.b_size = size;
		rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0);
		if (rc || !buffer_mapped(&bh))
			goto fail;
		rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift);
		if (rc)
			goto fail;
		size -= bh.b_size;
		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
	} while(size > 0);

	end = ktime_get();
	fs_info(sdp, "journal %d mapped with %u extents in %lldms\n", jd->jd_jid,
		jd->nr_extents, ktime_ms_delta(end, start));
	return 0;

fail:
	fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
		rc, jd->jd_jid,
		(unsigned long long)(i_size_read(jd->jd_inode) - size),
		jd->nr_extents);
	fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
		rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr,
		bh.b_state, (unsigned long long)bh.b_size);
	gfs2_free_journal_extents(jd);
	return rc;
}

/**
 * gfs2_write_alloc_required - figure out if a write will require an allocation
 * @ip: the file being written to
 * @offset: the offset to write to
 * @len: the number of bytes being written
 *
 * Returns: 1 if an alloc is required, 0 otherwise
 */
int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
			      unsigned int len)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head bh;
	unsigned int shift;
	u64 lblock, lblock_stop, size;
	u64 end_of_file;

	if (!len)
		return 0;

	if (gfs2_is_stuffed(ip)) {
		if (offset + len > gfs2_max_stuffed_size(ip))
			return 1;
		return 0;
	}

	shift = sdp->sd_sb.sb_bsize_shift;
	BUG_ON(gfs2_is_dir(ip));
	end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
	lblock = offset >> shift;
	lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
	if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex))
		return 1;

	size = (lblock_stop - lblock) << shift;
	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
		if (!buffer_mapped(&bh))
			return 1;
		size -= bh.b_size;
		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
	} while(size > 0);

	return 0;
}

static int stuffed_zero_range(struct inode *inode, loff_t offset, loff_t length)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *dibh;
	int error;

	if (offset >= inode->i_size)
		return 0;
	if (offset + length > inode->i_size)
		length = inode->i_size - offset;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;
	gfs2_trans_add_meta(ip->i_gl, dibh);
	memset(dibh->b_data + sizeof(struct gfs2_dinode) + offset, 0,
	       length);
	brelse(dibh);
	return 0;
}

static int gfs2_journaled_truncate_range(struct inode *inode, loff_t offset,
					 loff_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
	int error;

	while (length) {
		struct gfs2_trans *tr;
		loff_t chunk;
		unsigned int offs;

		chunk = length;
		if (chunk > max_chunk)
			chunk = max_chunk;

		offs = offset & ~PAGE_MASK;
		if (offs && chunk > PAGE_SIZE)
			chunk = offs + ((chunk - offs) & PAGE_MASK);

		truncate_pagecache_range(inode, offset, chunk);
		offset += chunk;
		length -= chunk;

		tr = current->journal_info;
		if (!test_bit(TR_TOUCHED, &tr->tr_flags))
			continue;

		gfs2_trans_end(sdp);
		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
		if (error)
			return error;
	}
	return 0;
}

int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
{
	struct inode *inode = file_inode(file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	if (gfs2_is_jdata(ip))
		error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
					 GFS2_JTRUNC_REVOKES);
	else
		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	if (gfs2_is_stuffed(ip)) {
		error = stuffed_zero_range(inode, offset, length);
		if (error)
			goto out;
	} else {
		unsigned int start_off, end_len, blocksize;

		blocksize = i_blocksize(inode);
		start_off = offset & (blocksize - 1);
		end_len = (offset + length) & (blocksize - 1);
		if (start_off) {
			unsigned int len = length;
			if (length > blocksize - start_off)
				len = blocksize - start_off;
			error = gfs2_block_zero_range(inode, offset, len);
			if (error)
				goto out;
			if (start_off + length < blocksize)
				end_len = 0;
		}
		if (end_len) {
			error = gfs2_block_zero_range(inode,
				offset + length - end_len, end_len);
			if (error)
				goto out;
		}
	}

	if (gfs2_is_jdata(ip)) {
		BUG_ON(!current->journal_info);
		gfs2_journaled_truncate_range(inode, offset, length);
	} else
		truncate_pagecache_range(inode, offset, offset + length - 1);

	file_update_time(file);
	mark_inode_dirty(inode);

	if (current->journal_info)
		gfs2_trans_end(sdp);

	if (!gfs2_is_stuffed(ip))
		error = punch_hole(ip, offset, length);

out:
	if (current->journal_info)
		gfs2_trans_end(sdp);
	return error;
}