This source file includes the following definitions:
- NILFS_SUI
- nilfs_sufile_segment_usages_per_block
- nilfs_sufile_get_blkoff
- nilfs_sufile_get_offset
- nilfs_sufile_segment_usages_in_block
- nilfs_sufile_block_get_segment_usage
- nilfs_sufile_get_header_block
- nilfs_sufile_get_segment_usage_block
- nilfs_sufile_delete_segment_usage_block
- nilfs_sufile_mod_counter
- nilfs_sufile_get_ncleansegs
- nilfs_sufile_updatev
- nilfs_sufile_update
- nilfs_sufile_set_alloc_range
- nilfs_sufile_alloc
- nilfs_sufile_do_cancel_free
- nilfs_sufile_do_scrap
- nilfs_sufile_do_free
- nilfs_sufile_mark_dirty
- nilfs_sufile_set_segment_usage
- nilfs_sufile_get_stat
- nilfs_sufile_do_set_error
- nilfs_sufile_truncate_range
- nilfs_sufile_resize
- nilfs_sufile_get_suinfo
- nilfs_sufile_set_suinfo
- nilfs_sufile_trim_fs
- nilfs_sufile_read
/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Koji Sato.
 * Revised by Ryusuke Konishi.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include "mdt.h"
#include "sufile.h"

#include <trace/events/nilfs2.h>
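
/**
 * struct nilfs_sufile_info - on-memory private data of sufile
 * @mi: on-memory private data of the metadata file
 * @ncleansegs: number of clean segments
 * @allocmin: lower limit of allocatable segment range
 * @allocmax: upper limit of allocatable segment range
 */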
struct nilfs_sufile_info {
	struct nilfs_mdt_info mi;
	unsigned long ncleansegs;
	__u64 allocmin;
	__u64 allocmax;
};

static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
	return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}

static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
	return NILFS_MDT(sufile)->mi_entries_per_block;
}

static unsigned long
nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

	do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
	return (unsigned long)t;
}

static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

	return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}

static unsigned long
nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
				     __u64 max)
{
	return min_t(unsigned long,
		     nilfs_sufile_segment_usages_per_block(sufile) -
		     nilfs_sufile_get_offset(sufile, curr),
		     max - curr + 1);
}

static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
				     struct buffer_head *bh, void *kaddr)
{
	return kaddr + bh_offset(bh) +
		nilfs_sufile_get_offset(sufile, segnum) *
		NILFS_MDT(sufile)->mi_entry_size;
}

static inline int nilfs_sufile_get_header_block(struct inode *sufile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}

static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
				     int create, struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile,
				   nilfs_sufile_get_blkoff(sufile, segnum),
				   create, NULL, bhp);
}

static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile,
						   __u64 segnum)
{
	return nilfs_mdt_delete_block(sufile,
				      nilfs_sufile_get_blkoff(sufile, segnum));
}

static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
				     u64 ncleanadd, u64 ndirtyadd)
{
	struct nilfs_sufile_header *header;
	void *kaddr;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
}
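
/**
 * nilfs_sufile_get_ncleansegs - return the number of clean segments
 * @sufile: inode of segment usage file
 */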
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
{
	return NILFS_SUI(sufile)->ncleansegs;
}
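
/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store number of modified segments on @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained.  If @ndone is given, the number of
 * successfully modified segments from the head is stored in the place
 * @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 *            @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */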
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
			 int create, size_t *ndone,
			 void (*dofunc)(struct inode *, __u64,
					struct buffer_head *,
					struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	unsigned long blkoff, prev_blkoff;
	__u64 *seg;
	size_t nerr = 0, n = 0;
	int ret = 0;

	if (unlikely(nsegs == 0))
		goto out;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	for (seg = segnumv; seg < segnumv + nsegs; seg++) {
		if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
			nilfs_msg(sufile->i_sb, KERN_WARNING,
				  "%s: invalid segment number: %llu",
				  __func__, (unsigned long long)*seg);
			nerr++;
		}
	}
	if (nerr > 0) {
		ret = -EINVAL;
		goto out_sem;
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	seg = segnumv;
	blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
	ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		dofunc(sufile, *seg, header_bh, bh);

		if (++seg >= segnumv + nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
		if (blkoff == prev_blkoff)
			continue;
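
		/* get different block */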
		brelse(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_header;
	}
	brelse(bh);

 out_header:
	n = seg - segnumv;
	brelse(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
 out:
	if (ndone)
		*ndone = n;
	return ret;
}

int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
			void (*dofunc)(struct inode *, __u64,
				       struct buffer_head *,
				       struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	int ret;

	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
		nilfs_msg(sufile->i_sb, KERN_WARNING,
			  "%s: invalid segment number: %llu",
			  __func__, (unsigned long long)segnum);
		return -EINVAL;
	}
	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
	if (!ret) {
		dofunc(sufile, segnum, header_bh, bh);
		brelse(bh);
	}
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
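
/**
 * nilfs_sufile_set_alloc_range - limit range of segments to be allocated
 * @sufile: inode of segment usage file
 * @start: minimum segment number of allocatable region (inclusive)
 * @end: maximum segment number of allocatable region (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, the following
 * negative error code is returned.
 *
 * %-ERANGE - invalid segment region
 */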
int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
{
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	__u64 nsegs;
	int ret = -ERANGE;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	nsegs = nilfs_sufile_get_nsegments(sufile);

	if (start <= end && end < nsegs) {
		sui->allocmin = start;
		sui->allocmax = end;
		ret = 0;
	}
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
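
/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed by @segnump. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */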
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
	struct buffer_head *header_bh, *su_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	__u64 segnum, maxsegnum, last_alloc;
	void *kaddr;
	unsigned long nsegments, nsus, cnt;
	int ret, j;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_atomic(kaddr);

	nsegments = nilfs_sufile_get_nsegments(sufile);
	maxsegnum = sui->allocmax;
	segnum = last_alloc + 1;
	if (segnum < sui->allocmin || segnum > sui->allocmax)
		segnum = sui->allocmin;

	for (cnt = 0; cnt < nsegments; cnt += nsus) {
		if (segnum > maxsegnum) {
			if (cnt < sui->allocmax - sui->allocmin + 1) {
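				/*
				 * wrap around in the limited region.
				 * if allocation started from
				 * sui->allocmin, this never happens.
				 */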
				segnum = sui->allocmin;
				maxsegnum = last_alloc;
			} else if (segnum > sui->allocmin &&
				   sui->allocmax + 1 < nsegments) {
				segnum = sui->allocmax + 1;
				maxsegnum = nsegments - 1;
			} else if (sui->allocmin > 0) {
				segnum = 0;
				maxsegnum = sui->allocmin - 1;
			} else {
				break;
			}
		}
		trace_nilfs2_segment_usage_check(sufile, segnum, cnt);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
							   &su_bh);
		if (ret < 0)
			goto out_header;
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);

		nsus = nilfs_sufile_segment_usages_in_block(
			sufile, segnum, maxsegnum);
		for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
			if (!nilfs_segment_usage_clean(su))
				continue;
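			/* found a clean segment */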
			nilfs_segment_usage_set_dirty(su);
			kunmap_atomic(kaddr);

			kaddr = kmap_atomic(header_bh->b_page);
			header = kaddr + bh_offset(header_bh);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_atomic(kaddr);

			sui->ncleansegs--;
			mark_buffer_dirty(header_bh);
			mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);
			brelse(su_bh);
			*segnump = segnum;

			trace_nilfs2_segment_usage_allocated(sufile, segnum);

			goto out_header;
		}

		kunmap_atomic(kaddr);
		brelse(su_bh);
	}
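
	/* no segments left */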
	ret = -ENOSPC;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		nilfs_msg(sufile->i_sb, KERN_WARNING,
			  "%s: segment %llu must be clean", __func__,
			  (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, -1, 1);
	NILFS_SUI(sufile)->ncleansegs--;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (su->su_flags == cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY)) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		kunmap_atomic(kaddr);
		return;
	}
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);
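
	/* make the segment garbage */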
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY));
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	NILFS_SUI(sufile)->ncleansegs -= clean;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		nilfs_msg(sufile->i_sb, KERN_WARNING,
			  "%s: segment %llu is already clean",
			  __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	WARN_ON(nilfs_segment_usage_error(su));
	WARN_ON(!nilfs_segment_usage_dirty(su));

	sudirty = nilfs_segment_usage_dirty(su);
	nilfs_segment_usage_set_clean(su);
	kunmap_atomic(kaddr);
	mark_buffer_dirty(su_bh);

	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	NILFS_SUI(sufile)->ncleansegs++;

	nilfs_mdt_mark_dirty(sufile);

	trace_nilfs2_segment_usage_freed(sufile, segnum);
}
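
/**
 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
 * @sufile: inode of segment usage file
 * @segnum: segment number
 */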
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
	struct buffer_head *bh;
	int ret;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (!ret) {
		mark_buffer_dirty(bh);
		nilfs_mdt_mark_dirty(sufile);
		brelse(bh);
	}
	return ret;
}
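
/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (option)
 */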
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
				   unsigned long nblocks, time64_t modtime)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	WARN_ON(nilfs_segment_usage_error(su));
	if (modtime)
		su->su_lastmod = cpu_to_le64(modtime);
	su->su_nblocks = cpu_to_le32(nblocks);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(sufile);
	brelse(bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
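
/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */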
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap_atomic(kaddr);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr);

	if (suclean) {
		nilfs_sufile_mod_counter(header_bh, -1, 0);
		NILFS_SUI(sufile)->ncleansegs--;
	}
	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
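
/**
 * nilfs_sufile_truncate_range - truncate range of segment array
 * @sufile: inode of segment usage file
 * @start: start segment number (inclusive)
 * @end: end segment number (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid number of segments specified
 *
 * %-EBUSY - Dirty or active segments are present in the range
 */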
static int nilfs_sufile_truncate_range(struct inode *sufile,
				       __u64 start, __u64 end)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su, *su2;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	unsigned long segusages_per_block;
	unsigned long nsegs, ncleaned;
	__u64 segnum;
	void *kaddr;
	ssize_t n, nc;
	int ret;
	int j;

	nsegs = nilfs_sufile_get_nsegments(sufile);

	ret = -EINVAL;
	if (start > end || start >= nsegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	ncleaned = 0;

	for (segnum = start; segnum <= end; segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  end - segnum + 1);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_header;
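			/* hole */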
			continue;
		}
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		su2 = su;
		for (j = 0; j < n; j++, su = (void *)su + susz) {
			if ((le32_to_cpu(su->su_flags) &
			     ~BIT(NILFS_SEGMENT_USAGE_ERROR)) ||
			    nilfs_segment_is_active(nilfs, segnum + j)) {
				ret = -EBUSY;
				kunmap_atomic(kaddr);
				brelse(su_bh);
				goto out_header;
			}
		}
		nc = 0;
		for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
			if (nilfs_segment_usage_error(su)) {
				nilfs_segment_usage_set_clean(su);
				nc++;
			}
		}
		kunmap_atomic(kaddr);
		if (nc > 0) {
			mark_buffer_dirty(su_bh);
			ncleaned += nc;
		}
		brelse(su_bh);

		if (n == segusages_per_block) {
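			/* make hole */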
			nilfs_sufile_delete_segment_usage_block(sufile, segnum);
		}
	}
	ret = 0;

 out_header:
	if (ncleaned > 0) {
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
		nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
		nilfs_mdt_mark_dirty(sufile);
	}
	brelse(header_bh);
 out:
	return ret;
}
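
/**
 * nilfs_sufile_resize - resize segment array
 * @sufile: inode of segment usage file
 * @newnsegs: new number of segments
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - Enough free space is not left for shrinking
 *
 * %-EBUSY - Dirty or active segments exist in the region to be truncated
 */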
int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	void *kaddr;
	unsigned long nsegs, nrsvsegs;
	int ret = 0;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	nsegs = nilfs_sufile_get_nsegments(sufile);
	if (nsegs == newnsegs)
		goto out;

	ret = -ENOSPC;
	nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
	if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	if (newnsegs > nsegs) {
		sui->ncleansegs += newnsegs - nsegs;
	} else {
		ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
		if (ret < 0)
			goto out_header;

		sui->ncleansegs -= nsegs - newnsegs;
	}

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(sufile);
	nilfs_set_nsegments(nilfs, newnsegs);

 out_header:
	brelse(header_bh);
 out:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
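
/**
 * nilfs_sufile_get_suinfo - get segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Return Value: On success, the number of segment usage entries stored in
 * @buf is returned.  On error, one of the following negative error codes
 * is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */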
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
				unsigned int sisz, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_suinfo *si = buf;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
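			/* hole */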
			memset(si, 0, sisz * n);
			si = (void *)si + sisz * n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n;
		     j++, su = (void *)su + susz, si = (void *)si + sisz) {
			si->sui_lastmod = le64_to_cpu(su->su_lastmod);
			si->sui_nblocks = le32_to_cpu(su->su_nblocks);
			si->sui_flags = le32_to_cpu(su->su_flags) &
				~BIT(NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si->sui_flags |=
					BIT(NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr);
		brelse(su_bh);
	}
	ret = nsegs;

 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
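
/**
 * nilfs_sufile_set_suinfo - sets segment usage info
 * @sufile: inode of segment usage file
 * @buf: array of suinfo_update
 * @supsz: byte size of suinfo_update
 * @nsup: size of suinfo_update array
 *
 * Description: Takes an array of nilfs_suinfo_update structs and updates
 * segment usage accordingly. Only the fields indicated by the sup_flags
 * member are updated.
 *
 * Return Value: On success, 0 is returned. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
 */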
ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
				unsigned int supsz, size_t nsup)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh, *bh;
	struct nilfs_suinfo_update *sup, *supend = buf + supsz * nsup;
	struct nilfs_segment_usage *su;
	void *kaddr;
	unsigned long blkoff, prev_blkoff;
	int cleansi, cleansu, dirtysi, dirtysu;
	long ncleaned = 0, ndirtied = 0;
	int ret = 0;

	if (unlikely(nsup == 0))
		return ret;

	for (sup = buf; sup < supend; sup = (void *)sup + supsz) {
		if (sup->sup_segnum >= nilfs->ns_nsegments
			|| (sup->sup_flags &
				(~0UL << __NR_NILFS_SUINFO_UPDATE_FIELDS))
			|| (nilfs_suinfo_update_nblocks(sup) &&
				sup->sup_sui.sui_nblocks >
				nilfs->ns_blocks_per_segment))
			return -EINVAL;
	}

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	sup = buf;
	blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
	ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		kaddr = kmap_atomic(bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, sup->sup_segnum, bh, kaddr);

		if (nilfs_suinfo_update_lastmod(sup))
			su->su_lastmod = cpu_to_le64(sup->sup_sui.sui_lastmod);

		if (nilfs_suinfo_update_nblocks(sup))
			su->su_nblocks = cpu_to_le32(sup->sup_sui.sui_nblocks);

		if (nilfs_suinfo_update_flags(sup)) {
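			/*
			 * Active flag is a virtual flag projected by running
			 * nilfs kernel code - drop it not to write it to
			 * disk.
			 */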
			sup->sup_sui.sui_flags &=
					~BIT(NILFS_SEGMENT_USAGE_ACTIVE);

			cleansi = nilfs_suinfo_clean(&sup->sup_sui);
			cleansu = nilfs_segment_usage_clean(su);
			dirtysi = nilfs_suinfo_dirty(&sup->sup_sui);
			dirtysu = nilfs_segment_usage_dirty(su);

			if (cleansi && !cleansu)
				++ncleaned;
			else if (!cleansi && cleansu)
				--ncleaned;

			if (dirtysi && !dirtysu)
				++ndirtied;
			else if (!dirtysi && dirtysu)
				--ndirtied;

			su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags);
		}

		kunmap_atomic(kaddr);

		sup = (void *)sup + supsz;
		if (sup >= supend)
			break;

		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
		if (blkoff == prev_blkoff)
			continue;
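
		/* get different block */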
		mark_buffer_dirty(bh);
		put_bh(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_mark;
	}
	mark_buffer_dirty(bh);
	put_bh(bh);

 out_mark:
	if (ncleaned || ndirtied) {
		nilfs_sufile_mod_counter(header_bh, (u64)ncleaned,
				(u64)ndirtied);
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
	}
	nilfs_mdt_mark_dirty(sufile);
 out_header:
	put_bh(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
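
/**
 * nilfs_sufile_trim_fs() - trim ioctl handle function
 * @sufile: inode of segment usage file
 * @range: fstrim_range structure
 *
 * start: First Byte to trim
 * len: number of Bytes to trim from start
 * minlen: minimum extent length in Bytes
 *
 * Description: nilfs_sufile_trim_fs goes through all segments containing
 * bytes from start to start+len. start is rounded up to the next block
 * boundary and start+len is rounded down. For each clean segment
 * blkdev_issue_discard() is invoked.
 *
 * Return Value: On success, 0 is returned, otherwise a negative error code.
 */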
int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	size_t n, i, susz = NILFS_MDT(sufile)->mi_entry_size;
	sector_t seg_start, seg_end, start_block, end_block;
	sector_t start = 0, nblocks = 0;
	u64 segnum, segnum_end, minlen, len, max_blocks, ndiscarded = 0;
	int ret = 0;
	unsigned int sects_per_block;

	sects_per_block = (1 << nilfs->ns_blocksize_bits) /
		bdev_logical_block_size(nilfs->ns_bdev);
	len = range->len >> nilfs->ns_blocksize_bits;
	minlen = range->minlen >> nilfs->ns_blocksize_bits;
	max_blocks = ((u64)nilfs->ns_nsegments * nilfs->ns_blocks_per_segment);

	if (!len || range->start >= max_blocks << nilfs->ns_blocksize_bits)
		return -EINVAL;

	start_block = (range->start + nilfs->ns_blocksize - 1) >>
		      nilfs->ns_blocksize_bits;
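
	/*
	 * range->len can be very large (actually, it is set to
	 * ULLONG_MAX by default) - truncate upper end of the range
	 * carefully so as not to overflow.
	 */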
	if (max_blocks - start_block < len)
		end_block = max_blocks - 1;
	else
		end_block = start_block + len - 1;

	segnum = nilfs_get_segnum_of_block(nilfs, start_block);
	segnum_end = nilfs_get_segnum_of_block(nilfs, end_block);

	down_read(&NILFS_MDT(sufile)->mi_sem);

	while (segnum <= segnum_end) {
		n = nilfs_sufile_segment_usages_in_block(sufile, segnum,
				segnum_end);

		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_sem;
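			/* hole */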
			segnum += n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(sufile, segnum,
				su_bh, kaddr);
		for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) {
			if (!nilfs_segment_usage_clean(su))
				continue;

			nilfs_get_segment_range(nilfs, segnum, &seg_start,
						&seg_end);

			if (!nblocks) {
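				/* start a new extent */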
				start = seg_start;
				nblocks = seg_end - seg_start + 1;
				continue;
			}

			if (start + nblocks == seg_start) {
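				/* add to previous extent */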
				nblocks += seg_end - seg_start + 1;
				continue;
			}
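
			/* discard previous extent */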
			if (start < start_block) {
				nblocks -= start_block - start;
				start = start_block;
			}

			if (nblocks >= minlen) {
				kunmap_atomic(kaddr);

				ret = blkdev_issue_discard(nilfs->ns_bdev,
						start * sects_per_block,
						nblocks * sects_per_block,
						GFP_NOFS, 0);
				if (ret < 0) {
					put_bh(su_bh);
					goto out_sem;
				}

				ndiscarded += nblocks;
				kaddr = kmap_atomic(su_bh->b_page);
				su = nilfs_sufile_block_get_segment_usage(
					sufile, segnum, su_bh, kaddr);
			}
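
			/* start a new extent */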
			start = seg_start;
			nblocks = seg_end - seg_start + 1;
		}
		kunmap_atomic(kaddr);
		put_bh(su_bh);
	}

	if (nblocks) {
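		/* discard last extent */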
		if (start < start_block) {
			nblocks -= start_block - start;
			start = start_block;
		}
		if (start + nblocks > end_block + 1)
			nblocks = end_block - start + 1;

		if (nblocks >= minlen) {
			ret = blkdev_issue_discard(nilfs->ns_bdev,
					start * sects_per_block,
					nblocks * sects_per_block,
					GFP_NOFS, 0);
			if (!ret)
				ndiscarded += nblocks;
		}
	}

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);

	range->len = ndiscarded << nilfs->ns_blocksize_bits;
	return ret;
}
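
/**
 * nilfs_sufile_read - read or get sufile inode
 * @sb: super block instance
 * @susize: size of a segment usage entry
 * @raw_inode: on-disk sufile inode
 * @inodep: buffer to store the inode
 */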
int nilfs_sufile_read(struct super_block *sb, size_t susize,
		      struct nilfs_inode *raw_inode, struct inode **inodep)
{
	struct inode *sufile;
	struct nilfs_sufile_info *sui;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	void *kaddr;
	int err;

	if (susize > sb->s_blocksize) {
		nilfs_msg(sb, KERN_ERR,
			  "too large segment usage size: %zu bytes", susize);
		return -EINVAL;
	} else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) {
		nilfs_msg(sb, KERN_ERR,
			  "too small segment usage size: %zu bytes", susize);
		return -EINVAL;
	}

	sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
	if (unlikely(!sufile))
		return -ENOMEM;
	if (!(sufile->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
	if (err)
		goto failed;

	nilfs_mdt_set_entry_size(sufile, susize,
				 sizeof(struct nilfs_sufile_header));

	err = nilfs_read_inode_common(sufile, raw_inode);
	if (err)
		goto failed;

	err = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (err)
		goto failed;

	sui = NILFS_SUI(sufile);
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	kunmap_atomic(kaddr);
	brelse(header_bh);

	sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
	sui->allocmin = 0;

	unlock_new_inode(sufile);
 out:
	*inodep = sufile;
	return 0;
 failed:
	iget_failed(sufile);
	return err;
}