This source file includes the following definitions:
- f2fs_build_fault_attr
- f2fs_printk
- f2fs_sb_read_encoding
- limit_reserve_root
- init_once
- f2fs_set_qf_name
- f2fs_clear_qf_name
- f2fs_check_quota_options
- parse_options
- f2fs_alloc_inode
- f2fs_drop_inode
- f2fs_inode_dirtied
- f2fs_inode_synced
- f2fs_dirty_inode
- f2fs_free_inode
- destroy_percpu_info
- destroy_device_list
- f2fs_put_super
- f2fs_sync_fs
- f2fs_freeze
- f2fs_unfreeze
- f2fs_statfs_project
- f2fs_statfs
- f2fs_show_quota_options
- f2fs_show_options
- default_options
- f2fs_disable_checkpoint
- f2fs_enable_checkpoint
- f2fs_remount
- f2fs_quota_read
- f2fs_quota_write
- f2fs_get_dquots
- f2fs_get_reserved_space
- f2fs_quota_on_mount
- f2fs_enable_quota_files
- f2fs_quota_enable
- f2fs_enable_quotas
- f2fs_quota_sync
- f2fs_quota_on
- __f2fs_quota_off
- f2fs_quota_off
- f2fs_quota_off_umount
- f2fs_truncate_quota_inode_pages
- f2fs_dquot_commit
- f2fs_dquot_acquire
- f2fs_dquot_release
- f2fs_dquot_mark_dquot_dirty
- f2fs_dquot_commit_info
- f2fs_get_projid
- f2fs_quota_sync
- f2fs_quota_off_umount
- f2fs_get_context
- f2fs_set_context
- f2fs_dummy_context
- f2fs_nfs_get_inode
- f2fs_fh_to_dentry
- f2fs_fh_to_parent
- max_file_blocks
- __f2fs_commit_super
- sanity_check_area_boundary
- sanity_check_raw_super
- f2fs_sanity_check_ckpt
- init_sb_info
- init_percpu_info
- init_blkz_info
- read_raw_super_block
- f2fs_commit_super
- f2fs_scan_devices
- f2fs_setup_casefold
- f2fs_tuning_parameters
- f2fs_fill_super
- f2fs_mount
- kill_f2fs_super
- init_inodecache
- destroy_inodecache
- init_f2fs_fs
- exit_f2fs_fs
// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>
#include <linux/unicode.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct kmem_cache *f2fs_inode_cachep;

#ifdef CONFIG_F2FS_FAULT_INJECTION

const char *f2fs_fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_KVMALLOC]	= "kvmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_PAGE_GET]	= "page get",
	[FAULT_ALLOC_BIO]	= "alloc bio",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_TRUNCATE]	= "truncate fail",
	[FAULT_READ_IO]		= "read IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
	[FAULT_DISCARD]		= "discard error",
	[FAULT_WRITE_IO]	= "write IO error",
};

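/*
 * Tune the fault-injection attributes: a non-zero @rate resets the op
 * counter and sets the injection rate, a non-zero @type selects which
 * FAULT_* sites may trigger, and rate == 0 && type == 0 clears both.
 */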
void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
	}

	if (type)
		ffi->inject_type = type;

	if (!rate && !type)
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
}
#endif

/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};

enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_nodiscard,
	Opt_noheap,
	Opt_heap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_noinline_xattr,
	Opt_inline_xattr_size,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_noinline_dentry,
	Opt_flush_merge,
	Opt_noflush_merge,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_data_flush,
	Opt_reserve_root,
	Opt_resgid,
	Opt_resuid,
	Opt_mode,
	Opt_io_size_bits,
	Opt_fault_injection,
	Opt_fault_type,
	Opt_lazytime,
	Opt_nolazytime,
	Opt_quota,
	Opt_noquota,
	Opt_usrquota,
	Opt_grpquota,
	Opt_prjquota,
	Opt_usrjquota,
	Opt_grpjquota,
	Opt_prjjquota,
	Opt_offusrjquota,
	Opt_offgrpjquota,
	Opt_offprjjquota,
	Opt_jqfmt_vfsold,
	Opt_jqfmt_vfsv0,
	Opt_jqfmt_vfsv1,
	Opt_whint,
	Opt_alloc,
	Opt_fsync,
	Opt_test_dummy_encryption,
	Opt_checkpoint_disable,
	Opt_checkpoint_disable_cap,
	Opt_checkpoint_disable_cap_perc,
	Opt_checkpoint_enable,
	Opt_err,
};

static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_heap, "heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_xattr_size, "inline_xattr_size=%u"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_reserve_root, "reserve_root=%u"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_fault_type, "fault_type=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_prjjquota, "prjjquota=%s"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_offprjjquota, "prjjquota="},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_whint, "whint_mode=%s"},
	{Opt_alloc, "alloc_mode=%s"},
	{Opt_fsync, "fsync_mode=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_checkpoint_disable, "checkpoint=disable"},
	{Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
	{Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
	{Opt_checkpoint_enable, "checkpoint=enable"},
	{Opt_err, NULL},
};

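/*
 * printk() front end: printk_get_level()/printk_skip_level() peel the
 * KERN_* prefix off @fmt, and the message is re-emitted at that level
 * with an "F2FS-fs (<device>):" prefix identifying the filesystem.
 */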
void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int level;

	va_start(args, fmt);

	level = printk_get_level(fmt);
	vaf.fmt = printk_skip_level(fmt);
	vaf.va = &args;
	printk("%c%cF2FS-fs (%s): %pV\n",
	       KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);

	va_end(args);
}

#ifdef CONFIG_UNICODE
static const struct f2fs_sb_encodings {
	__u16 magic;
	char *name;
	char *version;
} f2fs_sb_encoding_map[] = {
	{F2FS_ENC_UTF8_12_1, "utf8", "12.1.0"},
};

static int f2fs_sb_read_encoding(const struct f2fs_super_block *sb,
				 const struct f2fs_sb_encodings **encoding,
				 __u16 *flags)
{
	__u16 magic = le16_to_cpu(sb->s_encoding);
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
		if (magic == f2fs_sb_encoding_map[i].magic)
			break;

	if (i >= ARRAY_SIZE(f2fs_sb_encoding_map))
		return -EINVAL;

	*encoding = &f2fs_sb_encoding_map[i];
	*flags = le16_to_cpu(sb->s_encoding_flags);

	return 0;
}
#endif

static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
	block_t limit = min((sbi->user_block_count << 1) / 1000,
			sbi->user_block_count - sbi->reserved_blocks);

	/* limit is 0.2% */
	if (test_opt(sbi, RESERVE_ROOT) &&
			F2FS_OPTION(sbi).root_reserved_blocks > limit) {
		F2FS_OPTION(sbi).root_reserved_blocks = limit;
		f2fs_info(sbi, "Reduce reserved blocks for root = %u",
			  F2FS_OPTION(sbi).root_reserved_blocks);
	}
	if (!test_opt(sbi, RESERVE_ROOT) &&
		(!uid_eq(F2FS_OPTION(sbi).s_resuid,
				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
		!gid_eq(F2FS_OPTION(sbi).s_resgid,
				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
		f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
			  from_kuid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resuid),
			  from_kgid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resgid));
}

static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
							substring_t *args)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *qname;
	int ret = -EINVAL;

	if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
		return -EINVAL;
	}
	if (f2fs_sb_has_quota_ino(sbi)) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
		return 0;
	}

	qname = match_strdup(args);
	if (!qname) {
		f2fs_err(sbi, "Not enough memory for storing quotafile name");
		return -ENOMEM;
	}
	if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
		if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
			ret = 0;
		else
			f2fs_err(sbi, "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		f2fs_err(sbi, "quotafile must be on filesystem root");
		goto errout;
	}
	F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
	set_opt(sbi, QUOTA);
	return 0;
errout:
	kvfree(qname);
	return ret;
}

static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
		return -EINVAL;
	}
	kvfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
	F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
	return 0;
}

static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
		f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
		return -1;
	}
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
		if (test_opt(sbi, USRQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
			clear_opt(sbi, USRQUOTA);

		if (test_opt(sbi, GRPQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
			clear_opt(sbi, GRPQUOTA);

		if (test_opt(sbi, PRJQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
			clear_opt(sbi, PRJQUOTA);

		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
				test_opt(sbi, PRJQUOTA)) {
			f2fs_err(sbi, "old and new quota format mixing");
			return -1;
		}

		if (!F2FS_OPTION(sbi).s_jquota_fmt) {
			f2fs_err(sbi, "journaled quota format not specified");
			return -1;
		}
	}

	if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
		F2FS_OPTION(sbi).s_jquota_fmt = 0;
	}
	return 0;
}
#endif

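/*
 * Parse the comma-separated mount option string into F2FS_OPTION(sbi)
 * and reject option combinations that cannot work together (io_bits
 * without mode=lfs, inline_xattr_size without inline_xattr, and so on).
 */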
static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	substring_t args[MAX_OPT_ARGS];
	char *p, *name;
	int arg = 0;
	kuid_t uid;
	kgid_t gid;
#ifdef CONFIG_QUOTA
	int ret;
#endif

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				set_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, BG_GC);
				set_opt(sbi, FORCE_FG_GC);
			} else {
				kvfree(name);
				return -EINVAL;
			}
			kvfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, NORECOVERY);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			set_opt(sbi, DISCARD);
			break;
		case Opt_nodiscard:
			if (f2fs_sb_has_blkzoned(sbi)) {
				f2fs_warn(sbi, "discard is required for zoned block devices");
				return -EINVAL;
			}
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
		case Opt_heap:
			clear_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
			break;
		case Opt_inline_xattr_size:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			set_opt(sbi, INLINE_XATTR_SIZE);
			F2FS_OPTION(sbi).inline_xattr_size = arg;
			break;
#else
		case Opt_user_xattr:
			f2fs_info(sbi, "user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_info(sbi, "nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_info(sbi, "inline_xattr options not supported");
			break;
		case Opt_noinline_xattr:
			f2fs_info(sbi, "noinline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_info(sbi, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_info(sbi, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			F2FS_OPTION(sbi).active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_reserve_root:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (test_opt(sbi, RESERVE_ROOT)) {
				f2fs_info(sbi, "Preserve previous reserve_root=%u",
					  F2FS_OPTION(sbi).root_reserved_blocks);
			} else {
				F2FS_OPTION(sbi).root_reserved_blocks = arg;
				set_opt(sbi, RESERVE_ROOT);
			}
			break;
		case Opt_resuid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			uid = make_kuid(current_user_ns(), arg);
			if (!uid_valid(uid)) {
				f2fs_err(sbi, "Invalid uid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resuid = uid;
			break;
		case Opt_resgid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			gid = make_kgid(current_user_ns(), arg);
			if (!gid_valid(gid)) {
				f2fs_err(sbi, "Invalid gid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resgid = gid;
			break;
		case Opt_mode:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 8 &&
					!strncmp(name, "adaptive", 8)) {
				if (f2fs_sb_has_blkzoned(sbi)) {
					f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
					kvfree(name);
					return -EINVAL;
				}
				set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
			} else if (strlen(name) == 3 &&
					!strncmp(name, "lfs", 3)) {
				set_opt_mode(sbi, F2FS_MOUNT_LFS);
			} else {
				kvfree(name);
				return -EINVAL;
			}
			kvfree(name);
			break;
		case Opt_io_size_bits:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_PAGES)) {
				f2fs_warn(sbi, "Not support %d, larger than %d",
					  1 << arg, BIO_MAX_PAGES);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).write_io_size_bits = arg;
			break;
#ifdef CONFIG_F2FS_FAULT_INJECTION
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
			set_opt(sbi, FAULT_INJECTION);
			break;

		case Opt_fault_type:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			f2fs_build_fault_attr(sbi, 0, arg);
			set_opt(sbi, FAULT_INJECTION);
			break;
#else
		case Opt_fault_injection:
			f2fs_info(sbi, "fault_injection options not supported");
			break;

		case Opt_fault_type:
			f2fs_info(sbi, "fault_type options not supported");
			break;
#endif
		case Opt_lazytime:
			sb->s_flags |= SB_LAZYTIME;
			break;
		case Opt_nolazytime:
			sb->s_flags &= ~SB_LAZYTIME;
			break;
#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			set_opt(sbi, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi, GRPQUOTA);
			break;
		case Opt_prjquota:
			set_opt(sbi, PRJQUOTA);
			break;
		case Opt_usrjquota:
			ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_grpjquota:
			ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_prjjquota:
			ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_offusrjquota:
			ret = f2fs_clear_qf_name(sb, USRQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offgrpjquota:
			ret = f2fs_clear_qf_name(sb, GRPQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offprjjquota:
			ret = f2fs_clear_qf_name(sb, PRJQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_jqfmt_vfsold:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
			break;
		case Opt_jqfmt_vfsv0:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
			break;
		case Opt_jqfmt_vfsv1:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
			break;
		case Opt_noquota:
			clear_opt(sbi, QUOTA);
			clear_opt(sbi, USRQUOTA);
			clear_opt(sbi, GRPQUOTA);
			clear_opt(sbi, PRJQUOTA);
			break;
#else
		case Opt_quota:
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_prjquota:
		case Opt_usrjquota:
		case Opt_grpjquota:
		case Opt_prjjquota:
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_offprjjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
		case Opt_noquota:
			f2fs_info(sbi, "quota operations not supported");
			break;
#endif
		case Opt_whint:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 10 &&
					!strncmp(name, "user-based", 10)) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
			} else if (strlen(name) == 3 &&
					!strncmp(name, "off", 3)) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
			} else if (strlen(name) == 8 &&
					!strncmp(name, "fs-based", 8)) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
			} else {
				kvfree(name);
				return -EINVAL;
			}
			kvfree(name);
			break;
		case Opt_alloc:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;

			if (strlen(name) == 7 &&
					!strncmp(name, "default", 7)) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
			} else if (strlen(name) == 5 &&
					!strncmp(name, "reuse", 5)) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
			} else {
				kvfree(name);
				return -EINVAL;
			}
			kvfree(name);
			break;
		case Opt_fsync:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 5 &&
					!strncmp(name, "posix", 5)) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
			} else if (strlen(name) == 6 &&
					!strncmp(name, "strict", 6)) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
			} else if (strlen(name) == 9 &&
					!strncmp(name, "nobarrier", 9)) {
				F2FS_OPTION(sbi).fsync_mode =
							FSYNC_MODE_NOBARRIER;
			} else {
				kvfree(name);
				return -EINVAL;
			}
			kvfree(name);
			break;
		case Opt_test_dummy_encryption:
#ifdef CONFIG_FS_ENCRYPTION
			if (!f2fs_sb_has_encrypt(sbi)) {
				f2fs_err(sbi, "Encrypt feature is off");
				return -EINVAL;
			}

			F2FS_OPTION(sbi).test_dummy_encryption = true;
			f2fs_info(sbi, "Test dummy encryption mode enabled");
#else
			f2fs_info(sbi, "Test dummy encryption mount option ignored");
#endif
			break;
		case Opt_checkpoint_disable_cap_perc:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg < 0 || arg > 100)
				return -EINVAL;
			if (arg == 100)
				F2FS_OPTION(sbi).unusable_cap =
					sbi->user_block_count;
			else
				F2FS_OPTION(sbi).unusable_cap =
					(sbi->user_block_count / 100) * arg;
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_disable_cap:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			F2FS_OPTION(sbi).unusable_cap = arg;
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_disable:
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_enable:
			clear_opt(sbi, DISABLE_CHECKPOINT);
			break;
		default:
			f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
				 p);
			return -EINVAL;
		}
	}
#ifdef CONFIG_QUOTA
	if (f2fs_check_quota_options(sbi))
		return -EINVAL;
#else
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
		return -EINVAL;
	}
	if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
		return -EINVAL;
	}
#endif
#ifndef CONFIG_UNICODE
	if (f2fs_sb_has_casefold(sbi)) {
		f2fs_err(sbi,
			"Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
		return -EINVAL;
	}
#endif

	if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
		f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO",
			 F2FS_IO_SIZE_KB(sbi));
		return -EINVAL;
	}

	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
		int min_size, max_size;

		if (!f2fs_sb_has_extra_attr(sbi) ||
			!f2fs_sb_has_flexible_inline_xattr(sbi)) {
			f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
			return -EINVAL;
		}
		if (!test_opt(sbi, INLINE_XATTR)) {
			f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
			return -EINVAL;
		}

		min_size = sizeof(struct f2fs_xattr_header) / sizeof(__le32);
		max_size = MAX_INLINE_XATTR_SIZE;

		if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
				F2FS_OPTION(sbi).inline_xattr_size > max_size) {
			f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d",
				 min_size, max_size);
			return -EINVAL;
		}
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT) && test_opt(sbi, LFS)) {
		f2fs_err(sbi, "LFS not compatible with checkpoint=disable");
		return -EINVAL;
	}

	/* Not pass down write hints if the number of active logs is lesser
	 * than NR_CURSEG_TYPE.
	 */
	if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_TYPE)
		F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
	return 0;
}

static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	atomic_set(&fi->dirty_pages, 0);
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	INIT_LIST_HEAD(&fi->inmem_ilist);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);
	init_rwsem(&fi->i_gc_rwsem[READ]);
	init_rwsem(&fi->i_gc_rwsem[WRITE]);
	init_rwsem(&fi->i_mmap_sem);
	init_rwsem(&fi->i_xattr_sem);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;
}

static int f2fs_drop_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	/*
	 * during filesystem shutdown, if checkpoint is disabled,
	 * drop useless meta/node dirty pages.
	 */
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi)) {
			trace_f2fs_drop_inode(inode, 1);
			return 1;
		}
	}

	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* some remained atomic pages should discarded */
			if (f2fs_is_atomic_file(inode))
				f2fs_drop_inmem_pages(inode);

			/* should remain fi->extent_tree for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
					inode, NULL, 0, DATA);
			truncate_inode_pages_final(inode->i_mapping);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = generic_drop_inode(inode);
	if (!ret)
		ret = fscrypt_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}

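/*
 * Mark an inode dirty for checkpointing; returns 1 if it was dirty
 * already. With @sync, the inode is also linked on the global
 * DIRTY_META list so the next checkpoint writes it back.
 */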
int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		ret = 1;
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	}
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
	return ret;
}

void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}

/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (flags == I_DIRTY_TIME)
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);
}

static void f2fs_free_inode(struct inode *inode)
{
	fscrypt_free_inode(inode);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	percpu_counter_destroy(&sbi->total_valid_inode_count);
}

static void destroy_device_list(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
		kvfree(FDEV(i).blkz_seq);
#endif
	}
	kvfree(sbi->devs);
}

static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;
	bool dropped;

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But, the previous checkpoint was not done by umount, it needs to do
	 * clean checkpoint again.
	 */
	if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/* be sure to wait for any on-going discard commands */
	dropped = f2fs_issue_discard_timeout(sbi);

	if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
					!sbi->discard_blks && !dropped) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/*
	 * normally superblock is clean, so we need to release this.
	 * In addition, EIO will skip do checkpoint, we need this as well.
	 */
	f2fs_release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* our cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_writes(sbi);

	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);

	f2fs_bug_on(sbi, sbi->fsync_node_num);

	iput(sbi->node_inode);
	sbi->node_inode = NULL;

	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;

	/*
	 * iput() can update stat information, if f2fs_write_checkpoint()
	 * above failed with error.
	 */
	f2fs_destroy_stats(sbi);

	/* destroy f2fs internal modules */
	f2fs_destroy_node_manager(sbi);
	f2fs_destroy_segment_manager(sbi);

	kvfree(sbi->ckpt);

	f2fs_unregister_sysfs(sbi);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kvfree(sbi->raw_super);

	destroy_device_list(sbi);
	mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	destroy_percpu_info(sbi);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kvfree(sbi->write_io[i]);
#ifdef CONFIG_UNICODE
	utf8_unload(sbi->s_encoding);
#endif
	kvfree(sbi);
}

int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	if (unlikely(f2fs_cp_error(sbi)))
		return 0;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_fs(sb, sync);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return -EAGAIN;

	if (sync) {
		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		err = f2fs_write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	}
	f2fs_trace_ios(NULL, 1);

	return err;
}

static int f2fs_freeze(struct super_block *sb)
{
	if (f2fs_readonly(sb))
		return 0;

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
		return -EIO;

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
		return -EINVAL;
	return 0;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}

#ifdef CONFIG_QUOTA
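/*
 * Clamp the statfs numbers to the project quota limits of @projid, so
 * statfs() on a project-quota directory reports the quota rather than
 * the whole device (same convention as ext4).
 */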
static int f2fs_statfs_project(struct super_block *sb,
				kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);

	limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
					dquot->dq_dqb.dqb_bhardlimit);
	if (limit)
		limit >>= sb->s_blocksize_bits;

	if (limit && buf->f_blocks > limit) {
		curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
					dquot->dq_dqb.dqb_ihardlimit);

	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dquot->dq_dqb_lock);
	dqput(dquot);
	return 0;
}
#endif

static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count;
	u64 avail_node_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
						sbi->current_reserved_blocks;

	spin_lock(&sbi->stat_lock);
	if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
		buf->f_bfree = 0;
	else
		buf->f_bfree -= sbi->unusable_block_count;
	spin_unlock(&sbi->stat_lock);

	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
		buf->f_bavail = buf->f_bfree -
				F2FS_OPTION(sbi).root_reserved_blocks;
	else
		buf->f_bavail = 0;

	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;

	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
	} else {
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
					buf->f_bavail);
	}

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

#ifdef CONFIG_QUOTA
	if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
		f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
	}
#endif
	return 0;
}

static inline void f2fs_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (F2FS_OPTION(sbi).s_jquota_fmt) {
		char *fmtname = "";

		switch (F2FS_OPTION(sbi).s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota",
			F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota",
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		seq_show_option(seq, "prjjquota",
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
#endif
}

static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
		else
			seq_printf(seq, ",background_gc=%s", "on");
	} else {
		seq_printf(seq, ",background_gc=%s", "off");
	}
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, NORECOVERY))
		seq_puts(seq, ",norecovery");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	else
		seq_puts(seq, ",nodiscard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap");
	else
		seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
	else
		seq_puts(seq, ",noinline_xattr");
	if (test_opt(sbi, INLINE_XATTR_SIZE))
		seq_printf(seq, ",inline_xattr_size=%u",
					F2FS_OPTION(sbi).inline_xattr_size);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (test_opt(sbi, ADAPTIVE))
		seq_puts(seq, "adaptive");
	else if (test_opt(sbi, LFS))
		seq_puts(seq, "lfs");
	seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
	if (test_opt(sbi, RESERVE_ROOT))
		seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
				F2FS_OPTION(sbi).root_reserved_blocks,
				from_kuid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resuid),
				from_kgid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resgid));
	if (F2FS_IO_SIZE_BITS(sbi))
		seq_printf(seq, ",io_bits=%u",
				F2FS_OPTION(sbi).write_io_size_bits);
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION)) {
		seq_printf(seq, ",fault_injection=%u",
				F2FS_OPTION(sbi).fault_info.inject_rate);
		seq_printf(seq, ",fault_type=%u",
				F2FS_OPTION(sbi).fault_info.inject_type);
	}
#endif
#ifdef CONFIG_QUOTA
	if (test_opt(sbi, QUOTA))
		seq_puts(seq, ",quota");
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
	if (test_opt(sbi, PRJQUOTA))
		seq_puts(seq, ",prjquota");
#endif
	f2fs_show_quota_options(seq, sbi->sb);
	if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER)
		seq_printf(seq, ",whint_mode=%s", "user-based");
	else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
		seq_printf(seq, ",whint_mode=%s", "fs-based");
#ifdef CONFIG_FS_ENCRYPTION
	if (F2FS_OPTION(sbi).test_dummy_encryption)
		seq_puts(seq, ",test_dummy_encryption");
#endif

	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
		seq_printf(seq, ",alloc_mode=%s", "default");
	else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
		seq_printf(seq, ",alloc_mode=%s", "reuse");

	if (test_opt(sbi, DISABLE_CHECKPOINT))
		seq_printf(seq, ",checkpoint=disable:%u",
				F2FS_OPTION(sbi).unusable_cap);
	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
		seq_printf(seq, ",fsync_mode=%s", "posix");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
		seq_printf(seq, ",fsync_mode=%s", "strict");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
		seq_printf(seq, ",fsync_mode=%s", "nobarrier");
	return 0;
}

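/* Reset mount options to their defaults before (re)parsing an option string */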
static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	F2FS_OPTION(sbi).active_logs = NR_CURSEG_TYPE;
	F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
	F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
	F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
	F2FS_OPTION(sbi).test_dummy_encryption = false;
	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);

	set_opt(sbi, BG_GC);
	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, EXTENT_CACHE);
	set_opt(sbi, NOHEAP);
	clear_opt(sbi, DISABLE_CHECKPOINT);
	F2FS_OPTION(sbi).unusable_cap = 0;
	sbi->sb->s_flags |= SB_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
	set_opt(sbi, DISCARD);
	if (f2fs_sb_has_blkzoned(sbi))
		set_opt_mode(sbi, F2FS_MOUNT_LFS);
	else
		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif

	f2fs_build_fault_attr(sbi, 0, 0);
}

#ifdef CONFIG_QUOTA
static int f2fs_enable_quotas(struct super_block *sb);
#endif

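/*
 * checkpoint=disable: garbage-collect until enough in-place-update room
 * exists, write one CP_PAUSE checkpoint, then account the blocks that
 * stay unusable while checkpointing is suspended.
 */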
static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
{
	unsigned int s_flags = sbi->sb->s_flags;
	struct cp_control cpc;
	int err = 0;
	int ret;
	block_t unusable;

	if (s_flags & SB_RDONLY) {
		f2fs_err(sbi, "checkpoint=disable on readonly fs");
		return -EINVAL;
	}
	sbi->sb->s_flags |= SB_ACTIVE;

	f2fs_update_time(sbi, DISABLE_TIME);

	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
		mutex_lock(&sbi->gc_mutex);
		err = f2fs_gc(sbi, true, false, NULL_SEGNO);
		if (err == -ENODATA) {
			err = 0;
			break;
		}
		if (err && err != -EAGAIN)
			break;
	}

	ret = sync_filesystem(sbi->sb);
	if (ret || err) {
		err = ret ? ret : err;
		goto restore_flag;
	}

	unusable = f2fs_get_unusable_blocks(sbi);
	if (f2fs_disable_cp_again(sbi, unusable)) {
		err = -EAGAIN;
		goto restore_flag;
	}

	mutex_lock(&sbi->gc_mutex);
	cpc.reason = CP_PAUSE;
	set_sbi_flag(sbi, SBI_CP_DISABLED);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out_unlock;

	spin_lock(&sbi->stat_lock);
	sbi->unusable_block_count = unusable;
	spin_unlock(&sbi->stat_lock);

out_unlock:
	mutex_unlock(&sbi->gc_mutex);
restore_flag:
	sbi->sb->s_flags = s_flags;	/* Restore SB_RDONLY status */
	return err;
}

static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
{
	mutex_lock(&sbi->gc_mutex);
	f2fs_dirty_to_prefree(sbi);

	clear_sbi_flag(sbi, SBI_CP_DISABLED);
	set_sbi_flag(sbi, SBI_IS_DIRTY);
	mutex_unlock(&sbi->gc_mutex);

	f2fs_sync_fs(sbi->sb, 1);
}

static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	unsigned long old_sb_flags;
	int err;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
	bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT);
	bool no_io_align = !F2FS_IO_ALIGNED(sbi);
	bool checkpoint_changed;
#ifdef CONFIG_QUOTA
	int i, j;
#endif

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	old_sb_flags = sb->s_flags;

#ifdef CONFIG_QUOTA
	org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			org_mount_opt.s_qf_names[i] =
				kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
				GFP_KERNEL);
			if (!org_mount_opt.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kvfree(org_mount_opt.s_qf_names[j]);
				return -ENOMEM;
			}
		} else {
			org_mount_opt.s_qf_names[i] = NULL;
		}
	}
#endif

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
			  err);
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);
	if (err)
		goto restore_opts;
	checkpoint_changed =
			disable_checkpoint != test_opt(sbi, DISABLE_CHECKPOINT);

	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
		goto skip;

#ifdef CONFIG_QUOTA
	if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
		err = dquot_suspend(sb, -1);
		if (err < 0)
			goto restore_opts;
	} else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
		/* dquot_resume needs RW */
		sb->s_flags &= ~SB_RDONLY;
		if (sb_any_quota_suspended(sb)) {
			dquot_resume(sb, -1);
		} else if (f2fs_sb_has_quota_ino(sbi)) {
			err = f2fs_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif
	/* disallow enable/disable extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch extent_cache option is not allowed");
		goto restore_opts;
	}

	if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch io_bits option is not allowed");
		goto restore_opts;
	}

	if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
		err = -EINVAL;
		f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			f2fs_stop_gc_thread(sbi);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	if (*flags & SB_RDONLY ||
		F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) {
		writeback_inodes_sb(sb, WB_REASON_SYNC);
		sync_inodes_sb(sb);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	if (checkpoint_changed) {
		if (test_opt(sbi, DISABLE_CHECKPOINT)) {
			err = f2fs_disable_checkpoint(sbi);
			if (err)
				goto restore_gc;
		} else {
			f2fs_enable_checkpoint(sbi);
		}
	}

	/*
	 * We stop issue flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in mount option.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		clear_opt(sbi, FLUSH_MERGE);
		f2fs_destroy_flush_cmd_control(sbi, false);
	} else {
		err = f2fs_create_flush_cmd_control(sbi);
		if (err)
			goto restore_gc;
	}
skip:
#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < MAXQUOTAS; i++)
		kvfree(org_mount_opt.s_qf_names[i]);
#endif
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);

	limit_reserve_root(sbi);
	*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
	return 0;
restore_gc:
	if (need_restart_gc) {
		if (f2fs_start_gc_thread(sbi))
			f2fs_warn(sbi, "background gc thread has stopped");
	} else if (need_stop_gc) {
		f2fs_stop_gc_thread(sbi);
	}
restore_opts:
#ifdef CONFIG_QUOTA
	F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
		F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
	}
#endif
	sbi->mount_opt = org_mount_opt;
	sb->s_flags = old_sb_flags;
	return err;
}

#ifdef CONFIG_QUOTA
/* Read data from quotafile */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	block_t blkidx = F2FS_BYTES_TO_BLK(off);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	loff_t i_size = i_size_read(inode);
	struct page *page;
	char *kaddr;

	if (off > i_size)
		return 0;

	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
repeat:
		page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
		if (IS_ERR(page)) {
			if (PTR_ERR(page) == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto repeat;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return PTR_ERR(page);
		}

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return -EIO;
		}

		kaddr = kmap_atomic(page);
		memcpy(data, kaddr + offset, tocopy);
		kunmap_atomic(kaddr);
		f2fs_put_page(page, 1);

		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blkidx++;
	}
	return len;
}

/* Write to quotafile */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int offset = off & (sb->s_blocksize - 1);
	size_t towrite = len;
	struct page *page;
	void *fsdata = NULL;
	char *kaddr;
	int err = 0;
	int tocopy;

	while (towrite > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
								towrite);
retry:
		err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
							&page, &fsdata);
		if (unlikely(err)) {
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto retry;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			break;
		}

		kaddr = kmap_atomic(page);
		memcpy(kaddr + offset, data, tocopy);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);

		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
						page, fsdata);
		offset = 0;
		towrite -= tocopy;
		off += tocopy;
		data += tocopy;
		cond_resched();
	}

	if (len == towrite)
		return err;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return len - towrite;
}

static struct dquot **f2fs_get_dquots(struct inode *inode)
{
	return F2FS_I(inode)->i_dquot;
}

static qsize_t *f2fs_get_reserved_space(struct inode *inode)
{
	return &F2FS_I(inode)->i_reserved_quota;
}

static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
{
	if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
		f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
		return 0;
	}

	return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
					F2FS_OPTION(sbi).s_jquota_fmt, type);
}

int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
{
	int enabled = 0;
	int i, err;

	if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
		err = f2fs_enable_quotas(sbi->sb);
		if (err) {
			f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
			return 0;
		}
		return 1;
	}

	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			err = f2fs_quota_on_mount(sbi, i);
			if (!err) {
				enabled = 1;
				continue;
			}
			f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
				 err, i);
		}
	}
	return enabled;
}

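/* Turn on quota accounting backed by the in-filesystem quota inode of @type */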
static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	struct inode *qf_inode;
	unsigned long qf_inum;
	int err;

	BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));

	qf_inum = f2fs_qf_ino(sb, type);
	if (!qf_inum)
		return -EPERM;

	qf_inode = f2fs_iget(sb, qf_inum);
	if (IS_ERR(qf_inode)) {
		f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	err = dquot_enable(qf_inode, type, format_id, flags);
	iput(qf_inode);
	return err;
}

static int f2fs_enable_quotas(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int type, err = 0;
	unsigned long qf_inum;
	bool quota_mopt[MAXQUOTAS] = {
		test_opt(sbi, USRQUOTA),
		test_opt(sbi, GRPQUOTA),
		test_opt(sbi, PRJQUOTA),
	};

	if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
		f2fs_err(sbi, "quota file may be corrupted, skip loading it");
		return 0;
	}

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;

	for (type = 0; type < MAXQUOTAS; type++) {
		qf_inum = f2fs_qf_ino(sb, type);
		if (qf_inum) {
			err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
					 type, err);
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);
				set_sbi_flag(F2FS_SB(sb),
						SBI_QUOTA_NEED_REPAIR);
				return err;
			}
		}
	}
	return 0;
}

int f2fs_quota_sync(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	/*
	 * Lock ordering: take f2fs_lock_op() before quota_sem so that the
	 * dquot callbacks reached from dquot_writeback_dquots() cannot race
	 * with a checkpoint's block_operations():
	 *
	 * do_quotactl
	 *  f2fs_quota_sync
	 *  down_read(quota_sem)
	 *  dquot_writeback_dquots()
	 *  f2fs_dquot_commit
	 *                            block_operation
	 *    down_read(quota_sem)
	 */
	f2fs_lock_op(sbi);

	down_read(&sbi->quota_sem);
	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		goto out;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		struct address_space *mapping;

		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;

		mapping = dqopt->files[cnt]->i_mapping;

		ret = filemap_fdatawrite(mapping);
		if (ret)
			goto out;

		/* if we are using journalled quota */
		if (is_journalled_quota(sbi))
			continue;

		ret = filemap_fdatawait(mapping);
		if (ret)
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);

		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}
out:
	if (ret)
		set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
	up_read(&sbi->quota_sem);
	f2fs_unlock_op(sbi);
	return ret;
}

static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
							const struct path *path)
{
	struct inode *inode;
	int err;

	/* if quota sysfile exists, deny enabling quota with specific file */
	if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
		f2fs_err(F2FS_SB(sb), "quota sysfile already exists");
		return -EBUSY;
	}

	err = f2fs_quota_sync(sb, type);
	if (err)
		return err;

	err = dquot_quota_on(sb, type, format_id, path);
	if (err)
		return err;

	inode = d_inode(path->dentry);

	inode_lock(inode);
	F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
	f2fs_set_inode_flags(inode);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);

	return 0;
}

static int __f2fs_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	int err;

	if (!inode || !igrab(inode))
		return dquot_quota_off(sb, type);

	err = f2fs_quota_sync(sb, type);
	if (err)
		goto out_put;

	err = dquot_quota_off(sb, type);
	if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
		goto out_put;

	inode_lock(inode);
	F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
	f2fs_set_inode_flags(inode);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
out_put:
	iput(inode);
	return err;
}
2100
2101 static int f2fs_quota_off(struct super_block *sb, int type)
2102 {
2103 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2104 int err;
2105
2106 err = __f2fs_quota_off(sb, type);
2107
2108 /*
2109 * quotactl can shut down journalled quota, leaving quota records
2110 * inconsistent with fs data after subsequent updates; tag the
2111 * repair flag so fsck is aware of it.
2112 */
2113 if (is_journalled_quota(sbi))
2114 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2115 return err;
2116 }
2117
2118 void f2fs_quota_off_umount(struct super_block *sb)
2119 {
2120 int type;
2121 int err;
2122
2123 for (type = 0; type < MAXQUOTAS; type++) {
2124 err = __f2fs_quota_off(sb, type);
2125 if (err) {
2126 int ret = dquot_quota_off(sb, type);
2127
2128 f2fs_err(F2FS_SB(sb), "Failed to turn off disk quota (type: %d, err: %d, ret: %d). Please run fsck to fix it.",
2129 type, err, ret);
2130 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2131 }
2132 }
2133
2134 /*
2135 * In case of checkpoint=disable, quota blocks must be flushed here;
2136 * otherwise end_io may hit a NULL node_inode after put_super dropped it.
2137 */
2138 sync_filesystem(sb);
2139 }
2140
2141 static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
2142 {
2143 struct quota_info *dqopt = sb_dqopt(sb);
2144 int type;
2145
2146 for (type = 0; type < MAXQUOTAS; type++) {
2147 if (!dqopt->files[type])
2148 continue;
2149 f2fs_inode_synced(dqopt->files[type]);
2150 }
2151 }
2152
2153 static int f2fs_dquot_commit(struct dquot *dquot)
2154 {
2155 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2156 int ret;
2157
2158 down_read(&sbi->quota_sem);
2159 ret = dquot_commit(dquot);
2160 if (ret < 0)
2161 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2162 up_read(&sbi->quota_sem);
2163 return ret;
2164 }
2165
2166 static int f2fs_dquot_acquire(struct dquot *dquot)
2167 {
2168 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2169 int ret;
2170
2171 down_read(&sbi->quota_sem);
2172 ret = dquot_acquire(dquot);
2173 if (ret < 0)
2174 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2175 up_read(&sbi->quota_sem);
2176 return ret;
2177 }
2178
2179 static int f2fs_dquot_release(struct dquot *dquot)
2180 {
2181 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2182 int ret;
2183
2184 down_read(&sbi->quota_sem);
2185 ret = dquot_release(dquot);
2186 if (ret < 0)
2187 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2188 up_read(&sbi->quota_sem);
2189 return ret;
2190 }
2191
2192 static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
2193 {
2194 struct super_block *sb = dquot->dq_sb;
2195 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2196 int ret;
2197
2198 down_read(&sbi->quota_sem);
2199 ret = dquot_mark_dquot_dirty(dquot);
2200
2201 /* if we are using journalled quota, defer the flush to checkpoint */
2202 if (is_journalled_quota(sbi))
2203 set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
2204
2205 up_read(&sbi->quota_sem);
2206 return ret;
2207 }
2208
2209 static int f2fs_dquot_commit_info(struct super_block *sb, int type)
2210 {
2211 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2212 int ret;
2213
2214 down_read(&sbi->quota_sem);
2215 ret = dquot_commit_info(sb, type);
2216 if (ret < 0)
2217 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2218 up_read(&sbi->quota_sem);
2219 return ret;
2220 }
2221
2222 static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
2223 {
2224 *projid = F2FS_I(inode)->i_projid;
2225 return 0;
2226 }
2227
2228 static const struct dquot_operations f2fs_quota_operations = {
2229 .get_reserved_space = f2fs_get_reserved_space,
2230 .write_dquot = f2fs_dquot_commit,
2231 .acquire_dquot = f2fs_dquot_acquire,
2232 .release_dquot = f2fs_dquot_release,
2233 .mark_dirty = f2fs_dquot_mark_dquot_dirty,
2234 .write_info = f2fs_dquot_commit_info,
2235 .alloc_dquot = dquot_alloc,
2236 .destroy_dquot = dquot_destroy,
2237 .get_projid = f2fs_get_projid,
2238 .get_next_id = dquot_get_next_id,
2239 };
2240
2241 static const struct quotactl_ops f2fs_quotactl_ops = {
2242 .quota_on = f2fs_quota_on,
2243 .quota_off = f2fs_quota_off,
2244 .quota_sync = f2fs_quota_sync,
2245 .get_state = dquot_get_state,
2246 .set_info = dquot_set_dqinfo,
2247 .get_dqblk = dquot_get_dqblk,
2248 .set_dqblk = dquot_set_dqblk,
2249 .get_nextdqblk = dquot_get_next_dqblk,
2250 };
2251 #else
2252 int f2fs_quota_sync(struct super_block *sb, int type)
2253 {
2254 return 0;
2255 }
2256
2257 void f2fs_quota_off_umount(struct super_block *sb)
2258 {
2259 }
2260 #endif
2261
2262 static const struct super_operations f2fs_sops = {
2263 .alloc_inode = f2fs_alloc_inode,
2264 .free_inode = f2fs_free_inode,
2265 .drop_inode = f2fs_drop_inode,
2266 .write_inode = f2fs_write_inode,
2267 .dirty_inode = f2fs_dirty_inode,
2268 .show_options = f2fs_show_options,
2269 #ifdef CONFIG_QUOTA
2270 .quota_read = f2fs_quota_read,
2271 .quota_write = f2fs_quota_write,
2272 .get_dquots = f2fs_get_dquots,
2273 #endif
2274 .evict_inode = f2fs_evict_inode,
2275 .put_super = f2fs_put_super,
2276 .sync_fs = f2fs_sync_fs,
2277 .freeze_fs = f2fs_freeze,
2278 .unfreeze_fs = f2fs_unfreeze,
2279 .statfs = f2fs_statfs,
2280 .remount_fs = f2fs_remount,
2281 };
2282
2283 #ifdef CONFIG_FS_ENCRYPTION
2284 static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
2285 {
2286 return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
2287 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
2288 ctx, len, NULL);
2289 }
2290
2291 static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
2292 void *fs_data)
2293 {
2294 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2295
2296 /*
2297 * Encrypting the root directory is not allowed because fsck
2298 * expects the lost+found directory to exist and remain
2299 * unencrypted if the LOST_FOUND feature is enabled.
2300 */
2301
2302 if (f2fs_sb_has_lost_found(sbi) &&
2303 inode->i_ino == F2FS_ROOT_INO(sbi))
2304 return -EPERM;
2305
2306 return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
2307 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
2308 ctx, len, fs_data, XATTR_CREATE);
2309 }
2310
2311 static bool f2fs_dummy_context(struct inode *inode)
2312 {
2313 return DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(inode));
2314 }
2315
2316 static const struct fscrypt_operations f2fs_cryptops = {
2317 .key_prefix = "f2fs:",
2318 .get_context = f2fs_get_context,
2319 .set_context = f2fs_set_context,
2320 .dummy_context = f2fs_dummy_context,
2321 .empty_dir = f2fs_empty_dir,
2322 .max_namelen = F2FS_NAME_LEN,
2323 };
2324 #endif
2325
2326 static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
2327 u64 ino, u32 generation)
2328 {
2329 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2330 struct inode *inode;
2331
2332 if (f2fs_check_nid_range(sbi, ino))
2333 return ERR_PTR(-ESTALE);
2334
2335 /*
2336 * f2fs_iget isn't quite right if the inode is currently unallocated!
2337 * However f2fs_iget currently does appropriate checks to handle stale
2338 * inodes so everything is OK.
2339 */
2340 inode = f2fs_iget(sb, ino);
2341 if (IS_ERR(inode))
2342 return ERR_CAST(inode);
2343 if (unlikely(generation && inode->i_generation != generation)) {
2344 /* we didn't find the right inode.. */
2345 iput(inode);
2346 return ERR_PTR(-ESTALE);
2347 }
2348 return inode;
2349 }
2350
2351 static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
2352 int fh_len, int fh_type)
2353 {
2354 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
2355 f2fs_nfs_get_inode);
2356 }
2357
2358 static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
2359 int fh_len, int fh_type)
2360 {
2361 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
2362 f2fs_nfs_get_inode);
2363 }
2364
2365 static const struct export_operations f2fs_export_ops = {
2366 .fh_to_dentry = f2fs_fh_to_dentry,
2367 .fh_to_parent = f2fs_fh_to_parent,
2368 .get_parent = f2fs_get_parent,
2369 };
2370
2371 static loff_t max_file_blocks(void)
2372 {
2373 loff_t result = 0;
2374 loff_t leaf_count = DEF_ADDRS_PER_BLOCK;
2375
2376 /*
2377 * note: previously, result was (DEF_ADDRS_PER_INODE -
2378 * DEFAULT_INLINE_XATTR_ADDRS), but f2fs now reserves more space
2379 * in inode.i_addr, so it is safer to start result at zero.
2380 */
2381
2382
2383 /* two direct node blocks */
2384 result += (leaf_count * 2);
2385
2386 /* two indirect node blocks */
2387 leaf_count *= NIDS_PER_BLOCK;
2388 result += (leaf_count * 2);
2389
2390 /* one double indirect node block */
2391 leaf_count *= NIDS_PER_BLOCK;
2392 result += leaf_count;
2393
2394 return result;
2395 }
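/*
 * Editor's note, a worked example (not in the original source),
 * assuming DEF_ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018, the values
 * used by this on-disk format with 4KB blocks:
 *
 *   two direct node blocks:          2 * 1018           =         2,036
 *   two indirect node blocks:        2 * 1018 * 1018    =     2,072,648
 *   one double indirect node block:  1018 * 1018 * 1018 = 1,054,977,832
 *                                                  total  1,057,052,516
 *
 * i.e. about 1.06e9 addressable blocks, or roughly 3.94 TiB per file.
 */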
2396
2397 static int __f2fs_commit_super(struct buffer_head *bh,
2398 struct f2fs_super_block *super)
2399 {
2400 lock_buffer(bh);
2401 if (super)
2402 memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
2403 set_buffer_dirty(bh);
2404 unlock_buffer(bh);
2405
2406 /* rare path: always write with preflush + FUA */
2407 return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
2408 }
2409
2410 static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
2411 struct buffer_head *bh)
2412 {
2413 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
2414 (bh->b_data + F2FS_SUPER_OFFSET);
2415 struct super_block *sb = sbi->sb;
2416 u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
2417 u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
2418 u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
2419 u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
2420 u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
2421 u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
2422 u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
2423 u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
2424 u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
2425 u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
2426 u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
2427 u32 segment_count = le32_to_cpu(raw_super->segment_count);
2428 u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
2429 u64 main_end_blkaddr = main_blkaddr +
2430 (segment_count_main << log_blocks_per_seg);
2431 u64 seg_end_blkaddr = segment0_blkaddr +
2432 (segment_count << log_blocks_per_seg);
2433
2434 if (segment0_blkaddr != cp_blkaddr) {
2435 f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
2436 segment0_blkaddr, cp_blkaddr);
2437 return true;
2438 }
2439
2440 if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
2441 sit_blkaddr) {
2442 f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
2443 cp_blkaddr, sit_blkaddr,
2444 segment_count_ckpt << log_blocks_per_seg);
2445 return true;
2446 }
2447
2448 if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
2449 nat_blkaddr) {
2450 f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
2451 sit_blkaddr, nat_blkaddr,
2452 segment_count_sit << log_blocks_per_seg);
2453 return true;
2454 }
2455
2456 if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
2457 ssa_blkaddr) {
2458 f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
2459 nat_blkaddr, ssa_blkaddr,
2460 segment_count_nat << log_blocks_per_seg);
2461 return true;
2462 }
2463
2464 if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
2465 main_blkaddr) {
2466 f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
2467 ssa_blkaddr, main_blkaddr,
2468 segment_count_ssa << log_blocks_per_seg);
2469 return true;
2470 }
2471
2472 if (main_end_blkaddr > seg_end_blkaddr) {
2473 f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
2474 main_blkaddr,
2475 segment0_blkaddr +
2476 (segment_count << log_blocks_per_seg),
2477 segment_count_main << log_blocks_per_seg);
2478 return true;
2479 } else if (main_end_blkaddr < seg_end_blkaddr) {
2480 int err = 0;
2481 char *res;
2482
2483 /* fix in-memory information all the time */
2484 raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
2485 segment0_blkaddr) >> log_blocks_per_seg);
2486
2487 if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
2488 set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
2489 res = "internally";
2490 } else {
2491 err = __f2fs_commit_super(bh, NULL);
2492 res = err ? "failed" : "done";
2493 }
2494 f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%u) block(%u)",
2495 res, main_blkaddr,
2496 segment0_blkaddr +
2497 (segment_count << log_blocks_per_seg),
2498 segment_count_main << log_blocks_per_seg);
2499 if (err)
2500 return true;
2501 }
2502 return false;
2503 }
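/*
 * Editor's note (not in the original source): the checks above verify
 * that the metadata areas recorded in the superblock tile the device
 * contiguously and in this fixed order:
 *
 *   segment0 (CP) | SIT | NAT | SSA | MAIN
 *
 * Each area must start exactly where the previous one ends
 * (start + (segment_count_X << log_blocks_per_seg)). A MAIN area that
 * ends short of the last segment is tolerated: segment_count is shrunk
 * to match and, when the device is writable, the corrected superblock
 * is committed back to disk.
 */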
2504
2505 static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2506 struct buffer_head *bh)
2507 {
2508 block_t segment_count, segs_per_sec, secs_per_zone;
2509 block_t total_sections, blocks_per_seg;
2510 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
2511 (bh->b_data + F2FS_SUPER_OFFSET);
2512 unsigned int blocksize;
2513 size_t crc_offset = 0;
2514 __u32 crc = 0;
2515
2516 if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
2517 f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
2518 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
2519 return -EINVAL;
2520 }
2521
2522 /* Check checksum_offset and crc in superblock */
2523 if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
2524 crc_offset = le32_to_cpu(raw_super->checksum_offset);
2525 if (crc_offset !=
2526 offsetof(struct f2fs_super_block, crc)) {
2527 f2fs_info(sbi, "Invalid SB checksum offset: %zu",
2528 crc_offset);
2529 return -EFSCORRUPTED;
2530 }
2531 crc = le32_to_cpu(raw_super->crc);
2532 if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
2533 f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
2534 return -EFSCORRUPTED;
2535 }
2536 }
2537
2538 /* Currently, support only 4KB page cache size */
2539 if (F2FS_BLKSIZE != PAGE_SIZE) {
2540 f2fs_info(sbi, "Invalid page_cache_size (%lu), supports only 4KB",
2541 PAGE_SIZE);
2542 return -EFSCORRUPTED;
2543 }
2544
2545 /* Currently, support only 4KB block size */
2546 blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
2547 if (blocksize != F2FS_BLKSIZE) {
2548 f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB",
2549 blocksize);
2550 return -EFSCORRUPTED;
2551 }
2552
2553 /* check log blocks per segment */
2554 if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
2555 f2fs_info(sbi, "Invalid log blocks per segment (%u)",
2556 le32_to_cpu(raw_super->log_blocks_per_seg));
2557 return -EFSCORRUPTED;
2558 }
2559
2560 /* Currently, support 512/1024/2048/4096 bytes sector size */
2561 if (le32_to_cpu(raw_super->log_sectorsize) >
2562 F2FS_MAX_LOG_SECTOR_SIZE ||
2563 le32_to_cpu(raw_super->log_sectorsize) <
2564 F2FS_MIN_LOG_SECTOR_SIZE) {
2565 f2fs_info(sbi, "Invalid log sectorsize (%u)",
2566 le32_to_cpu(raw_super->log_sectorsize));
2567 return -EFSCORRUPTED;
2568 }
2569 if (le32_to_cpu(raw_super->log_sectors_per_block) +
2570 le32_to_cpu(raw_super->log_sectorsize) !=
2571 F2FS_MAX_LOG_SECTOR_SIZE) {
2572 f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
2573 le32_to_cpu(raw_super->log_sectors_per_block),
2574 le32_to_cpu(raw_super->log_sectorsize));
2575 return -EFSCORRUPTED;
2576 }
2577
2578 segment_count = le32_to_cpu(raw_super->segment_count);
2579 segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
2580 secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
2581 total_sections = le32_to_cpu(raw_super->section_count);
2582
2583 /* blocks_per_seg should be 512, given the above check */
2584 blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);
2585
2586 if (segment_count > F2FS_MAX_SEGMENT ||
2587 segment_count < F2FS_MIN_SEGMENTS) {
2588 f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
2589 return -EFSCORRUPTED;
2590 }
2591
2592 if (total_sections > segment_count ||
2593 total_sections < F2FS_MIN_SEGMENTS ||
2594 segs_per_sec > segment_count || !segs_per_sec) {
2595 f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
2596 segment_count, total_sections, segs_per_sec);
2597 return -EFSCORRUPTED;
2598 }
2599
2600 if ((segment_count / segs_per_sec) < total_sections) {
2601 f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
2602 segment_count, segs_per_sec, total_sections);
2603 return -EFSCORRUPTED;
2604 }
2605
2606 if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
2607 f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
2608 segment_count, le64_to_cpu(raw_super->block_count));
2609 return -EFSCORRUPTED;
2610 }
2611
2612 if (secs_per_zone > total_sections || !secs_per_zone) {
2613 f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
2614 secs_per_zone, total_sections);
2615 return -EFSCORRUPTED;
2616 }
2617 if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
2618 raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
2619 (le32_to_cpu(raw_super->extension_count) +
2620 raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
2621 f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
2622 le32_to_cpu(raw_super->extension_count),
2623 raw_super->hot_ext_count,
2624 F2FS_MAX_EXTENSION);
2625 return -EFSCORRUPTED;
2626 }
2627
2628 if (le32_to_cpu(raw_super->cp_payload) >
2629 (blocks_per_seg - F2FS_CP_PACKS)) {
2630 f2fs_info(sbi, "Insane cp_payload (%u > %u)",
2631 le32_to_cpu(raw_super->cp_payload),
2632 blocks_per_seg - F2FS_CP_PACKS);
2633 return -EFSCORRUPTED;
2634 }
2635
2636 /* check reserved ino info */
2637 if (le32_to_cpu(raw_super->node_ino) != 1 ||
2638 le32_to_cpu(raw_super->meta_ino) != 2 ||
2639 le32_to_cpu(raw_super->root_ino) != 3) {
2640 f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
2641 le32_to_cpu(raw_super->node_ino),
2642 le32_to_cpu(raw_super->meta_ino),
2643 le32_to_cpu(raw_super->root_ino));
2644 return -EFSCORRUPTED;
2645 }
2646
2647 /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
2648 if (sanity_check_area_boundary(sbi, bh))
2649 return -EFSCORRUPTED;
2650
2651 return 0;
2652 }
2653
2654 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
2655 {
2656 unsigned int total, fsmeta;
2657 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
2658 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2659 unsigned int ovp_segments, reserved_segments;
2660 unsigned int main_segs, blocks_per_seg;
2661 unsigned int sit_segs, nat_segs;
2662 unsigned int sit_bitmap_size, nat_bitmap_size;
2663 unsigned int log_blocks_per_seg;
2664 unsigned int segment_count_main;
2665 unsigned int cp_pack_start_sum, cp_payload;
2666 block_t user_block_count, valid_user_blocks;
2667 block_t avail_node_count, valid_node_count;
2668 int i, j;
2669
2670 total = le32_to_cpu(raw_super->segment_count);
2671 fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
2672 sit_segs = le32_to_cpu(raw_super->segment_count_sit);
2673 fsmeta += sit_segs;
2674 nat_segs = le32_to_cpu(raw_super->segment_count_nat);
2675 fsmeta += nat_segs;
2676 fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
2677 fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
2678
2679 if (unlikely(fsmeta >= total))
2680 return 1;
2681
2682 ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
2683 reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
2684
2685 if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
2686 ovp_segments == 0 || reserved_segments == 0)) {
2687 f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
2688 return 1;
2689 }
2690
2691 user_block_count = le64_to_cpu(ckpt->user_block_count);
2692 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
2693 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
2694 if (!user_block_count || user_block_count >=
2695 segment_count_main << log_blocks_per_seg) {
2696 f2fs_err(sbi, "Wrong user_block_count: %u",
2697 user_block_count);
2698 return 1;
2699 }
2700
2701 valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
2702 if (valid_user_blocks > user_block_count) {
2703 f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
2704 valid_user_blocks, user_block_count);
2705 return 1;
2706 }
2707
2708 valid_node_count = le32_to_cpu(ckpt->valid_node_count);
2709 avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
2710 if (valid_node_count > avail_node_count) {
2711 f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
2712 valid_node_count, avail_node_count);
2713 return 1;
2714 }
2715
2716 main_segs = le32_to_cpu(raw_super->segment_count_main);
2717 blocks_per_seg = sbi->blocks_per_seg;
2718
2719 for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
2720 if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
2721 le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
2722 return 1;
2723 for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
2724 if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
2725 le32_to_cpu(ckpt->cur_node_segno[j])) {
2726 f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
2727 i, j,
2728 le32_to_cpu(ckpt->cur_node_segno[i]));
2729 return 1;
2730 }
2731 }
2732 }
2733 for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
2734 if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
2735 le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
2736 return 1;
2737 for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
2738 if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
2739 le32_to_cpu(ckpt->cur_data_segno[j])) {
2740 f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
2741 i, j,
2742 le32_to_cpu(ckpt->cur_data_segno[i]));
2743 return 1;
2744 }
2745 }
2746 }
2747 for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
2748 for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
2749 if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
2750 le32_to_cpu(ckpt->cur_data_segno[j])) {
2751 f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u",
2752 i, j,
2753 le32_to_cpu(ckpt->cur_node_segno[i]));
2754 return 1;
2755 }
2756 }
2757 }
2758
2759 sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
2760 nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
2761
2762 if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
2763 nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
2764 f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
2765 sit_bitmap_size, nat_bitmap_size);
2766 return 1;
2767 }
2768
2769 cp_pack_start_sum = __start_sum_addr(sbi);
2770 cp_payload = __cp_payload(sbi);
2771 if (cp_pack_start_sum < cp_payload + 1 ||
2772 cp_pack_start_sum > blocks_per_seg - 1 -
2773 NR_CURSEG_TYPE) {
2774 f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
2775 cp_pack_start_sum);
2776 return 1;
2777 }
2778
2779 if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
2780 le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
2781 f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
2782 "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
2783 "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
2784 le32_to_cpu(ckpt->checksum_offset));
2785 return 1;
2786 }
2787
2788 if (unlikely(f2fs_cp_error(sbi))) {
2789 f2fs_err(sbi, "A bug case: need to run fsck");
2790 return 1;
2791 }
2792 return 0;
2793 }
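/*
 * Editor's note, a worked example (not in the original source): SIT
 * and NAT are each stored as two mirrored copies, so the version
 * bitmaps carry one bit per block of a single copy. With an
 * illustrative sit_segs = 2 and 512 blocks per segment:
 *
 *   sit_bitmap_size = ((2 / 2) << 9) / 8 = 64 bytes
 *
 * The checkpoint is rejected when the recorded bitmap sizes do not
 * match this arithmetic. The sit_segs value here is hypothetical; the
 * real counts come from the superblock.
 */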
2794
2795 static void init_sb_info(struct f2fs_sb_info *sbi)
2796 {
2797 struct f2fs_super_block *raw_super = sbi->raw_super;
2798 int i;
2799
2800 sbi->log_sectors_per_block =
2801 le32_to_cpu(raw_super->log_sectors_per_block);
2802 sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
2803 sbi->blocksize = 1 << sbi->log_blocksize;
2804 sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
2805 sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
2806 sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
2807 sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
2808 sbi->total_sections = le32_to_cpu(raw_super->section_count);
2809 sbi->total_node_count =
2810 (le32_to_cpu(raw_super->segment_count_nat) / 2)
2811 * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
2812 sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
2813 sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
2814 sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
2815 sbi->cur_victim_sec = NULL_SECNO;
2816 sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
2817 sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
2818 sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
2819 sbi->migration_granularity = sbi->segs_per_sec;
2820
2821 sbi->dir_level = DEF_DIR_LEVEL;
2822 sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
2823 sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
2824 sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
2825 sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
2826 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
2827 sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
2828 DEF_UMOUNT_DISCARD_TIMEOUT;
2829 clear_sbi_flag(sbi, SBI_NEED_FSCK);
2830
2831 for (i = 0; i < NR_COUNT_TYPE; i++)
2832 atomic_set(&sbi->nr_pages[i], 0);
2833
2834 for (i = 0; i < META; i++)
2835 atomic_set(&sbi->wb_sync_req[i], 0);
2836
2837 INIT_LIST_HEAD(&sbi->s_list);
2838 mutex_init(&sbi->umount_mutex);
2839 init_rwsem(&sbi->io_order_lock);
2840 spin_lock_init(&sbi->cp_lock);
2841
2842 sbi->dirty_device = 0;
2843 spin_lock_init(&sbi->dev_lock);
2844
2845 init_rwsem(&sbi->sb_lock);
2846 }
2847
2848 static int init_percpu_info(struct f2fs_sb_info *sbi)
2849 {
2850 int err;
2851
2852 err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
2853 if (err)
2854 return err;
2855
2856 err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
2857 GFP_KERNEL);
2858 if (err)
2859 percpu_counter_destroy(&sbi->alloc_valid_block_count);
2860
2861 return err;
2862 }
2863
2864 #ifdef CONFIG_BLK_DEV_ZONED
2865 static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
2866 {
2867 struct block_device *bdev = FDEV(devi).bdev;
2868 sector_t nr_sectors = bdev->bd_part->nr_sects;
2869 sector_t sector = 0;
2870 struct blk_zone *zones;
2871 unsigned int i, nr_zones;
2872 unsigned int n = 0;
2873 int err = -EIO;
2874
2875 if (!f2fs_sb_has_blkzoned(sbi))
2876 return 0;
2877
2878 if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
2879 SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
2880 return -EINVAL;
2881 sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
2882 if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
2883 __ilog2_u32(sbi->blocks_per_blkz))
2884 return -EINVAL;
2885 sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
2886 FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
2887 sbi->log_blocks_per_blkz;
2888 if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
2889 FDEV(devi).nr_blkz++;
2890
2891 FDEV(devi).blkz_seq = f2fs_kzalloc(sbi,
2892 BITS_TO_LONGS(FDEV(devi).nr_blkz)
2893 * sizeof(unsigned long),
2894 GFP_KERNEL);
2895 if (!FDEV(devi).blkz_seq)
2896 return -ENOMEM;
2897
2898 #define F2FS_REPORT_NR_ZONES 4096
2899
2900 zones = f2fs_kzalloc(sbi,
2901 array_size(F2FS_REPORT_NR_ZONES,
2902 sizeof(struct blk_zone)),
2903 GFP_KERNEL);
2904 if (!zones)
2905 return -ENOMEM;
2906
2907 /* Get block zones type */
2908 while (zones && sector < nr_sectors) {
2909
2910 nr_zones = F2FS_REPORT_NR_ZONES;
2911 err = blkdev_report_zones(bdev, sector, zones, &nr_zones);
2912 if (err)
2913 break;
2914 if (!nr_zones) {
2915 err = -EIO;
2916 break;
2917 }
2918
2919 for (i = 0; i < nr_zones; i++) {
2920 if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL)
2921 set_bit(n, FDEV(devi).blkz_seq);
2922 sector += zones[i].len;
2923 n++;
2924 }
2925 }
2926
2927 kvfree(zones);
2928
2929 return err;
2930 }
2931 #endif
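/*
 * Editor's note (not in the original source): init_blkz_info() builds a
 * per-device bitmap (blkz_seq) with one bit per zone; the bit is set
 * for every non-conventional (sequential) zone and left clear for
 * conventional zones. Zones are queried in batches of
 * F2FS_REPORT_NR_ZONES so the temporary buffer stays bounded no matter
 * how large the device is.
 */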
2932
2933 /*
2934 * Read the f2fs raw super block.
2935 * Because we have two copies of the super block, read both of them
2936 * to get the first valid one. If either is broken, pass the
2937 * recovery flag back to the caller.
2938 */
2939 static int read_raw_super_block(struct f2fs_sb_info *sbi,
2940 struct f2fs_super_block **raw_super,
2941 int *valid_super_block, int *recovery)
2942 {
2943 struct super_block *sb = sbi->sb;
2944 int block;
2945 struct buffer_head *bh;
2946 struct f2fs_super_block *super;
2947 int err = 0;
2948
2949 super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
2950 if (!super)
2951 return -ENOMEM;
2952
2953 for (block = 0; block < 2; block++) {
2954 bh = sb_bread(sb, block);
2955 if (!bh) {
2956 f2fs_err(sbi, "Unable to read %dth superblock",
2957 block + 1);
2958 err = -EIO;
2959 continue;
2960 }
2961
2962 /* sanity checking of raw super */
2963 err = sanity_check_raw_super(sbi, bh);
2964 if (err) {
2965 f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
2966 block + 1);
2967 brelse(bh);
2968 continue;
2969 }
2970
2971 if (!*raw_super) {
2972 memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
2973 sizeof(*super));
2974 *valid_super_block = block;
2975 *raw_super = super;
2976 }
2977 brelse(bh);
2978 }
2979
2980 /* failed to read either superblock */
2981 if (err < 0)
2982 *recovery = 1;
2983
2984 /* No valid superblock */
2985 if (!*raw_super)
2986 kvfree(super);
2987 else
2988 err = 0;
2989
2990 return err;
2991 }
2992
2993 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
2994 {
2995 struct buffer_head *bh;
2996 __u32 crc = 0;
2997 int err;
2998
2999 if ((recover && f2fs_readonly(sbi->sb)) ||
3000 bdev_read_only(sbi->sb->s_bdev)) {
3001 set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
3002 return -EROFS;
3003 }
3004
3005 /* we should update superblock crc here */
3006 if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
3007 crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
3008 offsetof(struct f2fs_super_block, crc));
3009 F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
3010 }
3011
3012 /* write back-up superblock first */
3013 bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
3014 if (!bh)
3015 return -EIO;
3016 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
3017 brelse(bh);
3018
3019 /* if we are in recovery path, skip writing valid superblock */
3020 if (recover || err)
3021 return err;
3022
3023 /* write current valid superblock */
3024 bh = sb_bread(sbi->sb, sbi->valid_super_block);
3025 if (!bh)
3026 return -EIO;
3027 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
3028 brelse(bh);
3029 return err;
3030 }
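/*
 * Editor's note (not in the original source): the ordering above is
 * what keeps a crash from destroying both superblocks: the backup copy
 * is rewritten first with preflush + FUA, and only once that commit
 * has succeeded is the currently-valid copy rewritten. At any instant
 * at least one of the two copies is consistent on disk.
 */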
3031
3032 static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
3033 {
3034 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3035 unsigned int max_devices = MAX_DEVICES;
3036 int i;
3037
3038 /* Initialize single device information */
3039 if (!RDEV(0).path[0]) {
3040 if (!bdev_is_zoned(sbi->sb->s_bdev))
3041 return 0;
3042 max_devices = 1;
3043 }
3044
3045 /*
3046 * Initialize multiple devices information, or single
3047 * zoned block device information.
3048 */
3049 sbi->devs = f2fs_kzalloc(sbi,
3050 array_size(max_devices,
3051 sizeof(struct f2fs_dev_info)),
3052 GFP_KERNEL);
3053 if (!sbi->devs)
3054 return -ENOMEM;
3055
3056 for (i = 0; i < max_devices; i++) {
3057
3058 if (i > 0 && !RDEV(i).path[0])
3059 break;
3060
3061 if (max_devices == 1) {
3062 /* Single zoned block device mount */
3063 FDEV(0).bdev =
3064 blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
3065 sbi->sb->s_mode, sbi->sb->s_type);
3066 } else {
3067 /* Multi-device mount */
3068 memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
3069 FDEV(i).total_segments =
3070 le32_to_cpu(RDEV(i).total_segments);
3071 if (i == 0) {
3072 FDEV(i).start_blk = 0;
3073 FDEV(i).end_blk = FDEV(i).start_blk +
3074 (FDEV(i).total_segments <<
3075 sbi->log_blocks_per_seg) - 1 +
3076 le32_to_cpu(raw_super->segment0_blkaddr);
3077 } else {
3078 FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
3079 FDEV(i).end_blk = FDEV(i).start_blk +
3080 (FDEV(i).total_segments <<
3081 sbi->log_blocks_per_seg) - 1;
3082 }
3083 FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
3084 sbi->sb->s_mode, sbi->sb->s_type);
3085 }
3086 if (IS_ERR(FDEV(i).bdev))
3087 return PTR_ERR(FDEV(i).bdev);
3088
3089 /* to release errored devices */
3090 sbi->s_ndevs = i + 1;
3091
3092 #ifdef CONFIG_BLK_DEV_ZONED
3093 if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
3094 !f2fs_sb_has_blkzoned(sbi)) {
3095 f2fs_err(sbi, "Zoned block device feature not enabled");
3096 return -EINVAL;
3097 }
3098 if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
3099 if (init_blkz_info(sbi, i)) {
3100 f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
3101 return -EINVAL;
3102 }
3103 if (max_devices == 1)
3104 break;
3105 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
3106 i, FDEV(i).path,
3107 FDEV(i).total_segments,
3108 FDEV(i).start_blk, FDEV(i).end_blk,
3109 bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
3110 "Host-aware" : "Host-managed");
3111 continue;
3112 }
3113 #endif
3114 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
3115 i, FDEV(i).path,
3116 FDEV(i).total_segments,
3117 FDEV(i).start_blk, FDEV(i).end_blk);
3118 }
3119 f2fs_info(sbi,
3120 "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
3121 return 0;
3122 }
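/*
 * Editor's note, a sketch (not in the original source): multiple
 * devices are mapped back-to-back into one flat block address space.
 * With two illustrative devices of 512 segments each (512 blocks per
 * segment) and segment0_blkaddr == 512:
 *
 *   dev 0: start_blk = 0
 *          end_blk   = 512 * 512 - 1 + 512     = 262,655
 *   dev 1: start_blk = 262,656
 *          end_blk   = 262,656 + 512 * 512 - 1 = 524,799
 *
 * The segment counts are hypothetical; real values come from the
 * superblock's device table (RDEV).
 */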
3123
3124 static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
3125 {
3126 #ifdef CONFIG_UNICODE
3127 if (f2fs_sb_has_casefold(sbi) && !sbi->s_encoding) {
3128 const struct f2fs_sb_encodings *encoding_info;
3129 struct unicode_map *encoding;
3130 __u16 encoding_flags;
3131
3132 if (f2fs_sb_has_encrypt(sbi)) {
3133 f2fs_err(sbi,
3134 "Can't mount with encoding and encryption");
3135 return -EINVAL;
3136 }
3137
3138 if (f2fs_sb_read_encoding(sbi->raw_super, &encoding_info,
3139 &encoding_flags)) {
3140 f2fs_err(sbi,
3141 "Encoding requested by superblock is unknown");
3142 return -EINVAL;
3143 }
3144
3145 encoding = utf8_load(encoding_info->version);
3146 if (IS_ERR(encoding)) {
3147 f2fs_err(sbi,
3148 "can't mount with superblock charset: %s-%s "
3149 "not supported by the kernel. flags: 0x%x.",
3150 encoding_info->name, encoding_info->version,
3151 encoding_flags);
3152 return PTR_ERR(encoding);
3153 }
3154 f2fs_info(sbi, "Using encoding defined by superblock: "
3155 "%s-%s with flags 0x%hx", encoding_info->name,
3156 encoding_info->version?:"\b", encoding_flags);
3157
3158 sbi->s_encoding = encoding;
3159 sbi->s_encoding_flags = encoding_flags;
3160 sbi->sb->s_d_op = &f2fs_dentry_ops;
3161 }
3162 #else
3163 if (f2fs_sb_has_casefold(sbi)) {
3164 f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
3165 return -EINVAL;
3166 }
3167 #endif
3168 return 0;
3169 }
3170
3171 static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
3172 {
3173 struct f2fs_sm_info *sm_i = SM_I(sbi);
3174
3175 /* adjust parameters according to the volume size */
3176 if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
3177 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
3178 sm_i->dcc_info->discard_granularity = 1;
3179 sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
3180 }
3181
3182 sbi->readdir_ra = 1;
3183 }
3184
3185 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
3186 {
3187 struct f2fs_sb_info *sbi;
3188 struct f2fs_super_block *raw_super;
3189 struct inode *root;
3190 int err;
3191 bool skip_recovery = false, need_fsck = false;
3192 char *options = NULL;
3193 int recovery, i, valid_super_block;
3194 struct curseg_info *seg_i;
3195 int retry_cnt = 1;
3196
3197 try_onemore:
3198 err = -EINVAL;
3199 raw_super = NULL;
3200 valid_super_block = -1;
3201 recovery = 0;
3202
3203
3204 sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
3205 if (!sbi)
3206 return -ENOMEM;
3207
3208 sbi->sb = sb;
3209
3210 /* Load the checksum driver */
3211 sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
3212 if (IS_ERR(sbi->s_chksum_driver)) {
3213 f2fs_err(sbi, "Cannot load crc32 driver.");
3214 err = PTR_ERR(sbi->s_chksum_driver);
3215 sbi->s_chksum_driver = NULL;
3216 goto free_sbi;
3217 }
3218
3219 /* set a block size */
3220 if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
3221 f2fs_err(sbi, "unable to set blocksize");
3222 goto free_sbi;
3223 }
3224
3225 err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
3226 &recovery);
3227 if (err)
3228 goto free_sbi;
3229
3230 sb->s_fs_info = sbi;
3231 sbi->raw_super = raw_super;
3232
3233 /* precompute checksum seed for metadata */
3234 if (f2fs_sb_has_inode_chksum(sbi))
3235 sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
3236 sizeof(raw_super->uuid));
3237
3238 /*
3239 * The BLKZONED feature indicates that the drive was formatted with
3240 * zone alignment optimization. This is optional for host-aware
3241 * devices, but mandatory for host-managed zoned block devices.
3242 */
3243 #ifndef CONFIG_BLK_DEV_ZONED
3244 if (f2fs_sb_has_blkzoned(sbi)) {
3245 f2fs_err(sbi, "Zoned block device support is not enabled");
3246 err = -EOPNOTSUPP;
3247 goto free_sb_buf;
3248 }
3249 #endif
3250 default_options(sbi);
3251
3252 options = kstrdup((const char *)data, GFP_KERNEL);
3253 if (data && !options) {
3254 err = -ENOMEM;
3255 goto free_sb_buf;
3256 }
3257
3258 err = parse_options(sb, options);
3259 if (err)
3260 goto free_options;
3261
3262 sbi->max_file_blocks = max_file_blocks();
3263 sb->s_maxbytes = sbi->max_file_blocks <<
3264 le32_to_cpu(raw_super->log_blocksize);
3265 sb->s_max_links = F2FS_LINK_MAX;
3266
3267 err = f2fs_setup_casefold(sbi);
3268 if (err)
3269 goto free_options;
3270
3271 #ifdef CONFIG_QUOTA
3272 sb->dq_op = &f2fs_quota_operations;
3273 sb->s_qcop = &f2fs_quotactl_ops;
3274 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
3275
3276 if (f2fs_sb_has_quota_ino(sbi)) {
3277 for (i = 0; i < MAXQUOTAS; i++) {
3278 if (f2fs_qf_ino(sbi->sb, i))
3279 sbi->nquota_files++;
3280 }
3281 }
3282 #endif
3283
3284 sb->s_op = &f2fs_sops;
3285 #ifdef CONFIG_FS_ENCRYPTION
3286 sb->s_cop = &f2fs_cryptops;
3287 #endif
3288 #ifdef CONFIG_FS_VERITY
3289 sb->s_vop = &f2fs_verityops;
3290 #endif
3291 sb->s_xattr = f2fs_xattr_handlers;
3292 sb->s_export_op = &f2fs_export_ops;
3293 sb->s_magic = F2FS_SUPER_MAGIC;
3294 sb->s_time_gran = 1;
3295 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
3296 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
3297 memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
3298 sb->s_iflags |= SB_I_CGROUPWB;
3299
3300 /* init f2fs-specific super block info */
3301 sbi->valid_super_block = valid_super_block;
3302 mutex_init(&sbi->gc_mutex);
3303 mutex_init(&sbi->writepages);
3304 mutex_init(&sbi->cp_mutex);
3305 mutex_init(&sbi->resize_mutex);
3306 init_rwsem(&sbi->node_write);
3307 init_rwsem(&sbi->node_change);
3308
3309 /* disallow all the data/node/meta page writes */
3310 set_sbi_flag(sbi, SBI_POR_DOING);
3311 spin_lock_init(&sbi->stat_lock);
3312
3313 /* init iostat info */
3314 spin_lock_init(&sbi->iostat_lock);
3315 sbi->iostat_enable = false;
3316
3317 for (i = 0; i < NR_PAGE_TYPE; i++) {
3318 int n = (i == META) ? 1: NR_TEMP_TYPE;
3319 int j;
3320
3321 sbi->write_io[i] =
3322 f2fs_kmalloc(sbi,
3323 array_size(n,
3324 sizeof(struct f2fs_bio_info)),
3325 GFP_KERNEL);
3326 if (!sbi->write_io[i]) {
3327 err = -ENOMEM;
3328 goto free_bio_info;
3329 }
3330
3331 for (j = HOT; j < n; j++) {
3332 init_rwsem(&sbi->write_io[i][j].io_rwsem);
3333 sbi->write_io[i][j].sbi = sbi;
3334 sbi->write_io[i][j].bio = NULL;
3335 spin_lock_init(&sbi->write_io[i][j].io_lock);
3336 INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
3337 }
3338 }
3339
3340 init_rwsem(&sbi->cp_rwsem);
3341 init_rwsem(&sbi->quota_sem);
3342 init_waitqueue_head(&sbi->cp_wait);
3343 init_sb_info(sbi);
3344
3345 err = init_percpu_info(sbi);
3346 if (err)
3347 goto free_bio_info;
3348
3349 if (F2FS_IO_ALIGNED(sbi)) {
3350 sbi->write_io_dummy =
3351 mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
3352 if (!sbi->write_io_dummy) {
3353 err = -ENOMEM;
3354 goto free_percpu;
3355 }
3356 }
3357
3358 /* get an inode for meta space */
3359 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
3360 if (IS_ERR(sbi->meta_inode)) {
3361 f2fs_err(sbi, "Failed to read F2FS meta data inode");
3362 err = PTR_ERR(sbi->meta_inode);
3363 goto free_io_dummy;
3364 }
3365
3366 err = f2fs_get_valid_checkpoint(sbi);
3367 if (err) {
3368 f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
3369 goto free_meta_inode;
3370 }
3371
3372 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
3373 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3374 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
3375 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
3376 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
3377 }
3378
3379 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
3380 set_sbi_flag(sbi, SBI_NEED_FSCK);
3381
3382 /* Initialize device list */
3383 err = f2fs_scan_devices(sbi);
3384 if (err) {
3385 f2fs_err(sbi, "Failed to find devices");
3386 goto free_devices;
3387 }
3388
3389 sbi->total_valid_node_count =
3390 le32_to_cpu(sbi->ckpt->valid_node_count);
3391 percpu_counter_set(&sbi->total_valid_inode_count,
3392 le32_to_cpu(sbi->ckpt->valid_inode_count));
3393 sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
3394 sbi->total_valid_block_count =
3395 le64_to_cpu(sbi->ckpt->valid_block_count);
3396 sbi->last_valid_block_count = sbi->total_valid_block_count;
3397 sbi->reserved_blocks = 0;
3398 sbi->current_reserved_blocks = 0;
3399 limit_reserve_root(sbi);
3400
3401 for (i = 0; i < NR_INODE_TYPE; i++) {
3402 INIT_LIST_HEAD(&sbi->inode_list[i]);
3403 spin_lock_init(&sbi->inode_lock[i]);
3404 }
3405 mutex_init(&sbi->flush_lock);
3406
3407 f2fs_init_extent_cache_info(sbi);
3408
3409 f2fs_init_ino_entry_info(sbi);
3410
3411 f2fs_init_fsync_node_info(sbi);
3412
3413 /* setup f2fs internal modules */
3414 err = f2fs_build_segment_manager(sbi);
3415 if (err) {
3416 f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
3417 err);
3418 goto free_sm;
3419 }
3420 err = f2fs_build_node_manager(sbi);
3421 if (err) {
3422 f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
3423 err);
3424 goto free_nm;
3425 }
3426
3427 /* For write statistics */
3428 if (sb->s_bdev->bd_part)
3429 sbi->sectors_written_start =
3430 (u64)part_stat_read(sb->s_bdev->bd_part,
3431 sectors[STAT_WRITE]);
3432
3433 /* Read accumulated write IO statistics if exists */
3434 seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
3435 if (__exist_node_summaries(sbi))
3436 sbi->kbytes_written =
3437 le64_to_cpu(seg_i->journal->info.kbytes_written);
3438
3439 f2fs_build_gc_manager(sbi);
3440
3441 err = f2fs_build_stats(sbi);
3442 if (err)
3443 goto free_nm;
3444
3445 /* get an inode for node space */
3446 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
3447 if (IS_ERR(sbi->node_inode)) {
3448 f2fs_err(sbi, "Failed to read node inode");
3449 err = PTR_ERR(sbi->node_inode);
3450 goto free_stats;
3451 }
3452
3453 /* read root inode and dentry */
3454 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
3455 if (IS_ERR(root)) {
3456 f2fs_err(sbi, "Failed to read root inode");
3457 err = PTR_ERR(root);
3458 goto free_node_inode;
3459 }
3460 if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
3461 !root->i_size || !root->i_nlink) {
3462 iput(root);
3463 err = -EINVAL;
3464 goto free_node_inode;
3465 }
3466
3467 sb->s_root = d_make_root(root);
3468 if (!sb->s_root) {
3469 err = -ENOMEM;
3470 goto free_node_inode;
3471 }
3472
3473 err = f2fs_register_sysfs(sbi);
3474 if (err)
3475 goto free_root_inode;
3476
3477 #ifdef CONFIG_QUOTA
3478 /* Enable quota usage during mount */
3479 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
3480 err = f2fs_enable_quotas(sb);
3481 if (err)
3482 f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
3483 }
3484 #endif
3485
3486 err = f2fs_recover_orphan_inodes(sbi);
3487 if (err)
3488 goto free_meta;
3489
3490 if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
3491 goto reset_checkpoint;
3492
3493 /* recover fsynced data */
3494 if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
3495 !test_opt(sbi, NORECOVERY)) {
3496 /*
3497 * mount should fail when the device is read-only and the
3498 * previous checkpoint was not made by a clean shutdown.
3499 */
3500 if (f2fs_hw_is_readonly(sbi)) {
3501 if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
3502 err = -EROFS;
3503 f2fs_err(sbi, "Need to recover fsync data, but write access unavailable");
3504 goto free_meta;
3505 }
3506 f2fs_info(sbi, "write access unavailable, skipping recovery");
3507 goto reset_checkpoint;
3508 }
3509
3510 if (need_fsck)
3511 set_sbi_flag(sbi, SBI_NEED_FSCK);
3512
3513 if (skip_recovery)
3514 goto reset_checkpoint;
3515
3516 err = f2fs_recover_fsync_data(sbi, false);
3517 if (err < 0) {
3518 if (err != -ENOMEM)
3519 skip_recovery = true;
3520 need_fsck = true;
3521 f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
3522 err);
3523 goto free_meta;
3524 }
3525 } else {
3526 err = f2fs_recover_fsync_data(sbi, true);
3527
3528 if (!f2fs_readonly(sb) && err > 0) {
3529 err = -EINVAL;
3530 f2fs_err(sbi, "Need to recover fsync data");
3531 goto free_meta;
3532 }
3533 }
3534 reset_checkpoint:
3535 /* f2fs_recover_fsync_data() cleared this already */
3536 clear_sbi_flag(sbi, SBI_POR_DOING);
3537
3538 if (test_opt(sbi, DISABLE_CHECKPOINT)) {
3539 err = f2fs_disable_checkpoint(sbi);
3540 if (err)
3541 goto sync_free_meta;
3542 } else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
3543 f2fs_enable_checkpoint(sbi);
3544 }
3545
3546 /*
3547 * If the filesystem is not mounted as read-only then
3548 * do start the gc_thread.
3549 */
3550 if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
3551 /* After POR, we can run background GC thread */
3552 err = f2fs_start_gc_thread(sbi);
3553 if (err)
3554 goto sync_free_meta;
3555 }
3556 kvfree(options);
3557
3558 /* recover broken superblock */
3559 if (recovery) {
3560 err = f2fs_commit_super(sbi, true);
3561 f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
3562 sbi->valid_super_block ? 1 : 2, err);
3563 }
3564
3565 f2fs_join_shrinker(sbi);
3566
3567 f2fs_tuning_parameters(sbi);
3568
3569 f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
3570 cur_cp_version(F2FS_CKPT(sbi)));
3571 f2fs_update_time(sbi, CP_TIME);
3572 f2fs_update_time(sbi, REQ_TIME);
3573 clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
3574 return 0;
3575
3576 sync_free_meta:
3577 /* safe to flush all the data */
3578 sync_filesystem(sbi->sb);
3579 retry_cnt = 0;
3580
3581 free_meta:
3582 #ifdef CONFIG_QUOTA
3583 f2fs_truncate_quota_inode_pages(sb);
3584 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
3585 f2fs_quota_off_umount(sbi->sb);
3586 #endif
3587
3588 /*
3589 * Dirty meta pages can be left behind when f2fs_recover_orphan_inodes()
3590 * fails with EIO; iput(node_inode) can then trigger a checkpoint via
3591 * f2fs_write_node_pages() and loop forever in f2fs_sync_meta_pages().
3592 */
3593 truncate_inode_pages_final(META_MAPPING(sbi));
3594 /* evict some inodes being cached by GC */
3595 evict_inodes(sb);
3596 f2fs_unregister_sysfs(sbi);
3597 free_root_inode:
3598 dput(sb->s_root);
3599 sb->s_root = NULL;
3600 free_node_inode:
3601 f2fs_release_ino_entry(sbi, true);
3602 truncate_inode_pages_final(NODE_MAPPING(sbi));
3603 iput(sbi->node_inode);
3604 sbi->node_inode = NULL;
3605 free_stats:
3606 f2fs_destroy_stats(sbi);
3607 free_nm:
3608 f2fs_destroy_node_manager(sbi);
3609 free_sm:
3610 f2fs_destroy_segment_manager(sbi);
3611 free_devices:
3612 destroy_device_list(sbi);
3613 kvfree(sbi->ckpt);
3614 free_meta_inode:
3615 make_bad_inode(sbi->meta_inode);
3616 iput(sbi->meta_inode);
3617 sbi->meta_inode = NULL;
3618 free_io_dummy:
3619 mempool_destroy(sbi->write_io_dummy);
3620 free_percpu:
3621 destroy_percpu_info(sbi);
3622 free_bio_info:
3623 for (i = 0; i < NR_PAGE_TYPE; i++)
3624 kvfree(sbi->write_io[i]);
3625
3626 #ifdef CONFIG_UNICODE
3627 utf8_unload(sbi->s_encoding);
3628 #endif
3629 free_options:
3630 #ifdef CONFIG_QUOTA
3631 for (i = 0; i < MAXQUOTAS; i++)
3632 kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
3633 #endif
3634 kvfree(options);
3635 free_sb_buf:
3636 kvfree(raw_super);
3637 free_sbi:
3638 if (sbi->s_chksum_driver)
3639 crypto_free_shash(sbi->s_chksum_driver);
3640 kvfree(sbi);
3641
3642 /* give only one more chance */
3643 if (retry_cnt > 0 && skip_recovery) {
3644 retry_cnt--;
3645 shrink_dcache_sb(sb);
3646 goto try_onemore;
3647 }
3648 return err;
3649 }
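/*
 * Editor's note (not in the original source): fill_super in brief:
 * read and verify the superblock, parse options, load the meta inode
 * and a valid checkpoint, scan devices, bring up the segment/node
 * managers, load the node and root inodes, register sysfs, enable
 * quotas, recover orphans and fsynced data, then start background GC.
 * The error labels unwind in exactly the reverse order, and retry_cnt
 * permits one full retry with recovery skipped when
 * f2fs_recover_fsync_data() fails.
 */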
3650
3651 static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
3652 const char *dev_name, void *data)
3653 {
3654 return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
3655 }
3656
3657 static void kill_f2fs_super(struct super_block *sb)
3658 {
3659 if (sb->s_root) {
3660 struct f2fs_sb_info *sbi = F2FS_SB(sb);
3661
3662 set_sbi_flag(sbi, SBI_IS_CLOSE);
3663 f2fs_stop_gc_thread(sbi);
3664 f2fs_stop_discard_thread(sbi);
3665
3666 if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
3667 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
3668 struct cp_control cpc = {
3669 .reason = CP_UMOUNT,
3670 };
3671 f2fs_write_checkpoint(sbi, &cpc);
3672 }
3673
3674 if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
3675 sb->s_flags &= ~SB_RDONLY;
3676 }
3677 kill_block_super(sb);
3678 }
3679
3680 static struct file_system_type f2fs_fs_type = {
3681 .owner = THIS_MODULE,
3682 .name = "f2fs",
3683 .mount = f2fs_mount,
3684 .kill_sb = kill_f2fs_super,
3685 .fs_flags = FS_REQUIRES_DEV,
3686 };
3687 MODULE_ALIAS_FS("f2fs");
3688
3689 static int __init init_inodecache(void)
3690 {
3691 f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
3692 sizeof(struct f2fs_inode_info), 0,
3693 SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
3694 if (!f2fs_inode_cachep)
3695 return -ENOMEM;
3696 return 0;
3697 }
3698
3699 static void destroy_inodecache(void)
3700 {
3701 /*
3702 * Make sure all delayed rcu free inodes are flushed before we
3703 * destroy cache.
3704 */
3705 rcu_barrier();
3706 kmem_cache_destroy(f2fs_inode_cachep);
3707 }
3708
3709 static int __init init_f2fs_fs(void)
3710 {
3711 int err;
3712
3713 if (PAGE_SIZE != F2FS_BLKSIZE) {
3714 printk(KERN_ERR "F2FS not supported on PAGE_SIZE(%lu) != %d\n",
3715 PAGE_SIZE, F2FS_BLKSIZE);
3716 return -EINVAL;
3717 }
3718
3719 f2fs_build_trace_ios();
3720
3721 err = init_inodecache();
3722 if (err)
3723 goto fail;
3724 err = f2fs_create_node_manager_caches();
3725 if (err)
3726 goto free_inodecache;
3727 err = f2fs_create_segment_manager_caches();
3728 if (err)
3729 goto free_node_manager_caches;
3730 err = f2fs_create_checkpoint_caches();
3731 if (err)
3732 goto free_segment_manager_caches;
3733 err = f2fs_create_extent_cache();
3734 if (err)
3735 goto free_checkpoint_caches;
3736 err = f2fs_init_sysfs();
3737 if (err)
3738 goto free_extent_cache;
3739 err = register_shrinker(&f2fs_shrinker_info);
3740 if (err)
3741 goto free_sysfs;
3742 err = register_filesystem(&f2fs_fs_type);
3743 if (err)
3744 goto free_shrinker;
3745 f2fs_create_root_stats();
3746 err = f2fs_init_post_read_processing();
3747 if (err)
3748 goto free_root_stats;
3749 return 0;
3750
3751 free_root_stats:
3752 f2fs_destroy_root_stats();
3753 unregister_filesystem(&f2fs_fs_type);
3754 free_shrinker:
3755 unregister_shrinker(&f2fs_shrinker_info);
3756 free_sysfs:
3757 f2fs_exit_sysfs();
3758 free_extent_cache:
3759 f2fs_destroy_extent_cache();
3760 free_checkpoint_caches:
3761 f2fs_destroy_checkpoint_caches();
3762 free_segment_manager_caches:
3763 f2fs_destroy_segment_manager_caches();
3764 free_node_manager_caches:
3765 f2fs_destroy_node_manager_caches();
3766 free_inodecache:
3767 destroy_inodecache();
3768 fail:
3769 return err;
3770 }
3771
3772 static void __exit exit_f2fs_fs(void)
3773 {
3774 f2fs_destroy_post_read_processing();
3775 f2fs_destroy_root_stats();
3776 unregister_filesystem(&f2fs_fs_type);
3777 unregister_shrinker(&f2fs_shrinker_info);
3778 f2fs_exit_sysfs();
3779 f2fs_destroy_extent_cache();
3780 f2fs_destroy_checkpoint_caches();
3781 f2fs_destroy_segment_manager_caches();
3782 f2fs_destroy_node_manager_caches();
3783 destroy_inodecache();
3784 f2fs_destroy_trace_ios();
3785 }
3786
3787 module_init(init_f2fs_fs)
3788 module_exit(exit_f2fs_fs)
3789
3790 MODULE_AUTHOR("Samsung Electronics's Praesto Team");
3791 MODULE_DESCRIPTION("Flash Friendly File System");
3792 MODULE_LICENSE("GPL");
3793