This source file includes the following definitions:
- super_cache_scan
- super_cache_count
- destroy_super_work
- destroy_super_rcu
- destroy_unused_super
- alloc_super
- __put_super
- put_super
- deactivate_locked_super
- deactivate_super
- grab_super
- trylock_super
- generic_shutdown_super
- mount_capable
- sget_fc
- sget
- drop_super
- drop_super_exclusive
- __iterate_supers
- iterate_supers
- iterate_supers_type
- __get_super
- get_super
- __get_super_thawed
- get_super_thawed
- get_super_exclusive_thawed
- get_active_super
- user_get_super
- reconfigure_super
- do_emergency_remount_callback
- do_emergency_remount
- emergency_remount
- do_thaw_all_callback
- do_thaw_all
- emergency_thaw_all
- get_anon_bdev
- free_anon_bdev
- set_anon_super
- kill_anon_super
- kill_litter_super
- set_anon_super_fc
- test_keyed_super
- test_single_super
- vfs_get_super
- get_tree_nodev
- get_tree_single
- get_tree_single_reconf
- get_tree_keyed
- set_bdev_super
- set_bdev_super_fc
- test_bdev_super_fc
- get_tree_bdev
- test_bdev_super
- mount_bdev
- kill_block_super
- mount_nodev
- reconfigure_single
- compare_single
- mount_single
- vfs_get_tree
- super_setup_bdi_name
- super_setup_bdi
- __sb_end_write
- __sb_start_write
- sb_wait_write
- lockdep_sb_freeze_release
- lockdep_sb_freeze_acquire
- sb_freeze_unlock
- freeze_super
- thaw_super_locked
- thaw_super
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/fs/super.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  *
7  *  super.c contains code to handle: - mount structures
8  *                                   - super-block tables
9  *                                   - filesystem drivers list
10  *                                   - mount system call
11  *                                   - umount system call
12  *                                   - generic_shutdown_super() routine
13  *
14  *  GK 2/5/95  -  Changed to support mounting the root fs via NFS
15  *
16  *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
17  *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
18  *  Added options to /proc/mounts:
19  *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
20  *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
21  *  Monta Vista Software, 2001
22  */
23
24 #include <linux/export.h>
25 #include <linux/slab.h>
26 #include <linux/blkdev.h>
27 #include <linux/mount.h>
28 #include <linux/security.h>
29 #include <linux/writeback.h>
30 #include <linux/idr.h>
31 #include <linux/mutex.h>
32 #include <linux/backing-dev.h>
33 #include <linux/rculist_bl.h>
34 #include <linux/cleancache.h>
35 #include <linux/fscrypt.h>
36 #include <linux/fsnotify.h>
37 #include <linux/lockdep.h>
38 #include <linux/user_namespace.h>
39 #include <linux/fs_context.h>
40 #include <uapi/linux/mount.h>
41 #include "internal.h"
42
43 static int thaw_super_locked(struct super_block *sb);
44
45 static LIST_HEAD(super_blocks);
46 static DEFINE_SPINLOCK(sb_lock);
47
48 static char *sb_writers_name[SB_FREEZE_LEVELS] = {
49 "sb_writers",
50 "sb_pagefaults",
51 "sb_internal",
52 };
53
54 /*
55  * One thing we have to be careful of with a per-sb shrinker is that we don't
56  * drop the last active reference to the superblock from within the shrinker.
57  * If that happened we could trigger unmounting the filesystem and end up
58  * freeing the superblock while it is still in use.
59  */
60
61 static unsigned long super_cache_scan(struct shrinker *shrink,
62 struct shrink_control *sc)
63 {
64 struct super_block *sb;
65 long fs_objects = 0;
66 long total_objects;
67 long freed = 0;
68 long dentries;
69 long inodes;
70
71 sb = container_of(shrink, struct super_block, s_shrink);
72
73 	/*
74 	 * Deadlock avoidance: we may hold various FS locks, and we don't want
75 	 * to recurse into the FS that called us in clear_inode() and friends.
76 	 */
77 if (!(sc->gfp_mask & __GFP_FS))
78 return SHRINK_STOP;
79
80 if (!trylock_super(sb))
81 return SHRINK_STOP;
82
83 if (sb->s_op->nr_cached_objects)
84 fs_objects = sb->s_op->nr_cached_objects(sb, sc);
85
86 inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
87 dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
88 total_objects = dentries + inodes + fs_objects + 1;
89 if (!total_objects)
90 total_objects = 1;
91
92 	/* proportion the scan between the caches */
93 dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
94 inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
95 fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);
96
97 	/*
98 	 * prune the dcache first as the icache is pinned by it, then
99 	 * prune the icache, followed by the filesystem specific caches.
100 	 *
101 	 * Ensure that we always scan at least one object - memcg kmem
102 	 * accounting uses this to fully empty the caches.
103 	 */
104 sc->nr_to_scan = dentries + 1;
105 freed = prune_dcache_sb(sb, sc);
106 sc->nr_to_scan = inodes + 1;
107 freed += prune_icache_sb(sb, sc);
108
109 if (fs_objects) {
110 sc->nr_to_scan = fs_objects + 1;
111 freed += sb->s_op->free_cached_objects(sb, sc);
112 }
113
114 up_read(&sb->s_umount);
115 return freed;
116 }
117
118 static unsigned long super_cache_count(struct shrinker *shrink,
119 struct shrink_control *sc)
120 {
121 struct super_block *sb;
122 long total_objects = 0;
123
124 sb = container_of(shrink, struct super_block, s_shrink);
125
126 	/*
127 	 * We don't call trylock_super() here as it is a scalability bottleneck,
128 	 * so we're exposed to partial setup state. The shrinker rwsem does not
129 	 * protect filesystem operations backing list_lru_shrink_count() or
130 	 * s_op->nr_cached_objects(). Counts can change between
131 	 * super_cache_count and super_cache_scan, so we really don't need locks
132 	 * here.
133 	 *
134 	 * However, if we are currently mounting the superblock, the underlying
135 	 * filesystem might be in a state of partial construction and hence it
136 	 * is dangerous to access it.  trylock_super() uses a SB_BORN check to
137 	 * avoid this situation, so do the same here. The memory barrier is
138 	 * matched with the one in vfs_get_tree() as we don't hold locks there.
139 	 */
140 if (!(sb->s_flags & SB_BORN))
141 return 0;
142 smp_rmb();
143
144 if (sb->s_op && sb->s_op->nr_cached_objects)
145 total_objects = sb->s_op->nr_cached_objects(sb, sc);
146
147 total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
148 total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);
149
150 if (!total_objects)
151 return SHRINK_EMPTY;
152
153 total_objects = vfs_pressure_ratio(total_objects);
154 return total_objects;
155 }
156
157 static void destroy_super_work(struct work_struct *work)
158 {
159 struct super_block *s = container_of(work, struct super_block,
160 destroy_work);
161 int i;
162
163 for (i = 0; i < SB_FREEZE_LEVELS; i++)
164 percpu_free_rwsem(&s->s_writers.rw_sem[i]);
165 kfree(s);
166 }
167
168 static void destroy_super_rcu(struct rcu_head *head)
169 {
170 struct super_block *s = container_of(head, struct super_block, rcu);
171 INIT_WORK(&s->destroy_work, destroy_super_work);
172 schedule_work(&s->destroy_work);
173 }
174
175 /* Free a superblock that has never been seen by anyone */
176 static void destroy_unused_super(struct super_block *s)
177 {
178 if (!s)
179 return;
180 up_write(&s->s_umount);
181 list_lru_destroy(&s->s_dentry_lru);
182 list_lru_destroy(&s->s_inode_lru);
183 security_sb_free(s);
184 put_user_ns(s->s_user_ns);
185 kfree(s->s_subtype);
186 free_prealloced_shrinker(&s->s_shrink);
187 	/* no delays needed */
188 destroy_super_work(&s->destroy_work);
189 }
190
191 /**
192  *	alloc_super	-	create new superblock
193  *	@type:	filesystem type superblock should belong to
194  *	@flags: the mount flags
195  *	@user_ns: User namespace for the super_block
196  *
197  *	Allocates and initializes a new &struct super_block.  alloc_super()
198  *	returns a pointer to the new superblock or %NULL if allocation failed.
199  */
200 static struct super_block *alloc_super(struct file_system_type *type, int flags,
201 struct user_namespace *user_ns)
202 {
203 struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
204 static const struct super_operations default_op;
205 int i;
206
207 if (!s)
208 return NULL;
209
210 INIT_LIST_HEAD(&s->s_mounts);
211 s->s_user_ns = get_user_ns(user_ns);
212 init_rwsem(&s->s_umount);
213 lockdep_set_class(&s->s_umount, &type->s_umount_key);
214
215 	/*
216 	 * sget() can have s_umount recursion.
217 	 *
218 	 * When it cannot find a suitable sb, it allocates a new
219 	 * one (this one), and tries again to find a suitable old
220 	 * one.
221 	 *
222 	 * In case that succeeds, it will acquire the s_umount
223 	 * lock of the old one. Since these are clearly distinct
224 	 * locks, and this object isn't exposed yet, there's no
225 	 * risk of deadlocks.
226 	 *
227 	 * Annotate this by putting this lock in a different subclass.
228 	 */
229 down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
230
231 if (security_sb_alloc(s))
232 goto fail;
233
234 for (i = 0; i < SB_FREEZE_LEVELS; i++) {
235 if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
236 sb_writers_name[i],
237 &type->s_writers_key[i]))
238 goto fail;
239 }
240 init_waitqueue_head(&s->s_writers.wait_unfrozen);
241 s->s_bdi = &noop_backing_dev_info;
242 s->s_flags = flags;
243 if (s->s_user_ns != &init_user_ns)
244 s->s_iflags |= SB_I_NODEV;
245 INIT_HLIST_NODE(&s->s_instances);
246 INIT_HLIST_BL_HEAD(&s->s_roots);
247 mutex_init(&s->s_sync_lock);
248 INIT_LIST_HEAD(&s->s_inodes);
249 spin_lock_init(&s->s_inode_list_lock);
250 INIT_LIST_HEAD(&s->s_inodes_wb);
251 spin_lock_init(&s->s_inode_wblist_lock);
252
253 s->s_count = 1;
254 atomic_set(&s->s_active, 1);
255 mutex_init(&s->s_vfs_rename_mutex);
256 lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
257 init_rwsem(&s->s_dquot.dqio_sem);
258 s->s_maxbytes = MAX_NON_LFS;
259 s->s_op = &default_op;
260 s->s_time_gran = 1000000000;
261 s->s_time_min = TIME64_MIN;
262 s->s_time_max = TIME64_MAX;
263 s->cleancache_poolid = CLEANCACHE_NO_POOL;
264
265 s->s_shrink.seeks = DEFAULT_SEEKS;
266 s->s_shrink.scan_objects = super_cache_scan;
267 s->s_shrink.count_objects = super_cache_count;
268 s->s_shrink.batch = 1024;
269 s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
270 if (prealloc_shrinker(&s->s_shrink))
271 goto fail;
272 if (list_lru_init_memcg(&s->s_dentry_lru, &s->s_shrink))
273 goto fail;
274 if (list_lru_init_memcg(&s->s_inode_lru, &s->s_shrink))
275 goto fail;
276 return s;
277
278 fail:
279 destroy_unused_super(s);
280 return NULL;
281 }
282
283
284 /*
285  * Drop a superblock's refcount.
286  * The caller must hold sb_lock.
287  */
288 static void __put_super(struct super_block *s)
289 {
290 if (!--s->s_count) {
291 list_del_init(&s->s_list);
292 WARN_ON(s->s_dentry_lru.node);
293 WARN_ON(s->s_inode_lru.node);
294 WARN_ON(!list_empty(&s->s_mounts));
295 security_sb_free(s);
296 fscrypt_sb_free(s);
297 put_user_ns(s->s_user_ns);
298 kfree(s->s_subtype);
299 call_rcu(&s->rcu, destroy_super_rcu);
300 }
301 }
302
303 /**
304  *	put_super	-	drop a temporary reference to superblock
305  *	@sb: superblock in question
306  *
307  *	Drops a temporary reference, frees superblock if there's no
308  *	references left.
309  */
310 static void put_super(struct super_block *sb)
311 {
312 spin_lock(&sb_lock);
313 __put_super(sb);
314 spin_unlock(&sb_lock);
315 }
316
317 /**
318  *	deactivate_locked_super	-	drop an active reference to superblock
319  *	@s: superblock to deactivate
320  *
321  *	Drops an active reference to superblock, converting it into a temporary
322  *	one if there is no other active references left.  In that case we
323  *	tell fs driver to shut it down and drop the temporary reference we
324  *	had just acquired.
325  *
326  *	Caller holds exclusive lock on superblock; that lock is released, and
327  *	the fs driver's kill_sb() may have run by the time we return.
328  */
329 void deactivate_locked_super(struct super_block *s)
330 {
331 struct file_system_type *fs = s->s_type;
332 if (atomic_dec_and_test(&s->s_active)) {
333 cleancache_invalidate_fs(s);
334 unregister_shrinker(&s->s_shrink);
335 fs->kill_sb(s);
336
337 		/*
338 		 * Since list_lru_destroy() may sleep, we cannot call it from
339 		 * put_super(), where we hold the sb_lock. Therefore we destroy
340 		 * the lru lists right now.
341 		 */
342 list_lru_destroy(&s->s_dentry_lru);
343 list_lru_destroy(&s->s_inode_lru);
344
345 put_filesystem(fs);
346 put_super(s);
347 } else {
348 up_write(&s->s_umount);
349 }
350 }
351
352 EXPORT_SYMBOL(deactivate_locked_super);
353
354 /**
355  *	deactivate_super	-	drop an active reference to superblock
356  *	@s: superblock to deactivate
357  *
358  *	Variant of deactivate_locked_super(), except that superblock is *not*
359  *	locked by caller.  If we are going to drop the final active reference,
360  *	lock will be acquired prior to that.
361  */
362 void deactivate_super(struct super_block *s)
363 {
364 if (!atomic_add_unless(&s->s_active, -1, 1)) {
365 down_write(&s->s_umount);
366 deactivate_locked_super(s);
367 }
368 }
369
370 EXPORT_SYMBOL(deactivate_super);
371
372 /*
373  *	grab_super - acquire an active reference
374  *	@s: reference we are trying to make active
375  *
376  *	Tries to acquire an active reference.  grab_super() is used when we
377  *	had just found a superblock in super_blocks or fs_type->fs_supers
378  *	and want to turn it into a full-blown active reference.  grab_super()
379  *	is called with sb_lock held and drops it.  Returns 1 in case of
380  *	success, 0 if we had failed (superblock contents was already dead or
381  *	dying when grab_super() had been called).  Note that this is only
382  *	called for superblocks not in rundown mode (== ones still on
383  *	->fs_supers list).
384  */
385 static int grab_super(struct super_block *s) __releases(sb_lock)
386 {
387 s->s_count++;
388 spin_unlock(&sb_lock);
389 down_write(&s->s_umount);
390 if ((s->s_flags & SB_BORN) && atomic_inc_not_zero(&s->s_active)) {
391 put_super(s);
392 return 1;
393 }
394 up_write(&s->s_umount);
395 put_super(s);
396 return 0;
397 }
398
399 /*
400  *	trylock_super - best effort to grab ->s_umount shared
401  *	@sb: reference we are trying to grab
402  *
403  *	Try to prevent fs shutdown.  This is used in places where we
404  *	cannot take an active reference but we need to ensure that the
405  *	filesystem is not shut down while we are working on it. It returns
406  *	false if we cannot acquire s_umount or if we lose the race and
407  *	filesystem already got into shutdown, and returns true with the s_umount
408  *	lock held in read mode in case of success. On successful return,
409  *	the caller must drop the s_umount lock when done.
410  *
411  *	Note that unlike get_super() et.al. this one does *not* bump ->s_count.
412  *	The reason why it's safe is that we are OK with doing trylock instead
413  *	of down_read().  There's a couple of places that are OK with that, but
414  *	it's very much not a general-purpose interface.
415  */
416 bool trylock_super(struct super_block *sb)
417 {
418 if (down_read_trylock(&sb->s_umount)) {
419 if (!hlist_unhashed(&sb->s_instances) &&
420 sb->s_root && (sb->s_flags & SB_BORN))
421 return true;
422 up_read(&sb->s_umount);
423 }
424
425 return false;
426 }
427
428 /**
429  *	generic_shutdown_super	-	common helper for ->kill_sb()
430  *	@sb: superblock to kill
431  *
432  *	generic_shutdown_super() does all fs-independent work on superblock
433  *	shutdown.  Typical ->kill_sb() should pick all fs-specific objects
434  *	that need destruction out of superblock, call generic_shutdown_super()
435  *	and release aforementioned objects.  Note: dentries and inodes _are_
436  *	taken care of and do not need specific handling.
437  *
438  *	Upon calling this function, the filesystem may no longer alter or
439  *	rearrange the set of dentries belonging to this super_block, nor may it
440  *	change the attachments of dentries to inodes.
441  */
442 void generic_shutdown_super(struct super_block *sb)
443 {
444 const struct super_operations *sop = sb->s_op;
445
446 if (sb->s_root) {
447 shrink_dcache_for_umount(sb);
448 sync_filesystem(sb);
449 sb->s_flags &= ~SB_ACTIVE;
450
451 cgroup_writeback_umount();
452
453 		/* evict all inodes with zero refcount */
454 evict_inodes(sb);
455 		/* only nonzero refcount inodes can have marks */
456 fsnotify_sb_delete(sb);
457
458 if (sb->s_dio_done_wq) {
459 destroy_workqueue(sb->s_dio_done_wq);
460 sb->s_dio_done_wq = NULL;
461 }
462
463 if (sop->put_super)
464 sop->put_super(sb);
465
466 if (!list_empty(&sb->s_inodes)) {
467 printk("VFS: Busy inodes after unmount of %s. "
468 "Self-destruct in 5 seconds. Have a nice day...\n",
469 sb->s_id);
470 }
471 }
472 spin_lock(&sb_lock);
473 	/* make iterators (and hlist_unhashed() checks) skip this superblock */
474 hlist_del_init(&sb->s_instances);
475 spin_unlock(&sb_lock);
476 up_write(&sb->s_umount);
477 if (sb->s_bdi != &noop_backing_dev_info) {
478 bdi_put(sb->s_bdi);
479 sb->s_bdi = &noop_backing_dev_info;
480 }
481 }
482
483 EXPORT_SYMBOL(generic_shutdown_super);
484
485 bool mount_capable(struct fs_context *fc)
486 {
487 if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
488 return capable(CAP_SYS_ADMIN);
489 else
490 return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
491 }
492
493 /**
494  * sget_fc - Find or create a superblock
495  * @fc:	Filesystem context.
496  * @test: Comparison callback
497  * @set: Setup callback
498  *
499  * Find or create a superblock using the parameters specified in the file
500  * system context.
501  *
502  * If an extant superblock is matched, then that will be returned with an
503  * elevated reference count that the caller must transfer or discard.
504  *
505  * If no match is made, a new superblock will be allocated and basic
506  * initialisation will be performed (s_type, s_fs_info and s_id will be set
507  * and the set() callback will be invoked), the superblock will be published
508  * and it will be returned in a partially constructed state with SB_BORN
509  * and SB_ACTIVE as yet unset.
510  */
511 struct super_block *sget_fc(struct fs_context *fc,
512 int (*test)(struct super_block *, struct fs_context *),
513 int (*set)(struct super_block *, struct fs_context *))
514 {
515 struct super_block *s = NULL;
516 struct super_block *old;
517 struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
518 int err;
519
520 retry:
521 spin_lock(&sb_lock);
522 if (test) {
523 hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
524 if (test(old, fc))
525 goto share_extant_sb;
526 }
527 }
528 if (!s) {
529 spin_unlock(&sb_lock);
530 s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
531 if (!s)
532 return ERR_PTR(-ENOMEM);
533 goto retry;
534 }
535
536 s->s_fs_info = fc->s_fs_info;
537 err = set(s, fc);
538 if (err) {
539 s->s_fs_info = NULL;
540 spin_unlock(&sb_lock);
541 destroy_unused_super(s);
542 return ERR_PTR(err);
543 }
544 fc->s_fs_info = NULL;
545 s->s_type = fc->fs_type;
546 s->s_iflags |= fc->s_iflags;
547 strlcpy(s->s_id, s->s_type->name, sizeof(s->s_id));
548 list_add_tail(&s->s_list, &super_blocks);
549 hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
550 spin_unlock(&sb_lock);
551 get_filesystem(s->s_type);
552 register_shrinker_prepared(&s->s_shrink);
553 return s;
554
555 share_extant_sb:
556 if (user_ns != old->s_user_ns) {
557 spin_unlock(&sb_lock);
558 destroy_unused_super(s);
559 return ERR_PTR(-EBUSY);
560 }
561 if (!grab_super(old))
562 goto retry;
563 destroy_unused_super(s);
564 return old;
565 }
566 EXPORT_SYMBOL(sget_fc);
567
568 /**
569  *	sget	-	find or create a superblock
570  *	@type:	filesystem type superblock should belong to
571  *	@test:	comparison callback
572  *	@set:	setup callback
573  *	@flags:	mount flags
574  *	@data:	argument to each of them
575  */
576 struct super_block *sget(struct file_system_type *type,
577 int (*test)(struct super_block *,void *),
578 int (*set)(struct super_block *,void *),
579 int flags,
580 void *data)
581 {
582 struct user_namespace *user_ns = current_user_ns();
583 struct super_block *s = NULL;
584 struct super_block *old;
585 int err;
586
587 	/*
588 	 * Submounts are initiated by the kernel, not by the mounting
589 	 * user, so they are always set up in the initial user namespace.
590 	 */
591 if (flags & SB_SUBMOUNT)
592 user_ns = &init_user_ns;
593
594 retry:
595 spin_lock(&sb_lock);
596 if (test) {
597 hlist_for_each_entry(old, &type->fs_supers, s_instances) {
598 if (!test(old, data))
599 continue;
600 if (user_ns != old->s_user_ns) {
601 spin_unlock(&sb_lock);
602 destroy_unused_super(s);
603 return ERR_PTR(-EBUSY);
604 }
605 if (!grab_super(old))
606 goto retry;
607 destroy_unused_super(s);
608 return old;
609 }
610 }
611 if (!s) {
612 spin_unlock(&sb_lock);
613 s = alloc_super(type, (flags & ~SB_SUBMOUNT), user_ns);
614 if (!s)
615 return ERR_PTR(-ENOMEM);
616 goto retry;
617 }
618
619 err = set(s, data);
620 if (err) {
621 spin_unlock(&sb_lock);
622 destroy_unused_super(s);
623 return ERR_PTR(err);
624 }
625 s->s_type = type;
626 strlcpy(s->s_id, type->name, sizeof(s->s_id));
627 list_add_tail(&s->s_list, &super_blocks);
628 hlist_add_head(&s->s_instances, &type->fs_supers);
629 spin_unlock(&sb_lock);
630 get_filesystem(type);
631 register_shrinker_prepared(&s->s_shrink);
632 return s;
633 }
634 EXPORT_SYMBOL(sget);
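
Usage sketch (not part of fs/super.c): a hypothetical "examplefs" driving sget() with its own test/set callbacks. Only the sget() call, the callback signatures, the sb_lock context they run in, and the new-vs-extant return convention come from the code above; everything named examplefs_* is made up.

#include <linux/err.h>
#include <linux/fs.h>

struct examplefs_key { int token; };	/* hypothetical per-sb key */

static int examplefs_test_super(struct super_block *s, void *data)
{
	struct examplefs_key *key = data;	/* runs under sb_lock */
	struct examplefs_key *cur = s->s_fs_info;

	return cur && cur->token == key->token;
}

static int examplefs_set_super(struct super_block *s, void *data)
{
	s->s_fs_info = data;			/* also runs under sb_lock */
	return set_anon_super(s, NULL);
}

static struct super_block *examplefs_get_sb(struct file_system_type *type,
					    int flags,
					    struct examplefs_key *key)
{
	struct super_block *s;

	s = sget(type, examplefs_test_super, examplefs_set_super, flags, key);
	if (IS_ERR(s))
		return s;
	/*
	 * If s->s_root is NULL the superblock is brand new and returned
	 * with s_umount held: fill it in and set SB_ACTIVE, or undo with
	 * deactivate_locked_super() on failure.
	 */
	return s;
}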
635
636 void drop_super(struct super_block *sb)
637 {
638 up_read(&sb->s_umount);
639 put_super(sb);
640 }
641
642 EXPORT_SYMBOL(drop_super);
643
644 void drop_super_exclusive(struct super_block *sb)
645 {
646 up_write(&sb->s_umount);
647 put_super(sb);
648 }
649 EXPORT_SYMBOL(drop_super_exclusive);
650
651 static void __iterate_supers(void (*f)(struct super_block *))
652 {
653 struct super_block *sb, *p = NULL;
654
655 spin_lock(&sb_lock);
656 list_for_each_entry(sb, &super_blocks, s_list) {
657 if (hlist_unhashed(&sb->s_instances))
658 continue;
659 sb->s_count++;
660 spin_unlock(&sb_lock);
661
662 f(sb);
663
664 spin_lock(&sb_lock);
665 if (p)
666 __put_super(p);
667 p = sb;
668 }
669 if (p)
670 __put_super(p);
671 spin_unlock(&sb_lock);
672 }
673
674 /**
675  *	iterate_supers - call function for all active superblocks
676  *	@f: function to call
677  *	@arg: argument to pass to it
678  *	Scans the superblock list and calls given function, passing it
679  *	locked superblock and given argument.
680  */
681 void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
682 {
683 struct super_block *sb, *p = NULL;
684
685 spin_lock(&sb_lock);
686 list_for_each_entry(sb, &super_blocks, s_list) {
687 if (hlist_unhashed(&sb->s_instances))
688 continue;
689 sb->s_count++;
690 spin_unlock(&sb_lock);
691
692 down_read(&sb->s_umount);
693 if (sb->s_root && (sb->s_flags & SB_BORN))
694 f(sb, arg);
695 up_read(&sb->s_umount);
696
697 spin_lock(&sb_lock);
698 if (p)
699 __put_super(p);
700 p = sb;
701 }
702 if (p)
703 __put_super(p);
704 spin_unlock(&sb_lock);
705 }
706
707 /**
708  *	iterate_supers_type - call function for superblocks of given type
709  *	@type: fs type
710  *	@f: function to call
711  *	@arg: argument to pass to it
712  *
713  *	Scans the superblock list and calls given function, passing it
714  *	locked superblock and given argument.
715  */
716 void iterate_supers_type(struct file_system_type *type,
717 void (*f)(struct super_block *, void *), void *arg)
718 {
719 struct super_block *sb, *p = NULL;
720
721 spin_lock(&sb_lock);
722 hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
723 sb->s_count++;
724 spin_unlock(&sb_lock);
725
726 down_read(&sb->s_umount);
727 if (sb->s_root && (sb->s_flags & SB_BORN))
728 f(sb, arg);
729 up_read(&sb->s_umount);
730
731 spin_lock(&sb_lock);
732 if (p)
733 __put_super(p);
734 p = sb;
735 }
736 if (p)
737 __put_super(p);
738 spin_unlock(&sb_lock);
739 }
740
741 EXPORT_SYMBOL(iterate_supers_type);
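
Usage sketch (hypothetical helper, not from this file) illustrating the callback contract shared by iterate_supers() and iterate_supers_type(): the callback runs with s_umount held shared, and only for superblocks that are born and have a root.

#include <linux/fs.h>

/* hypothetical callback: count read-only superblocks */
static void count_rdonly(struct super_block *sb, void *arg)
{
	int *n = arg;

	/* s_umount is held shared and SB_BORN has been checked for us */
	if (sb_rdonly(sb))
		(*n)++;
}

static int count_rdonly_supers(void)
{
	int n = 0;

	iterate_supers(count_rdonly, &n);
	return n;
}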
742
743 static struct super_block *__get_super(struct block_device *bdev, bool excl)
744 {
745 struct super_block *sb;
746
747 if (!bdev)
748 return NULL;
749
750 spin_lock(&sb_lock);
751 rescan:
752 list_for_each_entry(sb, &super_blocks, s_list) {
753 if (hlist_unhashed(&sb->s_instances))
754 continue;
755 if (sb->s_bdev == bdev) {
756 sb->s_count++;
757 spin_unlock(&sb_lock);
758 if (!excl)
759 down_read(&sb->s_umount);
760 else
761 down_write(&sb->s_umount);
762
763 if (sb->s_root && (sb->s_flags & SB_BORN))
764 return sb;
765 if (!excl)
766 up_read(&sb->s_umount);
767 else
768 up_write(&sb->s_umount);
769
770 spin_lock(&sb_lock);
771 __put_super(sb);
772 goto rescan;
773 }
774 }
775 spin_unlock(&sb_lock);
776 return NULL;
777 }
778
779 /**
780  *	get_super - get the superblock of a device
781  *	@bdev: device to get the superblock for
782  *
783  *	Scans the superblock list and finds the superblock of the file system
784  *	mounted on the device given. %NULL is returned if no match is found.
785  */
786 struct super_block *get_super(struct block_device *bdev)
787 {
788 return __get_super(bdev, false);
789 }
790 EXPORT_SYMBOL(get_super);
791
792 static struct super_block *__get_super_thawed(struct block_device *bdev,
793 bool excl)
794 {
795 while (1) {
796 struct super_block *s = __get_super(bdev, excl);
797 if (!s || s->s_writers.frozen == SB_UNFROZEN)
798 return s;
799 if (!excl)
800 up_read(&s->s_umount);
801 else
802 up_write(&s->s_umount);
803 wait_event(s->s_writers.wait_unfrozen,
804 s->s_writers.frozen == SB_UNFROZEN);
805 put_super(s);
806 }
807 }
808
809 /**
810  *	get_super_thawed - get thawed superblock of a device
811  *	@bdev: device to get the superblock for
812  *
813  *	Scans the superblock list and finds the superblock of the file system
814  *	mounted on the device. The superblock is returned once it is thawed
815  *	(or immediately if it was not frozen). %NULL is returned if no match
816  *	is found.
817  */
818 struct super_block *get_super_thawed(struct block_device *bdev)
819 {
820 return __get_super_thawed(bdev, false);
821 }
822 EXPORT_SYMBOL(get_super_thawed);
823
824 /**
825  *	get_super_exclusive_thawed - get thawed superblock of a device
826  *	@bdev: device to get the superblock for
827  *
828  *	Scans the superblock list and finds the superblock of the file system
829  *	mounted on the device. The superblock is returned once it is thawed
830  *	(or immediately if it was not frozen), with the s_umount semaphore
831  *	taken in exclusive mode. %NULL is returned if no match is found.
832  */
833 struct super_block *get_super_exclusive_thawed(struct block_device *bdev)
834 {
835 return __get_super_thawed(bdev, true);
836 }
837 EXPORT_SYMBOL(get_super_exclusive_thawed);
838
839 /**
840  * get_active_super - get an active reference to the superblock of a device
841  * @bdev: device to get the superblock for
842  *
843  * Scans the superblock list and finds the superblock of the file system
844  * mounted on the device given.  Returns the superblock with an active
845  * reference or %NULL if none was found.
846  */
847 struct super_block *get_active_super(struct block_device *bdev)
848 {
849 struct super_block *sb;
850
851 if (!bdev)
852 return NULL;
853
854 restart:
855 spin_lock(&sb_lock);
856 list_for_each_entry(sb, &super_blocks, s_list) {
857 if (hlist_unhashed(&sb->s_instances))
858 continue;
859 if (sb->s_bdev == bdev) {
860 if (!grab_super(sb))
861 goto restart;
862 up_write(&sb->s_umount);
863 return sb;
864 }
865 }
866 spin_unlock(&sb_lock);
867 return NULL;
868 }
869
870 struct super_block *user_get_super(dev_t dev)
871 {
872 struct super_block *sb;
873
874 spin_lock(&sb_lock);
875 rescan:
876 list_for_each_entry(sb, &super_blocks, s_list) {
877 if (hlist_unhashed(&sb->s_instances))
878 continue;
879 if (sb->s_dev == dev) {
880 sb->s_count++;
881 spin_unlock(&sb_lock);
882 down_read(&sb->s_umount);
883
884 if (sb->s_root && (sb->s_flags & SB_BORN))
885 return sb;
886 up_read(&sb->s_umount);
887
888 spin_lock(&sb_lock);
889 __put_super(sb);
890 goto rescan;
891 }
892 }
893 spin_unlock(&sb_lock);
894 return NULL;
895 }
896
897 /**
898  * reconfigure_super - asks filesystem to change superblock parameters
899  * @fc: the superblock and changes
900  *
901  * Alters the configuration parameters of a live superblock.
902  */
903 int reconfigure_super(struct fs_context *fc)
904 {
905 struct super_block *sb = fc->root->d_sb;
906 int retval;
907 bool remount_ro = false;
908 bool force = fc->sb_flags & SB_FORCE;
909
910 if (fc->sb_flags_mask & ~MS_RMT_MASK)
911 return -EINVAL;
912 if (sb->s_writers.frozen != SB_UNFROZEN)
913 return -EBUSY;
914
915 retval = security_sb_remount(sb, fc->security);
916 if (retval)
917 return retval;
918
919 if (fc->sb_flags_mask & SB_RDONLY) {
920 #ifdef CONFIG_BLOCK
921 if (!(fc->sb_flags & SB_RDONLY) && bdev_read_only(sb->s_bdev))
922 return -EACCES;
923 #endif
924
925 remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
926 }
927
928 if (remount_ro) {
929 if (!hlist_empty(&sb->s_pins)) {
930 up_write(&sb->s_umount);
931 group_pin_kill(&sb->s_pins);
932 down_write(&sb->s_umount);
933 if (!sb->s_root)
934 return 0;
935 if (sb->s_writers.frozen != SB_UNFROZEN)
936 return -EBUSY;
937 remount_ro = !sb_rdonly(sb);
938 }
939 }
940 shrink_dcache_sb(sb);
941
942 	/* If we are reconfiguring to RDONLY and current sb is read/write,
943 	 * make sure there are no files open for writing.
944 	 */
945 if (remount_ro) {
946 if (force) {
947 sb->s_readonly_remount = 1;
948 smp_wmb();
949 } else {
950 retval = sb_prepare_remount_readonly(sb);
951 if (retval)
952 return retval;
953 }
954 }
955
956 if (fc->ops->reconfigure) {
957 retval = fc->ops->reconfigure(fc);
958 if (retval) {
959 if (!force)
960 goto cancel_readonly;
961
962 WARN(1, "forced remount of a %s fs returned %i\n",
963 sb->s_type->name, retval);
964 }
965 }
966
967 WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
968 (fc->sb_flags & fc->sb_flags_mask)));
969 	/* Needs to be ordered wrt mnt_is_readonly() */
970 smp_wmb();
971 sb->s_readonly_remount = 0;
972
973 	/*
974 	 * Some filesystems modify their metadata via some other path than the
975 	 * bdev buffer cache (eg. use a private mapping, or directories in
976 	 * pagecache, etc). Also file data modifications go via their own
977 	 * mappings. So if we try to mount readonly then copy the filesystem
978 	 * from bdev, we could get stale data, so invalidate it to give a best
979 	 * effort at coherency.
980 	 */
981 if (remount_ro && sb->s_bdev)
982 invalidate_bdev(sb->s_bdev);
983 return 0;
984
985 cancel_readonly:
986 sb->s_readonly_remount = 0;
987 return retval;
988 }
989
990 static void do_emergency_remount_callback(struct super_block *sb)
991 {
992 down_write(&sb->s_umount);
993 if (sb->s_root && sb->s_bdev && (sb->s_flags & SB_BORN) &&
994 !sb_rdonly(sb)) {
995 struct fs_context *fc;
996
997 fc = fs_context_for_reconfigure(sb->s_root,
998 SB_RDONLY | SB_FORCE, SB_RDONLY);
999 if (!IS_ERR(fc)) {
1000 if (parse_monolithic_mount_data(fc, NULL) == 0)
1001 (void)reconfigure_super(fc);
1002 put_fs_context(fc);
1003 }
1004 }
1005 up_write(&sb->s_umount);
1006 }
1007
1008 static void do_emergency_remount(struct work_struct *work)
1009 {
1010 __iterate_supers(do_emergency_remount_callback);
1011 kfree(work);
1012 printk("Emergency Remount complete\n");
1013 }
1014
1015 void emergency_remount(void)
1016 {
1017 struct work_struct *work;
1018
1019 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1020 if (work) {
1021 INIT_WORK(work, do_emergency_remount);
1022 schedule_work(work);
1023 }
1024 }
1025
1026 static void do_thaw_all_callback(struct super_block *sb)
1027 {
1028 down_write(&sb->s_umount);
1029 if (sb->s_root && sb->s_flags & SB_BORN) {
1030 emergency_thaw_bdev(sb);
1031 thaw_super_locked(sb);
1032 } else {
1033 up_write(&sb->s_umount);
1034 }
1035 }
1036
1037 static void do_thaw_all(struct work_struct *work)
1038 {
1039 __iterate_supers(do_thaw_all_callback);
1040 kfree(work);
1041 printk(KERN_WARNING "Emergency Thaw complete\n");
1042 }
1043
1044 /**
1045  * emergency_thaw_all -- forcibly thaw every frozen filesystem
1046  *
1047  * Used by the SysRq emergency-thaw path to unfreeze all filesystems.
1048  */
1049 void emergency_thaw_all(void)
1050 {
1051 struct work_struct *work;
1052
1053 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1054 if (work) {
1055 INIT_WORK(work, do_thaw_all);
1056 schedule_work(work);
1057 }
1058 }
1059
1060 static DEFINE_IDA(unnamed_dev_ida);
1061
1062 /**
1063  * get_anon_bdev - Allocate a block device for filesystems which don't use real block devices
1064  * @p: Pointer to a dev_t.
1065  *
1066  * Filesystems which don't use real block devices can call this function
1067  * to allocate a virtual block device.
1068  *
1069  * Context: Any context.  Frequently called while holding sb_lock.
1070  * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
1071  * or -ENOMEM if memory allocation failed.
1072  */
1073 int get_anon_bdev(dev_t *p)
1074 {
1075 int dev;
1076
1077 	/*
1078 	 * Many userspace utilities consider an FSID of 0 invalid.
1079 	 * Always return at least 1 from get_anon_bdev.
1080 	 */
1081 dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
1082 GFP_ATOMIC);
1083 if (dev == -ENOSPC)
1084 dev = -EMFILE;
1085 if (dev < 0)
1086 return dev;
1087
1088 *p = MKDEV(0, dev);
1089 return 0;
1090 }
1091 EXPORT_SYMBOL(get_anon_bdev);
1092
1093 void free_anon_bdev(dev_t dev)
1094 {
1095 ida_free(&unnamed_dev_ida, MINOR(dev));
1096 }
1097 EXPORT_SYMBOL(free_anon_bdev);
1098
1099 int set_anon_super(struct super_block *s, void *data)
1100 {
1101 return get_anon_bdev(&s->s_dev);
1102 }
1103 EXPORT_SYMBOL(set_anon_super);
1104
1105 void kill_anon_super(struct super_block *sb)
1106 {
1107 dev_t dev = sb->s_dev;
1108 generic_shutdown_super(sb);
1109 free_anon_bdev(dev);
1110 }
1111 EXPORT_SYMBOL(kill_anon_super);
1112
1113 void kill_litter_super(struct super_block *sb)
1114 {
1115 if (sb->s_root)
1116 d_genocide(sb->s_root);
1117 kill_anon_super(sb);
1118 }
1119 EXPORT_SYMBOL(kill_litter_super);
1120
1121 int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
1122 {
1123 return set_anon_super(sb, NULL);
1124 }
1125 EXPORT_SYMBOL(set_anon_super_fc);
1126
1127 static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
1128 {
1129 return sb->s_fs_info == fc->s_fs_info;
1130 }
1131
1132 static int test_single_super(struct super_block *s, struct fs_context *fc)
1133 {
1134 return 1;
1135 }
1136
1137 /**
1138  * vfs_get_super - Get a superblock with a search key set in s_fs_info.
1139  * @fc: The filesystem context holding the parameters
1140  * @keying: How to distinguish superblocks
1141  * @fill_super: Helper to initialise a new superblock
1142  *
1143  * Search for a superblock and create a new one if not found.  The search
1144  * criterion is controlled by @keying.  If the search fails, a new superblock
1145  * is created and @fill_super() is called to initialise it.
1146  *
1147  * There are three keying strategies:
1148  *
1149  * (1) vfs_get_single_super / vfs_get_single_reconf_super: Only one superblock
1150  *     of this type may exist on the system; the reconf variant additionally
1151  *     reconfigures an extant superblock with the new parameters.
1152  *
1153  * (2) vfs_get_keyed_super: Multiple superblocks may exist and are keyed by
1154  *     the contents of s_fs_info; superblocks with matching keys are shared.
1155  *
1156  * (3) vfs_get_independent_super: Multiple superblocks may exist and are
1157  *     unkeyed; a new superblock is created on every call.
1158  *
1159  * On success, fc->root holds a reference to the root dentry of the new or
1160  * shared superblock.
1161  */
1162 int vfs_get_super(struct fs_context *fc,
1163 enum vfs_get_super_keying keying,
1164 int (*fill_super)(struct super_block *sb,
1165 struct fs_context *fc))
1166 {
1167 int (*test)(struct super_block *, struct fs_context *);
1168 struct super_block *sb;
1169 int err;
1170
1171 switch (keying) {
1172 case vfs_get_single_super:
1173 case vfs_get_single_reconf_super:
1174 test = test_single_super;
1175 break;
1176 case vfs_get_keyed_super:
1177 test = test_keyed_super;
1178 break;
1179 case vfs_get_independent_super:
1180 test = NULL;
1181 break;
1182 default:
1183 BUG();
1184 }
1185
1186 sb = sget_fc(fc, test, set_anon_super_fc);
1187 if (IS_ERR(sb))
1188 return PTR_ERR(sb);
1189
1190 if (!sb->s_root) {
1191 err = fill_super(sb, fc);
1192 if (err)
1193 goto error;
1194
1195 sb->s_flags |= SB_ACTIVE;
1196 fc->root = dget(sb->s_root);
1197 } else {
1198 fc->root = dget(sb->s_root);
1199 if (keying == vfs_get_single_reconf_super) {
1200 err = reconfigure_super(fc);
1201 if (err < 0) {
1202 dput(fc->root);
1203 fc->root = NULL;
1204 goto error;
1205 }
1206 }
1207 }
1208
1209 return 0;
1210
1211 error:
1212 deactivate_locked_super(sb);
1213 return err;
1214 }
1215 EXPORT_SYMBOL(vfs_get_super);
1216
1217 int get_tree_nodev(struct fs_context *fc,
1218 int (*fill_super)(struct super_block *sb,
1219 struct fs_context *fc))
1220 {
1221 return vfs_get_super(fc, vfs_get_independent_super, fill_super);
1222 }
1223 EXPORT_SYMBOL(get_tree_nodev);
1224
1225 int get_tree_single(struct fs_context *fc,
1226 int (*fill_super)(struct super_block *sb,
1227 struct fs_context *fc))
1228 {
1229 return vfs_get_super(fc, vfs_get_single_super, fill_super);
1230 }
1231 EXPORT_SYMBOL(get_tree_single);
1232
1233 int get_tree_single_reconf(struct fs_context *fc,
1234 int (*fill_super)(struct super_block *sb,
1235 struct fs_context *fc))
1236 {
1237 return vfs_get_super(fc, vfs_get_single_reconf_super, fill_super);
1238 }
1239 EXPORT_SYMBOL(get_tree_single_reconf);
1240
1241 int get_tree_keyed(struct fs_context *fc,
1242 int (*fill_super)(struct super_block *sb,
1243 struct fs_context *fc),
1244 void *key)
1245 {
1246 fc->s_fs_info = key;
1247 return vfs_get_super(fc, vfs_get_keyed_super, fill_super);
1248 }
1249 EXPORT_SYMBOL(get_tree_keyed);
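
A sketch of the fs_context plumbing these get_tree_*() helpers expect, using a hypothetical "examplefs". Only get_tree_nodev(), the fill_super signature and the fs_context_operations hook come from this file; simple_super_operations, new_inode() and d_make_root() are standard VFS helpers, and the magic number is made up.

#include <linux/fs.h>
#include <linux/fs_context.h>

static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *root;

	sb->s_op = &simple_super_operations;
	sb->s_magic = 0x6578616d;		/* hypothetical magic */

	root = new_inode(sb);
	if (!root)
		return -ENOMEM;
	root->i_ino = 1;
	root->i_mode = S_IFDIR | 0755;

	sb->s_root = d_make_root(root);		/* consumes root on failure */
	return sb->s_root ? 0 : -ENOMEM;
}

static int examplefs_get_tree(struct fs_context *fc)
{
	/* a new, independent superblock on every mount */
	return get_tree_nodev(fc, examplefs_fill_super);
}

static const struct fs_context_operations examplefs_context_ops = {
	.get_tree	= examplefs_get_tree,
};

/* pointed to by file_system_type->init_fs_context */
static int examplefs_init_fs_context(struct fs_context *fc)
{
	fc->ops = &examplefs_context_ops;
	return 0;
}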
1250
1251 #ifdef CONFIG_BLOCK
1252
1253 static int set_bdev_super(struct super_block *s, void *data)
1254 {
1255 s->s_bdev = data;
1256 s->s_dev = s->s_bdev->bd_dev;
1257 s->s_bdi = bdi_get(s->s_bdev->bd_bdi);
1258
1259 return 0;
1260 }
1261
1262 static int set_bdev_super_fc(struct super_block *s, struct fs_context *fc)
1263 {
1264 return set_bdev_super(s, fc->sget_key);
1265 }
1266
1267 static int test_bdev_super_fc(struct super_block *s, struct fs_context *fc)
1268 {
1269 return s->s_bdev == fc->sget_key;
1270 }
1271
1272 /**
1273  * get_tree_bdev - Get a superblock based on a single block device
1274  * @fc: The filesystem context holding the parameters
1275  * @fill_super: Helper to initialise a new superblock
1276  */
1277 int get_tree_bdev(struct fs_context *fc,
1278 int (*fill_super)(struct super_block *,
1279 struct fs_context *))
1280 {
1281 struct block_device *bdev;
1282 struct super_block *s;
1283 fmode_t mode = FMODE_READ | FMODE_EXCL;
1284 int error = 0;
1285
1286 if (!(fc->sb_flags & SB_RDONLY))
1287 mode |= FMODE_WRITE;
1288
1289 if (!fc->source)
1290 return invalf(fc, "No source specified");
1291
1292 bdev = blkdev_get_by_path(fc->source, mode, fc->fs_type);
1293 if (IS_ERR(bdev)) {
1294 errorf(fc, "%s: Can't open blockdev", fc->source);
1295 return PTR_ERR(bdev);
1296 }
1297
1298 	/* Once the superblock is inserted into the list by sget_fc(), s_umount
1299 	 * will protect the lockfs code from trying to start a snapshot while
1300 	 * we are mounting
1301 	 */
1302 mutex_lock(&bdev->bd_fsfreeze_mutex);
1303 if (bdev->bd_fsfreeze_count > 0) {
1304 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1305 warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
1306 blkdev_put(bdev, mode);
1307 return -EBUSY;
1308 }
1309
1310 fc->sb_flags |= SB_NOSEC;
1311 fc->sget_key = bdev;
1312 s = sget_fc(fc, test_bdev_super_fc, set_bdev_super_fc);
1313 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1314 if (IS_ERR(s)) {
1315 blkdev_put(bdev, mode);
1316 return PTR_ERR(s);
1317 }
1318
1319 if (s->s_root) {
1320 		/* Don't summarily change the RO/RW state. */
1321 if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
1322 warnf(fc, "%pg: Can't mount, would change RO state", bdev);
1323 deactivate_locked_super(s);
1324 blkdev_put(bdev, mode);
1325 return -EBUSY;
1326 }
1327
1328 		/*
1329 		 * s_umount nests inside bd_mutex during
1330 		 * __invalidate_device().  blkdev_put() acquires
1331 		 * bd_mutex and can't be called under s_umount.  Drop
1332 		 * s_umount temporarily.  This is safe as we're
1333 		 * holding an active reference.
1334 		 */
1335 up_write(&s->s_umount);
1336 blkdev_put(bdev, mode);
1337 down_write(&s->s_umount);
1338 } else {
1339 s->s_mode = mode;
1340 snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
1341 sb_set_blocksize(s, block_size(bdev));
1342 error = fill_super(s, fc);
1343 if (error) {
1344 deactivate_locked_super(s);
1345 return error;
1346 }
1347
1348 s->s_flags |= SB_ACTIVE;
1349 bdev->bd_super = s;
1350 }
1351
1352 BUG_ON(fc->root);
1353 fc->root = dget(s->s_root);
1354 return 0;
1355 }
1356 EXPORT_SYMBOL(get_tree_bdev);
1357
1358 static int test_bdev_super(struct super_block *s, void *data)
1359 {
1360 return (void *)s->s_bdev == data;
1361 }
1362
1363 struct dentry *mount_bdev(struct file_system_type *fs_type,
1364 int flags, const char *dev_name, void *data,
1365 int (*fill_super)(struct super_block *, void *, int))
1366 {
1367 struct block_device *bdev;
1368 struct super_block *s;
1369 fmode_t mode = FMODE_READ | FMODE_EXCL;
1370 int error = 0;
1371
1372 if (!(flags & SB_RDONLY))
1373 mode |= FMODE_WRITE;
1374
1375 bdev = blkdev_get_by_path(dev_name, mode, fs_type);
1376 if (IS_ERR(bdev))
1377 return ERR_CAST(bdev);
1378
1379 	/*
1380 	 * once the super is inserted into the list by sget, s_umount
1381 	 * will protect the lockfs code from trying to start a snapshot
1382 	 * while we are mounting
1383 	 */
1384 mutex_lock(&bdev->bd_fsfreeze_mutex);
1385 if (bdev->bd_fsfreeze_count > 0) {
1386 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1387 error = -EBUSY;
1388 goto error_bdev;
1389 }
1390 s = sget(fs_type, test_bdev_super, set_bdev_super, flags | SB_NOSEC,
1391 bdev);
1392 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1393 if (IS_ERR(s))
1394 goto error_s;
1395
1396 if (s->s_root) {
1397 if ((flags ^ s->s_flags) & SB_RDONLY) {
1398 deactivate_locked_super(s);
1399 error = -EBUSY;
1400 goto error_bdev;
1401 }
1402
1403 		/*
1404 		 * s_umount nests inside bd_mutex during
1405 		 * __invalidate_device().  blkdev_put() acquires
1406 		 * bd_mutex and can't be called under s_umount.  Drop
1407 		 * s_umount temporarily.  This is safe as we're
1408 		 * holding an active reference.
1409 		 */
1410 up_write(&s->s_umount);
1411 blkdev_put(bdev, mode);
1412 down_write(&s->s_umount);
1413 } else {
1414 s->s_mode = mode;
1415 snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
1416 sb_set_blocksize(s, block_size(bdev));
1417 error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
1418 if (error) {
1419 deactivate_locked_super(s);
1420 goto error;
1421 }
1422
1423 s->s_flags |= SB_ACTIVE;
1424 bdev->bd_super = s;
1425 }
1426
1427 return dget(s->s_root);
1428
1429 error_s:
1430 error = PTR_ERR(s);
1431 error_bdev:
1432 blkdev_put(bdev, mode);
1433 error:
1434 return ERR_PTR(error);
1435 }
1436 EXPORT_SYMBOL(mount_bdev);
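
A hypothetical block-backed filesystem skeleton (not from this file) showing the usual pairing of mount_bdev() with kill_block_super(); the fill_super body is a placeholder.

#include <linux/fs.h>
#include <linux/module.h>

static int blockfs_fill_super(struct super_block *sb, void *data, int silent)
{
	/*
	 * Read the on-disk superblock through sb->s_bdev, set sb->s_op,
	 * then allocate the root inode and sb->s_root.  On error,
	 * mount_bdev() cleans up via deactivate_locked_super().
	 */
	return -EINVAL;				/* placeholder */
}

static struct dentry *blockfs_mount(struct file_system_type *fs_type,
				    int flags, const char *dev_name,
				    void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, blockfs_fill_super);
}

static struct file_system_type blockfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "blockfs",	/* hypothetical */
	.mount		= blockfs_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};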
1437
1438 void kill_block_super(struct super_block *sb)
1439 {
1440 struct block_device *bdev = sb->s_bdev;
1441 fmode_t mode = sb->s_mode;
1442
1443 bdev->bd_super = NULL;
1444 generic_shutdown_super(sb);
1445 sync_blockdev(bdev);
1446 WARN_ON_ONCE(!(mode & FMODE_EXCL));
1447 blkdev_put(bdev, mode | FMODE_EXCL);
1448 }
1449
1450 EXPORT_SYMBOL(kill_block_super);
1451 #endif
1452
1453 struct dentry *mount_nodev(struct file_system_type *fs_type,
1454 int flags, void *data,
1455 int (*fill_super)(struct super_block *, void *, int))
1456 {
1457 int error;
1458 struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);
1459
1460 if (IS_ERR(s))
1461 return ERR_CAST(s);
1462
1463 error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
1464 if (error) {
1465 deactivate_locked_super(s);
1466 return ERR_PTR(error);
1467 }
1468 s->s_flags |= SB_ACTIVE;
1469 return dget(s->s_root);
1470 }
1471 EXPORT_SYMBOL(mount_nodev);
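
A hypothetical no-device filesystem built on mount_nodev() plus kill_litter_super(), with simple_fill_super() from libfs doing the root setup; the "tinyfs" name and magic number are made up.

#include <linux/fs.h>
#include <linux/module.h>

static int tinyfs_fill_super(struct super_block *sb, void *data, int silent)
{
	static const struct tree_descr empty[] = { {""} };

	/* libfs builds the root inode and dentry for us */
	return simple_fill_super(sb, 0x74696e79 /* hypothetical */, empty);
}

static struct dentry *tinyfs_mount(struct file_system_type *fs_type,
				   int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, tinyfs_fill_super);
}

static struct file_system_type tinyfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tinyfs",
	.mount		= tinyfs_mount,
	.kill_sb	= kill_litter_super,	/* prunes leftover dentries */
};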
1472
1473 static int reconfigure_single(struct super_block *s,
1474 int flags, void *data)
1475 {
1476 struct fs_context *fc;
1477 int ret;
1478
1479 	/* The caller really needs to be passing fc down into mount_single(),
1480 	 * then a chunk of this can be removed.  [Bollocks -- AV]
1481 	 * Better yet, reconfiguration shouldn't happen, but rather the second
1482 	 * mount should be rejected if the parameters are not compatible.
1483 	 */
1484 fc = fs_context_for_reconfigure(s->s_root, flags, MS_RMT_MASK);
1485 if (IS_ERR(fc))
1486 return PTR_ERR(fc);
1487
1488 ret = parse_monolithic_mount_data(fc, data);
1489 if (ret < 0)
1490 goto out;
1491
1492 ret = reconfigure_super(fc);
1493 out:
1494 put_fs_context(fc);
1495 return ret;
1496 }
1497
1498 static int compare_single(struct super_block *s, void *p)
1499 {
1500 return 1;
1501 }
1502
1503 struct dentry *mount_single(struct file_system_type *fs_type,
1504 int flags, void *data,
1505 int (*fill_super)(struct super_block *, void *, int))
1506 {
1507 struct super_block *s;
1508 int error;
1509
1510 s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
1511 if (IS_ERR(s))
1512 return ERR_CAST(s);
1513 if (!s->s_root) {
1514 error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
1515 if (!error)
1516 s->s_flags |= SB_ACTIVE;
1517 } else {
1518 error = reconfigure_single(s, flags, data);
1519 }
1520 if (unlikely(error)) {
1521 deactivate_locked_super(s);
1522 return ERR_PTR(error);
1523 }
1524 return dget(s->s_root);
1525 }
1526 EXPORT_SYMBOL(mount_single);
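
A sketch of the single-instance pattern: because compare_single() matches any existing instance, mount_single() hands every mount the same superblock and routes later mounts through reconfigure_single() with the new options. onefs_fill_super is hypothetical.

#include <linux/fs.h>

/* hypothetical: same fill_super signature as used by mount_bdev()/mount_nodev() */
static int onefs_fill_super(struct super_block *sb, void *data, int silent);

static struct dentry *onefs_mount(struct file_system_type *fs_type,
				  int flags, const char *dev_name, void *data)
{
	/* first mount fills the sb; later mounts only reconfigure it */
	return mount_single(fs_type, flags, data, onefs_fill_super);
}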
1527
1528 /**
1529  * vfs_get_tree - Get the mountable root
1530  * @fc: The superblock configuration context.
1531  *
1532  * The filesystem is invoked to get or create a superblock which can then later
1533  * be used for mounting.  The filesystem places a pointer to the root to be
1534  * used for mounting in @fc->root.
1535  */
1536 int vfs_get_tree(struct fs_context *fc)
1537 {
1538 struct super_block *sb;
1539 int error;
1540
1541 if (fc->root)
1542 return -EBUSY;
1543
1544 	/* Get the mountable root in fc->root, with a ref on the root and a ref
1545 	 * on the superblock.
1546 	 */
1547 error = fc->ops->get_tree(fc);
1548 if (error < 0)
1549 return error;
1550
1551 if (!fc->root) {
1552 pr_err("Filesystem %s get_tree() didn't set fc->root\n",
1553 fc->fs_type->name);
1554 		/* We don't know what the locking state of the superblock is -
1555 		 * if there is a superblock.
1556 		 */
1557 BUG();
1558 }
1559
1560 sb = fc->root->d_sb;
1561 WARN_ON(!sb->s_bdi);
1562
1563 	/*
1564 	 * Write barrier is for super_cache_count(). We place it before setting
1565 	 * SB_BORN as the data dependency between the two functions is the
1566 	 * superblock structure contents that we just set up, not the SB_BORN
1567 	 * flag.
1568 	 */
1569 smp_wmb();
1570 sb->s_flags |= SB_BORN;
1571
1572 error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
1573 if (unlikely(error)) {
1574 fc_drop_locked(fc);
1575 return error;
1576 }
1577
1578 	/*
1579 	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
1580 	 * but s_maxbytes was an unsigned long long for many releases. Throw
1581 	 * this warning for a little while to try and catch filesystems that
1582 	 * violate this rule.
1583 	 */
1584 WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
1585 "negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);
1586
1587 return 0;
1588 }
1589 EXPORT_SYMBOL(vfs_get_tree);
1590
1591 /*
1592  * Setup a private BDI for the given superblock, registered under the name
1593  * built from @fmt.  It is released again in generic_shutdown_super().
1594  */
1595 int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1596 {
1597 struct backing_dev_info *bdi;
1598 int err;
1599 va_list args;
1600
1601 bdi = bdi_alloc(GFP_KERNEL);
1602 if (!bdi)
1603 return -ENOMEM;
1604
1605 bdi->name = sb->s_type->name;
1606
1607 va_start(args, fmt);
1608 err = bdi_register_va(bdi, fmt, args);
1609 va_end(args);
1610 if (err) {
1611 bdi_put(bdi);
1612 return err;
1613 }
1614 WARN_ON(sb->s_bdi != &noop_backing_dev_info);
1615 sb->s_bdi = bdi;
1616
1617 return 0;
1618 }
1619 EXPORT_SYMBOL(super_setup_bdi_name);
1620
1621 /*
1622  * Setup private BDI for given superblock. It gets automatically cleaned up
1623  * in generic_shutdown_super().
1624  */
1625 int super_setup_bdi(struct super_block *sb)
1626 {
1627 static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
1628
1629 return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
1630 atomic_long_inc_return(&bdi_seq));
1631 }
1632 EXPORT_SYMBOL(super_setup_bdi);
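
A sketch of a fill_super body claiming its own BDI via super_setup_bdi(); generic_shutdown_super() above puts the BDI again, so no teardown is needed in kill_sb. The function name is hypothetical; VM_READAHEAD_PAGES is the kernel's default readahead window from <linux/mm.h>.

#include <linux/fs.h>
#include <linux/mm.h>

static int bdifs_fill_super(struct super_block *sb, void *data, int silent)
{
	int err;

	/* give this superblock its own writeback/readahead context */
	err = super_setup_bdi(sb);
	if (err)
		return err;
	sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;

	/* ... remaining superblock setup ... */
	return 0;
}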
1633
1634 /*
1635  * This is an internal function, please use sb_end_{write,pagefault,intwrite}
1636  * instead.
1637  */
1638 void __sb_end_write(struct super_block *sb, int level)
1639 {
1640 percpu_up_read(sb->s_writers.rw_sem + level-1);
1641 }
1642 EXPORT_SYMBOL(__sb_end_write);
1643
1644 /*
1645  * This is an internal function, please use sb_start_{write,pagefault,intwrite}
1646  * instead.
1647  */
1648 int __sb_start_write(struct super_block *sb, int level, bool wait)
1649 {
1650 bool force_trylock = false;
1651 int ret = 1;
1652
1653 #ifdef CONFIG_LOCKDEP
1654 	/*
1655 	 * We want lockdep to tell us about possible deadlocks with freezing
1656 	 * but it's a bit tricky to properly instrument it. Getting freeze
1657 	 * protection works as getting a read lock but there are subtle
1658 	 * problems. XFS for example gets freeze protection on internal level
1659 	 * twice in some cases, which is OK only because we already hold a
1660 	 * freeze protection also on higher level. Due to these cases we have
1661 	 * to use wait == F (trylock mode) which must not fail.
1662 	 */
1663 if (wait) {
1664 int i;
1665
1666 for (i = 0; i < level - 1; i++)
1667 if (percpu_rwsem_is_held(sb->s_writers.rw_sem + i)) {
1668 force_trylock = true;
1669 break;
1670 }
1671 }
1672 #endif
1673 if (wait && !force_trylock)
1674 percpu_down_read(sb->s_writers.rw_sem + level-1);
1675 else
1676 ret = percpu_down_read_trylock(sb->s_writers.rw_sem + level-1);
1677
1678 WARN_ON(force_trylock && !ret);
1679 return ret;
1680 }
1681 EXPORT_SYMBOL(__sb_start_write);
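
A hypothetical helper showing the usual pairing: callers take write freeze protection through the sb_start_write()/sb_end_write() wrappers from <linux/fs.h>, which funnel into __sb_start_write(sb, SB_FREEZE_WRITE, true) and __sb_end_write(sb, SB_FREEZE_WRITE).

#include <linux/fs.h>

static void writerfs_touch(struct super_block *sb)
{
	sb_start_write(sb);	/* sleeps while the sb is frozen */
	/* ... dirty pages or start a transaction here ... */
	sb_end_write(sb);
}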
1682
1683 /**
1684  * sb_wait_write - wait until all writers to given file system finish
1685  * @sb: the super for which we wait
1686  * @level: type of writers we wait for (normal vs page fault)
1687  *
1688  * This function waits until there are no writers of given type to given file
1689  * system.
1690  */
1691 static void sb_wait_write(struct super_block *sb, int level)
1692 {
1693 percpu_down_write(sb->s_writers.rw_sem + level-1);
1694 }
1695
1696 /*
1697  * We are going to return to userspace and forget about these locks, the
1698  * ownership goes to the caller of thaw_super() which does unlock().
1699  */
1700 static void lockdep_sb_freeze_release(struct super_block *sb)
1701 {
1702 int level;
1703
1704 for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
1705 percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
1706 }
1707
1708 /*
1709  * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
1710  */
1711 static void lockdep_sb_freeze_acquire(struct super_block *sb)
1712 {
1713 int level;
1714
1715 for (level = 0; level < SB_FREEZE_LEVELS; ++level)
1716 percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
1717 }
1718
1719 static void sb_freeze_unlock(struct super_block *sb)
1720 {
1721 int level;
1722
1723 for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
1724 percpu_up_write(sb->s_writers.rw_sem + level);
1725 }
1726
1727 /**
1728  * freeze_super - lock the filesystem and force it into a consistent state
1729  * @sb: the super to lock
1730  *
1731  * Syncs the super to make sure the filesystem is consistent and calls the fs's
1732  * freeze_fs.  Subsequent calls to this without first thawing the fs will return
1733  * -EBUSY.
1734  *
1735  * During this function, sb->s_writers.frozen goes through these values:
1736  *
1737  * SB_UNFROZEN: File system is normal, all writes progress as usual.
1738  *
1739  * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
1740  * writes should be blocked, though page faults are still allowed. We wait for
1741  * all writes to complete and then proceed to the next stage.
1742  *
1743  * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
1744  * but internal fs threads can still modify the filesystem (although they
1745  * should not dirty new pages or inodes), writeback can run etc. After waiting
1746  * for all running page faults we sync the filesystem which will clean all
1747  * dirty pages and inodes (no new dirty pages or inodes can be created when
1748  * sync is running).
1749  *
1750  * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
1751  * modification are blocked (e.g. XFS preallocation truncation on inode
1752  * reclaim). This is usually implemented by blocking new transactions for
1753  * filesystems that have them and need this additional guard. After all
1754  * internal writers are finished we call ->freeze_fs() to finish filesystem
1755  * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
1756  * mostly auxiliary for filesystems to verify they do not modify frozen fs.
1757  *
1758  * sb->s_writers.frozen is protected by sb->s_umount.
1759  */
1760 int freeze_super(struct super_block *sb)
1761 {
1762 int ret;
1763
1764 atomic_inc(&sb->s_active);
1765 down_write(&sb->s_umount);
1766 if (sb->s_writers.frozen != SB_UNFROZEN) {
1767 deactivate_locked_super(sb);
1768 return -EBUSY;
1769 }
1770
1771 if (!(sb->s_flags & SB_BORN)) {
1772 up_write(&sb->s_umount);
1773 return 0;
1774 }
1775
1776 if (sb_rdonly(sb)) {
1777 		/* Nothing to do really... */
1778 sb->s_writers.frozen = SB_FREEZE_COMPLETE;
1779 up_write(&sb->s_umount);
1780 return 0;
1781 }
1782
1783 sb->s_writers.frozen = SB_FREEZE_WRITE;
1784 	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
1785 up_write(&sb->s_umount);
1786 sb_wait_write(sb, SB_FREEZE_WRITE);
1787 down_write(&sb->s_umount);
1788
1789 	/* Now we go and block page faults... */
1790 sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
1791 sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
1792
1793 	/* All writers are done so after syncing there won't be dirty data */
1794 sync_filesystem(sb);
1795
1796 	/* Now wait for internal filesystem counter */
1797 sb->s_writers.frozen = SB_FREEZE_FS;
1798 sb_wait_write(sb, SB_FREEZE_FS);
1799
1800 if (sb->s_op->freeze_fs) {
1801 ret = sb->s_op->freeze_fs(sb);
1802 if (ret) {
1803 printk(KERN_ERR
1804 "VFS:Filesystem freeze failed\n");
1805 sb->s_writers.frozen = SB_UNFROZEN;
1806 sb_freeze_unlock(sb);
1807 wake_up(&sb->s_writers.wait_unfrozen);
1808 deactivate_locked_super(sb);
1809 return ret;
1810 }
1811 }
1812 	/*
1813 	 * For debugging purposes so that fs can warn if it sees write activity
1814 	 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
1815 	 */
1816 sb->s_writers.frozen = SB_FREEZE_COMPLETE;
1817 lockdep_sb_freeze_release(sb);
1818 up_write(&sb->s_umount);
1819 return 0;
1820 }
1821 EXPORT_SYMBOL(freeze_super);
1822
1823 /**
1824  * thaw_super -- unlock filesystem
1825  * @sb: the super to thaw
1826  *
1827  * Unlocks the filesystem and marks it writeable again after freeze_super().
1828  */
1829 static int thaw_super_locked(struct super_block *sb)
1830 {
1831 int error;
1832
1833 if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
1834 up_write(&sb->s_umount);
1835 return -EINVAL;
1836 }
1837
1838 if (sb_rdonly(sb)) {
1839 sb->s_writers.frozen = SB_UNFROZEN;
1840 goto out;
1841 }
1842
1843 lockdep_sb_freeze_acquire(sb);
1844
1845 if (sb->s_op->unfreeze_fs) {
1846 error = sb->s_op->unfreeze_fs(sb);
1847 if (error) {
1848 printk(KERN_ERR
1849 "VFS:Filesystem thaw failed\n");
1850 lockdep_sb_freeze_release(sb);
1851 up_write(&sb->s_umount);
1852 return error;
1853 }
1854 }
1855
1856 sb->s_writers.frozen = SB_UNFROZEN;
1857 sb_freeze_unlock(sb);
1858 out:
1859 wake_up(&sb->s_writers.wait_unfrozen);
1860 deactivate_locked_super(sb);
1861 return 0;
1862 }
1863
1864 int thaw_super(struct super_block *sb)
1865 {
1866 down_write(&sb->s_umount);
1867 return thaw_super_locked(sb);
1868 }
1869 EXPORT_SYMBOL(thaw_super);
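
A hypothetical snapshot helper bracketing work with freeze_super()/thaw_super(); note that freeze_super() takes its own active reference on success, which thaw_super() drops again.

#include <linux/fs.h>

static int snapshot_example(struct super_block *sb)
{
	int err;

	err = freeze_super(sb);	/* holds an active reference while frozen */
	if (err)
		return err;

	/* ... take the block-level snapshot here ... */

	return thaw_super(sb);	/* drops that reference again */
}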