Lines matching refs: sdp (fs/gfs2/super.c)

297 void gfs2_jindex_free(struct gfs2_sbd *sdp)  in gfs2_jindex_free()  argument
302 spin_lock(&sdp->sd_jindex_spin); in gfs2_jindex_free()
303 list_add(&list, &sdp->sd_jindex_list); in gfs2_jindex_free()
304 list_del_init(&sdp->sd_jindex_list); in gfs2_jindex_free()
305 sdp->sd_journals = 0; in gfs2_jindex_free()
306 spin_unlock(&sdp->sd_jindex_spin); in gfs2_jindex_free()
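The gfs2_jindex_free() matches above show the journal list being detached under sd_jindex_spin before anything is freed. A minimal sketch of that splice-then-free pattern; the freeing loop is assumed from context rather than shown in the matches, and the function name is hypothetical:

        /* Sketch only: detach sd_jindex_list onto a local head under the
         * spinlock, then free the journal descriptors without holding it. */
        static void jindex_teardown_sketch(struct gfs2_sbd *sdp)
        {
                struct list_head list;
                struct gfs2_jdesc *jd;

                spin_lock(&sdp->sd_jindex_spin);
                list_add(&list, &sdp->sd_jindex_list);  /* local head joins the ring */
                list_del_init(&sdp->sd_jindex_list);    /* original head drops out */
                sdp->sd_journals = 0;
                spin_unlock(&sdp->sd_jindex_spin);

                while (!list_empty(&list)) {
                        jd = list_entry(list.next, struct gfs2_jdesc, jd_list);
                        list_del(&jd->jd_list);
                        kfree(jd);      /* assumed; the real code also drops extents and the inode */
                }
        }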
335 struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid) in gfs2_jdesc_find() argument
339 spin_lock(&sdp->sd_jindex_spin); in gfs2_jdesc_find()
340 jd = jdesc_find_i(&sdp->sd_jindex_list, jid); in gfs2_jdesc_find()
341 spin_unlock(&sdp->sd_jindex_spin); in gfs2_jdesc_find()
349 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); in gfs2_jdesc_check() local
355 jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift; in gfs2_jdesc_check()
365 static int init_threads(struct gfs2_sbd *sdp) in init_threads() argument
370 p = kthread_run(gfs2_logd, sdp, "gfs2_logd"); in init_threads()
373 fs_err(sdp, "can't start logd thread: %d\n", error); in init_threads()
376 sdp->sd_logd_process = p; in init_threads()
378 p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad"); in init_threads()
381 fs_err(sdp, "can't start quotad thread: %d\n", error); in init_threads()
384 sdp->sd_quotad_process = p; in init_threads()
388 kthread_stop(sdp->sd_logd_process); in init_threads()
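init_threads() shows the usual kthread_run()/kthread_stop() unwinding: if quotad fails to start, the already-running logd is stopped again. A hedged sketch of that pattern, with the error-path layout assumed from the matched lines:

        static int init_threads_sketch(struct gfs2_sbd *sdp)
        {
                struct task_struct *p;
                int error;

                p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
                if (IS_ERR(p)) {
                        error = PTR_ERR(p);
                        fs_err(sdp, "can't start logd thread: %d\n", error);
                        return error;
                }
                sdp->sd_logd_process = p;

                p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
                if (IS_ERR(p)) {
                        error = PTR_ERR(p);
                        fs_err(sdp, "can't start quotad thread: %d\n", error);
                        kthread_stop(sdp->sd_logd_process);     /* unwind the first thread */
                        return error;
                }
                sdp->sd_quotad_process = p;
                return 0;
        }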
399 int gfs2_make_fs_rw(struct gfs2_sbd *sdp) in gfs2_make_fs_rw() argument
401 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); in gfs2_make_fs_rw()
407 error = init_threads(sdp); in gfs2_make_fs_rw()
411 error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0, in gfs2_make_fs_rw()
418 error = gfs2_find_jhead(sdp->sd_jdesc, &head); in gfs2_make_fs_rw()
423 gfs2_consist(sdp); in gfs2_make_fs_rw()
429 sdp->sd_log_sequence = head.lh_sequence + 1; in gfs2_make_fs_rw()
430 gfs2_log_pointers_init(sdp, head.lh_blkno); in gfs2_make_fs_rw()
432 error = gfs2_quota_init(sdp); in gfs2_make_fs_rw()
436 set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); in gfs2_make_fs_rw()
446 kthread_stop(sdp->sd_quotad_process); in gfs2_make_fs_rw()
447 kthread_stop(sdp->sd_logd_process); in gfs2_make_fs_rw()
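The gfs2_make_fs_rw() matches outline the read-write transition: start the daemons, read our own journal head, seed the log sequence from it, initialise quotas, and only then set SDF_JOURNAL_LIVE; on failure both daemons are stopped. A sketch of that ordering, with the freeze-glock handling elided and the unmount-flag check and labels assumed:

        static int make_fs_rw_sketch(struct gfs2_sbd *sdp)
        {
                struct gfs2_log_header_host head;
                int error;

                error = init_threads(sdp);
                if (error)
                        return error;

                error = gfs2_find_jhead(sdp->sd_jdesc, &head);
                if (error)
                        goto fail_threads;

                if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
                        gfs2_consist(sdp);      /* journal was not cleanly unmounted */
                        error = -EIO;
                        goto fail_threads;
                }

                sdp->sd_log_sequence = head.lh_sequence + 1;
                gfs2_log_pointers_init(sdp, head.lh_blkno);

                error = gfs2_quota_init(sdp);
                if (error)
                        goto fail_threads;

                set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
                return 0;

        fail_threads:
                kthread_stop(sdp->sd_quotad_process);
                kthread_stop(sdp->sd_logd_process);
                return error;
        }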
469 int gfs2_statfs_init(struct gfs2_sbd *sdp) in gfs2_statfs_init() argument
471 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); in gfs2_statfs_init()
472 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; in gfs2_statfs_init()
473 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); in gfs2_statfs_init()
474 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; in gfs2_statfs_init()
488 if (sdp->sd_args.ar_spectator) { in gfs2_statfs_init()
489 spin_lock(&sdp->sd_statfs_spin); in gfs2_statfs_init()
492 spin_unlock(&sdp->sd_statfs_spin); in gfs2_statfs_init()
498 spin_lock(&sdp->sd_statfs_spin); in gfs2_statfs_init()
503 spin_unlock(&sdp->sd_statfs_spin); in gfs2_statfs_init()
515 void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free, in gfs2_statfs_change() argument
518 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); in gfs2_statfs_change()
519 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; in gfs2_statfs_change()
520 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; in gfs2_statfs_change()
532 spin_lock(&sdp->sd_statfs_spin); in gfs2_statfs_change()
537 if (sdp->sd_args.ar_statfs_percent) { in gfs2_statfs_change()
539 y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent; in gfs2_statfs_change()
543 spin_unlock(&sdp->sd_statfs_spin); in gfs2_statfs_change()
547 gfs2_wake_up_statfs(sdp); in gfs2_statfs_change()
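gfs2_statfs_change() accumulates the per-node delta in sd_statfs_local under sd_statfs_spin and, when ar_statfs_percent is set, compares the drift against the master free count to decide whether to wake the statfs sync. A sketch of just that locked section; the buffer-head I/O of the real function is elided, and the 100 * sc_free scaling is an assumption:

        static void statfs_change_sketch(struct gfs2_sbd *sdp, s64 total,
                                         s64 free, s64 dinodes)
        {
                struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
                struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
                int need_sync = 0;
                s64 x, y;

                spin_lock(&sdp->sd_statfs_spin);
                l_sc->sc_total += total;
                l_sc->sc_free += free;
                l_sc->sc_dinodes += dinodes;
                if (sdp->sd_args.ar_statfs_percent) {
                        x = 100 * l_sc->sc_free;
                        y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
                        if (x >= y || x <= -y)
                                need_sync = 1;
                }
                spin_unlock(&sdp->sd_statfs_spin);

                if (need_sync)
                        gfs2_wake_up_statfs(sdp);
        }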
550 void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh, in update_statfs() argument
553 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); in update_statfs()
554 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); in update_statfs()
555 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; in update_statfs()
556 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; in update_statfs()
560 spin_lock(&sdp->sd_statfs_spin); in update_statfs()
567 spin_unlock(&sdp->sd_statfs_spin); in update_statfs()
575 struct gfs2_sbd *sdp = sb->s_fs_info; in gfs2_statfs_sync() local
576 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); in gfs2_statfs_sync()
577 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); in gfs2_statfs_sync()
578 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; in gfs2_statfs_sync()
579 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; in gfs2_statfs_sync()
594 spin_lock(&sdp->sd_statfs_spin); in gfs2_statfs_sync()
598 spin_unlock(&sdp->sd_statfs_spin); in gfs2_statfs_sync()
601 spin_unlock(&sdp->sd_statfs_spin); in gfs2_statfs_sync()
607 error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0); in gfs2_statfs_sync()
611 update_statfs(sdp, m_bh, l_bh); in gfs2_statfs_sync()
612 sdp->sd_statfs_force_sync = 0; in gfs2_statfs_sync()
614 gfs2_trans_end(sdp); in gfs2_statfs_sync()
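gfs2_statfs_sync() folds the local delta into the master copy inside a two-dinode transaction. A sketch of that core step, with the glock and buffer-head handling of the real function elided and the helper name hypothetical:

        static int statfs_sync_sketch(struct gfs2_sbd *sdp,
                                      struct buffer_head *m_bh,
                                      struct buffer_head *l_bh)
        {
                int error;

                error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
                if (error)
                        return error;

                update_statfs(sdp, m_bh, l_bh); /* master += local; local reset to zero */
                sdp->sd_statfs_force_sync = 0;

                gfs2_trans_end(sdp);
                return 0;
        }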
642 static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp, in gfs2_lock_fs_check_clean() argument
652 list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) { in gfs2_lock_fs_check_clean()
667 error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE, in gfs2_lock_fs_check_clean()
670 list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) { in gfs2_lock_fs_check_clean()
744 struct gfs2_sbd *sdp = GFS2_SB(inode); in gfs2_write_inode() local
752 gfs2_ail1_flush(sdp, wbc); in gfs2_write_inode()
778 struct gfs2_sbd *sdp = GFS2_SB(inode); in gfs2_dirty_inode() local
791 fs_err(sdp, "dirty_inode: glock %d\n", ret); in gfs2_dirty_inode()
799 ret = gfs2_trans_begin(sdp, RES_DINODE, 0); in gfs2_dirty_inode()
801 fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret); in gfs2_dirty_inode()
815 gfs2_trans_end(sdp); in gfs2_dirty_inode()
828 static int gfs2_make_fs_ro(struct gfs2_sbd *sdp) in gfs2_make_fs_ro() argument
833 error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, GL_NOCACHE, in gfs2_make_fs_ro()
835 if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) in gfs2_make_fs_ro()
838 kthread_stop(sdp->sd_quotad_process); in gfs2_make_fs_ro()
839 kthread_stop(sdp->sd_logd_process); in gfs2_make_fs_ro()
842 gfs2_quota_sync(sdp->sd_vfs, 0); in gfs2_make_fs_ro()
843 gfs2_statfs_sync(sdp->sd_vfs, 0); in gfs2_make_fs_ro()
845 down_write(&sdp->sd_log_flush_lock); in gfs2_make_fs_ro()
846 clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); in gfs2_make_fs_ro()
847 up_write(&sdp->sd_log_flush_lock); in gfs2_make_fs_ro()
849 gfs2_log_flush(sdp, NULL, SHUTDOWN_FLUSH); in gfs2_make_fs_ro()
850 wait_event(sdp->sd_reserving_log_wait, atomic_read(&sdp->sd_reserving_log) == 0); in gfs2_make_fs_ro()
851 gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks); in gfs2_make_fs_ro()
856 gfs2_quota_cleanup(sdp); in gfs2_make_fs_ro()
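The gfs2_make_fs_ro() matches show the ordering of the read-only transition: stop the daemons, sync quota and statfs, clear SDF_JOURNAL_LIVE under sd_log_flush_lock, then do the shutdown log flush and wait for outstanding log reservations. A sketch of that sequence, with the freeze-glock handling elided:

        static void make_fs_ro_sketch(struct gfs2_sbd *sdp)
        {
                kthread_stop(sdp->sd_quotad_process);
                kthread_stop(sdp->sd_logd_process);

                gfs2_quota_sync(sdp->sd_vfs, 0);
                gfs2_statfs_sync(sdp->sd_vfs, 0);

                down_write(&sdp->sd_log_flush_lock);
                clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
                up_write(&sdp->sd_log_flush_lock);

                gfs2_log_flush(sdp, NULL, SHUTDOWN_FLUSH);
                wait_event(sdp->sd_reserving_log_wait,
                           atomic_read(&sdp->sd_reserving_log) == 0);
                gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) ==
                                      sdp->sd_jdesc->jd_blocks);

                gfs2_quota_cleanup(sdp);
        }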
869 struct gfs2_sbd *sdp = sb->s_fs_info; in gfs2_put_super() local
874 set_bit(SDF_NORECOVERY, &sdp->sd_flags); in gfs2_put_super()
879 spin_lock(&sdp->sd_jindex_spin); in gfs2_put_super()
880 list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) { in gfs2_put_super()
883 spin_unlock(&sdp->sd_jindex_spin); in gfs2_put_super()
888 spin_unlock(&sdp->sd_jindex_spin); in gfs2_put_super()
891 error = gfs2_make_fs_ro(sdp); in gfs2_put_super()
893 gfs2_io_error(sdp); in gfs2_put_super()
899 iput(sdp->sd_jindex); in gfs2_put_super()
900 iput(sdp->sd_statfs_inode); in gfs2_put_super()
901 iput(sdp->sd_rindex); in gfs2_put_super()
902 iput(sdp->sd_quota_inode); in gfs2_put_super()
904 gfs2_glock_put(sdp->sd_rename_gl); in gfs2_put_super()
905 gfs2_glock_put(sdp->sd_freeze_gl); in gfs2_put_super()
907 if (!sdp->sd_args.ar_spectator) { in gfs2_put_super()
908 gfs2_glock_dq_uninit(&sdp->sd_journal_gh); in gfs2_put_super()
909 gfs2_glock_dq_uninit(&sdp->sd_jinode_gh); in gfs2_put_super()
910 gfs2_glock_dq_uninit(&sdp->sd_sc_gh); in gfs2_put_super()
911 gfs2_glock_dq_uninit(&sdp->sd_qc_gh); in gfs2_put_super()
912 iput(sdp->sd_sc_inode); in gfs2_put_super()
913 iput(sdp->sd_qc_inode); in gfs2_put_super()
916 gfs2_glock_dq_uninit(&sdp->sd_live_gh); in gfs2_put_super()
917 gfs2_clear_rgrpd(sdp); in gfs2_put_super()
918 gfs2_jindex_free(sdp); in gfs2_put_super()
920 gfs2_gl_hash_clear(sdp); in gfs2_put_super()
922 gfs2_lm_unmount(sdp); in gfs2_put_super()
925 gfs2_sys_fs_del(sdp); in gfs2_put_super()
937 struct gfs2_sbd *sdp = sb->s_fs_info; in gfs2_sync_fs() local
940 if (wait && sdp) in gfs2_sync_fs()
941 gfs2_log_flush(sdp, NULL, NORMAL_FLUSH); in gfs2_sync_fs()
949 struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work); in gfs2_freeze_func() local
950 struct super_block *sb = sdp->sd_vfs; in gfs2_freeze_func()
953 error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0, in gfs2_freeze_func()
957 gfs2_assert_withdraw(sdp, 0); in gfs2_freeze_func()
960 atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN); in gfs2_freeze_func()
965 gfs2_assert_withdraw(sdp, 0); in gfs2_freeze_func()
967 if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) in gfs2_freeze_func()
983 struct gfs2_sbd *sdp = sb->s_fs_info; in gfs2_freeze() local
986 mutex_lock(&sdp->sd_freeze_mutex); in gfs2_freeze()
987 if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN) in gfs2_freeze()
990 if (test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) { in gfs2_freeze()
996 error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh); in gfs2_freeze()
1002 fs_err(sdp, "waiting for recovery before freeze\n"); in gfs2_freeze()
1006 fs_err(sdp, "error freezing FS: %d\n", error); in gfs2_freeze()
1010 fs_err(sdp, "retrying...\n"); in gfs2_freeze()
1015 mutex_unlock(&sdp->sd_freeze_mutex); in gfs2_freeze()
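gfs2_freeze() loops until gfs2_lock_fs_check_clean() succeeds, reporting -EBUSY as "waiting for recovery before freeze". A sketch of that retry loop; the one-second delay between attempts is an assumption, as is the exact error returned when the filesystem has been shut down:

        static int freeze_sketch(struct super_block *sb)
        {
                struct gfs2_sbd *sdp = sb->s_fs_info;
                int error = 0;

                mutex_lock(&sdp->sd_freeze_mutex);
                if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN)
                        goto out;

                if (test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
                        error = -EINVAL;
                        goto out;
                }

                for (;;) {
                        error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh);
                        if (!error)
                                break;

                        if (error == -EBUSY)
                                fs_err(sdp, "waiting for recovery before freeze\n");
                        else
                                fs_err(sdp, "error freezing FS: %d\n", error);

                        fs_err(sdp, "retrying...\n");
                        msleep(1000);   /* assumed retry cadence */
                }
                error = 0;
        out:
                mutex_unlock(&sdp->sd_freeze_mutex);
                return error;
        }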
1027 struct gfs2_sbd *sdp = sb->s_fs_info; in gfs2_unfreeze() local
1029 mutex_lock(&sdp->sd_freeze_mutex); in gfs2_unfreeze()
1030 if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN || in gfs2_unfreeze()
1031 sdp->sd_freeze_gh.gh_gl == NULL) { in gfs2_unfreeze()
1032 mutex_unlock(&sdp->sd_freeze_mutex); in gfs2_unfreeze()
1036 gfs2_glock_dq_uninit(&sdp->sd_freeze_gh); in gfs2_unfreeze()
1037 mutex_unlock(&sdp->sd_freeze_mutex); in gfs2_unfreeze()
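gfs2_unfreeze() is the counterpart: under sd_freeze_mutex it drops the freeze glock only if the state is SFS_FROZEN and the holder was actually initialised, otherwise it bails out. A sketch of that check-and-release:

        static int unfreeze_sketch(struct super_block *sb)
        {
                struct gfs2_sbd *sdp = sb->s_fs_info;

                mutex_lock(&sdp->sd_freeze_mutex);
                if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
                    sdp->sd_freeze_gh.gh_gl == NULL) {
                        mutex_unlock(&sdp->sd_freeze_mutex);
                        return 0;
                }

                gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
                mutex_unlock(&sdp->sd_freeze_mutex);
                return 0;
        }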
1072 static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc) in gfs2_statfs_slow() argument
1086 rgd_next = gfs2_rgrpd_get_first(sdp); in gfs2_statfs_slow()
1140 static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc) in gfs2_statfs_i() argument
1142 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; in gfs2_statfs_i()
1143 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; in gfs2_statfs_i()
1145 spin_lock(&sdp->sd_statfs_spin); in gfs2_statfs_i()
1152 spin_unlock(&sdp->sd_statfs_spin); in gfs2_statfs_i()
1175 struct gfs2_sbd *sdp = sb->s_fs_info; in gfs2_statfs() local
1179 error = gfs2_rindex_update(sdp); in gfs2_statfs()
1183 if (gfs2_tune_get(sdp, gt_statfs_slow)) in gfs2_statfs()
1184 error = gfs2_statfs_slow(sdp, &sc); in gfs2_statfs()
1186 error = gfs2_statfs_i(sdp, &sc); in gfs2_statfs()
1192 buf->f_bsize = sdp->sd_sb.sb_bsize; in gfs2_statfs()
1214 struct gfs2_sbd *sdp = sb->s_fs_info; in gfs2_remount_fs() local
1215 struct gfs2_args args = sdp->sd_args; /* Default to current settings */ in gfs2_remount_fs()
1216 struct gfs2_tune *gt = &sdp->sd_tune; in gfs2_remount_fs()
1234 if (strcmp(args.ar_lockproto, sdp->sd_args.ar_lockproto) || in gfs2_remount_fs()
1235 strcmp(args.ar_locktable, sdp->sd_args.ar_locktable) || in gfs2_remount_fs()
1236 strcmp(args.ar_hostdata, sdp->sd_args.ar_hostdata)) in gfs2_remount_fs()
1240 if (args_neq(&args, &sdp->sd_args, spectator) || in gfs2_remount_fs()
1241 args_neq(&args, &sdp->sd_args, localflocks) || in gfs2_remount_fs()
1242 args_neq(&args, &sdp->sd_args, meta)) in gfs2_remount_fs()
1245 if (sdp->sd_args.ar_spectator) in gfs2_remount_fs()
1250 error = gfs2_make_fs_ro(sdp); in gfs2_remount_fs()
1252 error = gfs2_make_fs_rw(sdp); in gfs2_remount_fs()
1257 sdp->sd_args = args; in gfs2_remount_fs()
1258 if (sdp->sd_args.ar_posix_acl) in gfs2_remount_fs()
1262 if (sdp->sd_args.ar_nobarrier) in gfs2_remount_fs()
1263 set_bit(SDF_NOBARRIERS, &sdp->sd_flags); in gfs2_remount_fs()
1265 clear_bit(SDF_NOBARRIERS, &sdp->sd_flags); in gfs2_remount_fs()
1279 gfs2_online_uevent(sdp); in gfs2_remount_fs()
1330 struct gfs2_sbd *sdp = root->d_sb->s_fs_info; in gfs2_show_options() local
1331 struct gfs2_args *args = &sdp->sd_args; in gfs2_show_options()
1334 if (is_ancestor(root, sdp->sd_master_dir)) in gfs2_show_options()
1387 val = sdp->sd_tune.gt_logd_secs; in gfs2_show_options()
1390 val = sdp->sd_tune.gt_statfs_quantum; in gfs2_show_options()
1393 else if (sdp->sd_tune.gt_statfs_slow) in gfs2_show_options()
1395 val = sdp->sd_tune.gt_quota_quantum; in gfs2_show_options()
1416 if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) in gfs2_show_options()
1418 if (test_bit(SDF_DEMOTE, &sdp->sd_flags)) in gfs2_show_options()
1441 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_dinode_dealloc() local
1451 error = gfs2_rindex_update(sdp); in gfs2_dinode_dealloc()
1459 rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1); in gfs2_dinode_dealloc()
1470 error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, in gfs2_dinode_dealloc()
1471 sdp->sd_jdesc->jd_blocks); in gfs2_dinode_dealloc()
1479 gfs2_trans_end(sdp); in gfs2_dinode_dealloc()
1512 struct gfs2_sbd *sdp = sb->s_fs_info; in gfs2_evict_inode() local
1534 error = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED); in gfs2_evict_inode()
1577 gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH); in gfs2_evict_inode()
1587 error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks); in gfs2_evict_inode()
1592 gfs2_trans_end(sdp); in gfs2_evict_inode()
1606 fs_warn(sdp, "gfs2_evict_inode: %d\n", error); in gfs2_evict_inode()