Lines matching refs:gl (references to the glock pointer gl in fs/gfs2/glops.c; each entry gives the source line number, the matching line, and whether gl appears as an argument or a local in the enclosing function)
33 static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh) in gfs2_ail_error() argument
35 fs_err(gl->gl_sbd, "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page state 0x%lx\n", in gfs2_ail_error()
38 fs_err(gl->gl_sbd, "AIL glock %u:%llu mapping %p\n", in gfs2_ail_error()
39 gl->gl_name.ln_type, gl->gl_name.ln_number, in gfs2_ail_error()
40 gfs2_glock2aspace(gl)); in gfs2_ail_error()
41 gfs2_lm_withdraw(gl->gl_sbd, "AIL error\n"); in gfs2_ail_error()
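The fs_err() calls above print bh->b_blocknr with %llu; in the kernel source the value is cast to (unsigned long long) first, because sector_t's width varies with the build configuration. A minimal userspace sketch of that printing idiom, assuming only standard C; report_bad_block() and the sector_t typedef are illustrative stand-ins, not GFS2 code:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;    /* the kernel type may be 32 or 64 bits */

static void report_bad_block(sector_t blocknr, unsigned long state)
{
        /* The explicit cast keeps %llu correct no matter how
         * sector_t is actually defined on this build. */
        fprintf(stderr, "AIL buffer: blocknr %llu state 0x%08lx\n",
                (unsigned long long)blocknr, state);
}

int main(void)
{
        report_bad_block(123456789, 0x29UL);
        return 0;
}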
52 static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync, in __gfs2_ail_flush() argument
55 struct gfs2_sbd *sdp = gl->gl_sbd; in __gfs2_ail_flush()
56 struct list_head *head = &gl->gl_ail_list; in __gfs2_ail_flush()
70 gfs2_ail_error(gl, bh); in __gfs2_ail_flush()
75 GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count)); in __gfs2_ail_flush()
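__gfs2_ail_flush() walks the glock's gl_ail_list and, when it is done, GLOCK_BUG_ON() asserts that gl_ail_count has dropped to zero (except on the fsync path, where a partial flush is expected). A minimal C11 sketch of that drain-the-list-then-check-the-counter invariant; glock_like, ail_entry, and ail_flush() are hypothetical names, not kernel API:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ail_entry { struct ail_entry *next; };

struct glock_like {
        struct ail_entry *ail_list;  /* singly linked here for brevity */
        atomic_int ail_count;        /* must track the list length exactly */
};

static void ail_flush(struct glock_like *gl)
{
        while (gl->ail_list) {
                struct ail_entry *e = gl->ail_list;
                gl->ail_list = e->next;
                free(e);
                atomic_fetch_sub(&gl->ail_count, 1);
        }
        /* Same shape as GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ail_count)):
         * if the counter and the list ever disagree, that is a bug. */
        assert(atomic_load(&gl->ail_count) == 0);
}

int main(void)
{
        struct glock_like gl = { NULL, 0 };
        for (int i = 0; i < 3; i++) {
                struct ail_entry *e = malloc(sizeof(*e));
                e->next = gl.ail_list;
                gl.ail_list = e;
                atomic_fetch_add(&gl.ail_count, 1);
        }
        ail_flush(&gl);
        puts("AIL drained, counter consistent");
        return 0;
}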
81 static void gfs2_ail_empty_gl(struct gfs2_glock *gl) in gfs2_ail_empty_gl() argument
83 struct gfs2_sbd *sdp = gl->gl_sbd; in gfs2_ail_empty_gl()
89 tr.tr_revokes = atomic_read(&gl->gl_ail_count); in gfs2_ail_empty_gl()
104 __gfs2_ail_flush(gl, 0, tr.tr_revokes); in gfs2_ail_empty_gl()
110 void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) in gfs2_ail_flush() argument
112 struct gfs2_sbd *sdp = gl->gl_sbd; in gfs2_ail_flush()
113 unsigned int revokes = atomic_read(&gl->gl_ail_count); in gfs2_ail_flush()
126 __gfs2_ail_flush(gl, fsync, max_revokes); in gfs2_ail_flush()
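Both gfs2_ail_empty_gl() and gfs2_ail_flush() begin by snapshotting gl_ail_count with atomic_read() to size the revoke budget for the transaction, and return early when the snapshot is zero. A sketch of that snapshot-then-budget pattern, assuming C11 atomics; flush_with_budget() is a hypothetical name:

#include <stdatomic.h>
#include <stdio.h>

static void flush_with_budget(atomic_int *ail_count)
{
        int revokes = atomic_load(ail_count);  /* snapshot, like atomic_read() */

        if (revokes == 0)
                return;  /* nothing to revoke: skip transaction setup entirely */

        printf("reserving log space for %d revokes\n", revokes);
        /* ... perform at most `revokes` worth of work in this pass ... */
}

int main(void)
{
        atomic_int count = 5;
        flush_with_budget(&count);
        return 0;
}

The live counter may keep moving after the snapshot; the snapshot only bounds this one pass, which is why each flush re-reads it.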
140 static void rgrp_go_sync(struct gfs2_glock *gl) in rgrp_go_sync() argument
142 struct gfs2_sbd *sdp = gl->gl_sbd; in rgrp_go_sync()
147 if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) in rgrp_go_sync()
149 GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE); in rgrp_go_sync()
151 gfs2_log_flush(sdp, gl, NORMAL_FLUSH); in rgrp_go_sync()
152 filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end); in rgrp_go_sync()
153 error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end); in rgrp_go_sync()
155 gfs2_ail_empty_gl(gl); in rgrp_go_sync()
157 spin_lock(&gl->gl_spin); in rgrp_go_sync()
158 rgd = gl->gl_object; in rgrp_go_sync()
161 spin_unlock(&gl->gl_spin); in rgrp_go_sync()
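rgrp_go_sync(), like inode_go_sync() below, opens with test_and_clear_bit(GLF_DIRTY, ...): only the caller that actually observes the dirty bit pays for the log flush and writeback, and everyone else returns immediately. A userspace analogue built on C11 atomic_fetch_and; the bit position and function names are invented for the example:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define GLF_DIRTY (1u << 5)  /* hypothetical bit position */

/* Analogue of test_and_clear_bit(): atomically clear the bit and
 * report whether it was set, so exactly one caller wins. */
static bool test_and_clear_flag(atomic_uint *flags, unsigned bit)
{
        unsigned old = atomic_fetch_and(flags, ~bit);
        return old & bit;
}

static void go_sync(atomic_uint *flags)
{
        if (!test_and_clear_flag(flags, GLF_DIRTY))
                return;  /* not dirty, or another caller is already syncing */
        puts("flushing the log and writing the mapping back");
}

int main(void)
{
        atomic_uint flags = GLF_DIRTY;
        go_sync(&flags);  /* does the writeback */
        go_sync(&flags);  /* no-op: the bit is already clear */
        return 0;
}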
174 static void rgrp_go_inval(struct gfs2_glock *gl, int flags) in rgrp_go_inval() argument
176 struct gfs2_sbd *sdp = gl->gl_sbd; in rgrp_go_inval()
180 gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count)); in rgrp_go_inval()
181 truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end); in rgrp_go_inval()
183 if (gl->gl_object) { in rgrp_go_inval()
184 struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object; in rgrp_go_inval()
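rgrp_go_inval() asserts that the glock owns no AIL buffers before it throws pages away, then truncates only its own gl_vm.start..gl_vm.end slice of the shared mapping, leaving other resource groups' pages intact. A toy sketch of that assert-then-truncate-a-range ordering; the pages[] array and go_inval() are stand-ins, not the page cache:

#include <assert.h>
#include <stdio.h>

#define NPAGES 16
static int pages[NPAGES];
static int ail_count;  /* pinned-dirty count for this glock */

static void go_inval(int start, int end)
{
        /* Never discard data the log still needs: mirrors
         * gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count)). */
        assert(ail_count == 0);

        for (int i = start; i <= end && i < NPAGES; i++)
                pages[i] = 0;  /* drop only this glock's range */
}

int main(void)
{
        for (int i = 0; i < NPAGES; i++)
                pages[i] = 1;
        go_inval(4, 7);  /* pages owned by other glocks are untouched */
        printf("page 3=%d page 5=%d page 8=%d\n", pages[3], pages[5], pages[8]);
        return 0;
}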
195 static void inode_go_sync(struct gfs2_glock *gl) in inode_go_sync() argument
197 struct gfs2_inode *ip = gl->gl_object; in inode_go_sync()
198 struct address_space *metamapping = gfs2_glock2aspace(gl); in inode_go_sync()
208 if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) in inode_go_sync()
211 GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE); in inode_go_sync()
213 gfs2_log_flush(gl->gl_sbd, gl, NORMAL_FLUSH); in inode_go_sync()
223 gfs2_ail_empty_gl(gl); in inode_go_sync()
229 clear_bit(GLF_DIRTY, &gl->gl_flags); in inode_go_sync()
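inode_go_sync() flushes the journal first (line 213 above) and then, on lines not shown here because they do not mention gl, submits writeback for the metadata and data mappings before waiting on either: the same filemap_fdatawrite()/filemap_fdatawait() split visible on lines 152-153 in rgrp_go_sync(). Submitting everything before waiting lets the device see one batch instead of a write-wait ping-pong. A sketch of that shape with stub I/O functions; submit_write() and wait_for_write() are invented for the example:

#include <stdio.h>

static void submit_write(int page)   { printf("submit %d\n", page); }
static void wait_for_write(int page) { printf("complete %d\n", page); }

static void sync_mapping(int npages)
{
        for (int i = 0; i < npages; i++)
                submit_write(i);      /* fdatawrite: queue all the I/O */
        for (int i = 0; i < npages; i++)
                wait_for_write(i);    /* fdatawait: then block for completion */
}

int main(void)
{
        sync_mapping(3);
        return 0;
}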
243 static void inode_go_inval(struct gfs2_glock *gl, int flags) in inode_go_inval() argument
245 struct gfs2_inode *ip = gl->gl_object; in inode_go_inval()
247 gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count)); in inode_go_inval()
250 struct address_space *mapping = gfs2_glock2aspace(gl); in inode_go_inval()
259 if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) { in inode_go_inval()
260 gfs2_log_flush(gl->gl_sbd, NULL, NORMAL_FLUSH); in inode_go_inval()
261 gl->gl_sbd->sd_rindex_uptodate = 0; in inode_go_inval()
274 static int inode_go_demote_ok(const struct gfs2_glock *gl) in inode_go_demote_ok() argument
276 struct gfs2_sbd *sdp = gl->gl_sbd; in inode_go_demote_ok()
279 if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object) in inode_go_demote_ok()
282 if (!list_empty(&gl->gl_holders)) { in inode_go_demote_ok()
283 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); in inode_go_demote_ok()
284 if (gh->gh_list.next != &gl->gl_holders) in inode_go_demote_ok()
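inode_go_demote_ok() takes the first holder with list_entry(gl->gl_holders.next, ...) and then tests gh->gh_list.next != &gl->gl_holders: on a kernel-style circular list the head is a sentinel, so if the first entry's next pointer does not wrap straight back to the head there must be a second holder. A self-contained sketch of that exactly-one-entry test; list_head here is a minimal reimplementation, not <linux/list.h>:

#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

/* Exactly-one-entry test, same shape as inode_go_demote_ok(): the
 * first entry's next pointer wraps straight back to the head. */
static bool has_single_entry(struct list_head *h)
{
        return h->next != h          /* not empty */
            && h->next->next == h;   /* first entry is also the last */
}

int main(void)
{
        struct list_head holders, a, b;
        list_init(&holders);
        list_add_tail(&a, &holders);
        printf("one holder: %d\n", has_single_entry(&holders));  /* 1 */
        list_add_tail(&b, &holders);
        printf("one holder: %d\n", has_single_entry(&holders));  /* 0 */
        return 0;
}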
410 struct gfs2_glock *gl = gh->gh_gl; in inode_go_lock() local
411 struct gfs2_sbd *sdp = gl->gl_sbd; in inode_go_lock()
412 struct gfs2_inode *ip = gl->gl_object; in inode_go_lock()
428 (gl->gl_state == LM_ST_EXCLUSIVE) && in inode_go_lock()
448 static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl) in inode_go_dump() argument
450 const struct gfs2_inode *ip = gl->gl_object; in inode_go_dump()
469 static void freeze_go_sync(struct gfs2_glock *gl) in freeze_go_sync() argument
472 struct gfs2_sbd *sdp = gl->gl_sbd; in freeze_go_sync()
474 if (gl->gl_state == LM_ST_SHARED && in freeze_go_sync()
493 static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh) in freeze_go_xmote_bh() argument
495 struct gfs2_sbd *sdp = gl->gl_sbd; in freeze_go_xmote_bh()
526 static int freeze_go_demote_ok(const struct gfs2_glock *gl) in freeze_go_demote_ok() argument
537 static void iopen_go_callback(struct gfs2_glock *gl, bool remote) in iopen_go_callback() argument
539 struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object; in iopen_go_callback()
540 struct gfs2_sbd *sdp = gl->gl_sbd; in iopen_go_callback()
545 if (gl->gl_demote_state == LM_ST_UNLOCKED && in iopen_go_callback()
546 gl->gl_state == LM_ST_SHARED && ip) { in iopen_go_callback()
547 gl->gl_lockref.count++; in iopen_go_callback()
548 if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0) in iopen_go_callback()
549 gl->gl_lockref.count--; in iopen_go_callback()
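iopen_go_callback() bumps gl_lockref.count before calling queue_work() and drops the reference again when queue_work() returns 0, meaning the work item was already pending, so the queued work always holds exactly one reference to the glock. A userspace sketch of that take-ref-before-queue pattern, assuming C11; queue_work_stub() and schedule_delete() are hypothetical:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool already_queued;

/* Stand-in for queue_work(): returns false when the item is
 * already on the queue, like queue_work() returning 0. */
static bool queue_work_stub(void)
{
        if (already_queued)
                return false;
        already_queued = true;
        return true;
}

static atomic_int refcount = 1;

/* Take the reference *before* queueing, so the worker can never run
 * against a freed object; give it back only if we lost the race. */
static void schedule_delete(void)
{
        atomic_fetch_add(&refcount, 1);
        if (!queue_work_stub())
                atomic_fetch_sub(&refcount, 1);  /* queued by someone else */
}

int main(void)
{
        schedule_delete();
        schedule_delete();  /* second call: no extra reference kept */
        printf("refcount = %d\n", atomic_load(&refcount));  /* prints 2 */
        return 0;
}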