Lines Matching refs:gl

33 static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)  in gfs2_ail_error()  argument
35 fs_err(gl->gl_name.ln_sbd, in gfs2_ail_error()
40 fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n", in gfs2_ail_error()
41 gl->gl_name.ln_type, gl->gl_name.ln_number, in gfs2_ail_error()
42 gfs2_glock2aspace(gl)); in gfs2_ail_error()
43 gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n"); in gfs2_ail_error()
54 static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync, in __gfs2_ail_flush() argument
57 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in __gfs2_ail_flush()
58 struct list_head *head = &gl->gl_ail_list; in __gfs2_ail_flush()
72 gfs2_ail_error(gl, bh); in __gfs2_ail_flush()
77 GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count)); in __gfs2_ail_flush()
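
__gfs2_ail_flush() walks the glock's gl_ail_list, detaching buffers as it goes and reporting any buffer in an unexpected state via gfs2_ail_error(). Below is a minimal, self-contained user-space sketch of that traversal discipline, safely walking an intrusive list while removing entries. The list_head helpers, container_of(), and the buf type are hand-rolled stand-ins for the kernel's, not GFS2 code.

#include <stdio.h>
#include <stddef.h>

/* Hand-rolled stand-ins for the kernel's intrusive list helpers. */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
    n->next = h->next; n->prev = h;
    h->next->prev = n; h->next = n;
}

static void list_del(struct list_head *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
}

/* Illustrative stand-in for a buffer tracked on an AIL list. */
struct buf {
    int blocknr;
    struct list_head ail_entry;
};

int main(void)
{
    struct list_head ail;
    struct buf bufs[3];
    struct list_head *pos, *tmp;

    list_init(&ail);
    for (int i = 0; i < 3; i++) {
        bufs[i].blocknr = 100 + i;
        list_add(&bufs[i].ail_entry, &ail);
    }

    /*
     * Safe traversal: remember ->next before detaching the current
     * entry, the same discipline list_for_each_entry_safe() encodes.
     */
    for (pos = ail.next; pos != &ail; pos = tmp) {
        struct buf *b = container_of(pos, struct buf, ail_entry);
        tmp = pos->next;
        list_del(pos);
        printf("flushed block %d\n", b->blocknr);
    }
    return 0;
}
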
83 static void gfs2_ail_empty_gl(struct gfs2_glock *gl) in gfs2_ail_empty_gl() argument
85 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_ail_empty_gl()
91 tr.tr_revokes = atomic_read(&gl->gl_ail_count); in gfs2_ail_empty_gl()
106 __gfs2_ail_flush(gl, 0, tr.tr_revokes); in gfs2_ail_empty_gl()
112 void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) in gfs2_ail_flush() argument
114 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_ail_flush()
115 unsigned int revokes = atomic_read(&gl->gl_ail_count); in gfs2_ail_flush()
128 __gfs2_ail_flush(gl, fsync, max_revokes); in gfs2_ail_flush()
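
gfs2_ail_flush() reads gl_ail_count once with atomic_read() and sizes its revoke reservation from that snapshot rather than re-reading the live counter. A sketch of the snapshot-then-size pattern in plain C11 atomics; the ail_count variable and the printed message are illustrative stand-ins, not GFS2 API.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for gl->gl_ail_count. */
static atomic_int ail_count;

int main(void)
{
    atomic_store(&ail_count, 5);

    /*
     * Snapshot the counter once and size all follow-up work from that
     * snapshot. Re-reading the live counter later could return a
     * different value than the one the reservation was made for.
     */
    int revokes = atomic_load(&ail_count);
    if (revokes == 0) {
        puts("AIL empty, nothing to flush");
        return 0;
    }
    printf("reserving log space for %d revokes\n", revokes);
    return 0;
}
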
142 static void rgrp_go_sync(struct gfs2_glock *gl) in rgrp_go_sync() argument
144 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in rgrp_go_sync()
149 spin_lock(&gl->gl_lockref.lock); in rgrp_go_sync()
150 rgd = gl->gl_object; in rgrp_go_sync()
153 spin_unlock(&gl->gl_lockref.lock); in rgrp_go_sync()
155 if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) in rgrp_go_sync()
157 GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE); in rgrp_go_sync()
159 gfs2_log_flush(sdp, gl, NORMAL_FLUSH); in rgrp_go_sync()
160 filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end); in rgrp_go_sync()
161 error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end); in rgrp_go_sync()
163 gfs2_ail_empty_gl(gl); in rgrp_go_sync()
165 spin_lock(&gl->gl_lockref.lock); in rgrp_go_sync()
166 rgd = gl->gl_object; in rgrp_go_sync()
169 spin_unlock(&gl->gl_lockref.lock); in rgrp_go_sync()
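
rgrp_go_sync() returns early unless test_and_clear_bit(GLF_DIRTY, ...) reports the bit was set, so exactly one caller performs the flush for a given dirtying. A user-space sketch of that guard using C11 atomics; the bit position chosen for GLF_DIRTY here is illustrative, not the kernel's value.

#include <stdatomic.h>
#include <stdio.h>

#define GLF_DIRTY (1u << 0)   /* illustrative flag bit */

static atomic_uint gl_flags = GLF_DIRTY;

/* Clear a flag bit and report whether it was set: only the caller
 * that actually observed the bit set goes on to do the sync. */
static int test_and_clear(atomic_uint *flags, unsigned int bit)
{
    return (atomic_fetch_and(flags, ~bit) & bit) != 0;
}

int main(void)
{
    if (!test_and_clear(&gl_flags, GLF_DIRTY)) {
        puts("not dirty, nothing to sync");
        return 0;
    }
    puts("was dirty: flushing log and writing back pages");

    /* A second caller now sees the bit clear and returns early. */
    if (!test_and_clear(&gl_flags, GLF_DIRTY))
        puts("second caller: not dirty, nothing to sync");
    return 0;
}
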
182 static void rgrp_go_inval(struct gfs2_glock *gl, int flags) in rgrp_go_inval() argument
184 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in rgrp_go_inval()
186 struct gfs2_rgrpd *rgd = gl->gl_object; in rgrp_go_inval()
192 gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count)); in rgrp_go_inval()
193 truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end); in rgrp_go_inval()
205 static void inode_go_sync(struct gfs2_glock *gl) in inode_go_sync() argument
207 struct gfs2_inode *ip = gl->gl_object; in inode_go_sync()
208 struct address_space *metamapping = gfs2_glock2aspace(gl); in inode_go_sync()
218 if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) in inode_go_sync()
221 GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE); in inode_go_sync()
223 gfs2_log_flush(gl->gl_name.ln_sbd, gl, NORMAL_FLUSH); in inode_go_sync()
233 gfs2_ail_empty_gl(gl); in inode_go_sync()
239 clear_bit(GLF_DIRTY, &gl->gl_flags); in inode_go_sync()
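
inode_go_sync() is careful about ordering: the log is flushed before the pages it describes are written back (gfs2_log_flush(), then the mapping writeback, then gfs2_ail_empty_gl(), and only then is GLF_DIRTY cleared). The sketch below models the log-before-data half of that ordering with ordinary files and fsync(); the file names and the write_all() helper are illustrative, not GFS2 API.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

/* Persist a record and wait for it to reach stable storage before
 * the caller takes the next step. */
static void write_all(int fd, const char *s)
{
    if (write(fd, s, strlen(s)) < 0)
        perror("write");
    fsync(fd);
}

int main(void)
{
    int log = open("journal.log", O_CREAT | O_WRONLY | O_APPEND, 0644);
    int dat = open("data.bin",   O_CREAT | O_WRONLY | O_APPEND, 0644);
    if (log < 0 || dat < 0)
        return 1;

    write_all(log, "intent: update block 42\n");  /* log first...   */
    write_all(dat, "new contents of block 42\n"); /* ...then data   */

    close(log);
    close(dat);
    return 0;
}
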
253 static void inode_go_inval(struct gfs2_glock *gl, int flags) in inode_go_inval() argument
255 struct gfs2_inode *ip = gl->gl_object; in inode_go_inval()
257 gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count)); in inode_go_inval()
260 struct address_space *mapping = gfs2_glock2aspace(gl); in inode_go_inval()
269 if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) { in inode_go_inval()
270 gfs2_log_flush(gl->gl_name.ln_sbd, NULL, NORMAL_FLUSH); in inode_go_inval()
271 gl->gl_name.ln_sbd->sd_rindex_uptodate = 0; in inode_go_inval()
284 static int inode_go_demote_ok(const struct gfs2_glock *gl) in inode_go_demote_ok() argument
286 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in inode_go_demote_ok()
289 if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object) in inode_go_demote_ok()
292 if (!list_empty(&gl->gl_holders)) { in inode_go_demote_ok()
293 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); in inode_go_demote_ok()
294 if (gh->gh_list.next != &gl->gl_holders) in inode_go_demote_ok()
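
inode_go_demote_ok() permits demotion only when at most one holder is queued: with a circular list headed at gl_holders, the first entry's gh_list.next pointing back at the head means there is exactly one holder. A self-contained sketch of that test, with a hand-rolled list_head standing in for the kernel's:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
    n->prev = h->prev; n->next = h;
    h->prev->next = n; h->prev = n;
}

/* On a circular list, "non-empty and the first entry's next is the
 * head" means exactly one entry. */
static int has_exactly_one(const struct list_head *h)
{
    return h->next != h && h->next->next == h;
}

int main(void)
{
    struct list_head holders, a, b;

    list_init(&holders);
    printf("empty: %d\n", has_exactly_one(&holders));       /* 0 */
    list_add_tail(&a, &holders);
    printf("one holder: %d\n", has_exactly_one(&holders));  /* 1 */
    list_add_tail(&b, &holders);
    printf("two holders: %d\n", has_exactly_one(&holders)); /* 0 */
    return 0;
}
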
420 struct gfs2_glock *gl = gh->gh_gl; in inode_go_lock() local
421 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in inode_go_lock()
422 struct gfs2_inode *ip = gl->gl_object; in inode_go_lock()
438 (gl->gl_state == LM_ST_EXCLUSIVE) && in inode_go_lock()
458 static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl) in inode_go_dump() argument
460 const struct gfs2_inode *ip = gl->gl_object; in inode_go_dump()
479 static void freeze_go_sync(struct gfs2_glock *gl) in freeze_go_sync() argument
482 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in freeze_go_sync()
484 if (gl->gl_state == LM_ST_SHARED && in freeze_go_sync()
503 static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh) in freeze_go_xmote_bh() argument
505 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in freeze_go_xmote_bh()
536 static int freeze_go_demote_ok(const struct gfs2_glock *gl) in freeze_go_demote_ok() argument
547 static void iopen_go_callback(struct gfs2_glock *gl, bool remote) in iopen_go_callback() argument
549 struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object; in iopen_go_callback()
550 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in iopen_go_callback()
555 if (gl->gl_demote_state == LM_ST_UNLOCKED && in iopen_go_callback()
556 gl->gl_state == LM_ST_SHARED && ip) { in iopen_go_callback()
557 gl->gl_lockref.count++; in iopen_go_callback()
558 if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0) in iopen_go_callback()
559 gl->gl_lockref.count--; in iopen_go_callback()
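
iopen_go_callback() takes a reference on the glock before queueing the delete work and drops it again if queue_work() reports the work was already queued, so the worker can never run against a freed object. A user-space sketch of that take-ref/queue/maybe-drop dance; queue_work_stub() and the plain int counters are illustrative stand-ins for the kernel's workqueue and lockref, not GFS2 API.

#include <stdio.h>

/* Illustrative stand-ins: a refcount and a "work already queued" bit. */
static int refcount = 1;
static int work_pending;

/* Mimics queue_work(): returns 0 if the item was already queued. */
static int queue_work_stub(void)
{
    if (work_pending)
        return 0;
    work_pending = 1;
    return 1;
}

static void try_schedule_delete(void)
{
    /*
     * Take the reference *before* queueing; if the work was already
     * pending, the extra reference is not needed and is dropped again.
     */
    refcount++;
    if (queue_work_stub() == 0)
        refcount--;
    printf("refcount now %d, work_pending=%d\n", refcount, work_pending);
}

int main(void)
{
    try_schedule_delete();  /* first call queues and keeps the ref */
    try_schedule_delete();  /* second call finds it queued, drops it */
    return 0;
}
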