Lines Matching refs:gl

56 	struct gfs2_glock *gl;		/* current glock struct        */  member
60 typedef void (*glock_examiner) (struct gfs2_glock * gl);
62 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
83 void gfs2_glock_free(struct gfs2_glock *gl) in gfs2_glock_free() argument
85 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_glock_free()
87 if (gl->gl_ops->go_flags & GLOF_ASPACE) { in gfs2_glock_free()
88 kmem_cache_free(gfs2_glock_aspace_cachep, gl); in gfs2_glock_free()
90 kfree(gl->gl_lksb.sb_lvbptr); in gfs2_glock_free()
91 kmem_cache_free(gfs2_glock_cachep, gl); in gfs2_glock_free()
103 static void gfs2_glock_hold(struct gfs2_glock *gl) in gfs2_glock_hold() argument
105 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); in gfs2_glock_hold()
106 lockref_get(&gl->gl_lockref); in gfs2_glock_hold()
116 static int demote_ok(const struct gfs2_glock *gl) in demote_ok() argument
118 const struct gfs2_glock_operations *glops = gl->gl_ops; in demote_ok()
120 if (gl->gl_state == LM_ST_UNLOCKED) in demote_ok()
122 if (!list_empty(&gl->gl_holders)) in demote_ok()
125 return glops->go_demote_ok(gl); in demote_ok()
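Assembled from the demote_ok() lines above, the decision reduces to three checks. The sketch below is illustrative only: the concrete return values and the NULL test on go_demote_ok are assumptions filled in around the listed lines, not a verbatim copy of the kernel function.

	static int demote_ok_sketch(const struct gfs2_glock *gl)
	{
		const struct gfs2_glock_operations *glops = gl->gl_ops;

		if (gl->gl_state == LM_ST_UNLOCKED)
			return 1;		/* already unlocked: nothing left to demote */
		if (!list_empty(&gl->gl_holders))
			return 0;		/* still held: demotion would break a holder */
		if (glops->go_demote_ok)	/* per-glock-type veto hook */
			return glops->go_demote_ok(gl);
		return 1;
	}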
130 void gfs2_glock_add_to_lru(struct gfs2_glock *gl) in gfs2_glock_add_to_lru() argument
134 if (!list_empty(&gl->gl_lru)) in gfs2_glock_add_to_lru()
135 list_del_init(&gl->gl_lru); in gfs2_glock_add_to_lru()
139 list_add_tail(&gl->gl_lru, &lru_list); in gfs2_glock_add_to_lru()
140 set_bit(GLF_LRU, &gl->gl_flags); in gfs2_glock_add_to_lru()
144 static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) in gfs2_glock_remove_from_lru() argument
147 if (!list_empty(&gl->gl_lru)) { in gfs2_glock_remove_from_lru()
148 list_del_init(&gl->gl_lru); in gfs2_glock_remove_from_lru()
150 clear_bit(GLF_LRU, &gl->gl_flags); in gfs2_glock_remove_from_lru()
161 void gfs2_glock_put(struct gfs2_glock *gl) in gfs2_glock_put() argument
163 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_glock_put()
164 struct address_space *mapping = gfs2_glock2aspace(gl); in gfs2_glock_put()
166 if (lockref_put_or_lock(&gl->gl_lockref)) in gfs2_glock_put()
169 lockref_mark_dead(&gl->gl_lockref); in gfs2_glock_put()
171 gfs2_glock_remove_from_lru(gl); in gfs2_glock_put()
172 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_put()
173 rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms); in gfs2_glock_put()
174 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); in gfs2_glock_put()
175 GLOCK_BUG_ON(gl, mapping && mapping->nrpages); in gfs2_glock_put()
176 trace_gfs2_glock_put(gl); in gfs2_glock_put()
177 sdp->sd_lockstruct.ls_ops->lm_put_lock(gl); in gfs2_glock_put()
188 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh) in may_grant() argument
190 const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list); in may_grant()
194 if (gl->gl_state == gh->gh_state) in may_grant()
198 if (gl->gl_state == LM_ST_EXCLUSIVE) { in may_grant()
204 if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY)) in may_grant()
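Reading the surviving may_grant() lines in order gives roughly the following grant test. This is a hedged sketch: the body of the EXCLUSIVE branch (where compatible waiters may piggy-back on the head holder) is elided here and replaced by a comment, so it is deliberately more conservative than the real function.

	/* Sketch: may holder gh be granted given the glock's current state? */
	static inline int may_grant_sketch(const struct gfs2_glock *gl,
					   const struct gfs2_holder *gh)
	{
		if (gl->gl_state == gh->gh_state)
			return 1;	/* glock already in the requested mode */
		if (gl->gl_state == LM_ST_EXCLUSIVE)
			return 0;	/* assumption: EX grants only via head-holder rules, elided */
		if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
			return 1;	/* holder accepts whatever mode is currently held */
		return 0;
	}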
221 static inline void do_error(struct gfs2_glock *gl, const int ret) in do_error() argument
225 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { in do_error()
248 static int do_promote(struct gfs2_glock *gl) in do_promote() argument
249 __releases(&gl->gl_lockref.lock) in do_promote()
250 __acquires(&gl->gl_lockref.lock) in do_promote()
252 const struct gfs2_glock_operations *glops = gl->gl_ops; in do_promote()
257 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { in do_promote()
260 if (may_grant(gl, gh)) { in do_promote()
261 if (gh->gh_list.prev == &gl->gl_holders && in do_promote()
263 spin_unlock(&gl->gl_lockref.lock); in do_promote()
266 spin_lock(&gl->gl_lockref.lock); in do_promote()
286 if (gh->gh_list.prev == &gl->gl_holders) in do_promote()
288 do_error(gl, 0); in do_promote()
299 static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl) in find_first_waiter() argument
303 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in find_first_waiter()
317 static void state_change(struct gfs2_glock *gl, unsigned int new_state) in state_change() argument
321 held1 = (gl->gl_state != LM_ST_UNLOCKED); in state_change()
325 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); in state_change()
327 gl->gl_lockref.count++; in state_change()
329 gl->gl_lockref.count--; in state_change()
331 if (held1 && held2 && list_empty(&gl->gl_holders)) in state_change()
332 clear_bit(GLF_QUEUED, &gl->gl_flags); in state_change()
334 if (new_state != gl->gl_target) in state_change()
336 gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR, in state_change()
338 gl->gl_state = new_state; in state_change()
339 gl->gl_tchange = jiffies; in state_change()
342 static void gfs2_demote_wake(struct gfs2_glock *gl) in gfs2_demote_wake() argument
344 gl->gl_demote_state = LM_ST_EXCLUSIVE; in gfs2_demote_wake()
345 clear_bit(GLF_DEMOTE, &gl->gl_flags); in gfs2_demote_wake()
347 wake_up_bit(&gl->gl_flags, GLF_DEMOTE); in gfs2_demote_wake()
357 static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) in finish_xmote() argument
359 const struct gfs2_glock_operations *glops = gl->gl_ops; in finish_xmote()
364 spin_lock(&gl->gl_lockref.lock); in finish_xmote()
365 trace_gfs2_glock_state_change(gl, state); in finish_xmote()
366 state_change(gl, state); in finish_xmote()
367 gh = find_first_waiter(gl); in finish_xmote()
370 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) && in finish_xmote()
371 state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED) in finish_xmote()
372 gl->gl_target = LM_ST_UNLOCKED; in finish_xmote()
375 if (unlikely(state != gl->gl_target)) { in finish_xmote()
376 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { in finish_xmote()
380 list_move_tail(&gh->gh_list, &gl->gl_holders); in finish_xmote()
381 gh = find_first_waiter(gl); in finish_xmote()
382 gl->gl_target = gh->gh_state; in finish_xmote()
388 gl->gl_target = gl->gl_state; in finish_xmote()
389 do_error(gl, ret); in finish_xmote()
397 do_xmote(gl, gh, gl->gl_target); in finish_xmote()
402 do_xmote(gl, gh, LM_ST_UNLOCKED); in finish_xmote()
405 pr_err("wanted %u got %u\n", gl->gl_target, state); in finish_xmote()
406 GLOCK_BUG_ON(gl, 1); in finish_xmote()
408 spin_unlock(&gl->gl_lockref.lock); in finish_xmote()
413 if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) in finish_xmote()
414 gfs2_demote_wake(gl); in finish_xmote()
417 spin_unlock(&gl->gl_lockref.lock); in finish_xmote()
418 rv = glops->go_xmote_bh(gl, gh); in finish_xmote()
419 spin_lock(&gl->gl_lockref.lock); in finish_xmote()
421 do_error(gl, rv); in finish_xmote()
425 rv = do_promote(gl); in finish_xmote()
430 clear_bit(GLF_LOCK, &gl->gl_flags); in finish_xmote()
432 spin_unlock(&gl->gl_lockref.lock); in finish_xmote()
443 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target) in do_xmote() argument
444 __releases(&gl->gl_lockref.lock) in do_xmote()
445 __acquires(&gl->gl_lockref.lock) in do_xmote()
447 const struct gfs2_glock_operations *glops = gl->gl_ops; in do_xmote()
448 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in do_xmote()
454 GLOCK_BUG_ON(gl, gl->gl_state == target); in do_xmote()
455 GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target); in do_xmote()
458 set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); in do_xmote()
459 do_error(gl, 0); /* Fail queued try locks */ in do_xmote()
461 gl->gl_req = target; in do_xmote()
462 set_bit(GLF_BLOCKING, &gl->gl_flags); in do_xmote()
463 if ((gl->gl_req == LM_ST_UNLOCKED) || in do_xmote()
464 (gl->gl_state == LM_ST_EXCLUSIVE) || in do_xmote()
466 clear_bit(GLF_BLOCKING, &gl->gl_flags); in do_xmote()
467 spin_unlock(&gl->gl_lockref.lock); in do_xmote()
469 glops->go_sync(gl); in do_xmote()
470 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) in do_xmote()
471 glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA); in do_xmote()
472 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); in do_xmote()
474 gfs2_glock_hold(gl); in do_xmote()
477 ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags); in do_xmote()
480 GLOCK_BUG_ON(gl, 1); in do_xmote()
483 finish_xmote(gl, target); in do_xmote()
484 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) in do_xmote()
485 gfs2_glock_put(gl); in do_xmote()
488 spin_lock(&gl->gl_lockref.lock); in do_xmote()
496 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl) in find_first_holder() argument
500 if (!list_empty(&gl->gl_holders)) { in find_first_holder()
501 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); in find_first_holder()
515 static void run_queue(struct gfs2_glock *gl, const int nonblock) in run_queue() argument
516 __releases(&gl->gl_lockref.lock) in run_queue()
517 __acquires(&gl->gl_lockref.lock) in run_queue()
522 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) in run_queue()
525 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)); in run_queue()
527 if (test_bit(GLF_DEMOTE, &gl->gl_flags) && in run_queue()
528 gl->gl_demote_state != gl->gl_state) { in run_queue()
529 if (find_first_holder(gl)) in run_queue()
533 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); in run_queue()
534 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); in run_queue()
535 gl->gl_target = gl->gl_demote_state; in run_queue()
537 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) in run_queue()
538 gfs2_demote_wake(gl); in run_queue()
539 ret = do_promote(gl); in run_queue()
544 gh = find_first_waiter(gl); in run_queue()
545 gl->gl_target = gh->gh_state; in run_queue()
547 do_error(gl, 0); /* Fail queued try locks */ in run_queue()
549 do_xmote(gl, gh, gl->gl_target); in run_queue()
554 clear_bit(GLF_LOCK, &gl->gl_flags); in run_queue()
556 gl->gl_lockref.count++; in run_queue()
557 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) in run_queue()
558 gl->gl_lockref.count--; in run_queue()
562 clear_bit(GLF_LOCK, &gl->gl_flags); in run_queue()
569 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete); in delete_work_func() local
570 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in delete_work_func()
573 u64 no_addr = gl->gl_name.ln_number; in delete_work_func()
575 ip = gl->gl_object; in delete_work_func()
586 gfs2_glock_put(gl); in delete_work_func()
592 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); in glock_work_func() local
595 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) { in glock_work_func()
596 finish_xmote(gl, gl->gl_reply); in glock_work_func()
599 spin_lock(&gl->gl_lockref.lock); in glock_work_func()
600 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && in glock_work_func()
601 gl->gl_state != LM_ST_UNLOCKED && in glock_work_func()
602 gl->gl_demote_state != LM_ST_EXCLUSIVE) { in glock_work_func()
605 holdtime = gl->gl_tchange + gl->gl_hold_time; in glock_work_func()
610 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); in glock_work_func()
611 set_bit(GLF_DEMOTE, &gl->gl_flags); in glock_work_func()
614 run_queue(gl, 0); in glock_work_func()
615 spin_unlock(&gl->gl_lockref.lock); in glock_work_func()
617 gfs2_glock_put(gl); in glock_work_func()
619 if (gl->gl_name.ln_type != LM_TYPE_INODE) in glock_work_func()
621 if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) in glock_work_func()
622 gfs2_glock_put(gl); in glock_work_func()
625 gfs2_glock_put(gl); in glock_work_func()
649 struct gfs2_glock *gl, *tmp = NULL; in gfs2_glock_get() local
654 gl = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms); in gfs2_glock_get()
655 if (gl && !lockref_get_not_dead(&gl->gl_lockref)) in gfs2_glock_get()
656 gl = NULL; in gfs2_glock_get()
658 *glp = gl; in gfs2_glock_get()
659 if (gl) in gfs2_glock_get()
668 gl = kmem_cache_alloc(cachep, GFP_NOFS); in gfs2_glock_get()
669 if (!gl) in gfs2_glock_get()
672 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); in gfs2_glock_get()
675 gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS); in gfs2_glock_get()
676 if (!gl->gl_lksb.sb_lvbptr) { in gfs2_glock_get()
677 kmem_cache_free(cachep, gl); in gfs2_glock_get()
683 gl->gl_node.next = NULL; in gfs2_glock_get()
684 gl->gl_flags = 0; in gfs2_glock_get()
685 gl->gl_name = name; in gfs2_glock_get()
686 gl->gl_lockref.count = 1; in gfs2_glock_get()
687 gl->gl_state = LM_ST_UNLOCKED; in gfs2_glock_get()
688 gl->gl_target = LM_ST_UNLOCKED; in gfs2_glock_get()
689 gl->gl_demote_state = LM_ST_EXCLUSIVE; in gfs2_glock_get()
690 gl->gl_ops = glops; in gfs2_glock_get()
691 gl->gl_dstamp = ktime_set(0, 0); in gfs2_glock_get()
694 gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type]; in gfs2_glock_get()
696 gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0; in gfs2_glock_get()
697 gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0; in gfs2_glock_get()
698 gl->gl_tchange = jiffies; in gfs2_glock_get()
699 gl->gl_object = NULL; in gfs2_glock_get()
700 gl->gl_hold_time = GL_GLOCK_DFT_HOLD; in gfs2_glock_get()
701 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); in gfs2_glock_get()
702 INIT_WORK(&gl->gl_delete, delete_work_func); in gfs2_glock_get()
704 mapping = gfs2_glock2aspace(gl); in gfs2_glock_get()
715 ret = rhashtable_lookup_insert_fast(&gl_hash_table, &gl->gl_node, in gfs2_glock_get()
718 *glp = gl; in gfs2_glock_get()
736 kfree(gl->gl_lksb.sb_lvbptr); in gfs2_glock_get()
737 kmem_cache_free(cachep, gl); in gfs2_glock_get()
753 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags, in gfs2_holder_init() argument
757 gh->gh_gl = gl; in gfs2_holder_init()
764 gfs2_glock_hold(gl); in gfs2_holder_init()
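The gfs2_holder_init() signature above, together with gfs2_glock_nq()/gfs2_glock_dq() further down, implies the usual caller pattern. The sketch below is a minimal illustration, assuming gfs2_glock_nq() blocks until the holder is granted when GL_ASYNC is not set, and that gfs2_holder_uninit() (not in this listing) undoes the reference taken by gfs2_holder_init().

	/* Illustrative only: read something under a shared inode glock. */
	static int read_under_glock_sketch(struct gfs2_inode *ip)
	{
		struct gfs2_holder gh;
		int error;

		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);	/* takes a glock ref */
		error = gfs2_glock_nq(&gh);	/* queue the holder; waits unless GL_ASYNC */
		if (error == 0) {
			/* ... inode metadata may be inspected here ... */
			gfs2_glock_dq(&gh);	/* drop the granted holder */
		}
		gfs2_holder_uninit(&gh);	/* assumption: releases the init-time reference */
		return error;
	}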
831 static void handle_callback(struct gfs2_glock *gl, unsigned int state, in handle_callback() argument
836 set_bit(bit, &gl->gl_flags); in handle_callback()
837 if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { in handle_callback()
838 gl->gl_demote_state = state; in handle_callback()
839 gl->gl_demote_time = jiffies; in handle_callback()
840 } else if (gl->gl_demote_state != LM_ST_UNLOCKED && in handle_callback()
841 gl->gl_demote_state != state) { in handle_callback()
842 gl->gl_demote_state = LM_ST_UNLOCKED; in handle_callback()
844 if (gl->gl_ops->go_callback) in handle_callback()
845 gl->gl_ops->go_callback(gl, remote); in handle_callback()
846 trace_gfs2_demote_rq(gl, remote); in handle_callback()
879 __releases(&gl->gl_lockref.lock) in add_to_queue()
880 __acquires(&gl->gl_lockref.lock) in add_to_queue()
882 struct gfs2_glock *gl = gh->gh_gl; in add_to_queue() local
883 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in add_to_queue()
893 if (test_bit(GLF_LOCK, &gl->gl_flags)) in add_to_queue()
894 try_futile = !may_grant(gl, gh); in add_to_queue()
895 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) in add_to_queue()
899 list_for_each_entry(gh2, &gl->gl_holders, gh_list) { in add_to_queue()
915 set_bit(GLF_QUEUED, &gl->gl_flags); in add_to_queue()
917 gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT); in add_to_queue()
918 gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT); in add_to_queue()
920 list_add_tail(&gh->gh_list, &gl->gl_holders); in add_to_queue()
927 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); in add_to_queue()
929 spin_unlock(&gl->gl_lockref.lock); in add_to_queue()
931 sdp->sd_lockstruct.ls_ops->lm_cancel(gl); in add_to_queue()
932 spin_lock(&gl->gl_lockref.lock); in add_to_queue()
945 gfs2_dump_glock(NULL, gl); in add_to_queue()
960 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_nq() local
961 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_glock_nq()
967 if (test_bit(GLF_LRU, &gl->gl_flags)) in gfs2_glock_nq()
968 gfs2_glock_remove_from_lru(gl); in gfs2_glock_nq()
970 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_nq()
973 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) { in gfs2_glock_nq()
974 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in gfs2_glock_nq()
975 gl->gl_lockref.count++; in gfs2_glock_nq()
976 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) in gfs2_glock_nq()
977 gl->gl_lockref.count--; in gfs2_glock_nq()
979 run_queue(gl, 1); in gfs2_glock_nq()
980 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_nq()
1008 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_dq() local
1009 const struct gfs2_glock_operations *glops = gl->gl_ops; in gfs2_glock_dq()
1013 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1015 handle_callback(gl, LM_ST_UNLOCKED, 0, false); in gfs2_glock_dq()
1018 if (find_first_holder(gl) == NULL) { in gfs2_glock_dq()
1020 GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags)); in gfs2_glock_dq()
1021 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1023 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1024 clear_bit(GLF_LOCK, &gl->gl_flags); in gfs2_glock_dq()
1026 if (list_empty(&gl->gl_holders) && in gfs2_glock_dq()
1027 !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && in gfs2_glock_dq()
1028 !test_bit(GLF_DEMOTE, &gl->gl_flags)) in gfs2_glock_dq()
1031 if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl) && in gfs2_glock_dq()
1033 gfs2_glock_add_to_lru(gl); in gfs2_glock_dq()
1036 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1040 gfs2_glock_hold(gl); in gfs2_glock_dq()
1041 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && in gfs2_glock_dq()
1042 !test_bit(GLF_DEMOTE, &gl->gl_flags) && in gfs2_glock_dq()
1043 gl->gl_name.ln_type == LM_TYPE_INODE) in gfs2_glock_dq()
1044 delay = gl->gl_hold_time; in gfs2_glock_dq()
1045 if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) in gfs2_glock_dq()
1046 gfs2_glock_put(gl); in gfs2_glock_dq()
1051 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_dq_wait() local
1054 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE); in gfs2_glock_dq_wait()
1085 struct gfs2_glock *gl; in gfs2_glock_nq_num() local
1088 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl); in gfs2_glock_nq_num()
1090 error = gfs2_glock_nq_init(gl, state, flags, gh); in gfs2_glock_nq_num()
1091 gfs2_glock_put(gl); in gfs2_glock_nq_num()
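gfs2_glock_nq_num() above bundles the lookup-or-create step with enqueueing; restated as a caller-side sketch (error handling abbreviated, parameter types assumed), it shows why the lookup reference can be dropped immediately: gfs2_holder_init(), called inside gfs2_glock_nq_init(), takes its own reference via gfs2_glock_hold().

	static int glock_nq_num_sketch(struct gfs2_sbd *sdp, u64 number,
				       const struct gfs2_glock_operations *glops,
				       unsigned int state, unsigned int flags,
				       struct gfs2_holder *gh)
	{
		struct gfs2_glock *gl;
		int error;

		error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);	/* find or allocate */
		if (error == 0) {
			error = gfs2_glock_nq_init(gl, state, flags, gh);	/* init holder and queue */
			gfs2_glock_put(gl);	/* the holder keeps its own reference */
		}
		return error;
	}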
1204 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) in gfs2_glock_cb() argument
1210 gfs2_glock_hold(gl); in gfs2_glock_cb()
1211 holdtime = gl->gl_tchange + gl->gl_hold_time; in gfs2_glock_cb()
1212 if (test_bit(GLF_QUEUED, &gl->gl_flags) && in gfs2_glock_cb()
1213 gl->gl_name.ln_type == LM_TYPE_INODE) { in gfs2_glock_cb()
1216 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) in gfs2_glock_cb()
1217 delay = gl->gl_hold_time; in gfs2_glock_cb()
1220 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_cb()
1221 handle_callback(gl, state, delay, true); in gfs2_glock_cb()
1222 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_cb()
1223 if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) in gfs2_glock_cb()
1224 gfs2_glock_put(gl); in gfs2_glock_cb()
1238 static int gfs2_should_freeze(const struct gfs2_glock *gl) in gfs2_should_freeze() argument
1242 if (gl->gl_reply & ~LM_OUT_ST_MASK) in gfs2_should_freeze()
1244 if (gl->gl_target == LM_ST_UNLOCKED) in gfs2_should_freeze()
1247 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in gfs2_should_freeze()
1266 void gfs2_glock_complete(struct gfs2_glock *gl, int ret) in gfs2_glock_complete() argument
1268 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; in gfs2_glock_complete()
1270 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_complete()
1271 gl->gl_reply = ret; in gfs2_glock_complete()
1274 if (gfs2_should_freeze(gl)) { in gfs2_glock_complete()
1275 set_bit(GLF_FROZEN, &gl->gl_flags); in gfs2_glock_complete()
1276 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_complete()
1281 gl->gl_lockref.count++; in gfs2_glock_complete()
1282 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in gfs2_glock_complete()
1283 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_complete()
1285 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) in gfs2_glock_complete()
1286 gfs2_glock_put(gl); in gfs2_glock_complete()
1322 struct gfs2_glock *gl; in gfs2_dispose_glock_lru() local
1327 gl = list_entry(list->next, struct gfs2_glock, gl_lru); in gfs2_dispose_glock_lru()
1328 list_del_init(&gl->gl_lru); in gfs2_dispose_glock_lru()
1329 if (!spin_trylock(&gl->gl_lockref.lock)) { in gfs2_dispose_glock_lru()
1331 list_add(&gl->gl_lru, &lru_list); in gfs2_dispose_glock_lru()
1335 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { in gfs2_dispose_glock_lru()
1336 spin_unlock(&gl->gl_lockref.lock); in gfs2_dispose_glock_lru()
1339 clear_bit(GLF_LRU, &gl->gl_flags); in gfs2_dispose_glock_lru()
1340 gl->gl_lockref.count++; in gfs2_dispose_glock_lru()
1341 if (demote_ok(gl)) in gfs2_dispose_glock_lru()
1342 handle_callback(gl, LM_ST_UNLOCKED, 0, false); in gfs2_dispose_glock_lru()
1343 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags)); in gfs2_dispose_glock_lru()
1344 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) in gfs2_dispose_glock_lru()
1345 gl->gl_lockref.count--; in gfs2_dispose_glock_lru()
1346 spin_unlock(&gl->gl_lockref.lock); in gfs2_dispose_glock_lru()
1362 struct gfs2_glock *gl; in gfs2_scan_glock_lru() local
1369 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru); in gfs2_scan_glock_lru()
1372 if (!test_bit(GLF_LOCK, &gl->gl_flags)) { in gfs2_scan_glock_lru()
1373 list_move(&gl->gl_lru, &dispose); in gfs2_scan_glock_lru()
1379 list_move(&gl->gl_lru, &skipped); in gfs2_scan_glock_lru()
1419 struct gfs2_glock *gl; in glock_hash_walk() local
1427 rht_for_each_entry_safe(gl, pos, next, tbl, i, gl_node) { in glock_hash_walk()
1428 if ((gl->gl_name.ln_sbd == sdp) && in glock_hash_walk()
1429 lockref_get_not_dead(&gl->gl_lockref)) in glock_hash_walk()
1430 examiner(gl); in glock_hash_walk()
1443 static void thaw_glock(struct gfs2_glock *gl) in thaw_glock() argument
1445 if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) in thaw_glock()
1447 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in thaw_glock()
1448 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) { in thaw_glock()
1450 gfs2_glock_put(gl); in thaw_glock()
1460 static void clear_glock(struct gfs2_glock *gl) in clear_glock() argument
1462 gfs2_glock_remove_from_lru(gl); in clear_glock()
1464 spin_lock(&gl->gl_lockref.lock); in clear_glock()
1465 if (gl->gl_state != LM_ST_UNLOCKED) in clear_glock()
1466 handle_callback(gl, LM_ST_UNLOCKED, 0, false); in clear_glock()
1467 spin_unlock(&gl->gl_lockref.lock); in clear_glock()
1468 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) in clear_glock()
1469 gfs2_glock_put(gl); in clear_glock()
1483 static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl) in dump_glock() argument
1485 spin_lock(&gl->gl_lockref.lock); in dump_glock()
1486 gfs2_dump_glock(seq, gl); in dump_glock()
1487 spin_unlock(&gl->gl_lockref.lock); in dump_glock()
1490 static void dump_glock_func(struct gfs2_glock *gl) in dump_glock_func() argument
1492 dump_glock(NULL, gl); in dump_glock_func()
1515 struct gfs2_glock *gl = ip->i_gl; in gfs2_glock_finish_truncate() local
1519 gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0); in gfs2_glock_finish_truncate()
1521 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_finish_truncate()
1522 clear_bit(GLF_LOCK, &gl->gl_flags); in gfs2_glock_finish_truncate()
1523 run_queue(gl, 1); in gfs2_glock_finish_truncate()
1524 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_finish_truncate()
1596 static const char *gflags2str(char *buf, const struct gfs2_glock *gl) in gflags2str() argument
1598 const unsigned long *gflags = &gl->gl_flags; in gflags2str()
1625 if (gl->gl_object) in gflags2str()
1650 void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl) in gfs2_dump_glock() argument
1652 const struct gfs2_glock_operations *glops = gl->gl_ops; in gfs2_dump_glock()
1657 dtime = jiffies - gl->gl_demote_time; in gfs2_dump_glock()
1659 if (!test_bit(GLF_DEMOTE, &gl->gl_flags)) in gfs2_dump_glock()
1662 state2str(gl->gl_state), in gfs2_dump_glock()
1663 gl->gl_name.ln_type, in gfs2_dump_glock()
1664 (unsigned long long)gl->gl_name.ln_number, in gfs2_dump_glock()
1665 gflags2str(gflags_buf, gl), in gfs2_dump_glock()
1666 state2str(gl->gl_target), in gfs2_dump_glock()
1667 state2str(gl->gl_demote_state), dtime, in gfs2_dump_glock()
1668 atomic_read(&gl->gl_ail_count), in gfs2_dump_glock()
1669 atomic_read(&gl->gl_revokes), in gfs2_dump_glock()
1670 (int)gl->gl_lockref.count, gl->gl_hold_time); in gfs2_dump_glock()
1672 list_for_each_entry(gh, &gl->gl_holders, gh_list) in gfs2_dump_glock()
1675 if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump) in gfs2_dump_glock()
1676 glops->go_dump(seq, gl); in gfs2_dump_glock()
1681 struct gfs2_glock *gl = iter_ptr; in gfs2_glstats_seq_show() local
1684 gl->gl_name.ln_type, in gfs2_glstats_seq_show()
1685 (unsigned long long)gl->gl_name.ln_number, in gfs2_glstats_seq_show()
1686 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT], in gfs2_glstats_seq_show()
1687 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR], in gfs2_glstats_seq_show()
1688 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB], in gfs2_glstats_seq_show()
1689 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB], in gfs2_glstats_seq_show()
1690 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT], in gfs2_glstats_seq_show()
1691 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR], in gfs2_glstats_seq_show()
1692 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT], in gfs2_glstats_seq_show()
1693 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]); in gfs2_glstats_seq_show()
1790 gi->gl = rhashtable_walk_next(&gi->hti); in gfs2_glock_iter_next()
1791 if (IS_ERR(gi->gl)) { in gfs2_glock_iter_next()
1792 if (PTR_ERR(gi->gl) == -EAGAIN) in gfs2_glock_iter_next()
1794 gi->gl = NULL; in gfs2_glock_iter_next()
1797 } while ((gi->gl) && ((gi->sdp != gi->gl->gl_name.ln_sbd) || in gfs2_glock_iter_next()
1798 __lockref_is_dead(&gi->gl->gl_lockref))); in gfs2_glock_iter_next()
1816 } while (gi->gl && n--); in gfs2_glock_seq_start()
1819 return gi->gl; in gfs2_glock_seq_start()
1830 return gi->gl; in gfs2_glock_seq_next()
1837 gi->gl = NULL; in gfs2_glock_seq_stop()
1905 gi->gl = NULL; in gfs2_glocks_open()
1916 gi->gl = NULL; in gfs2_glocks_release()
1933 gi->gl = NULL; in gfs2_glstats_open()