Lines Matching refs:gl

56 	struct gfs2_glock *gl;		/* current glock struct        */  member
60 typedef void (*glock_examiner) (struct gfs2_glock * gl);
62 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
110 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu); in gfs2_glock_dealloc() local
112 if (gl->gl_ops->go_flags & GLOF_ASPACE) { in gfs2_glock_dealloc()
113 kmem_cache_free(gfs2_glock_aspace_cachep, gl); in gfs2_glock_dealloc()
115 kfree(gl->gl_lksb.sb_lvbptr); in gfs2_glock_dealloc()
116 kmem_cache_free(gfs2_glock_cachep, gl); in gfs2_glock_dealloc()
120 void gfs2_glock_free(struct gfs2_glock *gl) in gfs2_glock_free() argument
122 struct gfs2_sbd *sdp = gl->gl_sbd; in gfs2_glock_free()
124 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc); in gfs2_glock_free()
135 static void gfs2_glock_hold(struct gfs2_glock *gl) in gfs2_glock_hold() argument
137 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); in gfs2_glock_hold()
138 lockref_get(&gl->gl_lockref); in gfs2_glock_hold()
148 static int demote_ok(const struct gfs2_glock *gl) in demote_ok() argument
150 const struct gfs2_glock_operations *glops = gl->gl_ops; in demote_ok()
152 if (gl->gl_state == LM_ST_UNLOCKED) in demote_ok()
154 if (!list_empty(&gl->gl_holders)) in demote_ok()
157 return glops->go_demote_ok(gl); in demote_ok()
162 void gfs2_glock_add_to_lru(struct gfs2_glock *gl) in gfs2_glock_add_to_lru() argument
166 if (!list_empty(&gl->gl_lru)) in gfs2_glock_add_to_lru()
167 list_del_init(&gl->gl_lru); in gfs2_glock_add_to_lru()
171 list_add_tail(&gl->gl_lru, &lru_list); in gfs2_glock_add_to_lru()
172 set_bit(GLF_LRU, &gl->gl_flags); in gfs2_glock_add_to_lru()
176 static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) in gfs2_glock_remove_from_lru() argument
179 if (!list_empty(&gl->gl_lru)) { in gfs2_glock_remove_from_lru()
180 list_del_init(&gl->gl_lru); in gfs2_glock_remove_from_lru()
182 clear_bit(GLF_LRU, &gl->gl_flags); in gfs2_glock_remove_from_lru()
193 void gfs2_glock_put(struct gfs2_glock *gl) in gfs2_glock_put() argument
195 struct gfs2_sbd *sdp = gl->gl_sbd; in gfs2_glock_put()
196 struct address_space *mapping = gfs2_glock2aspace(gl); in gfs2_glock_put()
198 if (lockref_put_or_lock(&gl->gl_lockref)) in gfs2_glock_put()
201 lockref_mark_dead(&gl->gl_lockref); in gfs2_glock_put()
203 gfs2_glock_remove_from_lru(gl); in gfs2_glock_put()
204 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_put()
205 spin_lock_bucket(gl->gl_hash); in gfs2_glock_put()
206 hlist_bl_del_rcu(&gl->gl_list); in gfs2_glock_put()
207 spin_unlock_bucket(gl->gl_hash); in gfs2_glock_put()
208 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); in gfs2_glock_put()
209 GLOCK_BUG_ON(gl, mapping && mapping->nrpages); in gfs2_glock_put()
210 trace_gfs2_glock_put(gl); in gfs2_glock_put()
211 sdp->sd_lockstruct.ls_ops->lm_put_lock(gl); in gfs2_glock_put()
226 struct gfs2_glock *gl; in search_bucket() local
229 hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) { in search_bucket()
230 if (!lm_name_equal(&gl->gl_name, name)) in search_bucket()
232 if (gl->gl_sbd != sdp) in search_bucket()
234 if (lockref_get_not_dead(&gl->gl_lockref)) in search_bucket()
235 return gl; in search_bucket()
249 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh) in may_grant() argument
251 const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list); in may_grant()
255 if (gl->gl_state == gh->gh_state) in may_grant()
259 if (gl->gl_state == LM_ST_EXCLUSIVE) { in may_grant()
265 if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY)) in may_grant()
282 static inline void do_error(struct gfs2_glock *gl, const int ret) in do_error() argument
286 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { in do_error()
309 static int do_promote(struct gfs2_glock *gl) in do_promote() argument
310 __releases(&gl->gl_spin) in do_promote()
311 __acquires(&gl->gl_spin) in do_promote()
313 const struct gfs2_glock_operations *glops = gl->gl_ops; in do_promote()
318 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { in do_promote()
321 if (may_grant(gl, gh)) { in do_promote()
322 if (gh->gh_list.prev == &gl->gl_holders && in do_promote()
324 spin_unlock(&gl->gl_spin); in do_promote()
327 spin_lock(&gl->gl_spin); in do_promote()
347 if (gh->gh_list.prev == &gl->gl_holders) in do_promote()
349 do_error(gl, 0); in do_promote()
360 static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl) in find_first_waiter() argument
364 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in find_first_waiter()
378 static void state_change(struct gfs2_glock *gl, unsigned int new_state) in state_change() argument
382 held1 = (gl->gl_state != LM_ST_UNLOCKED); in state_change()
386 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); in state_change()
388 gl->gl_lockref.count++; in state_change()
390 gl->gl_lockref.count--; in state_change()
392 if (held1 && held2 && list_empty(&gl->gl_holders)) in state_change()
393 clear_bit(GLF_QUEUED, &gl->gl_flags); in state_change()
395 if (new_state != gl->gl_target) in state_change()
397 gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR, in state_change()
399 gl->gl_state = new_state; in state_change()
400 gl->gl_tchange = jiffies; in state_change()
403 static void gfs2_demote_wake(struct gfs2_glock *gl) in gfs2_demote_wake() argument
405 gl->gl_demote_state = LM_ST_EXCLUSIVE; in gfs2_demote_wake()
406 clear_bit(GLF_DEMOTE, &gl->gl_flags); in gfs2_demote_wake()
408 wake_up_bit(&gl->gl_flags, GLF_DEMOTE); in gfs2_demote_wake()
418 static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) in finish_xmote() argument
420 const struct gfs2_glock_operations *glops = gl->gl_ops; in finish_xmote()
425 spin_lock(&gl->gl_spin); in finish_xmote()
426 trace_gfs2_glock_state_change(gl, state); in finish_xmote()
427 state_change(gl, state); in finish_xmote()
428 gh = find_first_waiter(gl); in finish_xmote()
431 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) && in finish_xmote()
432 state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED) in finish_xmote()
433 gl->gl_target = LM_ST_UNLOCKED; in finish_xmote()
436 if (unlikely(state != gl->gl_target)) { in finish_xmote()
437 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { in finish_xmote()
441 list_move_tail(&gh->gh_list, &gl->gl_holders); in finish_xmote()
442 gh = find_first_waiter(gl); in finish_xmote()
443 gl->gl_target = gh->gh_state; in finish_xmote()
449 gl->gl_target = gl->gl_state; in finish_xmote()
450 do_error(gl, ret); in finish_xmote()
458 do_xmote(gl, gh, gl->gl_target); in finish_xmote()
463 do_xmote(gl, gh, LM_ST_UNLOCKED); in finish_xmote()
466 pr_err("wanted %u got %u\n", gl->gl_target, state); in finish_xmote()
467 GLOCK_BUG_ON(gl, 1); in finish_xmote()
469 spin_unlock(&gl->gl_spin); in finish_xmote()
474 if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) in finish_xmote()
475 gfs2_demote_wake(gl); in finish_xmote()
478 spin_unlock(&gl->gl_spin); in finish_xmote()
479 rv = glops->go_xmote_bh(gl, gh); in finish_xmote()
480 spin_lock(&gl->gl_spin); in finish_xmote()
482 do_error(gl, rv); in finish_xmote()
486 rv = do_promote(gl); in finish_xmote()
491 clear_bit(GLF_LOCK, &gl->gl_flags); in finish_xmote()
493 spin_unlock(&gl->gl_spin); in finish_xmote()
504 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target) in do_xmote() argument
505 __releases(&gl->gl_spin) in do_xmote()
506 __acquires(&gl->gl_spin) in do_xmote()
508 const struct gfs2_glock_operations *glops = gl->gl_ops; in do_xmote()
509 struct gfs2_sbd *sdp = gl->gl_sbd; in do_xmote()
515 GLOCK_BUG_ON(gl, gl->gl_state == target); in do_xmote()
516 GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target); in do_xmote()
519 set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); in do_xmote()
520 do_error(gl, 0); /* Fail queued try locks */ in do_xmote()
522 gl->gl_req = target; in do_xmote()
523 set_bit(GLF_BLOCKING, &gl->gl_flags); in do_xmote()
524 if ((gl->gl_req == LM_ST_UNLOCKED) || in do_xmote()
525 (gl->gl_state == LM_ST_EXCLUSIVE) || in do_xmote()
527 clear_bit(GLF_BLOCKING, &gl->gl_flags); in do_xmote()
528 spin_unlock(&gl->gl_spin); in do_xmote()
530 glops->go_sync(gl); in do_xmote()
531 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) in do_xmote()
532 glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA); in do_xmote()
533 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); in do_xmote()
535 gfs2_glock_hold(gl); in do_xmote()
538 ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags); in do_xmote()
541 GLOCK_BUG_ON(gl, 1); in do_xmote()
544 finish_xmote(gl, target); in do_xmote()
545 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) in do_xmote()
546 gfs2_glock_put(gl); in do_xmote()
549 spin_lock(&gl->gl_spin); in do_xmote()
557 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl) in find_first_holder() argument
561 if (!list_empty(&gl->gl_holders)) { in find_first_holder()
562 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); in find_first_holder()
576 static void run_queue(struct gfs2_glock *gl, const int nonblock) in run_queue() argument
577 __releases(&gl->gl_spin) in run_queue()
578 __acquires(&gl->gl_spin) in run_queue()
583 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) in run_queue()
586 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)); in run_queue()
588 if (test_bit(GLF_DEMOTE, &gl->gl_flags) && in run_queue()
589 gl->gl_demote_state != gl->gl_state) { in run_queue()
590 if (find_first_holder(gl)) in run_queue()
594 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); in run_queue()
595 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); in run_queue()
596 gl->gl_target = gl->gl_demote_state; in run_queue()
598 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) in run_queue()
599 gfs2_demote_wake(gl); in run_queue()
600 ret = do_promote(gl); in run_queue()
605 gh = find_first_waiter(gl); in run_queue()
606 gl->gl_target = gh->gh_state; in run_queue()
608 do_error(gl, 0); /* Fail queued try locks */ in run_queue()
610 do_xmote(gl, gh, gl->gl_target); in run_queue()
615 clear_bit(GLF_LOCK, &gl->gl_flags); in run_queue()
617 gl->gl_lockref.count++; in run_queue()
618 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) in run_queue()
619 gl->gl_lockref.count--; in run_queue()
623 clear_bit(GLF_LOCK, &gl->gl_flags); in run_queue()
630 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete); in delete_work_func() local
631 struct gfs2_sbd *sdp = gl->gl_sbd; in delete_work_func()
634 u64 no_addr = gl->gl_name.ln_number; in delete_work_func()
636 ip = gl->gl_object; in delete_work_func()
647 gfs2_glock_put(gl); in delete_work_func()
653 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); in glock_work_func() local
656 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) { in glock_work_func()
657 finish_xmote(gl, gl->gl_reply); in glock_work_func()
660 spin_lock(&gl->gl_spin); in glock_work_func()
661 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && in glock_work_func()
662 gl->gl_state != LM_ST_UNLOCKED && in glock_work_func()
663 gl->gl_demote_state != LM_ST_EXCLUSIVE) { in glock_work_func()
666 holdtime = gl->gl_tchange + gl->gl_hold_time; in glock_work_func()
671 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); in glock_work_func()
672 set_bit(GLF_DEMOTE, &gl->gl_flags); in glock_work_func()
675 run_queue(gl, 0); in glock_work_func()
676 spin_unlock(&gl->gl_spin); in glock_work_func()
678 gfs2_glock_put(gl); in glock_work_func()
680 if (gl->gl_name.ln_type != LM_TYPE_INODE) in glock_work_func()
682 if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) in glock_work_func()
683 gfs2_glock_put(gl); in glock_work_func()
686 gfs2_glock_put(gl); in glock_work_func()
708 struct gfs2_glock *gl, *tmp; in gfs2_glock_get() local
714 gl = search_bucket(hash, sdp, &name); in gfs2_glock_get()
717 *glp = gl; in gfs2_glock_get()
718 if (gl) in gfs2_glock_get()
727 gl = kmem_cache_alloc(cachep, GFP_NOFS); in gfs2_glock_get()
728 if (!gl) in gfs2_glock_get()
731 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); in gfs2_glock_get()
734 gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS); in gfs2_glock_get()
735 if (!gl->gl_lksb.sb_lvbptr) { in gfs2_glock_get()
736 kmem_cache_free(cachep, gl); in gfs2_glock_get()
742 gl->gl_sbd = sdp; in gfs2_glock_get()
743 gl->gl_flags = 0; in gfs2_glock_get()
744 gl->gl_name = name; in gfs2_glock_get()
745 gl->gl_lockref.count = 1; in gfs2_glock_get()
746 gl->gl_state = LM_ST_UNLOCKED; in gfs2_glock_get()
747 gl->gl_target = LM_ST_UNLOCKED; in gfs2_glock_get()
748 gl->gl_demote_state = LM_ST_EXCLUSIVE; in gfs2_glock_get()
749 gl->gl_hash = hash; in gfs2_glock_get()
750 gl->gl_ops = glops; in gfs2_glock_get()
751 gl->gl_dstamp = ktime_set(0, 0); in gfs2_glock_get()
754 gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type]; in gfs2_glock_get()
756 gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0; in gfs2_glock_get()
757 gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0; in gfs2_glock_get()
758 gl->gl_tchange = jiffies; in gfs2_glock_get()
759 gl->gl_object = NULL; in gfs2_glock_get()
760 gl->gl_hold_time = GL_GLOCK_DFT_HOLD; in gfs2_glock_get()
761 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); in gfs2_glock_get()
762 INIT_WORK(&gl->gl_delete, delete_work_func); in gfs2_glock_get()
764 mapping = gfs2_glock2aspace(gl); in gfs2_glock_get()
778 kfree(gl->gl_lksb.sb_lvbptr); in gfs2_glock_get()
779 kmem_cache_free(cachep, gl); in gfs2_glock_get()
781 gl = tmp; in gfs2_glock_get()
783 hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]); in gfs2_glock_get()
787 *glp = gl; in gfs2_glock_get()
801 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags, in gfs2_holder_init() argument
805 gh->gh_gl = gl; in gfs2_holder_init()
812 gfs2_glock_hold(gl); in gfs2_holder_init()
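The gfs2_holder_init() excerpt above takes a glock reference on behalf of the holder; paired with gfs2_glock_nq() and gfs2_glock_dq() (listed further down), it forms the usual acquire/release sequence. A minimal caller sketch, assuming the conventional gfs2_holder_uninit() counterpart and illustrative state/flag values:

	struct gfs2_holder gh;
	int error;

	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);	/* pins the glock for this holder */
	error = gfs2_glock_nq(&gh);			/* enqueue; waits for the grant unless GL_ASYNC is set */
	if (!error) {
		/* ... access the object protected by the glock ... */
		gfs2_glock_dq(&gh);			/* drop the holder */
	}
	gfs2_holder_uninit(&gh);			/* assumed counterpart: releases the glock reference */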
879 static void handle_callback(struct gfs2_glock *gl, unsigned int state, in handle_callback() argument
884 set_bit(bit, &gl->gl_flags); in handle_callback()
885 if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { in handle_callback()
886 gl->gl_demote_state = state; in handle_callback()
887 gl->gl_demote_time = jiffies; in handle_callback()
888 } else if (gl->gl_demote_state != LM_ST_UNLOCKED && in handle_callback()
889 gl->gl_demote_state != state) { in handle_callback()
890 gl->gl_demote_state = LM_ST_UNLOCKED; in handle_callback()
892 if (gl->gl_ops->go_callback) in handle_callback()
893 gl->gl_ops->go_callback(gl, remote); in handle_callback()
894 trace_gfs2_demote_rq(gl, remote); in handle_callback()
927 __releases(&gl->gl_spin) in add_to_queue()
928 __acquires(&gl->gl_spin) in add_to_queue()
930 struct gfs2_glock *gl = gh->gh_gl; in add_to_queue() local
931 struct gfs2_sbd *sdp = gl->gl_sbd; in add_to_queue()
941 if (test_bit(GLF_LOCK, &gl->gl_flags)) in add_to_queue()
942 try_futile = !may_grant(gl, gh); in add_to_queue()
943 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) in add_to_queue()
947 list_for_each_entry(gh2, &gl->gl_holders, gh_list) { in add_to_queue()
963 set_bit(GLF_QUEUED, &gl->gl_flags); in add_to_queue()
965 gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT); in add_to_queue()
966 gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT); in add_to_queue()
968 list_add_tail(&gh->gh_list, &gl->gl_holders); in add_to_queue()
975 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); in add_to_queue()
977 spin_unlock(&gl->gl_spin); in add_to_queue()
979 sdp->sd_lockstruct.ls_ops->lm_cancel(gl); in add_to_queue()
980 spin_lock(&gl->gl_spin); in add_to_queue()
993 gfs2_dump_glock(NULL, gl); in add_to_queue()
1008 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_nq() local
1009 struct gfs2_sbd *sdp = gl->gl_sbd; in gfs2_glock_nq()
1015 if (test_bit(GLF_LRU, &gl->gl_flags)) in gfs2_glock_nq()
1016 gfs2_glock_remove_from_lru(gl); in gfs2_glock_nq()
1018 spin_lock(&gl->gl_spin); in gfs2_glock_nq()
1021 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) { in gfs2_glock_nq()
1022 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in gfs2_glock_nq()
1023 gl->gl_lockref.count++; in gfs2_glock_nq()
1024 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) in gfs2_glock_nq()
1025 gl->gl_lockref.count--; in gfs2_glock_nq()
1027 run_queue(gl, 1); in gfs2_glock_nq()
1028 spin_unlock(&gl->gl_spin); in gfs2_glock_nq()
1056 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_dq() local
1057 const struct gfs2_glock_operations *glops = gl->gl_ops; in gfs2_glock_dq()
1061 spin_lock(&gl->gl_spin); in gfs2_glock_dq()
1063 handle_callback(gl, LM_ST_UNLOCKED, 0, false); in gfs2_glock_dq()
1066 if (find_first_holder(gl) == NULL) { in gfs2_glock_dq()
1068 GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags)); in gfs2_glock_dq()
1069 spin_unlock(&gl->gl_spin); in gfs2_glock_dq()
1071 spin_lock(&gl->gl_spin); in gfs2_glock_dq()
1072 clear_bit(GLF_LOCK, &gl->gl_flags); in gfs2_glock_dq()
1074 if (list_empty(&gl->gl_holders) && in gfs2_glock_dq()
1075 !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && in gfs2_glock_dq()
1076 !test_bit(GLF_DEMOTE, &gl->gl_flags)) in gfs2_glock_dq()
1079 if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl)) in gfs2_glock_dq()
1080 gfs2_glock_add_to_lru(gl); in gfs2_glock_dq()
1083 spin_unlock(&gl->gl_spin); in gfs2_glock_dq()
1087 gfs2_glock_hold(gl); in gfs2_glock_dq()
1088 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && in gfs2_glock_dq()
1089 !test_bit(GLF_DEMOTE, &gl->gl_flags) && in gfs2_glock_dq()
1090 gl->gl_name.ln_type == LM_TYPE_INODE) in gfs2_glock_dq()
1091 delay = gl->gl_hold_time; in gfs2_glock_dq()
1092 if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) in gfs2_glock_dq()
1093 gfs2_glock_put(gl); in gfs2_glock_dq()
1098 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_dq_wait() local
1101 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE); in gfs2_glock_dq_wait()
1132 struct gfs2_glock *gl; in gfs2_glock_nq_num() local
1135 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl); in gfs2_glock_nq_num()
1137 error = gfs2_glock_nq_init(gl, state, flags, gh); in gfs2_glock_nq_num()
1138 gfs2_glock_put(gl); in gfs2_glock_nq_num()
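The gfs2_glock_nq_num() lines above show the lookup-and-enqueue convenience path: gfs2_glock_get() finds or creates the glock, gfs2_glock_nq_init() sets up and enqueues a holder, and the lookup reference is dropped with gfs2_glock_put() because the holder now keeps the glock pinned. A sketch of that shape, with placeholder arguments:

	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);	/* the holder keeps its own reference */
	}
	return error;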
1251 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) in gfs2_glock_cb() argument
1257 gfs2_glock_hold(gl); in gfs2_glock_cb()
1258 holdtime = gl->gl_tchange + gl->gl_hold_time; in gfs2_glock_cb()
1259 if (test_bit(GLF_QUEUED, &gl->gl_flags) && in gfs2_glock_cb()
1260 gl->gl_name.ln_type == LM_TYPE_INODE) { in gfs2_glock_cb()
1263 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) in gfs2_glock_cb()
1264 delay = gl->gl_hold_time; in gfs2_glock_cb()
1267 spin_lock(&gl->gl_spin); in gfs2_glock_cb()
1268 handle_callback(gl, state, delay, true); in gfs2_glock_cb()
1269 spin_unlock(&gl->gl_spin); in gfs2_glock_cb()
1270 if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) in gfs2_glock_cb()
1271 gfs2_glock_put(gl); in gfs2_glock_cb()
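gfs2_glock_cb() above repeats a reference/work idiom that also appears in do_xmote() and gfs2_glock_dq(): take a glock reference before queueing glock work, then drop it when queue_delayed_work() returns 0, since a work item that was already pending already owns a reference. A hedged sketch of that idiom, using a hypothetical helper name:

	static void queue_glock_work_sketch(struct gfs2_glock *gl, unsigned long delay)
	{
		gfs2_glock_hold(gl);	/* reference taken on behalf of the work item */
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
			gfs2_glock_put(gl);	/* already queued: keep only the existing reference */
	}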
1285 static int gfs2_should_freeze(const struct gfs2_glock *gl) in gfs2_should_freeze() argument
1289 if (gl->gl_reply & ~LM_OUT_ST_MASK) in gfs2_should_freeze()
1291 if (gl->gl_target == LM_ST_UNLOCKED) in gfs2_should_freeze()
1294 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in gfs2_should_freeze()
1313 void gfs2_glock_complete(struct gfs2_glock *gl, int ret) in gfs2_glock_complete() argument
1315 struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct; in gfs2_glock_complete()
1317 spin_lock(&gl->gl_spin); in gfs2_glock_complete()
1318 gl->gl_reply = ret; in gfs2_glock_complete()
1321 if (gfs2_should_freeze(gl)) { in gfs2_glock_complete()
1322 set_bit(GLF_FROZEN, &gl->gl_flags); in gfs2_glock_complete()
1323 spin_unlock(&gl->gl_spin); in gfs2_glock_complete()
1328 gl->gl_lockref.count++; in gfs2_glock_complete()
1329 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in gfs2_glock_complete()
1330 spin_unlock(&gl->gl_spin); in gfs2_glock_complete()
1332 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) in gfs2_glock_complete()
1333 gfs2_glock_put(gl); in gfs2_glock_complete()
1369 struct gfs2_glock *gl; in gfs2_dispose_glock_lru() local
1374 gl = list_entry(list->next, struct gfs2_glock, gl_lru); in gfs2_dispose_glock_lru()
1375 list_del_init(&gl->gl_lru); in gfs2_dispose_glock_lru()
1376 if (!spin_trylock(&gl->gl_spin)) { in gfs2_dispose_glock_lru()
1378 list_add(&gl->gl_lru, &lru_list); in gfs2_dispose_glock_lru()
1382 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { in gfs2_dispose_glock_lru()
1383 spin_unlock(&gl->gl_spin); in gfs2_dispose_glock_lru()
1386 clear_bit(GLF_LRU, &gl->gl_flags); in gfs2_dispose_glock_lru()
1387 gl->gl_lockref.count++; in gfs2_dispose_glock_lru()
1388 if (demote_ok(gl)) in gfs2_dispose_glock_lru()
1389 handle_callback(gl, LM_ST_UNLOCKED, 0, false); in gfs2_dispose_glock_lru()
1390 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags)); in gfs2_dispose_glock_lru()
1391 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) in gfs2_dispose_glock_lru()
1392 gl->gl_lockref.count--; in gfs2_dispose_glock_lru()
1393 spin_unlock(&gl->gl_spin); in gfs2_dispose_glock_lru()
1409 struct gfs2_glock *gl; in gfs2_scan_glock_lru() local
1416 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru); in gfs2_scan_glock_lru()
1419 if (!test_bit(GLF_LOCK, &gl->gl_flags)) { in gfs2_scan_glock_lru()
1420 list_move(&gl->gl_lru, &dispose); in gfs2_scan_glock_lru()
1426 list_move(&gl->gl_lru, &skipped); in gfs2_scan_glock_lru()
1467 struct gfs2_glock *gl; in examine_bucket() local
1472 hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) { in examine_bucket()
1473 if ((gl->gl_sbd == sdp) && lockref_get_not_dead(&gl->gl_lockref)) in examine_bucket()
1474 examiner(gl); in examine_bucket()
1495 static void thaw_glock(struct gfs2_glock *gl) in thaw_glock() argument
1497 if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) in thaw_glock()
1499 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in thaw_glock()
1500 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) { in thaw_glock()
1502 gfs2_glock_put(gl); in thaw_glock()
1512 static void clear_glock(struct gfs2_glock *gl) in clear_glock() argument
1514 gfs2_glock_remove_from_lru(gl); in clear_glock()
1516 spin_lock(&gl->gl_spin); in clear_glock()
1517 if (gl->gl_state != LM_ST_UNLOCKED) in clear_glock()
1518 handle_callback(gl, LM_ST_UNLOCKED, 0, false); in clear_glock()
1519 spin_unlock(&gl->gl_spin); in clear_glock()
1520 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) in clear_glock()
1521 gfs2_glock_put(gl); in clear_glock()
1535 static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl) in dump_glock() argument
1537 spin_lock(&gl->gl_spin); in dump_glock()
1538 gfs2_dump_glock(seq, gl); in dump_glock()
1539 spin_unlock(&gl->gl_spin); in dump_glock()
1542 static void dump_glock_func(struct gfs2_glock *gl) in dump_glock_func() argument
1544 dump_glock(NULL, gl); in dump_glock_func()
1567 struct gfs2_glock *gl = ip->i_gl; in gfs2_glock_finish_truncate() local
1571 gfs2_assert_withdraw(gl->gl_sbd, ret == 0); in gfs2_glock_finish_truncate()
1573 spin_lock(&gl->gl_spin); in gfs2_glock_finish_truncate()
1574 clear_bit(GLF_LOCK, &gl->gl_flags); in gfs2_glock_finish_truncate()
1575 run_queue(gl, 1); in gfs2_glock_finish_truncate()
1576 spin_unlock(&gl->gl_spin); in gfs2_glock_finish_truncate()
1648 static const char *gflags2str(char *buf, const struct gfs2_glock *gl) in gflags2str() argument
1650 const unsigned long *gflags = &gl->gl_flags; in gflags2str()
1677 if (gl->gl_object) in gflags2str()
1702 void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl) in gfs2_dump_glock() argument
1704 const struct gfs2_glock_operations *glops = gl->gl_ops; in gfs2_dump_glock()
1709 dtime = jiffies - gl->gl_demote_time; in gfs2_dump_glock()
1711 if (!test_bit(GLF_DEMOTE, &gl->gl_flags)) in gfs2_dump_glock()
1714 state2str(gl->gl_state), in gfs2_dump_glock()
1715 gl->gl_name.ln_type, in gfs2_dump_glock()
1716 (unsigned long long)gl->gl_name.ln_number, in gfs2_dump_glock()
1717 gflags2str(gflags_buf, gl), in gfs2_dump_glock()
1718 state2str(gl->gl_target), in gfs2_dump_glock()
1719 state2str(gl->gl_demote_state), dtime, in gfs2_dump_glock()
1720 atomic_read(&gl->gl_ail_count), in gfs2_dump_glock()
1721 atomic_read(&gl->gl_revokes), in gfs2_dump_glock()
1722 (int)gl->gl_lockref.count, gl->gl_hold_time); in gfs2_dump_glock()
1724 list_for_each_entry(gh, &gl->gl_holders, gh_list) in gfs2_dump_glock()
1727 if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump) in gfs2_dump_glock()
1728 glops->go_dump(seq, gl); in gfs2_dump_glock()
1733 struct gfs2_glock *gl = iter_ptr; in gfs2_glstats_seq_show() local
1736 gl->gl_name.ln_type, in gfs2_glstats_seq_show()
1737 (unsigned long long)gl->gl_name.ln_number, in gfs2_glstats_seq_show()
1738 (long long)gl->gl_stats.stats[GFS2_LKS_SRTT], in gfs2_glstats_seq_show()
1739 (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR], in gfs2_glstats_seq_show()
1740 (long long)gl->gl_stats.stats[GFS2_LKS_SRTTB], in gfs2_glstats_seq_show()
1741 (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB], in gfs2_glstats_seq_show()
1742 (long long)gl->gl_stats.stats[GFS2_LKS_SIRT], in gfs2_glstats_seq_show()
1743 (long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR], in gfs2_glstats_seq_show()
1744 (long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT], in gfs2_glstats_seq_show()
1745 (long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]); in gfs2_glstats_seq_show()
1841 static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl) in glock_hash_next() argument
1843 return hlist_bl_entry(rcu_dereference(gl->gl_list.next), in glock_hash_next()
1849 struct gfs2_glock *gl; in gfs2_glock_iter_next() local
1852 gl = gi->gl; in gfs2_glock_iter_next()
1853 if (gl) { in gfs2_glock_iter_next()
1854 gi->gl = glock_hash_next(gl); in gfs2_glock_iter_next()
1861 gi->gl = glock_hash_chain(gi->hash); in gfs2_glock_iter_next()
1864 while (gi->gl == NULL) { in gfs2_glock_iter_next()
1870 gi->gl = glock_hash_chain(gi->hash); in gfs2_glock_iter_next()
1874 } while (gi->sdp != gi->gl->gl_sbd || in gfs2_glock_iter_next()
1875 __lockref_is_dead(&gi->gl->gl_lockref)); in gfs2_glock_iter_next()
1899 return gi->gl; in gfs2_glock_seq_start()
1912 return gi->gl; in gfs2_glock_seq_next()
1919 if (gi->gl) in gfs2_glock_seq_stop()
1921 gi->gl = NULL; in gfs2_glock_seq_stop()