This source file includes the following definitions:
- glock_wake_function
- glock_waitqueue
- wake_up_glock
- gfs2_glock_dealloc
- gfs2_glock_free
- gfs2_glock_hold
- demote_ok
- gfs2_glock_add_to_lru
- gfs2_glock_remove_from_lru
- __gfs2_glock_queue_work
- gfs2_glock_queue_work
- __gfs2_glock_put
- gfs2_glock_queue_put
- gfs2_glock_put
- may_grant
- gfs2_holder_wake
- do_error
- do_promote
- find_first_waiter
- state_change
- gfs2_demote_wake
- finish_xmote
- do_xmote
- find_first_holder
- run_queue
- delete_work_func
- glock_work_func
- find_insert_glock
- gfs2_glock_get
- gfs2_holder_init
- gfs2_holder_reinit
- gfs2_holder_uninit
- gfs2_glock_update_hold_time
- gfs2_glock_wait
- glocks_pending
- gfs2_glock_async_wait
- handle_callback
- gfs2_print_dbg
- add_to_queue
- gfs2_glock_nq
- gfs2_glock_poll
- gfs2_glock_dq
- gfs2_glock_dq_wait
- gfs2_glock_dq_uninit
- gfs2_glock_nq_num
- glock_compare
- nq_m_sync
- gfs2_glock_nq_m
- gfs2_glock_dq_m
- gfs2_glock_cb
- gfs2_should_freeze
- gfs2_glock_complete
- glock_cmp
- gfs2_dispose_glock_lru
- gfs2_scan_glock_lru
- gfs2_glock_shrink_scan
- gfs2_glock_shrink_count
- glock_hash_walk
- thaw_glock
- clear_glock
- gfs2_glock_thaw
- dump_glock
- dump_glock_func
- gfs2_gl_hash_clear
- gfs2_glock_finish_truncate
- state2str
- hflags2str
- dump_holder
- gflags2str
- gfs2_dump_glock
- gfs2_glstats_seq_show
- gfs2_sbstats_seq_show
- gfs2_glock_init
- gfs2_glock_exit
- gfs2_glock_iter_next
- gfs2_glock_seq_start
- gfs2_glock_seq_next
- gfs2_glock_seq_stop
- gfs2_glock_seq_show
- gfs2_sbstats_seq_start
- gfs2_sbstats_seq_next
- gfs2_sbstats_seq_stop
- __gfs2_glocks_open
- gfs2_glocks_open
- gfs2_glocks_release
- gfs2_glstats_open
- gfs2_sbstats_open
- gfs2_create_debugfs_file
- gfs2_delete_debugfs_file
- gfs2_register_debugfs
- gfs2_unregister_debugfs
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/hash.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	struct gfs2_sbd *sdp;
	struct rhashtable_iter hti;
	struct gfs2_glock *gl;
	loff_t last_pos;
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       BIT(GFS2_GL_HASH_SHIFT)

static const struct rhashtable_params ht_parms = {
	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
	.key_len = offsetofend(struct lm_lockname, ln_type),
	.key_offset = offsetof(struct gfs2_glock, gl_name),
	.head_offset = offsetof(struct gfs2_glock, gl_node),
};

static struct rhashtable gl_hash_table;

#define GLOCK_WAIT_TABLE_BITS 12
#define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;

struct wait_glock_queue {
	struct lm_lockname *name;
	wait_queue_entry_t wait;
};

static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
			       int sync, void *key)
{
	struct wait_glock_queue *wait_glock =
		container_of(wait, struct wait_glock_queue, wait);
	struct lm_lockname *wait_name = wait_glock->name;
	struct lm_lockname *wake_name = key;

	if (wake_name->ln_sbd != wait_name->ln_sbd ||
	    wake_name->ln_number != wait_name->ln_number ||
	    wake_name->ln_type != wait_name->ln_type)
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}

static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
{
	u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);

	return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
}
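
/*
 * Note: waiters are hashed by lock name into a shared table of wait queues
 * (glock_wait_table) instead of embedding a waitqueue in every glock;
 * glock_wake_function() filters out hash collisions by re-checking the full
 * lm_lockname. This trades a little wakeup filtering work for a smaller
 * struct gfs2_glock.
 */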

/**
 * wake_up_glock  -  Wake up waiters on a glock
 * @gl: the glock
 */
static void wake_up_glock(struct gfs2_glock *gl)
{
	wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);

	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
}

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	} else {
		kfree(gl->gl_lksb.sb_lvbptr);
		kmem_cache_free(gfs2_glock_cachep, gl);
	}
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	BUG_ON(atomic_read(&gl->gl_revokes));
	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
	smp_mb();
	wake_up_glock(gl);
	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */
void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
	lockref_get(&gl->gl_lockref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */
static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}

void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	if (!(gl->gl_ops->go_flags & GLOF_LRU))
		return;

	spin_lock(&lru_lock);

	list_del(&gl->gl_lru);
	list_add_tail(&gl->gl_lru, &lru_list);

	if (!test_bit(GLF_LRU, &gl->gl_flags)) {
		set_bit(GLF_LRU, &gl->gl_flags);
		atomic_inc(&lru_count);
	}

	spin_unlock(&lru_lock);
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	if (!(gl->gl_ops->go_flags & GLOF_LRU))
		return;

	spin_lock(&lru_lock);
	if (test_bit(GLF_LRU, &gl->gl_flags)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
	spin_unlock(&lru_lock);
}

/*
 * Enqueue the glock on the work queue.  Passes one glock reference on to the
 * work queue.
 */
static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
	if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
		/*
		 * We are holding the lockref spinlock, and the work was still
		 * queued above.  The queued work (glock_work_func) takes that
		 * spinlock before dropping its glock reference(s), so it
		 * cannot have dropped them in the meantime.
		 */
		GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
		gl->gl_lockref.count--;
	}
}

static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
	spin_lock(&gl->gl_lockref.lock);
	__gfs2_glock_queue_work(gl, delay);
	spin_unlock(&gl->gl_lockref.lock);
}

static void __gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	lockref_mark_dead(&gl->gl_lockref);

	gfs2_glock_remove_from_lru(gl);
	spin_unlock(&gl->gl_lockref.lock);
	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
	trace_gfs2_glock_put(gl);
	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}

/*
 * Cause the glock to be put in work queue context.
 */
void gfs2_glock_queue_put(struct gfs2_glock *gl)
{
	gfs2_glock_queue_work(gl, 0);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */
void gfs2_glock_put(struct gfs2_glock *gl)
{
	if (lockref_put_or_lock(&gl->gl_lockref))
		return;

	__gfs2_glock_put(gl);
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */
static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);

	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}
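
/*
 * Summary of the grant rules implemented by may_grant() above (derived from
 * the checks in the code, not from separate documentation):
 *
 *  - An EX request, or any request while an EX holder sits at the head of
 *    the queue, is only granted to the head of the queue.
 *  - A request whose state matches the current glock state is compatible.
 *  - GL_EXACT demands an exact state match.
 *  - While the glock is held EX locally, SH piggybacks on a SH head holder
 *    and DF on a DF head holder.
 *  - LM_FLAG_ANY accepts any currently held (non-unlocked) state.
 */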

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_atomic();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
	if (gh->gh_flags & GL_ASYNC) {
		struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd;

		wake_up(&sdp->sd_async_glock_wait);
	}
}

/**
 * do_error - Something unexpected has happened during a lock request
 *
 */
static void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */
static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_lockref.lock);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_lockref.lock);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */
static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */
static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
		if (held2)
			gl->gl_lockref.count++;
		else
			gl->gl_lockref.count--;
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_atomic();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */
static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_lockref.lock);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n",
			       gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_lockref.lock);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_lockref.lock);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_lockref.lock);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_lockref.lock);
}
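
/*
 * A note on the state machine above: gl_state is the state the DLM
 * currently holds, gl_target is where we are trying to get to, and
 * gl_demote_state is where another node (or the shrinker) wants us to go.
 * finish_xmote() keeps issuing do_xmote() calls until gl_state matches
 * gl_target, dropping to LM_ST_UNLOCKED first whenever a direct conversion
 * is refused or would deadlock.
 */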

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
	int ret;

	if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)) &&
	    target != LM_ST_UNLOCKED)
		return;
	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_lockref.lock);
	if (glops->go_sync)
		glops->go_sync(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
		    target == LM_ST_UNLOCKED &&
		    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
			finish_xmote(gl, target);
			gfs2_glock_queue_work(gl, 0);
		}
		else if (ret) {
			fs_err(sdp, "lm_lock ret %d\n", ret);
			GLOCK_BUG_ON(gl, !test_bit(SDF_WITHDRAWN,
						   &sdp->sd_flags));
		}
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		gfs2_glock_queue_work(gl, 0);
	}

	spin_lock(&gl->gl_lockref.lock);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */
static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */
static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	gl->gl_lockref.count++;
	__gfs2_glock_queue_work(gl, 0);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	return;
}

static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	/* If someone's using this glock to create a new dinode, the block must
	   have been freed by another node, then re-used, in which case our
	   iopen callback is too late after the fact. Ignore it. */
	if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
		goto out;

	inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (!IS_ERR_OR_NULL(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
out:
	gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	unsigned int drop_refs = 1;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_refs++;
	}
	spin_lock(&gl->gl_lockref.lock);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			set_bit(GLF_DEMOTE, &gl->gl_flags);
		}
	}
	run_queue(gl, 0);
	if (delay) {
		/* Keep one glock reference for the work we requeue. */
		drop_refs--;
		if (gl->gl_name.ln_type != LM_TYPE_INODE)
			delay = 0;
		__gfs2_glock_queue_work(gl, delay);
	}

	/*
	 * Drop the remaining glock references manually here. (Mind that
	 * __gfs2_glock_queue_work depends on the lockref spinlock being held
	 * here as well.)
	 */
	gl->gl_lockref.count -= drop_refs;
	if (!gl->gl_lockref.count) {
		__gfs2_glock_put(gl);
		return;
	}
	spin_unlock(&gl->gl_lockref.lock);
}
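
/*
 * Reference counting in the work function (summary, inferred from the code
 * above): every queued work item owns one glock reference, and the reply
 * path (gfs2_glock_complete) takes an extra one when it sets
 * GLF_REPLY_PENDING, which is accounted for here via drop_refs. The
 * references are dropped in a single step under the lockref spinlock so
 * that a concurrent __gfs2_glock_queue_work() never sees an intermediate
 * count.
 */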

static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
					    struct gfs2_glock *new)
{
	struct wait_glock_queue wait;
	wait_queue_head_t *wq = glock_waitqueue(name);
	struct gfs2_glock *gl;

	wait.name = name;
	init_wait(&wait.wait);
	wait.wait.func = glock_wake_function;

again:
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	rcu_read_lock();
	if (new) {
		gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
			&new->gl_node, ht_parms);
		if (IS_ERR(gl))
			goto out;
	} else {
		gl = rhashtable_lookup_fast(&gl_hash_table,
			name, ht_parms);
	}
	if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
		rcu_read_unlock();
		schedule();
		goto again;
	}
out:
	rcu_read_unlock();
	finish_wait(wq, &wait.wait);
	return gl;
}
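
/*
 * Note on the retry loop above: a glock found in the hash table may already
 * be dead (its lockref dropped to zero) while gfs2_glock_free() has not yet
 * removed it from the table. We can neither reuse such an entry nor insert
 * a duplicate, so we sleep on the name-keyed wait queue and retry;
 * wake_up_glock() signals us once the dead entry is gone.
 */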

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This glock won't be created if it doesn't exist
 *
 * Returns: errno
 */
int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number,
				    .ln_type = glops->go_type,
				    .ln_sbd = sdp };
	struct gfs2_glock *gl, *tmp;
	struct address_space *mapping;
	struct kmem_cache *cachep;
	int ret = 0;

	gl = find_insert_glock(&name, NULL);
	if (gl) {
		*glp = gl;
		return 0;
	}
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_NOFS);
	if (!gl)
		return -ENOMEM;

	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));

	if (glops->go_flags & GLOF_LVB) {
		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
		if (!gl->gl_lksb.sb_lvbptr) {
			kmem_cache_free(cachep, gl);
			return -ENOMEM;
		}
	}

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_node.next = NULL;
	gl->gl_flags = 0;
	gl->gl_name = name;
	gl->gl_lockref.count = 1;
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_ops = glops;
	gl->gl_dstamp = 0;
	preempt_disable();
	/* We use the global stats to estimate the initial per-glock stats */
	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
	preempt_enable();
	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->private_data = NULL;
		mapping->writeback_index = 0;
	}

	tmp = find_insert_glock(&name, gl);
	if (!tmp) {
		*glp = gl;
		goto out;
	}
	if (IS_ERR(tmp)) {
		ret = PTR_ERR(tmp);
		goto out_free;
	}
	*glp = tmp;

out_free:
	kfree(gl->gl_lksb.sb_lvbptr);
	kmem_cache_free(cachep, gl);
	atomic_dec(&sdp->sd_glock_disposal);

out:
	return ret;
}
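
/*
 * Illustrative usage sketch (not part of this file): a typical caller looks
 * the glock up (or creates it) and then acquires it through a holder.
 * gfs2_glock_nq_init(), declared in glock.h, combines gfs2_holder_init()
 * and gfs2_glock_nq():
 *
 *	struct gfs2_glock *gl;
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
 *	if (!error) {
 *		error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *		gfs2_glock_put(gl);	(the holder keeps its own reference)
 *	}
 *	...access the object protected by the glock...
 *	gfs2_glock_dq_uninit(&gh);
 */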

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */
void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = _RET_IP_;
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */
void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = _RET_IP_;
	put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */
void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gfs2_holder_mark_uninitialized(gh);
	gh->gh_ip = 0;
}

static void gfs2_glock_update_hold_time(struct gfs2_glock *gl,
					unsigned long start_time)
{
	/* Have we waited longer than a second? */
	if (time_after(jiffies, start_time + HZ)) {
		/* Lengthen the minimum hold time. */
		gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR,
				       GL_GLOCK_MAX_HOLD);
	}
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */
int gfs2_glock_wait(struct gfs2_holder *gh)
{
	unsigned long start_time = jiffies;

	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
	gfs2_glock_update_hold_time(gh->gh_gl, start_time);
	return gh->gh_error;
}

static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs)
{
	int i;

	for (i = 0; i < num_gh; i++)
		if (test_bit(HIF_WAIT, &ghs[i].gh_iflags))
			return 1;
	return 0;
}

/**
 * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions
 * @num_gh: the number of holders in the array
 * @ghs: the glock holder array
 *
 * Returns: 0 on success, meaning all glocks have been granted and are held.
 *          -ESTALE if the request timed out, meaning all glocks were released,
 *          and the caller should retry the operation.
 */
int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd;
	int i, ret = 0, timeout = 0;
	unsigned long start_time = jiffies;
	bool keep_waiting;

	might_sleep();
	/*
	 * Total up the (minimum hold time * 2) of all glocks and use that to
	 * determine the max amount of time we should wait.
	 */
	for (i = 0; i < num_gh; i++)
		timeout += ghs[i].gh_gl->gl_hold_time << 1;

wait_for_dlm:
	if (!wait_event_timeout(sdp->sd_async_glock_wait,
				!glocks_pending(num_gh, ghs), timeout))
		ret = -ESTALE; /* request timed out. */

	/*
	 * If dlm granted all our requests, we need to adjust the glock
	 * minimum hold time values according to how long we waited.
	 *
	 * If our request timed out, we need to repeatedly release any held
	 * glocks we acquired thus far to allow dlm to acquire the remaining
	 * glocks without deadlocking.  We cannot currently cancel outstanding
	 * glock acquisitions.
	 *
	 * The HIF_WAIT bit tells us which requests still need a response from
	 * dlm.
	 *
	 * If dlm sent us any errors, we return the first error we find.
	 */
	keep_waiting = false;
	for (i = 0; i < num_gh; i++) {
		/* Skip holders we have already dequeued below. */
		if (!gfs2_holder_queued(&ghs[i]))
			continue;
		/* Skip holders with a pending DLM response. */
		if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) {
			keep_waiting = true;
			continue;
		}

		if (test_bit(HIF_HOLDER, &ghs[i].gh_iflags)) {
			if (ret == -ESTALE)
				gfs2_glock_dq(&ghs[i]);
			else
				gfs2_glock_update_hold_time(ghs[i].gh_gl,
							    start_time);
		}
		if (!ret)
			ret = ghs[i].gh_error;
	}

	if (keep_waiting)
		goto wait_for_dlm;

	/*
	 * At this point, we've either acquired all locks or released them all.
	 */
	return ret;
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: zero to demote immediately; otherwise pending demote
 * @remote: true if this came from a different cluster node
 *
 * There are only two demote requests that a node may receive from other
 * nodes: demote to (shared or unlocked).
 */
static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay, bool remote)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, remote);
	trace_gfs2_demote_rq(gl, remote);
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		seq_vprintf(seq, fmt, args);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		pr_err("%pV", &vaf);
	}

	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the non-recursive case optimised.
 */
static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_futile = 0;

	GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		GLOCK_BUG_ON(gl, true);

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_futile = !may_grant(gl, gh);
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_futile &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);
	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_lockref.lock);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_lockref.lock);
	}
	return;

trap_recursive:
	fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip);
	fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	fs_err(sdp, "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip);
	fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid));
	fs_err(sdp, "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	gfs2_dump_glock(NULL, gl, true);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */
int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)))
		return -EIO;

	if (test_bit(GLF_LRU, &gl->gl_flags))
		gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_lockref.lock);
	add_to_queue(gh);
	if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
		gl->gl_lockref.count++;
		__gfs2_glock_queue_work(gl, 0);
	}
	run_queue(gl, 1);
	spin_unlock(&gl->gl_lockref.lock);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */
int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}
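
/*
 * Illustrative GL_ASYNC pattern (a sketch of how gfs2_glock_nq(),
 * gfs2_glock_poll() and gfs2_glock_wait() combine; do_other_work() is a
 * hypothetical placeholder):
 *
 *	gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);		(never fails with GL_ASYNC)
 *	while (!gfs2_glock_poll(&gh))
 *		do_other_work();
 *	error = gfs2_glock_wait(&gh);
 */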

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */
void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_lockref.lock);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0, false);

	list_del_init(&gh->gh_list);
	clear_bit(HIF_HOLDER, &gh->gh_iflags);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_lockref.lock);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_lockref.lock);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
		gfs2_glock_add_to_lru(gl);

	trace_gfs2_glock_queue(gh, 0);
	if (unlikely(!fast_path)) {
		gl->gl_lockref.count++;
		if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
		    gl->gl_name.ln_type == LM_TYPE_INODE)
			delay = gl->gl_hold_time;
		__gfs2_glock_queue_work(gl, delay);
	}
	spin_unlock(&gl->gl_lockref.lock);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */
void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */
int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */
static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}
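
/*
 * Note: sorting holders by lock number before acquiring them gives every
 * caller the same global ordering, which is what makes nq_m_sync() below
 * deadlock free. The BUG_ON documents the assumption that two distinct
 * glocks with the same number must be of different types.
 */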

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: placeholder for the holder structure to pass back
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glock acquired)
 */
static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glock acquired)
 */
int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *),
				    GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */
void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq(&ghs[num_gh]);
}

void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_hold_time;
	if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
	    gl->gl_name.ln_type == LM_TYPE_INODE) {
		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
			delay = gl->gl_hold_time;
	}

	spin_lock(&gl->gl_lockref.lock);
	handle_callback(gl, state, delay, true);
	__gfs2_glock_queue_work(gl, delay);
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */
static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_lockref.lock lock so that it is ok
 * to use a spinlock around the update.
 */
void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;

	spin_lock(&gl->gl_lockref.lock);
	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_FROZEN, &gl->gl_flags);
			spin_unlock(&gl->gl_lockref.lock);
			return;
		}
	}

	gl->gl_lockref.count++;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	__gfs2_glock_queue_work(gl, 0);
	spin_unlock(&gl->gl_lockref.lock);
}

static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_glock *gla, *glb;

	gla = list_entry(a, struct gfs2_glock, gl_lru);
	glb = list_entry(b, struct gfs2_glock, gl_lru);

	if (gla->gl_name.ln_number > glb->gl_name.ln_number)
		return 1;
	if (gla->gl_name.ln_number < glb->gl_name.ln_number)
		return -1;

	return 0;
}

/**
 * gfs2_dispose_glock_lru - Demote a list of glocks
 * @list: The list to dispose of
 *
 * Disposing of glocks may involve disk accesses, so that here we sort
 * the glocks by number (i.e. disk location of the inodes) so that if
 * there are any such accesses, they'll be sent in order (mostly).
 *
 * Must be called under the lru_lock, but may drop and retake this
 * lock. While the lru_lock is dropped, entries may vanish from the
 * list, but no new entries will appear on the list (since it is
 * private)
 */
static void gfs2_dispose_glock_lru(struct list_head *list)
__releases(&lru_lock)
__acquires(&lru_lock)
{
	struct gfs2_glock *gl;

	list_sort(NULL, list, glock_cmp);

	while(!list_empty(list)) {
		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		if (!spin_trylock(&gl->gl_lockref.lock)) {
add_back_to_lru:
			list_add(&gl->gl_lru, &lru_list);
			set_bit(GLF_LRU, &gl->gl_flags);
			atomic_inc(&lru_count);
			continue;
		}
		if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			spin_unlock(&gl->gl_lockref.lock);
			goto add_back_to_lru;
		}
		gl->gl_lockref.count++;
		if (demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
		__gfs2_glock_queue_work(gl, 0);
		spin_unlock(&gl->gl_lockref.lock);
		cond_resched_lock(&lru_lock);
	}
}

/**
 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
 * @nr: The number of entries to scan
 *
 * This function selects the entries on the LRU which are able to
 * be demoted, and then kicks off the process by calling
 * gfs2_dispose_glock_lru() above.
 */
static long gfs2_scan_glock_lru(int nr)
{
	struct gfs2_glock *gl;
	LIST_HEAD(skipped);
	LIST_HEAD(dispose);
	long freed = 0;

	spin_lock(&lru_lock);
	while ((nr-- >= 0) && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);

		/* Test for being demotable */
		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
			list_move(&gl->gl_lru, &dispose);
			atomic_dec(&lru_count);
			clear_bit(GLF_LRU, &gl->gl_flags);
			freed++;
			continue;
		}

		list_move(&gl->gl_lru, &skipped);
	}
	list_splice(&skipped, &lru_list);
	if (!list_empty(&dispose))
		gfs2_dispose_glock_lru(&dispose);
	spin_unlock(&lru_lock);

	return freed;
}

static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;
	return gfs2_scan_glock_lru(sc->nr_to_scan);
}

static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	return vfs_pressure_ratio(atomic_read(&lru_count));
}

static struct shrinker glock_shrinker = {
	.seeks = DEFAULT_SEEKS,
	.count_objects = gfs2_glock_shrink_count,
	.scan_objects = gfs2_glock_shrink_scan,
};
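
/*
 * Note: gfs2_glock_shrink_scan() bails out with SHRINK_STOP unless __GFP_FS
 * is set, because demoting a glock can recurse into the filesystem (journal
 * flush, page invalidation) and could deadlock if entered from fs-internal
 * memory reclaim. The shrinker is registered in gfs2_glock_init() below.
 */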

/**
 * glock_hash_walk - Call a function for glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 *
 * Note that the function can be called multiple times on the same
 * glock instance due to the way the hash tables are organised.
 */
static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;
	struct rhashtable_iter iter;

	rhashtable_walk_enter(&gl_hash_table, &iter);

	do {
		rhashtable_walk_start(&iter);

		while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
			if (gl->gl_name.ln_sbd == sdp &&
			    lockref_get_not_dead(&gl->gl_lockref))
				examiner(gl);

		rhashtable_walk_stop(&iter);
	} while (cond_resched(), gl == ERR_PTR(-EAGAIN));

	rhashtable_walk_exit(&iter);
}

/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 */
static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) {
		gfs2_glock_put(gl);
		return;
	}
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_queue_work(gl, 0);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */
static void clear_glock(struct gfs2_glock *gl)
{
	gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_lockref.lock);
	if (gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
	__gfs2_glock_queue_work(gl, 0);
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */
void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	glock_hash_walk(thaw_glock, sdp);
}

static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
{
	spin_lock(&gl->gl_lockref.lock);
	gfs2_dump_glock(seq, gl, fsid);
	spin_unlock(&gl->gl_lockref.lock);
}

static void dump_glock_func(struct gfs2_glock *gl)
{
	dump_glock(NULL, gl, true);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
	flush_workqueue(glock_workqueue);
	glock_hash_walk(clear_glock, sdp);
	flush_workqueue(glock_workqueue);
	wait_event_timeout(sdp->sd_glock_wait,
			   atomic_read(&sdp->sd_glock_disposal) == 0,
			   HZ * 600);
	glock_hash_walk(dump_glock_func, sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);

	spin_lock(&gl->gl_lockref.lock);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_lockref.lock);
}

static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 * @fs_id_buf: printed file system id
 *
 */
static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh,
			const char *fs_id_buf)
{
	struct task_struct *gh_owner = NULL;
	char flags_buf[32];

	rcu_read_lock();
	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, "%s H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       fs_id_buf, state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error,
		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		       gh_owner ? gh_owner->comm : "(ended)",
		       (void *)gh->gh_ip);
	rcu_read_unlock();
}

static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
	const unsigned long *gflags = &gl->gl_flags;
	char *p = buf;

	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (test_bit(GLF_QUEUED, gflags))
		*p++ = 'q';
	if (test_bit(GLF_LRU, gflags))
		*p++ = 'L';
	if (gl->gl_object)
		*p++ = 'o';
	if (test_bit(GLF_BLOCKING, gflags))
		*p++ = 'b';
	*p = 0;
	return buf;
}

/**
 * gfs2_dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 * @fsid: If true, also dump the file system id
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it's possible to see if they are composed of spaces for
 * example. The fields are n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 */
void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	char fs_id_buf[sizeof(sdp->sd_fsname) + 7];

	memset(fs_id_buf, 0, sizeof(fs_id_buf));
	if (fsid && sdp)
		sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "%sG: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d "
		       "v:%d r:%d m:%ld\n", fs_id_buf, state2str(gl->gl_state),
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number,
		       gflags2str(gflags_buf, gl),
		       state2str(gl->gl_target),
		       state2str(gl->gl_demote_state), dtime,
		       atomic_read(&gl->gl_ail_count),
		       atomic_read(&gl->gl_revokes),
		       (int)gl->gl_lockref.count, gl->gl_hold_time);

	list_for_each_entry(gh, &gl->gl_holders, gh_list)
		dump_holder(seq, gh, fs_id_buf);

	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		glops->go_dump(seq, gl, fs_id_buf);
}

static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock *gl = iter_ptr;

	seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
		   gl->gl_name.ln_type,
		   (unsigned long long)gl->gl_name.ln_number,
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
	return 0;
}

static const char *gfs2_gltype[] = {
	"type",
	"reserved",
	"nondisk",
	"inode",
	"rgrp",
	"meta",
	"iopen",
	"flock",
	"plock",
	"quota",
	"journal",
};

static const char *gfs2_stype[] = {
	[GFS2_LKS_SRTT]		= "srtt",
	[GFS2_LKS_SRTTVAR]	= "srttvar",
	[GFS2_LKS_SRTTB]	= "srttb",
	[GFS2_LKS_SRTTVARB]	= "srttvarb",
	[GFS2_LKS_SIRT]		= "sirt",
	[GFS2_LKS_SIRTVAR]	= "sirtvar",
	[GFS2_LKS_DCOUNT]	= "dlm",
	[GFS2_LKS_QCOUNT]	= "queue",
};

#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))

static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_sbd *sdp = seq->private;
	loff_t pos = *(loff_t *)iter_ptr;
	unsigned index = pos >> 3;
	unsigned subindex = pos & 0x07;
	int i;

	if (index == 0 && subindex != 0)
		return 0;

	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
		   (index == 0) ? "cpu": gfs2_stype[subindex]);

	for_each_possible_cpu(i) {
		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);

		if (index == 0)
			seq_printf(seq, " %15u", i);
		else
			seq_printf(seq, " %15llu", (unsigned long long)lkstats->
				   lkstats[index - 1].stats[subindex]);
	}
	seq_putc(seq, '\n');
	return 0;
}

int __init gfs2_glock_init(void)
{
	int i, ret;

	ret = rhashtable_init(&gl_hash_table, &ht_parms);
	if (ret < 0)
		return ret;

	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
	if (!glock_workqueue) {
		rhashtable_destroy(&gl_hash_table);
		return -ENOMEM;
	}
	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
						WQ_MEM_RECLAIM | WQ_FREEZABLE,
						0);
	if (!gfs2_delete_workqueue) {
		destroy_workqueue(glock_workqueue);
		rhashtable_destroy(&gl_hash_table);
		return -ENOMEM;
	}

	ret = register_shrinker(&glock_shrinker);
	if (ret) {
		destroy_workqueue(gfs2_delete_workqueue);
		destroy_workqueue(glock_workqueue);
		rhashtable_destroy(&gl_hash_table);
		return ret;
	}

	for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
		init_waitqueue_head(glock_wait_table + i);

	return 0;
}

void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	rhashtable_destroy(&gl_hash_table);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}

static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
{
	struct gfs2_glock *gl = gi->gl;

	if (gl) {
		if (n == 0)
			return;
		if (!lockref_put_not_zero(&gl->gl_lockref))
			gfs2_glock_queue_put(gl);
	}
	for (;;) {
		gl = rhashtable_walk_next(&gi->hti);
		if (IS_ERR_OR_NULL(gl)) {
			if (gl == ERR_PTR(-EAGAIN)) {
				n = 1;
				continue;
			}
			gl = NULL;
			break;
		}
		if (gl->gl_name.ln_sbd != gi->sdp)
			continue;
		if (n <= 1) {
			if (!lockref_get_not_dead(&gl->gl_lockref))
				continue;
			break;
		} else {
			if (__lockref_is_dead(&gl->gl_lockref))
				continue;
			n--;
		}
	}
	gi->gl = gl;
}
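
/*
 * Note: the iterator keeps a reference on gi->gl across seq_file reads so
 * that the hash table walk can be stopped and resumed between read()
 * syscalls; on -EAGAIN (a concurrent rhashtable resize) it restarts the
 * walk and skips forward to the next entry rather than failing.
 */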

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n;

	/*
	 * We can either stay where we are, skip to the next hash table
	 * entry, or start from the beginning.
	 */
	if (*pos < gi->last_pos) {
		rhashtable_walk_exit(&gi->hti);
		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
		n = *pos + 1;
	} else {
		n = *pos - gi->last_pos;
	}

	rhashtable_walk_start(&gi->hti);

	gfs2_glock_iter_next(gi, n);
	gi->last_pos = *pos;
	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;
	gi->last_pos = *pos;
	gfs2_glock_iter_next(gi, 1);
	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
	__releases(RCU)
{
	struct gfs2_glock_iter *gi = seq->private;

	rhashtable_walk_stop(&gi->hti);
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	dump_glock(seq, iter_ptr, false);
	return 0;
}

static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{
	preempt_disable();
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	return pos;
}

static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
				   loff_t *pos)
{
	(*pos)++;
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	return pos;
}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	preempt_enable();
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_seq_ops = {
	.start = gfs2_sbstats_seq_start,
	.next  = gfs2_sbstats_seq_next,
	.stop  = gfs2_sbstats_seq_stop,
	.show  = gfs2_sbstats_seq_show,
};

#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)

static int __gfs2_glocks_open(struct inode *inode, struct file *file,
			      const struct seq_operations *ops)
{
	int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;

		gi->sdp = inode->i_private;
		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
		if (seq->buf)
			seq->size = GFS2_SEQ_GOODSIZE;
		/*
		 * Initially, we are "before" the first hash table entry;
		 * the first advance of the iterator fetches the first entry.
		 */
		gi->last_pos = -1;
		gi->gl = NULL;
		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
	}
	return ret;
}

static int gfs2_glocks_open(struct inode *inode, struct file *file)
{
	return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
}

static int gfs2_glocks_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct gfs2_glock_iter *gi = seq->private;

	if (gi->gl)
		gfs2_glock_put(gi->gl);
	rhashtable_walk_exit(&gi->hti);
	return seq_release_private(inode, file);
}

static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
	return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
}

static int gfs2_sbstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &gfs2_sbstats_seq_ops);
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		seq->private = inode->i_private;
	}
	return ret;
}

static const struct file_operations gfs2_glocks_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glocks_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = gfs2_glocks_release,
};

static const struct file_operations gfs2_glstats_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = gfs2_glocks_release,
};

static const struct file_operations gfs2_sbstats_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_sbstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

void gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);

	debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
			    &gfs2_glocks_fops);

	debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
			    &gfs2_glstats_fops);

	debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
			    &gfs2_sbstats_fops);
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	debugfs_remove_recursive(sdp->debugfs_dir);
	sdp->debugfs_dir = NULL;
}

void gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}