This source file includes the following definitions:
- gfs2_update_stats
- gfs2_update_reply_times
- gfs2_update_request_times
- gdlm_ast
- gdlm_bast
- make_mode
- make_flags
- gfs2_reverse_hex
- gdlm_lock
- gdlm_put_lock
- gdlm_cancel
- control_lvb_read
- control_lvb_write
- all_jid_bits_clear
- sync_wait_cb
- sync_unlock
- sync_lock
- mounted_unlock
- mounted_lock
- control_unlock
- control_lock
- gfs2_control_func
- control_mount
- control_first_done
- set_recover_size
- free_recover_size
- gdlm_recover_prep
- gdlm_recover_slot
- gdlm_recover_done
- gdlm_recovery_result
- gdlm_mount
- gdlm_first_done
- gdlm_unmount

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fs.h>
#include <linux/dlm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/sched/signal.h>

#include "incore.h"
#include "glock.h"
#include "util.h"
#include "sys.h"
#include "trace_gfs2.h"
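
/**
 * gfs2_update_stats - update running lock-time statistics
 * @s: the statistics block to update (per-glock or per-cpu)
 * @index: index of the smoothed mean; @index + 1 holds the deviation estimate
 * @sample: new sample value, in nanoseconds
 *
 * Keeps an exponentially smoothed mean (gain 1/8) and a smoothed mean
 * absolute deviation (gain 1/4), in the style of TCP's SRTT/RTTVAR
 * estimators.
 */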
static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index,
				     s64 sample)
{
	s64 delta = sample - s->stats[index];
	s->stats[index] += (delta >> 3);
	index++;
	s->stats[index] += (s64)(abs(delta) - s->stats[index]) >> 2;
}
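
/**
 * gfs2_update_reply_times - update locking statistics on a dlm reply
 * @gl: the glock the reply relates to
 *
 * Folds the time between the dlm request and its reply into the blocking
 * or non-blocking round-trip estimates, both for this glock and for the
 * per-cpu statistics of its glock type.
 */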
static inline void gfs2_update_reply_times(struct gfs2_glock *gl)
{
	struct gfs2_pcpu_lkstats *lks;
	const unsigned gltype = gl->gl_name.ln_type;
	unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ?
			 GFS2_LKS_SRTTB : GFS2_LKS_SRTT;
	s64 rtt;

	preempt_disable();
	rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp));
	lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
	gfs2_update_stats(&gl->gl_stats, index, rtt);		/* Local */
	gfs2_update_stats(&lks->lkstats[gltype], index, rtt);	/* Global */
	preempt_enable();

	trace_gfs2_glock_lock_time(gl, rtt);
}
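
/**
 * gfs2_update_request_times - update the inter-request statistics
 * @gl: the glock
 *
 * Measures the gap since the previous dlm request on this glock and folds
 * it into the smoothed inter-request time (SIRT) estimates.
 */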
static inline void gfs2_update_request_times(struct gfs2_glock *gl)
{
	struct gfs2_pcpu_lkstats *lks;
	const unsigned gltype = gl->gl_name.ln_type;
	ktime_t dstamp;
	s64 irt;

	preempt_disable();
	dstamp = gl->gl_dstamp;
	gl->gl_dstamp = ktime_get_real();
	irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp));
	lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
	gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt);		/* Local */
	gfs2_update_stats(&lks->lkstats[gltype], GFS2_LKS_SIRT, irt);	/* Global */
	preempt_enable();
}
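
/* Completion callback from the dlm: translate the dlm status into a glock
 * state (or error) and hand the result to the glock layer. */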
static void gdlm_ast(void *arg)
{
	struct gfs2_glock *gl = arg;
	unsigned ret = gl->gl_state;

	gfs2_update_reply_times(gl);
	BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);

	if ((gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) && gl->gl_lksb.sb_lvbptr)
		memset(gl->gl_lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);

	switch (gl->gl_lksb.sb_status) {
	case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
		gfs2_glock_free(gl);
		return;
	case -DLM_ECANCEL: /* Cancel while getting lock */
		ret |= LM_OUT_CANCELED;
		goto out;
	case -EAGAIN: /* Try lock fails */
	case -EDEADLK: /* Deadlock detected */
		goto out;
	case -ETIMEDOUT: /* Canceled due to timeout */
		ret |= LM_OUT_ERROR;
		goto out;
	case 0: /* Success */
		break;
	default: /* Something unexpected */
		BUG();
	}

	ret = gl->gl_req;
	if (gl->gl_lksb.sb_flags & DLM_SBF_ALTMODE) {
		if (gl->gl_req == LM_ST_SHARED)
			ret = LM_ST_DEFERRED;
		else if (gl->gl_req == LM_ST_DEFERRED)
			ret = LM_ST_SHARED;
		else
			BUG();
	}

	set_bit(GLF_INITIAL, &gl->gl_flags);
	gfs2_glock_complete(gl, ret);
	return;
out:
	if (!test_bit(GLF_INITIAL, &gl->gl_flags))
		gl->gl_lksb.sb_lkid = 0;
	gfs2_glock_complete(gl, ret);
}
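
/* Blocking callback from the dlm: another node wants a lock that conflicts
 * with the mode we hold, so ask the glock layer to demote to a compatible
 * state. */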
static void gdlm_bast(void *arg, int mode)
{
	struct gfs2_glock *gl = arg;

	switch (mode) {
	case DLM_LOCK_EX:
		gfs2_glock_cb(gl, LM_ST_UNLOCKED);
		break;
	case DLM_LOCK_CW:
		gfs2_glock_cb(gl, LM_ST_DEFERRED);
		break;
	case DLM_LOCK_PR:
		gfs2_glock_cb(gl, LM_ST_SHARED);
		break;
	default:
		fs_err(gl->gl_name.ln_sbd, "unknown bast mode %d\n", mode);
		BUG();
	}
}
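
/* Convert a gfs2 lock-manager state into the corresponding dlm lock mode. */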
static int make_mode(struct gfs2_sbd *sdp, const unsigned int lmstate)
{
	switch (lmstate) {
	case LM_ST_UNLOCKED:
		return DLM_LOCK_NL;
	case LM_ST_EXCLUSIVE:
		return DLM_LOCK_EX;
	case LM_ST_DEFERRED:
		return DLM_LOCK_CW;
	case LM_ST_SHARED:
		return DLM_LOCK_PR;
	}
	fs_err(sdp, "unknown LM state %d\n", lmstate);
	BUG();
	return -1;
}
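
/* Build the dlm request flags (DLM_LKF_*) corresponding to the gfs2 request
 * flags and the current state of the lock. */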
static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
		      const int req)
{
	u32 lkf = 0;

	if (gl->gl_lksb.sb_lvbptr)
		lkf |= DLM_LKF_VALBLK;

	if (gfs_flags & LM_FLAG_TRY)
		lkf |= DLM_LKF_NOQUEUE;

	if (gfs_flags & LM_FLAG_TRY_1CB) {
		lkf |= DLM_LKF_NOQUEUE;
		lkf |= DLM_LKF_NOQUEUEBAST;
	}

	if (gfs_flags & LM_FLAG_PRIORITY) {
		lkf |= DLM_LKF_NOORDER;
		lkf |= DLM_LKF_HEADQUE;
	}

	if (gfs_flags & LM_FLAG_ANY) {
		if (req == DLM_LOCK_PR)
			lkf |= DLM_LKF_ALTCW;
		else if (req == DLM_LOCK_CW)
			lkf |= DLM_LKF_ALTPR;
		else
			BUG();
	}

	if (gl->gl_lksb.sb_lkid != 0) {
		/* a nonzero lkid means the lock already exists, so this
		   request is a conversion rather than a new lock */
		lkf |= DLM_LKF_CONVERT;
		if (test_bit(GLF_BLOCKING, &gl->gl_flags))
			lkf |= DLM_LKF_QUECVT;
	}

	return lkf;
}
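
/* Write the hex digits of @value into the buffer ending at @c, least
 * significant digit first. */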
static void gfs2_reverse_hex(char *c, u64 value)
{
	*c = '0';
	while (value) {
		*c-- = hex_asc[value & 0x0f];
		value >>= 4;
	}
}
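
/* Submit a new dlm lock request, or convert an existing one, for a glock. */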
static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
		     unsigned int flags)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
	int req;
	u32 lkf;
	char strname[GDLM_STRNAME_BYTES] = "";

	req = make_mode(gl->gl_name.ln_sbd, req_state);
	lkf = make_flags(gl, flags, req);
	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
	if (gl->gl_lksb.sb_lkid) {
		gfs2_update_request_times(gl);
	} else {
		memset(strname, ' ', GDLM_STRNAME_BYTES - 1);
		strname[GDLM_STRNAME_BYTES - 1] = '\0';
		gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type);
		gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number);
		gl->gl_dstamp = ktime_get_real();
	}

	/*
	 * Submit the actual lock request.
	 */

	return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
			GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
}
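
/* Release the dlm lock backing a glock that is being freed. */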
static void gdlm_put_lock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int lvb_needs_unlock = 0;
	int error;

	if (gl->gl_lksb.sb_lkid == 0) {
		gfs2_glock_free(gl);
		return;
	}

	clear_bit(GLF_BLOCKING, &gl->gl_flags);
	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_update_request_times(gl);

	/* don't want to skip dlm_unlock writing the lvb when lock is ex */
	if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE))
		lvb_needs_unlock = 1;

	if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
	    !lvb_needs_unlock) {
		gfs2_glock_free(gl);
		return;
	}

	error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
			   NULL, gl);
	if (error) {
		fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n",
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number, error);
		return;
	}
}
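
/* Ask the dlm to cancel an in-flight request on this glock; the result
 * arrives through gdlm_ast() as -DLM_ECANCEL. */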
static void gdlm_cancel(struct gfs2_glock *gl)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
	dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
}
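
/*
 * Overview of the recovery coordination implemented below:
 *
 * Two "nondisk" dlm locks are used to coordinate mounting and journal
 * recovery between nodes.
 *
 * mounted_lock: held in PR mode by every mounted node, and in EX mode by
 * the first mounter while it performs first-mounter recovery.  A mounting
 * node requests EX to discover whether it is the first mounter, and falls
 * back to PR if other nodes already hold the lock.
 *
 * control_lock: its lvb carries a recovery generation number in the first
 * four bytes (little endian) and, starting at JID_BITMAP_OFFSET, a bitmap
 * of journal ids (jids) that still need recovery.  Nodes normally hold
 * this lock in NL mode and convert to EX to read or update the lvb.
 *
 * The dlm recovery callbacks (gdlm_recover_prep/slot/done) record which
 * slots (jids) failed and the new recovery generation.  gfs2_control_func()
 * then takes control_lock in EX, merges that state into the lvb bitmap,
 * and calls gfs2_recover_set() for each jid still needing recovery.  Once
 * all bits are clear again, DFL_BLOCK_LOCKS is lifted and the glocks are
 * thawed.
 */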
#define JID_BITMAP_OFFSET 8

static void control_lvb_read(struct lm_lockstruct *ls, uint32_t *lvb_gen,
			     char *lvb_bits)
{
	__le32 gen;
	memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE);
	memcpy(&gen, lvb_bits, sizeof(__le32));
	*lvb_gen = le32_to_cpu(gen);
}

static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen,
			      char *lvb_bits)
{
	__le32 gen;
	memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE);
	gen = cpu_to_le32(lvb_gen);
	memcpy(ls->ls_control_lvb, &gen, sizeof(__le32));
}

static int all_jid_bits_clear(char *lvb)
{
	return !memchr_inv(lvb + JID_BITMAP_OFFSET, 0,
			   GDLM_LVB_SIZE - JID_BITMAP_OFFSET);
}

static void sync_wait_cb(void *arg)
{
	struct lm_lockstruct *ls = arg;
	complete(&ls->ls_sync_wait);
}
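
/* Synchronous wrappers around dlm_unlock()/dlm_lock() for the nondisk
 * control and mounted locks; sync_wait_cb() completes ls_sync_wait when
 * the ast arrives. */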
static int sync_unlock(struct gfs2_sbd *sdp, struct dlm_lksb *lksb, char *name)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int error;

	error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls);
	if (error) {
		fs_err(sdp, "%s lkid %x error %d\n",
		       name, lksb->sb_lkid, error);
		return error;
	}

	wait_for_completion(&ls->ls_sync_wait);

	if (lksb->sb_status != -DLM_EUNLOCK) {
		fs_err(sdp, "%s lkid %x status %d\n",
		       name, lksb->sb_lkid, lksb->sb_status);
		return -1;
	}
	return 0;
}

static int sync_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags,
		     unsigned int num, struct dlm_lksb *lksb, char *name)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	char strname[GDLM_STRNAME_BYTES];
	int error, status;

	memset(strname, 0, GDLM_STRNAME_BYTES);
	snprintf(strname, GDLM_STRNAME_BYTES, "%8x%16x", LM_TYPE_NONDISK, num);

	error = dlm_lock(ls->ls_dlm, mode, lksb, flags,
			 strname, GDLM_STRNAME_BYTES - 1,
			 0, sync_wait_cb, ls, NULL);
	if (error) {
		fs_err(sdp, "%s lkid %x flags %x mode %d error %d\n",
		       name, lksb->sb_lkid, flags, mode, error);
		return error;
	}

	wait_for_completion(&ls->ls_sync_wait);

	status = lksb->sb_status;

	if (status && status != -EAGAIN) {
		fs_err(sdp, "%s lkid %x flags %x mode %d status %d\n",
		       name, lksb->sb_lkid, flags, mode, status);
	}

	return status;
}

static int mounted_unlock(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_unlock(sdp, &ls->ls_mounted_lksb, "mounted_lock");
}

static int mounted_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_lock(sdp, mode, flags, GFS2_MOUNTED_LOCK,
			 &ls->ls_mounted_lksb, "mounted_lock");
}

static int control_unlock(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_unlock(sdp, &ls->ls_control_lksb, "control_lock");
}

static int control_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_lock(sdp, mode, flags, GFS2_CONTROL_LOCK,
			 &ls->ls_control_lksb, "control_lock");
}
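
/*
 * gfs2_control_func - workqueue function driving inter-node recovery
 *
 * Runs after a dlm recovery cycle or after a journal recovery result
 * arrives.  It converts control_lock to EX, reconciles the lvb jid bitmap
 * with the locally recorded submit/result state, requests recovery of any
 * journals still marked, and clears DFL_BLOCK_LOCKS when nothing is left
 * to recover.
 */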
static void gfs2_control_func(struct work_struct *work)
{
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t block_gen, start_gen, lvb_gen, flags;
	int recover_set = 0;
	int write_lvb = 0;
	int recover_size;
	int i, error;

	spin_lock(&ls->ls_recover_spin);
	/*
	 * No work to do while we are still mounting (control_mount() handles
	 * recovery until DFL_MOUNT_DONE is set), or while we are the first
	 * mounter (first mounter recovery is handled by control_mount() and
	 * control_first_done()).
	 */
	if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	    test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	block_gen = ls->ls_recover_block;
	start_gen = ls->ls_recover_start;
	spin_unlock(&ls->ls_recover_spin);

	/*
	 * Equal block and start generations mean we are between recover_prep
	 * and recover_done callbacks: dlm recovery is in progress and there
	 * is nothing to do until recover_done advances the start generation.
	 */
	if (block_gen == start_gen)
		return;

	/*
	 * Take control_lock in EX to read the lvb, clear the jid bits for
	 * journals that have been successfully recovered, set the bits for
	 * journals that still need recovery, and write back the current
	 * generation if anything changed.
	 */
	error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_VALBLK);
	if (error) {
		fs_err(sdp, "control lock EX error %d\n", error);
		return;
	}

	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);

	spin_lock(&ls->ls_recover_spin);
	if (block_gen != ls->ls_recover_block ||
	    start_gen != ls->ls_recover_start) {
		fs_info(sdp, "recover generation %u block1 %u %u\n",
			start_gen, block_gen, ls->ls_recover_block);
		spin_unlock(&ls->ls_recover_spin);
		control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
		return;
	}

	recover_size = ls->ls_recover_size;

	if (lvb_gen <= start_gen) {
		/*
		 * Clear lvb bits for jids we have successfully recovered;
		 * another node may have recovered them and cleared the bits
		 * already, which is fine.
		 */
		for (i = 0; i < recover_size; i++) {
			if (ls->ls_recover_result[i] != LM_RD_SUCCESS)
				continue;

			ls->ls_recover_result[i] = 0;

			if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET))
				continue;

			__clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
			write_lvb = 1;
		}
	}

	if (lvb_gen == start_gen) {
		/*
		 * The lvb already reflects our generation; drop submissions
		 * that belong to older generations.
		 */
		for (i = 0; i < recover_size; i++) {
			if (!ls->ls_recover_submit[i])
				continue;
			if (ls->ls_recover_submit[i] < lvb_gen)
				ls->ls_recover_submit[i] = 0;
		}
	} else if (lvb_gen < start_gen) {
		/*
		 * The lvb is behind our generation: publish the jids we saw
		 * fail in the bitmap and bump the lvb generation.
		 */
		for (i = 0; i < recover_size; i++) {
			if (!ls->ls_recover_submit[i])
				continue;
			if (ls->ls_recover_submit[i] < start_gen) {
				ls->ls_recover_submit[i] = 0;
				__set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
			}
		}
		/* the generation number in the lvb is written below */
		write_lvb = 1;
	} else {
		/*
		 * lvb_gen > start_gen: another node has seen a newer
		 * generation, so our own recovery info is out of date;
		 * leave the lvb alone.
		 */
	}
	spin_unlock(&ls->ls_recover_spin);

	if (write_lvb) {
		control_lvb_write(ls, start_gen, ls->ls_lvb_bits);
		flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK;
	} else {
		flags = DLM_LKF_CONVERT;
	}

	error = control_lock(sdp, DLM_LOCK_NL, flags);
	if (error) {
		fs_err(sdp, "control lock NL error %d\n", error);
		return;
	}

	/*
	 * Every node sees the jid bits and calls gfs2_recover_set() for the
	 * marked journals; whichever node wins the journal lock does the
	 * actual recovery, the others report back LM_RD_GAVEUP.  Eventually
	 * all bits are cleared and everyone clears DFL_BLOCK_LOCKS.
	 */
	for (i = 0; i < recover_size; i++) {
		if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) {
			fs_info(sdp, "recover generation %u jid %d\n",
				start_gen, i);
			gfs2_recover_set(sdp, i);
			recover_set++;
		}
	}
	if (recover_set)
		return;

	/*
	 * No more jid bits set in the lvb, all recovery is done: unblock
	 * locks, unless a new recover_prep callback blocked them again
	 * while we were working above.
	 */
	spin_lock(&ls->ls_recover_spin);
	if (ls->ls_recover_block == block_gen &&
	    ls->ls_recover_start == start_gen) {
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "recover generation %u done\n", start_gen);
		gfs2_glock_thaw(sdp);
	} else {
		fs_info(sdp, "recover generation %u block2 %u %u\n",
			start_gen, block_gen, ls->ls_recover_block);
		spin_unlock(&ls->ls_recover_spin);
	}
}
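
/*
 * control_mount - mount-time recovery coordination
 *
 * Determines whether this node is the first mounter (by trying to take
 * mounted_lock in EX) and, if not, waits until the control lvb shows that
 * all pending journal recovery has completed before the mount proceeds.
 */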
static int control_mount(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t start_gen, block_gen, mount_gen, lvb_gen;
	int mounted_mode;
	int retries = 0;
	int error;

	memset(&ls->ls_mounted_lksb, 0, sizeof(struct dlm_lksb));
	memset(&ls->ls_control_lksb, 0, sizeof(struct dlm_lksb));
	memset(&ls->ls_control_lvb, 0, GDLM_LVB_SIZE);
	ls->ls_control_lksb.sb_lvbptr = ls->ls_control_lvb;
	init_completion(&ls->ls_sync_wait);

	set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_VALBLK);
	if (error) {
		fs_err(sdp, "control_mount control_lock NL error %d\n", error);
		return error;
	}

	error = mounted_lock(sdp, DLM_LOCK_NL, 0);
	if (error) {
		fs_err(sdp, "control_mount mounted_lock NL error %d\n", error);
		control_unlock(sdp);
		return error;
	}
	mounted_mode = DLM_LOCK_NL;

restart:
	if (retries++ && signal_pending(current)) {
		error = -EINTR;
		goto fail;
	}

	/*
	 * We always restart with both locks in NL.  control_lock is already
	 * back in NL by the time we loop, so only mounted_lock needs to be
	 * demoted here.
	 */
	if (mounted_mode != DLM_LOCK_NL) {
		error = mounted_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
		if (error)
			goto fail;
		mounted_mode = DLM_LOCK_NL;
	}

	/*
	 * Other nodes need to do some work before we can proceed; give them
	 * a moment to make progress before retrying.
	 */
	msleep_interruptible(500);

	/*
	 * Acquire control_lock in EX and mounted_lock in either EX or PR.
	 * Holding control_lock in EX lets us read the lvb; holding
	 * mounted_lock in EX means no other node is mounted, i.e. we are
	 * the first mounter.
	 */
	error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE|DLM_LKF_VALBLK);
	if (error == -EAGAIN) {
		goto restart;
	} else if (error) {
		fs_err(sdp, "control_mount control_lock EX error %d\n", error);
		goto fail;
	}

	/*
	 * Spectator mounts cannot be the first mounter and never hold
	 * mounted_lock above NL.
	 */
	if (sdp->sd_args.ar_spectator)
		goto locks_done;

	error = mounted_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
	if (!error) {
		mounted_mode = DLM_LOCK_EX;
		goto locks_done;
	} else if (error != -EAGAIN) {
		fs_err(sdp, "control_mount mounted_lock EX error %d\n", error);
		goto fail;
	}

	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
	if (!error) {
		mounted_mode = DLM_LOCK_PR;
		goto locks_done;
	} else {
		/* not even -EAGAIN should happen here */
		fs_err(sdp, "control_mount mounted_lock PR error %d\n", error);
		goto fail;
	}

locks_done:
	/*
	 * If we got mounted_lock in EX above, we are the first mounter.
	 * Otherwise we need to wait for the control lvb to be updated by
	 * other mounted nodes to cover our mount generation.
	 */
	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);

	if (lvb_gen == 0xFFFFFFFF) {
		/* a generation of all 1's means the control lock is disabled */
		fs_err(sdp, "control_mount control_lock disabled\n");
		error = -EINVAL;
		goto fail;
	}

	if (mounted_mode == DLM_LOCK_EX) {
		/* first mounter; keep both locks in EX during first recovery */
		spin_lock(&ls->ls_recover_spin);
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
		set_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "first mounter control generation %u\n", lvb_gen);
		return 0;
	}

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
	if (error)
		goto fail;

	/*
	 * We are not the first mounter.  If the lvb bitmap shows journals
	 * still marked as needing recovery, wait until the other nodes have
	 * cleared them before letting the mount proceed.
	 */
	if (!all_jid_bits_clear(ls->ls_lvb_bits)) {
		/* journals need recovery, wait until all are clear */
		fs_info(sdp, "control_mount wait for journal recovery\n");
		goto restart;
	}

	spin_lock(&ls->ls_recover_spin);
	block_gen = ls->ls_recover_block;
	start_gen = ls->ls_recover_start;
	mount_gen = ls->ls_recover_mount;

	if (lvb_gen < mount_gen) {
		/* wait for mounted nodes to update the lvb generation to
		   (at least) our mount generation */
		if (sdp->sd_args.ar_spectator) {
			fs_info(sdp, "Recovery is required. Waiting for a "
				"non-spectator to mount.\n");
			msleep_interruptible(1000);
		} else {
			fs_info(sdp, "control_mount wait1 block %u start %u "
				"mount %u lvb %u flags %lx\n", block_gen,
				start_gen, mount_gen, lvb_gen,
				ls->ls_recover_flags);
		}
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	if (lvb_gen != start_gen) {
		/* wait for mounted nodes to update the lvb to the latest
		   recovery generation */
		fs_info(sdp, "control_mount wait2 block %u start %u mount %u "
			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	if (block_gen == start_gen) {
		/* dlm recovery is in progress, wait for it to finish */
		fs_info(sdp, "control_mount wait3 block %u start %u mount %u "
			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
	memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t));
	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
	spin_unlock(&ls->ls_recover_spin);
	return 0;

fail:
	mounted_unlock(sdp);
	control_unlock(sdp);
	return error;
}
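
/*
 * control_first_done - the first mounter has recovered all journals
 *
 * Publishes a fresh recovery generation with all jid bits clear in the
 * control lvb, drops mounted_lock from EX to PR, and gives up
 * first-mounter status.
 */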
static int control_first_done(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t start_gen, block_gen;
	int error;

restart:
	spin_lock(&ls->ls_recover_spin);
	start_gen = ls->ls_recover_start;
	block_gen = ls->ls_recover_block;

	if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags) ||
	    !test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	    !test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		/* sanity check, should not happen */
		fs_err(sdp, "control_first_done start %u block %u flags %lx\n",
		       start_gen, block_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		control_unlock(sdp);
		return -1;
	}

	if (start_gen == block_gen) {
		/*
		 * Wait for the end of a dlm recovery cycle before switching
		 * away from first mounter recovery.  Any recover_slot
		 * callbacks between recover_prep and the next recover_done
		 * can be ignored: we are still the first mounter, and any
		 * failed nodes have not fully mounted, so their journals
		 * don't need recovery.
		 */
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "control_first_done wait gen %u\n", start_gen);

		wait_on_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY,
			    TASK_UNINTERRUPTIBLE);
		goto restart;
	}

	clear_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
	set_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags);
	memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t));
	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
	spin_unlock(&ls->ls_recover_spin);

	memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE);
	control_lvb_write(ls, start_gen, ls->ls_lvb_bits);

	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT);
	if (error)
		fs_err(sdp, "control_first_done mounted PR error %d\n", error);

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT|DLM_LKF_VALBLK);
	if (error)
		fs_err(sdp, "control_first_done control NL error %d\n", error);

	return error;
}
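
/*
 * Expand the recovery arrays in increments of RECOVER_SIZE_INC to
 * accommodate the largest slot number seen.  (dlm slot numbers start at 1,
 * gfs2 jids start at 0, so jid = slot - 1.)
 */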
#define RECOVER_SIZE_INC 16

static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
			    int num_slots)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t *submit = NULL;
	uint32_t *result = NULL;
	uint32_t old_size, new_size;
	int i, max_jid;

	if (!ls->ls_lvb_bits) {
		ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
		if (!ls->ls_lvb_bits)
			return -ENOMEM;
	}

	max_jid = 0;
	for (i = 0; i < num_slots; i++) {
		if (max_jid < slots[i].slot - 1)
			max_jid = slots[i].slot - 1;
	}

	old_size = ls->ls_recover_size;
	new_size = old_size;
	while (new_size < max_jid + 1)
		new_size += RECOVER_SIZE_INC;
	if (new_size == old_size)
		return 0;

	submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
	result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
	if (!submit || !result) {
		kfree(submit);
		kfree(result);
		return -ENOMEM;
	}

	spin_lock(&ls->ls_recover_spin);
	memcpy(submit, ls->ls_recover_submit, old_size * sizeof(uint32_t));
	memcpy(result, ls->ls_recover_result, old_size * sizeof(uint32_t));
	kfree(ls->ls_recover_submit);
	kfree(ls->ls_recover_result);
	ls->ls_recover_submit = submit;
	ls->ls_recover_result = result;
	ls->ls_recover_size = new_size;
	spin_unlock(&ls->ls_recover_spin);
	return 0;
}

static void free_recover_size(struct lm_lockstruct *ls)
{
	kfree(ls->ls_lvb_bits);
	kfree(ls->ls_recover_submit);
	kfree(ls->ls_recover_result);
	ls->ls_recover_submit = NULL;
	ls->ls_recover_result = NULL;
	ls->ls_recover_size = 0;
	ls->ls_lvb_bits = NULL;
}
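
/* Called by the dlm before it begins lock recovery. */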
static void gdlm_recover_prep(void *arg)
{
	struct gfs2_sbd *sdp = arg;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	spin_lock(&ls->ls_recover_spin);
	ls->ls_recover_block = ls->ls_recover_start;
	set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);

	if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	    test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	spin_unlock(&ls->ls_recover_spin);
}
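
/* Called by the dlm during recovery for each failed lockspace member; the
 * slot identifies the jid of the journal needing recovery. */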
static void gdlm_recover_slot(void *arg, struct dlm_slot *slot)
{
	struct gfs2_sbd *sdp = arg;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int jid = slot->slot - 1;

	spin_lock(&ls->ls_recover_spin);
	if (ls->ls_recover_size < jid + 1) {
		fs_err(sdp, "recover_slot jid %d gen %u short size %d\n",
		       jid, ls->ls_recover_block, ls->ls_recover_size);
		spin_unlock(&ls->ls_recover_spin);
		return;
	}

	if (ls->ls_recover_submit[jid]) {
		fs_info(sdp, "recover_slot jid %d gen %u prev %u\n",
			jid, ls->ls_recover_block, ls->ls_recover_submit[jid]);
	}
	ls->ls_recover_submit[jid] = ls->ls_recover_block;
	spin_unlock(&ls->ls_recover_spin);
}
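
/* Called by the dlm after lock recovery completes, with the new slot table
 * and recovery generation. */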
static void gdlm_recover_done(void *arg, struct dlm_slot *slots, int num_slots,
			      int our_slot, uint32_t generation)
{
	struct gfs2_sbd *sdp = arg;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	/* ensure the recover_submit/result arrays are large enough */
	set_recover_size(sdp, slots, num_slots);

	spin_lock(&ls->ls_recover_spin);
	ls->ls_recover_start = generation;

	if (!ls->ls_recover_mount) {
		ls->ls_recover_mount = generation;
		ls->ls_jid = our_slot - 1;
	}

	if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
		queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);

	clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
	smp_mb__after_atomic();
	wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY);
	spin_unlock(&ls->ls_recover_spin);
}
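
/* Called with the outcome of a journal recovery attempt for @jid. */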
static void gdlm_recovery_result(struct gfs2_sbd *sdp, unsigned int jid,
				 unsigned int result)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		return;

	/* don't care about the recovery of our own journal during mount */
	if (jid == ls->ls_jid)
		return;

	spin_lock(&ls->ls_recover_spin);
	if (test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	if (ls->ls_recover_size < jid + 1) {
		fs_err(sdp, "recovery_result jid %d short size %d\n",
		       jid, ls->ls_recover_size);
		spin_unlock(&ls->ls_recover_spin);
		return;
	}

	fs_info(sdp, "recover jid %d result %s\n", jid,
		result == LM_RD_GAVEUP ? "busy" : "success");

	ls->ls_recover_result[jid] = result;

	/*
	 * GAVEUP means another node is recovering the journal; delay the
	 * next attempt to give it time to finish.
	 */
	if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
		queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work,
				   result == LM_RD_GAVEUP ? HZ : 0);
	spin_unlock(&ls->ls_recover_spin);
}

static const struct dlm_lockspace_ops gdlm_lockspace_ops = {
	.recover_prep = gdlm_recover_prep,
	.recover_slot = gdlm_recover_slot,
	.recover_done = gdlm_recover_done,
};
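
/* Join the dlm lockspace for this filesystem and run the mount-time
 * recovery handshake. */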
static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	char cluster[GFS2_LOCKNAME_LEN];
	const char *fsname;
	uint32_t flags;
	int error, ops_result;

	/*
	 * initialize everything
	 */
	INIT_DELAYED_WORK(&sdp->sd_control_work, gfs2_control_func);
	spin_lock_init(&ls->ls_recover_spin);
	ls->ls_recover_flags = 0;
	ls->ls_recover_mount = 0;
	ls->ls_recover_start = 0;
	ls->ls_recover_block = 0;
	ls->ls_recover_size = 0;
	ls->ls_recover_submit = NULL;
	ls->ls_recover_result = NULL;
	ls->ls_lvb_bits = NULL;

	error = set_recover_size(sdp, NULL, 0);
	if (error)
		goto fail;

	/*
	 * The table name is "clustername:fsname"; split it at the colon.
	 */
	fsname = strchr(table, ':');
	if (!fsname) {
		fs_info(sdp, "no fsname found\n");
		error = -EINVAL;
		goto fail_free;
	}
	memset(cluster, 0, sizeof(cluster));
	memcpy(cluster, table, strlen(table) - strlen(fsname));
	fsname++;

	flags = DLM_LSFL_FS | DLM_LSFL_NEWEXCL;

	/*
	 * create/join lockspace
	 */
	error = dlm_new_lockspace(fsname, cluster, flags, GDLM_LVB_SIZE,
				  &gdlm_lockspace_ops, sdp, &ops_result,
				  &ls->ls_dlm);
	if (error) {
		fs_err(sdp, "dlm_new_lockspace error %d\n", error);
		goto fail_free;
	}

	if (ops_result < 0) {
		/*
		 * dlm does not support the ops callbacks;
		 * continue without them.
		 */
		fs_info(sdp, "dlm lockspace ops not used\n");
		free_recover_size(ls);
		set_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags);
		return 0;
	}

	if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags)) {
		fs_err(sdp, "dlm lockspace ops disallow jid preset\n");
		error = -EINVAL;
		goto fail_release;
	}

	/*
	 * control_mount() uses control_lock to determine the first mounter,
	 * and for later mounts, waits for any recoveries to be cleared.
	 */
	error = control_mount(sdp);
	if (error) {
		fs_err(sdp, "mount control error %d\n", error);
		goto fail_release;
	}

	ls->ls_first = !!test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
	clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
	smp_mb__after_atomic();
	wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
	return 0;

fail_release:
	dlm_release_lockspace(ls->ls_dlm, 2);
fail_free:
	free_recover_size(ls);
fail:
	return error;
}

static void gdlm_first_done(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int error;

	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		return;

	error = control_first_done(sdp);
	if (error)
		fs_err(sdp, "mount first_done error %d\n", error);
}

static void gdlm_unmount(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		goto release;

	/* wait for gfs2_control_wq to be done with this fs */
	spin_lock(&ls->ls_recover_spin);
	set_bit(DFL_UNMOUNT, &ls->ls_recover_flags);
	spin_unlock(&ls->ls_recover_spin);
	flush_delayed_work(&sdp->sd_control_work);

	/* mounted_lock and control_lock will be purged in dlm recovery */
release:
	if (ls->ls_dlm) {
		dlm_release_lockspace(ls->ls_dlm, 2);
		ls->ls_dlm = NULL;
	}

	free_recover_size(ls);
}

static const match_table_t dlm_tokens = {
	{ Opt_jid, "jid=%d"},
	{ Opt_id, "id=%d"},
	{ Opt_first, "first=%d"},
	{ Opt_nodir, "nodir=%d"},
	{ Opt_err, NULL },
};

const struct lm_lockops gfs2_dlm_ops = {
	.lm_proto_name = "lock_dlm",
	.lm_mount = gdlm_mount,
	.lm_first_done = gdlm_first_done,
	.lm_recovery_result = gdlm_recovery_result,
	.lm_unmount = gdlm_unmount,
	.lm_put_lock = gdlm_put_lock,
	.lm_lock = gdlm_lock,
	.lm_cancel = gdlm_cancel,
	.lm_tokens = &dlm_tokens,
};