This source file includes the following definitions:
- gfs2_struct2blk
- gfs2_remove_from_ail
- gfs2_ail1_start_one
- gfs2_ail1_flush
- gfs2_ail1_start
- gfs2_ail1_empty_one
- gfs2_ail1_empty
- gfs2_ail1_wait
- gfs2_ail2_empty_one
- ail2_empty
- gfs2_log_release
- gfs2_log_reserve
- log_distance
- calc_reserved
- current_tail
- log_pull_tail
- log_flush_wait
- ip_cmp
- gfs2_ordered_write
- gfs2_ordered_wait
- gfs2_ordered_del_inode
- gfs2_add_revoke
- gfs2_glock_remove_revoke
- gfs2_write_revokes
- gfs2_write_log_header
- log_write_header
- gfs2_log_flush
- gfs2_merge_trans
- log_refund
- gfs2_log_commit
- gfs2_log_shutdown
- gfs2_jrnl_flush_reqd
- gfs2_ail_flush_reqd
- gfs2_logd
/*
 * GFS2 log/journal handling: log block accounting, AIL (active items
 * list) management, revokes, log headers, and the logd flush daemon.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"
#include "trace_gfs2.h"

/**
 * gfs2_struct2blk - compute number of log blocks needed for structures
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain
 * number of structures of a certain size. The first block holds a
 * gfs2_log_descriptor header; continuation blocks hold a gfs2_meta_header.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */
unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
			     unsigned int ssize)
{
	unsigned int blks;
	unsigned int first, second;

	blks = 1;
	first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;

	if (nstruct > first) {
		second = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_meta_header)) / ssize;
		blks += DIV_ROUND_UP(nstruct - first, second);
	}

	return blks;
}

/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function
 */
static void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
	bd->bd_tr = NULL;
	list_del_init(&bd->bd_ail_st_list);
	list_del_init(&bd->bd_ail_gl_list);
	atomic_dec(&bd->bd_gl->gl_ail_count);
	brelse(bd->bd_bh);
}

/**
 * gfs2_ail1_start_one - Start I/O on a transaction's ail1 list
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @tr: the transaction whose ail1 buffers to write back
 * @withdraw: gets set to true on an unrecoverable I/O error
 *
 * Drops and reacquires sd_ail_lock around writeback. Returns 1 if the
 * caller must restart its list walk because the lock was dropped.
 */
static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
			       struct writeback_control *wbc,
			       struct gfs2_trans *tr,
			       bool *withdraw)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
	struct gfs2_glock *gl = NULL;
	struct address_space *mapping;
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_tr == tr);

		if (!buffer_busy(bh)) {
			if (!buffer_uptodate(bh) &&
			    !test_and_set_bit(SDF_AIL1_IO_ERROR,
					      &sdp->sd_flags)) {
				gfs2_io_error_bh(sdp, bh);
				*withdraw = true;
			}
			list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
			continue;
		}

		if (!buffer_dirty(bh))
			continue;
		if (gl == bd->bd_gl)
			continue;
		gl = bd->bd_gl;
		list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
		mapping = bh->b_page->mapping;
		if (!mapping)
			continue;
		spin_unlock(&sdp->sd_ail_lock);
		generic_writepages(mapping, wbc);
		spin_lock(&sdp->sd_ail_lock);
		if (wbc->nr_to_write <= 0)
			break;
		return 1;
	}

	return 0;
}

/**
 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, according to the limits in the
 * writeback control structure
 */
void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
	struct list_head *head = &sdp->sd_ail1_list;
	struct gfs2_trans *tr;
	struct blk_plug plug;
	bool withdraw = false;

	trace_gfs2_ail_flush(sdp, wbc, 1);
	blk_start_plug(&plug);
	spin_lock(&sdp->sd_ail_lock);
restart:
	list_for_each_entry_reverse(tr, head, tr_list) {
		if (wbc->nr_to_write <= 0)
			break;
		if (gfs2_ail1_start_one(sdp, wbc, tr, &withdraw))
			goto restart;
	}
	spin_unlock(&sdp->sd_ail_lock);
	blk_finish_plug(&plug);
	if (withdraw)
		gfs2_lm_withdraw(sdp, NULL);
	trace_gfs2_ail_flush(sdp, wbc, 0);
}

/**
 * gfs2_ail1_start - start writeback of all ail1 entries
 * @sdp: The superblock
 */
static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	return gfs2_ail1_flush(sdp, &wbc);
}

/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @tr: the transaction
 * @withdraw: gets set to true on an unrecoverable I/O error
 */
static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
				bool *withdraw)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;
		gfs2_assert(sdp, bd->bd_tr == tr);
		if (buffer_busy(bh))
			continue;
		if (!buffer_uptodate(bh) &&
		    !test_and_set_bit(SDF_AIL1_IO_ERROR, &sdp->sd_flags)) {
			gfs2_io_error_bh(sdp, bh);
			*withdraw = true;
		}
		list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
	}
}

/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 *
 * Tries to empty the ail1 lists, starting with the oldest first
 */
static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr, *s;
	int oldest_tr = 1;
	int ret;
	bool withdraw = false;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
		gfs2_ail1_empty_one(sdp, tr, &withdraw);
		if (list_empty(&tr->tr_ail1_list) && oldest_tr)
			list_move(&tr->tr_list, &sdp->sd_ail2_list);
		else
			oldest_tr = 0;
	}
	ret = list_empty(&sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	if (withdraw)
		gfs2_lm_withdraw(sdp, "fatal: I/O error(s)\n");

	return ret;
}
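
/*
 * gfs2_ail1_wait - wait for I/O on one locked ail1 buffer, dropping
 * sd_ail_lock for the duration of the wait. (Descriptive comment added;
 * not part of the original file.)
 */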
static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
			bh = bd->bd_bh;
			if (!buffer_locked(bh))
				continue;
			get_bh(bh);
			spin_unlock(&sdp->sd_ail_lock);
			wait_on_buffer(bh);
			brelse(bh);
			return;
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
}

/**
 * gfs2_ail2_empty_one - Remove all entries from a transaction's ail2 list
 * @sdp: the filesystem
 * @tr: the transaction
 */
static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &tr->tr_ail2_list;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->prev, struct gfs2_bufdata,
				bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_tr == tr);
		gfs2_remove_from_ail(bd);
	}
}
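
/*
 * ail2_empty - free every ail2 transaction whose first log block lies
 * between the old tail and @new_tail, taking journal wraparound into
 * account. (Descriptive comment added; not part of the original file.)
 */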
static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct gfs2_trans *tr, *safe;
	unsigned int old_tail = sdp->sd_log_tail;
	int wrap = (new_tail < old_tail);
	int a, b, rm;

	spin_lock(&sdp->sd_ail_lock);

	list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
		a = (old_tail <= tr->tr_first);
		b = (tr->tr_first < new_tail);
		rm = (wrap) ? (a || b) : (a && b);
		if (!rm)
			continue;

		gfs2_ail2_empty_one(sdp, tr);
		list_del(&tr->tr_list);
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
		kfree(tr);
	}

	spin_unlock(&sdp->sd_ail_lock);
}

/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 */
void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{
	atomic_add(blks, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, blks);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
				  sdp->sd_jdesc->jd_blocks);
	up_read(&sdp->sd_log_flush_lock);
}

/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. That's
 * due to the fact that there is a small number of header blocks
 * associated with each log flush. The exact number can't be known until
 * flush time, so we ensure that we have just enough free blocks at all
 * times to avoid running out during a log flush.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 *
 * Returns: errno
 */
int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
	int ret = 0;
	unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
	unsigned wanted = blks + reserved_blks;
	DEFINE_WAIT(wait);
	int did_wait = 0;
	unsigned int free_blocks;

	if (gfs2_assert_warn(sdp, blks) ||
	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;
	atomic_add(blks, &sdp->sd_log_blks_needed);
retry:
	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	if (unlikely(free_blocks <= wanted)) {
		do {
			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
						  TASK_UNINTERRUPTIBLE);
			wake_up(&sdp->sd_logd_waitq);
			did_wait = 1;
			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
				io_schedule();
			free_blocks = atomic_read(&sdp->sd_log_blks_free);
		} while (free_blocks <= wanted);
		finish_wait(&sdp->sd_log_waitq, &wait);
	}
	atomic_inc(&sdp->sd_reserving_log);
	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
			   free_blocks - blks) != free_blocks) {
		if (atomic_dec_and_test(&sdp->sd_reserving_log))
			wake_up(&sdp->sd_reserving_log_wait);
		goto retry;
	}
	atomic_sub(blks, &sdp->sd_log_blks_needed);
	trace_gfs2_log_blocks(sdp, -blks);

	/*
	 * If we waited, then so might others, wake them up _after_ we get
	 * our share of the log.
	 */
	if (unlikely(did_wait))
		wake_up(&sdp->sd_log_waitq);

	down_read(&sdp->sd_log_flush_lock);
	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
		gfs2_log_release(sdp, blks);
		ret = -EROFS;
	}
	if (atomic_dec_and_test(&sdp->sd_reserving_log))
		wake_up(&sdp->sd_reserving_log_wait);
	return ret;
}

/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 * Compute the distance (in the journal direction) between two
 * blocks in the journal
 *
 * Returns: the distance in blocks
 */
static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
					unsigned int older)
{
	int dist;

	dist = newer - older;
	if (dist < 0)
		dist += sdp->sd_jdesc->jd_blocks;

	return dist;
}

/**
 * calc_reserved - Calculate the number of blocks to reserve when
 *                 refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 *
 * Count the metadata and data buffers of the current transaction
 * (new buffers minus removed ones), add the log descriptor blocks
 * needed to describe them, add the blocks needed for any committed
 * revokes, and finally add one block for the log header.
 *
 * Returns: the number of blocks reserved
 */
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
	unsigned int reserved = 0;
	unsigned int mbuf;
	unsigned int dbuf;
	struct gfs2_trans *tr = sdp->sd_log_tr;

	if (tr) {
		mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
		dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
		reserved = mbuf + dbuf;
		/* Account for the log descriptor blocks */
		reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
		reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
	}

	if (sdp->sd_log_commited_revoke > 0)
		reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
					    sizeof(u64));
	/* One for the overall header */
	if (reserved)
		reserved++;
	return reserved;
}
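
/*
 * current_tail - find the block number of the current log tail: the first
 * log block of the oldest transaction on the ail1 list, or the log head
 * if the list is empty. (Descriptive comment added; not part of the
 * original file.)
 */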
static unsigned int current_tail(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	unsigned int tail;

	spin_lock(&sdp->sd_ail_lock);

	if (list_empty(&sdp->sd_ail1_list)) {
		tail = sdp->sd_log_head;
	} else {
		tr = list_entry(sdp->sd_ail1_list.prev, struct gfs2_trans,
				tr_list);
		tail = tr->tr_first;
	}

	spin_unlock(&sdp->sd_ail_lock);

	return tail;
}
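
/*
 * log_pull_tail - advance the log tail to @new_tail: empty the ail2
 * transactions the tail has passed and return the reclaimed distance to
 * the free block pool. (Descriptive comment added; not part of the
 * original file.)
 */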
static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

	ail2_empty(sdp, new_tail);

	atomic_add(dist, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, dist);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);

	sdp->sd_log_tail = new_tail;
}
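
/* Wait until all log I/O currently in flight has completed. */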
void log_flush_wait(struct gfs2_sbd *sdp)
{
	DEFINE_WAIT(wait);

	if (atomic_read(&sdp->sd_log_in_flight)) {
		do {
			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (atomic_read(&sdp->sd_log_in_flight))
				io_schedule();
		} while (atomic_read(&sdp->sd_log_in_flight));
		finish_wait(&sdp->sd_log_flush_wait, &wait);
	}
}
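
/* Order inodes by disk address, for sorting the ordered-write list. */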
static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_inode *ipa, *ipb;

	ipa = list_entry(a, struct gfs2_inode, i_ordered);
	ipb = list_entry(b, struct gfs2_inode, i_ordered);

	if (ipa->i_no_addr < ipb->i_no_addr)
		return -1;
	if (ipa->i_no_addr > ipb->i_no_addr)
		return 1;
	return 0;
}
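
/*
 * Start writeback of the dirty pages of every inode on the ordered-write
 * list, in disk-address order, dropping sd_ordered_lock around each
 * filemap_fdatawrite() call. (Descriptive comment added; not part of the
 * original file.)
 */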
static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	LIST_HEAD(written);

	spin_lock(&sdp->sd_ordered_lock);
	list_sort(NULL, &sdp->sd_log_ordered, &ip_cmp);
	while (!list_empty(&sdp->sd_log_ordered)) {
		ip = list_entry(sdp->sd_log_ordered.next, struct gfs2_inode, i_ordered);
		if (ip->i_inode.i_mapping->nrpages == 0) {
			test_and_clear_bit(GIF_ORDERED, &ip->i_flags);
			list_del(&ip->i_ordered);
			continue;
		}
		list_move(&ip->i_ordered, &written);
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawrite(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	list_splice(&written, &sdp->sd_log_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}
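
/* Wait for completion of the ordered writes started above, emptying the
 * ordered-write list as we go. (Descriptive comment added.) */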
static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	spin_lock(&sdp->sd_ordered_lock);
	while (!list_empty(&sdp->sd_log_ordered)) {
		ip = list_entry(sdp->sd_log_ordered.next, struct gfs2_inode, i_ordered);
		list_del(&ip->i_ordered);
		WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
		if (ip->i_inode.i_mapping->nrpages == 0)
			continue;
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawait(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	spin_unlock(&sdp->sd_ordered_lock);
}
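
/* Remove an inode from the ordered-write list. */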
void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	spin_lock(&sdp->sd_ordered_lock);
	if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
		list_del(&ip->i_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}
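
/*
 * Queue a revoke for a buffer: detach the buffer head, pull the entry
 * off the ail lists, and add it to sd_log_revokes. The glock keeps an
 * extra reference (and GLF_LFLUSH set) while it has outstanding revokes.
 * (Descriptive comment added; not part of the original file.)
 */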
void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct buffer_head *bh = bd->bd_bh;
	struct gfs2_glock *gl = bd->bd_gl;

	sdp->sd_log_num_revoke++;
	if (atomic_inc_return(&gl->gl_revokes) == 1)
		gfs2_glock_hold(gl);
	bh->b_private = NULL;
	bd->bd_blkno = bh->b_blocknr;
	gfs2_remove_from_ail(bd); /* drops ref on bh */
	bd->bd_bh = NULL;
	set_bit(GLF_LFLUSH, &gl->gl_flags);
	list_add(&bd->bd_list, &sdp->sd_log_revokes);
}
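
/* Drop a glock's revoke count; on the last revoke, put the reference. */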
void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
{
	if (atomic_dec_return(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		gfs2_glock_queue_put(gl);
	}
}
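
/*
 * Scan the ail2 lists for buffers that still need revokes and queue as
 * many as the available log space allows, reserving log blocks for the
 * revoke descriptor and header as needed. (Descriptive comment added;
 * not part of the original file.)
 */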
void gfs2_write_revokes(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd, *tmp;
	int have_revokes = 0;
	int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);

	gfs2_ail1_empty(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) {
			if (list_empty(&bd->bd_list)) {
				have_revokes = 1;
				goto done;
			}
		}
	}
done:
	spin_unlock(&sdp->sd_ail_lock);
	if (have_revokes == 0)
		return;
	while (sdp->sd_log_num_revoke > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
	max_revokes -= sdp->sd_log_num_revoke;
	if (!sdp->sd_log_num_revoke) {
		atomic_dec(&sdp->sd_log_blks_free);
		/* If no blocks have been reserved, we need to also
		 * reserve a block for the header */
		if (!sdp->sd_log_blks_reserved)
			atomic_dec(&sdp->sd_log_blks_free);
	}
	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) {
			if (max_revokes == 0)
				goto out_of_blocks;
			if (!list_empty(&bd->bd_list))
				continue;
			gfs2_add_revoke(sdp, bd);
			max_revokes--;
		}
	}
out_of_blocks:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	if (!sdp->sd_log_num_revoke) {
		atomic_inc(&sdp->sd_log_blks_free);
		if (!sdp->sd_log_blks_reserved)
			atomic_inc(&sdp->sd_log_blks_free);
	}
}

/**
 * gfs2_write_log_header - Write a journal log header buffer at lblock
 * @sdp: The GFS2 superblock
 * @jd: journal descriptor of the journal to which we are writing
 * @seq: sequence number
 * @tail: tail of the log
 * @lblock: value for lh_blkno (block number relative to start of journal)
 * @flags: log header flags GFS2_LOG_HEAD_*
 * @op_flags: flags to pass to the bio
 */
void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
			   u64 seq, u32 tail, u32 lblock, u32 flags,
			   int op_flags)
{
	struct gfs2_log_header *lh;
	u32 hash, crc;
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct timespec64 tv;
	struct super_block *sb = sdp->sd_vfs;
	u64 dblock;

	lh = page_address(page);
	clear_page(lh);

	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.__pad0 = cpu_to_be64(0);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	lh->lh_sequence = cpu_to_be64(seq);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(lblock);
	hash = ~crc32(~0, lh, LH_V1_SIZE);
	lh->lh_hash = cpu_to_be32(hash);

	ktime_get_coarse_real_ts64(&tv);
	lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
	lh->lh_sec = cpu_to_be64(tv.tv_sec);
	if (!list_empty(&jd->extent_list))
		dblock = gfs2_log_bmap(sdp);
	else {
		int ret = gfs2_lblk_to_dblk(jd->jd_inode, lblock, &dblock);
		if (gfs2_assert_withdraw(sdp, ret == 0))
			return;
	}
	lh->lh_addr = cpu_to_be64(dblock);
	lh->lh_jinode = cpu_to_be64(GFS2_I(jd->jd_inode)->i_no_addr);

	/* We may only write local statfs, quota, etc., when writing to our
	   own journal. The values are left 0 when recovering a journal
	   different from our own. */
	if (!(flags & GFS2_LOG_HEAD_RECOVERY)) {
		lh->lh_statfs_addr =
			cpu_to_be64(GFS2_I(sdp->sd_sc_inode)->i_no_addr);
		lh->lh_quota_addr =
			cpu_to_be64(GFS2_I(sdp->sd_qc_inode)->i_no_addr);

		spin_lock(&sdp->sd_statfs_spin);
		lh->lh_local_total = cpu_to_be64(l_sc->sc_total);
		lh->lh_local_free = cpu_to_be64(l_sc->sc_free);
		lh->lh_local_dinodes = cpu_to_be64(l_sc->sc_dinodes);
		spin_unlock(&sdp->sd_statfs_spin);
	}

	BUILD_BUG_ON(offsetof(struct gfs2_log_header, lh_crc) != LH_V1_SIZE);

	crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
		     sb->s_blocksize - LH_V1_SIZE - 4);
	lh->lh_crc = cpu_to_be32(crc);

	gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags);
	log_flush_wait(sdp);
}

/**
 * log_write_header - Get and initialize a journal header buffer
 * @sdp: The GFS2 superblock
 * @flags: The log header flags, including log header origin
 */
static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
	unsigned int tail;
	int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
	tail = current_tail(sdp);

	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
		gfs2_ordered_wait(sdp);
		log_flush_wait(sdp);
		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
	}
	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++, tail,
			      sdp->sd_log_flush_head, flags, op_flags);

	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail);
}

/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 * @flags: The log header flags: GFS2_LOG_HEAD_FLUSH_* and debug flags
 */
void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
{
	struct gfs2_trans *tr;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	down_write(&sdp->sd_log_flush_lock);

	/* Log might have been flushed while we waited for the flush lock */
	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
		up_write(&sdp->sd_log_flush_lock);
		return;
	}
	trace_gfs2_log_flush(sdp, 1, flags);

	if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	sdp->sd_log_flush_head = sdp->sd_log_head;
	tr = sdp->sd_log_tr;
	if (tr) {
		sdp->sd_log_tr = NULL;
		INIT_LIST_HEAD(&tr->tr_ail1_list);
		INIT_LIST_HEAD(&tr->tr_ail2_list);
		tr->tr_first = sdp->sd_log_flush_head;
		if (unlikely(state == SFS_FROZEN))
			gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new);
	}

	if (unlikely(state == SFS_FROZEN))
		gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp,
			sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

	gfs2_ordered_write(sdp);
	lops_before_commit(sdp, tr);
	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE);

	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
		log_flush_wait(sdp);
		log_write_header(sdp, flags);
	} else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle) {
		atomic_dec(&sdp->sd_log_blks_free);
		trace_gfs2_log_blocks(sdp, -1);
		log_write_header(sdp, flags);
	}
	lops_after_commit(sdp, tr);

	gfs2_log_lock(sdp);
	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_blks_reserved = 0;
	sdp->sd_log_commited_revoke = 0;

	spin_lock(&sdp->sd_ail_lock);
	if (tr && !list_empty(&tr->tr_ail1_list)) {
		list_add(&tr->tr_list, &sdp->sd_ail1_list);
		tr = NULL;
	}
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
		if (!sdp->sd_log_idle) {
			for (;;) {
				gfs2_ail1_start(sdp);
				gfs2_ail1_wait(sdp);
				if (gfs2_ail1_empty(sdp))
					break;
			}
			atomic_dec(&sdp->sd_log_blks_free);
			trace_gfs2_log_blocks(sdp, -1);
			log_write_header(sdp, flags);
			sdp->sd_log_head = sdp->sd_log_flush_head;
		}
		if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			     GFS2_LOG_HEAD_FLUSH_FREEZE))
			gfs2_log_shutdown(sdp);
		if (flags & GFS2_LOG_HEAD_FLUSH_FREEZE)
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}

	trace_gfs2_log_flush(sdp, 0, flags);
	up_write(&sdp->sd_log_flush_lock);

	kfree(tr);
}

/**
 * gfs2_merge_trans - Merge a new transaction into a cached transaction
 * @old: Original transaction to be expanded
 * @new: New transaction to be merged
 */
static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
{
	WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));

	old->tr_num_buf_new += new->tr_num_buf_new;
	old->tr_num_databuf_new += new->tr_num_databuf_new;
	old->tr_num_buf_rm += new->tr_num_buf_rm;
	old->tr_num_databuf_rm += new->tr_num_databuf_rm;
	old->tr_num_revoke += new->tr_num_revoke;

	list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
	list_splice_tail_init(&new->tr_buf, &old->tr_buf);
}
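
/*
 * log_refund - merge the incoming transaction into the active one (or
 * attach it as the new active transaction), recompute the reservation,
 * and return any log blocks no longer needed to the free pool.
 * (Descriptive comment added; not part of the original file.)
 */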
static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int reserved;
	unsigned int unused;
	unsigned int maxres;

	gfs2_log_lock(sdp);

	if (sdp->sd_log_tr) {
		gfs2_merge_trans(sdp->sd_log_tr, tr);
	} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
		gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags));
		sdp->sd_log_tr = tr;
		set_bit(TR_ATTACHED, &tr->tr_flags);
	}

	sdp->sd_log_commited_revoke += tr->tr_num_revoke;
	reserved = calc_reserved(sdp);
	maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
	gfs2_assert_withdraw(sdp, maxres >= reserved);
	unused = maxres - reserved;
	atomic_add(unused, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, unused);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);
	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * We wake up gfs2_logd if the number of pinned blocks exceed thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 *
 * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
 * journal size.
 */
void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	log_refund(sdp, tr);

	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
	    ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
	    atomic_read(&sdp->sd_log_thresh2)))
		wake_up(&sdp->sd_logd_waitq);
}

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 */
void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	sdp->sd_log_flush_head = sdp->sd_log_head;

	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);

	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_tail = sdp->sd_log_head;
}
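
/*
 * True when the pinned blocks plus pending reservations reach thresh1,
 * meaning logd should flush the journal. (Descriptive comment added.)
 */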
static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
{
	return (atomic_read(&sdp->sd_log_pinned) +
		atomic_read(&sdp->sd_log_blks_needed) >=
		atomic_read(&sdp->sd_log_thresh1));
}
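
/*
 * True when an AIL flush has been forced, or when the used log blocks
 * plus pending reservations reach thresh2. (Descriptive comment added.)
 */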
static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);

	if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
		return 1;

	return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
		atomic_read(&sdp->sd_log_thresh2);
}

/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */
int gfs2_logd(void *data)
{
	struct gfs2_sbd *sdp = data;
	unsigned long t = 1;
	DEFINE_WAIT(wait);
	bool did_flush;

	while (!kthread_should_stop()) {

		if (sdp->sd_log_error) {
			gfs2_lm_withdraw(sdp,
					 "GFS2: fsid=%s: error %d: "
					 "withdrawing the file system to "
					 "prevent further damage.\n",
					 sdp->sd_fsname, sdp->sd_log_error);
		}

		did_flush = false;
		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
			gfs2_ail1_empty(sdp);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
						  GFS2_LFC_LOGD_JFLUSH_REQD);
			did_flush = true;
		}

		if (gfs2_ail_flush_reqd(sdp)) {
			gfs2_ail1_start(sdp);
			gfs2_ail1_wait(sdp);
			gfs2_ail1_empty(sdp);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
						  GFS2_LFC_LOGD_AIL_FLUSH_REQD);
			did_flush = true;
		}

		if (!gfs2_ail_flush_reqd(sdp) || did_flush)
			wake_up(&sdp->sd_log_waitq);

		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

		try_to_freeze();

		do {
			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
					TASK_INTERRUPTIBLE);
			if (!gfs2_ail_flush_reqd(sdp) &&
			    !gfs2_jrnl_flush_reqd(sdp) &&
			    !kthread_should_stop())
				t = schedule_timeout(t);
		} while (t && !gfs2_ail_flush_reqd(sdp) &&
			 !gfs2_jrnl_flush_reqd(sdp) &&
			 !kthread_should_stop());
		finish_wait(&sdp->sd_logd_waitq, &wait);
	}

	return 0;
}