This source file includes the following definitions:
- xlog_verify_bno
- xlog_alloc_buffer
- xlog_align
- xlog_do_io
- xlog_bread_noalign
- xlog_bread
- xlog_bwrite
- xlog_header_check_dump
- xlog_header_check_recover
- xlog_header_check_mount
- xlog_recover_iodone
- xlog_find_cycle_start
- xlog_find_verify_cycle
- xlog_find_verify_log_record
- xlog_find_head
- xlog_rseek_logrec_hdr
- xlog_seek_logrec_hdr
- xlog_tail_distance
- xlog_verify_tail
- xlog_verify_head
- xlog_wrap_logbno
- xlog_check_unmount_rec
- xlog_set_state
- xlog_find_tail
- xlog_find_zeroed
- xlog_add_record
- xlog_write_log_records
- xlog_clear_stale_blocks
- xlog_recover_reorder_trans
- xlog_recover_buffer_pass1
- xlog_peek_buffer_cancelled
- xlog_check_buffer_cancelled
- xlog_recover_do_inode_buffer
- xlog_recover_get_buf_lsn
- xlog_recover_validate_buf_type
- xlog_recover_do_reg_buffer
- xlog_recover_do_dquot_buffer
- xlog_recover_buffer_pass2
- xfs_recover_inode_owner_change
- xlog_recover_inode_pass2
- xlog_recover_quotaoff_pass1
- xlog_recover_dquot_pass2
- xlog_recover_efi_pass2
- xlog_recover_efd_pass2
- xlog_recover_rui_pass2
- xlog_recover_rud_pass2
- xfs_cui_copy_format
- xlog_recover_cui_pass2
- xlog_recover_cud_pass2
- xfs_bui_copy_format
- xlog_recover_bui_pass2
- xlog_recover_bud_pass2
- xlog_recover_do_icreate_pass2
- xlog_recover_buffer_ra_pass2
- xlog_recover_inode_ra_pass2
- xlog_recover_dquot_ra_pass2
- xlog_recover_ra_pass2
- xlog_recover_commit_pass1
- xlog_recover_commit_pass2
- xlog_recover_items_pass2
- xlog_recover_commit_trans
- xlog_recover_add_item
- xlog_recover_add_to_cont_trans
- xlog_recover_add_to_trans
- xlog_recover_free_trans
- xlog_recovery_process_trans
- xlog_recover_ophdr_to_trans
- xlog_recover_process_ophdr
- xlog_recover_process_data
- xlog_recover_process_efi
- xlog_recover_cancel_efi
- xlog_recover_process_rui
- xlog_recover_cancel_rui
- xlog_recover_process_cui
- xlog_recover_cancel_cui
- xlog_recover_process_bui
- xlog_recover_cancel_bui
- xlog_item_is_intent
- xlog_finish_defer_ops
- xlog_recover_process_intents
- xlog_recover_cancel_intents
- xlog_recover_clear_agi_bucket
- xlog_recover_process_one_iunlink
- xlog_recover_process_iunlinks
- xlog_unpack_data
- xlog_recover_process
- xlog_valid_rec_header
- xlog_do_recovery_pass
- xlog_do_log_recovery
- xlog_do_recover
- xlog_recover
- xlog_recover_finish
- xlog_recover_cancel
- xlog_recover_check_summary
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_defer.h"
16 #include "xfs_inode.h"
17 #include "xfs_trans.h"
18 #include "xfs_log.h"
19 #include "xfs_log_priv.h"
20 #include "xfs_log_recover.h"
21 #include "xfs_inode_item.h"
22 #include "xfs_extfree_item.h"
23 #include "xfs_trans_priv.h"
24 #include "xfs_alloc.h"
25 #include "xfs_ialloc.h"
26 #include "xfs_quota.h"
27 #include "xfs_trace.h"
28 #include "xfs_icache.h"
29 #include "xfs_bmap_btree.h"
30 #include "xfs_error.h"
31 #include "xfs_dir2.h"
32 #include "xfs_rmap_item.h"
33 #include "xfs_buf_item.h"
34 #include "xfs_refcount_item.h"
35 #include "xfs_bmap_item.h"
36
37 #define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1)
38
39 STATIC int
40 xlog_find_zeroed(
41 struct xlog *,
42 xfs_daddr_t *);
43 STATIC int
44 xlog_clear_stale_blocks(
45 struct xlog *,
46 xfs_lsn_t);
47 #if defined(DEBUG)
48 STATIC void
49 xlog_recover_check_summary(
50 struct xlog *);
51 #else
52 #define xlog_recover_check_summary(log)
53 #endif
54 STATIC int
55 xlog_do_recovery_pass(
56 struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);
57
58
59 /*
60  * Record of a cancelled buf log item; such buffers must not be replayed.
61  */
62 struct xfs_buf_cancel {
63 xfs_daddr_t bc_blkno;
64 uint bc_len;
65 int bc_refcount;
66 struct list_head bc_list;
67 };
68
69
70 /*
71  * Sector aligned buffer routines for buffer create/read/write/access
72  */
73
74 /*
75  * Verify that the given block number and count of basic blocks describe
76  * a range that lies entirely within the log.
77  */
78 static inline bool
79 xlog_verify_bno(
80 struct xlog *log,
81 xfs_daddr_t blk_no,
82 int bbcount)
83 {
84 if (blk_no < 0 || blk_no >= log->l_logBBsize)
85 return false;
86 if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
87 return false;
88 return true;
89 }
90
91
92 /* Allocate a buffer to hold log data.  The buffer needs to be able to
93  * map to a range of nbblks basic blocks at any valid offset within
94  * the log. */
95 static char *
96 xlog_alloc_buffer(
97 struct xlog *log,
98 int nbblks)
99 {
100 int align_mask = xfs_buftarg_dma_alignment(log->l_targ);
101
102 /*
103  * Pass log block 0 since we don't have an addr yet; the buffer will
104  * be verified on read.
105  */
106 if (!xlog_verify_bno(log, 0, nbblks)) {
107 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
108 nbblks);
109 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
110 return NULL;
111 }
112
113 /*
114  * We do log I/O in units of log sectors (a power-of-2 multiple of the
115  * basic block size), so we round up the requested size to accommodate
116  * the basic blocks required for complete log sectors.
117  *
118  * In addition, the buffer may be used for a non-sector-aligned block
119  * offset, in which case an I/O of the requested size could extend
120  * beyond the end of the buffer.  If the requested size is only 1 basic
121  * block it will never straddle a sector boundary, so this won't be an
122  * issue.  Nor will this be a problem if the log I/O is done in basic
123  * blocks (sector size 1).  But otherwise we extend the buffer by one
124  * extra log sector to ensure there's space to accommodate this
125  * possibility.
126  */
127 if (nbblks > 1 && log->l_sectBBsize > 1)
128 nbblks += log->l_sectBBsize;
129 nbblks = round_up(nbblks, log->l_sectBBsize);
130 return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL | KM_ZERO);
131 }
132
133
134 /* Return the byte offset into a sector-aligned log buffer at which the
135  * data for the given block number starts, i.e. the block's position
136  * within its log sector. */
137 static inline unsigned int
138 xlog_align(
139 struct xlog *log,
140 xfs_daddr_t blk_no)
141 {
142 return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1));
143 }
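
A minimal user-space sketch of what xlog_align() computes (a model, not
kernel code): assuming 512-byte basic blocks (BBSHIFT == 9) and a
power-of-two sector size in basic blocks, the result is the byte
distance of blk_no from the rounded-down sector start that
xlog_do_io() actually reads.

    #include <assert.h>
    #include <stdint.h>

    #define BBSHIFT 9                        /* 512-byte basic blocks */
    #define BBTOB(bbs) ((bbs) << BBSHIFT)

    /* Byte offset of blk_no within a buffer whose start was rounded
     * down to a sect_bb-block boundary (sect_bb is a power of two). */
    static unsigned int align_offset(int64_t blk_no, int64_t sect_bb)
    {
        return (unsigned int)BBTOB(blk_no & (sect_bb - 1));
    }

    int main(void)
    {
        /* 4 KiB sectors = 8 basic blocks: block 11 sits 3 blocks
         * (1536 bytes) past the rounded-down start at block 8. */
        assert(align_offset(11, 8) == (3 << BBSHIFT));
        /* 512-byte sectors: every block is already aligned. */
        assert(align_offset(11, 1) == 0);
        return 0;
    }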
144
145 static int
146 xlog_do_io(
147 struct xlog *log,
148 xfs_daddr_t blk_no,
149 unsigned int nbblks,
150 char *data,
151 unsigned int op)
152 {
153 int error;
154
155 if (!xlog_verify_bno(log, blk_no, nbblks)) {
156 xfs_warn(log->l_mp,
157 "Invalid log block/length (0x%llx, 0x%x) for buffer",
158 blk_no, nbblks);
159 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
160 return -EFSCORRUPTED;
161 }
162
163 blk_no = round_down(blk_no, log->l_sectBBsize);
164 nbblks = round_up(nbblks, log->l_sectBBsize);
165 ASSERT(nbblks > 0);
166
167 error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no,
168 BBTOB(nbblks), data, op);
169 if (error && !XFS_FORCED_SHUTDOWN(log->l_mp)) {
170 xfs_alert(log->l_mp,
171 "log recovery %s I/O error at daddr 0x%llx len %d error %d",
172 op == REQ_OP_WRITE ? "write" : "read",
173 blk_no, nbblks, error);
174 }
175 return error;
176 }
177
178 STATIC int
179 xlog_bread_noalign(
180 struct xlog *log,
181 xfs_daddr_t blk_no,
182 int nbblks,
183 char *data)
184 {
185 return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
186 }
187
188 STATIC int
189 xlog_bread(
190 struct xlog *log,
191 xfs_daddr_t blk_no,
192 int nbblks,
193 char *data,
194 char **offset)
195 {
196 int error;
197
198 error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
199 if (!error)
200 *offset = data + xlog_align(log, blk_no);
201 return error;
202 }
203
204 STATIC int
205 xlog_bwrite(
206 struct xlog *log,
207 xfs_daddr_t blk_no,
208 int nbblks,
209 char *data)
210 {
211 return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE);
212 }
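
Taken together, these helpers follow one calling convention throughout
this file: allocate a sector-padded buffer, read, then access the data
through the returned offset pointer, because the I/O itself is rounded
down to a sector boundary. A trimmed sketch of that pattern (log and
blk stand for the recovery context and a log-relative block number;
error handling shortened):

    char *buffer, *offset;
    uint cycle;
    int error;

    buffer = xlog_alloc_buffer(log, 1);   /* sector-padded, may fail */
    if (!buffer)
        return -ENOMEM;
    error = xlog_bread(log, blk, 1, buffer, &offset);
    if (!error)
        cycle = xlog_get_cycle(offset);   /* read via offset, not buffer */
    kmem_free(buffer);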
213
214 #ifdef DEBUG
215 /*
216  * dump debug superblock and log record information
217  */
215 /*
216  * dump debug superblock and log record information
217  */
218 STATIC void
219 xlog_header_check_dump(
220 xfs_mount_t *mp,
221 xlog_rec_header_t *head)
222 {
223 xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d",
224 __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
225 xfs_debug(mp, " log : uuid = %pU, fmt = %d",
226 &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
227 }
228 #else
229 #define xlog_header_check_dump(mp, head)
230 #endif
231
232 /*
233  * check log record header for recovery
234  */
235 STATIC int
236 xlog_header_check_recover(
237 xfs_mount_t *mp,
238 xlog_rec_header_t *head)
239 {
240 ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
241
242
243
244
245
246
247 if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
248 xfs_warn(mp,
249 "dirty log written in incompatible format - can't recover");
250 xlog_header_check_dump(mp, head);
251 XFS_ERROR_REPORT("xlog_header_check_recover(1)",
252 XFS_ERRLEVEL_HIGH, mp);
253 return -EFSCORRUPTED;
254 } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
255 xfs_warn(mp,
256 "dirty log entry has mismatched uuid - can't recover");
257 xlog_header_check_dump(mp, head);
258 XFS_ERROR_REPORT("xlog_header_check_recover(2)",
259 XFS_ERRLEVEL_HIGH, mp);
260 return -EFSCORRUPTED;
261 }
262 return 0;
263 }
264
265
266
267
268 STATIC int
269 xlog_header_check_mount(
270 xfs_mount_t *mp,
271 xlog_rec_header_t *head)
272 {
273 ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
274
275 if (uuid_is_null(&head->h_fs_uuid)) {
276
277
278
279
280
281 xfs_warn(mp, "null uuid in log - IRIX style log");
282 } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
283 xfs_warn(mp, "log has mismatched uuid - can't recover");
284 xlog_header_check_dump(mp, head);
285 XFS_ERROR_REPORT("xlog_header_check_mount",
286 XFS_ERRLEVEL_HIGH, mp);
287 return -EFSCORRUPTED;
288 }
289 return 0;
290 }
291
292 STATIC void
293 xlog_recover_iodone(
294 struct xfs_buf *bp)
295 {
296 if (bp->b_error) {
297
298
299
300
301 if (!XFS_FORCED_SHUTDOWN(bp->b_mount)) {
302 xfs_buf_ioerror_alert(bp, __func__);
303 xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
304 }
305 }
306
307
308
309
310
311 if (bp->b_log_item)
312 xfs_buf_item_relse(bp);
313 ASSERT(bp->b_log_item == NULL);
314
315 bp->b_iodone = NULL;
316 xfs_buf_ioend(bp);
317 }
318
319
320
321
322
323
324
325 STATIC int
326 xlog_find_cycle_start(
327 struct xlog *log,
328 char *buffer,
329 xfs_daddr_t first_blk,
330 xfs_daddr_t *last_blk,
331 uint cycle)
332 {
333 char *offset;
334 xfs_daddr_t mid_blk;
335 xfs_daddr_t end_blk;
336 uint mid_cycle;
337 int error;
338
339 end_blk = *last_blk;
340 mid_blk = BLK_AVG(first_blk, end_blk);
341 while (mid_blk != first_blk && mid_blk != end_blk) {
342 error = xlog_bread(log, mid_blk, 1, buffer, &offset);
343 if (error)
344 return error;
345 mid_cycle = xlog_get_cycle(offset);
346 if (mid_cycle == cycle)
347 end_blk = mid_blk;
348 else
349 first_blk = mid_blk;
350 mid_blk = BLK_AVG(first_blk, end_blk);
351 }
352 ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
353 (mid_blk == end_blk && mid_blk-1 == first_blk));
354
355 *last_blk = end_blk;
356
357 return 0;
358 }
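
The loop above is a plain binary search: within first_blk..*last_blk
the blocks stamped with the previous cycle precede those stamped with
`cycle`, so bisection converges on the first block of the new cycle. A
standalone model of the same search over an in-memory array (the kernel
replaces the array read with a one-block xlog_bread()):

    #include <assert.h>
    #include <stddef.h>

    /* Find the first index in first..last holding `cycle`, given
     * cycles[] steps from the old cycle to `cycle` exactly once and
     * cycles[last] == cycle. */
    static size_t find_cycle_start(const unsigned *cycles, size_t first,
                                   size_t last, unsigned cycle)
    {
        size_t mid = (first + last) >> 1;          /* BLK_AVG() */

        while (mid != first && mid != last) {
            if (cycles[mid] == cycle)
                last = mid;
            else
                first = mid;
            mid = (first + last) >> 1;
        }
        return last;
    }

    int main(void)
    {
        const unsigned cycles[] = { 1, 1, 1, 2, 2, 2, 2 };

        assert(find_cycle_start(cycles, 0, 6, 2) == 3);
        return 0;
    }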
359
360
361
362
363
364
365
366
367
368 STATIC int
369 xlog_find_verify_cycle(
370 struct xlog *log,
371 xfs_daddr_t start_blk,
372 int nbblks,
373 uint stop_on_cycle_no,
374 xfs_daddr_t *new_blk)
375 {
376 xfs_daddr_t i, j;
377 uint cycle;
378 char *buffer;
379 xfs_daddr_t bufblks;
380 char *buf = NULL;
381 int error = 0;
382
383
384
385
386
387
388
389 bufblks = 1 << ffs(nbblks);
390 while (bufblks > log->l_logBBsize)
391 bufblks >>= 1;
392 while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
393 bufblks >>= 1;
394 if (bufblks < log->l_sectBBsize)
395 return -ENOMEM;
396 }
397
398 for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
399 int bcount;
400
401 bcount = min(bufblks, (start_blk + nbblks - i));
402
403 error = xlog_bread(log, i, bcount, buffer, &buf);
404 if (error)
405 goto out;
406
407 for (j = 0; j < bcount; j++) {
408 cycle = xlog_get_cycle(buf);
409 if (cycle == stop_on_cycle_no) {
410 *new_blk = i+j;
411 goto out;
412 }
413
414 buf += BBSIZE;
415 }
416 }
417
418 *new_blk = -1;
419
420 out:
421 kmem_free(buffer);
422 return error;
423 }
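
The allocation at the top of this function uses a backoff idiom that
recurs in xlog_write_log_records(): pick a power-of-two starting size
(1 << ffs(nbblks)), clamp it to the log size, then halve on allocation
failure until the buffer would no longer hold a single sector. A
user-space model, with malloc() standing in for xlog_alloc_buffer():

    #include <stdlib.h>
    #include <strings.h>                  /* ffs() */

    static char *alloc_with_backoff(int nbblks, int log_bbs, int sect_bb,
                                    int *bufbbs_out)
    {
        int bufbbs = 1 << ffs(nbblks);    /* power-of-two starting size */
        char *buf;

        while (bufbbs > log_bbs)          /* never bigger than the log */
            bufbbs >>= 1;
        while (!(buf = malloc((size_t)bufbbs << 9))) {
            bufbbs >>= 1;                 /* halve and retry */
            if (bufbbs < sect_bb)
                return NULL;              /* kernel returns -ENOMEM */
        }
        *bufbbs_out = bufbbs;
        return buf;
    }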
424
425
426
427
428
429
430
431
432
433
434
435
436
437 STATIC int
438 xlog_find_verify_log_record(
439 struct xlog *log,
440 xfs_daddr_t start_blk,
441 xfs_daddr_t *last_blk,
442 int extra_bblks)
443 {
444 xfs_daddr_t i;
445 char *buffer;
446 char *offset = NULL;
447 xlog_rec_header_t *head = NULL;
448 int error = 0;
449 int smallmem = 0;
450 int num_blks = *last_blk - start_blk;
451 int xhdrs;
452
453 ASSERT(start_blk != 0 || *last_blk != start_blk);
454
455 buffer = xlog_alloc_buffer(log, num_blks);
456 if (!buffer) {
457 buffer = xlog_alloc_buffer(log, 1);
458 if (!buffer)
459 return -ENOMEM;
460 smallmem = 1;
461 } else {
462 error = xlog_bread(log, start_blk, num_blks, buffer, &offset);
463 if (error)
464 goto out;
465 offset += ((num_blks - 1) << BBSHIFT);
466 }
467
468 for (i = (*last_blk) - 1; i >= 0; i--) {
469 if (i < start_blk) {
470
471 xfs_warn(log->l_mp,
472 "Log inconsistent (didn't find previous header)");
473 ASSERT(0);
474 error = -EIO;
475 goto out;
476 }
477
478 if (smallmem) {
479 error = xlog_bread(log, i, 1, buffer, &offset);
480 if (error)
481 goto out;
482 }
483
484 head = (xlog_rec_header_t *)offset;
485
486 if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
487 break;
488
489 if (!smallmem)
490 offset -= BBSIZE;
491 }
492
493
494
495
496
497
498 if (i == -1) {
499 error = 1;
500 goto out;
501 }
502
503
504
505
506
507 if ((error = xlog_header_check_mount(log->l_mp, head)))
508 goto out;
509
510
511
512
513
514
515
516
517 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
518 uint h_size = be32_to_cpu(head->h_size);
519
520 xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
521 if (h_size % XLOG_HEADER_CYCLE_SIZE)
522 xhdrs++;
523 } else {
524 xhdrs = 1;
525 }
526
527 if (*last_blk - i + extra_bblks !=
528 BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
529 *last_blk = i;
530
531 out:
532 kmem_free(buffer);
533 return error;
534 }
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549 STATIC int
550 xlog_find_head(
551 struct xlog *log,
552 xfs_daddr_t *return_head_blk)
553 {
554 char *buffer;
555 char *offset;
556 xfs_daddr_t new_blk, first_blk, start_blk, last_blk, head_blk;
557 int num_scan_bblks;
558 uint first_half_cycle, last_half_cycle;
559 uint stop_on_cycle;
560 int error, log_bbnum = log->l_logBBsize;
561
562
563 error = xlog_find_zeroed(log, &first_blk);
564 if (error < 0) {
565 xfs_warn(log->l_mp, "empty log check failed");
566 return error;
567 }
568 if (error == 1) {
569 *return_head_blk = first_blk;
570
571
572 if (!first_blk) {
573
574
575
576
577 xfs_warn(log->l_mp, "totally zeroed log");
578 }
579
580 return 0;
581 }
582
583 first_blk = 0;
584 buffer = xlog_alloc_buffer(log, 1);
585 if (!buffer)
586 return -ENOMEM;
587
588 error = xlog_bread(log, 0, 1, buffer, &offset);
589 if (error)
590 goto out_free_buffer;
591
592 first_half_cycle = xlog_get_cycle(offset);
593
594 last_blk = head_blk = log_bbnum - 1;
595 error = xlog_bread(log, last_blk, 1, buffer, &offset);
596 if (error)
597 goto out_free_buffer;
598
599 last_half_cycle = xlog_get_cycle(offset);
600 ASSERT(last_half_cycle != 0);
601
602
603
604
605
606
607
608
609
610
611
612
613 if (first_half_cycle == last_half_cycle) {
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639 head_blk = log_bbnum;
640 stop_on_cycle = last_half_cycle - 1;
641 } else {
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664 stop_on_cycle = last_half_cycle;
665 error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk,
666 last_half_cycle);
667 if (error)
668 goto out_free_buffer;
669 }
670
671
672
673
674
675
676
677
678 num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
679 if (head_blk >= num_scan_bblks) {
680
681
682
683
684 start_blk = head_blk - num_scan_bblks;
685 if ((error = xlog_find_verify_cycle(log,
686 start_blk, num_scan_bblks,
687 stop_on_cycle, &new_blk)))
688 goto out_free_buffer;
689 if (new_blk != -1)
690 head_blk = new_blk;
691 } else {
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719 ASSERT(head_blk <= INT_MAX &&
720 (xfs_daddr_t) num_scan_bblks >= head_blk);
721 start_blk = log_bbnum - (num_scan_bblks - head_blk);
722 if ((error = xlog_find_verify_cycle(log, start_blk,
723 num_scan_bblks - (int)head_blk,
724 (stop_on_cycle - 1), &new_blk)))
725 goto out_free_buffer;
726 if (new_blk != -1) {
727 head_blk = new_blk;
728 goto validate_head;
729 }
730
731
732
733
734
735
736 start_blk = 0;
737 ASSERT(head_blk <= INT_MAX);
738 if ((error = xlog_find_verify_cycle(log,
739 start_blk, (int)head_blk,
740 stop_on_cycle, &new_blk)))
741 goto out_free_buffer;
742 if (new_blk != -1)
743 head_blk = new_blk;
744 }
745
746 validate_head:
747
748
749
750
751 num_scan_bblks = XLOG_REC_SHIFT(log);
752 if (head_blk >= num_scan_bblks) {
753 start_blk = head_blk - num_scan_bblks;
754
755
756 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
757 if (error == 1)
758 error = -EIO;
759 if (error)
760 goto out_free_buffer;
761 } else {
762 start_blk = 0;
763 ASSERT(head_blk <= INT_MAX);
764 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
765 if (error < 0)
766 goto out_free_buffer;
767 if (error == 1) {
768
769 start_blk = log_bbnum - (num_scan_bblks - head_blk);
770 new_blk = log_bbnum;
771 ASSERT(start_blk <= INT_MAX &&
772 (xfs_daddr_t) log_bbnum-start_blk >= 0);
773 ASSERT(head_blk <= INT_MAX);
774 error = xlog_find_verify_log_record(log, start_blk,
775 &new_blk, (int)head_blk);
776 if (error == 1)
777 error = -EIO;
778 if (error)
779 goto out_free_buffer;
780 if (new_blk != log_bbnum)
781 head_blk = new_blk;
782 } else if (error)
783 goto out_free_buffer;
784 }
785
786 kmem_free(buffer);
787 if (head_blk == log_bbnum)
788 *return_head_blk = 0;
789 else
790 *return_head_blk = head_blk;
791
792
793
794
795
796
797 return 0;
798
799 out_free_buffer:
800 kmem_free(buffer);
801 if (error)
802 xfs_warn(log->l_mp, "failed to find log head");
803 return error;
804 }
805
806
807
808
809
810
811
812
813
814 STATIC int
815 xlog_rseek_logrec_hdr(
816 struct xlog *log,
817 xfs_daddr_t head_blk,
818 xfs_daddr_t tail_blk,
819 int count,
820 char *buffer,
821 xfs_daddr_t *rblk,
822 struct xlog_rec_header **rhead,
823 bool *wrapped)
824 {
825 int i;
826 int error;
827 int found = 0;
828 char *offset = NULL;
829 xfs_daddr_t end_blk;
830
831 *wrapped = false;
832
833
834
835
836
837 end_blk = head_blk > tail_blk ? tail_blk : 0;
838 for (i = (int) head_blk - 1; i >= end_blk; i--) {
839 error = xlog_bread(log, i, 1, buffer, &offset);
840 if (error)
841 goto out_error;
842
843 if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
844 *rblk = i;
845 *rhead = (struct xlog_rec_header *) offset;
846 if (++found == count)
847 break;
848 }
849 }
850
851
852
853
854
855
856 if (tail_blk >= head_blk && found != count) {
857 for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
858 error = xlog_bread(log, i, 1, buffer, &offset);
859 if (error)
860 goto out_error;
861
862 if (*(__be32 *)offset ==
863 cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
864 *wrapped = true;
865 *rblk = i;
866 *rhead = (struct xlog_rec_header *) offset;
867 if (++found == count)
868 break;
869 }
870 }
871 }
872
873 return found;
874
875 out_error:
876 return error;
877 }
878
879
880
881
882
883
884
885
886
887
888 STATIC int
889 xlog_seek_logrec_hdr(
890 struct xlog *log,
891 xfs_daddr_t head_blk,
892 xfs_daddr_t tail_blk,
893 int count,
894 char *buffer,
895 xfs_daddr_t *rblk,
896 struct xlog_rec_header **rhead,
897 bool *wrapped)
898 {
899 int i;
900 int error;
901 int found = 0;
902 char *offset = NULL;
903 xfs_daddr_t end_blk;
904
905 *wrapped = false;
906
907
908
909
910
911 end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
912 for (i = (int) tail_blk; i <= end_blk; i++) {
913 error = xlog_bread(log, i, 1, buffer, &offset);
914 if (error)
915 goto out_error;
916
917 if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
918 *rblk = i;
919 *rhead = (struct xlog_rec_header *) offset;
920 if (++found == count)
921 break;
922 }
923 }
924
925
926
927
928
929 if (tail_blk > head_blk && found != count) {
930 for (i = 0; i < (int) head_blk; i++) {
931 error = xlog_bread(log, i, 1, buffer, &offset);
932 if (error)
933 goto out_error;
934
935 if (*(__be32 *)offset ==
936 cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
937 *wrapped = true;
938 *rblk = i;
939 *rhead = (struct xlog_rec_header *) offset;
940 if (++found == count)
941 break;
942 }
943 }
944 }
945
946 return found;
947
948 out_error:
949 return error;
950 }
951
952
953
954
955 static inline int
956 xlog_tail_distance(
957 struct xlog *log,
958 xfs_daddr_t head_blk,
959 xfs_daddr_t tail_blk)
960 {
961 if (head_blk < tail_blk)
962 return tail_blk - head_blk;
963
964 return tail_blk + (log->l_logBBsize - head_blk);
965 }
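
A worked model of the distance computation: it counts how many blocks
lie between the head and the tail moving forward through the circular
log, wrapping past the physical end when the tail is behind the head.

    #include <assert.h>
    #include <stdint.h>

    static int64_t tail_distance(int64_t log_bbs, int64_t head,
                                 int64_t tail)
    {
        if (head < tail)
            return tail - head;
        return tail + (log_bbs - head);   /* wraps the end of the log */
    }

    int main(void)
    {
        assert(tail_distance(100, 20, 70) == 50);  /* no wrap */
        assert(tail_distance(100, 90, 10) == 20);  /* 90..99 then 0..9 */
        return 0;
    }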
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986 STATIC int
987 xlog_verify_tail(
988 struct xlog *log,
989 xfs_daddr_t head_blk,
990 xfs_daddr_t *tail_blk,
991 int hsize)
992 {
993 struct xlog_rec_header *thead;
994 char *buffer;
995 xfs_daddr_t first_bad;
996 int error = 0;
997 bool wrapped;
998 xfs_daddr_t tmp_tail;
999 xfs_daddr_t orig_tail = *tail_blk;
1000
1001 buffer = xlog_alloc_buffer(log, 1);
1002 if (!buffer)
1003 return -ENOMEM;
1004
1005
1006
1007
1008
1009 error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
1010 &tmp_tail, &thead, &wrapped);
1011 if (error < 0)
1012 goto out;
1013 if (*tail_blk != tmp_tail)
1014 *tail_blk = tmp_tail;
1015
1016
1017
1018
1019
1020
1021
1022
1023 first_bad = 0;
1024 error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
1025 XLOG_RECOVER_CRCPASS, &first_bad);
1026 while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
1027 int tail_distance;
1028
1029
1030
1031
1032
1033 tail_distance = xlog_tail_distance(log, head_blk, first_bad);
1034 if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
1035 break;
1036
1037
1038 error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2,
1039 buffer, &tmp_tail, &thead, &wrapped);
1040 if (error < 0)
1041 goto out;
1042
1043 *tail_blk = tmp_tail;
1044 first_bad = 0;
1045 error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
1046 XLOG_RECOVER_CRCPASS, &first_bad);
1047 }
1048
1049 if (!error && *tail_blk != orig_tail)
1050 xfs_warn(log->l_mp,
1051 "Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
1052 orig_tail, *tail_blk);
1053 out:
1054 kmem_free(buffer);
1055 return error;
1056 }
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071 STATIC int
1072 xlog_verify_head(
1073 struct xlog *log,
1074 xfs_daddr_t *head_blk,
1075 xfs_daddr_t *tail_blk,
1076 char *buffer,
1077 xfs_daddr_t *rhead_blk,
1078 struct xlog_rec_header **rhead,
1079 bool *wrapped)
1080 {
1081 struct xlog_rec_header *tmp_rhead;
1082 char *tmp_buffer;
1083 xfs_daddr_t first_bad;
1084 xfs_daddr_t tmp_rhead_blk;
1085 int found;
1086 int error;
1087 bool tmp_wrapped;
1088
1089
1090
1091
1092
1093
1094
1095 tmp_buffer = xlog_alloc_buffer(log, 1);
1096 if (!tmp_buffer)
1097 return -ENOMEM;
1098 error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
1099 XLOG_MAX_ICLOGS, tmp_buffer,
1100 &tmp_rhead_blk, &tmp_rhead, &tmp_wrapped);
1101 kmem_free(tmp_buffer);
1102 if (error < 0)
1103 return error;
1104
1105
1106
1107
1108
1109
1110 error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
1111 XLOG_RECOVER_CRCPASS, &first_bad);
1112 if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
1113
1114
1115
1116
1117 error = 0;
1118 xfs_warn(log->l_mp,
1119 "Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
1120 first_bad, *head_blk);
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130 found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
1131 buffer, rhead_blk, rhead, wrapped);
1132 if (found < 0)
1133 return found;
1134 if (found == 0)
1135 return -EIO;
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146 *head_blk = first_bad;
1147 *tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
1148 if (*head_blk == *tail_blk) {
1149 ASSERT(0);
1150 return 0;
1151 }
1152 }
1153 if (error)
1154 return error;
1155
1156 return xlog_verify_tail(log, *head_blk, tail_blk,
1157 be32_to_cpu((*rhead)->h_size));
1158 }
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168 static inline xfs_daddr_t
1169 xlog_wrap_logbno(
1170 struct xlog *log,
1171 xfs_daddr_t bno)
1172 {
1173 int mod;
1174
1175 div_s64_rem(bno, log->l_logBBsize, &mod);
1176 return mod;
1177 }
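
A standalone model of the wrap: a block number computed past the
physical end of the log is reduced modulo the log size (the kernel
uses div_s64_rem() because xfs_daddr_t is 64-bit while the log size
is a 32-bit divisor).

    #include <assert.h>

    static long long wrap_logbno(long long bno, int log_bbs)
    {
        return bno % log_bbs;
    }

    int main(void)
    {
        assert(wrap_logbno(1003, 1000) == 3);    /* wrapped */
        assert(wrap_logbno(999, 1000) == 999);   /* in range */
        return 0;
    }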
1178
1179
1180
1181
1182
1183
1184 static int
1185 xlog_check_unmount_rec(
1186 struct xlog *log,
1187 xfs_daddr_t *head_blk,
1188 xfs_daddr_t *tail_blk,
1189 struct xlog_rec_header *rhead,
1190 xfs_daddr_t rhead_blk,
1191 char *buffer,
1192 bool *clean)
1193 {
1194 struct xlog_op_header *op_head;
1195 xfs_daddr_t umount_data_blk;
1196 xfs_daddr_t after_umount_blk;
1197 int hblks;
1198 int error;
1199 char *offset;
1200
1201 *clean = false;
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1213 int h_size = be32_to_cpu(rhead->h_size);
1214 int h_version = be32_to_cpu(rhead->h_version);
1215
1216 if ((h_version & XLOG_VERSION_2) &&
1217 (h_size > XLOG_HEADER_CYCLE_SIZE)) {
1218 hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
1219 if (h_size % XLOG_HEADER_CYCLE_SIZE)
1220 hblks++;
1221 } else {
1222 hblks = 1;
1223 }
1224 } else {
1225 hblks = 1;
1226 }
1227
1228 after_umount_blk = xlog_wrap_logbno(log,
1229 rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));
1230
1231 if (*head_blk == after_umount_blk &&
1232 be32_to_cpu(rhead->h_num_logops) == 1) {
1233 umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
1234 error = xlog_bread(log, umount_data_blk, 1, buffer, &offset);
1235 if (error)
1236 return error;
1237
1238 op_head = (struct xlog_op_header *)offset;
1239 if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
1240
1241
1242
1243
1244
1245 xlog_assign_atomic_lsn(&log->l_tail_lsn,
1246 log->l_curr_cycle, after_umount_blk);
1247 xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
1248 log->l_curr_cycle, after_umount_blk);
1249 *tail_blk = after_umount_blk;
1250
1251 *clean = true;
1252 }
1253 }
1254
1255 return 0;
1256 }
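
The hblks computation above rounds the record's header area up to whole
header blocks: a version-2 record header holds the cycle numbers for up
to XLOG_HEADER_CYCLE_SIZE (32 KiB) of log data, so records written from
larger iclogs need extra header blocks. A small model of the round-up
(only taken when the record is version 2 and h_size exceeds one
header's coverage):

    #include <assert.h>

    #define XLOG_HEADER_CYCLE_SIZE (32 * 1024)

    static int hdr_blocks(int h_size)
    {
        return h_size / XLOG_HEADER_CYCLE_SIZE +
               (h_size % XLOG_HEADER_CYCLE_SIZE ? 1 : 0);
    }

    int main(void)
    {
        assert(hdr_blocks(32 * 1024) == 1);
        assert(hdr_blocks(40 * 1024) == 2);   /* partial block rounds up */
        assert(hdr_blocks(64 * 1024) == 2);
        return 0;
    }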
1257
1258 static void
1259 xlog_set_state(
1260 struct xlog *log,
1261 xfs_daddr_t head_blk,
1262 struct xlog_rec_header *rhead,
1263 xfs_daddr_t rhead_blk,
1264 bool bump_cycle)
1265 {
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276 log->l_prev_block = rhead_blk;
1277 log->l_curr_block = (int)head_blk;
1278 log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
1279 if (bump_cycle)
1280 log->l_curr_cycle++;
1281 atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
1282 atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
1283 xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
1284 BBTOB(log->l_curr_block));
1285 xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
1286 BBTOB(log->l_curr_block));
1287 }
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305 STATIC int
1306 xlog_find_tail(
1307 struct xlog *log,
1308 xfs_daddr_t *head_blk,
1309 xfs_daddr_t *tail_blk)
1310 {
1311 xlog_rec_header_t *rhead;
1312 char *offset = NULL;
1313 char *buffer;
1314 int error;
1315 xfs_daddr_t rhead_blk;
1316 xfs_lsn_t tail_lsn;
1317 bool wrapped = false;
1318 bool clean = false;
1319
1320
1321
1322
1323 if ((error = xlog_find_head(log, head_blk)))
1324 return error;
1325 ASSERT(*head_blk < INT_MAX);
1326
1327 buffer = xlog_alloc_buffer(log, 1);
1328 if (!buffer)
1329 return -ENOMEM;
1330 if (*head_blk == 0) {
1331 error = xlog_bread(log, 0, 1, buffer, &offset);
1332 if (error)
1333 goto done;
1334
1335 if (xlog_get_cycle(offset) == 0) {
1336 *tail_blk = 0;
1337
1338 goto done;
1339 }
1340 }
1341
1342
1343
1344
1345
1346
1347 error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
1348 &rhead_blk, &rhead, &wrapped);
1349 if (error < 0)
1350 return error;
1351 if (!error) {
1352 xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
1353 return -EIO;
1354 }
1355 *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
1356
1357
1358
1359
1360 xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
1361 tail_lsn = atomic64_read(&log->l_tail_lsn);
1362
1363
1364
1365
1366
1367 error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
1368 rhead_blk, buffer, &clean);
1369 if (error)
1370 goto done;
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382 if (!clean) {
1383 xfs_daddr_t orig_head = *head_blk;
1384
1385 error = xlog_verify_head(log, head_blk, tail_blk, buffer,
1386 &rhead_blk, &rhead, &wrapped);
1387 if (error)
1388 goto done;
1389
1390
1391 if (*head_blk != orig_head) {
1392 xlog_set_state(log, *head_blk, rhead, rhead_blk,
1393 wrapped);
1394 tail_lsn = atomic64_read(&log->l_tail_lsn);
1395 error = xlog_check_unmount_rec(log, head_blk, tail_blk,
1396 rhead, rhead_blk, buffer,
1397 &clean);
1398 if (error)
1399 goto done;
1400 }
1401 }
1402
1403
1404
1405
1406
1407
1408 if (clean)
1409 log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430 if (!xfs_readonly_buftarg(log->l_targ))
1431 error = xlog_clear_stale_blocks(log, tail_lsn);
1432
1433 done:
1434 kmem_free(buffer);
1435
1436 if (error)
1437 xfs_warn(log->l_mp, "failed to locate log tail");
1438 return error;
1439 }
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457 STATIC int
1458 xlog_find_zeroed(
1459 struct xlog *log,
1460 xfs_daddr_t *blk_no)
1461 {
1462 char *buffer;
1463 char *offset;
1464 uint first_cycle, last_cycle;
1465 xfs_daddr_t new_blk, last_blk, start_blk;
1466 xfs_daddr_t num_scan_bblks;
1467 int error, log_bbnum = log->l_logBBsize;
1468
1469 *blk_no = 0;
1470
1471
1472 buffer = xlog_alloc_buffer(log, 1);
1473 if (!buffer)
1474 return -ENOMEM;
1475 error = xlog_bread(log, 0, 1, buffer, &offset);
1476 if (error)
1477 goto out_free_buffer;
1478
1479 first_cycle = xlog_get_cycle(offset);
1480 if (first_cycle == 0) {
1481 *blk_no = 0;
1482 kmem_free(buffer);
1483 return 1;
1484 }
1485
1486
1487 error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset);
1488 if (error)
1489 goto out_free_buffer;
1490
1491 last_cycle = xlog_get_cycle(offset);
1492 if (last_cycle != 0) {
1493 kmem_free(buffer);
1494 return 0;
1495 }
1496
1497
1498 last_blk = log_bbnum-1;
1499 error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0);
1500 if (error)
1501 goto out_free_buffer;
1502
1503
1504
1505
1506
1507
1508
1509 num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1510 ASSERT(num_scan_bblks <= INT_MAX);
1511
1512 if (last_blk < num_scan_bblks)
1513 num_scan_bblks = last_blk;
1514 start_blk = last_blk - num_scan_bblks;
1515
1516
1517
1518
1519
1520
1521
1522 if ((error = xlog_find_verify_cycle(log, start_blk,
1523 (int)num_scan_bblks, 0, &new_blk)))
1524 goto out_free_buffer;
1525 if (new_blk != -1)
1526 last_blk = new_blk;
1527
1528
1529
1530
1531
1532 error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
1533 if (error == 1)
1534 error = -EIO;
1535 if (error)
1536 goto out_free_buffer;
1537
1538 *blk_no = last_blk;
1539 out_free_buffer:
1540 kmem_free(buffer);
1541 if (error)
1542 return error;
1543 return 1;
1544 }
1545
1546
1547
1548
1549
1550
1551 STATIC void
1552 xlog_add_record(
1553 struct xlog *log,
1554 char *buf,
1555 int cycle,
1556 int block,
1557 int tail_cycle,
1558 int tail_block)
1559 {
1560 xlog_rec_header_t *recp = (xlog_rec_header_t *)buf;
1561
1562 memset(buf, 0, BBSIZE);
1563 recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1564 recp->h_cycle = cpu_to_be32(cycle);
1565 recp->h_version = cpu_to_be32(
1566 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1567 recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1568 recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1569 recp->h_fmt = cpu_to_be32(XLOG_FMT);
1570 memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1571 }
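
The dummy record stamped here only needs to satisfy the head/tail
search code, so the interesting detail is the LSN packing performed by
xlog_assign_lsn(): cycle number in the high 32 bits, block number in
the low 32, which is exactly what CYCLE_LSN()/BLOCK_LSN() unpack
elsewhere in this file. A sketch of that packing:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t assign_lsn(uint32_t cycle, uint32_t block)
    {
        return ((uint64_t)cycle << 32) | block;
    }

    int main(void)
    {
        uint64_t lsn = assign_lsn(7, 4096);

        assert((uint32_t)(lsn >> 32) == 7);    /* CYCLE_LSN() */
        assert((uint32_t)lsn == 4096);         /* BLOCK_LSN() */
        return 0;
    }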
1572
1573 STATIC int
1574 xlog_write_log_records(
1575 struct xlog *log,
1576 int cycle,
1577 int start_block,
1578 int blocks,
1579 int tail_cycle,
1580 int tail_block)
1581 {
1582 char *offset;
1583 char *buffer;
1584 int balign, ealign;
1585 int sectbb = log->l_sectBBsize;
1586 int end_block = start_block + blocks;
1587 int bufblks;
1588 int error = 0;
1589 int i, j = 0;
1590
1591
1592
1593
1594
1595
1596
1597 bufblks = 1 << ffs(blocks);
1598 while (bufblks > log->l_logBBsize)
1599 bufblks >>= 1;
1600 while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
1601 bufblks >>= 1;
1602 if (bufblks < sectbb)
1603 return -ENOMEM;
1604 }
1605
1606
1607
1608
1609
1610 balign = round_down(start_block, sectbb);
1611 if (balign != start_block) {
1612 error = xlog_bread_noalign(log, start_block, 1, buffer);
1613 if (error)
1614 goto out_free_buffer;
1615
1616 j = start_block - balign;
1617 }
1618
1619 for (i = start_block; i < end_block; i += bufblks) {
1620 int bcount, endcount;
1621
1622 bcount = min(bufblks, end_block - start_block);
1623 endcount = bcount - j;
1624
1625
1626
1627
1628
1629 ealign = round_down(end_block, sectbb);
1630 if (j == 0 && (start_block + endcount > ealign)) {
1631 error = xlog_bread_noalign(log, ealign, sectbb,
1632 buffer + BBTOB(ealign - start_block));
1633 if (error)
1634 break;
1635
1636 }
1637
1638 offset = buffer + xlog_align(log, start_block);
1639 for (; j < endcount; j++) {
1640 xlog_add_record(log, offset, cycle, i+j,
1641 tail_cycle, tail_block);
1642 offset += BBSIZE;
1643 }
1644 error = xlog_bwrite(log, start_block, endcount, buffer);
1645 if (error)
1646 break;
1647 start_block += endcount;
1648 j = 0;
1649 }
1650
1651 out_free_buffer:
1652 kmem_free(buffer);
1653 return error;
1654 }
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672 STATIC int
1673 xlog_clear_stale_blocks(
1674 struct xlog *log,
1675 xfs_lsn_t tail_lsn)
1676 {
1677 int tail_cycle, head_cycle;
1678 int tail_block, head_block;
1679 int tail_distance, max_distance;
1680 int distance;
1681 int error;
1682
1683 tail_cycle = CYCLE_LSN(tail_lsn);
1684 tail_block = BLOCK_LSN(tail_lsn);
1685 head_cycle = log->l_curr_cycle;
1686 head_block = log->l_curr_block;
1687
1688
1689
1690
1691
1692
1693
1694 if (head_cycle == tail_cycle) {
1695
1696
1697
1698
1699
1700
1701
1702 if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
1703 XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
1704 XFS_ERRLEVEL_LOW, log->l_mp);
1705 return -EFSCORRUPTED;
1706 }
1707 tail_distance = tail_block + (log->l_logBBsize - head_block);
1708 } else {
1709
1710
1711
1712
1713
1714 if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
1715 XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
1716 XFS_ERRLEVEL_LOW, log->l_mp);
1717 return -EFSCORRUPTED;
1718 }
1719 tail_distance = tail_block - head_block;
1720 }
1721
1722
1723
1724
1725
1726 if (tail_distance <= 0) {
1727 ASSERT(tail_distance == 0);
1728 return 0;
1729 }
1730
1731 max_distance = XLOG_TOTAL_REC_SHIFT(log);
1732
1733
1734
1735
1736
1737
1738
1739 max_distance = min(max_distance, tail_distance);
1740
1741 if ((head_block + max_distance) <= log->l_logBBsize) {
1742
1743
1744
1745
1746
1747
1748
1749 error = xlog_write_log_records(log, (head_cycle - 1),
1750 head_block, max_distance, tail_cycle,
1751 tail_block);
1752 if (error)
1753 return error;
1754 } else {
1755
1756
1757
1758
1759
1760
1761
1762 distance = log->l_logBBsize - head_block;
1763 error = xlog_write_log_records(log, (head_cycle - 1),
1764 head_block, distance, tail_cycle,
1765 tail_block);
1766
1767 if (error)
1768 return error;
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778 distance = max_distance - (log->l_logBBsize - head_block);
1779 error = xlog_write_log_records(log, head_cycle, 0, distance,
1780 tail_cycle, tail_block);
1781 if (error)
1782 return error;
1783 }
1784
1785 return 0;
1786 }
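
A standalone model of how the function splits the stamping region when
it wraps the physical end of the log (the else branch above): one write
from the head to the end of the log, then a second from block 0.

    #include <assert.h>

    static void split_wrapped(int log_bbs, int head, int len,
                              int *first_len, int *second_len)
    {
        if (head + len <= log_bbs) {
            *first_len = len;               /* fits without wrapping */
            *second_len = 0;
        } else {
            *first_len = log_bbs - head;    /* head to physical end */
            *second_len = len - *first_len; /* remainder from block 0 */
        }
    }

    int main(void)
    {
        int a, b;

        split_wrapped(1000, 900, 150, &a, &b);
        assert(a == 100 && b == 50);
        split_wrapped(1000, 100, 150, &a, &b);
        assert(a == 150 && b == 0);
        return 0;
    }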
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844 STATIC int
1845 xlog_recover_reorder_trans(
1846 struct xlog *log,
1847 struct xlog_recover *trans,
1848 int pass)
1849 {
1850 xlog_recover_item_t *item, *n;
1851 int error = 0;
1852 LIST_HEAD(sort_list);
1853 LIST_HEAD(cancel_list);
1854 LIST_HEAD(buffer_list);
1855 LIST_HEAD(inode_buffer_list);
1856 LIST_HEAD(inode_list);
1857
1858 list_splice_init(&trans->r_itemq, &sort_list);
1859 list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1860 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
1861
1862 switch (ITEM_TYPE(item)) {
1863 case XFS_LI_ICREATE:
1864 list_move_tail(&item->ri_list, &buffer_list);
1865 break;
1866 case XFS_LI_BUF:
1867 if (buf_f->blf_flags & XFS_BLF_CANCEL) {
1868 trace_xfs_log_recover_item_reorder_head(log,
1869 trans, item, pass);
1870 list_move(&item->ri_list, &cancel_list);
1871 break;
1872 }
1873 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
1874 list_move(&item->ri_list, &inode_buffer_list);
1875 break;
1876 }
1877 list_move_tail(&item->ri_list, &buffer_list);
1878 break;
1879 case XFS_LI_INODE:
1880 case XFS_LI_DQUOT:
1881 case XFS_LI_QUOTAOFF:
1882 case XFS_LI_EFD:
1883 case XFS_LI_EFI:
1884 case XFS_LI_RUI:
1885 case XFS_LI_RUD:
1886 case XFS_LI_CUI:
1887 case XFS_LI_CUD:
1888 case XFS_LI_BUI:
1889 case XFS_LI_BUD:
1890 trace_xfs_log_recover_item_reorder_tail(log,
1891 trans, item, pass);
1892 list_move_tail(&item->ri_list, &inode_list);
1893 break;
1894 default:
1895 xfs_warn(log->l_mp,
1896 "%s: unrecognized type of log operation",
1897 __func__);
1898 ASSERT(0);
1899
1900
1901
1902
1903 if (!list_empty(&sort_list))
1904 list_splice_init(&sort_list, &trans->r_itemq);
1905 error = -EIO;
1906 goto out;
1907 }
1908 }
1909 out:
1910 ASSERT(list_empty(&sort_list));
1911 if (!list_empty(&buffer_list))
1912 list_splice(&buffer_list, &trans->r_itemq);
1913 if (!list_empty(&inode_list))
1914 list_splice_tail(&inode_list, &trans->r_itemq);
1915 if (!list_empty(&inode_buffer_list))
1916 list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1917 if (!list_empty(&cancel_list))
1918 list_splice_tail(&cancel_list, &trans->r_itemq);
1919 return error;
1920 }
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934 STATIC int
1935 xlog_recover_buffer_pass1(
1936 struct xlog *log,
1937 struct xlog_recover_item *item)
1938 {
1939 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
1940 struct list_head *bucket;
1941 struct xfs_buf_cancel *bcp;
1942
1943
1944
1945
1946 if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1947 trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1948 return 0;
1949 }
1950
1951
1952
1953
1954
1955 bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
1956 list_for_each_entry(bcp, bucket, bc_list) {
1957 if (bcp->bc_blkno == buf_f->blf_blkno &&
1958 bcp->bc_len == buf_f->blf_len) {
1959 bcp->bc_refcount++;
1960 trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1961 return 0;
1962 }
1963 }
1964
1965 bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), 0);
1966 bcp->bc_blkno = buf_f->blf_blkno;
1967 bcp->bc_len = buf_f->blf_len;
1968 bcp->bc_refcount = 1;
1969 list_add_tail(&bcp->bc_list, bucket);
1970
1971 trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1972 return 0;
1973 }
1974
1975
1976
1977
1978
1979
1980 STATIC struct xfs_buf_cancel *
1981 xlog_peek_buffer_cancelled(
1982 struct xlog *log,
1983 xfs_daddr_t blkno,
1984 uint len,
1985 unsigned short flags)
1986 {
1987 struct list_head *bucket;
1988 struct xfs_buf_cancel *bcp;
1989
1990 if (!log->l_buf_cancel_table) {
1991
1992 ASSERT(!(flags & XFS_BLF_CANCEL));
1993 return NULL;
1994 }
1995
1996 bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
1997 list_for_each_entry(bcp, bucket, bc_list) {
1998 if (bcp->bc_blkno == blkno && bcp->bc_len == len)
1999 return bcp;
2000 }
2001
2002
2003
2004
2005
2006 ASSERT(!(flags & XFS_BLF_CANCEL));
2007 return NULL;
2008 }
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020 STATIC int
2021 xlog_check_buffer_cancelled(
2022 struct xlog *log,
2023 xfs_daddr_t blkno,
2024 uint len,
2025 unsigned short flags)
2026 {
2027 struct xfs_buf_cancel *bcp;
2028
2029 bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
2030 if (!bcp)
2031 return 0;
2032
2033
2034
2035
2036
2037
2038
2039 if (flags & XFS_BLF_CANCEL) {
2040 if (--bcp->bc_refcount == 0) {
2041 list_del(&bcp->bc_list);
2042 kmem_free(bcp);
2043 }
2044 }
2045 return 1;
2046 }
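
A compact model of the cancellation table semantics: pass 1 records
each cancelled (blkno, len) range with a refcount, and pass-2 queries
both answer "was this buffer cancelled?" and, when the query itself
comes from a cancel item, drop one reference so the entry dies with its
last cancellation record. The kernel hashes blkno into per-bucket
lists; this sketch scans a flat array instead.

    #include <stdbool.h>
    #include <stddef.h>

    struct buf_cancel {
        long long blkno;
        unsigned  len;
        int       refcount;
        bool      live;
    };

    static bool check_cancelled(struct buf_cancel *tbl, size_t n,
                                long long blkno, unsigned len,
                                bool is_cancel_item)
    {
        for (size_t i = 0; i < n; i++) {
            struct buf_cancel *bcp = &tbl[i];

            if (!bcp->live || bcp->blkno != blkno || bcp->len != len)
                continue;
            if (is_cancel_item && --bcp->refcount == 0)
                bcp->live = false;     /* last cancel releases entry */
            return true;
        }
        return false;
    }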
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060 STATIC int
2061 xlog_recover_do_inode_buffer(
2062 struct xfs_mount *mp,
2063 xlog_recover_item_t *item,
2064 struct xfs_buf *bp,
2065 xfs_buf_log_format_t *buf_f)
2066 {
2067 int i;
2068 int item_index = 0;
2069 int bit = 0;
2070 int nbits = 0;
2071 int reg_buf_offset = 0;
2072 int reg_buf_bytes = 0;
2073 int next_unlinked_offset;
2074 int inodes_per_buf;
2075 xfs_agino_t *logged_nextp;
2076 xfs_agino_t *buffer_nextp;
2077
2078 trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
2079
2080
2081
2082
2083
2084 if (xfs_sb_version_hascrc(&mp->m_sb))
2085 bp->b_ops = &xfs_inode_buf_ops;
2086
2087 inodes_per_buf = BBTOB(bp->b_length) >> mp->m_sb.sb_inodelog;
2088 for (i = 0; i < inodes_per_buf; i++) {
2089 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
2090 offsetof(xfs_dinode_t, di_next_unlinked);
2091
2092 while (next_unlinked_offset >=
2093 (reg_buf_offset + reg_buf_bytes)) {
2094
2095
2096
2097
2098
2099
2100 bit += nbits;
2101 bit = xfs_next_bit(buf_f->blf_data_map,
2102 buf_f->blf_map_size, bit);
2103
2104
2105
2106
2107
2108 if (bit == -1)
2109 return 0;
2110
2111 nbits = xfs_contig_bits(buf_f->blf_data_map,
2112 buf_f->blf_map_size, bit);
2113 ASSERT(nbits > 0);
2114 reg_buf_offset = bit << XFS_BLF_SHIFT;
2115 reg_buf_bytes = nbits << XFS_BLF_SHIFT;
2116 item_index++;
2117 }
2118
2119
2120
2121
2122
2123
2124 if (next_unlinked_offset < reg_buf_offset)
2125 continue;
2126
2127 ASSERT(item->ri_buf[item_index].i_addr != NULL);
2128 ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
2129 ASSERT((reg_buf_offset + reg_buf_bytes) <= BBTOB(bp->b_length));
2130
2131
2132
2133
2134
2135
2136 logged_nextp = item->ri_buf[item_index].i_addr +
2137 next_unlinked_offset - reg_buf_offset;
2138 if (unlikely(*logged_nextp == 0)) {
2139 xfs_alert(mp,
2140 "Bad inode buffer log record (ptr = "PTR_FMT", bp = "PTR_FMT"). "
2141 "Trying to replay bad (0) inode di_next_unlinked field.",
2142 item, bp);
2143 XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
2144 XFS_ERRLEVEL_LOW, mp);
2145 return -EFSCORRUPTED;
2146 }
2147
2148 buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset);
2149 *buffer_nextp = *logged_nextp;
2150
2151
2152
2153
2154
2155
2156 xfs_dinode_calc_crc(mp,
2157 xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
2158
2159 }
2160
2161 return 0;
2162 }
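
The region walk above is driven by blf_data_map: each set bit marks one
logged XFS_BLF_CHUNK (128-byte) chunk of the buffer, so bit positions
and run lengths convert to byte offsets and sizes by shifting by
XFS_BLF_SHIFT. A worked example of the conversion:

    #include <assert.h>

    #define XFS_BLF_SHIFT 7    /* each map bit covers 128 bytes */

    int main(void)
    {
        int bit = 4, nbits = 3;    /* a run of 3 bits starting at bit 4 */

        assert((bit << XFS_BLF_SHIFT) == 512);     /* region offset */
        assert((nbits << XFS_BLF_SHIFT) == 384);   /* region length */
        return 0;
    }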
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184 static xfs_lsn_t
2185 xlog_recover_get_buf_lsn(
2186 struct xfs_mount *mp,
2187 struct xfs_buf *bp)
2188 {
2189 uint32_t magic32;
2190 uint16_t magic16;
2191 uint16_t magicda;
2192 void *blk = bp->b_addr;
2193 uuid_t *uuid;
2194 xfs_lsn_t lsn = -1;
2195
2196
2197 if (!xfs_sb_version_hascrc(&mp->m_sb))
2198 goto recover_immediately;
2199
2200 magic32 = be32_to_cpu(*(__be32 *)blk);
2201 switch (magic32) {
2202 case XFS_ABTB_CRC_MAGIC:
2203 case XFS_ABTC_CRC_MAGIC:
2204 case XFS_ABTB_MAGIC:
2205 case XFS_ABTC_MAGIC:
2206 case XFS_RMAP_CRC_MAGIC:
2207 case XFS_REFC_CRC_MAGIC:
2208 case XFS_IBT_CRC_MAGIC:
2209 case XFS_IBT_MAGIC: {
2210 struct xfs_btree_block *btb = blk;
2211
2212 lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
2213 uuid = &btb->bb_u.s.bb_uuid;
2214 break;
2215 }
2216 case XFS_BMAP_CRC_MAGIC:
2217 case XFS_BMAP_MAGIC: {
2218 struct xfs_btree_block *btb = blk;
2219
2220 lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
2221 uuid = &btb->bb_u.l.bb_uuid;
2222 break;
2223 }
2224 case XFS_AGF_MAGIC:
2225 lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
2226 uuid = &((struct xfs_agf *)blk)->agf_uuid;
2227 break;
2228 case XFS_AGFL_MAGIC:
2229 lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
2230 uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
2231 break;
2232 case XFS_AGI_MAGIC:
2233 lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
2234 uuid = &((struct xfs_agi *)blk)->agi_uuid;
2235 break;
2236 case XFS_SYMLINK_MAGIC:
2237 lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
2238 uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
2239 break;
2240 case XFS_DIR3_BLOCK_MAGIC:
2241 case XFS_DIR3_DATA_MAGIC:
2242 case XFS_DIR3_FREE_MAGIC:
2243 lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
2244 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
2245 break;
2246 case XFS_ATTR3_RMT_MAGIC:
2247
2248
2249
2250
2251
2252
2253
2254 goto recover_immediately;
2255 case XFS_SB_MAGIC:
2256
2257
2258
2259
2260
2261
2262
2263 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
2264 if (xfs_sb_version_hasmetauuid(&mp->m_sb))
2265 uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
2266 else
2267 uuid = &((struct xfs_dsb *)blk)->sb_uuid;
2268 break;
2269 default:
2270 break;
2271 }
2272
2273 if (lsn != (xfs_lsn_t)-1) {
2274 if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
2275 goto recover_immediately;
2276 return lsn;
2277 }
2278
2279 magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
2280 switch (magicda) {
2281 case XFS_DIR3_LEAF1_MAGIC:
2282 case XFS_DIR3_LEAFN_MAGIC:
2283 case XFS_DA3_NODE_MAGIC:
2284 lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
2285 uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
2286 break;
2287 default:
2288 break;
2289 }
2290
2291 if (lsn != (xfs_lsn_t)-1) {
2292 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2293 goto recover_immediately;
2294 return lsn;
2295 }
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308 magic16 = be16_to_cpu(*(__be16 *)blk);
2309 switch (magic16) {
2310 case XFS_DQUOT_MAGIC:
2311 case XFS_DINODE_MAGIC:
2312 goto recover_immediately;
2313 default:
2314 break;
2315 }
2316
2317
2318
2319 recover_immediately:
2320 return (xfs_lsn_t)-1;
2321
2322 }
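
Callers such as xlog_recover_buffer_pass2() use the returned LSN as a
replay gate: a buffer whose on-disk LSN already covers the transaction
being replayed must not be overwritten with older log contents. A
sketch of that decision (packed 64-bit LSNs compare numerically, which
is what XFS_LSN_CMP() amounts to):

    /* Replay unless the on-disk LSN proves the change already hit disk. */
    static int should_replay(long long disk_lsn, long long current_lsn)
    {
        if (disk_lsn == 0 || disk_lsn == -1)   /* unknown: replay now */
            return 1;
        return disk_lsn < current_lsn;
    }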
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332 static void
2333 xlog_recover_validate_buf_type(
2334 struct xfs_mount *mp,
2335 struct xfs_buf *bp,
2336 xfs_buf_log_format_t *buf_f,
2337 xfs_lsn_t current_lsn)
2338 {
2339 struct xfs_da_blkinfo *info = bp->b_addr;
2340 uint32_t magic32;
2341 uint16_t magic16;
2342 uint16_t magicda;
2343 char *warnmsg = NULL;
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353 if (!xfs_sb_version_hascrc(&mp->m_sb))
2354 return;
2355
2356 magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
2357 magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
2358 magicda = be16_to_cpu(info->magic);
2359 switch (xfs_blft_from_flags(buf_f)) {
2360 case XFS_BLFT_BTREE_BUF:
2361 switch (magic32) {
2362 case XFS_ABTB_CRC_MAGIC:
2363 case XFS_ABTB_MAGIC:
2364 bp->b_ops = &xfs_bnobt_buf_ops;
2365 break;
2366 case XFS_ABTC_CRC_MAGIC:
2367 case XFS_ABTC_MAGIC:
2368 bp->b_ops = &xfs_cntbt_buf_ops;
2369 break;
2370 case XFS_IBT_CRC_MAGIC:
2371 case XFS_IBT_MAGIC:
2372 bp->b_ops = &xfs_inobt_buf_ops;
2373 break;
2374 case XFS_FIBT_CRC_MAGIC:
2375 case XFS_FIBT_MAGIC:
2376 bp->b_ops = &xfs_finobt_buf_ops;
2377 break;
2378 case XFS_BMAP_CRC_MAGIC:
2379 case XFS_BMAP_MAGIC:
2380 bp->b_ops = &xfs_bmbt_buf_ops;
2381 break;
2382 case XFS_RMAP_CRC_MAGIC:
2383 bp->b_ops = &xfs_rmapbt_buf_ops;
2384 break;
2385 case XFS_REFC_CRC_MAGIC:
2386 bp->b_ops = &xfs_refcountbt_buf_ops;
2387 break;
2388 default:
2389 warnmsg = "Bad btree block magic!";
2390 break;
2391 }
2392 break;
2393 case XFS_BLFT_AGF_BUF:
2394 if (magic32 != XFS_AGF_MAGIC) {
2395 warnmsg = "Bad AGF block magic!";
2396 break;
2397 }
2398 bp->b_ops = &xfs_agf_buf_ops;
2399 break;
2400 case XFS_BLFT_AGFL_BUF:
2401 if (magic32 != XFS_AGFL_MAGIC) {
2402 warnmsg = "Bad AGFL block magic!";
2403 break;
2404 }
2405 bp->b_ops = &xfs_agfl_buf_ops;
2406 break;
2407 case XFS_BLFT_AGI_BUF:
2408 if (magic32 != XFS_AGI_MAGIC) {
2409 warnmsg = "Bad AGI block magic!";
2410 break;
2411 }
2412 bp->b_ops = &xfs_agi_buf_ops;
2413 break;
2414 case XFS_BLFT_UDQUOT_BUF:
2415 case XFS_BLFT_PDQUOT_BUF:
2416 case XFS_BLFT_GDQUOT_BUF:
2417 #ifdef CONFIG_XFS_QUOTA
2418 if (magic16 != XFS_DQUOT_MAGIC) {
2419 warnmsg = "Bad DQUOT block magic!";
2420 break;
2421 }
2422 bp->b_ops = &xfs_dquot_buf_ops;
2423 #else
2424 xfs_alert(mp,
2425 "Trying to recover dquots without QUOTA support built in!");
2426 ASSERT(0);
2427 #endif
2428 break;
2429 case XFS_BLFT_DINO_BUF:
2430 if (magic16 != XFS_DINODE_MAGIC) {
2431 warnmsg = "Bad INODE block magic!";
2432 break;
2433 }
2434 bp->b_ops = &xfs_inode_buf_ops;
2435 break;
2436 case XFS_BLFT_SYMLINK_BUF:
2437 if (magic32 != XFS_SYMLINK_MAGIC) {
2438 warnmsg = "Bad symlink block magic!";
2439 break;
2440 }
2441 bp->b_ops = &xfs_symlink_buf_ops;
2442 break;
2443 case XFS_BLFT_DIR_BLOCK_BUF:
2444 if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
2445 magic32 != XFS_DIR3_BLOCK_MAGIC) {
2446 warnmsg = "Bad dir block magic!";
2447 break;
2448 }
2449 bp->b_ops = &xfs_dir3_block_buf_ops;
2450 break;
2451 case XFS_BLFT_DIR_DATA_BUF:
2452 if (magic32 != XFS_DIR2_DATA_MAGIC &&
2453 magic32 != XFS_DIR3_DATA_MAGIC) {
2454 warnmsg = "Bad dir data magic!";
2455 break;
2456 }
2457 bp->b_ops = &xfs_dir3_data_buf_ops;
2458 break;
2459 case XFS_BLFT_DIR_FREE_BUF:
2460 if (magic32 != XFS_DIR2_FREE_MAGIC &&
2461 magic32 != XFS_DIR3_FREE_MAGIC) {
2462 warnmsg = "Bad dir3 free magic!";
2463 break;
2464 }
2465 bp->b_ops = &xfs_dir3_free_buf_ops;
2466 break;
2467 case XFS_BLFT_DIR_LEAF1_BUF:
2468 if (magicda != XFS_DIR2_LEAF1_MAGIC &&
2469 magicda != XFS_DIR3_LEAF1_MAGIC) {
2470 warnmsg = "Bad dir leaf1 magic!";
2471 break;
2472 }
2473 bp->b_ops = &xfs_dir3_leaf1_buf_ops;
2474 break;
2475 case XFS_BLFT_DIR_LEAFN_BUF:
2476 if (magicda != XFS_DIR2_LEAFN_MAGIC &&
2477 magicda != XFS_DIR3_LEAFN_MAGIC) {
2478 warnmsg = "Bad dir leafn magic!";
2479 break;
2480 }
2481 bp->b_ops = &xfs_dir3_leafn_buf_ops;
2482 break;
2483 case XFS_BLFT_DA_NODE_BUF:
2484 if (magicda != XFS_DA_NODE_MAGIC &&
2485 magicda != XFS_DA3_NODE_MAGIC) {
2486 warnmsg = "Bad da node magic!";
2487 break;
2488 }
2489 bp->b_ops = &xfs_da3_node_buf_ops;
2490 break;
2491 case XFS_BLFT_ATTR_LEAF_BUF:
2492 if (magicda != XFS_ATTR_LEAF_MAGIC &&
2493 magicda != XFS_ATTR3_LEAF_MAGIC) {
2494 warnmsg = "Bad attr leaf magic!";
2495 break;
2496 }
2497 bp->b_ops = &xfs_attr3_leaf_buf_ops;
2498 break;
2499 case XFS_BLFT_ATTR_RMT_BUF:
2500 if (magic32 != XFS_ATTR3_RMT_MAGIC) {
2501 warnmsg = "Bad attr remote magic!";
2502 break;
2503 }
2504 bp->b_ops = &xfs_attr3_rmt_buf_ops;
2505 break;
2506 case XFS_BLFT_SB_BUF:
2507 if (magic32 != XFS_SB_MAGIC) {
2508 warnmsg = "Bad SB block magic!";
2509 break;
2510 }
2511 bp->b_ops = &xfs_sb_buf_ops;
2512 break;
2513 #ifdef CONFIG_XFS_RT
2514 case XFS_BLFT_RTBITMAP_BUF:
2515 case XFS_BLFT_RTSUMMARY_BUF:
2516
2517 bp->b_ops = &xfs_rtbuf_ops;
2518 break;
2519 #endif
2520 default:
2521 xfs_warn(mp, "Unknown buffer type %d!",
2522 xfs_blft_from_flags(buf_f));
2523 break;
2524 }
2525
2526
2527
2528
2529
2530
2531 if (current_lsn == NULLCOMMITLSN)
2532 return;
2533
2534 if (warnmsg) {
2535 xfs_warn(mp, warnmsg);
2536 ASSERT(0);
2537 }
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550 if (bp->b_ops) {
2551 struct xfs_buf_log_item *bip;
2552
2553 ASSERT(!bp->b_iodone || bp->b_iodone == xlog_recover_iodone);
2554 bp->b_iodone = xlog_recover_iodone;
2555 xfs_buf_item_init(bp, mp);
2556 bip = bp->b_log_item;
2557 bip->bli_item.li_lsn = current_lsn;
2558 }
2559 }
2560
2561
2562
2563
2564
2565
2566
2567 STATIC void
2568 xlog_recover_do_reg_buffer(
2569 struct xfs_mount *mp,
2570 xlog_recover_item_t *item,
2571 struct xfs_buf *bp,
2572 xfs_buf_log_format_t *buf_f,
2573 xfs_lsn_t current_lsn)
2574 {
2575 int i;
2576 int bit;
2577 int nbits;
2578 xfs_failaddr_t fa;
2579
2580 trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
2581
2582 bit = 0;
2583 i = 1;
2584 while (1) {
2585 bit = xfs_next_bit(buf_f->blf_data_map,
2586 buf_f->blf_map_size, bit);
2587 if (bit == -1)
2588 break;
2589 nbits = xfs_contig_bits(buf_f->blf_data_map,
2590 buf_f->blf_map_size, bit);
2591 ASSERT(nbits > 0);
2592 ASSERT(item->ri_buf[i].i_addr != NULL);
2593 ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
2594 ASSERT(BBTOB(bp->b_length) >=
2595 ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605 if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
2606 nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
2607
2608
2609
2610
2611
2612
2613 fa = NULL;
2614 if (buf_f->blf_flags &
2615 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2616 if (item->ri_buf[i].i_addr == NULL) {
2617 xfs_alert(mp,
2618 "XFS: NULL dquot in %s.", __func__);
2619 goto next;
2620 }
2621 if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
2622 xfs_alert(mp,
2623 "XFS: dquot too small (%d) in %s.",
2624 item->ri_buf[i].i_len, __func__);
2625 goto next;
2626 }
2627 fa = xfs_dquot_verify(mp, item->ri_buf[i].i_addr,
2628 -1, 0);
2629 if (fa) {
2630 xfs_alert(mp,
2631 "dquot corrupt at %pS trying to replay into block 0x%llx",
2632 fa, bp->b_bn);
2633 goto next;
2634 }
2635 }
2636
2637 memcpy(xfs_buf_offset(bp,
2638 (uint)bit << XFS_BLF_SHIFT),
2639 item->ri_buf[i].i_addr,
2640 nbits<<XFS_BLF_SHIFT);
2641 next:
2642 i++;
2643 bit += nbits;
2644 }
2645
2646
2647 ASSERT(i == item->ri_total);
2648
2649 xlog_recover_validate_buf_type(mp, bp, buf_f, current_lsn);
2650 }
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661 STATIC bool
2662 xlog_recover_do_dquot_buffer(
2663 struct xfs_mount *mp,
2664 struct xlog *log,
2665 struct xlog_recover_item *item,
2666 struct xfs_buf *bp,
2667 struct xfs_buf_log_format *buf_f)
2668 {
2669 uint type;
2670
2671 trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2672
2673
2674
2675
2676 if (!mp->m_qflags)
2677 return false;
2678
2679 type = 0;
2680 if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2681 type |= XFS_DQ_USER;
2682 if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2683 type |= XFS_DQ_PROJ;
2684 if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2685 type |= XFS_DQ_GROUP;
2686
2687
2688
2689 if (log->l_quotaoffs_flag & type)
2690 return false;
2691
2692 xlog_recover_do_reg_buffer(mp, item, bp, buf_f, NULLCOMMITLSN);
2693 return true;
2694 }
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719 STATIC int
2720 xlog_recover_buffer_pass2(
2721 struct xlog *log,
2722 struct list_head *buffer_list,
2723 struct xlog_recover_item *item,
2724 xfs_lsn_t current_lsn)
2725 {
2726 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
2727 xfs_mount_t *mp = log->l_mp;
2728 xfs_buf_t *bp;
2729 int error;
2730 uint buf_flags;
2731 xfs_lsn_t lsn;
2732
2733
2734
2735
2736
2737 if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2738 buf_f->blf_len, buf_f->blf_flags)) {
2739 trace_xfs_log_recover_buf_cancel(log, buf_f);
2740 return 0;
2741 }
2742
2743 trace_xfs_log_recover_buf_recover(log, buf_f);
2744
2745 buf_flags = 0;
2746 if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2747 buf_flags |= XBF_UNMAPPED;
2748
2749 bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2750 buf_flags, NULL);
2751 if (!bp)
2752 return -ENOMEM;
2753 error = bp->b_error;
2754 if (error) {
2755 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2756 goto out_release;
2757 }
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778 lsn = xlog_recover_get_buf_lsn(mp, bp);
2779 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
2780 trace_xfs_log_recover_buf_skip(log, buf_f);
2781 xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
2782 goto out_release;
2783 }
2784
2785 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2786 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2787 if (error)
2788 goto out_release;
2789 } else if (buf_f->blf_flags &
2790 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2791 bool dirty;
2792
2793 dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2794 if (!dirty)
2795 goto out_release;
2796 } else {
2797 xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
2798 }
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815 if (XFS_DINODE_MAGIC ==
2816 be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2817 (BBTOB(bp->b_length) != M_IGEO(log->l_mp)->inode_cluster_size)) {
2818 xfs_buf_stale(bp);
2819 error = xfs_bwrite(bp);
2820 } else {
2821 ASSERT(bp->b_mount == mp);
2822 bp->b_iodone = xlog_recover_iodone;
2823 xfs_buf_delwri_queue(bp, buffer_list);
2824 }
2825
2826 out_release:
2827 xfs_buf_relse(bp);
2828 return error;
2829 }
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861 STATIC int
2862 xfs_recover_inode_owner_change(
2863 struct xfs_mount *mp,
2864 struct xfs_dinode *dip,
2865 struct xfs_inode_log_format *in_f,
2866 struct list_head *buffer_list)
2867 {
2868 struct xfs_inode *ip;
2869 int error;
2870
2871 ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));
2872
2873 ip = xfs_inode_alloc(mp, in_f->ilf_ino);
2874 if (!ip)
2875 return -ENOMEM;
2876
2877
2878 xfs_inode_from_disk(ip, dip);
2879 ASSERT(ip->i_d.di_version >= 3);
2880
2881 error = xfs_iformat_fork(ip, dip);
2882 if (error)
2883 goto out_free_ip;
2884
2885 if (!xfs_inode_verify_forks(ip)) {
2886 error = -EFSCORRUPTED;
2887 goto out_free_ip;
2888 }
2889
2890 if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
2891 ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
2892 error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
2893 ip->i_ino, buffer_list);
2894 if (error)
2895 goto out_free_ip;
2896 }
2897
2898 if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
2899 ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
2900 error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
2901 ip->i_ino, buffer_list);
2902 if (error)
2903 goto out_free_ip;
2904 }
2905
2906 out_free_ip:
2907 xfs_inode_free(ip);
2908 return error;
2909 }
2910
2911 STATIC int
2912 xlog_recover_inode_pass2(
2913 struct xlog *log,
2914 struct list_head *buffer_list,
2915 struct xlog_recover_item *item,
2916 xfs_lsn_t current_lsn)
2917 {
2918 struct xfs_inode_log_format *in_f;
2919 xfs_mount_t *mp = log->l_mp;
2920 xfs_buf_t *bp;
2921 xfs_dinode_t *dip;
2922 int len;
2923 char *src;
2924 char *dest;
2925 int error;
2926 int attr_index;
2927 uint fields;
2928 struct xfs_log_dinode *ldip;
2929 uint isize;
2930 int need_free = 0;
2931
2932 if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
2933 in_f = item->ri_buf[0].i_addr;
2934 } else {
2935 in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), 0);
2936 need_free = 1;
2937 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2938 if (error)
2939 goto error;
2940 }
2941
/*
 * Inode buffers can be freed, look out for it,
 * and do not replay the inode.
 */
2946 if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2947 in_f->ilf_len, 0)) {
2948 error = 0;
2949 trace_xfs_log_recover_inode_cancel(log, in_f);
2950 goto error;
2951 }
2952 trace_xfs_log_recover_inode_recover(log, in_f);
2953
2954 bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
2955 &xfs_inode_buf_ops);
2956 if (!bp) {
2957 error = -ENOMEM;
2958 goto error;
2959 }
2960 error = bp->b_error;
2961 if (error) {
2962 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
2963 goto out_release;
2964 }
2965 ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2966 dip = xfs_buf_offset(bp, in_f->ilf_boffset);
2967
/*
 * Make sure the place we're flushing out to really looks
 * like an inode!
 */
2972 if (unlikely(!xfs_verify_magic16(bp, dip->di_magic))) {
2973 xfs_alert(mp,
2974 "%s: Bad inode magic number, dip = "PTR_FMT", dino bp = "PTR_FMT", ino = %Ld",
2975 __func__, dip, bp, in_f->ilf_ino);
2976 XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
2977 XFS_ERRLEVEL_LOW, mp);
2978 error = -EFSCORRUPTED;
2979 goto out_release;
2980 }
2981 ldip = item->ri_buf[1].i_addr;
2982 if (unlikely(ldip->di_magic != XFS_DINODE_MAGIC)) {
2983 xfs_alert(mp,
2984 "%s: Bad inode log record, rec ptr "PTR_FMT", ino %Ld",
2985 __func__, item, in_f->ilf_ino);
2986 XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
2987 XFS_ERRLEVEL_LOW, mp);
2988 error = -EFSCORRUPTED;
2989 goto out_release;
2990 }
2991
/*
 * If the inode has an LSN in it, recover the inode only if it's less
 * than the lsn of the transaction we are replaying. Note: we still
 * need to replay an owner change even though the inode is more recent
 * than the transaction as there is no guarantee that all the btree
 * blocks are more recent than this transaction, too.
 */
2999 if (dip->di_version >= 3) {
3000 xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn);
3001
3002 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3003 trace_xfs_log_recover_inode_skip(log, in_f);
3004 error = 0;
3005 goto out_owner_change;
3006 }
3007 }
3008
/*
 * di_flushiter is only valid for v1/2 inodes. All changes for v3
 * inodes are transactional and if ordering is necessary we can
 * determine that using the log record header's LSN.
 *
 * Skip replay when the on-disk inode is newer than the log copy.
 */
3017 if (!xfs_sb_version_hascrc(&mp->m_sb) &&
3018 ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
/*
 * Deal with the wrap case, DI_MAX_FLUSH is less
 * than smaller numbers
 */
3023 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
3024 ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
/* do nothing */
3026 } else {
3027 trace_xfs_log_recover_inode_skip(log, in_f);
3028 error = 0;
3029 goto out_release;
3030 }
3031 }
3032
/* Take the opportunity to reset the flush iteration count */
3034 ldip->di_flushiter = 0;
3035
3036 if (unlikely(S_ISREG(ldip->di_mode))) {
3037 if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
3038 (ldip->di_format != XFS_DINODE_FMT_BTREE)) {
3039 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
3040 XFS_ERRLEVEL_LOW, mp, ldip,
3041 sizeof(*ldip));
3042 xfs_alert(mp,
3043 "%s: Bad regular inode log record, rec ptr "PTR_FMT", "
3044 "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
3045 __func__, item, dip, bp, in_f->ilf_ino);
3046 error = -EFSCORRUPTED;
3047 goto out_release;
3048 }
3049 } else if (unlikely(S_ISDIR(ldip->di_mode))) {
3050 if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
3051 (ldip->di_format != XFS_DINODE_FMT_BTREE) &&
3052 (ldip->di_format != XFS_DINODE_FMT_LOCAL)) {
3053 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
3054 XFS_ERRLEVEL_LOW, mp, ldip,
3055 sizeof(*ldip));
3056 xfs_alert(mp,
3057 "%s: Bad dir inode log record, rec ptr "PTR_FMT", "
3058 "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
3059 __func__, item, dip, bp, in_f->ilf_ino);
3060 error = -EFSCORRUPTED;
3061 goto out_release;
3062 }
3063 }
3064 if (unlikely(ldip->di_nextents + ldip->di_anextents > ldip->di_nblocks)){
3065 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
3066 XFS_ERRLEVEL_LOW, mp, ldip,
3067 sizeof(*ldip));
3068 xfs_alert(mp,
3069 "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
3070 "dino bp "PTR_FMT", ino %Ld, total extents = %d, nblocks = %Ld",
3071 __func__, item, dip, bp, in_f->ilf_ino,
3072 ldip->di_nextents + ldip->di_anextents,
3073 ldip->di_nblocks);
3074 error = -EFSCORRUPTED;
3075 goto out_release;
3076 }
3077 if (unlikely(ldip->di_forkoff > mp->m_sb.sb_inodesize)) {
3078 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
3079 XFS_ERRLEVEL_LOW, mp, ldip,
3080 sizeof(*ldip));
3081 xfs_alert(mp,
3082 "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
3083 "dino bp "PTR_FMT", ino %Ld, forkoff 0x%x", __func__,
3084 item, dip, bp, in_f->ilf_ino, ldip->di_forkoff);
3085 error = -EFSCORRUPTED;
3086 goto out_release;
3087 }
3088 isize = xfs_log_dinode_size(ldip->di_version);
3089 if (unlikely(item->ri_buf[1].i_len > isize)) {
3090 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
3091 XFS_ERRLEVEL_LOW, mp, ldip,
3092 sizeof(*ldip));
3093 xfs_alert(mp,
3094 "%s: Bad inode log record length %d, rec ptr "PTR_FMT,
3095 __func__, item->ri_buf[1].i_len, item);
3096 error = -EFSCORRUPTED;
3097 goto out_release;
3098 }
3099
/* recover the log dinode into the on-disk inode */
3101 xfs_log_dinode_to_disk(ldip, dip);
3102
3103 fields = in_f->ilf_fields;
3104 if (fields & XFS_ILOG_DEV)
3105 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
3106
3107 if (in_f->ilf_size == 2)
3108 goto out_owner_change;
3109 len = item->ri_buf[2].i_len;
3110 src = item->ri_buf[2].i_addr;
3111 ASSERT(in_f->ilf_size <= 4);
3112 ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
3113 ASSERT(!(fields & XFS_ILOG_DFORK) ||
3114 (len == in_f->ilf_dsize));
3115
3116 switch (fields & XFS_ILOG_DFORK) {
3117 case XFS_ILOG_DDATA:
3118 case XFS_ILOG_DEXT:
3119 memcpy(XFS_DFORK_DPTR(dip), src, len);
3120 break;
3121
3122 case XFS_ILOG_DBROOT:
3123 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
3124 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
3125 XFS_DFORK_DSIZE(dip, mp));
3126 break;
3127
3128 default:
/*
 * There are no data fork flags set.
 */
3132 ASSERT((fields & XFS_ILOG_DFORK) == 0);
3133 break;
3134 }
3135
/*
 * If we logged any attribute data, recover it. There may or
 * may not have been any other non-core data logged in this
 * record.
 */
3141 if (in_f->ilf_fields & XFS_ILOG_AFORK) {
3142 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
3143 attr_index = 3;
3144 } else {
3145 attr_index = 2;
3146 }
3147 len = item->ri_buf[attr_index].i_len;
3148 src = item->ri_buf[attr_index].i_addr;
3149 ASSERT(len == in_f->ilf_asize);
3150
3151 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
3152 case XFS_ILOG_ADATA:
3153 case XFS_ILOG_AEXT:
3154 dest = XFS_DFORK_APTR(dip);
3155 ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
3156 memcpy(dest, src, len);
3157 break;
3158
3159 case XFS_ILOG_ABROOT:
3160 dest = XFS_DFORK_APTR(dip);
3161 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
3162 len, (xfs_bmdr_block_t*)dest,
3163 XFS_DFORK_ASIZE(dip, mp));
3164 break;
3165
3166 default:
3167 xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
3168 ASSERT(0);
3169 error = -EIO;
3170 goto out_release;
3171 }
3172 }
3173
3174 out_owner_change:
/* Recover the swapext owner change unless inode has been deleted */
3176 if ((in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER)) &&
3177 (dip->di_mode != 0))
3178 error = xfs_recover_inode_owner_change(mp, dip, in_f,
3179 buffer_list);

/* re-generate the checksum */
3181 xfs_dinode_calc_crc(log->l_mp, dip);
3182
3183 ASSERT(bp->b_mount == mp);
3184 bp->b_iodone = xlog_recover_iodone;
3185 xfs_buf_delwri_queue(bp, buffer_list);
3186
3187 out_release:
3188 xfs_buf_relse(bp);
3189 error:
3190 if (need_free)
3191 kmem_free(in_f);
3192 return error;
3193 }
3194
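/*
 * Recover QUOTAOFF records. We simply make a note of it in the xlog
 * structure, so that we know not to do any dquot item or dquot buffer
 * recovery of that type.
 */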
3200 STATIC int
3201 xlog_recover_quotaoff_pass1(
3202 struct xlog *log,
3203 struct xlog_recover_item *item)
3204 {
3205 xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr;
3206 ASSERT(qoff_f);

/*
 * The logitem format's flag tells us if this was user quotaoff,
 * group/project quotaoff or both.
 */
3212 if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
3213 log->l_quotaoffs_flag |= XFS_DQ_USER;
3214 if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
3215 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
3216 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
3217 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
3218
3219 return 0;
3220 }

/*
 * Recover a dquot record.
 */
3225 STATIC int
3226 xlog_recover_dquot_pass2(
3227 struct xlog *log,
3228 struct list_head *buffer_list,
3229 struct xlog_recover_item *item,
3230 xfs_lsn_t current_lsn)
3231 {
3232 xfs_mount_t *mp = log->l_mp;
3233 xfs_buf_t *bp;
3234 struct xfs_disk_dquot *ddq, *recddq;
3235 xfs_failaddr_t fa;
3236 int error;
3237 xfs_dq_logformat_t *dq_f;
3238 uint type;
3239
/*
 * Filesystems are required to send in quota flags at mount time.
 */
3244 if (mp->m_qflags == 0)
3245 return 0;
3246
3247 recddq = item->ri_buf[1].i_addr;
3248 if (recddq == NULL) {
3249 xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
3250 return -EIO;
3251 }
3252 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
3253 xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
3254 item->ri_buf[1].i_len, __func__);
3255 return -EIO;
3256 }

/*
 * This type of quotas was turned off, so ignore this record.
 */
3261 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3262 ASSERT(type);
3263 if (log->l_quotaoffs_flag & type)
3264 return 0;
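/*
 * At this point we know that quota was _not_ turned off. Since the
 * mount flags are not indicating to us otherwise, this must mean that
 * quota is on, and the dquot needs to be replayed. Remember that we
 * may not have fully recovered the superblock yet, so we can't do the
 * usual trick of looking at the SB quota bits.
 *
 * The other possibility, of course, is that the quota subsystem was
 * removed since the last mount - ENOSYS.
 */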
3276 dq_f = item->ri_buf[0].i_addr;
3277 ASSERT(dq_f);
3278 fa = xfs_dquot_verify(mp, recddq, dq_f->qlf_id, 0);
3279 if (fa) {
3280 xfs_alert(mp, "corrupt dquot ID 0x%x in log at %pS",
3281 dq_f->qlf_id, fa);
3282 return -EIO;
3283 }
3284 ASSERT(dq_f->qlf_len == 1);

/*
 * At this point we are assuming that the dquots have been allocated
 * and hence the buffer has valid dquots stamped in it. It should,
 * therefore, pass verifier validation. If the dquot is bad, the
 * verifier will return an error here, so there is no need to check
 * the dquot in the buffer again after the read.
 */
3293 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
3294 XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
3295 &xfs_dquot_buf_ops);
3296 if (error)
3297 return error;
3298
3299 ASSERT(bp);
3300 ddq = xfs_buf_offset(bp, dq_f->qlf_boffset);
3301
/*
 * If the dquot has an LSN in it, recover the dquot only if it's less
 * than the lsn of the transaction we are replaying.
 */
3306 if (xfs_sb_version_hascrc(&mp->m_sb)) {
3307 struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
3308 xfs_lsn_t lsn = be64_to_cpu(dqb->dd_lsn);
3309
3310 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3311 goto out_release;
3312 }
3313 }
3314
3315 memcpy(ddq, recddq, item->ri_buf[1].i_len);
3316 if (xfs_sb_version_hascrc(&mp->m_sb)) {
3317 xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
3318 XFS_DQUOT_CRC_OFF);
3319 }
3320
3321 ASSERT(dq_f->qlf_size == 2);
3322 ASSERT(bp->b_mount == mp);
3323 bp->b_iodone = xlog_recover_iodone;
3324 xfs_buf_delwri_queue(bp, buffer_list);
3325
3326 out_release:
3327 xfs_buf_relse(bp);
3328 return 0;
3329 }
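/*
 * This routine is called to create an in-core extent free intent item
 * from the efi format structure which was logged on disk. It allocates
 * an in-core efi, copies the extents from the format structure into it,
 * and adds the efi to the AIL with the given LSN.
 */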
3338 STATIC int
3339 xlog_recover_efi_pass2(
3340 struct xlog *log,
3341 struct xlog_recover_item *item,
3342 xfs_lsn_t lsn)
3343 {
3344 int error;
3345 struct xfs_mount *mp = log->l_mp;
3346 struct xfs_efi_log_item *efip;
3347 struct xfs_efi_log_format *efi_formatp;
3348
3349 efi_formatp = item->ri_buf[0].i_addr;
3350
3351 efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
3352 error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format);
3353 if (error) {
3354 xfs_efi_item_free(efip);
3355 return error;
3356 }
3357 atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
3358
3359 spin_lock(&log->l_ailp->ail_lock);
/*
 * The EFI has two references. One for the EFD and one for EFI to ensure
 * it makes it into the AIL. Insert the EFI into the AIL directly and
 * drop the EFI reference. Note that xfs_trans_ail_update() drops the
 * AIL lock.
 */
3366 xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
3367 xfs_efi_release(efip);
3368 return 0;
3369 }
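/*
 * This routine is called when an EFD format structure is found in a
 * committed transaction in the log. Its purpose is to cancel the
 * corresponding EFI if it was still in the log. To do this it searches
 * the AIL for the EFI with an id equal to that in the EFD format
 * structure. If we find it we drop the EFD reference, which removes the
 * EFI from the AIL and frees it.
 */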
3379 STATIC int
3380 xlog_recover_efd_pass2(
3381 struct xlog *log,
3382 struct xlog_recover_item *item)
3383 {
3384 xfs_efd_log_format_t *efd_formatp;
3385 xfs_efi_log_item_t *efip = NULL;
3386 struct xfs_log_item *lip;
3387 uint64_t efi_id;
3388 struct xfs_ail_cursor cur;
3389 struct xfs_ail *ailp = log->l_ailp;
3390
3391 efd_formatp = item->ri_buf[0].i_addr;
3392 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
3393 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
3394 (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
3395 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
3396 efi_id = efd_formatp->efd_efi_id;
3397
/*
 * Search for the EFI with the id in the EFD format structure in the
 * AIL.
 */
3402 spin_lock(&ailp->ail_lock);
3403 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3404 while (lip != NULL) {
3405 if (lip->li_type == XFS_LI_EFI) {
3406 efip = (xfs_efi_log_item_t *)lip;
3407 if (efip->efi_format.efi_id == efi_id) {
/*
 * Drop the EFD reference to the EFI. This removes the EFI from
 * the AIL and frees it.
 */
3412 spin_unlock(&ailp->ail_lock);
3413 xfs_efi_release(efip);
3414 spin_lock(&ailp->ail_lock);
3415 break;
3416 }
3417 }
3418 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3419 }
3420
3421 xfs_trans_ail_cursor_done(&cur);
3422 spin_unlock(&ailp->ail_lock);
3423
3424 return 0;
3425 }
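/*
 * This routine is called to create an in-core rmap update intent item
 * from the rui format structure which was logged on disk. It allocates
 * an in-core rui, copies the extents from the format structure into it,
 * and adds the rui to the AIL with the given LSN.
 */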
3434 STATIC int
3435 xlog_recover_rui_pass2(
3436 struct xlog *log,
3437 struct xlog_recover_item *item,
3438 xfs_lsn_t lsn)
3439 {
3440 int error;
3441 struct xfs_mount *mp = log->l_mp;
3442 struct xfs_rui_log_item *ruip;
3443 struct xfs_rui_log_format *rui_formatp;
3444
3445 rui_formatp = item->ri_buf[0].i_addr;
3446
3447 ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
3448 error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format);
3449 if (error) {
3450 xfs_rui_item_free(ruip);
3451 return error;
3452 }
3453 atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
3454
3455 spin_lock(&log->l_ailp->ail_lock);
/*
 * The RUI has two references. One for the RUD and one for RUI to ensure
 * it makes it into the AIL. Insert the RUI into the AIL directly and
 * drop the RUI reference. Note that xfs_trans_ail_update() drops the
 * AIL lock.
 */
3462 xfs_trans_ail_update(log->l_ailp, &ruip->rui_item, lsn);
3463 xfs_rui_release(ruip);
3464 return 0;
3465 }
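/*
 * This routine is called when an RUD format structure is found in a
 * committed transaction in the log. Its purpose is to cancel the
 * corresponding RUI if it was still in the log. To do this it searches
 * the AIL for the RUI with an id equal to that in the RUD format
 * structure. If we find it we drop the RUD reference, which removes the
 * RUI from the AIL and frees it.
 */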
3475 STATIC int
3476 xlog_recover_rud_pass2(
3477 struct xlog *log,
3478 struct xlog_recover_item *item)
3479 {
3480 struct xfs_rud_log_format *rud_formatp;
3481 struct xfs_rui_log_item *ruip = NULL;
3482 struct xfs_log_item *lip;
3483 uint64_t rui_id;
3484 struct xfs_ail_cursor cur;
3485 struct xfs_ail *ailp = log->l_ailp;
3486
3487 rud_formatp = item->ri_buf[0].i_addr;
3488 ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));
3489 rui_id = rud_formatp->rud_rui_id;

/*
 * Search for the RUI with the id in the RUD format structure in the
 * AIL.
 */
3495 spin_lock(&ailp->ail_lock);
3496 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3497 while (lip != NULL) {
3498 if (lip->li_type == XFS_LI_RUI) {
3499 ruip = (struct xfs_rui_log_item *)lip;
3500 if (ruip->rui_format.rui_id == rui_id) {
/*
 * Drop the RUD reference to the RUI. This removes the RUI
 * from the AIL and frees it.
 */
3505 spin_unlock(&ailp->ail_lock);
3506 xfs_rui_release(ruip);
3507 spin_lock(&ailp->ail_lock);
3508 break;
3509 }
3510 }
3511 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3512 }
3513
3514 xfs_trans_ail_cursor_done(&cur);
3515 spin_unlock(&ailp->ail_lock);
3516
3517 return 0;
3518 }

/*
 * Copy a CUI format buffer from the given buf, and into the destination
 * CUI format structure. The CUI/CUD items were designed not to need any
 * special alignment handling.
 */
3525 static int
3526 xfs_cui_copy_format(
3527 struct xfs_log_iovec *buf,
3528 struct xfs_cui_log_format *dst_cui_fmt)
3529 {
3530 struct xfs_cui_log_format *src_cui_fmt;
3531 uint len;
3532
3533 src_cui_fmt = buf->i_addr;
3534 len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents);
3535
3536 if (buf->i_len == len) {
3537 memcpy(dst_cui_fmt, src_cui_fmt, len);
3538 return 0;
3539 }
3540 return -EFSCORRUPTED;
3541 }
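/*
 * This routine is called to create an in-core refcount update intent
 * item from the cui format structure which was logged on disk. It
 * allocates an in-core cui, copies the extents from the format
 * structure into it, and adds the cui to the AIL with the given LSN.
 */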
3550 STATIC int
3551 xlog_recover_cui_pass2(
3552 struct xlog *log,
3553 struct xlog_recover_item *item,
3554 xfs_lsn_t lsn)
3555 {
3556 int error;
3557 struct xfs_mount *mp = log->l_mp;
3558 struct xfs_cui_log_item *cuip;
3559 struct xfs_cui_log_format *cui_formatp;
3560
3561 cui_formatp = item->ri_buf[0].i_addr;
3562
3563 cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
3564 error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format);
3565 if (error) {
3566 xfs_cui_item_free(cuip);
3567 return error;
3568 }
3569 atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
3570
3571 spin_lock(&log->l_ailp->ail_lock);
/*
 * The CUI has two references. One for the CUD and one for CUI to ensure
 * it makes it into the AIL. Insert the CUI into the AIL directly and
 * drop the CUI reference. Note that xfs_trans_ail_update() drops the
 * AIL lock.
 */
3578 xfs_trans_ail_update(log->l_ailp, &cuip->cui_item, lsn);
3579 xfs_cui_release(cuip);
3580 return 0;
3581 }
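/*
 * This routine is called when a CUD format structure is found in a
 * committed transaction in the log. Its purpose is to cancel the
 * corresponding CUI if it was still in the log. To do this it searches
 * the AIL for the CUI with an id equal to that in the CUD format
 * structure. If we find it we drop the CUD reference, which removes the
 * CUI from the AIL and frees it.
 */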
3591 STATIC int
3592 xlog_recover_cud_pass2(
3593 struct xlog *log,
3594 struct xlog_recover_item *item)
3595 {
3596 struct xfs_cud_log_format *cud_formatp;
3597 struct xfs_cui_log_item *cuip = NULL;
3598 struct xfs_log_item *lip;
3599 uint64_t cui_id;
3600 struct xfs_ail_cursor cur;
3601 struct xfs_ail *ailp = log->l_ailp;
3602
3603 cud_formatp = item->ri_buf[0].i_addr;
3604 if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format))
3605 return -EFSCORRUPTED;
3606 cui_id = cud_formatp->cud_cui_id;

/*
 * Search for the CUI with the id in the CUD format structure in the
 * AIL.
 */
3612 spin_lock(&ailp->ail_lock);
3613 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3614 while (lip != NULL) {
3615 if (lip->li_type == XFS_LI_CUI) {
3616 cuip = (struct xfs_cui_log_item *)lip;
3617 if (cuip->cui_format.cui_id == cui_id) {
/*
 * Drop the CUD reference to the CUI. This removes the CUI
 * from the AIL and frees it.
 */
3622 spin_unlock(&ailp->ail_lock);
3623 xfs_cui_release(cuip);
3624 spin_lock(&ailp->ail_lock);
3625 break;
3626 }
3627 }
3628 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3629 }
3630
3631 xfs_trans_ail_cursor_done(&cur);
3632 spin_unlock(&ailp->ail_lock);
3633
3634 return 0;
3635 }

/*
 * Copy a BUI format buffer from the given buf, and into the destination
 * BUI format structure. The BUI/BUD items were designed not to need any
 * special alignment handling.
 */
3642 static int
3643 xfs_bui_copy_format(
3644 struct xfs_log_iovec *buf,
3645 struct xfs_bui_log_format *dst_bui_fmt)
3646 {
3647 struct xfs_bui_log_format *src_bui_fmt;
3648 uint len;
3649
3650 src_bui_fmt = buf->i_addr;
3651 len = xfs_bui_log_format_sizeof(src_bui_fmt->bui_nextents);
3652
3653 if (buf->i_len == len) {
3654 memcpy(dst_bui_fmt, src_bui_fmt, len);
3655 return 0;
3656 }
3657 return -EFSCORRUPTED;
3658 }
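/*
 * This routine is called to create an in-core bmap update intent item
 * from the bui format structure which was logged on disk. It allocates
 * an in-core bui, copies the extents from the format structure into it,
 * and adds the bui to the AIL with the given LSN.
 */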
3667 STATIC int
3668 xlog_recover_bui_pass2(
3669 struct xlog *log,
3670 struct xlog_recover_item *item,
3671 xfs_lsn_t lsn)
3672 {
3673 int error;
3674 struct xfs_mount *mp = log->l_mp;
3675 struct xfs_bui_log_item *buip;
3676 struct xfs_bui_log_format *bui_formatp;
3677
3678 bui_formatp = item->ri_buf[0].i_addr;
3679
3680 if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS)
3681 return -EFSCORRUPTED;
3682 buip = xfs_bui_init(mp);
3683 error = xfs_bui_copy_format(&item->ri_buf[0], &buip->bui_format);
3684 if (error) {
3685 xfs_bui_item_free(buip);
3686 return error;
3687 }
3688 atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents);
3689
3690 spin_lock(&log->l_ailp->ail_lock);
/*
 * The BUI has two references. One for the BUD and one for BUI to ensure
 * it makes it into the AIL. Insert the BUI into the AIL directly and
 * drop the BUI reference. Note that xfs_trans_ail_update() drops the
 * AIL lock.
 */
3697 xfs_trans_ail_update(log->l_ailp, &buip->bui_item, lsn);
3698 xfs_bui_release(buip);
3699 return 0;
3700 }
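/*
 * This routine is called when a BUD format structure is found in a
 * committed transaction in the log. Its purpose is to cancel the
 * corresponding BUI if it was still in the log. To do this it searches
 * the AIL for the BUI with an id equal to that in the BUD format
 * structure. If we find it we drop the BUD reference, which removes the
 * BUI from the AIL and frees it.
 */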
3710 STATIC int
3711 xlog_recover_bud_pass2(
3712 struct xlog *log,
3713 struct xlog_recover_item *item)
3714 {
3715 struct xfs_bud_log_format *bud_formatp;
3716 struct xfs_bui_log_item *buip = NULL;
3717 struct xfs_log_item *lip;
3718 uint64_t bui_id;
3719 struct xfs_ail_cursor cur;
3720 struct xfs_ail *ailp = log->l_ailp;
3721
3722 bud_formatp = item->ri_buf[0].i_addr;
3723 if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format))
3724 return -EFSCORRUPTED;
3725 bui_id = bud_formatp->bud_bui_id;

/*
 * Search for the BUI with the id in the BUD format structure in the
 * AIL.
 */
3731 spin_lock(&ailp->ail_lock);
3732 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3733 while (lip != NULL) {
3734 if (lip->li_type == XFS_LI_BUI) {
3735 buip = (struct xfs_bui_log_item *)lip;
3736 if (buip->bui_format.bui_id == bui_id) {
/*
 * Drop the BUD reference to the BUI. This removes the BUI
 * from the AIL and frees it.
 */
3741 spin_unlock(&ailp->ail_lock);
3742 xfs_bui_release(buip);
3743 spin_lock(&ailp->ail_lock);
3744 break;
3745 }
3746 }
3747 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3748 }
3749
3750 xfs_trans_ail_cursor_done(&cur);
3751 spin_unlock(&ailp->ail_lock);
3752
3753 return 0;
3754 }
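/*
 * This routine is called when an inode create format structure is found
 * in a committed transaction in the log. Its purpose is to initialise
 * the inodes being allocated on disk. This requires us to get inode
 * cluster buffers that match the range to be initialised, stamped with
 * inode templates and written by delayed write so that subsequent
 * modifications will hit the cached buffer and be coherent with the
 * buffer cache and log.
 */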
3764 STATIC int
3765 xlog_recover_do_icreate_pass2(
3766 struct xlog *log,
3767 struct list_head *buffer_list,
3768 xlog_recover_item_t *item)
3769 {
3770 struct xfs_mount *mp = log->l_mp;
3771 struct xfs_icreate_log *icl;
3772 struct xfs_ino_geometry *igeo = M_IGEO(mp);
3773 xfs_agnumber_t agno;
3774 xfs_agblock_t agbno;
3775 unsigned int count;
3776 unsigned int isize;
3777 xfs_agblock_t length;
3778 int bb_per_cluster;
3779 int cancel_count;
3780 int nbufs;
3781 int i;
3782
3783 icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
3784 if (icl->icl_type != XFS_LI_ICREATE) {
3785 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
3786 return -EINVAL;
3787 }
3788
3789 if (icl->icl_size != 1) {
3790 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
3791 return -EINVAL;
3792 }
3793
3794 agno = be32_to_cpu(icl->icl_ag);
3795 if (agno >= mp->m_sb.sb_agcount) {
3796 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
3797 return -EINVAL;
3798 }
3799 agbno = be32_to_cpu(icl->icl_agbno);
3800 if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
3801 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
3802 return -EINVAL;
3803 }
3804 isize = be32_to_cpu(icl->icl_isize);
3805 if (isize != mp->m_sb.sb_inodesize) {
3806 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
3807 return -EINVAL;
3808 }
3809 count = be32_to_cpu(icl->icl_count);
3810 if (!count) {
3811 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
3812 return -EINVAL;
3813 }
3814 length = be32_to_cpu(icl->icl_length);
3815 if (!length || length >= mp->m_sb.sb_agblocks) {
3816 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
3817 return -EINVAL;
3818 }

/*
 * The inode chunk is either full or sparse and we only support
 * ialloc_min_blks sized sparse allocations at this time.
 */
3824 if (length != igeo->ialloc_blks &&
3825 length != igeo->ialloc_min_blks) {
3826 xfs_warn(log->l_mp,
3827 "%s: unsupported chunk length", __func__);
3828 return -EINVAL;
3829 }

/* verify inode count is consistent with extent length */
3832 if ((count >> mp->m_sb.sb_inopblog) != length) {
3833 xfs_warn(log->l_mp,
3834 "%s: inconsistent inode count and chunk length",
3835 __func__);
3836 return -EINVAL;
3837 }

/*
 * The icreate transaction can cover multiple cluster buffers and these
 * buffers could have been freed and reused. Check the individual
 * buffers for cancellation so we don't overwrite anything written after
 * cancellation.
 */
3845 bb_per_cluster = XFS_FSB_TO_BB(mp, igeo->blocks_per_cluster);
3846 nbufs = length / igeo->blocks_per_cluster;
3847 for (i = 0, cancel_count = 0; i < nbufs; i++) {
3848 xfs_daddr_t daddr;
3849
3850 daddr = XFS_AGB_TO_DADDR(mp, agno,
3851 agbno + i * igeo->blocks_per_cluster);
3852 if (xlog_check_buffer_cancelled(log, daddr, bb_per_cluster, 0))
3853 cancel_count++;
3854 }
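/*
 * We currently only use icreate for a single allocation at a time. This
 * means we should expect either all or none of the buffers to be
 * cancelled. Be conservative and skip replay if at least one buffer is
 * cancelled, but warn the user that something is awry if the buffers
 * are not consistent.
 *
 * XXX: This must be refined to only skip cancelled clusters once we use
 * icreate for multiple chunk allocations.
 */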
3866 ASSERT(!cancel_count || cancel_count == nbufs);
3867 if (cancel_count) {
3868 if (cancel_count != nbufs)
3869 xfs_warn(mp,
3870 "WARNING: partial inode chunk cancellation, skipped icreate.");
3871 trace_xfs_log_recover_icreate_cancel(log, icl);
3872 return 0;
3873 }
3874
3875 trace_xfs_log_recover_icreate_recover(log, icl);
3876 return xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno,
3877 length, be32_to_cpu(icl->icl_gen));
3878 }
3879
3880 STATIC void
3881 xlog_recover_buffer_ra_pass2(
3882 struct xlog *log,
3883 struct xlog_recover_item *item)
3884 {
3885 struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr;
3886 struct xfs_mount *mp = log->l_mp;
3887
3888 if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
3889 buf_f->blf_len, buf_f->blf_flags)) {
3890 return;
3891 }
3892
3893 xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
3894 buf_f->blf_len, NULL);
3895 }
3896
3897 STATIC void
3898 xlog_recover_inode_ra_pass2(
3899 struct xlog *log,
3900 struct xlog_recover_item *item)
3901 {
3902 struct xfs_inode_log_format ilf_buf;
3903 struct xfs_inode_log_format *ilfp;
3904 struct xfs_mount *mp = log->l_mp;
3905 int error;
3906
3907 if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
3908 ilfp = item->ri_buf[0].i_addr;
3909 } else {
3910 ilfp = &ilf_buf;
3911 memset(ilfp, 0, sizeof(*ilfp));
3912 error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
3913 if (error)
3914 return;
3915 }
3916
3917 if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
3918 return;
3919
3920 xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
3921 ilfp->ilf_len, &xfs_inode_buf_ra_ops);
3922 }
3923
3924 STATIC void
3925 xlog_recover_dquot_ra_pass2(
3926 struct xlog *log,
3927 struct xlog_recover_item *item)
3928 {
3929 struct xfs_mount *mp = log->l_mp;
3930 struct xfs_disk_dquot *recddq;
3931 struct xfs_dq_logformat *dq_f;
3932 uint type;
3933 int len;
3934
3935
3936 if (mp->m_qflags == 0)
3937 return;
3938
3939 recddq = item->ri_buf[1].i_addr;
3940 if (recddq == NULL)
3941 return;
3942 if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
3943 return;
3944
3945 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3946 ASSERT(type);
3947 if (log->l_quotaoffs_flag & type)
3948 return;
3949
3950 dq_f = item->ri_buf[0].i_addr;
3951 ASSERT(dq_f);
3952 ASSERT(dq_f->qlf_len == 1);
3953
3954 len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
3955 if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
3956 return;
3957
3958 xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
3959 &xfs_dquot_buf_ra_ops);
3960 }
3961
3962 STATIC void
3963 xlog_recover_ra_pass2(
3964 struct xlog *log,
3965 struct xlog_recover_item *item)
3966 {
3967 switch (ITEM_TYPE(item)) {
3968 case XFS_LI_BUF:
3969 xlog_recover_buffer_ra_pass2(log, item);
3970 break;
3971 case XFS_LI_INODE:
3972 xlog_recover_inode_ra_pass2(log, item);
3973 break;
3974 case XFS_LI_DQUOT:
3975 xlog_recover_dquot_ra_pass2(log, item);
3976 break;
3977 case XFS_LI_EFI:
3978 case XFS_LI_EFD:
3979 case XFS_LI_QUOTAOFF:
3980 case XFS_LI_RUI:
3981 case XFS_LI_RUD:
3982 case XFS_LI_CUI:
3983 case XFS_LI_CUD:
3984 case XFS_LI_BUI:
3985 case XFS_LI_BUD:
3986 default:
3987 break;
3988 }
3989 }
3990
3991 STATIC int
3992 xlog_recover_commit_pass1(
3993 struct xlog *log,
3994 struct xlog_recover *trans,
3995 struct xlog_recover_item *item)
3996 {
3997 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
3998
3999 switch (ITEM_TYPE(item)) {
4000 case XFS_LI_BUF:
4001 return xlog_recover_buffer_pass1(log, item);
4002 case XFS_LI_QUOTAOFF:
4003 return xlog_recover_quotaoff_pass1(log, item);
4004 case XFS_LI_INODE:
4005 case XFS_LI_EFI:
4006 case XFS_LI_EFD:
4007 case XFS_LI_DQUOT:
4008 case XFS_LI_ICREATE:
4009 case XFS_LI_RUI:
4010 case XFS_LI_RUD:
4011 case XFS_LI_CUI:
4012 case XFS_LI_CUD:
4013 case XFS_LI_BUI:
4014 case XFS_LI_BUD:
/* nothing to do in pass 1 */
4016 return 0;
4017 default:
4018 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
4019 __func__, ITEM_TYPE(item));
4020 ASSERT(0);
4021 return -EIO;
4022 }
4023 }
4024
4025 STATIC int
4026 xlog_recover_commit_pass2(
4027 struct xlog *log,
4028 struct xlog_recover *trans,
4029 struct list_head *buffer_list,
4030 struct xlog_recover_item *item)
4031 {
4032 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
4033
4034 switch (ITEM_TYPE(item)) {
4035 case XFS_LI_BUF:
4036 return xlog_recover_buffer_pass2(log, buffer_list, item,
4037 trans->r_lsn);
4038 case XFS_LI_INODE:
4039 return xlog_recover_inode_pass2(log, buffer_list, item,
4040 trans->r_lsn);
4041 case XFS_LI_EFI:
4042 return xlog_recover_efi_pass2(log, item, trans->r_lsn);
4043 case XFS_LI_EFD:
4044 return xlog_recover_efd_pass2(log, item);
4045 case XFS_LI_RUI:
4046 return xlog_recover_rui_pass2(log, item, trans->r_lsn);
4047 case XFS_LI_RUD:
4048 return xlog_recover_rud_pass2(log, item);
4049 case XFS_LI_CUI:
4050 return xlog_recover_cui_pass2(log, item, trans->r_lsn);
4051 case XFS_LI_CUD:
4052 return xlog_recover_cud_pass2(log, item);
4053 case XFS_LI_BUI:
4054 return xlog_recover_bui_pass2(log, item, trans->r_lsn);
4055 case XFS_LI_BUD:
4056 return xlog_recover_bud_pass2(log, item);
4057 case XFS_LI_DQUOT:
4058 return xlog_recover_dquot_pass2(log, buffer_list, item,
4059 trans->r_lsn);
4060 case XFS_LI_ICREATE:
4061 return xlog_recover_do_icreate_pass2(log, buffer_list, item);
4062 case XFS_LI_QUOTAOFF:
/* nothing to do in pass 2 */
4064 return 0;
4065 default:
4066 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
4067 __func__, ITEM_TYPE(item));
4068 ASSERT(0);
4069 return -EIO;
4070 }
4071 }
4072
4073 STATIC int
4074 xlog_recover_items_pass2(
4075 struct xlog *log,
4076 struct xlog_recover *trans,
4077 struct list_head *buffer_list,
4078 struct list_head *item_list)
4079 {
4080 struct xlog_recover_item *item;
4081 int error = 0;
4082
4083 list_for_each_entry(item, item_list, ri_list) {
4084 error = xlog_recover_commit_pass2(log, trans,
4085 buffer_list, item);
4086 if (error)
4087 return error;
4088 }
4089
4090 return error;
4091 }
4092
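/*
 * Perform the transaction.
 *
 * If the transaction modifies a buffer or inode, do it now. Otherwise,
 * EFIs and EFDs get queued up by adding entries into the AIL for them.
 */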
4099 STATIC int
4100 xlog_recover_commit_trans(
4101 struct xlog *log,
4102 struct xlog_recover *trans,
4103 int pass,
4104 struct list_head *buffer_list)
4105 {
4106 int error = 0;
4107 int items_queued = 0;
4108 struct xlog_recover_item *item;
4109 struct xlog_recover_item *next;
4110 LIST_HEAD (ra_list);
4111 LIST_HEAD (done_list);
4112
4113 #define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
4114
4115 hlist_del_init(&trans->r_list);
4116
4117 error = xlog_recover_reorder_trans(log, trans, pass);
4118 if (error)
4119 return error;
4120
4121 list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
4122 switch (pass) {
4123 case XLOG_RECOVER_PASS1:
4124 error = xlog_recover_commit_pass1(log, trans, item);
4125 break;
4126 case XLOG_RECOVER_PASS2:
4127 xlog_recover_ra_pass2(log, item);
4128 list_move_tail(&item->ri_list, &ra_list);
4129 items_queued++;
4130 if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
4131 error = xlog_recover_items_pass2(log, trans,
4132 buffer_list, &ra_list);
4133 list_splice_tail_init(&ra_list, &done_list);
4134 items_queued = 0;
4135 }
4136
4137 break;
4138 default:
4139 ASSERT(0);
4140 }
4141
4142 if (error)
4143 goto out;
4144 }
4145
4146 out:
4147 if (!list_empty(&ra_list)) {
4148 if (!error)
4149 error = xlog_recover_items_pass2(log, trans,
4150 buffer_list, &ra_list);
4151 list_splice_tail_init(&ra_list, &done_list);
4152 }
4153
4154 if (!list_empty(&done_list))
4155 list_splice_init(&done_list, &trans->r_itemq);
4156
4157 return error;
4158 }
4159
4160 STATIC void
4161 xlog_recover_add_item(
4162 struct list_head *head)
4163 {
4164 xlog_recover_item_t *item;
4165
4166 item = kmem_zalloc(sizeof(xlog_recover_item_t), 0);
4167 INIT_LIST_HEAD(&item->ri_list);
4168 list_add_tail(&item->ri_list, head);
4169 }
4170
4171 STATIC int
4172 xlog_recover_add_to_cont_trans(
4173 struct xlog *log,
4174 struct xlog_recover *trans,
4175 char *dp,
4176 int len)
4177 {
4178 xlog_recover_item_t *item;
4179 char *ptr, *old_ptr;
4180 int old_len;
4181

/*
 * If the transaction is empty, the header was split across this and the
 * previous record. Copy the rest of the header.
 */
4186 if (list_empty(&trans->r_itemq)) {
4187 ASSERT(len <= sizeof(struct xfs_trans_header));
4188 if (len > sizeof(struct xfs_trans_header)) {
4189 xfs_warn(log->l_mp, "%s: bad header length", __func__);
4190 return -EIO;
4191 }
4192
4193 xlog_recover_add_item(&trans->r_itemq);
4194 ptr = (char *)&trans->r_theader +
4195 sizeof(struct xfs_trans_header) - len;
4196 memcpy(ptr, dp, len);
4197 return 0;
4198 }

/* take the tail entry */
4201 item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
4202
4203 old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
4204 old_len = item->ri_buf[item->ri_cnt-1].i_len;
4205
4206 ptr = kmem_realloc(old_ptr, len + old_len, 0);
4207 memcpy(&ptr[old_len], dp, len);
4208 item->ri_buf[item->ri_cnt-1].i_len += len;
4209 item->ri_buf[item->ri_cnt-1].i_addr = ptr;
4210 trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
4211 return 0;
4212 }
4213
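/*
 * The next region to add is the start of a new region. It could be a
 * whole region or it could be the first part of a new region. Because
 * of this, the assumption here is that the type and size fields of all
 * format structures fit into the first 32 bits of the structure.
 *
 * This works because all regions must be 32 bit aligned. Therefore, we
 * either have both fields or we have neither field. In the case we have
 * neither field, the data part of the region is zero length. We only
 * have a log_op_header and can throw away the header since a new one
 * will appear later. If we have at least 4 bytes, then we can determine
 * how many regions will follow.
 */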
4227 STATIC int
4228 xlog_recover_add_to_trans(
4229 struct xlog *log,
4230 struct xlog_recover *trans,
4231 char *dp,
4232 int len)
4233 {
4234 struct xfs_inode_log_format *in_f;
4235 xlog_recover_item_t *item;
4236 char *ptr;
4237
4238 if (!len)
4239 return 0;
4240 if (list_empty(&trans->r_itemq)) {
/* we need to catch log corruptions here */
4242 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
4243 xfs_warn(log->l_mp, "%s: bad header magic number",
4244 __func__);
4245 ASSERT(0);
4246 return -EIO;
4247 }
4248
4249 if (len > sizeof(struct xfs_trans_header)) {
4250 xfs_warn(log->l_mp, "%s: bad header length", __func__);
4251 ASSERT(0);
4252 return -EIO;
4253 }
4254

/*
 * The transaction header can be arbitrarily split across op
 * records. If we don't have the whole thing here, copy what we
 * do have and handle the rest in the next record.
 */
4260 if (len == sizeof(struct xfs_trans_header))
4261 xlog_recover_add_item(&trans->r_itemq);
4262 memcpy(&trans->r_theader, dp, len);
4263 return 0;
4264 }
4265
4266 ptr = kmem_alloc(len, 0);
4267 memcpy(ptr, dp, len);
4268 in_f = (struct xfs_inode_log_format *)ptr;

/* take the tail entry */
4271 item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
4272 if (item->ri_total != 0 &&
4273 item->ri_total == item->ri_cnt) {
/* tail item is in use, get a new one */
4275 xlog_recover_add_item(&trans->r_itemq);
4276 item = list_entry(trans->r_itemq.prev,
4277 xlog_recover_item_t, ri_list);
4278 }
4279
4280 if (item->ri_total == 0) {
4281 if (in_f->ilf_size == 0 ||
4282 in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
4283 xfs_warn(log->l_mp,
4284 "bad number of regions (%d) in inode log format",
4285 in_f->ilf_size);
4286 ASSERT(0);
4287 kmem_free(ptr);
4288 return -EIO;
4289 }
4290
4291 item->ri_total = in_f->ilf_size;
4292 item->ri_buf =
4293 kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
4294 0);
4295 }
4296 ASSERT(item->ri_total > item->ri_cnt);
4297
4298 item->ri_buf[item->ri_cnt].i_addr = ptr;
4299 item->ri_buf[item->ri_cnt].i_len = len;
4300 item->ri_cnt++;
4301 trace_xfs_log_recover_item_add(log, trans, item, 0);
4302 return 0;
4303 }
4304
/*
 * Free up any resources allocated by the transaction.
 *
 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
 */
4310 STATIC void
4311 xlog_recover_free_trans(
4312 struct xlog_recover *trans)
4313 {
4314 xlog_recover_item_t *item, *n;
4315 int i;
4316
4317 hlist_del_init(&trans->r_list);
4318
4319 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
/* Free the regions in the item. */
4321 list_del(&item->ri_list);
4322 for (i = 0; i < item->ri_cnt; i++)
4323 kmem_free(item->ri_buf[i].i_addr);
4324
4325 kmem_free(item->ri_buf);
4326 kmem_free(item);
4327 }
4328
4329 kmem_free(trans);
4330 }
4331
/*
 * On error or completion, trans is freed.
 */
4335 STATIC int
4336 xlog_recovery_process_trans(
4337 struct xlog *log,
4338 struct xlog_recover *trans,
4339 char *dp,
4340 unsigned int len,
4341 unsigned int flags,
4342 int pass,
4343 struct list_head *buffer_list)
4344 {
4345 int error = 0;
4346 bool freeit = false;
4347

/* mask off ophdr transaction container flags */
4349 flags &= ~XLOG_END_TRANS;
4350 if (flags & XLOG_WAS_CONT_TRANS)
4351 flags &= ~XLOG_CONTINUE_TRANS;
4352

/*
 * Callees must not free the trans structure. We'll decide if we need to
 * free it or not based on the operation being done and its result.
 */
4357 switch (flags) {
/* expected flag values */
4359 case 0:
4360 case XLOG_CONTINUE_TRANS:
4361 error = xlog_recover_add_to_trans(log, trans, dp, len);
4362 break;
4363 case XLOG_WAS_CONT_TRANS:
4364 error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
4365 break;
4366 case XLOG_COMMIT_TRANS:
4367 error = xlog_recover_commit_trans(log, trans, pass,
4368 buffer_list);
/* success or fail, we are now done with this transaction */
4370 freeit = true;
4371 break;

/* unexpected flag values */
4374 case XLOG_UNMOUNT_TRANS:
/* just skip trans */
4376 xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
4377 freeit = true;
4378 break;
4379 case XLOG_START_TRANS:
4380 default:
4381 xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
4382 ASSERT(0);
4383 error = -EIO;
4384 break;
4385 }
4386 if (error || freeit)
4387 xlog_recover_free_trans(trans);
4388 return error;
4389 }
4390
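/*
 * Lookup the transaction recovery structure associated with the ID in
 * the current ophdr. If the transaction doesn't exist and the start
 * flag is set in the ophdr, then allocate a new transaction for future
 * ID matches to find. Either way, return what we found during the
 * lookup - an existing transaction or nothing.
 */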
4398 STATIC struct xlog_recover *
4399 xlog_recover_ophdr_to_trans(
4400 struct hlist_head rhash[],
4401 struct xlog_rec_header *rhead,
4402 struct xlog_op_header *ohead)
4403 {
4404 struct xlog_recover *trans;
4405 xlog_tid_t tid;
4406 struct hlist_head *rhp;
4407
4408 tid = be32_to_cpu(ohead->oh_tid);
4409 rhp = &rhash[XLOG_RHASH(tid)];
4410 hlist_for_each_entry(trans, rhp, r_list) {
4411 if (trans->r_log_tid == tid)
4412 return trans;
4413 }
4414

/*
 * skip over non-start transaction headers - we could be
 * processing slack space before the next transaction starts
 */
4419 if (!(ohead->oh_flags & XLOG_START_TRANS))
4420 return NULL;
4421
4422 ASSERT(be32_to_cpu(ohead->oh_len) == 0);

/*
 * This is a new transaction so allocate a new recovery container to
 * hold the recovery ops that will follow.
 */
4428 trans = kmem_zalloc(sizeof(struct xlog_recover), 0);
4429 trans->r_log_tid = tid;
4430 trans->r_lsn = be64_to_cpu(rhead->h_lsn);
4431 INIT_LIST_HEAD(&trans->r_itemq);
4432 INIT_HLIST_NODE(&trans->r_list);
4433 hlist_add_head(&trans->r_list, rhp);
4434

/*
 * Nothing more to do for this ophdr. Items to be added to this new
 * transaction will be in subsequent ophdr containers.
 */
4439 return NULL;
4440 }
4441
4442 STATIC int
4443 xlog_recover_process_ophdr(
4444 struct xlog *log,
4445 struct hlist_head rhash[],
4446 struct xlog_rec_header *rhead,
4447 struct xlog_op_header *ohead,
4448 char *dp,
4449 char *end,
4450 int pass,
4451 struct list_head *buffer_list)
4452 {
4453 struct xlog_recover *trans;
4454 unsigned int len;
4455 int error;
4456
/* Do we understand who wrote this op? */
4458 if (ohead->oh_clientid != XFS_TRANSACTION &&
4459 ohead->oh_clientid != XFS_LOG) {
4460 xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
4461 __func__, ohead->oh_clientid);
4462 ASSERT(0);
4463 return -EIO;
4464 }
4465
/*
 * Check the ophdr contains all the data it is supposed to contain.
 */
4469 len = be32_to_cpu(ohead->oh_len);
4470 if (dp + len > end) {
4471 xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
4472 WARN_ON(1);
4473 return -EIO;
4474 }
4475
4476 trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
4477 if (!trans) {
/* nothing to do, so skip over this ophdr */
4479 return 0;
4480 }
4481
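/*
 * The recovered buffer queue is drained only once we know that all
 * recovery items for the current LSN have been processed. This is
 * required because:
 *
 * - Buffer write submission updates the metadata LSN of the buffer.
 * - Log recovery skips items with a metadata LSN >= the current LSN of
 *   the item being recovered.
 * - Separate recovery items against the same metadata buffer can share
 *   a current LSN, since a record can hold multiple transactions and a
 *   transaction can span multiple records.
 *
 * In other words, we are allowed to submit a buffer from log recovery
 * once per current LSN. Otherwise, we may incorrectly skip recovery
 * items and cause corruption.
 *
 * We don't know up front whether buffers are updated multiple times per
 * LSN. Therefore, track the current LSN of each commit log record as it
 * is processed and drain the queue when it changes. Use commit records
 * because they are ordered correctly by the logging code.
 */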
4505 if (log->l_recovery_lsn != trans->r_lsn &&
4506 ohead->oh_flags & XLOG_COMMIT_TRANS) {
4507 error = xfs_buf_delwri_submit(buffer_list);
4508 if (error)
4509 return error;
4510 log->l_recovery_lsn = trans->r_lsn;
4511 }
4512
4513 return xlog_recovery_process_trans(log, trans, dp, len,
4514 ohead->oh_flags, pass, buffer_list);
4515 }
4516
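/*
 * There are two valid states of the r_state field. 0 indicates that the
 * transaction structure is in a normal state. We have either seen the
 * start of the transaction or the last operation we added was not a
 * partial operation. If the last operation we added to the transaction
 * was a partial operation, we need to mark r_state with
 * XLOG_WAS_CONT_TRANS.
 *
 * NOTE: skip LRs with 0 data length.
 */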
4526 STATIC int
4527 xlog_recover_process_data(
4528 struct xlog *log,
4529 struct hlist_head rhash[],
4530 struct xlog_rec_header *rhead,
4531 char *dp,
4532 int pass,
4533 struct list_head *buffer_list)
4534 {
4535 struct xlog_op_header *ohead;
4536 char *end;
4537 int num_logops;
4538 int error;
4539
4540 end = dp + be32_to_cpu(rhead->h_len);
4541 num_logops = be32_to_cpu(rhead->h_num_logops);
4542
/* check the log format matches our own - else we can't recover */
4544 if (xlog_header_check_recover(log->l_mp, rhead))
4545 return -EIO;
4546
4547 trace_xfs_log_recover_record(log, rhead, pass);
4548 while ((dp < end) && num_logops) {
4549
4550 ohead = (struct xlog_op_header *)dp;
4551 dp += sizeof(*ohead);
4552 ASSERT(dp <= end);
4553

/* errors will abort recovery */
4555 error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
4556 dp, end, pass, buffer_list);
4557 if (error)
4558 return error;
4559
4560 dp += be32_to_cpu(ohead->oh_len);
4561 num_logops--;
4562 }
4563 return 0;
4564 }
4565
/* Recover the EFI if necessary. */
4567 STATIC int
4568 xlog_recover_process_efi(
4569 struct xfs_mount *mp,
4570 struct xfs_ail *ailp,
4571 struct xfs_log_item *lip)
4572 {
4573 struct xfs_efi_log_item *efip;
4574 int error;
4575
/*
 * Skip EFIs that we've already processed.
 */
4579 efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4580 if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
4581 return 0;
4582
4583 spin_unlock(&ailp->ail_lock);
4584 error = xfs_efi_recover(mp, efip);
4585 spin_lock(&ailp->ail_lock);
4586
4587 return error;
4588 }
4589
/* Release the EFI since we're cancelling everything. */
4591 STATIC void
4592 xlog_recover_cancel_efi(
4593 struct xfs_mount *mp,
4594 struct xfs_ail *ailp,
4595 struct xfs_log_item *lip)
4596 {
4597 struct xfs_efi_log_item *efip;
4598
4599 efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4600
4601 spin_unlock(&ailp->ail_lock);
4602 xfs_efi_release(efip);
4603 spin_lock(&ailp->ail_lock);
4604 }
4605
/* Recover the RUI if necessary. */
4607 STATIC int
4608 xlog_recover_process_rui(
4609 struct xfs_mount *mp,
4610 struct xfs_ail *ailp,
4611 struct xfs_log_item *lip)
4612 {
4613 struct xfs_rui_log_item *ruip;
4614 int error;
4615
/*
 * Skip RUIs that we've already processed.
 */
4619 ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
4620 if (test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags))
4621 return 0;
4622
4623 spin_unlock(&ailp->ail_lock);
4624 error = xfs_rui_recover(mp, ruip);
4625 spin_lock(&ailp->ail_lock);
4626
4627 return error;
4628 }
4629
/* Release the RUI since we're cancelling everything. */
4631 STATIC void
4632 xlog_recover_cancel_rui(
4633 struct xfs_mount *mp,
4634 struct xfs_ail *ailp,
4635 struct xfs_log_item *lip)
4636 {
4637 struct xfs_rui_log_item *ruip;
4638
4639 ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
4640
4641 spin_unlock(&ailp->ail_lock);
4642 xfs_rui_release(ruip);
4643 spin_lock(&ailp->ail_lock);
4644 }
4645
/* Recover the CUI if necessary. */
4647 STATIC int
4648 xlog_recover_process_cui(
4649 struct xfs_trans *parent_tp,
4650 struct xfs_ail *ailp,
4651 struct xfs_log_item *lip)
4652 {
4653 struct xfs_cui_log_item *cuip;
4654 int error;
4655
/*
 * Skip CUIs that we've already processed.
 */
4659 cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
4660 if (test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags))
4661 return 0;
4662
4663 spin_unlock(&ailp->ail_lock);
4664 error = xfs_cui_recover(parent_tp, cuip);
4665 spin_lock(&ailp->ail_lock);
4666
4667 return error;
4668 }
4669
/* Release the CUI since we're cancelling everything. */
4671 STATIC void
4672 xlog_recover_cancel_cui(
4673 struct xfs_mount *mp,
4674 struct xfs_ail *ailp,
4675 struct xfs_log_item *lip)
4676 {
4677 struct xfs_cui_log_item *cuip;
4678
4679 cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
4680
4681 spin_unlock(&ailp->ail_lock);
4682 xfs_cui_release(cuip);
4683 spin_lock(&ailp->ail_lock);
4684 }
4685
/* Recover the BUI if necessary. */
4687 STATIC int
4688 xlog_recover_process_bui(
4689 struct xfs_trans *parent_tp,
4690 struct xfs_ail *ailp,
4691 struct xfs_log_item *lip)
4692 {
4693 struct xfs_bui_log_item *buip;
4694 int error;
4695
/*
 * Skip BUIs that we've already processed.
 */
4699 buip = container_of(lip, struct xfs_bui_log_item, bui_item);
4700 if (test_bit(XFS_BUI_RECOVERED, &buip->bui_flags))
4701 return 0;
4702
4703 spin_unlock(&ailp->ail_lock);
4704 error = xfs_bui_recover(parent_tp, buip);
4705 spin_lock(&ailp->ail_lock);
4706
4707 return error;
4708 }
4709
/* Release the BUI since we're cancelling everything. */
4711 STATIC void
4712 xlog_recover_cancel_bui(
4713 struct xfs_mount *mp,
4714 struct xfs_ail *ailp,
4715 struct xfs_log_item *lip)
4716 {
4717 struct xfs_bui_log_item *buip;
4718
4719 buip = container_of(lip, struct xfs_bui_log_item, bui_item);
4720
4721 spin_unlock(&ailp->ail_lock);
4722 xfs_bui_release(buip);
4723 spin_lock(&ailp->ail_lock);
4724 }
4725
/* Is this log item a deferred action intent? */
4727 static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
4728 {
4729 switch (lip->li_type) {
4730 case XFS_LI_EFI:
4731 case XFS_LI_RUI:
4732 case XFS_LI_CUI:
4733 case XFS_LI_BUI:
4734 return true;
4735 default:
4736 return false;
4737 }
4738 }
4739
/* Take all the collected deferred ops and finish them in order. */
4741 static int
4742 xlog_finish_defer_ops(
4743 struct xfs_trans *parent_tp)
4744 {
4745 struct xfs_mount *mp = parent_tp->t_mountp;
4746 struct xfs_trans *tp;
4747 int64_t freeblks;
4748 uint resblks;
4749 int error;
4750

/*
 * We're finishing the defer_ops that accumulated as a result of
 * recovering unfinished intent items during log recovery. We reserve an
 * itruncate transaction because it is the largest permanent transaction
 * type. We cannot reserve more blocks than are free, so take roughly
 * 15/16 of the free space and fail with ENOSPC if nothing is available.
 */
4759 freeblks = percpu_counter_sum(&mp->m_fdblocks);
4760 if (freeblks <= 0)
4761 return -ENOSPC;
4762 resblks = min_t(int64_t, UINT_MAX, freeblks);
4763 resblks = (resblks * 15) >> 4;
4764 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks,
4765 0, XFS_TRANS_RESERVE, &tp);
4766 if (error)
4767 return error;
4768
4769 xfs_defer_move(tp, parent_tp);
4770
4771 return xfs_trans_commit(tp);
4772 }
4773
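/*
 * When this is called, all of the log intent items which did not have
 * corresponding log done items should be in the AIL. What we do now is
 * update the data structures associated with each one.
 *
 * Since we process the log intent items in normal transactions, they
 * will be removed at some point after the commit. This prevents us from
 * just walking down the list processing each one. We'll use a flag in
 * the intent item to skip those that we've already processed and use
 * the AIL iteration mechanism's generation count to try to speed this
 * up at least a bit.
 *
 * When we start, we know that the intents are the only things in the
 * AIL. As we process them, however, other items are added to the AIL.
 */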
4790 STATIC int
4791 xlog_recover_process_intents(
4792 struct xlog *log)
4793 {
4794 struct xfs_trans *parent_tp;
4795 struct xfs_ail_cursor cur;
4796 struct xfs_log_item *lip;
4797 struct xfs_ail *ailp;
4798 int error;
4799 #if defined(DEBUG) || defined(XFS_WARN)
4800 xfs_lsn_t last_lsn;
4801 #endif
4802
/*
 * The intent recovery handlers commit transactions to complete recovery
 * for individual intents, but any new deferred operations that are
 * queued during that process are held off until the very end. The
 * purpose of this transaction is to serve as a container for deferred
 * operations. Each intent recovery handler must transfer its deferred
 * ops to us so that we can finish them in the order that they were
 * created.
 */
4812 error = xfs_trans_alloc_empty(log->l_mp, &parent_tp);
4813 if (error)
4814 return error;
4815
4816 ailp = log->l_ailp;
4817 spin_lock(&ailp->ail_lock);
4818 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4819 #if defined(DEBUG) || defined(XFS_WARN)
4820 last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
4821 #endif
4822 while (lip != NULL) {
/*
 * We're done when we see something other than an intent.
 * There should be no intents left in the AIL now.
 */
4827 if (!xlog_item_is_intent(lip)) {
4828 #ifdef DEBUG
4829 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4830 ASSERT(!xlog_item_is_intent(lip));
4831 #endif
4832 break;
4833 }
4834
/*
 * We should never see a redo item with a LSN higher than
 * the last transaction we found in the log at the start
 * of recovery.
 */
4840 ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
4841
/*
 * NOTE: If your intent processing routine can create more
 * deferred ops, you /must/ attach them to the transaction in
 * this routine or else those subsequent intents will get
 * replayed in the wrong order!
 */
4848 switch (lip->li_type) {
4849 case XFS_LI_EFI:
4850 error = xlog_recover_process_efi(log->l_mp, ailp, lip);
4851 break;
4852 case XFS_LI_RUI:
4853 error = xlog_recover_process_rui(log->l_mp, ailp, lip);
4854 break;
4855 case XFS_LI_CUI:
4856 error = xlog_recover_process_cui(parent_tp, ailp, lip);
4857 break;
4858 case XFS_LI_BUI:
4859 error = xlog_recover_process_bui(parent_tp, ailp, lip);
4860 break;
4861 }
4862 if (error)
4863 goto out;
4864 lip = xfs_trans_ail_cursor_next(ailp, &cur);
4865 }
4866 out:
4867 xfs_trans_ail_cursor_done(&cur);
4868 spin_unlock(&ailp->ail_lock);
4869 if (!error)
4870 error = xlog_finish_defer_ops(parent_tp);
4871 xfs_trans_cancel(parent_tp);
4872
4873 return error;
4874 }
4875
/*
 * A cancel occurs when the mount has failed and we're bailing out.
 * Release all pending log intent items so they don't pin the AIL.
 */
4880 STATIC void
4881 xlog_recover_cancel_intents(
4882 struct xlog *log)
4883 {
4884 struct xfs_log_item *lip;
4885 struct xfs_ail_cursor cur;
4886 struct xfs_ail *ailp;
4887
4888 ailp = log->l_ailp;
4889 spin_lock(&ailp->ail_lock);
4890 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4891 while (lip != NULL) {
/*
 * We're done when we see something other than an intent.
 * There should be no intents left in the AIL now.
 */
4896 if (!xlog_item_is_intent(lip)) {
4897 #ifdef DEBUG
4898 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4899 ASSERT(!xlog_item_is_intent(lip));
4900 #endif
4901 break;
4902 }
4903
4904 switch (lip->li_type) {
4905 case XFS_LI_EFI:
4906 xlog_recover_cancel_efi(log->l_mp, ailp, lip);
4907 break;
4908 case XFS_LI_RUI:
4909 xlog_recover_cancel_rui(log->l_mp, ailp, lip);
4910 break;
4911 case XFS_LI_CUI:
4912 xlog_recover_cancel_cui(log->l_mp, ailp, lip);
4913 break;
4914 case XFS_LI_BUI:
4915 xlog_recover_cancel_bui(log->l_mp, ailp, lip);
4916 break;
4917 }
4918
4919 lip = xfs_trans_ail_cursor_next(ailp, &cur);
4920 }
4921
4922 xfs_trans_ail_cursor_done(&cur);
4923 spin_unlock(&ailp->ail_lock);
4924 }
4925
/*
 * This routine performs a transaction to null out a bad inode pointer
 * in an agi unlinked inode hash bucket.
 */
4930 STATIC void
4931 xlog_recover_clear_agi_bucket(
4932 xfs_mount_t *mp,
4933 xfs_agnumber_t agno,
4934 int bucket)
4935 {
4936 xfs_trans_t *tp;
4937 xfs_agi_t *agi;
4938 xfs_buf_t *agibp;
4939 int offset;
4940 int error;
4941
4942 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
4943 if (error)
4944 goto out_error;
4945
4946 error = xfs_read_agi(mp, tp, agno, &agibp);
4947 if (error)
4948 goto out_abort;
4949
4950 agi = XFS_BUF_TO_AGI(agibp);
4951 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
4952 offset = offsetof(xfs_agi_t, agi_unlinked) +
4953 (sizeof(xfs_agino_t) * bucket);
4954 xfs_trans_log_buf(tp, agibp, offset,
4955 (offset + sizeof(xfs_agino_t) - 1));
4956
4957 error = xfs_trans_commit(tp);
4958 if (error)
4959 goto out_error;
4960 return;
4961
4962 out_abort:
4963 xfs_trans_cancel(tp);
4964 out_error:
4965 xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
4966 return;
4967 }
4968
4969 STATIC xfs_agino_t
4970 xlog_recover_process_one_iunlink(
4971 struct xfs_mount *mp,
4972 xfs_agnumber_t agno,
4973 xfs_agino_t agino,
4974 int bucket)
4975 {
4976 struct xfs_buf *ibp;
4977 struct xfs_dinode *dip;
4978 struct xfs_inode *ip;
4979 xfs_ino_t ino;
4980 int error;
4981
4982 ino = XFS_AGINO_TO_INO(mp, agno, agino);
4983 error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
4984 if (error)
4985 goto fail;
4986
/*
 * Get the on disk inode to find the next inode in the bucket.
 */
4990 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
4991 if (error)
4992 goto fail_iput;
4993
4994 xfs_iflags_clear(ip, XFS_IRECOVERY);
4995 ASSERT(VFS_I(ip)->i_nlink == 0);
4996 ASSERT(VFS_I(ip)->i_mode != 0);
4997
4998
4999 agino = be32_to_cpu(dip->di_next_unlinked);
5000 xfs_buf_relse(ibp);
5001
/*
 * Prevent any DMAPI event from being sent when the reference on
 * the inode is dropped.
 */
5006 ip->i_d.di_dmevmask = 0;
5007
5008 xfs_irele(ip);
5009 return agino;
5010
5011 fail_iput:
5012 xfs_irele(ip);
5013 fail:
/*
 * We can't read in the inode this bucket points to, or this inode
 * is messed up. Just ditch this bucket of inodes. We will lose
 * some inodes and space, but at least we won't hang.
 *
 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
 * clear the inode pointer in the bucket.
 */
5022 xlog_recover_clear_agi_bucket(mp, agno, bucket);
5023 return NULLAGINO;
5024 }
5025
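/*
 * This is called during recovery to process any inodes which we
 * unlinked but not freed when the system crashed. These inodes will be
 * on the lists in the AGI blocks. What we do here is scan all the AGIs
 * and fully truncate and free any inodes found on the lists. Each inode
 * is removed from the lists when it has been fully truncated and is
 * freed. The freeing of the inode and its removal from the list must be
 * atomic.
 */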
5049 STATIC void
5050 xlog_recover_process_iunlinks(
5051 struct xlog *log)
5052 {
5053 xfs_mount_t *mp;
5054 xfs_agnumber_t agno;
5055 xfs_agi_t *agi;
5056 xfs_buf_t *agibp;
5057 xfs_agino_t agino;
5058 int bucket;
5059 int error;
5060
5061 mp = log->l_mp;
5062
5063 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
/*
 * Find the agi for this ag.
 */
5067 error = xfs_read_agi(mp, NULL, agno, &agibp);
5068 if (error) {
/*
 * AGI is b0rked. Don't process it.
 *
 * We should probably mark the filesystem as corrupt
 * after we've recovered all the ag's we can....
 */
5075 continue;
5076 }
5077
/*
 * Unlock the buffer so that it can be acquired in the normal
 * course of the transaction to truncate and free each inode.
 * Because we are not racing with anyone else here for the AGI
 * buffer, we don't even need to hold it locked to read the
 * initial unlinked bucket entries out of the buffer. We keep
 * buffer reference though, so that it stays pinned in memory
 * while we need the buffer.
 */
5086 agi = XFS_BUF_TO_AGI(agibp);
5087 xfs_buf_unlock(agibp);
5088
5089 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
5090 agino = be32_to_cpu(agi->agi_unlinked[bucket]);
5091 while (agino != NULLAGINO) {
5092 agino = xlog_recover_process_one_iunlink(mp,
5093 agno, agino, bucket);
5094 cond_resched();
5095 }
5096 }
5097 xfs_buf_rele(agibp);
5098 }
5099 }
5100
5101 STATIC void
5102 xlog_unpack_data(
5103 struct xlog_rec_header *rhead,
5104 char *dp,
5105 struct xlog *log)
5106 {
5107 int i, j, k;
5108
5109 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
5110 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
5111 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
5112 dp += BBSIZE;
5113 }
5114
5115 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
5116 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
5117 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
5118 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
5119 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
5120 *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
5121 dp += BBSIZE;
5122 }
5123 }
5124 }
5125
/*
 * CRC check, unpack and process a log record.
 */
5129 STATIC int
5130 xlog_recover_process(
5131 struct xlog *log,
5132 struct hlist_head rhash[],
5133 struct xlog_rec_header *rhead,
5134 char *dp,
5135 int pass,
5136 struct list_head *buffer_list)
5137 {
5138 __le32 old_crc = rhead->h_crc;
5139 __le32 crc;
5140
5141 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));

/*
 * Nothing else to do if this is a CRC verification pass. Just return
 * if this a record with a non-zero crc. Unfortunately, mkfs always
 * sets old_crc to 0 so we must consider this valid even on v5 supers.
 * Otherwise, return EFSBADCRC on failure so the callers up the stack
 * can differentiate between a CRC failure and a corrupted log record.
 */
5150 if (pass == XLOG_RECOVER_CRCPASS) {
5151 if (old_crc && crc != old_crc)
5152 return -EFSBADCRC;
5153 return 0;
5154 }
5155
/*
 * We're in the normal recovery path. Issue a warning if and only if the
 * CRC in the header is non-zero. This is an advisory warning and the
 * zero CRC check prevents warnings from being emitted when upgrading
 * the kernel from one that does not add CRCs by default.
 */
5162 if (crc != old_crc) {
5163 if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
5164 xfs_alert(log->l_mp,
5165 "log record CRC mismatch: found 0x%x, expected 0x%x.",
5166 le32_to_cpu(old_crc),
5167 le32_to_cpu(crc));
5168 xfs_hex_dump(dp, 32);
5169 }

/*
 * If the filesystem is CRC enabled, this mismatch becomes a
 * fatal log corruption failure.
 */
5175 if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
5176 return -EFSCORRUPTED;
5177 }
5178
5179 xlog_unpack_data(rhead, dp, log);
5180
5181 return xlog_recover_process_data(log, rhash, rhead, dp, pass,
5182 buffer_list);
5183 }
5184
5185 STATIC int
5186 xlog_valid_rec_header(
5187 struct xlog *log,
5188 struct xlog_rec_header *rhead,
5189 xfs_daddr_t blkno)
5190 {
5191 int hlen;
5192
5193 if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
5194 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
5195 XFS_ERRLEVEL_LOW, log->l_mp);
5196 return -EFSCORRUPTED;
5197 }
5198 if (unlikely(
5199 (!rhead->h_version ||
5200 (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
5201 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
5202 __func__, be32_to_cpu(rhead->h_version));
5203 return -EIO;
5204 }
5205
/* LR body must have data or it wouldn't have been written */
5207 hlen = be32_to_cpu(rhead->h_len);
5208 if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
5209 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
5210 XFS_ERRLEVEL_LOW, log->l_mp);
5211 return -EFSCORRUPTED;
5212 }
5213 if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
5214 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
5215 XFS_ERRLEVEL_LOW, log->l_mp);
5216 return -EFSCORRUPTED;
5217 }
5218 return 0;
5219 }
5220
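/*
 * Read the log from tail to head and process the log records found.
 * Handle the two cases where the tail and head are in the same cycle
 * and where the active portion of the log wraps around the end of the
 * physical log separately. The pass parameter is passed through to the
 * routines called to process the data and is not looked at here.
 */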
5229 STATIC int
5230 xlog_do_recovery_pass(
5231 struct xlog *log,
5232 xfs_daddr_t head_blk,
5233 xfs_daddr_t tail_blk,
5234 int pass,
5235 xfs_daddr_t *first_bad)
5236 {
5237 xlog_rec_header_t *rhead;
5238 xfs_daddr_t blk_no, rblk_no;
5239 xfs_daddr_t rhead_blk;
5240 char *offset;
5241 char *hbp, *dbp;
5242 int error = 0, h_size, h_len;
5243 int error2 = 0;
5244 int bblks, split_bblks;
5245 int hblks, split_hblks, wrapped_hblks;
5246 int i;
5247 struct hlist_head rhash[XLOG_RHASH_SIZE];
5248 LIST_HEAD (buffer_list);
5249
5250 ASSERT(head_blk != tail_blk);
5251 blk_no = rhead_blk = tail_blk;
5252
5253 for (i = 0; i < XLOG_RHASH_SIZE; i++)
5254 INIT_HLIST_HEAD(&rhash[i]);
5255
/*
 * Read the header of the tail block and get the iclog buffer size from
 * h_size. Use this to tell how many sectors make up the log header.
 */
5260 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
/*
 * When using variable length iclogs, read first sector of
 * iclog header and extract the header size from it. Get a
 * new hbp that is the correct size.
 */
5266 hbp = xlog_alloc_buffer(log, 1);
5267 if (!hbp)
5268 return -ENOMEM;
5269
5270 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
5271 if (error)
5272 goto bread_err1;
5273
5274 rhead = (xlog_rec_header_t *)offset;
5275 error = xlog_valid_rec_header(log, rhead, tail_blk);
5276 if (error)
5277 goto bread_err1;
5278
/*
 * xfsprogs has a bug where record length is based on lsunit but
 * h_size (iclog size) is hardcoded to 32k. Now that we
 * unconditionally CRC verify the unmount record, this means the
 * log buffer can be too small for the record and cause an
 * overread.
 *
 * Detect this condition here. Use lsunit for the buffer size as
 * long as this looks like the mkfs case. Otherwise, return an
 * error to avoid a buffer overread.
 */
5290 h_size = be32_to_cpu(rhead->h_size);
5291 h_len = be32_to_cpu(rhead->h_len);
5292 if (h_len > h_size) {
5293 if (h_len <= log->l_mp->m_logbsize &&
5294 be32_to_cpu(rhead->h_num_logops) == 1) {
5295 xfs_warn(log->l_mp,
5296 "invalid iclog size (%d bytes), using lsunit (%d bytes)",
5297 h_size, log->l_mp->m_logbsize);
5298 h_size = log->l_mp->m_logbsize;
} else {
error = -EFSCORRUPTED;
goto bread_err1;
}
5301 }
5302
5303 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
5304 (h_size > XLOG_HEADER_CYCLE_SIZE)) {
5305 hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
5306 if (h_size % XLOG_HEADER_CYCLE_SIZE)
5307 hblks++;
5308 kmem_free(hbp);
5309 hbp = xlog_alloc_buffer(log, hblks);
5310 } else {
5311 hblks = 1;
5312 }
5313 } else {
5314 ASSERT(log->l_sectBBsize == 1);
5315 hblks = 1;
5316 hbp = xlog_alloc_buffer(log, 1);
5317 h_size = XLOG_BIG_RECORD_BSIZE;
5318 }
5319
5320 if (!hbp)
5321 return -ENOMEM;
5322 dbp = xlog_alloc_buffer(log, BTOBB(h_size));
5323 if (!dbp) {
5324 kmem_free(hbp);
5325 return -ENOMEM;
5326 }
5327
5328 memset(rhash, 0, sizeof(rhash));
5329 if (tail_blk > head_blk) {
/*
 * Perform recovery around the end of the physical log.
 * When the head is not on the same cycle number as the tail,
 * we can't do a sequential recovery.
 */
5335 while (blk_no < log->l_logBBsize) {
/*
 * Check for header wrapping around physical end-of-log
 */
5339 offset = hbp;
5340 split_hblks = 0;
5341 wrapped_hblks = 0;
5342 if (blk_no + hblks <= log->l_logBBsize) {
/* Read header in one read */
5344 error = xlog_bread(log, blk_no, hblks, hbp,
5345 &offset);
5346 if (error)
5347 goto bread_err2;
5348 } else {
/* This LR is split across physical log end */
5350 if (blk_no != log->l_logBBsize) {
/* some data before physical log end */
5352 ASSERT(blk_no <= INT_MAX);
5353 split_hblks = log->l_logBBsize - (int)blk_no;
5354 ASSERT(split_hblks > 0);
5355 error = xlog_bread(log, blk_no,
5356 split_hblks, hbp,
5357 &offset);
5358 if (error)
5359 goto bread_err2;
5360 }

/*
 * Note: this black magic still works with large sector sizes
 * (non-512) only because:
 * - we increased the buffer size originally by 1 sector giving
 *   us enough extra space for the second read;
 * - the log start is guaranteed to be sector aligned;
 * - we read the log end (LR header start) _first_, then the
 *   log start (LR header end) - order is important.
 */
5374 wrapped_hblks = hblks - split_hblks;
5375 error = xlog_bread_noalign(log, 0,
5376 wrapped_hblks,
5377 offset + BBTOB(split_hblks));
5378 if (error)
5379 goto bread_err2;
5380 }
5381 rhead = (xlog_rec_header_t *)offset;
5382 error = xlog_valid_rec_header(log, rhead,
5383 split_hblks ? blk_no : 0);
5384 if (error)
5385 goto bread_err2;
5386
5387 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
5388 blk_no += hblks;
5389
/*
 * Read the log record data in multiple reads if it
 * wraps around the end of the log. Note that if the
 * header already wrapped, blk_no could point past the
 * end of the log. The record data is contiguous in
 * that case.
 */
5397 if (blk_no + bblks <= log->l_logBBsize ||
5398 blk_no >= log->l_logBBsize) {
5399 rblk_no = xlog_wrap_logbno(log, blk_no);
5400 error = xlog_bread(log, rblk_no, bblks, dbp,
5401 &offset);
5402 if (error)
5403 goto bread_err2;
5404 } else {
/* This log record is split across the
 * physical end of log */
5407 offset = dbp;
5408 split_bblks = 0;
5409 if (blk_no != log->l_logBBsize) {
/* some data is before the physical
 * end of log */
5412 ASSERT(!wrapped_hblks);
5413 ASSERT(blk_no <= INT_MAX);
5414 split_bblks =
5415 log->l_logBBsize - (int)blk_no;
5416 ASSERT(split_bblks > 0);
5417 error = xlog_bread(log, blk_no,
5418 split_bblks, dbp,
5419 &offset);
5420 if (error)
5421 goto bread_err2;
5422 }

/*
 * Note: this black magic still works with large sector sizes
 * (non-512) only because:
 * - we increased the buffer size originally by 1 sector giving
 *   us enough extra space for the second read;
 * - the log start is guaranteed to be sector aligned;
 * - we read the log end (LR header start) _first_, then the
 *   log start (LR header end) - order is important.
 */
5436 error = xlog_bread_noalign(log, 0,
5437 bblks - split_bblks,
5438 offset + BBTOB(split_bblks));
5439 if (error)
5440 goto bread_err2;
5441 }
5442
5443 error = xlog_recover_process(log, rhash, rhead, offset,
5444 pass, &buffer_list);
5445 if (error)
5446 goto bread_err2;
5447
5448 blk_no += bblks;
5449 rhead_blk = blk_no;
5450 }
5451
5452 ASSERT(blk_no >= log->l_logBBsize);
5453 blk_no -= log->l_logBBsize;
5454 rhead_blk = blk_no;
5455 }
5456
/* read first part of physical log */
5458 while (blk_no < head_blk) {
5459 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
5460 if (error)
5461 goto bread_err2;
5462
5463 rhead = (xlog_rec_header_t *)offset;
5464 error = xlog_valid_rec_header(log, rhead, blk_no);
5465 if (error)
5466 goto bread_err2;
5467
/* blocks in data section */
5469 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
5470 error = xlog_bread(log, blk_no+hblks, bblks, dbp,
5471 &offset);
5472 if (error)
5473 goto bread_err2;
5474
5475 error = xlog_recover_process(log, rhash, rhead, offset, pass,
5476 &buffer_list);
5477 if (error)
5478 goto bread_err2;
5479
5480 blk_no += bblks + hblks;
5481 rhead_blk = blk_no;
5482 }
5483
5484 bread_err2:
5485 kmem_free(dbp);
5486 bread_err1:
5487 kmem_free(hbp);
5488
/*
 * Submit buffers that have been added from the last record processed,
 * regardless of error status.
 */
5493 if (!list_empty(&buffer_list))
5494 error2 = xfs_buf_delwri_submit(&buffer_list);
5495
5496 if (error && first_bad)
5497 *first_bad = rhead_blk;
5498
/*
 * Transactions are freed at commit time but transactions without commit
 * records on disk are never committed. Free any that may be left in the
 * hash table.
 */
5504 for (i = 0; i < XLOG_RHASH_SIZE; i++) {
5505 struct hlist_node *tmp;
5506 struct xlog_recover *trans;
5507
5508 hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
5509 xlog_recover_free_trans(trans);
5510 }
5511
5512 return error ? error : error2;
5513 }
5514
/*
 * Do the recovery of the log.  We actually do this in two phases.
 * The two passes are necessary in order to implement the function
 * of cancelling a record written into the log.  The first pass
 * determines those things which have been cancelled, and the
 * second pass replays log items normally except for those which
 * have been cancelled.  Buffer cancellations are recorded in
 * l_buf_cancel_table between the two passes, which is why the
 * table is allocated here and freed once both passes complete.
 */
STATIC int
xlog_do_log_recovery(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	int		error, i;

	ASSERT(head_blk != tail_blk);

	/*
	 * First do a pass to find all of the cancelled buf log items.
	 * Store them in the buf_cancel_table for use in the second pass.
	 */
	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
						 sizeof(struct list_head),
						 0);
	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);

	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
				      XLOG_RECOVER_PASS1, NULL);
	if (error != 0) {
		kmem_free(log->l_buf_cancel_table);
		log->l_buf_cancel_table = NULL;
		return error;
	}
	/*
	 * Then do a second pass to actually recover the items in the log.
	 * When it is complete free the table of buf cancel items.
	 */
	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
				      XLOG_RECOVER_PASS2, NULL);
#ifdef DEBUG
	if (!error) {
		int	i;

		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
	}
#endif

	kmem_free(log->l_buf_cancel_table);
	log->l_buf_cancel_table = NULL;

	return error;
}

/*
 * Do the actual recovery
 */
STATIC int
xlog_do_recover(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	struct xfs_mount *mp = log->l_mp;
	int		error;
	xfs_buf_t	*bp;
	xfs_sb_t	*sbp;

	trace_xfs_log_recover(log, head_blk, tail_blk);

	/*
	 * First replay the images in the log.
	 */
	error = xlog_do_log_recovery(log, head_blk, tail_blk);
	if (error)
		return error;

	/*
	 * If IO errors happened during recovery, bail out.
	 */
	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * We now update the tail_lsn since much of the recovery has completed
	 * and there may be space available to use.  If there were no extent
	 * or iunlinks, we can free up the entire log and set the tail_lsn to
	 * be the last_sync_lsn.  This was set in xlog_find_tail to be the
	 * lsn of the last known good LR on disk.  If there are extent frees
	 * or iunlinks they will have some entries in the AIL; so we look at
	 * the AIL to determine how to set the tail_lsn.
	 */
	xlog_assign_tail_lsn(mp);

	/*
	 * Now that we've finished replaying all buffer and inode updates,
	 * re-read in the superblock and reinit the mount structures.
	 */
	bp = xfs_getsb(mp);
	bp->b_flags &= ~(XBF_DONE | XBF_ASYNC);
	ASSERT(!(bp->b_flags & XBF_WRITE));
	bp->b_flags |= XBF_READ;
	bp->b_ops = &xfs_sb_buf_ops;

	error = xfs_buf_submit(bp);
	if (error) {
		if (!XFS_FORCED_SHUTDOWN(mp)) {
			xfs_buf_ioerror_alert(bp, __func__);
			ASSERT(0);
		}
		xfs_buf_relse(bp);
		return error;
	}

	/* Convert superblock from on-disk format */
	sbp = &mp->m_sb;
	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
	xfs_buf_relse(bp);

	/* re-initialise in-core superblock and geometry structures */
	xfs_reinit_percpu_counters(mp);
	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
	if (error) {
		xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
		return error;
	}
	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);

	xlog_recover_check_summary(log);

	/* Normal transactions can now occur */
	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
	return 0;
}

/*
 * Perform recovery and re-initialize some log variables in xlog_find_tail.
 *
 * Return error or zero.
 */
int
xlog_recover(
	struct xlog	*log)
{
	xfs_daddr_t	head_blk, tail_blk;
	int		error;

	/* find the tail of the log */
	error = xlog_find_tail(log, &head_blk, &tail_blk);
	if (error)
		return error;

	/*
	 * The superblock was read before the log was available and thus the
	 * LSN could not be verified. Check the superblock LSN against the
	 * current LSN now that it's known.
	 */
	if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
	    !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
		return -EINVAL;

	if (tail_blk != head_blk) {
		/*
		 * Disallow recovery on read-only mounts?  Note -- mount
		 * checks for ENOSPC and turns it into an intelligent
		 * error message.  Unless you specify NORECOVERY (in which
		 * case this function would never be called), we just go
		 * ahead and recover.  We do this all under the vfs layer,
		 * so we can get away with it unless the device itself is
		 * read-only, in which case we fail.
		 */
		error = xfs_dev_is_read_only(log->l_mp, "recovery");
		if (error)
			return error;

		/*
		 * Version 5 superblock log feature mask validation. We know
		 * the log is dirty so check if there are any unknown log
		 * features in what we need to recover. If there are unknown
		 * features (e.g. unsupported transactions), then simply
		 * reject the attempt at recovery before touching anything.
		 */
		if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
			xfs_warn(log->l_mp,
"Superblock has unknown incompatible log features (0x%x) enabled.",
				(log->l_mp->m_sb.sb_features_log_incompat &
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
			xfs_warn(log->l_mp,
"The log can not be fully and/or safely recovered by this kernel.");
			xfs_warn(log->l_mp,
"Please recover the log on a kernel that supports the unknown features.");
			return -EINVAL;
		}

		/*
		 * Delay log recovery if the debug hook is set. This is debug
		 * instrumentation to coordinate simulation of I/O failures
		 * with log recovery.
		 */
		if (xfs_globals.log_recovery_delay) {
			xfs_notice(log->l_mp,
				"Delaying log recovery for %d seconds.",
				xfs_globals.log_recovery_delay);
			msleep(xfs_globals.log_recovery_delay * 1000);
		}

		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
				log->l_mp->m_logname ? log->l_mp->m_logname
						     : "internal");

		error = xlog_do_recover(log, head_blk, tail_blk);
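		/*
		 * Flag recovery as needed regardless of the result;
		 * xlog_recover_cancel() keys off this flag to cancel
		 * recovered intents if the mount is aborted.
		 */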
		log->l_flags |= XLOG_RECOVERY_NEEDED;
	}
	return error;
}

/*
 * In the first part of recovery we replay inodes and buffers and build
 * up the list of extent free items which need to be processed.  Here
 * we process the extent free items and clean up the on disk unlinked
 * inode lists.  This is separated from the first part of recovery so
 * that the root and real-time bitmap inodes can be read in from disk in
 * between the two stages.  This is necessary so that we can free space
 * in the real-time portion of the file system.
 */
int
xlog_recover_finish(
	struct xlog	*log)
{
	/*
	 * Now we're ready to do the transactions needed for the
	 * rest of recovery.  Start with completing all the extent
	 * free intent records and then process the unlinked inode
	 * lists.  At this point, we essentially run in normal mode
	 * except that we're still performing recovery actions
	 * rather than accepting new requests.
	 */
	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
		int	error;

		error = xlog_recover_process_intents(log);
		if (error) {
			xfs_alert(log->l_mp, "Failed to recover intents");
			return error;
		}

		/*
		 * Sync the log to get all the intents out of the AIL.
		 * This isn't absolutely necessary, but it helps in
		 * case the unlink transactions would have problems
		 * pushing the intents out of the way.
		 */
		xfs_log_force(log->l_mp, XFS_LOG_SYNC);

		xlog_recover_process_iunlinks(log);

		xlog_recover_check_summary(log);

		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
				log->l_mp->m_logname ? log->l_mp->m_logname
						     : "internal");
		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
	} else {
		xfs_info(log->l_mp, "Ending clean mount");
	}
	return 0;
}

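/*
 * Cancel any intent items that recovery replayed but that have not yet
 * been processed, e.g. when a mount is aborted after xlog_recover() but
 * before xlog_recover_finish() completes.
 */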
void
xlog_recover_cancel(
	struct xlog	*log)
{
	if (log->l_flags & XLOG_RECOVERY_NEEDED)
		xlog_recover_cancel_intents(log);
}

#if defined(DEBUG)
/*
 * Read all of the agf and agi counters and check that they
 * are consistent with the superblock counters.
 */
STATIC void
xlog_recover_check_summary(
	struct xlog	*log)
{
	xfs_mount_t	*mp;
	xfs_agf_t	*agfp;
	xfs_buf_t	*agfbp;
	xfs_buf_t	*agibp;
	xfs_agnumber_t	agno;
	uint64_t	freeblks;
	uint64_t	itotal;
	uint64_t	ifree;
	int		error;

	mp = log->l_mp;

	freeblks = 0LL;
	itotal = 0LL;
	ifree = 0LL;
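	/* walk each AG and total the on-disk free space and inode counters */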
	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
		if (error) {
			xfs_alert(mp, "%s agf read failed agno %d error %d",
						__func__, agno, error);
		} else {
			agfp = XFS_BUF_TO_AGF(agfbp);
			freeblks += be32_to_cpu(agfp->agf_freeblks) +
				    be32_to_cpu(agfp->agf_flcount);
			xfs_buf_relse(agfbp);
		}

		error = xfs_read_agi(mp, NULL, agno, &agibp);
		if (error) {
			xfs_alert(mp, "%s agi read failed agno %d error %d",
						__func__, agno, error);
		} else {
			struct xfs_agi	*agi = XFS_BUF_TO_AGI(agibp);

			itotal += be32_to_cpu(agi->agi_count);
			ifree += be32_to_cpu(agi->agi_freecount);
			xfs_buf_relse(agibp);
		}
	}
}
#endif