This source file includes the following definitions.
- xlog_cil_ticket_alloc
- xlog_cil_init_post_recovery
- xlog_cil_iovec_space
- xlog_cil_alloc_shadow_bufs
- xfs_cil_prepare_item
- xlog_cil_insert_format_items
- xlog_cil_insert_items
- xlog_cil_free_logvec
- xlog_discard_endio_work
- xlog_discard_endio
- xlog_discard_busy_extents
- xlog_cil_committed
- xlog_cil_process_committed
- xlog_cil_push
- xlog_cil_push_work
- xlog_cil_push_background
- xlog_cil_push_now
- xlog_cil_empty
- xfs_log_commit_cil
- xlog_cil_force_lsn
- xfs_log_item_in_current_chkpt
- xlog_cil_init
- xlog_cil_destroy
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"

struct workqueue_struct *xfs_discard_wq;

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct xlog	*log)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
				KM_NOFS);

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	return tic;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push. This
 * ticket is used by the CIL context only, and has no intended lifespan beyond
 * that. There is no requirement to waste the unused space in the ticket and
 * hence we should release it at the same time that we drop the CIL context
 * reference.
 */
void
xlog_cil_init_post_recovery(
	struct xlog	*log)
{
	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
}

static inline int
xlog_cil_iovec_space(
	uint	niovecs)
{
	return round_up((sizeof(struct xfs_log_vec) +
					niovecs * sizeof(struct xfs_log_iovec)),
			sizeof(uint64_t));
}
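
Aside: the helper above computes the byte offset at which the data buffer
starts inside a combined allocation (log vector header, then iovec array,
then data), rounded up to 8 bytes. A minimal user-space sketch of the same
arithmetic, using stand-in struct sizes since the real xfs_log_vec and
xfs_log_iovec definitions live in the XFS headers:

	#include <stdio.h>
	#include <stdint.h>

	/* stand-in sizes for illustration only */
	struct fake_log_vec   { char pad[56]; };
	struct fake_log_iovec { char pad[16]; };

	#define ROUND_UP(x, y)	((((x) + (y) - 1) / (y)) * (y))

	static int iovec_space(unsigned int niovecs)
	{
		return ROUND_UP(sizeof(struct fake_log_vec) +
				niovecs * sizeof(struct fake_log_iovec),
				sizeof(uint64_t));
	}

	int main(void)
	{
		/* e.g. 56 + 3 * 16 = 104, already 8-byte aligned */
		printf("%d\n", iovec_space(3));
		return 0;
	}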

/*
 * Allocate or pin log vector buffers for CIL insertion.
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if:
 *	a) does not exist; or
 *	b) it is too small
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means we have a potential deadlock situation
 * under low memory conditions when we have lots of dirty metadata pinned in
 * the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to make the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * it is needed, and this is the buffer that is reallocated to match the size
 * of the incoming modification. Then during the formatting of the item we can
 * swap the active buffer with the new one if we can't reuse the existing
 * buffer. We don't free the old buffer as it may be reused on the next
 * modification if its size is right, but we'll free it if it gets too big.
 */
static void
xlog_cil_alloc_shadow_bufs(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip;

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		int	niovecs = 0;
		int	nbytes = 0;
		int	buf_size;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/* get number of vecs and size of data to be stored */
		lip->li_ops->iop_size(lip, &niovecs, &nbytes);

		/*
		 * Ordered items need to be tracked but we do not wish to write
		 * them. We need a logvec to track the object, but we do not
		 * need an iovec or buffer to be allocated for copying data.
		 */
		if (niovecs == XFS_LOG_VEC_ORDERED) {
			ordered = true;
			niovecs = 0;
			nbytes = 0;
		}

		/*
		 * We 64-bit align the length of each iovec so that the start
		 * of the next one is naturally aligned. We'll need to account
		 * for that slack space here. Then round nbytes up to 64-bit
		 * alignment so that the initial buffer alignment is easy to
		 * calculate and verify.
		 */
		nbytes += niovecs * sizeof(uint64_t);
		nbytes = round_up(nbytes, sizeof(uint64_t));

		/*
		 * The data buffer needs to start 64-bit aligned, so round up
		 * that space to ensure we can align it appropriately and not
		 * overrun the buffer.
		 */
		buf_size = nbytes + xlog_cil_iovec_space(niovecs);

		/*
		 * if we have no shadow buffer, or it is too small, we need to
		 * allocate a new one.
		 */
		if (!lip->li_lv_shadow ||
		    buf_size > lip->li_lv_shadow->lv_size) {

			/*
			 * We free and allocate here as a realloc would copy
			 * unnecessary data. We don't use kmem_zalloc() for the
			 * same reason - we don't need to zero the data area in
			 * the buffer, only the log vector header and the iovec
			 * storage.
			 */
			kmem_free(lip->li_lv_shadow);

			lv = kmem_alloc_large(buf_size, KM_NOFS);
			memset(lv, 0, xlog_cil_iovec_space(niovecs));

			lv->lv_item = lip;
			lv->lv_size = buf_size;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
			lip->li_lv_shadow = lv;
		} else {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv_shadow;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_next = NULL;
		}

		/* Ensure the lv is set up according to ->iop_size */
		lv->lv_niovecs = niovecs;

		/* The allocated data region lies beyond the iovec region */
		lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
	}
}
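
The grow-only shadow buffer pattern above (free and reallocate only when the
existing buffer is too small, otherwise reuse it in place) is worth seeing in
isolation. A minimal user-space sketch with hypothetical names, using
malloc/free in place of the kernel allocators:

	#include <stdlib.h>
	#include <string.h>

	struct shadow {
		size_t	size;		/* allocated payload size */
		char	data[];		/* payload */
	};

	/* Return a buffer of at least want bytes, reusing *bufp if big enough. */
	static struct shadow *shadow_get(struct shadow **bufp, size_t want)
	{
		struct shadow *buf = *bufp;

		if (!buf || want > buf->size) {
			/* free + malloc, not realloc: old contents are stale */
			free(buf);
			buf = malloc(sizeof(*buf) + want);
			if (!buf)
				return NULL;
			memset(buf, 0, sizeof(*buf));	/* zero header only */
			buf->size = want;
			*bufp = buf;
		}
		return buf;
	}

	int main(void)
	{
		struct shadow *shadow = NULL;

		shadow_get(&shadow, 64);	/* allocates */
		shadow_get(&shadow, 32);	/* reuses: common overwrite case */
		shadow_get(&shadow, 128);	/* too small: freed and reallocated */
		free(shadow);
		return 0;
	}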

/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
 * well.
 */
STATIC void
xfs_cil_prepare_item(
	struct xlog		*log,
	struct xfs_log_vec	*lv,
	struct xfs_log_vec	*old_lv,
	int			*diff_len,
	int			*diff_iovecs)
{
	/* Account for the new LV being passed in */
	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
		*diff_len += lv->lv_bytes;
		*diff_iovecs += lv->lv_niovecs;
	}

	/*
	 * If there is no old LV, this is the first time we've seen the item in
	 * this CIL context and so we need to pin it. If we are replacing the
	 * old_lv, then remove the space it accounts for and make it the shadow
	 * buffer for later freeing. In both cases we are now switching to the
	 * shadow buffer, so update the pointer to it appropriately.
	 */
	if (!old_lv) {
		if (lv->lv_item->li_ops->iop_pin)
			lv->lv_item->li_ops->iop_pin(lv->lv_item);
		lv->lv_item->li_lv_shadow = NULL;
	} else if (old_lv != lv) {
		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

		*diff_len -= old_lv->lv_bytes;
		*diff_iovecs -= old_lv->lv_niovecs;
		lv->lv_item->li_lv_shadow = old_lv;
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}

/*
 * Format log item into a flat buffer.
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function takes the prepared log vectors attached to each log item, and
 * formats the changes into the log vector buffer. The buffer it uses is
 * dependent on the current state of the vector in the CIL - the shadow lv is
 * guaranteed to be large enough for the current modification, but we will only
 * use that if we can't reuse the existing lv. If we can't reuse the existing
 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 * done lazily either by the next modification or the freeing of the log item.
 */
static void
xlog_cil_insert_format_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	int			*diff_len,
	int			*diff_iovecs)
{
	struct xfs_log_item	*lip;

	/* Bail out if we didn't find a log item. */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return;
	}

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		struct xfs_log_vec *old_lv = NULL;
		struct xfs_log_vec *shadow;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * The formatting size information is already attached to
		 * the shadow lv on the log item.
		 */
		shadow = lip->li_lv_shadow;
		if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
			ordered = true;

		/* Skip items that do not have any vectors for writing */
		if (!shadow->lv_niovecs && !ordered)
			continue;

		/* compare to existing item size */
		old_lv = lip->li_lv;
		if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv;
			lv->lv_next = NULL;

			if (ordered)
				goto insert;

			/*
			 * set the item up as though it is a new insertion so
			 * that the space reservation accounting is correct.
			 */
			*diff_iovecs -= lv->lv_niovecs;
			*diff_len -= lv->lv_bytes;

			/* Ensure the lv is set up according to ->iop_size */
			lv->lv_niovecs = shadow->lv_niovecs;

			/* reset the lv buffer information for new formatting */
			lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_buf = (char *)lv +
					xlog_cil_iovec_space(lv->lv_niovecs);
		} else {
			/* switch to shadow buffer! */
			lv = shadow;
			lv->lv_item = lip;
			if (ordered) {
				/* track as an ordered logvec */
				ASSERT(lip->li_lv == NULL);
				goto insert;
			}
		}

		ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
		lip->li_ops->iop_format(lip, lv);
insert:
		xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
	}
}

/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_item	*lip;
	int			len = 0;
	int			diff_iovecs = 0;
	int			iclog_space;
	int			iovhdr_res = 0, split_res = 0, ctx_res = 0;

	ASSERT(tp);

	/*
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs);

	spin_lock(&cil->xc_cil_lock);

	/* account for space used by new iovec headers  */
	iovhdr_res = diff_iovecs * sizeof(xlog_op_header_t);
	len += iovhdr_res;
	ctx->nvecs += diff_iovecs;

	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy))
		list_splice_init(&tp->t_busy, &ctx->busy_extents);

	/*
	 * Now transfer enough transaction reservation to the context ticket
	 * for the checkpoint. The context ticket is special - the unit
	 * reservation has to grow as well as the current reservation as we
	 * steal from tickets so we can correctly determine the space used
	 * during the transaction commit.
	 */
	if (ctx->ticket->t_curr_res == 0) {
		ctx_res = ctx->ticket->t_unit_res;
		ctx->ticket->t_curr_res = ctx_res;
		tp->t_ticket->t_curr_res -= ctx_res;
	}

	/* do we need space for more log record headers? */
	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
	if (len > 0 && (ctx->space_used / iclog_space !=
				(ctx->space_used + len) / iclog_space)) {
		split_res = (len + iclog_space - 1) / iclog_space;
		/* need to take into account split region headers, too */
		split_res *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
		ctx->ticket->t_unit_res += split_res;
		ctx->ticket->t_curr_res += split_res;
		tp->t_ticket->t_curr_res -= split_res;
		ASSERT(tp->t_ticket->t_curr_res >= len);
	}
	tp->t_ticket->t_curr_res -= len;
	ctx->space_used += len;

	/*
	 * If we've overrun the reservation, dump the tx details before we move
	 * the log items. Shutdown is imminent...
	 */
	if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
		xfs_warn(log->l_mp, "Transaction log reservation overrun:");
		xfs_warn(log->l_mp,
			 "  log items: %d bytes (iov hdrs: %d bytes)",
			 len, iovhdr_res);
		xfs_warn(log->l_mp, "  split region headers: %d bytes",
			 split_res);
		xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
		xlog_print_trans(tp);
	}

	/*
	 * Now (re-)position everything modified at the tail of the CIL.
	 * We do this here so we only need to take the CIL lock once during
	 * the transaction commit.
	 */
	list_for_each_entry(lip, &tp->t_items, li_trans) {

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * Only move the item if it isn't already at the tail. This is
		 * to prevent a transient list_empty() state when reinserting
		 * an item that is already the only item in the CIL.
		 */
		if (!list_is_last(&lip->li_cil, &cil->xc_cil))
			list_move_tail(&lip->li_cil, &cil->xc_cil);
	}

	spin_unlock(&cil->xc_cil_lock);

	if (tp->t_ticket->t_curr_res < 0)
		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
}
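
A quick worked example of the record-header accounting above, under assumed
sizes (a 32 KiB iclog with a 512-byte header, values picked for illustration
only): if the checkpoint already holds space_used bytes and this commit adds
len bytes that push it across an iclog boundary, the CIL steals one extra
record header plus op header of reservation per iclog the new data can span.

	#include <stdio.h>

	int main(void)
	{
		int iclog_size = 32768, iclog_hsize = 512;	/* assumed */
		int op_hdr = 12;	/* assumed on-disk op header size */
		int iclog_space = iclog_size - iclog_hsize;	/* 32256 */
		int space_used = 30000, len = 5000;
		int split_res = 0;

		if (len > 0 && (space_used / iclog_space !=
				(space_used + len) / iclog_space)) {
			/* number of iclogs the new data may span */
			split_res = (len + iclog_space - 1) / iclog_space;
			/* one record header + op header per split */
			split_res *= iclog_hsize + op_hdr;
		}
		/* prints 524: one extra header's worth of reservation */
		printf("extra reservation: %d bytes\n", split_res);
		return 0;
	}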

static void
xlog_cil_free_logvec(
	struct xfs_log_vec	*log_vector)
{
	struct xfs_log_vec	*lv;

	for (lv = log_vector; lv; ) {
		struct xfs_log_vec *next = lv->lv_next;
		kmem_free(lv);
		lv = next;
	}
}

static void
xlog_discard_endio_work(
	struct work_struct	*work)
{
	struct xfs_cil_ctx	*ctx =
		container_of(work, struct xfs_cil_ctx, discard_endio_work);
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;

	xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
	kmem_free(ctx);
}

/*
 * Queue up the actual completion to a thread to avoid IRQ-safe locking for
 * pagb_lock. Note that we need an unbounded workqueue, otherwise we might
 * stall on the discard and/or commit.
 */
static void
xlog_discard_endio(
	struct bio		*bio)
{
	struct xfs_cil_ctx	*ctx = bio->bi_private;

	INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
	queue_work(xfs_discard_wq, &ctx->discard_endio_work);
	bio_put(bio);
}

static void
xlog_discard_busy_extents(
	struct xfs_mount	*mp,
	struct xfs_cil_ctx	*ctx)
{
	struct list_head	*list = &ctx->busy_extents;
	struct xfs_extent_busy	*busyp;
	struct bio		*bio = NULL;
	struct blk_plug		plug;
	int			error = 0;

	ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);

	blk_start_plug(&plug);
	list_for_each_entry(busyp, list, list) {
		trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
					 busyp->length);

		error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
				XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
				XFS_FSB_TO_BB(mp, busyp->length),
				GFP_NOFS, 0, &bio);
		if (error && error != -EOPNOTSUPP) {
			xfs_info(mp,
				 "discard failed for extent [0x%llx,%u], error %d",
				 (unsigned long long)busyp->bno,
				 busyp->length,
				 error);
			break;
		}
	}

	if (bio) {
		bio->bi_private = ctx;
		bio->bi_end_io = xlog_discard_endio;
		submit_bio(bio);
	} else {
		xlog_discard_endio_work(&ctx->discard_endio_work);
	}
	blk_finish_plug(&plug);
}

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
	struct xfs_cil_ctx	*ctx,
	bool			abort)
{
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;

	/*
	 * If the I/O failed, we're aborting the commit and already shutdown.
	 * Wake any commit waiters before aborting the log items so we don't
	 * block async log pushers on callbacks. Async log pushers explicitly
	 * do not wait on log force completion because they may be holding
	 * locks required to unpin items.
	 */
	if (abort) {
		spin_lock(&ctx->cil->xc_push_lock);
		wake_up_all(&ctx->cil->xc_commit_wait);
		spin_unlock(&ctx->cil->xc_push_lock);
	}

	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
					ctx->start_lsn, abort);

	xfs_extent_busy_sort(&ctx->busy_extents);
	xfs_extent_busy_clear(mp, &ctx->busy_extents,
			      (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);

	spin_lock(&ctx->cil->xc_push_lock);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_push_lock);

	xlog_cil_free_logvec(ctx->lv_chain);

	if (!list_empty(&ctx->busy_extents))
		xlog_discard_busy_extents(mp, ctx);
	else
		kmem_free(ctx);
}

void
xlog_cil_process_committed(
	struct list_head	*list,
	bool			aborted)
{
	struct xfs_cil_ctx	*ctx;

	while ((ctx = list_first_entry_or_null(list,
			struct xfs_cil_ctx, iclog_entry))) {
		list_del(&ctx->iclog_entry);
		xlog_cil_committed(ctx, aborted);
	}
}

/*
 * Push the Committed Item List to the log. If @push_seq is zero, then it is a
 * background flush and so we can choose to ignore it. Otherwise, if the
 * current sequence is the same as @push_seq we need to do a flush. If
 * @push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * @push_seq is a value rather than a flag because that allows us to do an
 * unlocked check of the sequence number for a match. Hence we can allow log
 * forces to run racily and not issue pushes for the same sequence twice. If we
 * get a race between multiple pushes for the same sequence they will block on
 * the first one and then abort, hence avoiding needless pushes.
 */
STATIC int
xlog_cil_push(
	struct xlog		*log)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_vec	*lv;
	struct xfs_cil_ctx	*ctx;
	struct xfs_cil_ctx	*new_ctx;
	struct xlog_in_core	*commit_iclog;
	struct xlog_ticket	*tic;
	int			num_iovecs;
	int			error = 0;
	struct xfs_trans_header thdr;
	struct xfs_log_iovec	lhdr;
	struct xfs_log_vec	lvhdr = { NULL };
	xfs_lsn_t		commit_lsn;
	xfs_lsn_t		push_seq;

	if (!cil)
		return 0;

	new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_NOFS);
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	down_write(&cil->xc_ctx_lock);
	ctx = cil->xc_ctx;

	spin_lock(&cil->xc_push_lock);
	push_seq = cil->xc_push_seq;
	ASSERT(push_seq <= ctx->sequence);

	/*
	 * Check if we've anything to push. If there is nothing, then we don't
	 * move on to a new sequence number and so we have to be able to push
	 * this sequence again later.
	 */
	if (list_empty(&cil->xc_cil)) {
		cil->xc_push_seq = 0;
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/* check for a previously pushed sequence */
	if (push_seq < cil->xc_ctx->sequence) {
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/*
	 * We are now going to push this context, so add it to the committing
	 * list before we do anything else. This ensures that anyone waiting
	 * on this push can easily detect the difference between a "push in
	 * progress" and "CIL is empty, nothing to do".
	 *
	 * IOWs, a wait loop can now check for:
	 *	the current sequence not being found on the committing list;
	 *	an empty CIL; and
	 *	an unchanged sequence number
	 * to detect a push that had nothing to do and therefore does not need
	 * waiting on. If the CIL is not empty, we get put on the committing
	 * list before emptying the CIL and bumping the sequence number. Hence
	 * an empty CIL and an unchanged sequence number means we jumped out
	 * above after doing nothing.
	 *
	 * Hence the waiter will either find the commit sequence on the
	 * committing list or the sequence number will be unchanged and the
	 * CIL still dirty. In that latter case, the push has not yet started,
	 * and so the waiter will have to continue trying to check the CIL
	 * committing list until it is found.
	 */
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_push_lock);

	/*
	 * Pull all the log vectors off the items in the CIL, and remove the
	 * items from the CIL. We don't need the CIL lock here because it's
	 * only needed on the transaction commit side which is currently
	 * locked out by the flush lock.
	 */
	lv = NULL;
	num_iovecs = 0;
	while (!list_empty(&cil->xc_cil)) {
		struct xfs_log_item	*item;

		item = list_first_entry(&cil->xc_cil,
					struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		if (!ctx->lv_chain)
			ctx->lv_chain = item->li_lv;
		else
			lv->lv_next = item->li_lv;
		lv = item->li_lv;
		item->li_lv = NULL;
		num_iovecs += lv->lv_niovecs;
	}

	/*
	 * Initialise the new context and attach it to the CIL so that new
	 * commits land in the next checkpoint sequence.
	 */
	INIT_LIST_HEAD(&new_ctx->committing);
	INIT_LIST_HEAD(&new_ctx->busy_extents);
	new_ctx->sequence = ctx->sequence + 1;
	new_ctx->cil = cil;
	cil->xc_ctx = new_ctx;

	/*
	 * The switch is now done, so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI. Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we update the current sequence under the push lock so that
	 * log forces started after this point see the new context, and only
	 * then drop the context lock to re-enable transaction commits.
	 */
	spin_lock(&cil->xc_push_lock);
	cil->xc_current_sequence = new_ctx->sequence;
	spin_unlock(&cil->xc_push_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 *
	 * The LSN we need to pass to the log items on transaction commit is
	 * the LSN reported by the first log vector write. If we use the commit
	 * record lsn then we can move the tail beyond the grant write head.
	 */
	tic = ctx->ticket;
	thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	thdr.th_type = XFS_TRANS_CHECKPOINT;
	thdr.th_tid = tic->t_tid;
	thdr.th_num_items = num_iovecs;
	lhdr.i_addr = &thdr;
	lhdr.i_len = sizeof(xfs_trans_header_t);
	lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
	tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

	lvhdr.lv_niovecs = 1;
	lvhdr.lv_iovecp = &lhdr;
	lvhdr.lv_next = ctx->lv_chain;

	error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0);
	if (error)
		goto out_abort_free_ticket;

	/*
	 * Now that we've written the checkpoint into the log, strictly order
	 * the commit records so replay will get them in the right order.
	 */
restart:
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by
		 * the shutdown, but then went back to sleep once already in
		 * the shutdown state.
		 */
		if (XLOG_FORCED_SHUTDOWN(log)) {
			spin_unlock(&cil->xc_push_lock);
			goto out_abort_free_ticket;
		}

		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (new_ctx->sequence >= ctx->sequence)
			continue;
		if (!new_ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
	}
	spin_unlock(&cil->xc_push_lock);

	/* xfs_log_done always frees the ticket on error. */
	commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, false);
	if (commit_lsn == -1)
		goto out_abort;

	spin_lock(&commit_iclog->ic_callback_lock);
	if (commit_iclog->ic_state & XLOG_STATE_IOERROR) {
		spin_unlock(&commit_iclog->ic_callback_lock);
		goto out_abort;
	}
	ASSERT_ALWAYS(commit_iclog->ic_state == XLOG_STATE_ACTIVE ||
		      commit_iclog->ic_state == XLOG_STATE_WANT_SYNC);
	list_add_tail(&ctx->iclog_entry, &commit_iclog->ic_callbacks);
	spin_unlock(&commit_iclog->ic_callback_lock);

	/*
	 * now the checkpoint commit is complete and we've attached the
	 * callbacks to the iclog we can assign the commit LSN to the context
	 * and wake up anyone who is waiting for the commit to complete.
	 */
	spin_lock(&cil->xc_push_lock);
	ctx->commit_lsn = commit_lsn;
	wake_up_all(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_push_lock);

	/* release the hounds! */
	return xfs_log_release_iclog(log->l_mp, commit_iclog);

out_skip:
	up_write(&cil->xc_ctx_lock);
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return 0;

out_abort_free_ticket:
	xfs_log_ticket_put(tic);
out_abort:
	xlog_cil_committed(ctx, true);
	return -EIO;
}
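
The restart: loop above is a common kernel waiting pattern: scan a list under
a lock, sleep on a wait queue (which drops the lock) when a predecessor is
not yet done, and rescan from the top after waking because the list may have
changed while we slept. A minimal user-space analogue with pthreads, all
names hypothetical:

	#include <pthread.h>
	#include <stdbool.h>

	struct waiter_ctx {
		pthread_mutex_t	lock;
		pthread_cond_t	cond;
		int		n;	/* number of contexts */
		bool		*done;	/* done[i]: context i committed */
	};

	/* Wait until every context with an index below mine has committed. */
	static void wait_for_predecessors(struct waiter_ctx *wc, int mine)
	{
		pthread_mutex_lock(&wc->lock);
	restart:
		for (int i = 0; i < mine; i++) {
			if (!wc->done[i]) {
				/* drops the lock while sleeping, like xlog_wait() */
				pthread_cond_wait(&wc->cond, &wc->lock);
				goto restart;	/* state may have changed */
			}
		}
		pthread_mutex_unlock(&wc->lock);
	}

	/* Completion side: mark ourselves done and wake all waiters. */
	static void mark_committed(struct waiter_ctx *wc, int mine)
	{
		pthread_mutex_lock(&wc->lock);
		wc->done[mine] = true;
		pthread_cond_broadcast(&wc->cond);	/* like wake_up_all() */
		pthread_mutex_unlock(&wc->lock);
	}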

static void
xlog_cil_push_work(
	struct work_struct	*work)
{
	struct xfs_cil		*cil = container_of(work, struct xfs_cil,
							xc_push_work);
	xlog_cil_push(cil->xc_log);
}

/*
 * We need to push CIL every so often so we don't cache more than we can fit in
 * the log. The limit really is that a checkpoint can't be more than half the
 * log (the current checkpoint is not allowed to overwrite the previous
 * checkpoint), but commit latency and memory usage limit this to a smaller
 * size.
 */
static void
xlog_cil_push_background(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;

	/*
	 * The cil won't be empty because we are called while holding the
	 * context lock so whatever we added to the CIL will still be there.
	 */
	ASSERT(!list_empty(&cil->xc_cil));

	/*
	 * don't do a background push if we haven't used up all the
	 * space available yet.
	 */
	if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
		return;

	spin_lock(&cil->xc_push_lock);
	if (cil->xc_push_seq < cil->xc_current_sequence) {
		cil->xc_push_seq = cil->xc_current_sequence;
		queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
	}
	spin_unlock(&cil->xc_push_lock);
}

/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the
 * sequence number that is passed. When it returns, the work will be queued
 * for @push_seq, but it won't be completed. The caller is expected to do any
 * waiting for push_seq to complete if it is required.
 */
static void
xlog_cil_push_now(
	struct xlog	*log,
	xfs_lsn_t	push_seq)
{
	struct xfs_cil	*cil = log->l_cilp;

	if (!cil)
		return;

	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

	/* start on any pending background push to minimise wait time on it */
	flush_work(&cil->xc_push_work);

	/*
	 * If the CIL is empty or we've already pushed the sequence then
	 * there's no work we need to do.
	 */
	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
		spin_unlock(&cil->xc_push_lock);
		return;
	}

	cil->xc_push_seq = push_seq;
	queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
	spin_unlock(&cil->xc_push_lock);
}

bool
xlog_cil_empty(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;
	bool		empty = false;

	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil))
		empty = true;
	spin_unlock(&cil->xc_push_lock);
	return empty;
}

/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * We take the context read lock to lock out background CIL pushes while the
 * items are inserted, and drop it once the items have been unlocked.
 */
void
xfs_log_commit_cil(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_lsn_t		*commit_lsn,
	bool			regrant)
{
	struct xlog		*log = mp->m_log;
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_item	*lip, *next;
	xfs_lsn_t		xc_commit_lsn;

	/*
	 * Do all necessary memory allocation before we lock the CIL.
	 * This ensures the allocation does not deadlock with a CIL
	 * push in memory reclaim (e.g. from kswapd).
	 */
	xlog_cil_alloc_shadow_bufs(log, tp);

	/* lock out background commit */
	down_read(&cil->xc_ctx_lock);

	xlog_cil_insert_items(log, tp);

	xc_commit_lsn = cil->xc_ctx->sequence;
	if (commit_lsn)
		*commit_lsn = xc_commit_lsn;

	xfs_log_done(mp, tp->t_ticket, NULL, regrant);
	tp->t_ticket = NULL;
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * Once all the items of the transaction have been copied to the CIL,
	 * the items can be unlocked and freed.
	 *
	 * This needs to be done before we drop the CIL context lock because we
	 * have to update state in the log items and unlock them before they go
	 * to disk. If we don't, then the CIL checkpoint can race with us and
	 * we can run checkpoint completion before we've updated and unlocked
	 * the log items. This affects (at least) processing of stale buffers,
	 * inodes and EFIs.
	 */
	trace_xfs_trans_commit_items(tp, _RET_IP_);
	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (lip->li_ops->iop_committing)
			lip->li_ops->iop_committing(lip, xc_commit_lsn);
	}
	xlog_cil_push_background(log);

	up_read(&cil->xc_ctx_lock);
}

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence
 * number given. Hence the only time we will trigger a push here is
 * if the push sequence is the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_lsn(
	struct xlog	*log,
	xfs_lsn_t	sequence)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	/*
	 * check to see if we need to force out the current context.
	 * xlog_cil_push_now() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
restart:
	xlog_cil_push_now(log, sequence);

	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by
		 * the shutdown, but then went back to sleep once already in
		 * the shutdown state.
		 */
		if (XLOG_FORCED_SHUTDOWN(log))
			goto out_shutdown;
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}

	/*
	 * The call to xlog_cil_push_now() executes the push in the background.
	 * Hence by the time we have got here our sequence may not have been
	 * pushed yet. This is true if the current sequence still matches the
	 * push sequence after the above wait loop and the CIL still contains
	 * dirty objects. This is guaranteed by the push code first adding the
	 * context to the committing list before emptying the CIL.
	 *
	 * Hence if we don't find the context in the committing list and the
	 * current sequence number is unchanged then the CIL contents are
	 * significant. If the CIL is empty, it means there was nothing to push
	 * and that means there is nothing to wait for. If the CIL is not
	 * empty, it means we haven't yet started the push, because if it had
	 * started we would have found the context on the committing list.
	 */
	if (sequence == cil->xc_current_sequence &&
	    !list_empty(&cil->xc_cil)) {
		spin_unlock(&cil->xc_push_lock);
		goto restart;
	}

	spin_unlock(&cil->xc_push_lock);
	return commit_lsn;

	/*
	 * We detected a shutdown in progress. We need to trigger the log force
	 * to pass through its iclog state machine so that the head of the log
	 * gets updated appropriately.
	 */
out_shutdown:
	spin_unlock(&cil->xc_push_lock);
	return 0;
}

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item	*lip)
{
	struct xfs_cil_ctx	*ctx;

	if (list_empty(&lip->li_cil))
		return false;

	ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0)
		return false;
	return true;
}

/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
	struct xlog	*log)
{
	struct xfs_cil		*cil;
	struct xfs_cil_ctx	*ctx;

	cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
	if (!cil)
		return -ENOMEM;

	ctx = kmem_zalloc(sizeof(*ctx), KM_MAYFAIL);
	if (!ctx) {
		kmem_free(cil);
		return -ENOMEM;
	}

	INIT_WORK(&cil->xc_push_work, xlog_cil_push_work);
	INIT_LIST_HEAD(&cil->xc_cil);
	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_cil_lock);
	spin_lock_init(&cil->xc_push_lock);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_commit_wait);

	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents);
	ctx->sequence = 1;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
	cil->xc_current_sequence = ctx->sequence;

	cil->xc_log = log;
	log->l_cilp = cil;
	return 0;
}

void
xlog_cil_destroy(
	struct xlog	*log)
{
	if (log->l_cilp->xc_ctx) {
		if (log->l_cilp->xc_ctx->ticket)
			xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
		kmem_free(log->l_cilp->xc_ctx);
	}

	ASSERT(list_empty(&log->l_cilp->xc_cil));
	kmem_free(log->l_cilp);
}