Lines matching refs:log in fs/xfs/xfs_log.c — each entry gives the source line number, the matched line, and the enclosing function; "argument" and "local" note how log is bound at that point.
43 struct xlog *log,
56 struct xlog *log,
60 struct xlog *log,
64 struct xlog *log);
70 struct xlog *log,
75 struct xlog *log,
83 struct xlog *log,
87 struct xlog *log,
92 struct xlog *log,
97 struct xlog *log,
101 struct xlog *log,
105 struct xlog *log,
111 struct xlog *log,
115 struct xlog *log);
118 struct xlog *log,
124 struct xlog *log,
136 struct xlog *log);
140 struct xlog *log, in xlog_grant_sub_space() argument
154 space += log->l_logsize; in xlog_grant_sub_space()
166 struct xlog *log, in xlog_grant_add_space() argument
179 tmp = log->l_logsize - space; in xlog_grant_add_space()
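Note: lines 140-179 above are the core of the grant-head accounting. Each head is a single 64-bit value packing a (cycle, byte offset) pair, and add/sub wrap that offset around the physical log size. Below is a minimal userspace model of that arithmetic; the packing layout matches the kernel's cycle-in-high-bits convention, but the LOGSIZE constant and the non-atomic updates are simplifications (the kernel loops on atomic64_cmpxchg).

#include <stdint.h>
#include <stdio.h>

#define LOGSIZE (16 * 1024 * 1024)   /* hypothetical l_logsize in bytes */

/* pack/crack a grant head: cycle in the high 32 bits, byte offset low */
static uint64_t grant_pack(int cycle, int space)
{
	return ((uint64_t)cycle << 32) | (uint32_t)space;
}

static void grant_crack(uint64_t head, int *cycle, int *space)
{
	*cycle = head >> 32;
	*space = head & 0xffffffffu;
}

/* model of xlog_grant_add_space(): advance the head, wrapping the cycle */
static uint64_t grant_add_space(uint64_t head, int bytes)
{
	int cycle, space;

	grant_crack(head, &cycle, &space);
	int tmp = LOGSIZE - space;
	if (tmp > bytes) {
		space += bytes;
	} else {
		space = bytes - tmp;   /* wrapped past the end of the log */
		cycle++;
	}
	return grant_pack(cycle, space);
}

/* model of xlog_grant_sub_space(): retreat the head, unwrapping */
static uint64_t grant_sub_space(uint64_t head, int bytes)
{
	int cycle, space;

	grant_crack(head, &cycle, &space);
	space -= bytes;
	if (space < 0) {
		space += LOGSIZE;      /* borrowed from the previous cycle */
		cycle--;
	}
	return grant_pack(cycle, space);
}

int main(void)
{
	int cycle, space;
	uint64_t head = grant_pack(1, LOGSIZE - 100);

	head = grant_add_space(head, 300);   /* crosses the end of the log */
	grant_crack(head, &cycle, &space);
	printf("cycle=%d space=%d\n", cycle, space);  /* cycle=2 space=200 */
	head = grant_sub_space(head, 300);   /* and back again to cycle 1 */
	return 0;
}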
216 struct xlog *log, in xlog_ticket_reservation() argument
220 if (head == &log->l_write_head) { in xlog_ticket_reservation()
233 struct xlog *log, in xlog_grant_head_wake() argument
241 need_bytes = xlog_ticket_reservation(log, head, tic); in xlog_grant_head_wake()
246 trace_xfs_log_grant_wake_up(log, tic); in xlog_grant_head_wake()
255 struct xlog *log, in xlog_grant_head_wait() argument
264 if (XLOG_FORCED_SHUTDOWN(log)) in xlog_grant_head_wait()
266 xlog_grant_push_ail(log, need_bytes); in xlog_grant_head_wait()
273 trace_xfs_log_grant_sleep(log, tic); in xlog_grant_head_wait()
275 trace_xfs_log_grant_wake(log, tic); in xlog_grant_head_wait()
278 if (XLOG_FORCED_SHUTDOWN(log)) in xlog_grant_head_wait()
280 } while (xlog_space_left(log, &head->grant) < need_bytes); in xlog_grant_head_wait()
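The wait loop at lines 255-280 parks a ticket on the grant head's queue and rechecks free space after every wakeup, failing the reservation if the log was force-shut-down while it slept. A hedged pthread analogue of that recheck-under-lock pattern follows; the struct and names are hypothetical stand-ins, not the kernel types.

#include <pthread.h>
#include <stdbool.h>

/* hypothetical stand-in for one xlog grant head */
struct grant_head {
	pthread_mutex_t lock;       /* stands in for head->lock */
	pthread_cond_t  wait;       /* stands in for the ticket waitqueue */
	int             free_bytes;
	bool            shutdown;   /* stands in for XLOG_FORCED_SHUTDOWN() */
};

/* model of xlog_grant_head_wait(): sleep until space frees up or shutdown */
static int grant_head_wait(struct grant_head *head, int need_bytes)
{
	pthread_mutex_lock(&head->lock);
	do {
		if (head->shutdown)
			goto shutdown;
		/*
		 * The kernel first nudges the AIL (xlog_grant_push_ail) so
		 * that tail pushing can actually free the space we need.
		 * pthread_cond_wait drops the lock while asleep and
		 * retakes it on wakeup, like xlog_wait().
		 */
		pthread_cond_wait(&head->wait, &head->lock);
		if (head->shutdown)
			goto shutdown;
	} while (head->free_bytes < need_bytes);
	pthread_mutex_unlock(&head->lock);
	return 0;                   /* caller moves the grant head forward */
shutdown:
	pthread_mutex_unlock(&head->lock);
	return -1;                  /* the kernel returns an EIO error here */
}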
308 struct xlog *log, in xlog_grant_head_check() argument
316 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); in xlog_grant_head_check()
324 *need_bytes = xlog_ticket_reservation(log, head, tic); in xlog_grant_head_check()
325 free_bytes = xlog_space_left(log, &head->grant); in xlog_grant_head_check()
328 if (!xlog_grant_head_wake(log, head, &free_bytes) || in xlog_grant_head_check()
330 error = xlog_grant_head_wait(log, head, tic, in xlog_grant_head_check()
336 error = xlog_grant_head_wait(log, head, tic, *need_bytes); in xlog_grant_head_check()
375 struct xlog *log = mp->m_log; in xfs_log_regrant() local
379 if (XLOG_FORCED_SHUTDOWN(log)) in xfs_log_regrant()
392 xlog_grant_push_ail(log, tic->t_unit_res); in xfs_log_regrant()
400 trace_xfs_log_regrant(log, tic); in xfs_log_regrant()
402 error = xlog_grant_head_check(log, &log->l_write_head, tic, in xfs_log_regrant()
407 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); in xfs_log_regrant()
408 trace_xfs_log_regrant_exit(log, tic); in xfs_log_regrant()
409 xlog_verify_grant_tail(log); in xfs_log_regrant()
441 struct xlog *log = mp->m_log; in xfs_log_reserve() local
448 if (XLOG_FORCED_SHUTDOWN(log)) in xfs_log_reserve()
454 tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent, in xfs_log_reserve()
462 xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt in xfs_log_reserve()
465 trace_xfs_log_reserve(log, tic); in xfs_log_reserve()
467 error = xlog_grant_head_check(log, &log->l_reserve_head, tic, in xfs_log_reserve()
472 xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes); in xfs_log_reserve()
473 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); in xfs_log_reserve()
474 trace_xfs_log_reserve_exit(log, tic); in xfs_log_reserve()
475 xlog_verify_grant_tail(log); in xfs_log_reserve()
518 struct xlog *log = mp->m_log; in xfs_log_done() local
521 if (XLOG_FORCED_SHUTDOWN(log) || in xfs_log_done()
527 (xlog_commit_record(log, ticket, iclog, &lsn)))) { in xfs_log_done()
537 trace_xfs_log_done_nonperm(log, ticket); in xfs_log_done()
543 xlog_ungrant_log_space(log, ticket); in xfs_log_done()
546 trace_xfs_log_done_perm(log, ticket); in xfs_log_done()
548 xlog_regrant_reserve_log_space(log, ticket); in xfs_log_done()
781 struct xlog *log = mp->m_log; in xfs_log_unmount_write() local
798 ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log))); in xfs_log_unmount_write()
801 first_iclog = iclog = log->l_iclog; in xfs_log_unmount_write()
810 if (! (XLOG_FORCED_SHUTDOWN(log))) { in xfs_log_unmount_write()
835 error = xlog_write(log, &vec, tic, &lsn, in xfs_log_unmount_write()
848 spin_lock(&log->l_icloglock); in xfs_log_unmount_write()
849 iclog = log->l_iclog; in xfs_log_unmount_write()
851 xlog_state_want_sync(log, iclog); in xfs_log_unmount_write()
852 spin_unlock(&log->l_icloglock); in xfs_log_unmount_write()
853 error = xlog_state_release_iclog(log, iclog); in xfs_log_unmount_write()
855 spin_lock(&log->l_icloglock); in xfs_log_unmount_write()
858 if (!XLOG_FORCED_SHUTDOWN(log)) { in xfs_log_unmount_write()
860 &log->l_icloglock); in xfs_log_unmount_write()
862 spin_unlock(&log->l_icloglock); in xfs_log_unmount_write()
865 spin_unlock(&log->l_icloglock); in xfs_log_unmount_write()
868 trace_xfs_log_umount_write(log, tic); in xfs_log_unmount_write()
869 xlog_ungrant_log_space(log, tic); in xfs_log_unmount_write()
886 spin_lock(&log->l_icloglock); in xfs_log_unmount_write()
887 iclog = log->l_iclog; in xfs_log_unmount_write()
890 xlog_state_want_sync(log, iclog); in xfs_log_unmount_write()
891 spin_unlock(&log->l_icloglock); in xfs_log_unmount_write()
892 error = xlog_state_release_iclog(log, iclog); in xfs_log_unmount_write()
894 spin_lock(&log->l_icloglock); in xfs_log_unmount_write()
901 &log->l_icloglock); in xfs_log_unmount_write()
903 spin_unlock(&log->l_icloglock); in xfs_log_unmount_write()
985 struct xlog *log = mp->m_log; in xfs_log_space_wake() local
988 if (XLOG_FORCED_SHUTDOWN(log)) in xfs_log_space_wake()
991 if (!list_empty_careful(&log->l_write_head.waiters)) { in xfs_log_space_wake()
992 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); in xfs_log_space_wake()
994 spin_lock(&log->l_write_head.lock); in xfs_log_space_wake()
995 free_bytes = xlog_space_left(log, &log->l_write_head.grant); in xfs_log_space_wake()
996 xlog_grant_head_wake(log, &log->l_write_head, &free_bytes); in xfs_log_space_wake()
997 spin_unlock(&log->l_write_head.lock); in xfs_log_space_wake()
1000 if (!list_empty_careful(&log->l_reserve_head.waiters)) { in xfs_log_space_wake()
1001 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); in xfs_log_space_wake()
1003 spin_lock(&log->l_reserve_head.lock); in xfs_log_space_wake()
1004 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); in xfs_log_space_wake()
1005 xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes); in xfs_log_space_wake()
1006 spin_unlock(&log->l_reserve_head.lock); in xfs_log_space_wake()
1030 struct xlog *log = mp->m_log; in xfs_log_need_covered() local
1036 if (!xlog_cil_empty(log)) in xfs_log_need_covered()
1039 spin_lock(&log->l_icloglock); in xfs_log_need_covered()
1040 switch (log->l_covered_state) { in xfs_log_need_covered()
1047 if (xfs_ail_min_lsn(log->l_ailp)) in xfs_log_need_covered()
1049 if (!xlog_iclogs_empty(log)) in xfs_log_need_covered()
1053 if (log->l_covered_state == XLOG_STATE_COVER_NEED) in xfs_log_need_covered()
1054 log->l_covered_state = XLOG_STATE_COVER_DONE; in xfs_log_need_covered()
1056 log->l_covered_state = XLOG_STATE_COVER_DONE2; in xfs_log_need_covered()
1062 spin_unlock(&log->l_icloglock); in xfs_log_need_covered()
1073 struct xlog *log = mp->m_log; in xlog_assign_tail_lsn_locked() local
1088 tail_lsn = atomic64_read(&log->l_last_sync_lsn); in xlog_assign_tail_lsn_locked()
1089 trace_xfs_log_assign_tail_lsn(log, tail_lsn); in xlog_assign_tail_lsn_locked()
1090 atomic64_set(&log->l_tail_lsn, tail_lsn); in xlog_assign_tail_lsn_locked()
1123 struct xlog *log, in xlog_space_left() argument
1133 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes); in xlog_space_left()
1136 free_bytes = log->l_logsize - (head_bytes - tail_bytes); in xlog_space_left()
1148 xfs_alert(log->l_mp, in xlog_space_left()
1154 free_bytes = log->l_logsize; in xlog_space_left()
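Lines 1123-1154 compute free log space from the head and tail (cycle, bytes) pairs; which branch is taken depends on whether the head has lapped onto the next cycle, and a head observed behind the tail is treated as transient corruption, alerted (line 1148), and answered with the full log size as a drastic-but-visible fallback (line 1154). A sketch of the same decision tree, with plain ints and hypothetical names:

/*
 * Model of xlog_space_left(): free bytes between the grant head and
 * the log tail, each given as a (cycle, bytes) pair. logsize is the
 * byte size of the circular log.
 */
static int space_left(int logsize,
		      int head_cycle, int head_bytes,
		      int tail_cycle, int tail_bytes)
{
	if (tail_cycle == head_cycle && head_bytes >= tail_bytes) {
		/* same lap: all but the in-use span [tail, head) is free */
		return logsize - (head_bytes - tail_bytes);
	} else if (tail_cycle + 1 == head_cycle && head_bytes <= tail_bytes) {
		/* head one lap ahead: only the gap up to the tail is free */
		return tail_bytes - head_bytes;
	}
	/*
	 * Anything else means the head ran past the tail or the cycles
	 * are out of step; the kernel alerts and falls back to the full
	 * log size rather than wedging the machine.
	 */
	return logsize;
}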
1216 struct xlog *log) in xlog_get_iclog_buffer_size() argument
1222 log->l_iclog_bufs = XLOG_MAX_ICLOGS; in xlog_get_iclog_buffer_size()
1224 log->l_iclog_bufs = mp->m_logbufs; in xlog_get_iclog_buffer_size()
1230 size = log->l_iclog_size = mp->m_logbsize; in xlog_get_iclog_buffer_size()
1231 log->l_iclog_size_log = 0; in xlog_get_iclog_buffer_size()
1233 log->l_iclog_size_log++; in xlog_get_iclog_buffer_size()
1245 log->l_iclog_hsize = xhdrs << BBSHIFT; in xlog_get_iclog_buffer_size()
1246 log->l_iclog_heads = xhdrs; in xlog_get_iclog_buffer_size()
1249 log->l_iclog_hsize = BBSIZE; in xlog_get_iclog_buffer_size()
1250 log->l_iclog_heads = 1; in xlog_get_iclog_buffer_size()
1256 log->l_iclog_size = XLOG_BIG_RECORD_BSIZE; in xlog_get_iclog_buffer_size()
1257 log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT; in xlog_get_iclog_buffer_size()
1260 log->l_iclog_hsize = BBSIZE; in xlog_get_iclog_buffer_size()
1261 log->l_iclog_heads = 1; in xlog_get_iclog_buffer_size()
1266 mp->m_logbufs = log->l_iclog_bufs; in xlog_get_iclog_buffer_size()
1268 mp->m_logbsize = log->l_iclog_size; in xlog_get_iclog_buffer_size()
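Lines 1216-1268 size the in-core log buffers; the shift loop at lines 1230-1233 merely derives log2 of the (power-of-two) buffer size for l_iclog_size_log. The equivalent computation in isolation, with a hypothetical helper name:

/* model of the l_iclog_size_log loop: log2 of a power-of-two size */
static int iclog_size_log2(unsigned int size)
{
	int log2 = 0;

	while (size != 1) {   /* size is validated as a power of two */
		size >>= 1;
		log2++;
	}
	return log2;
}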
1289 struct xlog *log = container_of(to_delayed_work(work), in xfs_log_worker() local
1291 struct xfs_mount *mp = log->l_mp; in xfs_log_worker()
1329 struct xlog *log; in xlog_alloc_log() local
1338 log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL); in xlog_alloc_log()
1339 if (!log) { in xlog_alloc_log()
1344 log->l_mp = mp; in xlog_alloc_log()
1345 log->l_targ = log_target; in xlog_alloc_log()
1346 log->l_logsize = BBTOB(num_bblks); in xlog_alloc_log()
1347 log->l_logBBstart = blk_offset; in xlog_alloc_log()
1348 log->l_logBBsize = num_bblks; in xlog_alloc_log()
1349 log->l_covered_state = XLOG_STATE_COVER_IDLE; in xlog_alloc_log()
1350 log->l_flags |= XLOG_ACTIVE_RECOVERY; in xlog_alloc_log()
1351 INIT_DELAYED_WORK(&log->l_work, xfs_log_worker); in xlog_alloc_log()
1353 log->l_prev_block = -1; in xlog_alloc_log()
1355 xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0); in xlog_alloc_log()
1356 xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0); in xlog_alloc_log()
1357 log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ in xlog_alloc_log()
1359 xlog_grant_head_init(&log->l_reserve_head); in xlog_alloc_log()
1360 xlog_grant_head_init(&log->l_write_head); in xlog_alloc_log()
1379 if (log2_size && log->l_logBBstart > 0 && in xlog_alloc_log()
1387 log->l_sectBBsize = 1 << log2_size; in xlog_alloc_log()
1389 xlog_get_iclog_buffer_size(mp, log); in xlog_alloc_log()
1398 BTOBB(log->l_iclog_size), 0); in xlog_alloc_log()
1413 log->l_xbuf = bp; in xlog_alloc_log()
1415 spin_lock_init(&log->l_icloglock); in xlog_alloc_log()
1416 init_waitqueue_head(&log->l_flush_wait); in xlog_alloc_log()
1418 iclogp = &log->l_iclog; in xlog_alloc_log()
1426 ASSERT(log->l_iclog_size >= 4096); in xlog_alloc_log()
1427 for (i=0; i < log->l_iclog_bufs; i++) { in xlog_alloc_log()
1437 BTOBB(log->l_iclog_size), 0); in xlog_alloc_log()
1450 log->l_iclog_bak[i] = (xfs_caddr_t)&(iclog->ic_header); in xlog_alloc_log()
1456 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1); in xlog_alloc_log()
1457 head->h_size = cpu_to_be32(log->l_iclog_size); in xlog_alloc_log()
1462 iclog->ic_size = BBTOB(bp->b_length) - log->l_iclog_hsize; in xlog_alloc_log()
1464 iclog->ic_log = log; in xlog_alloc_log()
1468 iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize; in xlog_alloc_log()
1475 *iclogp = log->l_iclog; /* complete ring */ in xlog_alloc_log()
1476 log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */ in xlog_alloc_log()
1478 error = xlog_cil_init(log); in xlog_alloc_log()
1481 return log; in xlog_alloc_log()
1484 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) { in xlog_alloc_log()
1490 spinlock_destroy(&log->l_icloglock); in xlog_alloc_log()
1491 xfs_buf_free(log->l_xbuf); in xlog_alloc_log()
1493 kmem_free(log); in xlog_alloc_log()
1505 struct xlog *log, in xlog_commit_record() argument
1510 struct xfs_mount *mp = log->l_mp; in xlog_commit_record()
1523 error = xlog_write(log, &vec, ticket, commitlsnp, iclog, in xlog_commit_record()
1539 struct xlog *log, in xlog_grant_push_ail() argument
1550 ASSERT(BTOBB(need_bytes) < log->l_logBBsize); in xlog_grant_push_ail()
1552 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); in xlog_grant_push_ail()
1561 free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2)); in xlog_grant_push_ail()
1566 xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle, in xlog_grant_push_ail()
1569 if (threshold_block >= log->l_logBBsize) { in xlog_grant_push_ail()
1570 threshold_block -= log->l_logBBsize; in xlog_grant_push_ail()
1580 last_sync_lsn = atomic64_read(&log->l_last_sync_lsn); in xlog_grant_push_ail()
1589 if (!XLOG_FORCED_SHUTDOWN(log)) in xlog_grant_push_ail()
1590 xfs_ail_push(log->l_ailp, threshold_lsn); in xlog_grant_push_ail()
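xlog_grant_push_ail() (lines 1539-1590) turns a byte shortfall into a target tail LSN: convert to basic blocks, enforce a floor of a quarter of the log, advance the current tail by that many blocks with cycle/block wraparound, and hand the result to the AIL as a push target. The kernel additionally caps the target at l_last_sync_lsn (line 1580), omitted in this sketch; the LSN packing of cycle<<32|block is the standard convention.

#include <stdint.h>

#define BBSHIFT 9                 /* 512-byte basic blocks */

typedef uint64_t xfs_lsn_t;

/* model of the xlog_grant_push_ail() threshold computation */
static xfs_lsn_t push_ail_threshold(int log_bbsize,
				    int tail_cycle, int tail_block,
				    int need_bytes)
{
	/* bytes -> basic blocks, rounded up */
	int free_threshold = (need_bytes + (1 << BBSHIFT) - 1) >> BBSHIFT;

	/* push at least a quarter of the log, and at least 256 blocks */
	if (free_threshold < (log_bbsize >> 2))
		free_threshold = log_bbsize >> 2;
	if (free_threshold < 256)
		free_threshold = 256;

	int threshold_cycle = tail_cycle;
	int threshold_block = tail_block + free_threshold;
	if (threshold_block >= log_bbsize) {
		threshold_block -= log_bbsize;   /* wrapped past the end */
		threshold_cycle++;
	}
	return ((xfs_lsn_t)threshold_cycle << 32) | (uint32_t)threshold_block;
}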
1598 struct xlog *log, in xlog_pack_data() argument
1618 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { in xlog_pack_data()
1629 for (i = 1; i < log->l_iclog_heads; i++) in xlog_pack_data()
1642 struct xlog *log, in xlog_cksum() argument
1655 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { in xlog_cksum()
1659 for (i = 1; i < log->l_iclog_heads; i++) { in xlog_cksum()
1736 struct xlog *log, in xlog_sync() argument
1746 int v2 = xfs_sb_version_haslogv2(&log->l_mp->m_sb); in xlog_sync()
1753 count_init = log->l_iclog_hsize + iclog->ic_offset; in xlog_sync()
1756 if (v2 && log->l_mp->m_sb.sb_logsunit > 1) { in xlog_sync()
1758 count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init)); in xlog_sync()
1764 ASSERT((v2 && log->l_mp->m_sb.sb_logsunit > 1 && in xlog_sync()
1765 roundoff < log->l_mp->m_sb.sb_logsunit) in xlog_sync()
1767 (log->l_mp->m_sb.sb_logsunit <= 1 && in xlog_sync()
1771 xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff); in xlog_sync()
1772 xlog_grant_add_space(log, &log->l_write_head.grant, roundoff); in xlog_sync()
1775 xlog_pack_data(log, iclog, roundoff); in xlog_sync()
1789 if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) { in xlog_sync()
1792 split = count - (BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp))); in xlog_sync()
1793 count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp)); in xlog_sync()
1817 iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header, in xlog_sync()
1826 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) { in xlog_sync()
1838 if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp) in xlog_sync()
1839 xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp); in xlog_sync()
1844 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); in xlog_sync()
1845 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); in xlog_sync()
1847 xlog_verify_iclog(log, iclog, count, true); in xlog_sync()
1850 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); in xlog_sync()
1871 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) in xlog_sync()
1874 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); in xlog_sync()
1875 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); in xlog_sync()
1878 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); in xlog_sync()
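The tail end of xlog_sync() (lines 1789-1878) handles the physical wrap: a write that would run past l_logBBsize is issued as two I/Os, the first filling the log to its end and the "split" remainder landing at block zero, which is why the assertions at lines 1844-1845 and 1874-1875 bound each buffer separately. The split arithmetic, modeled in isolation:

/*
 * Model of the xlog_sync() wrap split: a write of count bytes starting
 * at byte offset start in a circular log of logsize bytes becomes one
 * I/O of first_len bytes plus, if it wraps, a second I/O of split_len
 * bytes at offset 0.
 */
static void split_log_write(long logsize, long start, long count,
			    long *first_len, long *split_len)
{
	if (start + count > logsize) {
		*first_len = logsize - start;     /* up to the physical end */
		*split_len = count - *first_len;  /* remainder wraps to 0 */
	} else {
		*first_len = count;
		*split_len = 0;
	}
}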
1894 struct xlog *log) in xlog_dealloc_log() argument
1899 xlog_cil_destroy(log); in xlog_dealloc_log()
1905 iclog = log->l_iclog; in xlog_dealloc_log()
1906 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_dealloc_log()
1917 xfs_buf_lock(log->l_xbuf); in xlog_dealloc_log()
1918 xfs_buf_unlock(log->l_xbuf); in xlog_dealloc_log()
1919 xfs_buf_set_empty(log->l_xbuf, BTOBB(log->l_iclog_size)); in xlog_dealloc_log()
1920 xfs_buf_free(log->l_xbuf); in xlog_dealloc_log()
1922 iclog = log->l_iclog; in xlog_dealloc_log()
1923 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_dealloc_log()
1929 spinlock_destroy(&log->l_icloglock); in xlog_dealloc_log()
1931 log->l_mp->m_log = NULL; in xlog_dealloc_log()
1932 kmem_free(log); in xlog_dealloc_log()
1941 struct xlog *log, in xlog_state_finish_copy() argument
1946 spin_lock(&log->l_icloglock); in xlog_state_finish_copy()
1951 spin_unlock(&log->l_icloglock); in xlog_state_finish_copy()
2132 struct xlog *log, in xlog_write_setup_ophdr() argument
2155 xfs_warn(log->l_mp, in xlog_write_setup_ophdr()
2215 struct xlog *log, in xlog_write_copy_finish() argument
2230 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); in xlog_write_copy_finish()
2233 return xlog_state_release_iclog(log, iclog); in xlog_write_copy_finish()
2241 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); in xlog_write_copy_finish()
2245 spin_lock(&log->l_icloglock); in xlog_write_copy_finish()
2246 xlog_state_want_sync(log, iclog); in xlog_write_copy_finish()
2247 spin_unlock(&log->l_icloglock); in xlog_write_copy_finish()
2250 return xlog_state_release_iclog(log, iclog); in xlog_write_copy_finish()
2300 struct xlog *log, in xlog_write() argument
2339 xlog_print_tic_res(log->l_mp, ticket); in xlog_write()
2348 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, in xlog_write()
2390 ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags); in xlog_write()
2403 xlog_verify_dest_ptr(log, ptr); in xlog_write()
2414 error = xlog_write_copy_finish(log, iclog, flags, in xlog_write()
2455 xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); in xlog_write()
2457 return xlog_state_release_iclog(log, iclog); in xlog_write()
2482 struct xlog *log) in xlog_state_clean_log() argument
2487 iclog = log->l_iclog; in xlog_state_clean_log()
2522 } while (iclog != log->l_iclog); in xlog_state_clean_log()
2533 switch (log->l_covered_state) { in xlog_state_clean_log()
2537 log->l_covered_state = XLOG_STATE_COVER_NEED; in xlog_state_clean_log()
2542 log->l_covered_state = XLOG_STATE_COVER_NEED2; in xlog_state_clean_log()
2544 log->l_covered_state = XLOG_STATE_COVER_NEED; in xlog_state_clean_log()
2549 log->l_covered_state = XLOG_STATE_COVER_IDLE; in xlog_state_clean_log()
2551 log->l_covered_state = XLOG_STATE_COVER_NEED; in xlog_state_clean_log()
2562 struct xlog *log) in xlog_get_lowest_lsn() argument
2567 lsn_log = log->l_iclog; in xlog_get_lowest_lsn()
2578 } while (lsn_log != log->l_iclog); in xlog_get_lowest_lsn()
2585 struct xlog *log, in xlog_state_do_callback() argument
2602 spin_lock(&log->l_icloglock); in xlog_state_do_callback()
2603 first_iclog = iclog = log->l_iclog; in xlog_state_do_callback()
2617 first_iclog = log->l_iclog; in xlog_state_do_callback()
2618 iclog = log->l_iclog; in xlog_state_do_callback()
2672 lowest_lsn = xlog_get_lowest_lsn(log); in xlog_state_do_callback()
2701 ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn), in xlog_state_do_callback()
2704 atomic64_set(&log->l_last_sync_lsn, in xlog_state_do_callback()
2710 spin_unlock(&log->l_icloglock); in xlog_state_do_callback()
2738 spin_lock(&log->l_icloglock); in xlog_state_do_callback()
2748 xlog_state_clean_log(log); in xlog_state_do_callback()
2759 xfs_warn(log->l_mp, in xlog_state_do_callback()
2771 first_iclog = iclog = log->l_iclog; in xlog_state_do_callback()
2793 if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) in xlog_state_do_callback()
2795 spin_unlock(&log->l_icloglock); in xlog_state_do_callback()
2798 wake_up_all(&log->l_flush_wait); in xlog_state_do_callback()
2820 struct xlog *log = iclog->ic_log; in xlog_state_done_syncing() local
2822 spin_lock(&log->l_icloglock); in xlog_state_done_syncing()
2838 spin_unlock(&log->l_icloglock); in xlog_state_done_syncing()
2850 spin_unlock(&log->l_icloglock); in xlog_state_done_syncing()
2851 xlog_state_do_callback(log, aborted, iclog); /* also cleans log */ in xlog_state_done_syncing()
2875 struct xlog *log, in xlog_state_get_iclog_space() argument
2888 spin_lock(&log->l_icloglock); in xlog_state_get_iclog_space()
2889 if (XLOG_FORCED_SHUTDOWN(log)) { in xlog_state_get_iclog_space()
2890 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
2894 iclog = log->l_iclog; in xlog_state_get_iclog_space()
2899 xlog_wait(&log->l_flush_wait, &log->l_icloglock); in xlog_state_get_iclog_space()
2914 ticket->t_curr_res -= log->l_iclog_hsize; in xlog_state_get_iclog_space()
2916 log->l_iclog_hsize, in xlog_state_get_iclog_space()
2918 head->h_cycle = cpu_to_be32(log->l_curr_cycle); in xlog_state_get_iclog_space()
2920 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); in xlog_state_get_iclog_space()
2921 ASSERT(log->l_curr_block >= 0); in xlog_state_get_iclog_space()
2934 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
2945 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
2946 error = xlog_state_release_iclog(log, iclog); in xlog_state_get_iclog_space()
2950 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
2966 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
2971 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
2986 struct xlog *log, in xlog_regrant_reserve_log_space() argument
2989 trace_xfs_log_regrant_reserve_enter(log, ticket); in xlog_regrant_reserve_log_space()
2994 xlog_grant_sub_space(log, &log->l_reserve_head.grant, in xlog_regrant_reserve_log_space()
2996 xlog_grant_sub_space(log, &log->l_write_head.grant, in xlog_regrant_reserve_log_space()
3001 trace_xfs_log_regrant_reserve_sub(log, ticket); in xlog_regrant_reserve_log_space()
3007 xlog_grant_add_space(log, &log->l_reserve_head.grant, in xlog_regrant_reserve_log_space()
3010 trace_xfs_log_regrant_reserve_exit(log, ticket); in xlog_regrant_reserve_log_space()
3033 struct xlog *log, in xlog_ungrant_log_space() argument
3041 trace_xfs_log_ungrant_enter(log, ticket); in xlog_ungrant_log_space()
3042 trace_xfs_log_ungrant_sub(log, ticket); in xlog_ungrant_log_space()
3054 xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes); in xlog_ungrant_log_space()
3055 xlog_grant_sub_space(log, &log->l_write_head.grant, bytes); in xlog_ungrant_log_space()
3057 trace_xfs_log_ungrant_exit(log, ticket); in xlog_ungrant_log_space()
3059 xfs_log_space_wake(log->l_mp); in xlog_ungrant_log_space()
3073 struct xlog *log, in xlog_state_release_iclog() argument
3082 if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) in xlog_state_release_iclog()
3086 spin_unlock(&log->l_icloglock); in xlog_state_release_iclog()
3094 xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp); in xlog_state_release_iclog()
3098 xlog_verify_tail_lsn(log, iclog, tail_lsn); in xlog_state_release_iclog()
3101 spin_unlock(&log->l_icloglock); in xlog_state_release_iclog()
3111 return xlog_sync(log, iclog); in xlog_state_release_iclog()
3125 struct xlog *log, in xlog_state_switch_iclogs() argument
3133 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); in xlog_state_switch_iclogs()
3134 log->l_prev_block = log->l_curr_block; in xlog_state_switch_iclogs()
3135 log->l_prev_cycle = log->l_curr_cycle; in xlog_state_switch_iclogs()
3138 log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize); in xlog_state_switch_iclogs()
3141 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) && in xlog_state_switch_iclogs()
3142 log->l_mp->m_sb.sb_logsunit > 1) { in xlog_state_switch_iclogs()
3143 __uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit); in xlog_state_switch_iclogs()
3144 log->l_curr_block = roundup(log->l_curr_block, sunit_bb); in xlog_state_switch_iclogs()
3147 if (log->l_curr_block >= log->l_logBBsize) { in xlog_state_switch_iclogs()
3148 log->l_curr_cycle++; in xlog_state_switch_iclogs()
3149 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM) in xlog_state_switch_iclogs()
3150 log->l_curr_cycle++; in xlog_state_switch_iclogs()
3151 log->l_curr_block -= log->l_logBBsize; in xlog_state_switch_iclogs()
3152 ASSERT(log->l_curr_block >= 0); in xlog_state_switch_iclogs()
3154 ASSERT(iclog == log->l_iclog); in xlog_state_switch_iclogs()
3155 log->l_iclog = iclog->ic_next; in xlog_state_switch_iclogs()
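xlog_state_switch_iclogs() (lines 3125-3155) retires the current iclog and advances the write head: bump l_curr_block by the data plus header blocks, round up to the log stripe unit on v2 logs, and wrap at the physical end, incrementing the cycle and skipping the value XLOG_HEADER_MAGIC_NUM, which could otherwise be mistaken for a record header once cycle numbers are stamped into log blocks. A model of the advance:

#include <stdint.h>

#define XLOG_HEADER_MAGIC_NUM 0xFEEDbabe   /* real on-disk record magic */

/* round x up to the next multiple of m (m > 0) */
static int roundup_int(int x, int m)
{
	return ((x + m - 1) / m) * m;
}

/* model of the head advance in xlog_state_switch_iclogs() */
static void advance_log_head(int log_bbsize, int sunit_bb,
			     uint32_t *curr_cycle, int *curr_block,
			     int used_blocks)
{
	*curr_block += used_blocks;
	if (sunit_bb > 1)                     /* v2 log with a stripe unit */
		*curr_block = roundup_int(*curr_block, sunit_bb);

	if (*curr_block >= log_bbsize) {
		(*curr_cycle)++;
		/* a cycle equal to the record magic would be ambiguous */
		if (*curr_cycle == XLOG_HEADER_MAGIC_NUM)
			(*curr_cycle)++;
		*curr_block -= log_bbsize;
	}
}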
3191 struct xlog *log = mp->m_log; in _xfs_log_force() local
3197 xlog_cil_force(log); in _xfs_log_force()
3199 spin_lock(&log->l_icloglock); in _xfs_log_force()
3201 iclog = log->l_iclog; in _xfs_log_force()
3203 spin_unlock(&log->l_icloglock); in _xfs_log_force()
3238 xlog_state_switch_iclogs(log, iclog, 0); in _xfs_log_force()
3239 spin_unlock(&log->l_icloglock); in _xfs_log_force()
3241 if (xlog_state_release_iclog(log, iclog)) in _xfs_log_force()
3246 spin_lock(&log->l_icloglock); in _xfs_log_force()
3258 xlog_state_switch_iclogs(log, iclog, 0); in _xfs_log_force()
3277 spin_unlock(&log->l_icloglock); in _xfs_log_force()
3281 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); in _xfs_log_force()
3294 spin_unlock(&log->l_icloglock); in _xfs_log_force()
3339 struct xlog *log = mp->m_log; in _xfs_log_force_lsn() local
3347 lsn = xlog_cil_force_lsn(log, lsn); in _xfs_log_force_lsn()
3352 spin_lock(&log->l_icloglock); in _xfs_log_force_lsn()
3353 iclog = log->l_iclog; in _xfs_log_force_lsn()
3355 spin_unlock(&log->l_icloglock); in _xfs_log_force_lsn()
3366 spin_unlock(&log->l_icloglock); in _xfs_log_force_lsn()
3397 &log->l_icloglock); in _xfs_log_force_lsn()
3404 xlog_state_switch_iclogs(log, iclog, 0); in _xfs_log_force_lsn()
3405 spin_unlock(&log->l_icloglock); in _xfs_log_force_lsn()
3406 if (xlog_state_release_iclog(log, iclog)) in _xfs_log_force_lsn()
3410 spin_lock(&log->l_icloglock); in _xfs_log_force_lsn()
3421 spin_unlock(&log->l_icloglock); in _xfs_log_force_lsn()
3425 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); in _xfs_log_force_lsn()
3437 spin_unlock(&log->l_icloglock); in _xfs_log_force_lsn()
3441 } while (iclog != log->l_iclog); in _xfs_log_force_lsn()
3443 spin_unlock(&log->l_icloglock); in _xfs_log_force_lsn()
3472 struct xlog *log, in xlog_state_want_sync() argument
3475 assert_spin_locked(&log->l_icloglock); in xlog_state_want_sync()
3478 xlog_state_switch_iclogs(log, iclog, 0); in xlog_state_want_sync()
3523 struct xlog *log = mp->m_log; in xfs_log_calc_unit_res() local
3582 iclog_space = log->l_iclog_size - log->l_iclog_hsize; in xfs_log_calc_unit_res()
3594 unit_bytes += log->l_iclog_hsize * num_headers; in xfs_log_calc_unit_res()
3597 unit_bytes += log->l_iclog_hsize; in xfs_log_calc_unit_res()
3616 struct xlog *log, in xlog_ticket_alloc() argument
3630 unit_res = xfs_log_calc_unit_res(log->l_mp, unit_bytes); in xlog_ticket_alloc()
3666 struct xlog *log, in xlog_verify_dest_ptr() argument
3672 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_verify_dest_ptr()
3673 if (ptr >= log->l_iclog_bak[i] && in xlog_verify_dest_ptr()
3674 ptr <= log->l_iclog_bak[i] + log->l_iclog_size) in xlog_verify_dest_ptr()
3679 xfs_emerg(log->l_mp, "%s: invalid ptr", __func__); in xlog_verify_dest_ptr()
3695 struct xlog *log) in xlog_verify_grant_tail() argument
3700 xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space); in xlog_verify_grant_tail()
3701 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks); in xlog_verify_grant_tail()
3704 !(log->l_flags & XLOG_TAIL_WARN)) { in xlog_verify_grant_tail()
3705 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_verify_grant_tail()
3707 log->l_flags |= XLOG_TAIL_WARN; in xlog_verify_grant_tail()
3711 !(log->l_flags & XLOG_TAIL_WARN)) { in xlog_verify_grant_tail()
3712 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_verify_grant_tail()
3714 log->l_flags |= XLOG_TAIL_WARN; in xlog_verify_grant_tail()
3722 struct xlog *log, in xlog_verify_tail_lsn() argument
3728 if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) { in xlog_verify_tail_lsn()
3730 log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn)); in xlog_verify_tail_lsn()
3731 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize)) in xlog_verify_tail_lsn()
3732 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); in xlog_verify_tail_lsn()
3734 ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle); in xlog_verify_tail_lsn()
3736 if (BLOCK_LSN(tail_lsn) == log->l_prev_block) in xlog_verify_tail_lsn()
3737 xfs_emerg(log->l_mp, "%s: tail wrapped", __func__); in xlog_verify_tail_lsn()
3739 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block; in xlog_verify_tail_lsn()
3741 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); in xlog_verify_tail_lsn()
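xlog_verify_tail_lsn() (lines 3722-3741) is the debug-build guard for the invariant everything above preserves: an iclog write must never reach the tail. Its two branches mirror xlog_space_left(), only in blocks rather than bytes; a boolean sketch:

#include <stdbool.h>

/*
 * Model of xlog_verify_tail_lsn(): would writing write_blocks at the
 * head (prev_cycle, prev_block) run into the tail?
 */
static bool write_would_hit_tail(int log_bbsize,
				 int tail_cycle, int tail_block,
				 int prev_cycle, int prev_block,
				 int write_blocks)
{
	int free_blocks;

	if (tail_cycle == prev_cycle) {
		/* same lap: the free span wraps around the log end */
		free_blocks = log_bbsize - (prev_block - tail_block);
	} else {
		/* head is one lap ahead: the free span is the direct gap */
		if (tail_block == prev_block)
			return true;            /* "tail wrapped" */
		free_blocks = tail_block - prev_block;
	}
	return free_blocks < write_blocks;
}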
3762 struct xlog *log, in xlog_verify_iclog() argument
3778 spin_lock(&log->l_icloglock); in xlog_verify_iclog()
3779 icptr = log->l_iclog; in xlog_verify_iclog()
3780 for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next) in xlog_verify_iclog()
3783 if (icptr != log->l_iclog) in xlog_verify_iclog()
3784 xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__); in xlog_verify_iclog()
3785 spin_unlock(&log->l_icloglock); in xlog_verify_iclog()
3789 xfs_emerg(log->l_mp, "%s: invalid magic num", __func__); in xlog_verify_iclog()
3795 xfs_emerg(log->l_mp, "%s: unexpected magic num", in xlog_verify_iclog()
3826 xfs_warn(log->l_mp, in xlog_verify_iclog()
3857 struct xlog *log) in xlog_state_ioerror() argument
3861 iclog = log->l_iclog; in xlog_state_ioerror()
3901 struct xlog *log; in xfs_log_force_umount() local
3904 log = mp->m_log; in xfs_log_force_umount()
3910 if (!log || in xfs_log_force_umount()
3911 log->l_flags & XLOG_ACTIVE_RECOVERY) { in xfs_log_force_umount()
3922 if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) { in xfs_log_force_umount()
3923 ASSERT(XLOG_FORCED_SHUTDOWN(log)); in xfs_log_force_umount()
3941 spin_lock(&log->l_icloglock); in xfs_log_force_umount()
3950 log->l_flags |= XLOG_IO_ERROR; in xfs_log_force_umount()
3951 retval = xlog_state_ioerror(log); in xfs_log_force_umount()
3952 spin_unlock(&log->l_icloglock); in xfs_log_force_umount()
3961 xlog_grant_head_wake_all(&log->l_reserve_head); in xfs_log_force_umount()
3962 xlog_grant_head_wake_all(&log->l_write_head); in xfs_log_force_umount()
3970 wake_up_all(&log->l_cilp->xc_commit_wait); in xfs_log_force_umount()
3971 xlog_state_do_callback(log, XFS_LI_ABORTED, NULL); in xfs_log_force_umount()
3977 spin_lock(&log->l_icloglock); in xfs_log_force_umount()
3978 iclog = log->l_iclog; in xfs_log_force_umount()
3982 } while (iclog != log->l_iclog); in xfs_log_force_umount()
3983 spin_unlock(&log->l_icloglock); in xfs_log_force_umount()
3992 struct xlog *log) in xlog_iclogs_empty() argument
3996 iclog = log->l_iclog; in xlog_iclogs_empty()
4004 } while (iclog != log->l_iclog); in xlog_iclogs_empty()