Lines Matching refs:log

43 struct xlog *log,
56 struct xlog *log,
60 struct xlog *log,
64 struct xlog *log);
70 struct xlog *log,
75 struct xlog *log,
83 struct xlog *log,
87 struct xlog *log,
92 struct xlog *log,
97 struct xlog *log,
101 struct xlog *log,
105 struct xlog *log,
111 struct xlog *log,
115 struct xlog *log);
118 struct xlog *log,
124 struct xlog *log,
136 struct xlog *log);
140 struct xlog *log, in xlog_grant_sub_space() argument
154 space += log->l_logsize; in xlog_grant_sub_space()
166 struct xlog *log, in xlog_grant_add_space() argument
179 tmp = log->l_logsize - space; in xlog_grant_add_space()
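The xlog_grant_sub_space()/xlog_grant_add_space() hits above are the two halves of XFS grant-head accounting: each head is a packed 64-bit {cycle, space} pair, and space wraps modulo log->l_logsize with a compensating cycle bump. Below is a minimal standalone C11 model of that wraparound; the demo_* names and the 16 MB log size are illustrative assumptions, not kernel identifiers.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_LOGSIZE	(16 * 1024 * 1024)	/* assumed log size in bytes */

static void demo_crack(int64_t val, int *cycle, int *space)
{
	*cycle = val >> 32;			/* high word: cycle count */
	*space = val & 0xffffffff;		/* low word: byte offset */
}

static int64_t demo_pack(int cycle, int space)
{
	return ((int64_t)cycle << 32) | space;
}

/* mirrors the "space += log->l_logsize" underflow fixup in the excerpt */
static void demo_grant_sub_space(_Atomic int64_t *head, int bytes)
{
	int64_t old = atomic_load(head), new;

	do {
		int cycle, space;

		demo_crack(old, &cycle, &space);
		space -= bytes;
		if (space < 0) {		/* wrapped back past block 0 */
			space += DEMO_LOGSIZE;
			cycle--;
		}
		new = demo_pack(cycle, space);
	} while (!atomic_compare_exchange_weak(head, &old, new));
}

/* mirrors the "tmp = log->l_logsize - space" overflow fixup in the excerpt */
static void demo_grant_add_space(_Atomic int64_t *head, int bytes)
{
	int64_t old = atomic_load(head), new;

	do {
		int cycle, space, tmp;

		demo_crack(old, &cycle, &space);
		tmp = DEMO_LOGSIZE - space;
		if (tmp > bytes) {
			space += bytes;
		} else {			/* wrapped past the end */
			space = bytes - tmp;
			cycle++;
		}
		new = demo_pack(cycle, space);
	} while (!atomic_compare_exchange_weak(head, &old, new));
}

int main(void)
{
	_Atomic int64_t head = demo_pack(1, DEMO_LOGSIZE - 512);
	int cycle, space;

	demo_grant_add_space(&head, 4096);	/* crosses the end: cycle 1 -> 2 */
	demo_crack(atomic_load(&head), &cycle, &space);
	printf("after add: cycle=%d space=%d\n", cycle, space);

	demo_grant_sub_space(&head, 4096);	/* crosses back: cycle 2 -> 1 */
	demo_crack(atomic_load(&head), &cycle, &space);
	printf("after sub: cycle=%d space=%d\n", cycle, space);
	return 0;
}

Packing both values into one atomic word is what lets the real code update a grant head with a compare-and-swap loop instead of taking a lock on every reservation.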
216 struct xlog *log, in xlog_ticket_reservation() argument
220 if (head == &log->l_write_head) { in xlog_ticket_reservation()
233 struct xlog *log, in xlog_grant_head_wake() argument
241 need_bytes = xlog_ticket_reservation(log, head, tic); in xlog_grant_head_wake()
246 trace_xfs_log_grant_wake_up(log, tic); in xlog_grant_head_wake()
255 struct xlog *log, in xlog_grant_head_wait() argument
264 if (XLOG_FORCED_SHUTDOWN(log)) in xlog_grant_head_wait()
266 xlog_grant_push_ail(log, need_bytes); in xlog_grant_head_wait()
271 XFS_STATS_INC(log->l_mp, xs_sleep_logspace); in xlog_grant_head_wait()
273 trace_xfs_log_grant_sleep(log, tic); in xlog_grant_head_wait()
275 trace_xfs_log_grant_wake(log, tic); in xlog_grant_head_wait()
278 if (XLOG_FORCED_SHUTDOWN(log)) in xlog_grant_head_wait()
280 } while (xlog_space_left(log, &head->grant) < need_bytes); in xlog_grant_head_wait()
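The xlog_grant_head_wait() excerpts show the canonical sleep/recheck loop: push the AIL to free space, sleep on the head's wait list, and re-test xlog_space_left() after every wakeup. Here is a small pthread model of that pattern; the waker thread stands in for xfs_log_space_wake(), and the names and byte counts are made up for illustration.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t head_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t head_wait = PTHREAD_COND_INITIALIZER;
static int free_bytes;			/* space available on the head */

/* plays the role of xfs_log_space_wake(): space freed, wake waiters */
static void *waker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&head_lock);
	free_bytes += 8192;
	pthread_cond_broadcast(&head_wait);
	pthread_mutex_unlock(&head_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int need_bytes = 4096;

	pthread_create(&t, NULL, waker, NULL);

	pthread_mutex_lock(&head_lock);
	/* like the do/while at line 280 above: sleep, then re-test space */
	while (free_bytes < need_bytes)
		pthread_cond_wait(&head_wait, &head_lock);
	free_bytes -= need_bytes;
	pthread_mutex_unlock(&head_lock);

	printf("got %d bytes, %d left\n", need_bytes, free_bytes);
	pthread_join(t, NULL);
	return 0;
}

The kernel version also bails out of the loop on forced shutdown (the XLOG_FORCED_SHUTDOWN tests above), a detail this model omits.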
308 struct xlog *log, in xlog_grant_head_check() argument
316 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); in xlog_grant_head_check()
324 *need_bytes = xlog_ticket_reservation(log, head, tic); in xlog_grant_head_check()
325 free_bytes = xlog_space_left(log, &head->grant); in xlog_grant_head_check()
328 if (!xlog_grant_head_wake(log, head, &free_bytes) || in xlog_grant_head_check()
330 error = xlog_grant_head_wait(log, head, tic, in xlog_grant_head_check()
336 error = xlog_grant_head_wait(log, head, tic, *need_bytes); in xlog_grant_head_check()
375 struct xlog *log = mp->m_log; in xfs_log_regrant() local
379 if (XLOG_FORCED_SHUTDOWN(log)) in xfs_log_regrant()
392 xlog_grant_push_ail(log, tic->t_unit_res); in xfs_log_regrant()
400 trace_xfs_log_regrant(log, tic); in xfs_log_regrant()
402 error = xlog_grant_head_check(log, &log->l_write_head, tic, in xfs_log_regrant()
407 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); in xfs_log_regrant()
408 trace_xfs_log_regrant_exit(log, tic); in xfs_log_regrant()
409 xlog_verify_grant_tail(log); in xfs_log_regrant()
441 struct xlog *log = mp->m_log; in xfs_log_reserve() local
448 if (XLOG_FORCED_SHUTDOWN(log)) in xfs_log_reserve()
454 tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent, in xfs_log_reserve()
462 xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt in xfs_log_reserve()
465 trace_xfs_log_reserve(log, tic); in xfs_log_reserve()
467 error = xlog_grant_head_check(log, &log->l_reserve_head, tic, in xfs_log_reserve()
472 xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes); in xfs_log_reserve()
473 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); in xfs_log_reserve()
474 trace_xfs_log_reserve_exit(log, tic); in xfs_log_reserve()
475 xlog_verify_grant_tail(log); in xfs_log_reserve()
518 struct xlog *log = mp->m_log; in xfs_log_done() local
521 if (XLOG_FORCED_SHUTDOWN(log) || in xfs_log_done()
527 (xlog_commit_record(log, ticket, iclog, &lsn)))) { in xfs_log_done()
534 trace_xfs_log_done_nonperm(log, ticket); in xfs_log_done()
540 xlog_ungrant_log_space(log, ticket); in xfs_log_done()
542 trace_xfs_log_done_perm(log, ticket); in xfs_log_done()
544 xlog_regrant_reserve_log_space(log, ticket); in xfs_log_done()
796 struct xlog *log = mp->m_log; in xfs_log_unmount_write() local
813 ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log))); in xfs_log_unmount_write()
816 first_iclog = iclog = log->l_iclog; in xfs_log_unmount_write()
825 if (! (XLOG_FORCED_SHUTDOWN(log))) { in xfs_log_unmount_write()
850 error = xlog_write(log, &vec, tic, &lsn, in xfs_log_unmount_write()
863 spin_lock(&log->l_icloglock); in xfs_log_unmount_write()
864 iclog = log->l_iclog; in xfs_log_unmount_write()
866 xlog_state_want_sync(log, iclog); in xfs_log_unmount_write()
867 spin_unlock(&log->l_icloglock); in xfs_log_unmount_write()
868 error = xlog_state_release_iclog(log, iclog); in xfs_log_unmount_write()
870 spin_lock(&log->l_icloglock); in xfs_log_unmount_write()
873 if (!XLOG_FORCED_SHUTDOWN(log)) { in xfs_log_unmount_write()
875 &log->l_icloglock); in xfs_log_unmount_write()
877 spin_unlock(&log->l_icloglock); in xfs_log_unmount_write()
880 spin_unlock(&log->l_icloglock); in xfs_log_unmount_write()
883 trace_xfs_log_umount_write(log, tic); in xfs_log_unmount_write()
884 xlog_ungrant_log_space(log, tic); in xfs_log_unmount_write()
901 spin_lock(&log->l_icloglock); in xfs_log_unmount_write()
902 iclog = log->l_iclog; in xfs_log_unmount_write()
905 xlog_state_want_sync(log, iclog); in xfs_log_unmount_write()
906 spin_unlock(&log->l_icloglock); in xfs_log_unmount_write()
907 error = xlog_state_release_iclog(log, iclog); in xfs_log_unmount_write()
909 spin_lock(&log->l_icloglock); in xfs_log_unmount_write()
916 &log->l_icloglock); in xfs_log_unmount_write()
918 spin_unlock(&log->l_icloglock); in xfs_log_unmount_write()
1000 struct xlog *log = mp->m_log; in xfs_log_space_wake() local
1003 if (XLOG_FORCED_SHUTDOWN(log)) in xfs_log_space_wake()
1006 if (!list_empty_careful(&log->l_write_head.waiters)) { in xfs_log_space_wake()
1007 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); in xfs_log_space_wake()
1009 spin_lock(&log->l_write_head.lock); in xfs_log_space_wake()
1010 free_bytes = xlog_space_left(log, &log->l_write_head.grant); in xfs_log_space_wake()
1011 xlog_grant_head_wake(log, &log->l_write_head, &free_bytes); in xfs_log_space_wake()
1012 spin_unlock(&log->l_write_head.lock); in xfs_log_space_wake()
1015 if (!list_empty_careful(&log->l_reserve_head.waiters)) { in xfs_log_space_wake()
1016 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); in xfs_log_space_wake()
1018 spin_lock(&log->l_reserve_head.lock); in xfs_log_space_wake()
1019 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); in xfs_log_space_wake()
1020 xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes); in xfs_log_space_wake()
1021 spin_unlock(&log->l_reserve_head.lock); in xfs_log_space_wake()
1045 struct xlog *log = mp->m_log; in xfs_log_need_covered() local
1051 if (!xlog_cil_empty(log)) in xfs_log_need_covered()
1054 spin_lock(&log->l_icloglock); in xfs_log_need_covered()
1055 switch (log->l_covered_state) { in xfs_log_need_covered()
1062 if (xfs_ail_min_lsn(log->l_ailp)) in xfs_log_need_covered()
1064 if (!xlog_iclogs_empty(log)) in xfs_log_need_covered()
1068 if (log->l_covered_state == XLOG_STATE_COVER_NEED) in xfs_log_need_covered()
1069 log->l_covered_state = XLOG_STATE_COVER_DONE; in xfs_log_need_covered()
1071 log->l_covered_state = XLOG_STATE_COVER_DONE2; in xfs_log_need_covered()
1077 spin_unlock(&log->l_icloglock); in xfs_log_need_covered()
1088 struct xlog *log = mp->m_log; in xlog_assign_tail_lsn_locked() local
1103 tail_lsn = atomic64_read(&log->l_last_sync_lsn); in xlog_assign_tail_lsn_locked()
1104 trace_xfs_log_assign_tail_lsn(log, tail_lsn); in xlog_assign_tail_lsn_locked()
1105 atomic64_set(&log->l_tail_lsn, tail_lsn); in xlog_assign_tail_lsn_locked()
1138 struct xlog *log, in xlog_space_left() argument
1148 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes); in xlog_space_left()
1151 free_bytes = log->l_logsize - (head_bytes - tail_bytes); in xlog_space_left()
1163 xfs_alert(log->l_mp, "xlog_space_left: head behind tail"); in xlog_space_left()
1164 xfs_alert(log->l_mp, in xlog_space_left()
1167 xfs_alert(log->l_mp, in xlog_space_left()
1171 free_bytes = log->l_logsize; in xlog_space_left()
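The xlog_space_left() excerpts compute free space from the head and tail grant positions, each cracked into a (cycle, bytes) pair. A sketch of the three cases visible above; DEMO_LOGSIZE is an assumed value, and the same-cycle branch assumes the healthy head >= tail ordering.

#include <stdio.h>

#define DEMO_LOGSIZE	(16 * 1024 * 1024)	/* assumed log size in bytes */

static int demo_space_left(int tail_cycle, int tail_bytes,
			   int head_cycle, int head_bytes)
{
	if (tail_cycle == head_cycle) {
		/* same pass: free space wraps around the physical end */
		return DEMO_LOGSIZE - (head_bytes - tail_bytes);
	} else if (tail_cycle + 1 < head_cycle) {
		/* head a full cycle (or more) ahead: nothing left */
		return 0;
	} else if (tail_cycle < head_cycle) {
		/* head exactly one cycle ahead: the gap up to the tail */
		return tail_bytes - head_bytes;
	}
	/* head behind tail is corruption; the kernel xfs_alert()s here */
	return -1;
}

int main(void)
{
	printf("%d\n", demo_space_left(5, 1024, 5, 4096)); /* same cycle */
	printf("%d\n", demo_space_left(5, 4096, 6, 1024)); /* head wrapped */
	return 0;
}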
1233 struct xlog *log) in xlog_get_iclog_buffer_size() argument
1239 log->l_iclog_bufs = XLOG_MAX_ICLOGS; in xlog_get_iclog_buffer_size()
1241 log->l_iclog_bufs = mp->m_logbufs; in xlog_get_iclog_buffer_size()
1247 size = log->l_iclog_size = mp->m_logbsize; in xlog_get_iclog_buffer_size()
1248 log->l_iclog_size_log = 0; in xlog_get_iclog_buffer_size()
1250 log->l_iclog_size_log++; in xlog_get_iclog_buffer_size()
1262 log->l_iclog_hsize = xhdrs << BBSHIFT; in xlog_get_iclog_buffer_size()
1263 log->l_iclog_heads = xhdrs; in xlog_get_iclog_buffer_size()
1266 log->l_iclog_hsize = BBSIZE; in xlog_get_iclog_buffer_size()
1267 log->l_iclog_heads = 1; in xlog_get_iclog_buffer_size()
1273 log->l_iclog_size = XLOG_BIG_RECORD_BSIZE; in xlog_get_iclog_buffer_size()
1274 log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT; in xlog_get_iclog_buffer_size()
1277 log->l_iclog_hsize = BBSIZE; in xlog_get_iclog_buffer_size()
1278 log->l_iclog_heads = 1; in xlog_get_iclog_buffer_size()
1283 mp->m_logbufs = log->l_iclog_bufs; in xlog_get_iclog_buffer_size()
1285 mp->m_logbsize = log->l_iclog_size; in xlog_get_iclog_buffer_size()
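The l_iclog_size_log assignments above derive log2 of the buffer size with a shift loop, since the in-core log code wants both the size and its power-of-two exponent. The same idiom in isolation (the 32k size is an assumed example):

#include <stdio.h>

int main(void)
{
	unsigned int size = 32768;	/* assumed iclog buffer size */
	unsigned int size_log = 0;

	/* same shape as the loop at line 1250: shift until one bit is left */
	for (unsigned int tmp = size; tmp > 1; tmp >>= 1)
		size_log++;

	printf("%u = 1 << %u\n", size, size_log);	/* 32768 = 1 << 15 */
	return 0;
}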
1306 struct xlog *log = container_of(to_delayed_work(work), in xfs_log_worker() local
1308 struct xfs_mount *mp = log->l_mp; in xfs_log_worker()
1346 struct xlog *log; in xlog_alloc_log() local
1355 log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL); in xlog_alloc_log()
1356 if (!log) { in xlog_alloc_log()
1361 log->l_mp = mp; in xlog_alloc_log()
1362 log->l_targ = log_target; in xlog_alloc_log()
1363 log->l_logsize = BBTOB(num_bblks); in xlog_alloc_log()
1364 log->l_logBBstart = blk_offset; in xlog_alloc_log()
1365 log->l_logBBsize = num_bblks; in xlog_alloc_log()
1366 log->l_covered_state = XLOG_STATE_COVER_IDLE; in xlog_alloc_log()
1367 log->l_flags |= XLOG_ACTIVE_RECOVERY; in xlog_alloc_log()
1368 INIT_DELAYED_WORK(&log->l_work, xfs_log_worker); in xlog_alloc_log()
1370 log->l_prev_block = -1; in xlog_alloc_log()
1372 xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0); in xlog_alloc_log()
1373 xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0); in xlog_alloc_log()
1374 log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ in xlog_alloc_log()
1376 xlog_grant_head_init(&log->l_reserve_head); in xlog_alloc_log()
1377 xlog_grant_head_init(&log->l_write_head); in xlog_alloc_log()
1396 if (log2_size && log->l_logBBstart > 0 && in xlog_alloc_log()
1404 log->l_sectBBsize = 1 << log2_size; in xlog_alloc_log()
1406 xlog_get_iclog_buffer_size(mp, log); in xlog_alloc_log()
1415 BTOBB(log->l_iclog_size), 0); in xlog_alloc_log()
1430 log->l_xbuf = bp; in xlog_alloc_log()
1432 spin_lock_init(&log->l_icloglock); in xlog_alloc_log()
1433 init_waitqueue_head(&log->l_flush_wait); in xlog_alloc_log()
1435 iclogp = &log->l_iclog; in xlog_alloc_log()
1443 ASSERT(log->l_iclog_size >= 4096); in xlog_alloc_log()
1444 for (i=0; i < log->l_iclog_bufs; i++) { in xlog_alloc_log()
1454 BTOBB(log->l_iclog_size), 0); in xlog_alloc_log()
1467 log->l_iclog_bak[i] = &iclog->ic_header; in xlog_alloc_log()
1473 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1); in xlog_alloc_log()
1474 head->h_size = cpu_to_be32(log->l_iclog_size); in xlog_alloc_log()
1479 iclog->ic_size = BBTOB(bp->b_length) - log->l_iclog_hsize; in xlog_alloc_log()
1481 iclog->ic_log = log; in xlog_alloc_log()
1485 iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize; in xlog_alloc_log()
1492 *iclogp = log->l_iclog; /* complete ring */ in xlog_alloc_log()
1493 log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */ in xlog_alloc_log()
1495 error = xlog_cil_init(log); in xlog_alloc_log()
1498 return log; in xlog_alloc_log()
1501 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) { in xlog_alloc_log()
1507 spinlock_destroy(&log->l_icloglock); in xlog_alloc_log()
1508 xfs_buf_free(log->l_xbuf); in xlog_alloc_log()
1510 kmem_free(log); in xlog_alloc_log()
1522 struct xlog *log, in xlog_commit_record() argument
1527 struct xfs_mount *mp = log->l_mp; in xlog_commit_record()
1540 error = xlog_write(log, &vec, ticket, commitlsnp, iclog, in xlog_commit_record()
1556 struct xlog *log, in xlog_grant_push_ail() argument
1567 ASSERT(BTOBB(need_bytes) < log->l_logBBsize); in xlog_grant_push_ail()
1569 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); in xlog_grant_push_ail()
1578 free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2)); in xlog_grant_push_ail()
1583 xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle, in xlog_grant_push_ail()
1586 if (threshold_block >= log->l_logBBsize) { in xlog_grant_push_ail()
1587 threshold_block -= log->l_logBBsize; in xlog_grant_push_ail()
1597 last_sync_lsn = atomic64_read(&log->l_last_sync_lsn); in xlog_grant_push_ail()
1606 if (!XLOG_FORCED_SHUTDOWN(log)) in xlog_grant_push_ail()
1607 xfs_ail_push(log->l_ailp, threshold_lsn); in xlog_grant_push_ail()
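xlog_grant_push_ail() turns a byte shortfall into a push target LSN: the threshold is the largest of the immediate need, a quarter of the log (the MAX at line 1578 above), and a fixed floor, added to the tail block with a cycle wrap. A worked standalone version, with all constants assumed for illustration:

#include <stdio.h>

#define DEMO_LOGBBSIZE	4096		/* assumed log size in 512B blocks */

int main(void)
{
	int need_blocks = 16;		/* BTOBB(need_bytes), illustrative */
	int free_blocks = 100;		/* current free space in blocks */
	int tail_cycle = 7, tail_block = 4000;

	/* threshold = max(need, logsize/4, 256), as in the excerpts */
	int threshold = need_blocks;
	if (threshold < (DEMO_LOGBBSIZE >> 2))
		threshold = DEMO_LOGBBSIZE >> 2;
	if (threshold < 256)
		threshold = 256;

	if (free_blocks >= threshold) {
		printf("enough space, no push needed\n");
		return 0;
	}

	/* push target = tail + threshold, wrapping the cycle at the end */
	int threshold_block = tail_block + threshold;
	int threshold_cycle = tail_cycle;
	if (threshold_block >= DEMO_LOGBBSIZE) {
		threshold_block -= DEMO_LOGBBSIZE;
		threshold_cycle++;
	}
	printf("push AIL to cycle %d, block %d\n",
	       threshold_cycle, threshold_block);
	return 0;
}

The real function additionally caps the target at the l_last_sync_lsn read at line 1597 above before handing it to xfs_ail_push().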
1615 struct xlog *log, in xlog_pack_data() argument
1635 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { in xlog_pack_data()
1646 for (i = 1; i < log->l_iclog_heads; i++) in xlog_pack_data()
1659 struct xlog *log, in xlog_cksum() argument
1672 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { in xlog_cksum()
1758 struct xlog *log, in xlog_sync() argument
1768 int v2 = xfs_sb_version_haslogv2(&log->l_mp->m_sb); in xlog_sync()
1771 XFS_STATS_INC(log->l_mp, xs_log_writes); in xlog_sync()
1775 count_init = log->l_iclog_hsize + iclog->ic_offset; in xlog_sync()
1778 if (v2 && log->l_mp->m_sb.sb_logsunit > 1) { in xlog_sync()
1780 count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init)); in xlog_sync()
1786 ASSERT((v2 && log->l_mp->m_sb.sb_logsunit > 1 && in xlog_sync()
1787 roundoff < log->l_mp->m_sb.sb_logsunit) in xlog_sync()
1789 (log->l_mp->m_sb.sb_logsunit <= 1 && in xlog_sync()
1793 xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff); in xlog_sync()
1794 xlog_grant_add_space(log, &log->l_write_head.grant, roundoff); in xlog_sync()
1797 xlog_pack_data(log, iclog, roundoff); in xlog_sync()
1808 XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count)); in xlog_sync()
1811 if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) { in xlog_sync()
1814 split = count - (BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp))); in xlog_sync()
1815 count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp)); in xlog_sync()
1839 iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header, in xlog_sync()
1848 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) { in xlog_sync()
1860 if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp) in xlog_sync()
1861 xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp); in xlog_sync()
1866 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); in xlog_sync()
1867 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); in xlog_sync()
1869 xlog_verify_iclog(log, iclog, count, true); in xlog_sync()
1872 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); in xlog_sync()
1893 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) in xlog_sync()
1896 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); in xlog_sync()
1897 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); in xlog_sync()
1900 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); in xlog_sync()
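Among the xlog_sync() hits, lines 1778–1794 show the write size being rounded up to the log stripe unit on v2 logs, with the roundoff bytes credited back to both grant heads so the accounting matches what actually hits the disk. The round-up itself, with assumed sizes:

#include <stdio.h>

int main(void)
{
	int lsunit = 32768;		/* assumed sb_logsunit, in bytes */
	int count_init = 512 + 7000;	/* iclog header + data, illustrative */

	/* XLOG_LSUNITTOB(XLOG_BTOLSUNIT(...)): round up to a stripe unit */
	int count = ((count_init + lsunit - 1) / lsunit) * lsunit;
	int roundoff = count - count_init;

	printf("count=%d roundoff=%d\n", count, roundoff);
	return 0;
}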
1916 struct xlog *log) in xlog_dealloc_log() argument
1921 xlog_cil_destroy(log); in xlog_dealloc_log()
1927 iclog = log->l_iclog; in xlog_dealloc_log()
1928 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_dealloc_log()
1939 xfs_buf_lock(log->l_xbuf); in xlog_dealloc_log()
1940 xfs_buf_unlock(log->l_xbuf); in xlog_dealloc_log()
1941 xfs_buf_set_empty(log->l_xbuf, BTOBB(log->l_iclog_size)); in xlog_dealloc_log()
1942 xfs_buf_free(log->l_xbuf); in xlog_dealloc_log()
1944 iclog = log->l_iclog; in xlog_dealloc_log()
1945 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_dealloc_log()
1951 spinlock_destroy(&log->l_icloglock); in xlog_dealloc_log()
1953 log->l_mp->m_log = NULL; in xlog_dealloc_log()
1954 kmem_free(log); in xlog_dealloc_log()
1963 struct xlog *log, in xlog_state_finish_copy() argument
1968 spin_lock(&log->l_icloglock); in xlog_state_finish_copy()
1973 spin_unlock(&log->l_icloglock); in xlog_state_finish_copy()
2152 struct xlog *log, in xlog_write_setup_ophdr() argument
2175 xfs_warn(log->l_mp, in xlog_write_setup_ophdr()
2235 struct xlog *log, in xlog_write_copy_finish() argument
2250 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); in xlog_write_copy_finish()
2253 return xlog_state_release_iclog(log, iclog); in xlog_write_copy_finish()
2261 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); in xlog_write_copy_finish()
2265 spin_lock(&log->l_icloglock); in xlog_write_copy_finish()
2266 xlog_state_want_sync(log, iclog); in xlog_write_copy_finish()
2267 spin_unlock(&log->l_icloglock); in xlog_write_copy_finish()
2270 return xlog_state_release_iclog(log, iclog); in xlog_write_copy_finish()
2320 struct xlog *log, in xlog_write() argument
2359 xlog_print_tic_res(log->l_mp, ticket); in xlog_write()
2368 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, in xlog_write()
2410 ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags); in xlog_write()
2423 xlog_verify_dest_ptr(log, ptr); in xlog_write()
2443 error = xlog_write_copy_finish(log, iclog, flags, in xlog_write()
2484 xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); in xlog_write()
2486 return xlog_state_release_iclog(log, iclog); in xlog_write()
2511 struct xlog *log) in xlog_state_clean_log() argument
2516 iclog = log->l_iclog; in xlog_state_clean_log()
2551 } while (iclog != log->l_iclog); in xlog_state_clean_log()
2562 switch (log->l_covered_state) { in xlog_state_clean_log()
2566 log->l_covered_state = XLOG_STATE_COVER_NEED; in xlog_state_clean_log()
2571 log->l_covered_state = XLOG_STATE_COVER_NEED2; in xlog_state_clean_log()
2573 log->l_covered_state = XLOG_STATE_COVER_NEED; in xlog_state_clean_log()
2578 log->l_covered_state = XLOG_STATE_COVER_IDLE; in xlog_state_clean_log()
2580 log->l_covered_state = XLOG_STATE_COVER_NEED; in xlog_state_clean_log()
2591 struct xlog *log) in xlog_get_lowest_lsn() argument
2596 lsn_log = log->l_iclog; in xlog_get_lowest_lsn()
2607 } while (lsn_log != log->l_iclog); in xlog_get_lowest_lsn()
2614 struct xlog *log, in xlog_state_do_callback() argument
2631 spin_lock(&log->l_icloglock); in xlog_state_do_callback()
2632 first_iclog = iclog = log->l_iclog; in xlog_state_do_callback()
2646 first_iclog = log->l_iclog; in xlog_state_do_callback()
2647 iclog = log->l_iclog; in xlog_state_do_callback()
2701 lowest_lsn = xlog_get_lowest_lsn(log); in xlog_state_do_callback()
2730 ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn), in xlog_state_do_callback()
2733 atomic64_set(&log->l_last_sync_lsn, in xlog_state_do_callback()
2739 spin_unlock(&log->l_icloglock); in xlog_state_do_callback()
2767 spin_lock(&log->l_icloglock); in xlog_state_do_callback()
2777 xlog_state_clean_log(log); in xlog_state_do_callback()
2788 xfs_warn(log->l_mp, in xlog_state_do_callback()
2800 first_iclog = iclog = log->l_iclog; in xlog_state_do_callback()
2822 if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) in xlog_state_do_callback()
2824 spin_unlock(&log->l_icloglock); in xlog_state_do_callback()
2827 wake_up_all(&log->l_flush_wait); in xlog_state_do_callback()
2849 struct xlog *log = iclog->ic_log; in xlog_state_done_syncing() local
2851 spin_lock(&log->l_icloglock); in xlog_state_done_syncing()
2867 spin_unlock(&log->l_icloglock); in xlog_state_done_syncing()
2879 spin_unlock(&log->l_icloglock); in xlog_state_done_syncing()
2880 xlog_state_do_callback(log, aborted, iclog); /* also cleans log */ in xlog_state_done_syncing()
2904 struct xlog *log, in xlog_state_get_iclog_space() argument
2917 spin_lock(&log->l_icloglock); in xlog_state_get_iclog_space()
2918 if (XLOG_FORCED_SHUTDOWN(log)) { in xlog_state_get_iclog_space()
2919 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
2923 iclog = log->l_iclog; in xlog_state_get_iclog_space()
2925 XFS_STATS_INC(log->l_mp, xs_log_noiclogs); in xlog_state_get_iclog_space()
2928 xlog_wait(&log->l_flush_wait, &log->l_icloglock); in xlog_state_get_iclog_space()
2943 ticket->t_curr_res -= log->l_iclog_hsize; in xlog_state_get_iclog_space()
2945 log->l_iclog_hsize, in xlog_state_get_iclog_space()
2947 head->h_cycle = cpu_to_be32(log->l_curr_cycle); in xlog_state_get_iclog_space()
2949 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); in xlog_state_get_iclog_space()
2950 ASSERT(log->l_curr_block >= 0); in xlog_state_get_iclog_space()
2963 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
2974 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
2975 error = xlog_state_release_iclog(log, iclog); in xlog_state_get_iclog_space()
2979 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
2995 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
3000 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
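xlog_state_get_iclog_space() stamps each fresh iclog header with an LSN built from the current cycle and block (line 2949 above). An LSN is simply the pair packed into 64 bits, cycle in the high word; a sketch of the pack/crack, with demo_ names standing in for xlog_assign_lsn()/CYCLE_LSN()/BLOCK_LSN():

#include <stdint.h>
#include <stdio.h>

typedef uint64_t demo_lsn_t;

static demo_lsn_t demo_assign_lsn(unsigned int cycle, unsigned int block)
{
	return ((uint64_t)cycle << 32) | block;
}

int main(void)
{
	demo_lsn_t lsn = demo_assign_lsn(7, 1234);

	printf("cycle=%u block=%u\n",
	       (unsigned int)(lsn >> 32),
	       (unsigned int)(lsn & 0xffffffff));
	return 0;
}

Because the cycle occupies the high word, packed LSNs increase monotonically across log wraps, which is the ordering the XFS_LSN_CMP() checks elsewhere in this listing depend on.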
3015 struct xlog *log, in xlog_regrant_reserve_log_space() argument
3018 trace_xfs_log_regrant_reserve_enter(log, ticket); in xlog_regrant_reserve_log_space()
3023 xlog_grant_sub_space(log, &log->l_reserve_head.grant, in xlog_regrant_reserve_log_space()
3025 xlog_grant_sub_space(log, &log->l_write_head.grant, in xlog_regrant_reserve_log_space()
3030 trace_xfs_log_regrant_reserve_sub(log, ticket); in xlog_regrant_reserve_log_space()
3036 xlog_grant_add_space(log, &log->l_reserve_head.grant, in xlog_regrant_reserve_log_space()
3039 trace_xfs_log_regrant_reserve_exit(log, ticket); in xlog_regrant_reserve_log_space()
3062 struct xlog *log, in xlog_ungrant_log_space() argument
3070 trace_xfs_log_ungrant_enter(log, ticket); in xlog_ungrant_log_space()
3071 trace_xfs_log_ungrant_sub(log, ticket); in xlog_ungrant_log_space()
3083 xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes); in xlog_ungrant_log_space()
3084 xlog_grant_sub_space(log, &log->l_write_head.grant, bytes); in xlog_ungrant_log_space()
3086 trace_xfs_log_ungrant_exit(log, ticket); in xlog_ungrant_log_space()
3088 xfs_log_space_wake(log->l_mp); in xlog_ungrant_log_space()
3102 struct xlog *log, in xlog_state_release_iclog() argument
3111 if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) in xlog_state_release_iclog()
3115 spin_unlock(&log->l_icloglock); in xlog_state_release_iclog()
3123 xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp); in xlog_state_release_iclog()
3127 xlog_verify_tail_lsn(log, iclog, tail_lsn); in xlog_state_release_iclog()
3130 spin_unlock(&log->l_icloglock); in xlog_state_release_iclog()
3140 return xlog_sync(log, iclog); in xlog_state_release_iclog()
3154 struct xlog *log, in xlog_state_switch_iclogs() argument
3162 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); in xlog_state_switch_iclogs()
3163 log->l_prev_block = log->l_curr_block; in xlog_state_switch_iclogs()
3164 log->l_prev_cycle = log->l_curr_cycle; in xlog_state_switch_iclogs()
3167 log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize); in xlog_state_switch_iclogs()
3170 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) && in xlog_state_switch_iclogs()
3171 log->l_mp->m_sb.sb_logsunit > 1) { in xlog_state_switch_iclogs()
3172 __uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit); in xlog_state_switch_iclogs()
3173 log->l_curr_block = roundup(log->l_curr_block, sunit_bb); in xlog_state_switch_iclogs()
3176 if (log->l_curr_block >= log->l_logBBsize) { in xlog_state_switch_iclogs()
3184 log->l_curr_block -= log->l_logBBsize; in xlog_state_switch_iclogs()
3185 ASSERT(log->l_curr_block >= 0); in xlog_state_switch_iclogs()
3187 log->l_curr_cycle++; in xlog_state_switch_iclogs()
3188 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM) in xlog_state_switch_iclogs()
3189 log->l_curr_cycle++; in xlog_state_switch_iclogs()
3191 ASSERT(iclog == log->l_iclog); in xlog_state_switch_iclogs()
3192 log->l_iclog = iclog->ic_next; in xlog_state_switch_iclogs()
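xlog_state_switch_iclogs() advances the in-core head: l_curr_block grows by the record plus header blocks, is rounded up to the stripe unit, and on reaching the end of the device wraps to zero with a cycle bump, skipping XLOG_HEADER_MAGIC_NUM, presumably so a raw cycle value on disk can never be mistaken for a record header during recovery. A compact model of the wrap (sizes assumed):

#include <stdio.h>

#define DEMO_LOGBBSIZE	4096		/* assumed log size in 512B blocks */
#define DEMO_MAGIC	0xFEEDbabe	/* XLOG_HEADER_MAGIC_NUM */

int main(void)
{
	unsigned int curr_cycle = 3;
	int curr_block = 4090;

	curr_block += 8 + 1;		/* BTOBB(eventual_size) + header */
	if (curr_block >= DEMO_LOGBBSIZE) {
		curr_block -= DEMO_LOGBBSIZE;	/* wrap to the start */
		curr_cycle++;
		/* never let the cycle number equal the header magic */
		if (curr_cycle == DEMO_MAGIC)
			curr_cycle++;
	}
	printf("cycle %u, block %d\n", curr_cycle, curr_block);
	return 0;
}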
3228 struct xlog *log = mp->m_log; in _xfs_log_force() local
3234 xlog_cil_force(log); in _xfs_log_force()
3236 spin_lock(&log->l_icloglock); in _xfs_log_force()
3238 iclog = log->l_iclog; in _xfs_log_force()
3240 spin_unlock(&log->l_icloglock); in _xfs_log_force()
3275 xlog_state_switch_iclogs(log, iclog, 0); in _xfs_log_force()
3276 spin_unlock(&log->l_icloglock); in _xfs_log_force()
3278 if (xlog_state_release_iclog(log, iclog)) in _xfs_log_force()
3283 spin_lock(&log->l_icloglock); in _xfs_log_force()
3295 xlog_state_switch_iclogs(log, iclog, 0); in _xfs_log_force()
3314 spin_unlock(&log->l_icloglock); in _xfs_log_force()
3318 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); in _xfs_log_force()
3331 spin_unlock(&log->l_icloglock); in _xfs_log_force()
3376 struct xlog *log = mp->m_log; in _xfs_log_force_lsn() local
3384 lsn = xlog_cil_force_lsn(log, lsn); in _xfs_log_force_lsn()
3389 spin_lock(&log->l_icloglock); in _xfs_log_force_lsn()
3390 iclog = log->l_iclog; in _xfs_log_force_lsn()
3392 spin_unlock(&log->l_icloglock); in _xfs_log_force_lsn()
3403 spin_unlock(&log->l_icloglock); in _xfs_log_force_lsn()
3434 &log->l_icloglock); in _xfs_log_force_lsn()
3441 xlog_state_switch_iclogs(log, iclog, 0); in _xfs_log_force_lsn()
3442 spin_unlock(&log->l_icloglock); in _xfs_log_force_lsn()
3443 if (xlog_state_release_iclog(log, iclog)) in _xfs_log_force_lsn()
3447 spin_lock(&log->l_icloglock); in _xfs_log_force_lsn()
3458 spin_unlock(&log->l_icloglock); in _xfs_log_force_lsn()
3462 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); in _xfs_log_force_lsn()
3474 spin_unlock(&log->l_icloglock); in _xfs_log_force_lsn()
3478 } while (iclog != log->l_iclog); in _xfs_log_force_lsn()
3480 spin_unlock(&log->l_icloglock); in _xfs_log_force_lsn()
3509 struct xlog *log, in xlog_state_want_sync() argument
3512 assert_spin_locked(&log->l_icloglock); in xlog_state_want_sync()
3515 xlog_state_switch_iclogs(log, iclog, 0); in xlog_state_want_sync()
3560 struct xlog *log = mp->m_log; in xfs_log_calc_unit_res() local
3619 iclog_space = log->l_iclog_size - log->l_iclog_hsize; in xfs_log_calc_unit_res()
3631 unit_bytes += log->l_iclog_hsize * num_headers; in xfs_log_calc_unit_res()
3634 unit_bytes += log->l_iclog_hsize; in xfs_log_calc_unit_res()
3653 struct xlog *log, in xlog_ticket_alloc() argument
3667 unit_res = xfs_log_calc_unit_res(log->l_mp, unit_bytes); in xlog_ticket_alloc()
3703 struct xlog *log, in xlog_verify_dest_ptr() argument
3709 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_verify_dest_ptr()
3710 if (ptr >= log->l_iclog_bak[i] && in xlog_verify_dest_ptr()
3711 ptr <= log->l_iclog_bak[i] + log->l_iclog_size) in xlog_verify_dest_ptr()
3716 xfs_emerg(log->l_mp, "%s: invalid ptr", __func__); in xlog_verify_dest_ptr()
3732 struct xlog *log) in xlog_verify_grant_tail() argument
3737 xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space); in xlog_verify_grant_tail()
3738 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks); in xlog_verify_grant_tail()
3741 !(log->l_flags & XLOG_TAIL_WARN)) { in xlog_verify_grant_tail()
3742 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_verify_grant_tail()
3744 log->l_flags |= XLOG_TAIL_WARN; in xlog_verify_grant_tail()
3748 !(log->l_flags & XLOG_TAIL_WARN)) { in xlog_verify_grant_tail()
3749 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_verify_grant_tail()
3751 log->l_flags |= XLOG_TAIL_WARN; in xlog_verify_grant_tail()
3759 struct xlog *log, in xlog_verify_tail_lsn() argument
3765 if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) { in xlog_verify_tail_lsn()
3767 log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn)); in xlog_verify_tail_lsn()
3768 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize)) in xlog_verify_tail_lsn()
3769 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); in xlog_verify_tail_lsn()
3771 ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle); in xlog_verify_tail_lsn()
3773 if (BLOCK_LSN(tail_lsn) == log->l_prev_block) in xlog_verify_tail_lsn()
3774 xfs_emerg(log->l_mp, "%s: tail wrapped", __func__); in xlog_verify_tail_lsn()
3776 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block; in xlog_verify_tail_lsn()
3778 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); in xlog_verify_tail_lsn()
3799 struct xlog *log, in xlog_verify_iclog() argument
3814 spin_lock(&log->l_icloglock); in xlog_verify_iclog()
3815 icptr = log->l_iclog; in xlog_verify_iclog()
3816 for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next) in xlog_verify_iclog()
3819 if (icptr != log->l_iclog) in xlog_verify_iclog()
3820 xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__); in xlog_verify_iclog()
3821 spin_unlock(&log->l_icloglock); in xlog_verify_iclog()
3825 xfs_emerg(log->l_mp, "%s: invalid magic num", __func__); in xlog_verify_iclog()
3831 xfs_emerg(log->l_mp, "%s: unexpected magic num", in xlog_verify_iclog()
3861 xfs_warn(log->l_mp, in xlog_verify_iclog()
3892 struct xlog *log) in xlog_state_ioerror() argument
3896 iclog = log->l_iclog; in xlog_state_ioerror()
3936 struct xlog *log; in xfs_log_force_umount() local
3939 log = mp->m_log; in xfs_log_force_umount()
3945 if (!log || in xfs_log_force_umount()
3946 log->l_flags & XLOG_ACTIVE_RECOVERY) { in xfs_log_force_umount()
3957 if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) { in xfs_log_force_umount()
3958 ASSERT(XLOG_FORCED_SHUTDOWN(log)); in xfs_log_force_umount()
3976 spin_lock(&log->l_icloglock); in xfs_log_force_umount()
3985 log->l_flags |= XLOG_IO_ERROR; in xfs_log_force_umount()
3986 retval = xlog_state_ioerror(log); in xfs_log_force_umount()
3987 spin_unlock(&log->l_icloglock); in xfs_log_force_umount()
3996 xlog_grant_head_wake_all(&log->l_reserve_head); in xfs_log_force_umount()
3997 xlog_grant_head_wake_all(&log->l_write_head); in xfs_log_force_umount()
4005 wake_up_all(&log->l_cilp->xc_commit_wait); in xfs_log_force_umount()
4006 xlog_state_do_callback(log, XFS_LI_ABORTED, NULL); in xfs_log_force_umount()
4012 spin_lock(&log->l_icloglock); in xfs_log_force_umount()
4013 iclog = log->l_iclog; in xfs_log_force_umount()
4017 } while (iclog != log->l_iclog); in xfs_log_force_umount()
4018 spin_unlock(&log->l_icloglock); in xfs_log_force_umount()
4027 struct xlog *log) in xlog_iclogs_empty() argument
4031 iclog = log->l_iclog; in xlog_iclogs_empty()
4039 } while (iclog != log->l_iclog); in xlog_iclogs_empty()
4052 struct xlog *log = mp->m_log; in xfs_log_check_lsn() local
4074 spin_lock(&log->l_icloglock); in xfs_log_check_lsn()
4079 log->l_curr_cycle, log->l_curr_block); in xfs_log_check_lsn()
4080 spin_unlock(&log->l_icloglock); in xfs_log_check_lsn()