cil               391 fs/xfs/xfs_log_cil.c 	struct xfs_cil		*cil = log->l_cilp;
cil               392 fs/xfs/xfs_log_cil.c 	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
cil               407 fs/xfs/xfs_log_cil.c 	spin_lock(&cil->xc_cil_lock);
cil               477 fs/xfs/xfs_log_cil.c 		if (!list_is_last(&lip->li_cil, &cil->xc_cil))
cil               478 fs/xfs/xfs_log_cil.c 			list_move_tail(&lip->li_cil, &cil->xc_cil);
cil               481 fs/xfs/xfs_log_cil.c 	spin_unlock(&cil->xc_cil_lock);
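
The hits at lines 391-481 fall in the CIL insertion path (xlog_cil_insert_items() in kernels of this vintage): items being committed are attached to, or re-ordered within, the per-mount CIL under xc_cil_lock. A minimal sketch of that step follows; the helper name is illustrative and not from the listing, while the lock and list operations are taken from the lines above.

	/*
	 * Illustrative helper: (re)insert one log item into the CIL in
	 * commit order, as implied by lines 407-481.  list_move_tail()
	 * both inserts an item not yet on xc_cil and moves a relogged
	 * item to the tail; the move is skipped if the item is already
	 * the last entry.
	 */
	static void
	example_cil_reinsert_item(
		struct xfs_cil		*cil,
		struct xfs_log_item	*lip)
	{
		spin_lock(&cil->xc_cil_lock);
		if (!list_is_last(&lip->li_cil, &cil->xc_cil))
			list_move_tail(&lip->li_cil, &cil->xc_cil);
		spin_unlock(&cil->xc_cil_lock);
	}
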
cil               506 fs/xfs/xfs_log_cil.c 	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;
cil               580 fs/xfs/xfs_log_cil.c 	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;
cil               590 fs/xfs/xfs_log_cil.c 		spin_lock(&ctx->cil->xc_push_lock);
cil               591 fs/xfs/xfs_log_cil.c 		wake_up_all(&ctx->cil->xc_commit_wait);
cil               592 fs/xfs/xfs_log_cil.c 		spin_unlock(&ctx->cil->xc_push_lock);
cil               595 fs/xfs/xfs_log_cil.c 	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
cil               602 fs/xfs/xfs_log_cil.c 	spin_lock(&ctx->cil->xc_push_lock);
cil               604 fs/xfs/xfs_log_cil.c 	spin_unlock(&ctx->cil->xc_push_lock);
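
Lines 506-604 belong to the checkpoint completion side (the discard completion worker and xlog_cil_committed() in this era of the code); the repeated ctx->cil->xc_log chain is what the back pointer on the last line of this listing (xfs_log_priv.h:239) provides. A hedged sketch of the completion step follows; the start_lsn argument and the list_del() of the context are assumptions, since those lines do not contain "cil" and are not in the listing.

	/*
	 * Sketch of the completion step implied by lines 580-604.  The
	 * abort wakeup and the xfs_trans_committed_bulk() call are in the
	 * hits; ctx->start_lsn and the list_del() are assumed.
	 */
	static void
	example_cil_committed(
		struct xfs_cil_ctx	*ctx,
		bool			abort)
	{
		struct xfs_cil		*cil = ctx->cil;

		/*
		 * On abort, wake anyone sleeping on xc_commit_wait so they
		 * can observe the shutdown instead of waiting forever for a
		 * commit record that will never be written.
		 */
		if (abort) {
			spin_lock(&cil->xc_push_lock);
			wake_up_all(&cil->xc_commit_wait);
			spin_unlock(&cil->xc_push_lock);
		}

		/* insert the checkpoint's items into the AIL at their commit lsn */
		xfs_trans_committed_bulk(cil->xc_log->l_ailp, ctx->lv_chain,
						ctx->start_lsn, abort);

		/* assumed: retire the context from the committing list */
		spin_lock(&cil->xc_push_lock);
		list_del(&ctx->committing);
		spin_unlock(&cil->xc_push_lock);
	}
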
cil               646 fs/xfs/xfs_log_cil.c 	struct xfs_cil		*cil = log->l_cilp;
cil               660 fs/xfs/xfs_log_cil.c 	if (!cil)
cil               666 fs/xfs/xfs_log_cil.c 	down_write(&cil->xc_ctx_lock);
cil               667 fs/xfs/xfs_log_cil.c 	ctx = cil->xc_ctx;
cil               669 fs/xfs/xfs_log_cil.c 	spin_lock(&cil->xc_push_lock);
cil               670 fs/xfs/xfs_log_cil.c 	push_seq = cil->xc_push_seq;
cil               678 fs/xfs/xfs_log_cil.c 	if (list_empty(&cil->xc_cil)) {
cil               679 fs/xfs/xfs_log_cil.c 		cil->xc_push_seq = 0;
cil               680 fs/xfs/xfs_log_cil.c 		spin_unlock(&cil->xc_push_lock);
cil               686 fs/xfs/xfs_log_cil.c 	if (push_seq < cil->xc_ctx->sequence) {
cil               687 fs/xfs/xfs_log_cil.c 		spin_unlock(&cil->xc_push_lock);
cil               715 fs/xfs/xfs_log_cil.c 	list_add(&ctx->committing, &cil->xc_committing);
cil               716 fs/xfs/xfs_log_cil.c 	spin_unlock(&cil->xc_push_lock);
cil               726 fs/xfs/xfs_log_cil.c 	while (!list_empty(&cil->xc_cil)) {
cil               729 fs/xfs/xfs_log_cil.c 		item = list_first_entry(&cil->xc_cil,
cil               750 fs/xfs/xfs_log_cil.c 	new_ctx->cil = cil;
cil               751 fs/xfs/xfs_log_cil.c 	cil->xc_ctx = new_ctx;
cil               778 fs/xfs/xfs_log_cil.c 	spin_lock(&cil->xc_push_lock);
cil               779 fs/xfs/xfs_log_cil.c 	cil->xc_current_sequence = new_ctx->sequence;
cil               780 fs/xfs/xfs_log_cil.c 	spin_unlock(&cil->xc_push_lock);
cil               781 fs/xfs/xfs_log_cil.c 	up_write(&cil->xc_ctx_lock);
cil               815 fs/xfs/xfs_log_cil.c 	spin_lock(&cil->xc_push_lock);
cil               816 fs/xfs/xfs_log_cil.c 	list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
cil               823 fs/xfs/xfs_log_cil.c 			spin_unlock(&cil->xc_push_lock);
cil               838 fs/xfs/xfs_log_cil.c 			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
cil               842 fs/xfs/xfs_log_cil.c 	spin_unlock(&cil->xc_push_lock);
cil               864 fs/xfs/xfs_log_cil.c 	spin_lock(&cil->xc_push_lock);
cil               866 fs/xfs/xfs_log_cil.c 	wake_up_all(&cil->xc_commit_wait);
cil               867 fs/xfs/xfs_log_cil.c 	spin_unlock(&cil->xc_push_lock);
cil               873 fs/xfs/xfs_log_cil.c 	up_write(&cil->xc_ctx_lock);
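
Lines 646-873 are the checkpoint push itself: take xc_ctx_lock for write (666), bail if the CIL is empty or the requested xc_push_seq is stale (678-687), add the old context to xc_committing (715), drain xc_cil (726-729), install the new context and bump xc_current_sequence (750-780), drop the rwsem (781), and only then enforce commit-record ordering against earlier contexts (815-842) before waking waiters (864-867). Below is a sketch of that ordering loop, the least obvious part; the "commit_lsn == 0 means still committing" convention and the shutdown bailout belong to lines not shown here and are assumptions.

	/*
	 * Sketch of the ordering loop at lines 815-842.  "ctx" is the
	 * context just removed from service; "new_ctx" is reused as the
	 * iteration cursor, exactly as at line 816.  xlog_wait() drops
	 * xc_push_lock before sleeping, so the walk restarts from the top
	 * after every wakeup.
	 */
	restart:
		spin_lock(&cil->xc_push_lock);
		list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
			/* only contexts older than the one being pushed matter */
			if (new_ctx->sequence >= ctx->sequence)
				continue;
			if (!new_ctx->commit_lsn) {
				/* still committing: sleep, then rescan the list */
				xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
				goto restart;
			}
		}
		spin_unlock(&cil->xc_push_lock);
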
cil               889 fs/xfs/xfs_log_cil.c 	struct xfs_cil		*cil = container_of(work, struct xfs_cil,
cil               891 fs/xfs/xfs_log_cil.c 	xlog_cil_push(cil->xc_log);
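
Lines 889-891 are the workqueue handler named by the INIT_WORK() call at line 1192: it recovers the CIL from the embedded work item with container_of() and runs the push in workqueue context. Reconstructed in full below; only the standard work-handler signature is added to the two hits.

	static void
	xlog_cil_push_work(
		struct work_struct	*work)
	{
		struct xfs_cil		*cil = container_of(work, struct xfs_cil,
								xc_push_work);
		xlog_cil_push(cil->xc_log);
	}
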
cil               905 fs/xfs/xfs_log_cil.c 	struct xfs_cil	*cil = log->l_cilp;
cil               911 fs/xfs/xfs_log_cil.c 	ASSERT(!list_empty(&cil->xc_cil));
cil               917 fs/xfs/xfs_log_cil.c 	if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
cil               920 fs/xfs/xfs_log_cil.c 	spin_lock(&cil->xc_push_lock);
cil               921 fs/xfs/xfs_log_cil.c 	if (cil->xc_push_seq < cil->xc_current_sequence) {
cil               922 fs/xfs/xfs_log_cil.c 		cil->xc_push_seq = cil->xc_current_sequence;
cil               923 fs/xfs/xfs_log_cil.c 		queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
cil               925 fs/xfs/xfs_log_cil.c 	spin_unlock(&cil->xc_push_lock);
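
Lines 905-925 implement the background push trigger run at transaction commit: nothing is queued until the current context has consumed the background space threshold, and xc_push_seq acts as a latch so each sequence is queued at most once. A reconstruction follows; the function name (presumably xlog_cil_push_background()) and the early return are the only parts not present in the hits.

	static void
	xlog_cil_push_background(
		struct xlog	*log)
	{
		struct xfs_cil	*cil = log->l_cilp;

		/* a transaction was just committed, so the CIL cannot be empty */
		ASSERT(!list_empty(&cil->xc_cil));

		/* below the background space threshold: nothing to do yet */
		if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
			return;

		spin_lock(&cil->xc_push_lock);
		if (cil->xc_push_seq < cil->xc_current_sequence) {
			cil->xc_push_seq = cil->xc_current_sequence;
			queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
		}
		spin_unlock(&cil->xc_push_lock);
	}
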
cil               940 fs/xfs/xfs_log_cil.c 	struct xfs_cil	*cil = log->l_cilp;
cil               942 fs/xfs/xfs_log_cil.c 	if (!cil)
cil               945 fs/xfs/xfs_log_cil.c 	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);
cil               948 fs/xfs/xfs_log_cil.c 	flush_work(&cil->xc_push_work);
cil               954 fs/xfs/xfs_log_cil.c 	spin_lock(&cil->xc_push_lock);
cil               955 fs/xfs/xfs_log_cil.c 	if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
cil               956 fs/xfs/xfs_log_cil.c 		spin_unlock(&cil->xc_push_lock);
cil               960 fs/xfs/xfs_log_cil.c 	cil->xc_push_seq = push_seq;
cil               961 fs/xfs/xfs_log_cil.c 	queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
cil               962 fs/xfs/xfs_log_cil.c 	spin_unlock(&cil->xc_push_lock);
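
Lines 940-962 are the targeted push used by log forces: flush any background push already queued so its state is stable, then, unless the CIL is empty or the requested sequence has already been pushed, record the sequence and queue the work. A reconstruction along the same lines; the name (presumably xlog_cil_push_now()), the signature and the early returns are filled in around the hits.

	static void
	xlog_cil_push_now(
		struct xlog	*log,
		xfs_lsn_t	push_seq)
	{
		struct xfs_cil	*cil = log->l_cilp;

		if (!cil)
			return;

		ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

		/* finish any background push already running */
		flush_work(&cil->xc_push_work);

		/* nothing to do if empty or this sequence is already queued */
		spin_lock(&cil->xc_push_lock);
		if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
			spin_unlock(&cil->xc_push_lock);
			return;
		}

		cil->xc_push_seq = push_seq;
		queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
		spin_unlock(&cil->xc_push_lock);
	}
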
cil               969 fs/xfs/xfs_log_cil.c 	struct xfs_cil	*cil = log->l_cilp;
cil               972 fs/xfs/xfs_log_cil.c 	spin_lock(&cil->xc_push_lock);
cil               973 fs/xfs/xfs_log_cil.c 	if (list_empty(&cil->xc_cil))
cil               975 fs/xfs/xfs_log_cil.c 	spin_unlock(&cil->xc_push_lock);
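
Lines 969-975 are a simple emptiness check on the CIL taken under xc_push_lock. A reconstruction; the result variable is assumed because its line does not contain "cil".

	static bool
	xlog_cil_empty(
		struct xlog	*log)
	{
		struct xfs_cil	*cil = log->l_cilp;
		bool		empty = false;

		spin_lock(&cil->xc_push_lock);
		if (list_empty(&cil->xc_cil))
			empty = true;
		spin_unlock(&cil->xc_push_lock);
		return empty;
	}
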
cil              1000 fs/xfs/xfs_log_cil.c 	struct xfs_cil		*cil = log->l_cilp;
cil              1012 fs/xfs/xfs_log_cil.c 	down_read(&cil->xc_ctx_lock);
cil              1016 fs/xfs/xfs_log_cil.c 	xc_commit_lsn = cil->xc_ctx->sequence;
cil              1043 fs/xfs/xfs_log_cil.c 	up_read(&cil->xc_ctx_lock);
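
Lines 1000-1043 are the transaction commit path (xfs_log_commit_cil() in this era): it takes xc_ctx_lock shared, so any number of committers can insert into the current context concurrently, while the push at line 666 takes the same rwsem exclusively only for the brief context switch. A skeleton of that locking follows, with the calls in between summarised rather than quoted; the insert call and the commit_lsn out-parameter are assumptions about the surrounding function.

	/* skeleton of lines 1012-1043; only the lock calls and the
	 * sequence read are in the hits */
	xfs_lsn_t	xc_commit_lsn;

	/* shared lock: many committers may insert into the current context */
	down_read(&cil->xc_ctx_lock);

	xlog_cil_insert_items(log, tp);			/* assumed call */

	/* the checkpoint sequence is reported back as the commit "lsn" */
	xc_commit_lsn = cil->xc_ctx->sequence;
	if (commit_lsn)
		*commit_lsn = xc_commit_lsn;		/* assumed out-parameter */

	/* ... release the ticket, mark items as committing, maybe kick a
	 * background push ... */

	up_read(&cil->xc_ctx_lock);
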
cil              1061 fs/xfs/xfs_log_cil.c 	struct xfs_cil		*cil = log->l_cilp;
cil              1065 fs/xfs/xfs_log_cil.c 	ASSERT(sequence <= cil->xc_current_sequence);
cil              1081 fs/xfs/xfs_log_cil.c 	spin_lock(&cil->xc_push_lock);
cil              1082 fs/xfs/xfs_log_cil.c 	list_for_each_entry(ctx, &cil->xc_committing, committing) {
cil              1097 fs/xfs/xfs_log_cil.c 			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
cil              1121 fs/xfs/xfs_log_cil.c 	if (sequence == cil->xc_current_sequence &&
cil              1122 fs/xfs/xfs_log_cil.c 	    !list_empty(&cil->xc_cil)) {
cil              1123 fs/xfs/xfs_log_cil.c 		spin_unlock(&cil->xc_push_lock);
cil              1127 fs/xfs/xfs_log_cil.c 	spin_unlock(&cil->xc_push_lock);
cil              1138 fs/xfs/xfs_log_cil.c 	spin_unlock(&cil->xc_push_lock);
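
Lines 1061-1138 force a given CIL sequence to disk: kick a targeted push, then wait on xc_commit_wait for every context at or below that sequence which is still committing, and if the target sequence is still the current one with a non-empty CIL the background push has not run yet, so go around again (1121-1123). A hedged sketch; the function name, the NULLCOMMITLSN initial value, the return convention and the omitted shutdown handling are assumptions filled in around the hits.

	xfs_lsn_t
	xlog_cil_force_lsn(
		struct xlog	*log,
		xfs_lsn_t	sequence)
	{
		struct xfs_cil		*cil = log->l_cilp;
		struct xfs_cil_ctx	*ctx;
		xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

		ASSERT(sequence <= cil->xc_current_sequence);

	restart:
		xlog_cil_push_now(log, sequence);	/* queue the push if needed */

		spin_lock(&cil->xc_push_lock);
		list_for_each_entry(ctx, &cil->xc_committing, committing) {
			if (ctx->sequence > sequence)
				continue;
			if (!ctx->commit_lsn) {
				/* still committing: sleep (drops the lock), rescan */
				xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
				goto restart;
			}
			if (ctx->sequence == sequence)
				commit_lsn = ctx->commit_lsn;	/* found our checkpoint */
		}

		/*
		 * The push runs in the background, so the target sequence may
		 * not have been swapped out of the CIL yet: if it is still the
		 * current sequence and the CIL is not empty, retry.
		 */
		if (sequence == cil->xc_current_sequence &&
		    !list_empty(&cil->xc_cil)) {
			spin_unlock(&cil->xc_push_lock);
			goto restart;
		}

		spin_unlock(&cil->xc_push_lock);
		return commit_lsn;
	}
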
cil              1179 fs/xfs/xfs_log_cil.c 	struct xfs_cil	*cil;
cil              1182 fs/xfs/xfs_log_cil.c 	cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
cil              1183 fs/xfs/xfs_log_cil.c 	if (!cil)
cil              1188 fs/xfs/xfs_log_cil.c 		kmem_free(cil);
cil              1192 fs/xfs/xfs_log_cil.c 	INIT_WORK(&cil->xc_push_work, xlog_cil_push_work);
cil              1193 fs/xfs/xfs_log_cil.c 	INIT_LIST_HEAD(&cil->xc_cil);
cil              1194 fs/xfs/xfs_log_cil.c 	INIT_LIST_HEAD(&cil->xc_committing);
cil              1195 fs/xfs/xfs_log_cil.c 	spin_lock_init(&cil->xc_cil_lock);
cil              1196 fs/xfs/xfs_log_cil.c 	spin_lock_init(&cil->xc_push_lock);
cil              1197 fs/xfs/xfs_log_cil.c 	init_rwsem(&cil->xc_ctx_lock);
cil              1198 fs/xfs/xfs_log_cil.c 	init_waitqueue_head(&cil->xc_commit_wait);
cil              1203 fs/xfs/xfs_log_cil.c 	ctx->cil = cil;
cil              1204 fs/xfs/xfs_log_cil.c 	cil->xc_ctx = ctx;
cil              1205 fs/xfs/xfs_log_cil.c 	cil->xc_current_sequence = ctx->sequence;
cil              1207 fs/xfs/xfs_log_cil.c 	cil->xc_log = log;
cil              1208 fs/xfs/xfs_log_cil.c 	log->l_cilp = cil;
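
Lines 1179-1208 are the CIL constructor (presumably xlog_cil_init()). A reconstruction; every statement touching cil comes from the hits, while the checkpoint-context allocation and its own field setup are assumptions inferred from the kmem_free(cil) error path at line 1188.

	int
	xlog_cil_init(
		struct xlog	*log)
	{
		struct xfs_cil		*cil;
		struct xfs_cil_ctx	*ctx;

		cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
		if (!cil)
			return -ENOMEM;

		ctx = kmem_zalloc(sizeof(*ctx), KM_MAYFAIL);	/* assumed */
		if (!ctx) {
			kmem_free(cil);
			return -ENOMEM;
		}

		INIT_WORK(&cil->xc_push_work, xlog_cil_push_work);
		INIT_LIST_HEAD(&cil->xc_cil);
		INIT_LIST_HEAD(&cil->xc_committing);
		spin_lock_init(&cil->xc_cil_lock);
		spin_lock_init(&cil->xc_push_lock);
		init_rwsem(&cil->xc_ctx_lock);
		init_waitqueue_head(&cil->xc_commit_wait);

		ctx->sequence = 1;		/* assumed initial sequence */
		ctx->cil = cil;
		cil->xc_ctx = ctx;
		cil->xc_current_sequence = ctx->sequence;

		cil->xc_log = log;
		log->l_cilp = cil;
		return 0;
	}
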
cil               239 fs/xfs/xfs_log_priv.h 	struct xfs_cil		*cil;
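
The final hit is the back pointer from a checkpoint context to its CIL in xfs_log_priv.h, which is what the ctx->cil->xc_log chains at lines 506-604 rely on. For reference, a sketch of the struct xfs_cil fields used throughout this listing; the member types are inferred from the calls made on them (spin_lock, down_read, xlog_wait, queue_work, and so on), and the real definition carries cache-line alignment annotations and further fields not referenced here.

	struct xfs_cil {
		struct xlog		*xc_log;		/* owning log (lines 891, 1207) */
		struct list_head	xc_cil;			/* dirty log items in commit order */
		spinlock_t		xc_cil_lock;		/* protects xc_cil (407, 481) */
		struct rw_semaphore	xc_ctx_lock;		/* read: commit, write: push (666, 1012) */
		struct xfs_cil_ctx	*xc_ctx;		/* current checkpoint context */
		spinlock_t		xc_push_lock;		/* protects the push state below */
		xfs_lsn_t		xc_push_seq;		/* sequence a push was requested for */
		struct list_head	xc_committing;		/* contexts being committed (715) */
		wait_queue_head_t	xc_commit_wait;		/* commit-record ordering waiters */
		xfs_lsn_t		xc_current_sequence;	/* sequence of the current context */
		struct work_struct	xc_push_work;		/* background push work (1192) */
	};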