l_ctx              72 fs/nfs/direct.c 	struct nfs_lock_context *l_ctx;		/* Lock context info */
l_ctx             320 fs/nfs/direct.c 	if (dreq->l_ctx != NULL)
l_ctx             321 fs/nfs/direct.c 		nfs_put_lock_context(dreq->l_ctx);
l_ctx             548 fs/nfs/direct.c 	struct nfs_lock_context *l_ctx;
l_ctx             571 fs/nfs/direct.c 	l_ctx = nfs_get_lock_context(dreq->ctx);
l_ctx             572 fs/nfs/direct.c 	if (IS_ERR(l_ctx)) {
l_ctx             573 fs/nfs/direct.c 		result = PTR_ERR(l_ctx);
l_ctx             577 fs/nfs/direct.c 	dreq->l_ctx = l_ctx;
l_ctx             964 fs/nfs/direct.c 	struct nfs_lock_context *l_ctx;
l_ctx             990 fs/nfs/direct.c 	l_ctx = nfs_get_lock_context(dreq->ctx);
l_ctx             991 fs/nfs/direct.c 	if (IS_ERR(l_ctx)) {
l_ctx             992 fs/nfs/direct.c 		result = PTR_ERR(l_ctx);
l_ctx             996 fs/nfs/direct.c 	dreq->l_ctx = l_ctx;
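
The fs/nfs/direct.c hits above show the pattern the direct read (line 548 onward) and direct write (line 964 onward) paths follow: take a lock-context reference from the open context, stash it in the direct request, and drop it when the request is freed (lines 320-321). A minimal sketch of that pairing, assuming kernel context; my_direct_req and the two helpers are hypothetical stand-ins for struct nfs_direct_req and its setup/teardown:

#include <linux/err.h>
#include <linux/nfs_fs.h>

struct my_direct_req {
	struct nfs_open_context *ctx;		/* open file context */
	struct nfs_lock_context *l_ctx;		/* lock context pinned for the I/O */
};

static int my_direct_req_attach_lock_ctx(struct my_direct_req *dreq)
{
	struct nfs_lock_context *l_ctx;

	/* nfs_get_lock_context() returns an ERR_PTR on failure */
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx))
		return PTR_ERR(l_ctx);
	dreq->l_ctx = l_ctx;
	return 0;
}

static void my_direct_req_detach_lock_ctx(struct my_direct_req *dreq)
{
	if (dreq->l_ctx != NULL)
		nfs_put_lock_context(dreq->l_ctx);
}
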
l_ctx             689 fs/nfs/file.c  	struct nfs_lock_context *l_ctx;
l_ctx             698 fs/nfs/file.c  	l_ctx = nfs_get_lock_context(nfs_file_open_context(filp));
l_ctx             699 fs/nfs/file.c  	if (!IS_ERR(l_ctx)) {
l_ctx             700 fs/nfs/file.c  		status = nfs_iocounter_wait(l_ctx);
l_ctx             701 fs/nfs/file.c  		nfs_put_lock_context(l_ctx);
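
The fs/nfs/file.c hits are from the unlock path: before releasing a file lock it takes a lock-context reference and waits, killably, for all I/O charged against that context to drain. A hedged sketch of that wait, assuming it is built inside fs/nfs so the nfs_iocounter_wait() prototype from internal.h (line 245 above) is visible; my_wait_for_lock_io is a hypothetical name:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include "internal.h"		/* nfs_iocounter_wait() */

static int my_wait_for_lock_io(struct file *filp)
{
	struct nfs_lock_context *l_ctx;
	int status = 0;

	l_ctx = nfs_get_lock_context(nfs_file_open_context(filp));
	if (!IS_ERR(l_ctx)) {
		/* sleep until io_count drops to zero, or a fatal signal arrives */
		status = nfs_iocounter_wait(l_ctx);
		nfs_put_lock_context(l_ctx);
	}
	return status;
}
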
l_ctx             854 fs/nfs/inode.c static void nfs_init_lock_context(struct nfs_lock_context *l_ctx)
l_ctx             856 fs/nfs/inode.c 	refcount_set(&l_ctx->count, 1);
l_ctx             857 fs/nfs/inode.c 	l_ctx->lockowner = current->files;
l_ctx             858 fs/nfs/inode.c 	INIT_LIST_HEAD(&l_ctx->list);
l_ctx             859 fs/nfs/inode.c 	atomic_set(&l_ctx->io_count, 0);
l_ctx             907 fs/nfs/inode.c void nfs_put_lock_context(struct nfs_lock_context *l_ctx)
l_ctx             909 fs/nfs/inode.c 	struct nfs_open_context *ctx = l_ctx->open_context;
l_ctx             912 fs/nfs/inode.c 	if (!refcount_dec_and_lock(&l_ctx->count, &inode->i_lock))
l_ctx             914 fs/nfs/inode.c 	list_del_rcu(&l_ctx->list);
l_ctx             917 fs/nfs/inode.c 	kfree_rcu(l_ctx, rcu_head);
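
The fs/nfs/inode.c hits cover the lifecycle: nfs_init_lock_context() starts a context with one reference and a zero I/O counter and records current->files as the lock owner, while nfs_put_lock_context() unlinks it under inode->i_lock and frees it via kfree_rcu() once the last reference goes away. For orientation, the structure these fields belong to is roughly the following (declared in include/linux/nfs_fs.h; the field comments are editorial, not from the header):

struct nfs_lock_context {
	refcount_t count;			/* references; starts at 1, freed on last put */
	struct list_head list;			/* entry on the open context's list of lock contexts */
	struct nfs_open_context *open_context;	/* owning open context */
	fl_owner_t lockowner;			/* POSIX lock owner, normally current->files */
	atomic_t io_count;			/* outstanding I/O charged to this context */
	struct rcu_head rcu_head;		/* deferred free via kfree_rcu() */
};
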
l_ctx             245 fs/nfs/internal.h int nfs_iocounter_wait(struct nfs_lock_context *l_ctx);
l_ctx             891 fs/nfs/nfs3proc.c 	struct nfs_lock_context *l_ctx = data;
l_ctx             892 fs/nfs/nfs3proc.c 	if (l_ctx && test_bit(NFS_CONTEXT_UNLOCK, &l_ctx->open_context->flags)) {
l_ctx             893 fs/nfs/nfs3proc.c 		get_nfs_open_context(l_ctx->open_context);
l_ctx             894 fs/nfs/nfs3proc.c 		nfs_get_lock_context(l_ctx->open_context);
l_ctx             900 fs/nfs/nfs3proc.c 	struct nfs_lock_context *l_ctx = data;
l_ctx             901 fs/nfs/nfs3proc.c 	if (l_ctx && test_bit(NFS_CONTEXT_UNLOCK, &l_ctx->open_context->flags))
l_ctx             902 fs/nfs/nfs3proc.c 		return nfs_async_iocounter_wait(task, l_ctx);
l_ctx             909 fs/nfs/nfs3proc.c 	struct nfs_lock_context *l_ctx = data;
l_ctx             911 fs/nfs/nfs3proc.c 	if (l_ctx && test_bit(NFS_CONTEXT_UNLOCK, &l_ctx->open_context->flags)) {
l_ctx             912 fs/nfs/nfs3proc.c 		ctx = l_ctx->open_context;
l_ctx             913 fs/nfs/nfs3proc.c 		nfs_put_lock_context(l_ctx);
l_ctx             928 fs/nfs/nfs3proc.c 	struct nfs_lock_context *l_ctx = NULL;
l_ctx             933 fs/nfs/nfs3proc.c 		l_ctx = nfs_get_lock_context(ctx);
l_ctx             934 fs/nfs/nfs3proc.c 		if (IS_ERR(l_ctx))
l_ctx             935 fs/nfs/nfs3proc.c 			l_ctx = NULL;
l_ctx             940 fs/nfs/nfs3proc.c 	status = nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl, l_ctx);
l_ctx             942 fs/nfs/nfs3proc.c 	if (l_ctx)
l_ctx             943 fs/nfs/nfs3proc.c 		nfs_put_lock_context(l_ctx);
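
The fs/nfs/nfs3proc.c hits are the NFSv3 locking path: nfs3_proc_lock() (lines 928-943) takes a lock-context reference and hands it to nlmclnt_proc() as callback data, and the NLM callbacks pin extra open/lock context references for an asynchronous UNLOCK, wait on the I/O counter before the RPC runs, and drop the references when the call is released. A sketch of the alloc/release pairing, assuming kernel context; the my_nlm_* names are hypothetical stand-ins for the nfs3_nlm_* callbacks:

#include <linux/bitops.h>
#include <linux/nfs_fs.h>

static void my_nlm_alloc_call(void *data)
{
	struct nfs_lock_context *l_ctx = data;

	if (l_ctx && test_bit(NFS_CONTEXT_UNLOCK, &l_ctx->open_context->flags)) {
		get_nfs_open_context(l_ctx->open_context);	/* pin the open context */
		nfs_get_lock_context(l_ctx->open_context);	/* pin the lock context */
	}
}

static void my_nlm_release_call(void *data)
{
	struct nfs_lock_context *l_ctx = data;
	struct nfs_open_context *ctx;

	if (l_ctx && test_bit(NFS_CONTEXT_UNLOCK, &l_ctx->open_context->flags)) {
		ctx = l_ctx->open_context;
		nfs_put_lock_context(l_ctx);	/* drop the extra lock-context reference */
		put_nfs_open_context(ctx);	/* drop the extra open-context reference */
	}
}
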
l_ctx             312 fs/nfs/nfs4_fs.h 		const struct nfs_lock_context *l_ctx,
l_ctx            3250 fs/nfs/nfs4proc.c 		struct nfs_lock_context *l_ctx;
l_ctx            3253 fs/nfs/nfs4proc.c 		l_ctx = nfs_get_lock_context(ctx);
l_ctx            3254 fs/nfs/nfs4proc.c 		if (IS_ERR(l_ctx))
l_ctx            3255 fs/nfs/nfs4proc.c 			return PTR_ERR(l_ctx);
l_ctx            3256 fs/nfs/nfs4proc.c 		status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx,
l_ctx            3258 fs/nfs/nfs4proc.c 		nfs_put_lock_context(l_ctx);
l_ctx            5114 fs/nfs/nfs4proc.c 		const struct nfs_lock_context *l_ctx,
l_ctx            5117 fs/nfs/nfs4proc.c 	return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL);
l_ctx            5123 fs/nfs/nfs4proc.c 		const struct nfs_lock_context *l_ctx,
l_ctx            5129 fs/nfs/nfs4proc.c 	if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
l_ctx            6483 fs/nfs/nfs4proc.c 	struct nfs_lock_context *l_ctx;
l_ctx            6508 fs/nfs/nfs4proc.c 	p->l_ctx = nfs_get_lock_context(ctx);
l_ctx            6523 fs/nfs/nfs4proc.c 	nfs_put_lock_context(calldata->l_ctx);
l_ctx            6577 fs/nfs/nfs4proc.c 	if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) &&
l_ctx            6578 fs/nfs/nfs4proc.c 		nfs_async_iocounter_wait(task, calldata->l_ctx))
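
The fs/nfs/nfs4proc.c hits show two uses: selecting the stateid to put on the wire (nfs4_select_rw_stateid()/nfs4_set_rw_stateid(), lines 3250-3258 and 5114-5129) and the LOCKU calldata (lines 6483-6578), which pins a lock context so the unlock can wait for outstanding I/O in its prepare callback and drops the reference on release. A sketch of stateid selection for a write, assuming it is built inside fs/nfs where nfs4_fs.h declares nfs4_select_rw_stateid(); my_stateid_for_write is a hypothetical name:

#include <linux/err.h>
#include <linux/nfs_fs.h>
#include <linux/nfs4.h>		/* nfs4_stateid */
#include "nfs4_fs.h"		/* nfs4_select_rw_stateid() */

static int my_stateid_for_write(struct nfs_open_context *ctx, nfs4_stateid *stateid)
{
	struct nfs_lock_context *l_ctx;
	int status;

	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx))
		return PTR_ERR(l_ctx);
	/* prefer a lock stateid owned by this lock context, falling back to a
	 * delegation or open stateid; the final cred argument is unused here */
	status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx, stateid, NULL);
	nfs_put_lock_context(l_ctx);
	return status;
}
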
l_ctx             987 fs/nfs/nfs4state.c 		const struct nfs_lock_context *l_ctx)
l_ctx             993 fs/nfs/nfs4state.c 	if (l_ctx == NULL)
l_ctx             999 fs/nfs/nfs4state.c 	fl_owner = l_ctx->lockowner;
l_ctx            1000 fs/nfs/nfs4state.c 	fl_flock_owner = l_ctx->open_context->flock_owner;
l_ctx            1040 fs/nfs/nfs4state.c 		fmode_t fmode, const struct nfs_lock_context *l_ctx,
l_ctx            1049 fs/nfs/nfs4state.c 	ret = nfs4_copy_lock_stateid(dst, state, l_ctx);
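
The fs/nfs/nfs4state.c hits are where the lock context identifies the lock owner: nfs4_copy_lock_stateid() uses both l_ctx->lockowner (the per-task POSIX owner) and l_ctx->open_context->flock_owner (the per-open flock owner) to find a matching nfs4_lock_state, and nfs4_select_rw_stateid() (line 1040) forwards the lock context into that lookup. A rough sketch of the owner match, assuming fs/nfs context for the nfs4_lock_state definition; my_lock_owner_matches is hypothetical and condenses the real list walk into a single predicate:

#include <linux/nfs_fs.h>
#include "nfs4_fs.h"		/* struct nfs4_lock_state */

static bool my_lock_owner_matches(const struct nfs_lock_context *l_ctx,
				  const struct nfs4_lock_state *lsp)
{
	fl_owner_t fl_owner = l_ctx->lockowner;
	fl_owner_t fl_flock_owner = l_ctx->open_context->flock_owner;

	/* a lock state belongs to this context if it was created under either
	 * the POSIX lock owner or the flock owner */
	return lsp->ls_owner == fl_owner || lsp->ls_owner == fl_flock_owner;
}
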
l_ctx             100 fs/nfs/pagelist.c nfs_iocounter_wait(struct nfs_lock_context *l_ctx)
l_ctx             102 fs/nfs/pagelist.c 	return wait_var_event_killable(&l_ctx->io_count,
l_ctx             103 fs/nfs/pagelist.c 				       !atomic_read(&l_ctx->io_count));
l_ctx             116 fs/nfs/pagelist.c nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
l_ctx             118 fs/nfs/pagelist.c 	struct inode *inode = d_inode(l_ctx->open_context->dentry);
l_ctx             121 fs/nfs/pagelist.c 	if (atomic_read(&l_ctx->io_count) > 0) {
l_ctx             126 fs/nfs/pagelist.c 	if (atomic_read(&l_ctx->io_count) == 0) {
l_ctx             300 fs/nfs/pagelist.c __nfs_create_request(struct nfs_lock_context *l_ctx, struct page *page,
l_ctx             305 fs/nfs/pagelist.c 	struct nfs_open_context *ctx = l_ctx->open_context;
l_ctx             314 fs/nfs/pagelist.c 	req->wb_lock_context = l_ctx;
l_ctx             315 fs/nfs/pagelist.c 	refcount_inc(&l_ctx->count);
l_ctx             316 fs/nfs/pagelist.c 	atomic_inc(&l_ctx->io_count);
l_ctx             349 fs/nfs/pagelist.c 	struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
l_ctx             352 fs/nfs/pagelist.c 	if (IS_ERR(l_ctx))
l_ctx             353 fs/nfs/pagelist.c 		return ERR_CAST(l_ctx);
l_ctx             354 fs/nfs/pagelist.c 	ret = __nfs_create_request(l_ctx, page, offset, offset, count);
l_ctx             357 fs/nfs/pagelist.c 	nfs_put_lock_context(l_ctx);
l_ctx             417 fs/nfs/pagelist.c 	struct nfs_lock_context *l_ctx = req->wb_lock_context;
l_ctx             424 fs/nfs/pagelist.c 	if (l_ctx != NULL) {
l_ctx             425 fs/nfs/pagelist.c 		if (atomic_dec_and_test(&l_ctx->io_count)) {
l_ctx             426 fs/nfs/pagelist.c 			wake_up_var(&l_ctx->io_count);
l_ctx             427 fs/nfs/pagelist.c 			ctx = l_ctx->open_context;
l_ctx             431 fs/nfs/pagelist.c 		nfs_put_lock_context(l_ctx);
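
The fs/nfs/pagelist.c hits implement the I/O counting that the unlock paths rely on: __nfs_create_request() takes a lock-context reference and bumps io_count for every page request (lines 314-316), request teardown drops io_count and wakes any waiter (lines 424-431), and nfs_iocounter_wait()/nfs_async_iocounter_wait() block until the counter reaches zero. A sketch of that handshake, assuming kernel context; the my_iocounter_* helpers are hypothetical condensations of the listed code:

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/wait_bit.h>	/* wait_var_event_killable(), wake_up_var() */
#include <linux/nfs_fs.h>

static void my_iocounter_begin(struct nfs_lock_context *l_ctx)
{
	refcount_inc(&l_ctx->count);		/* the request holds a lock-context ref */
	atomic_inc(&l_ctx->io_count);		/* one more outstanding I/O */
}

static void my_iocounter_end(struct nfs_lock_context *l_ctx)
{
	if (atomic_dec_and_test(&l_ctx->io_count))
		wake_up_var(&l_ctx->io_count);	/* last I/O done: wake any unlock waiter */
	nfs_put_lock_context(l_ctx);
}

static int my_iocounter_wait(struct nfs_lock_context *l_ctx)
{
	/* killable sleep until io_count reaches zero */
	return wait_var_event_killable(&l_ctx->io_count,
				       !atomic_read(&l_ctx->io_count));
}
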
l_ctx            1207 fs/nfs/write.c 	struct nfs_lock_context *l_ctx;
l_ctx            1223 fs/nfs/write.c 		l_ctx = req->wb_lock_context;
l_ctx            1226 fs/nfs/write.c 		if (l_ctx && flctx &&
l_ctx            1229 fs/nfs/write.c 			do_flush |= l_ctx->lockowner != current->files;
l_ctx             401 include/linux/nfs_fs.h extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx);
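
The fs/nfs/write.c hits (around lines 1207-1229) use the lock owner recorded on a cached request to decide whether a page must be flushed before reuse: if byte-range or flock locks exist on the file and the request was created under a different lock owner than current->files, the old write goes out first. The public get/put prototypes live in include/linux/nfs_fs.h (line 401 above). A hedged sketch of that compatibility test, with my_req_needs_flush as a hypothetical name and struct layouts as of the kernel this listing was taken from:

#include <linux/fs.h>		/* struct file_lock_context */
#include <linux/sched.h>	/* current */
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>	/* struct nfs_page */

static bool my_req_needs_flush(struct nfs_page *req, struct file_lock_context *flctx)
{
	struct nfs_lock_context *l_ctx = req->wb_lock_context;
	bool do_flush = false;

	if (l_ctx && flctx &&
	    !(list_empty_careful(&flctx->flc_posix) &&
	      list_empty_careful(&flctx->flc_flock)))
		/* locks are held: require the same lock owner as the cached request */
		do_flush = l_ctx->lockowner != current->files;
	return do_flush;
}
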