/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "nfs4trace.h"

static void nfs_free_delegation(struct nfs_delegation *delegation)
{
	if (delegation->cred) {
		put_rpccred(delegation->cred);
		delegation->cred = NULL;
	}
	kfree_rcu(delegation, rcu);
}

/**
 * nfs_mark_delegation_referenced - set delegation's REFERENCED flag
 * @delegation: delegation to process
 *
 */
void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
{
	set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
}

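/*
 * Check under rcu_read_lock() whether the inode holds a delegation that
 * covers the requested open mode(s) and is not currently being returned.
 * Optionally mark the delegation NFS_DELEGATION_REFERENCED so that it
 * survives the next scan for unreferenced delegations.
 */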
static int
nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
{
	struct nfs_delegation *delegation;
	int ret = 0;

	flags &= FMODE_READ|FMODE_WRITE;
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation != NULL && (delegation->type & flags) == flags &&
	    !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
		if (mark)
			nfs_mark_delegation_referenced(delegation);
		ret = 1;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * nfs4_have_delegation - check if inode has a delegation, mark it referenced
 * @inode: inode to check
 * @flags: delegation types to check for
 *
 * Marks the delegation NFS_DELEGATION_REFERENCED if there is one.
 * Returns one if inode has the indicated delegation, otherwise zero.
 */
int nfs4_have_delegation(struct inode *inode, fmode_t flags)
{
	return nfs4_do_check_delegation(inode, flags, true);
}

/*
 * nfs4_check_delegation - check if inode has a delegation, do not mark
 * NFS_DELEGATION_REFERENCED if it has one.
 */
int nfs4_check_delegation(struct inode *inode, fmode_t flags)
{
	return nfs4_do_check_delegation(inode, flags, false);
}

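/*
 * Reclaim the POSIX and flock locks held under @ctx against the new open
 * stateid.  The file_lock_context spinlock is dropped around each
 * nfs4_lock_delegation_recall() call.
 */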
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	struct file_lock_context *flctx = inode->i_flctx;
	struct list_head *list;
	int status = 0;

	if (flctx == NULL)
		goto out;

	list = &flctx->flc_posix;
	spin_lock(&flctx->flc_lock);
restart:
	list_for_each_entry(fl, list, fl_list) {
		if (nfs_file_open_context(fl->fl_file) != ctx)
			continue;
		spin_unlock(&flctx->flc_lock);
		status = nfs4_lock_delegation_recall(fl, state, stateid);
		if (status < 0)
			goto out;
		spin_lock(&flctx->flc_lock);
	}
	if (list == &flctx->flc_posix) {
		list = &flctx->flc_flock;
		goto restart;
	}
	spin_unlock(&flctx->flc_lock);
out:
	return status;
}

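/*
 * Reclaim every open state on @inode that still carries the delegated
 * stateid: re-open against the server, then reclaim any locks held by the
 * same open context.  The state owner's so_delegreturn_mutex and reclaim
 * seqcount guard against races with unlock and state recovery.
 */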
static int nfs_delegation_claim_opens(struct inode *inode,
		const nfs4_stateid *stateid, fmode_t type)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state_owner *sp;
	struct nfs4_state *state;
	unsigned int seq;
	int err;

again:
	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		if (!nfs4_valid_open_stateid(state))
			continue;
		if (!nfs4_stateid_match(&state->stateid, stateid))
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&inode->i_lock);
		sp = state->owner;
		/* Block nfs4_proc_unlck */
		mutex_lock(&sp->so_delegreturn_mutex);
		seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
		err = nfs4_open_delegation_recall(ctx, state, stateid, type);
		if (!err)
			err = nfs_delegation_claim_locks(ctx, state, stateid);
		if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
			err = -EAGAIN;
		mutex_unlock(&sp->so_delegreturn_mutex);
		put_nfs_open_context(ctx);
		if (err != 0)
			return err;
		goto again;
	}
	spin_unlock(&inode->i_lock);
	return 0;
}

/**
 * nfs_inode_reclaim_delegation - process a delegation reclaim request
 * @inode: inode to process
 * @cred: credential to use for request
 * @res: new delegation state from server
 *
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
				  struct nfs_openres *res)
{
	struct nfs_delegation *delegation;
	struct rpc_cred *oldcred = NULL;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation != NULL) {
		spin_lock(&delegation->lock);
		if (delegation->inode != NULL) {
			nfs4_stateid_copy(&delegation->stateid, &res->delegation);
			delegation->type = res->delegation_type;
			delegation->pagemod_limit = res->pagemod_limit;
			oldcred = delegation->cred;
			delegation->cred = get_rpccred(cred);
			clear_bit(NFS_DELEGATION_NEED_RECLAIM,
				  &delegation->flags);
			spin_unlock(&delegation->lock);
			rcu_read_unlock();
			put_rpccred(oldcred);
			trace_nfs4_reclaim_delegation(inode, res->delegation_type);
		} else {
			/* We appear to have raced with a delegation return. */
			spin_unlock(&delegation->lock);
			rcu_read_unlock();
			nfs_inode_set_delegation(inode, cred, res);
		}
	} else {
		rcu_read_unlock();
	}
}

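/*
 * Send DELEGRETURN for this delegation unless the server has already
 * revoked it, then free the delegation structure.
 */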
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
	int res = 0;

	if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
		res = nfs4_proc_delegreturn(inode,
				delegation->cred,
				&delegation->stateid,
				issync);
	nfs_free_delegation(delegation);
	return res;
}

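/*
 * Grab a reference to the delegation's inode.  Returns NULL if the
 * delegation has been detached from its inode or the inode is being
 * freed.
 */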
static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation)
{
	struct inode *inode = NULL;

	spin_lock(&delegation->lock);
	if (delegation->inode != NULL)
		inode = igrab(delegation->inode);
	spin_unlock(&delegation->lock);
	return inode;
}

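/*
 * Try to claim the right to return the inode's delegation.  Returns the
 * delegation if this caller set NFS_DELEGATION_RETURNING, or NULL if
 * there is no delegation or another return is already in progress.
 * Must be called with rcu_read_lock() held.
 */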
static struct nfs_delegation *
nfs_start_delegation_return_locked(struct nfs_inode *nfsi)
{
	struct nfs_delegation *ret = NULL;
	struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);

	if (delegation == NULL)
		goto out;
	spin_lock(&delegation->lock);
	if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
		ret = delegation;
	spin_unlock(&delegation->lock);
out:
	return ret;
}

static struct nfs_delegation *
nfs_start_delegation_return(struct nfs_inode *nfsi)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = nfs_start_delegation_return_locked(nfsi);
	rcu_read_unlock();
	return delegation;
}

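/*
 * The return attempt failed: clear the RETURNING flag, re-flag the
 * delegation for return and set NFS4CLNT_DELEGRETURN so that the state
 * manager retries it.
 */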
static void
nfs_abort_delegation_return(struct nfs_delegation *delegation,
		struct nfs_client *clp)
{
	spin_lock(&delegation->lock);
	clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
	set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
	spin_unlock(&delegation->lock);
	set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
}

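/*
 * Unlink the delegation from its inode and from the per-server list.
 * The caller must hold clp->cl_lock.  Returns NULL if the inode's
 * delegation no longer matches @delegation.
 */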
static struct nfs_delegation *
nfs_detach_delegation_locked(struct nfs_inode *nfsi,
		struct nfs_delegation *delegation,
		struct nfs_client *clp)
{
	struct nfs_delegation *deleg_cur =
		rcu_dereference_protected(nfsi->delegation,
				lockdep_is_held(&clp->cl_lock));

	if (deleg_cur == NULL || delegation != deleg_cur)
		return NULL;

	spin_lock(&delegation->lock);
	set_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
	list_del_rcu(&delegation->super_list);
	delegation->inode = NULL;
	rcu_assign_pointer(nfsi->delegation, NULL);
	spin_unlock(&delegation->lock);
	return delegation;
}

static struct nfs_delegation *nfs_detach_delegation(struct nfs_inode *nfsi,
		struct nfs_delegation *delegation,
		struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;

	spin_lock(&clp->cl_lock);
	delegation = nfs_detach_delegation_locked(nfsi, delegation, clp);
	spin_unlock(&clp->cl_lock);
	return delegation;
}

static struct nfs_delegation *
nfs_inode_detach_delegation(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_delegation *delegation;

	delegation = nfs_start_delegation_return(nfsi);
	if (delegation == NULL)
		return NULL;
	return nfs_detach_delegation(nfsi, delegation, server);
}

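/*
 * The server handed out a more recent stateid for a delegation we already
 * hold: update the stateid sequence number and delegation type in place.
 * The write barrier orders the seqid update before the type update.
 */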
static void
nfs_update_inplace_delegation(struct nfs_delegation *delegation,
		const struct nfs_delegation *update)
{
	if (nfs4_stateid_is_newer(&update->stateid, &delegation->stateid)) {
		delegation->stateid.seqid = update->stateid.seqid;
		smp_wmb();
		delegation->type = update->type;
	}
}

/**
 * nfs_inode_set_delegation - set up a delegation on an inode
 * @inode: inode to which delegation applies
 * @cred: cred to use for subsequent delegation processing
 * @res: new delegation state from server
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_client *clp = server->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation, *old_delegation;
	struct nfs_delegation *freeme = NULL;
	int status = 0;

	delegation = kmalloc(sizeof(*delegation), GFP_NOFS);
	if (delegation == NULL)
		return -ENOMEM;
	nfs4_stateid_copy(&delegation->stateid, &res->delegation);
	delegation->type = res->delegation_type;
	delegation->pagemod_limit = res->pagemod_limit;
	delegation->change_attr = inode->i_version;
	delegation->cred = get_rpccred(cred);
	delegation->inode = inode;
	delegation->flags = 1<<NFS_DELEGATION_REFERENCED;
	spin_lock_init(&delegation->lock);

	spin_lock(&clp->cl_lock);
	old_delegation = rcu_dereference_protected(nfsi->delegation,
					lockdep_is_held(&clp->cl_lock));
	if (old_delegation != NULL) {
		/* Is this an update of the existing delegation? */
		if (nfs4_stateid_match_other(&old_delegation->stateid,
					&delegation->stateid)) {
			nfs_update_inplace_delegation(old_delegation,
					delegation);
			goto out;
		}
		/*
		 * Deal with broken servers that hand out two
		 * delegations for the same file.
		 * Allow for upgrades to a WRITE delegation, but
		 * nothing else.
		 */
		dfprintk(FILE, "%s: server %s handed out "
				"a duplicate delegation!\n",
				__func__, clp->cl_hostname);
		if (delegation->type == old_delegation->type ||
		    !(delegation->type & FMODE_WRITE)) {
			freeme = delegation;
			delegation = NULL;
			goto out;
		}
		if (test_and_set_bit(NFS_DELEGATION_RETURNING,
					&old_delegation->flags))
			goto out;
		freeme = nfs_detach_delegation_locked(nfsi,
				old_delegation, clp);
		if (freeme == NULL)
			goto out;
	}
	list_add_tail_rcu(&delegation->super_list, &server->delegations);
	rcu_assign_pointer(nfsi->delegation, delegation);
	delegation = NULL;

	/* Ensure we revalidate the attributes and page cache! */
	spin_lock(&inode->i_lock);
	nfsi->cache_validity |= NFS_INO_REVAL_FORCED;
	spin_unlock(&inode->i_lock);
	trace_nfs4_set_delegation(inode, res->delegation_type);

out:
	spin_unlock(&clp->cl_lock);
	if (delegation != NULL)
		nfs_free_delegation(delegation);
	if (freeme != NULL)
		nfs_do_return_delegation(inode, freeme, 0);
	return status;
}

/*
 * Basic procedure for returning a delegation to the server
 */
static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	int err = 0;

	if (delegation == NULL)
		return 0;
	do {
		if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
			break;
		err = nfs_delegation_claim_opens(inode, &delegation->stateid,
				delegation->type);
		if (!issync || err != -EAGAIN)
			break;
		/*
		 * Guard against state recovery
		 */
		err = nfs4_wait_clnt_recover(clp);
	} while (err == 0);

	if (err) {
		nfs_abort_delegation_return(delegation, clp);
		goto out;
	}
	if (!nfs_detach_delegation(nfsi, delegation, NFS_SERVER(inode)))
		goto out;

	err = nfs_do_return_delegation(inode, delegation, issync);
out:
	return err;
}

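/*
 * Does this delegation need to be returned now?  True if it has been
 * explicitly flagged for return, or if it is flagged "return on close"
 * and the inode no longer has any open files.  Delegations already being
 * returned are skipped.
 */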
static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
{
	bool ret = false;

	if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
		goto out;
	if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
		ret = true;
	if (test_and_clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) && !ret) {
		struct inode *inode;

		spin_lock(&delegation->lock);
		inode = delegation->inode;
		if (inode && list_empty(&NFS_I(inode)->open_files))
			ret = true;
		spin_unlock(&delegation->lock);
	}
out:
	return ret;
}

/**
 * nfs_client_return_marked_delegations - return previously marked delegations
 * @clp: nfs_client to process
 *
 * Note that this function is designed to be called by the state
 * manager thread. For this reason, it cannot flush the dirty data,
 * since that could deadlock in case of a state recovery error.
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_client_return_marked_delegations(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	struct nfs_server *server;
	struct inode *inode;
	int err = 0;

restart:
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry_rcu(delegation, &server->delegations,
								super_list) {
			if (!nfs_delegation_need_return(delegation))
				continue;
			if (!nfs_sb_active(server->super))
				continue;
			inode = nfs_delegation_grab_inode(delegation);
			if (inode == NULL) {
				rcu_read_unlock();
				nfs_sb_deactive(server->super);
				goto restart;
			}
			delegation = nfs_start_delegation_return_locked(NFS_I(inode));
			rcu_read_unlock();

			err = nfs_end_delegation_return(inode, delegation, 0);
			iput(inode);
			nfs_sb_deactive(server->super);
			if (!err)
				goto restart;
			set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
			return err;
		}
	}
	rcu_read_unlock();
	return 0;
}

/**
 * nfs_inode_return_delegation_noreclaim - return delegation, don't reclaim opens
 * @inode: inode to process
 *
 * Does not protect against delegation reclaims, therefore really only safe
 * to be called from nfs4_clear_inode().
 */
void nfs_inode_return_delegation_noreclaim(struct inode *inode)
{
	struct nfs_delegation *delegation;

	delegation = nfs_inode_detach_delegation(inode);
	if (delegation != NULL)
		nfs_do_return_delegation(inode, delegation, 1);
}

/**
 * nfs4_inode_return_delegation - synchronously return a delegation
 * @inode: inode to process
 *
 * This routine will always flush any dirty data to disk on the
 * assumption that if we need to return the delegation, then
 * we should stop caching.
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs4_inode_return_delegation(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int err = 0;

	nfs_wb_all(inode);
	delegation = nfs_start_delegation_return(nfsi);
	if (delegation != NULL)
		err = nfs_end_delegation_return(inode, delegation, 1);
	return err;
}

static void nfs_mark_return_if_closed_delegation(struct nfs_server *server,
		struct nfs_delegation *delegation)
{
	set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
	set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
}

static void nfs_mark_return_delegation(struct nfs_server *server,
		struct nfs_delegation *delegation)
{
	set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
	set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
}

static bool nfs_server_mark_return_all_delegations(struct nfs_server *server)
{
	struct nfs_delegation *delegation;
	bool ret = false;

	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
		nfs_mark_return_delegation(server, delegation);
		ret = true;
	}
	return ret;
}

static void nfs_client_mark_return_all_delegations(struct nfs_client *clp)
{
	struct nfs_server *server;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		nfs_server_mark_return_all_delegations(server);
	rcu_read_unlock();
}

static void nfs_delegation_run_state_manager(struct nfs_client *clp)
{
	if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state))
		nfs4_schedule_state_manager(clp);
}

/**
 * nfs_expire_all_delegations - return all delegations held by a client
 * @clp: client to process
 *
 * Marks every delegation for return and schedules the state manager
 * to process them.
 */
void nfs_expire_all_delegations(struct nfs_client *clp)
{
	nfs_client_mark_return_all_delegations(clp);
	nfs_delegation_run_state_manager(clp);
}

/**
 * nfs_server_return_all_delegations - return delegations for one superblock
 * @server: nfs_server to process
 *
 */
void nfs_server_return_all_delegations(struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;
	bool need_wait;

	if (clp == NULL)
		return;

	rcu_read_lock();
	need_wait = nfs_server_mark_return_all_delegations(server);
	rcu_read_unlock();

	if (need_wait) {
		nfs4_schedule_state_manager(clp);
		nfs4_wait_clnt_recover(clp);
	}
}

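/*
 * Flag delegations of the requested type(s) for return once their files
 * are closed.  Read/write delegations are left alone unless FMODE_WRITE
 * was included in @flags.
 */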
static void nfs_mark_return_unused_delegation_types(struct nfs_server *server,
						 fmode_t flags)
{
	struct nfs_delegation *delegation;

	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
		if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
			continue;
		if (delegation->type & flags)
			nfs_mark_return_if_closed_delegation(server, delegation);
	}
}

static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *clp,
							fmode_t flags)
{
	struct nfs_server *server;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		nfs_mark_return_unused_delegation_types(server, flags);
	rcu_read_unlock();
}

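/*
 * The delegation stateid is no longer valid on the server: mark it
 * revoked so that nfs_do_return_delegation() skips the DELEGRETURN call,
 * and flag it for return so that it gets cleaned up.
 */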
static void nfs_revoke_delegation(struct inode *inode)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation != NULL) {
		set_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
		nfs_mark_return_delegation(NFS_SERVER(inode), delegation);
	}
	rcu_read_unlock();
}

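/*
 * Revoke and detach a delegation whose stateid the server no longer
 * recognizes, then recover any open and lock state that depended on it.
 */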
void nfs_remove_bad_delegation(struct inode *inode)
{
	struct nfs_delegation *delegation;

	nfs_revoke_delegation(inode);
	delegation = nfs_inode_detach_delegation(inode);
	if (delegation) {
		nfs_inode_find_state_and_recover(inode, &delegation->stateid);
		nfs_free_delegation(delegation);
	}
}
EXPORT_SYMBOL_GPL(nfs_remove_bad_delegation);

/**
 * nfs_expire_unused_delegation_types - return unused delegations of given types
 * @clp: client to process
 * @flags: delegation types to expire
 *
 */
void nfs_expire_unused_delegation_types(struct nfs_client *clp, fmode_t flags)
{
	nfs_client_mark_return_unused_delegation_types(clp, flags);
	nfs_delegation_run_state_manager(clp);
}

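/*
 * Flag for return-on-close any delegation that has not been referenced
 * since the previous scan; clearing the REFERENCED bit here makes a
 * still-unused delegation a candidate on the next pass.
 */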
static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server)
{
	struct nfs_delegation *delegation;

	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
		if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
			continue;
		nfs_mark_return_if_closed_delegation(server, delegation);
	}
}

/**
 * nfs_expire_unreferenced_delegations - Eliminate unused delegations
 * @clp: nfs_client to process
 *
 */
void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
{
	struct nfs_server *server;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		nfs_mark_return_unreferenced_delegations(server);
	rcu_read_unlock();

	nfs_delegation_run_state_manager(clp);
}

/**
 * nfs_async_inode_return_delegation - asynchronously return a delegation
 * @inode: inode to process
 * @stateid: state ID information
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_async_inode_return_delegation(struct inode *inode,
				      const nfs4_stateid *stateid)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_client *clp = server->nfs_client;
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation == NULL)
		goto out_enoent;
	if (stateid != NULL &&
	    !clp->cl_mvops->match_stateid(&delegation->stateid, stateid))
		goto out_enoent;
	nfs_mark_return_delegation(server, delegation);
	rcu_read_unlock();

	nfs_delegation_run_state_manager(clp);
	return 0;
out_enoent:
	rcu_read_unlock();
	return -ENOENT;
}

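/*
 * Scan one server's delegation list for an inode whose filehandle matches
 * @fhandle.  Returns a referenced inode, or NULL if none was found.
 */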
static struct inode *
nfs_delegation_find_inode_server(struct nfs_server *server,
				 const struct nfs_fh *fhandle)
{
	struct nfs_delegation *delegation;
	struct inode *res = NULL;

	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
		spin_lock(&delegation->lock);
		if (delegation->inode != NULL &&
		    nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
			res = igrab(delegation->inode);
		}
		spin_unlock(&delegation->lock);
		if (res != NULL)
			break;
	}
	return res;
}

/**
 * nfs_delegation_find_inode - retrieve the inode associated with a delegation
 * @clp: client state handle
 * @fhandle: filehandle from a delegation recall
 *
 * Returns pointer to inode matching "fhandle," or NULL if a matching inode
 * cannot be found.
 */
struct inode *nfs_delegation_find_inode(struct nfs_client *clp,
					const struct nfs_fh *fhandle)
{
	struct nfs_server *server;
	struct inode *res = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		res = nfs_delegation_find_inode_server(server, fhandle);
		if (res != NULL)
			break;
	}
	rcu_read_unlock();
	return res;
}

static void nfs_delegation_mark_reclaim_server(struct nfs_server *server)
{
	struct nfs_delegation *delegation;

	list_for_each_entry_rcu(delegation, &server->delegations, super_list)
		set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
}

/**
 * nfs_delegation_mark_reclaim - mark all delegations as needing to be reclaimed
 * @clp: nfs_client to process
 *
 */
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{
	struct nfs_server *server;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		nfs_delegation_mark_reclaim_server(server);
	rcu_read_unlock();
}

/**
 * nfs_delegation_reap_unclaimed - reap unclaimed delegations after reboot recovery is done
 * @clp: nfs_client to process
 *
 */
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	struct nfs_server *server;
	struct inode *inode;

restart:
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry_rcu(delegation, &server->delegations,
								super_list) {
			if (test_bit(NFS_DELEGATION_RETURNING,
						&delegation->flags))
				continue;
			if (test_bit(NFS_DELEGATION_NEED_RECLAIM,
						&delegation->flags) == 0)
				continue;
			if (!nfs_sb_active(server->super))
				continue;
			inode = nfs_delegation_grab_inode(delegation);
			if (inode == NULL) {
				rcu_read_unlock();
				nfs_sb_deactive(server->super);
				goto restart;
			}
			delegation = nfs_start_delegation_return_locked(NFS_I(inode));
			rcu_read_unlock();
			if (delegation != NULL) {
				delegation = nfs_detach_delegation(NFS_I(inode),
					delegation, server);
				if (delegation != NULL)
					nfs_free_delegation(delegation);
			}
			iput(inode);
			nfs_sb_deactive(server->super);
			goto restart;
		}
	}
	rcu_read_unlock();
}

/**
 * nfs_delegations_present - check for existence of delegations
 * @clp: client state handle
 *
 * Returns one if there are any nfs_delegation structures attached
 * to this nfs_client.
 */
int nfs_delegations_present(struct nfs_client *clp)
{
	struct nfs_server *server;
	int ret = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		if (!list_empty(&server->delegations)) {
			ret = 1;
			break;
		}
	rcu_read_unlock();
	return ret;
}

/**
 * nfs4_copy_delegation_stateid - Copy inode's state ID information
 * @dst: stateid data structure to fill in
 * @inode: inode to check
 * @flags: delegation type requirement
 *
 * Returns "true" and fills in "dst->data" if inode had a delegation,
 * otherwise "false" is returned.
 */
bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode,
		fmode_t flags)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	bool ret;

	flags &= FMODE_READ|FMODE_WRITE;
	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	ret = (delegation != NULL && (delegation->type & flags) == flags);
	if (ret) {
		nfs4_stateid_copy(dst, &delegation->stateid);
		nfs_mark_delegation_referenced(delegation);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * nfs4_delegation_flush_on_close - Check if we must flush file on close
 * @inode: inode to check
 *
 * This function checks the number of outstanding writes to the file
 * against the delegation 'space_limit' field to see if
 * the spec requires us to flush the file on close.
 */
bool nfs4_delegation_flush_on_close(const struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	bool ret = true;

	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation == NULL || !(delegation->type & FMODE_WRITE))
		goto out;
	if (nfsi->nrequests < delegation->pagemod_limit)
		ret = false;
out:
	rcu_read_unlock();
	return ret;
}
