This source file includes the following definitions:
- is_session_dead
- mark_session_dead_locked
- is_client_expired
- get_client_locked
- renew_client_locked
- put_client_renew_locked
- put_client_renew
- nfsd4_get_session_locked
- nfsd4_put_session_locked
- nfsd4_put_session
- find_blocked_lock
- find_or_allocate_block
- free_blocked_lock
- remove_blocked_locks
- nfsd4_cb_notify_lock_prepare
- nfsd4_cb_notify_lock_done
- nfsd4_cb_notify_lock_release
- nfs4_get_stateowner
- same_owner_str
- find_openstateowner_str_locked
- find_openstateowner_str
- opaque_hashval
- nfsd4_free_file_rcu
- put_nfs4_file
- __nfs4_get_fd
- find_writeable_file_locked
- find_writeable_file
- find_readable_file_locked
- find_readable_file
- find_any_file
- ownerstr_hashval
- nfsd_fh_hashval
- file_hashval
- __nfs4_file_get_access
- nfs4_file_get_access
- nfs4_file_check_deny
- __nfs4_file_put_access
- nfs4_file_put_access
- alloc_clnt_odstate
- hash_clnt_odstate_locked
- get_clnt_odstate
- put_clnt_odstate
- find_or_hash_clnt_odstate
- nfs4_alloc_stid
- nfs4_init_cp_state
- nfs4_free_cp_state
- nfs4_alloc_open_stateid
- nfs4_free_deleg
- delegation_blocked
- block_delegations
- alloc_init_deleg
- nfs4_put_stid
- nfs4_inc_and_copy_stateid
- put_deleg_file
- nfs4_unlock_deleg_lease
- destroy_unhashed_deleg
- nfs4_unhash_stid
- nfs4_delegation_exists
- hash_delegation_locked
- unhash_delegation_locked
- destroy_delegation
- revoke_delegation
- clientid_hashval
- clientstr_hashval
- bmap_to_share_mode
- set_access
- clear_access
- test_access
- set_deny
- clear_deny
- test_deny
- nfs4_access_to_omode
- recalculate_deny_mode
- reset_union_bmap_deny
- release_all_access
- nfs4_free_stateowner
- nfs4_put_stateowner
- unhash_ol_stateid
- nfs4_free_ol_stateid
- nfs4_free_lock_stateid
- put_ol_stateid_locked
- unhash_lock_stateid
- release_lock_stateid
- unhash_lockowner_locked
- free_ol_stateid_reaplist
- release_open_stateid_locks
- unhash_open_stateid
- release_open_stateid
- unhash_openowner_locked
- release_last_closed_stateid
- release_openowner
- hash_sessionid
- dump_sessionid
- dump_sessionid
- nfsd4_bump_seqid
- gen_sessionid
- free_session_slots
- slot_bytes
- nfsd4_get_drc_mem
- nfsd4_put_drc_mem
- alloc_session
- free_conn
- nfsd4_conn_lost
- alloc_conn
- __nfsd4_hash_conn
- nfsd4_hash_conn
- nfsd4_register_conn
- nfsd4_init_conn
- alloc_conn_from_crses
- nfsd4_del_conns
- __free_session
- free_session
- init_session
- __find_in_sessionid_hashtbl
- find_in_sessionid_hashtbl
- unhash_session
- STALE_CLIENTID
- alloc_client
- __free_client
- drop_client
- free_client
- unhash_client_locked
- unhash_client
- mark_client_expired_locked
- __destroy_client
- destroy_client
- inc_reclaim_complete
- expire_client
- copy_verf
- copy_clid
- copy_cred
- compare_blob
- same_verf
- same_clid
- groups_equal
- is_gss_cred
- same_creds
- svc_rqst_integrity_protected
- nfsd4_mach_creds_match
- gen_confirm
- gen_clid
- find_stateid_locked
- find_stateid_by_type
- get_nfsdfs_clp
- seq_quote_mem
- client_info_show
- client_info_open
- states_start
- states_next
- states_stop
- nfs4_show_superblock
- nfs4_show_owner
- nfs4_show_open
- nfs4_show_lock
- nfs4_show_deleg
- nfs4_show_layout
- states_show
- client_states_open
- client_opens_release
- force_expire_client
- client_ctl_write
- create_client
- add_clp_to_name_tree
- find_clp_in_name_tree
- add_to_unconfirmed
- move_to_confirmed
- find_client_in_id_table
- find_confirmed_client
- find_unconfirmed_client
- clp_used_exchangeid
- find_confirmed_client_by_name
- find_unconfirmed_client_by_name
- gen_callback
- nfsd4_store_cache_entry
- nfsd4_enc_sequence_replay
- nfsd4_replay_cache_entry
- nfsd4_set_ex_flags
- client_has_openowners
- client_has_state
- copy_impl_id
- nfsd4_exchange_id
- check_slot_seqid
- nfsd4_cache_create_session
- nfsd4_replay_create_session
- check_forechannel_attrs
- check_backchannel_attrs
- nfsd4_check_cb_sec
- nfsd4_create_session
- nfsd4_map_bcts_dir
- nfsd4_backchannel_ctl
- nfsd4_bind_conn_to_session
- nfsd4_compound_in_session
- nfsd4_destroy_session
- __nfsd4_find_conn
- nfsd4_sequence_check_conn
- nfsd4_session_too_many_ops
- nfsd4_request_too_big
- replay_matches_cache
- nfsd4_sequence
- nfsd4_sequence_done
- nfsd4_destroy_clientid
- nfsd4_reclaim_complete
- nfsd4_setclientid
- nfsd4_setclientid_confirm
- nfsd4_alloc_file
- nfsd4_init_file
- nfsd4_free_slabs
- nfsd4_init_slabs
- init_nfs4_replay
- nfsd4_cstate_assign_replay
- nfsd4_cstate_clear_replay
- alloc_stateowner
- hash_openowner
- nfs4_unhash_openowner
- nfs4_free_openowner
- nfsd4_find_existing_open
- nfsd4_verify_open_stid
- nfsd4_lock_ol_stateid
- nfsd4_find_and_lock_existing_open
- alloc_init_open_stateowner
- init_open_stateid
- move_to_close_lru
- find_file_locked
- find_file
- find_or_add_file
- nfs4_share_conflict
- nfsd4_cb_recall_prepare
- nfsd4_cb_recall_done
- nfsd4_cb_recall_release
- nfsd_break_one_deleg
- nfsd_break_deleg_cb
- nfsd_change_deleg_cb
- nfsd4_check_seqid
- lookup_clientid
- nfsd4_process_open1
- nfs4_check_delegmode
- share_access_to_flags
- find_deleg_stateid
- nfsd4_is_deleg_cur
- nfs4_check_deleg
- nfs4_access_to_access
- nfsd4_truncate
- nfs4_get_vfs_file
- nfs4_upgrade_open
- nfsd4_cb_channel_good
- nfs4_alloc_init_lease
- nfs4_set_delegation
- nfsd4_open_deleg_none_ext
- nfs4_open_delegation
- nfsd4_deleg_xgrade_none_ext
- nfsd4_process_open2
- nfsd4_cleanup_open_state
- nfsd4_renew
- nfsd4_end_grace
- clients_still_reclaiming
- nfs4_laundromat
- laundromat_main
- nfs4_check_fh
- access_permit_read
- access_permit_write
- nfs4_check_openmode
- check_special_stateids
- grace_disallows_io
- check_stateid_generation
- nfsd4_stid_check_stateid_generation
- nfsd4_check_openowner_confirmed
- nfsd4_validate_stateid
- nfsd4_lookup_stateid
- nfs4_find_file
- nfs4_check_olstateid
- nfs4_check_file
- nfs4_preprocess_stateid_op
- nfsd4_test_stateid
- nfsd4_free_lock_stateid
- nfsd4_free_stateid
- setlkflg
- nfs4_seqid_op_checks
- nfs4_preprocess_seqid_op
- nfs4_preprocess_confirmed_seqid_op
- nfsd4_open_confirm
- nfs4_stateid_downgrade_bit
- nfs4_stateid_downgrade
- nfsd4_open_downgrade
- nfsd4_close_open_stateid
- nfsd4_close
- nfsd4_delegreturn
- end_offset
- last_byte_offset
- nfs4_transform_lock_offset
- nfsd4_fl_get_owner
- nfsd4_fl_put_owner
- nfsd4_lm_notify
- nfs4_set_lock_denied
- find_lockowner_str_locked
- find_lockowner_str
- nfs4_unhash_lockowner
- nfs4_free_lockowner
- alloc_init_lock_stateowner
- find_lock_stateid
- init_lock_stateid
- find_or_create_lock_stateid
- check_lock_length
- get_lock_access
- lookup_or_create_lock_state
- nfsd4_lock
- nfsd_test_lock
- nfsd4_lockt
- nfsd4_locku
- check_for_locks
- nfsd4_release_lockowner
- alloc_reclaim
- nfs4_has_reclaimed_state
- nfs4_client_to_reclaim
- nfs4_remove_reclaim_record
- nfs4_release_reclaim
- nfsd4_find_reclaim_client
- nfs4_check_open_reclaim
- put_client
- nfsd_find_client
- nfsd_inject_print_clients
- nfsd_inject_forget_client
- nfsd_inject_forget_clients
- nfsd_print_count
- nfsd_inject_add_lock_to_list
- nfsd_foreach_client_lock
- nfsd_collect_client_locks
- nfsd_print_client_locks
- nfsd_inject_print_locks
- nfsd_reap_locks
- nfsd_inject_forget_client_locks
- nfsd_inject_forget_locks
- nfsd_foreach_client_openowner
- nfsd_print_client_openowners
- nfsd_collect_client_openowners
- nfsd_inject_print_openowners
- nfsd_reap_openowners
- nfsd_inject_forget_client_openowners
- nfsd_inject_forget_openowners
- nfsd_find_all_delegations
- nfsd_print_client_delegations
- nfsd_inject_print_delegations
- nfsd_forget_delegations
- nfsd_inject_forget_client_delegations
- nfsd_inject_forget_delegations
- nfsd_recall_delegations
- nfsd_inject_recall_client_delegations
- nfsd_inject_recall_delegations
- set_max_delegations
- nfs4_state_create_net
- nfs4_state_destroy_net
- nfs4_state_start_net
- nfs4_state_start
- nfs4_state_shutdown_net
- nfs4_state_shutdown
- get_stateid
- put_stateid
- clear_current_stateid
- nfsd4_set_opendowngradestateid
- nfsd4_set_openstateid
- nfsd4_set_closestateid
- nfsd4_set_lockstateid
- nfsd4_get_opendowngradestateid
- nfsd4_get_delegreturnstateid
- nfsd4_get_freestateid
- nfsd4_get_setattrstateid
- nfsd4_get_closestateid
- nfsd4_get_lockustateid
- nfsd4_get_readstateid
- nfsd4_get_writestateid
/* lines 1-34: copyright and license notice */
35 #include <linux/file.h>
36 #include <linux/fs.h>
37 #include <linux/slab.h>
38 #include <linux/namei.h>
39 #include <linux/swap.h>
40 #include <linux/pagemap.h>
41 #include <linux/ratelimit.h>
42 #include <linux/sunrpc/svcauth_gss.h>
43 #include <linux/sunrpc/addr.h>
44 #include <linux/jhash.h>
45 #include <linux/string_helpers.h>
46 #include "xdr4.h"
47 #include "xdr4cb.h"
48 #include "vfs.h"
49 #include "current_stateid.h"
50
51 #include "netns.h"
52 #include "pnfs.h"
53 #include "filecache.h"
54
55 #define NFSDDBG_FACILITY NFSDDBG_PROC
56
57 #define all_ones {{~0,~0},~0}
58 static const stateid_t one_stateid = {
59 .si_generation = ~0,
60 .si_opaque = all_ones,
61 };
62 static const stateid_t zero_stateid = {
63 /* all fields zeroed */
64 };
65 static const stateid_t currentstateid = {
66 .si_generation = 1,
67 };
68 static const stateid_t close_stateid = {
69 .si_generation = 0xffffffffU,
70 };
71
72 static u64 current_sessionid = 1;
73
74 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
75 #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
76 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
77 #define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
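/*
 * The all-zeros and all-ones stateids are the two "special" stateids the
 * NFSv4 protocol reserves for anonymous and READ-bypass access, and a
 * stateid whose seqid is 1 with an all-zero "other" field is the NFSv4.1
 * "current stateid" indicator.  close_stateid (generation 0xffffffff)
 * appears to be a server-local sentinel written back after CLOSE so that
 * retransmitted operations carrying the closed stateid can be recognized.
 */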
78
79
80 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
81 static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
82 void nfsd4_end_grace(struct nfsd_net *nn);
83
84
85
86
87
88
89
90
91 static DEFINE_SPINLOCK(state_lock);
92
93 enum nfsd4_st_mutex_lock_subclass {
94 OPEN_STATEID_MUTEX = 0,
95 LOCK_STATEID_MUTEX = 1,
96 };
97
98
99
100
101
102 static DECLARE_WAIT_QUEUE_HEAD(close_wq);
103
104
105
106
107
108
109 static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);
110
111 static struct kmem_cache *client_slab;
112 static struct kmem_cache *openowner_slab;
113 static struct kmem_cache *lockowner_slab;
114 static struct kmem_cache *file_slab;
115 static struct kmem_cache *stateid_slab;
116 static struct kmem_cache *deleg_slab;
117 static struct kmem_cache *odstate_slab;
118
119 static void free_session(struct nfsd4_session *);
120
121 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
122 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
123
124 static bool is_session_dead(struct nfsd4_session *ses)
125 {
126 return ses->se_flags & NFS4_SESSION_DEAD;
127 }
128
129 static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
130 {
131 if (atomic_read(&ses->se_ref) > ref_held_by_me)
132 return nfserr_jukebox;
133 ses->se_flags |= NFS4_SESSION_DEAD;
134 return nfs_ok;
135 }
136
137 static bool is_client_expired(struct nfs4_client *clp)
138 {
139 return clp->cl_time == 0;
140 }
141
142 static __be32 get_client_locked(struct nfs4_client *clp)
143 {
144 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
145
146 lockdep_assert_held(&nn->client_lock);
147
148 if (is_client_expired(clp))
149 return nfserr_expired;
150 atomic_inc(&clp->cl_rpc_users);
151 return nfs_ok;
152 }
153
154
155 static inline void
156 renew_client_locked(struct nfs4_client *clp)
157 {
158 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
159
160 if (is_client_expired(clp)) {
161 WARN_ON(1);
162 printk("%s: client (clientid %08x/%08x) already expired\n",
163 __func__,
164 clp->cl_clientid.cl_boot,
165 clp->cl_clientid.cl_id);
166 return;
167 }
168
169 dprintk("renewing client (clientid %08x/%08x)\n",
170 clp->cl_clientid.cl_boot,
171 clp->cl_clientid.cl_id);
172 list_move_tail(&clp->cl_lru, &nn->client_lru);
173 clp->cl_time = get_seconds();
174 }
175
176 static void put_client_renew_locked(struct nfs4_client *clp)
177 {
178 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
179
180 lockdep_assert_held(&nn->client_lock);
181
182 if (!atomic_dec_and_test(&clp->cl_rpc_users))
183 return;
184 if (!is_client_expired(clp))
185 renew_client_locked(clp);
186 else
187 wake_up_all(&expiry_wq);
188 }
189
190 static void put_client_renew(struct nfs4_client *clp)
191 {
192 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
193
194 if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
195 return;
196 if (!is_client_expired(clp))
197 renew_client_locked(clp);
198 else
199 wake_up_all(&expiry_wq);
200 spin_unlock(&nn->client_lock);
201 }
202
203 static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
204 {
205 __be32 status;
206
207 if (is_session_dead(ses))
208 return nfserr_badsession;
209 status = get_client_locked(ses->se_client);
210 if (status)
211 return status;
212 atomic_inc(&ses->se_ref);
213 return nfs_ok;
214 }
215
216 static void nfsd4_put_session_locked(struct nfsd4_session *ses)
217 {
218 struct nfs4_client *clp = ses->se_client;
219 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
220
221 lockdep_assert_held(&nn->client_lock);
222
223 if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
224 free_session(ses);
225 put_client_renew_locked(clp);
226 }
227
228 static void nfsd4_put_session(struct nfsd4_session *ses)
229 {
230 struct nfs4_client *clp = ses->se_client;
231 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
232
233 spin_lock(&nn->client_lock);
234 nfsd4_put_session_locked(ses);
235 spin_unlock(&nn->client_lock);
236 }
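/*
 * Session reference counting: taking a session reference (see
 * nfsd4_get_session_locked above) also takes a client reference, so
 * dropping one here both frees the session once it is dead and
 * unreferenced, and either renews the client's lease or, if the client
 * has already expired, wakes anyone waiting on expiry_wq.
 */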
237
238 static struct nfsd4_blocked_lock *
239 find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
240 struct nfsd_net *nn)
241 {
242 struct nfsd4_blocked_lock *cur, *found = NULL;
243
244 spin_lock(&nn->blocked_locks_lock);
245 list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
246 if (fh_match(fh, &cur->nbl_fh)) {
247 list_del_init(&cur->nbl_list);
248 list_del_init(&cur->nbl_lru);
249 found = cur;
250 break;
251 }
252 }
253 spin_unlock(&nn->blocked_locks_lock);
254 if (found)
255 locks_delete_block(&found->nbl_lock);
256 return found;
257 }
258
259 static struct nfsd4_blocked_lock *
260 find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
261 struct nfsd_net *nn)
262 {
263 struct nfsd4_blocked_lock *nbl;
264
265 nbl = find_blocked_lock(lo, fh, nn);
266 if (!nbl) {
267 nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
268 if (nbl) {
269 INIT_LIST_HEAD(&nbl->nbl_list);
270 INIT_LIST_HEAD(&nbl->nbl_lru);
271 fh_copy_shallow(&nbl->nbl_fh, fh);
272 locks_init_lock(&nbl->nbl_lock);
273 nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
274 &nfsd4_cb_notify_lock_ops,
275 NFSPROC4_CLNT_CB_NOTIFY_LOCK);
276 }
277 }
278 return nbl;
279 }
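/*
 * Blocked-lock bookkeeping for CB_NOTIFY_LOCK: when a LOCK request would
 * block, an nfsd4_blocked_lock is kept on the lockowner's lo_blocked list
 * (and, while queued, on a per-net LRU protected by blocked_locks_lock)
 * with a callback set up so the client can be notified once the
 * conflicting lock goes away.  find_blocked_lock() removes and returns a
 * matching entry; find_or_allocate_block() reuses one or allocates fresh.
 */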
280
281 static void
282 free_blocked_lock(struct nfsd4_blocked_lock *nbl)
283 {
284 locks_delete_block(&nbl->nbl_lock);
285 locks_release_private(&nbl->nbl_lock);
286 kfree(nbl);
287 }
288
289 static void
290 remove_blocked_locks(struct nfs4_lockowner *lo)
291 {
292 struct nfs4_client *clp = lo->lo_owner.so_client;
293 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
294 struct nfsd4_blocked_lock *nbl;
295 LIST_HEAD(reaplist);
296
297
298 spin_lock(&nn->blocked_locks_lock);
299 while (!list_empty(&lo->lo_blocked)) {
300 nbl = list_first_entry(&lo->lo_blocked,
301 struct nfsd4_blocked_lock,
302 nbl_list);
303 list_del_init(&nbl->nbl_list);
304 list_move(&nbl->nbl_lru, &reaplist);
305 }
306 spin_unlock(&nn->blocked_locks_lock);
307
308
309 while (!list_empty(&reaplist)) {
310 nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
311 nbl_lru);
312 list_del_init(&nbl->nbl_lru);
313 free_blocked_lock(nbl);
314 }
315 }
316
317 static void
318 nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
319 {
320 struct nfsd4_blocked_lock *nbl = container_of(cb,
321 struct nfsd4_blocked_lock, nbl_cb);
322 locks_delete_block(&nbl->nbl_lock);
323 }
324
325 static int
326 nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
327 {
328
329
330
331
332
333 switch (task->tk_status) {
334 case -NFS4ERR_DELAY:
335 rpc_delay(task, 1 * HZ);
336 return 0;
337 default:
338 return 1;
339 }
340 }
341
342 static void
343 nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
344 {
345 struct nfsd4_blocked_lock *nbl = container_of(cb,
346 struct nfsd4_blocked_lock, nbl_cb);
347
348 free_blocked_lock(nbl);
349 }
350
351 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
352 .prepare = nfsd4_cb_notify_lock_prepare,
353 .done = nfsd4_cb_notify_lock_done,
354 .release = nfsd4_cb_notify_lock_release,
355 };
356
357 static inline struct nfs4_stateowner *
358 nfs4_get_stateowner(struct nfs4_stateowner *sop)
359 {
360 atomic_inc(&sop->so_count);
361 return sop;
362 }
363
364 static int
365 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
366 {
367 return (sop->so_owner.len == owner->len) &&
368 0 == memcmp(sop->so_owner.data, owner->data, owner->len);
369 }
370
371 static struct nfs4_openowner *
372 find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
373 struct nfs4_client *clp)
374 {
375 struct nfs4_stateowner *so;
376
377 lockdep_assert_held(&clp->cl_lock);
378
379 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
380 so_strhash) {
381 if (!so->so_is_open_owner)
382 continue;
383 if (same_owner_str(so, &open->op_owner))
384 return openowner(nfs4_get_stateowner(so));
385 }
386 return NULL;
387 }
388
389 static struct nfs4_openowner *
390 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
391 struct nfs4_client *clp)
392 {
393 struct nfs4_openowner *oo;
394
395 spin_lock(&clp->cl_lock);
396 oo = find_openstateowner_str_locked(hashval, open, clp);
397 spin_unlock(&clp->cl_lock);
398 return oo;
399 }
400
401 static inline u32
402 opaque_hashval(const void *ptr, int nbytes)
403 {
404 unsigned char *cptr = (unsigned char *) ptr;
405
406 u32 x = 0;
407 while (nbytes--) {
408 x *= 37;
409 x += *cptr++;
410 }
411 return x;
412 }
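/*
 * opaque_hashval() is a simple multiply-by-37 rolling hash over the raw
 * owner/client bytes.  For example, hashing the two bytes 'a' (0x61) and
 * 'b' (0x62) gives (0 * 37 + 0x61) = 97, then (97 * 37 + 0x62) = 3687;
 * callers mask the result down to their hash-table size.
 */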
413
414 static void nfsd4_free_file_rcu(struct rcu_head *rcu)
415 {
416 struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
417
418 kmem_cache_free(file_slab, fp);
419 }
420
421 void
422 put_nfs4_file(struct nfs4_file *fi)
423 {
424 might_lock(&state_lock);
425
426 if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
427 hlist_del_rcu(&fi->fi_hash);
428 spin_unlock(&state_lock);
429 WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
430 WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
431 call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
432 }
433 }
434
435 static struct nfsd_file *
436 __nfs4_get_fd(struct nfs4_file *f, int oflag)
437 {
438 if (f->fi_fds[oflag])
439 return nfsd_file_get(f->fi_fds[oflag]);
440 return NULL;
441 }
442
443 static struct nfsd_file *
444 find_writeable_file_locked(struct nfs4_file *f)
445 {
446 struct nfsd_file *ret;
447
448 lockdep_assert_held(&f->fi_lock);
449
450 ret = __nfs4_get_fd(f, O_WRONLY);
451 if (!ret)
452 ret = __nfs4_get_fd(f, O_RDWR);
453 return ret;
454 }
455
456 static struct nfsd_file *
457 find_writeable_file(struct nfs4_file *f)
458 {
459 struct nfsd_file *ret;
460
461 spin_lock(&f->fi_lock);
462 ret = find_writeable_file_locked(f);
463 spin_unlock(&f->fi_lock);
464
465 return ret;
466 }
467
468 static struct nfsd_file *
469 find_readable_file_locked(struct nfs4_file *f)
470 {
471 struct nfsd_file *ret;
472
473 lockdep_assert_held(&f->fi_lock);
474
475 ret = __nfs4_get_fd(f, O_RDONLY);
476 if (!ret)
477 ret = __nfs4_get_fd(f, O_RDWR);
478 return ret;
479 }
480
481 static struct nfsd_file *
482 find_readable_file(struct nfs4_file *f)
483 {
484 struct nfsd_file *ret;
485
486 spin_lock(&f->fi_lock);
487 ret = find_readable_file_locked(f);
488 spin_unlock(&f->fi_lock);
489
490 return ret;
491 }
492
493 struct nfsd_file *
494 find_any_file(struct nfs4_file *f)
495 {
496 struct nfsd_file *ret;
497
498 spin_lock(&f->fi_lock);
499 ret = __nfs4_get_fd(f, O_RDWR);
500 if (!ret) {
501 ret = __nfs4_get_fd(f, O_WRONLY);
502 if (!ret)
503 ret = __nfs4_get_fd(f, O_RDONLY);
504 }
505 spin_unlock(&f->fi_lock);
506 return ret;
507 }
508
509 static atomic_long_t num_delegations;
510 unsigned long max_delegations;
511
512
513
514
515
516
517 #define OWNER_HASH_BITS 8
518 #define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
519 #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
520
521 static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
522 {
523 unsigned int ret;
524
525 ret = opaque_hashval(ownername->data, ownername->len);
526 return ret & OWNER_HASH_MASK;
527 }
528
529
530 #define FILE_HASH_BITS 8
531 #define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
532
533 static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
534 {
535 return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
536 }
537
538 static unsigned int file_hashval(struct knfsd_fh *fh)
539 {
540 return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
541 }
542
543 static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
544
545 static void
546 __nfs4_file_get_access(struct nfs4_file *fp, u32 access)
547 {
548 lockdep_assert_held(&fp->fi_lock);
549
550 if (access & NFS4_SHARE_ACCESS_WRITE)
551 atomic_inc(&fp->fi_access[O_WRONLY]);
552 if (access & NFS4_SHARE_ACCESS_READ)
553 atomic_inc(&fp->fi_access[O_RDONLY]);
554 }
555
556 static __be32
557 nfs4_file_get_access(struct nfs4_file *fp, u32 access)
558 {
559 lockdep_assert_held(&fp->fi_lock);
560
561
562 if (access & ~NFS4_SHARE_ACCESS_BOTH)
563 return nfserr_inval;
564
565
566 if ((access & fp->fi_share_deny) != 0)
567 return nfserr_share_denied;
568
569 __nfs4_file_get_access(fp, access);
570 return nfs_ok;
571 }
572
573 static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
574 {
575
576 if (deny) {
577
578 if (deny & ~NFS4_SHARE_DENY_BOTH)
579 return nfserr_inval;
580
581 if ((deny & NFS4_SHARE_DENY_READ) &&
582 atomic_read(&fp->fi_access[O_RDONLY]))
583 return nfserr_share_denied;
584
585 if ((deny & NFS4_SHARE_DENY_WRITE) &&
586 atomic_read(&fp->fi_access[O_WRONLY]))
587 return nfserr_share_denied;
588 }
589 return nfs_ok;
590 }
591
592 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
593 {
594 might_lock(&fp->fi_lock);
595
596 if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
597 struct nfsd_file *f1 = NULL;
598 struct nfsd_file *f2 = NULL;
599
600 swap(f1, fp->fi_fds[oflag]);
601 if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
602 swap(f2, fp->fi_fds[O_RDWR]);
603 spin_unlock(&fp->fi_lock);
604 if (f1)
605 nfsd_file_put(f1);
606 if (f2)
607 nfsd_file_put(f2);
608 }
609 }
610
611 static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
612 {
613 WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);
614
615 if (access & NFS4_SHARE_ACCESS_WRITE)
616 __nfs4_file_put_access(fp, O_WRONLY);
617 if (access & NFS4_SHARE_ACCESS_READ)
618 __nfs4_file_put_access(fp, O_RDONLY);
619 }
620
621
622
623
624
625
626
627
628 static struct nfs4_clnt_odstate *
629 alloc_clnt_odstate(struct nfs4_client *clp)
630 {
631 struct nfs4_clnt_odstate *co;
632
633 co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
634 if (co) {
635 co->co_client = clp;
636 refcount_set(&co->co_odcount, 1);
637 }
638 return co;
639 }
640
641 static void
642 hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
643 {
644 struct nfs4_file *fp = co->co_file;
645
646 lockdep_assert_held(&fp->fi_lock);
647 list_add(&co->co_perfile, &fp->fi_clnt_odstate);
648 }
649
650 static inline void
651 get_clnt_odstate(struct nfs4_clnt_odstate *co)
652 {
653 if (co)
654 refcount_inc(&co->co_odcount);
655 }
656
657 static void
658 put_clnt_odstate(struct nfs4_clnt_odstate *co)
659 {
660 struct nfs4_file *fp;
661
662 if (!co)
663 return;
664
665 fp = co->co_file;
666 if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
667 list_del(&co->co_perfile);
668 spin_unlock(&fp->fi_lock);
669
670 nfsd4_return_all_file_layouts(co->co_client, fp);
671 kmem_cache_free(odstate_slab, co);
672 }
673 }
674
675 static struct nfs4_clnt_odstate *
676 find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
677 {
678 struct nfs4_clnt_odstate *co;
679 struct nfs4_client *cl;
680
681 if (!new)
682 return NULL;
683
684 cl = new->co_client;
685
686 spin_lock(&fp->fi_lock);
687 list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
688 if (co->co_client == cl) {
689 get_clnt_odstate(co);
690 goto out;
691 }
692 }
693 co = new;
694 co->co_file = fp;
695 hash_clnt_odstate_locked(new);
696 out:
697 spin_unlock(&fp->fi_lock);
698 return co;
699 }
700
701 struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
702 void (*sc_free)(struct nfs4_stid *))
703 {
704 struct nfs4_stid *stid;
705 int new_id;
706
707 stid = kmem_cache_zalloc(slab, GFP_KERNEL);
708 if (!stid)
709 return NULL;
710
711 idr_preload(GFP_KERNEL);
712 spin_lock(&cl->cl_lock);
713
714 new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
715 spin_unlock(&cl->cl_lock);
716 idr_preload_end();
717 if (new_id < 0)
718 goto out_free;
719
720 stid->sc_free = sc_free;
721 stid->sc_client = cl;
722 stid->sc_stateid.si_opaque.so_id = new_id;
723 stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
724
725 refcount_set(&stid->sc_count, 1);
726 spin_lock_init(&stid->sc_lock);
727
728
729
730
731
732
733
734
735
736
737 return stid;
738 out_free:
739 kmem_cache_free(slab, stid);
740 return NULL;
741 }
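/*
 * Stateid allocation notes: idr_preload()/GFP_NOWAIT lets the id be
 * allocated while holding cl_lock, drawing on memory preallocated with
 * GFP_KERNEL outside the lock.  idr_alloc_cyclic() hands out ids starting
 * after the most recently allocated one, so a freed stateid's "other"
 * value is unlikely to be reused immediately.  The returned stid carries
 * one reference (sc_count == 1) owned by the caller.
 */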
742
743
744
745
746 int nfs4_init_cp_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
747 {
748 int new_id;
749
750 idr_preload(GFP_KERNEL);
751 spin_lock(&nn->s2s_cp_lock);
752 new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, copy, 0, 0, GFP_NOWAIT);
753 spin_unlock(&nn->s2s_cp_lock);
754 idr_preload_end();
755 if (new_id < 0)
756 return 0;
757 copy->cp_stateid.si_opaque.so_id = new_id;
758 copy->cp_stateid.si_opaque.so_clid.cl_boot = nn->boot_time;
759 copy->cp_stateid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
760 return 1;
761 }
762
763 void nfs4_free_cp_state(struct nfsd4_copy *copy)
764 {
765 struct nfsd_net *nn;
766
767 nn = net_generic(copy->cp_clp->net, nfsd_net_id);
768 spin_lock(&nn->s2s_cp_lock);
769 idr_remove(&nn->s2s_cp_stateids, copy->cp_stateid.si_opaque.so_id);
770 spin_unlock(&nn->s2s_cp_lock);
771 }
772
773 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
774 {
775 struct nfs4_stid *stid;
776
777 stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
778 if (!stid)
779 return NULL;
780
781 return openlockstateid(stid);
782 }
783
784 static void nfs4_free_deleg(struct nfs4_stid *stid)
785 {
786 kmem_cache_free(deleg_slab, stid);
787 atomic_long_dec(&num_delegations);
788 }
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808 static DEFINE_SPINLOCK(blocked_delegations_lock);
809 static struct bloom_pair {
810 int entries, old_entries;
811 time_t swap_time;
812 int new;
813 DECLARE_BITMAP(set[2], 256);
814 } blocked_delegations;
815
816 static int delegation_blocked(struct knfsd_fh *fh)
817 {
818 u32 hash;
819 struct bloom_pair *bd = &blocked_delegations;
820
821 if (bd->entries == 0)
822 return 0;
823 if (seconds_since_boot() - bd->swap_time > 30) {
824 spin_lock(&blocked_delegations_lock);
825 if (seconds_since_boot() - bd->swap_time > 30) {
826 bd->entries -= bd->old_entries;
827 bd->old_entries = bd->entries;
828 memset(bd->set[bd->new], 0,
829 sizeof(bd->set[0]));
830 bd->new = 1-bd->new;
831 bd->swap_time = seconds_since_boot();
832 }
833 spin_unlock(&blocked_delegations_lock);
834 }
835 hash = jhash(&fh->fh_base, fh->fh_size, 0);
836 if (test_bit(hash&255, bd->set[0]) &&
837 test_bit((hash>>8)&255, bd->set[0]) &&
838 test_bit((hash>>16)&255, bd->set[0]))
839 return 1;
840
841 if (test_bit(hash&255, bd->set[1]) &&
842 test_bit((hash>>8)&255, bd->set[1]) &&
843 test_bit((hash>>16)&255, bd->set[1]))
844 return 1;
845
846 return 0;
847 }
848
849 static void block_delegations(struct knfsd_fh *fh)
850 {
851 u32 hash;
852 struct bloom_pair *bd = &blocked_delegations;
853
854 hash = jhash(&fh->fh_base, fh->fh_size, 0);
855
856 spin_lock(&blocked_delegations_lock);
857 __set_bit(hash&255, bd->set[bd->new]);
858 __set_bit((hash>>8)&255, bd->set[bd->new]);
859 __set_bit((hash>>16)&255, bd->set[bd->new]);
860 if (bd->entries == 0)
861 bd->swap_time = seconds_since_boot();
862 bd->entries += 1;
863 spin_unlock(&blocked_delegations_lock);
864 }
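/*
 * The bloom_pair above is a small two-generation Bloom filter over file
 * handles whose delegations were recently broken.  Three bits per handle
 * are derived from one jhash (bits 0-7, 8-15 and 16-23), and insertions
 * go into the "new" set; roughly every 30 seconds the older set is
 * cleared and the roles swap, so an entry blocks new delegations for
 * about 30-60 seconds.  Lookups are intentionally lockless; a racy or
 * false-positive hit only means a delegation is not handed out, which is
 * always safe.
 */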
865
866 static struct nfs4_delegation *
867 alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
868 struct svc_fh *current_fh,
869 struct nfs4_clnt_odstate *odstate)
870 {
871 struct nfs4_delegation *dp;
872 long n;
873
874 dprintk("NFSD alloc_init_deleg\n");
875 n = atomic_long_inc_return(&num_delegations);
876 if (n < 0 || n > max_delegations)
877 goto out_dec;
878 if (delegation_blocked(&current_fh->fh_handle))
879 goto out_dec;
880 dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
881 if (dp == NULL)
882 goto out_dec;
883
884
885
886
887
888
889 dp->dl_stid.sc_stateid.si_generation = 1;
890 INIT_LIST_HEAD(&dp->dl_perfile);
891 INIT_LIST_HEAD(&dp->dl_perclnt);
892 INIT_LIST_HEAD(&dp->dl_recall_lru);
893 dp->dl_clnt_odstate = odstate;
894 get_clnt_odstate(odstate);
895 dp->dl_type = NFS4_OPEN_DELEGATE_READ;
896 dp->dl_retries = 1;
897 nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
898 &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
899 get_nfs4_file(fp);
900 dp->dl_stid.sc_file = fp;
901 return dp;
902 out_dec:
903 atomic_long_dec(&num_delegations);
904 return NULL;
905 }
906
907 void
908 nfs4_put_stid(struct nfs4_stid *s)
909 {
910 struct nfs4_file *fp = s->sc_file;
911 struct nfs4_client *clp = s->sc_client;
912
913 might_lock(&clp->cl_lock);
914
915 if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
916 wake_up_all(&close_wq);
917 return;
918 }
919 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
920 spin_unlock(&clp->cl_lock);
921 s->sc_free(s);
922 if (fp)
923 put_nfs4_file(fp);
924 }
925
926 void
927 nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
928 {
929 stateid_t *src = &stid->sc_stateid;
930
931 spin_lock(&stid->sc_lock);
932 if (unlikely(++src->si_generation == 0))
933 src->si_generation = 1;
934 memcpy(dst, src, sizeof(*dst));
935 spin_unlock(&stid->sc_lock);
936 }
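/*
 * nfs4_inc_and_copy_stateid() bumps the stateid's seqid under sc_lock and
 * skips the value 0 on wraparound, since a seqid of zero has a special
 * meaning to NFSv4.1 clients (it matches the most recent seqid rather
 * than a specific one).
 */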
937
938 static void put_deleg_file(struct nfs4_file *fp)
939 {
940 struct nfsd_file *nf = NULL;
941
942 spin_lock(&fp->fi_lock);
943 if (--fp->fi_delegees == 0)
944 swap(nf, fp->fi_deleg_file);
945 spin_unlock(&fp->fi_lock);
946
947 if (nf)
948 nfsd_file_put(nf);
949 }
950
951 static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
952 {
953 struct nfs4_file *fp = dp->dl_stid.sc_file;
954 struct nfsd_file *nf = fp->fi_deleg_file;
955
956 WARN_ON_ONCE(!fp->fi_delegees);
957
958 vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
959 put_deleg_file(fp);
960 }
961
962 static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
963 {
964 put_clnt_odstate(dp->dl_clnt_odstate);
965 nfs4_unlock_deleg_lease(dp);
966 nfs4_put_stid(&dp->dl_stid);
967 }
968
969 void nfs4_unhash_stid(struct nfs4_stid *s)
970 {
971 s->sc_type = 0;
972 }
973
974
975
976
977
978
979
980
981
982
983 static bool
984 nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
985 {
986 struct nfs4_delegation *searchdp = NULL;
987 struct nfs4_client *searchclp = NULL;
988
989 lockdep_assert_held(&state_lock);
990 lockdep_assert_held(&fp->fi_lock);
991
992 list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
993 searchclp = searchdp->dl_stid.sc_client;
994 if (clp == searchclp) {
995 return true;
996 }
997 }
998 return false;
999 }
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014 static int
1015 hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
1016 {
1017 struct nfs4_client *clp = dp->dl_stid.sc_client;
1018
1019 lockdep_assert_held(&state_lock);
1020 lockdep_assert_held(&fp->fi_lock);
1021
1022 if (nfs4_delegation_exists(clp, fp))
1023 return -EAGAIN;
1024 refcount_inc(&dp->dl_stid.sc_count);
1025 dp->dl_stid.sc_type = NFS4_DELEG_STID;
1026 list_add(&dp->dl_perfile, &fp->fi_delegations);
1027 list_add(&dp->dl_perclnt, &clp->cl_delegations);
1028 return 0;
1029 }
1030
1031 static bool
1032 unhash_delegation_locked(struct nfs4_delegation *dp)
1033 {
1034 struct nfs4_file *fp = dp->dl_stid.sc_file;
1035
1036 lockdep_assert_held(&state_lock);
1037
1038 if (list_empty(&dp->dl_perfile))
1039 return false;
1040
1041 dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
1042
1043 ++dp->dl_time;
1044 spin_lock(&fp->fi_lock);
1045 list_del_init(&dp->dl_perclnt);
1046 list_del_init(&dp->dl_recall_lru);
1047 list_del_init(&dp->dl_perfile);
1048 spin_unlock(&fp->fi_lock);
1049 return true;
1050 }
1051
1052 static void destroy_delegation(struct nfs4_delegation *dp)
1053 {
1054 bool unhashed;
1055
1056 spin_lock(&state_lock);
1057 unhashed = unhash_delegation_locked(dp);
1058 spin_unlock(&state_lock);
1059 if (unhashed)
1060 destroy_unhashed_deleg(dp);
1061 }
1062
1063 static void revoke_delegation(struct nfs4_delegation *dp)
1064 {
1065 struct nfs4_client *clp = dp->dl_stid.sc_client;
1066
1067 WARN_ON(!list_empty(&dp->dl_recall_lru));
1068
1069 if (clp->cl_minorversion) {
1070 dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
1071 refcount_inc(&dp->dl_stid.sc_count);
1072 spin_lock(&clp->cl_lock);
1073 list_add(&dp->dl_recall_lru, &clp->cl_revoked);
1074 spin_unlock(&clp->cl_lock);
1075 }
1076 destroy_unhashed_deleg(dp);
1077 }
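/*
 * Revocation behaviour differs by minor version: for NFSv4.1+ clients the
 * delegation stateid is retyped to NFS4_REVOKED_DELEG_STID and parked on
 * cl_revoked with an extra reference, so the client can later detect the
 * revocation and release it with FREE_STATEID.  NFSv4.0 has no such
 * mechanism, so the delegation is simply torn down.
 */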
1078
1079
1080
1081
1082
1083 static unsigned int clientid_hashval(u32 id)
1084 {
1085 return id & CLIENT_HASH_MASK;
1086 }
1087
1088 static unsigned int clientstr_hashval(struct xdr_netobj name)
1089 {
1090 return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
1091 }
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111 static unsigned int
1112 bmap_to_share_mode(unsigned long bmap) {
1113 int i;
1114 unsigned int access = 0;
1115
1116 for (i = 1; i < 4; i++) {
1117 if (test_bit(i, &bmap))
1118 access |= i;
1119 }
1120 return access;
1121 }
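/*
 * st_access_bmap / st_deny_bmap record which share values (1 = READ,
 * 2 = WRITE, 3 = BOTH) have been seen on OPENs against this stateid;
 * bmap_to_share_mode() folds them back into a single share mode.  For
 * example, a bitmap with bits 1 and 2 set yields 1 | 2 = 3, i.e.
 * NFS4_SHARE_ACCESS_BOTH (the same encoding is used for deny modes).
 */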
1122
1123
1124 static inline void
1125 set_access(u32 access, struct nfs4_ol_stateid *stp)
1126 {
1127 unsigned char mask = 1 << access;
1128
1129 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
1130 stp->st_access_bmap |= mask;
1131 }
1132
1133
1134 static inline void
1135 clear_access(u32 access, struct nfs4_ol_stateid *stp)
1136 {
1137 unsigned char mask = 1 << access;
1138
1139 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
1140 stp->st_access_bmap &= ~mask;
1141 }
1142
1143
1144 static inline bool
1145 test_access(u32 access, struct nfs4_ol_stateid *stp)
1146 {
1147 unsigned char mask = 1 << access;
1148
1149 return (bool)(stp->st_access_bmap & mask);
1150 }
1151
1152
1153 static inline void
1154 set_deny(u32 deny, struct nfs4_ol_stateid *stp)
1155 {
1156 unsigned char mask = 1 << deny;
1157
1158 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
1159 stp->st_deny_bmap |= mask;
1160 }
1161
1162
1163 static inline void
1164 clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
1165 {
1166 unsigned char mask = 1 << deny;
1167
1168 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
1169 stp->st_deny_bmap &= ~mask;
1170 }
1171
1172
1173 static inline bool
1174 test_deny(u32 deny, struct nfs4_ol_stateid *stp)
1175 {
1176 unsigned char mask = 1 << deny;
1177
1178 return (bool)(stp->st_deny_bmap & mask);
1179 }
1180
1181 static int nfs4_access_to_omode(u32 access)
1182 {
1183 switch (access & NFS4_SHARE_ACCESS_BOTH) {
1184 case NFS4_SHARE_ACCESS_READ:
1185 return O_RDONLY;
1186 case NFS4_SHARE_ACCESS_WRITE:
1187 return O_WRONLY;
1188 case NFS4_SHARE_ACCESS_BOTH:
1189 return O_RDWR;
1190 }
1191 WARN_ON_ONCE(1);
1192 return O_RDONLY;
1193 }
1194
1195
1196
1197
1198
1199 static void
1200 recalculate_deny_mode(struct nfs4_file *fp)
1201 {
1202 struct nfs4_ol_stateid *stp;
1203
1204 spin_lock(&fp->fi_lock);
1205 fp->fi_share_deny = 0;
1206 list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
1207 fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
1208 spin_unlock(&fp->fi_lock);
1209 }
1210
1211 static void
1212 reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
1213 {
1214 int i;
1215 bool change = false;
1216
1217 for (i = 1; i < 4; i++) {
1218 if ((i & deny) != i) {
1219 change = true;
1220 clear_deny(i, stp);
1221 }
1222 }
1223
1224
1225 if (change)
1226 recalculate_deny_mode(stp->st_stid.sc_file);
1227 }
1228
1229
1230 static void
1231 release_all_access(struct nfs4_ol_stateid *stp)
1232 {
1233 int i;
1234 struct nfs4_file *fp = stp->st_stid.sc_file;
1235
1236 if (fp && stp->st_deny_bmap != 0)
1237 recalculate_deny_mode(fp);
1238
1239 for (i = 1; i < 4; i++) {
1240 if (test_access(i, stp))
1241 nfs4_file_put_access(stp->st_stid.sc_file, i);
1242 clear_access(i, stp);
1243 }
1244 }
1245
1246 static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
1247 {
1248 kfree(sop->so_owner.data);
1249 sop->so_ops->so_free(sop);
1250 }
1251
1252 static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
1253 {
1254 struct nfs4_client *clp = sop->so_client;
1255
1256 might_lock(&clp->cl_lock);
1257
1258 if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
1259 return;
1260 sop->so_ops->so_unhash(sop);
1261 spin_unlock(&clp->cl_lock);
1262 nfs4_free_stateowner(sop);
1263 }
1264
1265 static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
1266 {
1267 struct nfs4_file *fp = stp->st_stid.sc_file;
1268
1269 lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
1270
1271 if (list_empty(&stp->st_perfile))
1272 return false;
1273
1274 spin_lock(&fp->fi_lock);
1275 list_del_init(&stp->st_perfile);
1276 spin_unlock(&fp->fi_lock);
1277 list_del(&stp->st_perstateowner);
1278 return true;
1279 }
1280
1281 static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
1282 {
1283 struct nfs4_ol_stateid *stp = openlockstateid(stid);
1284
1285 put_clnt_odstate(stp->st_clnt_odstate);
1286 release_all_access(stp);
1287 if (stp->st_stateowner)
1288 nfs4_put_stateowner(stp->st_stateowner);
1289 kmem_cache_free(stateid_slab, stid);
1290 }
1291
1292 static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
1293 {
1294 struct nfs4_ol_stateid *stp = openlockstateid(stid);
1295 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
1296 struct nfsd_file *nf;
1297
1298 nf = find_any_file(stp->st_stid.sc_file);
1299 if (nf) {
1300 get_file(nf->nf_file);
1301 filp_close(nf->nf_file, (fl_owner_t)lo);
1302 nfsd_file_put(nf);
1303 }
1304 nfs4_free_ol_stateid(stid);
1305 }
1306
1307
1308
1309
1310
1311
1312 static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
1313 struct list_head *reaplist)
1314 {
1315 struct nfs4_stid *s = &stp->st_stid;
1316 struct nfs4_client *clp = s->sc_client;
1317
1318 lockdep_assert_held(&clp->cl_lock);
1319
1320 WARN_ON_ONCE(!list_empty(&stp->st_locks));
1321
1322 if (!refcount_dec_and_test(&s->sc_count)) {
1323 wake_up_all(&close_wq);
1324 return;
1325 }
1326
1327 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1328 list_add(&stp->st_locks, reaplist);
1329 }
1330
1331 static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
1332 {
1333 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1334
1335 list_del_init(&stp->st_locks);
1336 nfs4_unhash_stid(&stp->st_stid);
1337 return unhash_ol_stateid(stp);
1338 }
1339
1340 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
1341 {
1342 struct nfs4_client *clp = stp->st_stid.sc_client;
1343 bool unhashed;
1344
1345 spin_lock(&clp->cl_lock);
1346 unhashed = unhash_lock_stateid(stp);
1347 spin_unlock(&clp->cl_lock);
1348 if (unhashed)
1349 nfs4_put_stid(&stp->st_stid);
1350 }
1351
1352 static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
1353 {
1354 struct nfs4_client *clp = lo->lo_owner.so_client;
1355
1356 lockdep_assert_held(&clp->cl_lock);
1357
1358 list_del_init(&lo->lo_owner.so_strhash);
1359 }
1360
1361
1362
1363
1364
1365 static void
1366 free_ol_stateid_reaplist(struct list_head *reaplist)
1367 {
1368 struct nfs4_ol_stateid *stp;
1369 struct nfs4_file *fp;
1370
1371 might_sleep();
1372
1373 while (!list_empty(reaplist)) {
1374 stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
1375 st_locks);
1376 list_del(&stp->st_locks);
1377 fp = stp->st_stid.sc_file;
1378 stp->st_stid.sc_free(&stp->st_stid);
1379 if (fp)
1380 put_nfs4_file(fp);
1381 }
1382 }
1383
1384 static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
1385 struct list_head *reaplist)
1386 {
1387 struct nfs4_ol_stateid *stp;
1388
1389 lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
1390
1391 while (!list_empty(&open_stp->st_locks)) {
1392 stp = list_entry(open_stp->st_locks.next,
1393 struct nfs4_ol_stateid, st_locks);
1394 WARN_ON(!unhash_lock_stateid(stp));
1395 put_ol_stateid_locked(stp, reaplist);
1396 }
1397 }
1398
1399 static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
1400 struct list_head *reaplist)
1401 {
1402 bool unhashed;
1403
1404 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1405
1406 unhashed = unhash_ol_stateid(stp);
1407 release_open_stateid_locks(stp, reaplist);
1408 return unhashed;
1409 }
1410
1411 static void release_open_stateid(struct nfs4_ol_stateid *stp)
1412 {
1413 LIST_HEAD(reaplist);
1414
1415 spin_lock(&stp->st_stid.sc_client->cl_lock);
1416 if (unhash_open_stateid(stp, &reaplist))
1417 put_ol_stateid_locked(stp, &reaplist);
1418 spin_unlock(&stp->st_stid.sc_client->cl_lock);
1419 free_ol_stateid_reaplist(&reaplist);
1420 }
1421
1422 static void unhash_openowner_locked(struct nfs4_openowner *oo)
1423 {
1424 struct nfs4_client *clp = oo->oo_owner.so_client;
1425
1426 lockdep_assert_held(&clp->cl_lock);
1427
1428 list_del_init(&oo->oo_owner.so_strhash);
1429 list_del_init(&oo->oo_perclient);
1430 }
1431
1432 static void release_last_closed_stateid(struct nfs4_openowner *oo)
1433 {
1434 struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
1435 nfsd_net_id);
1436 struct nfs4_ol_stateid *s;
1437
1438 spin_lock(&nn->client_lock);
1439 s = oo->oo_last_closed_stid;
1440 if (s) {
1441 list_del_init(&oo->oo_close_lru);
1442 oo->oo_last_closed_stid = NULL;
1443 }
1444 spin_unlock(&nn->client_lock);
1445 if (s)
1446 nfs4_put_stid(&s->st_stid);
1447 }
1448
1449 static void release_openowner(struct nfs4_openowner *oo)
1450 {
1451 struct nfs4_ol_stateid *stp;
1452 struct nfs4_client *clp = oo->oo_owner.so_client;
1453 struct list_head reaplist;
1454
1455 INIT_LIST_HEAD(&reaplist);
1456
1457 spin_lock(&clp->cl_lock);
1458 unhash_openowner_locked(oo);
1459 while (!list_empty(&oo->oo_owner.so_stateids)) {
1460 stp = list_first_entry(&oo->oo_owner.so_stateids,
1461 struct nfs4_ol_stateid, st_perstateowner);
1462 if (unhash_open_stateid(stp, &reaplist))
1463 put_ol_stateid_locked(stp, &reaplist);
1464 }
1465 spin_unlock(&clp->cl_lock);
1466 free_ol_stateid_reaplist(&reaplist);
1467 release_last_closed_stateid(oo);
1468 nfs4_put_stateowner(&oo->oo_owner);
1469 }
1470
1471 static inline int
1472 hash_sessionid(struct nfs4_sessionid *sessionid)
1473 {
1474 struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
1475
1476 return sid->sequence % SESSION_HASH_SIZE;
1477 }
1478
1479 #ifdef CONFIG_SUNRPC_DEBUG
1480 static inline void
1481 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1482 {
1483 u32 *ptr = (u32 *)(&sessionid->data[0]);
1484 dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
1485 }
1486 #else
1487 static inline void
1488 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1489 {
1490 }
1491 #endif
1492
1493
1494
1495
1496
1497 void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
1498 {
1499 struct nfs4_stateowner *so = cstate->replay_owner;
1500
1501 if (nfserr == nfserr_replay_me)
1502 return;
1503
1504 if (!seqid_mutating_err(ntohl(nfserr))) {
1505 nfsd4_cstate_clear_replay(cstate);
1506 return;
1507 }
1508 if (!so)
1509 return;
1510 if (so->so_is_open_owner)
1511 release_last_closed_stateid(openowner(so));
1512 so->so_seqid++;
1513 return;
1514 }
1515
1516 static void
1517 gen_sessionid(struct nfsd4_session *ses)
1518 {
1519 struct nfs4_client *clp = ses->se_client;
1520 struct nfsd4_sessionid *sid;
1521
1522 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
1523 sid->clientid = clp->cl_clientid;
1524 sid->sequence = current_sessionid++;
1525 sid->reserved = 0;
1526 }
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540 #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
1541
1542 static void
1543 free_session_slots(struct nfsd4_session *ses)
1544 {
1545 int i;
1546
1547 for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
1548 free_svc_cred(&ses->se_slots[i]->sl_cred);
1549 kfree(ses->se_slots[i]);
1550 }
1551 }
1552
1553
1554
1555
1556
1557 static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1558 {
1559 u32 size;
1560
1561 if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
1562 size = 0;
1563 else
1564 size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
1565 return size + sizeof(struct nfsd4_slot);
1566 }
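/*
 * Each cached slot needs sizeof(struct nfsd4_slot) plus however much of
 * the negotiated maxresp_cached exceeds NFSD_MIN_HDR_SEQ_SZ.  That
 * minimum (24 + 12 + 44 = 80 bytes) corresponds to the RPC reply header,
 * the COMPOUND reply header and the SEQUENCE reply, which are regenerated
 * on replay rather than stored in the cache.
 */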
1567
1568
1569
1570
1571
1572
1573 static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
1574 {
1575 u32 slotsize = slot_bytes(ca);
1576 u32 num = ca->maxreqs;
1577 unsigned long avail, total_avail;
1578 unsigned int scale_factor;
1579
1580 spin_lock(&nfsd_drc_lock);
1581 if (nfsd_drc_max_mem > nfsd_drc_mem_used)
1582 total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
1583 else
1584
1585
1586
1587
1588
1589 total_avail = 0;
1590 avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600 scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);
1601
1602 avail = clamp_t(unsigned long, avail, slotsize,
1603 total_avail/scale_factor);
1604 num = min_t(int, num, avail / slotsize);
1605 num = max_t(int, num, 1);
1606 nfsd_drc_mem_used += num * slotsize;
1607 spin_unlock(&nfsd_drc_lock);
1608
1609 return num;
1610 }
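/*
 * DRC accounting: whatever remains of nfsd_drc_max_mem is shared among
 * sessions, with each new session clamped to NFSD_MAX_MEM_PER_SESSION and
 * to total_avail / scale_factor (scale_factor being at least 8, or the
 * number of nfsd threads if larger) so one session cannot starve the
 * rest; at least one slot is always granted.  As an illustrative example,
 * with 2 MB still available, 32 threads and 2 KB slots, a session would
 * be limited to 64 KB of cache, i.e. 32 slots.
 */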
1611
1612 static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1613 {
1614 int slotsize = slot_bytes(ca);
1615
1616 spin_lock(&nfsd_drc_lock);
1617 nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1618 spin_unlock(&nfsd_drc_lock);
1619 }
1620
1621 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
1622 struct nfsd4_channel_attrs *battrs)
1623 {
1624 int numslots = fattrs->maxreqs;
1625 int slotsize = slot_bytes(fattrs);
1626 struct nfsd4_session *new;
1627 int mem, i;
1628
1629 BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
1630 + sizeof(struct nfsd4_session) > PAGE_SIZE);
1631 mem = numslots * sizeof(struct nfsd4_slot *);
1632
1633 new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
1634 if (!new)
1635 return NULL;
1636
1637 for (i = 0; i < numslots; i++) {
1638 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
1639 if (!new->se_slots[i])
1640 goto out_free;
1641 }
1642
1643 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
1644 memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
1645
1646 return new;
1647 out_free:
1648 while (i--)
1649 kfree(new->se_slots[i]);
1650 kfree(new);
1651 return NULL;
1652 }
1653
1654 static void free_conn(struct nfsd4_conn *c)
1655 {
1656 svc_xprt_put(c->cn_xprt);
1657 kfree(c);
1658 }
1659
1660 static void nfsd4_conn_lost(struct svc_xpt_user *u)
1661 {
1662 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
1663 struct nfs4_client *clp = c->cn_session->se_client;
1664
1665 spin_lock(&clp->cl_lock);
1666 if (!list_empty(&c->cn_persession)) {
1667 list_del(&c->cn_persession);
1668 free_conn(c);
1669 }
1670 nfsd4_probe_callback(clp);
1671 spin_unlock(&clp->cl_lock);
1672 }
1673
1674 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1675 {
1676 struct nfsd4_conn *conn;
1677
1678 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1679 if (!conn)
1680 return NULL;
1681 svc_xprt_get(rqstp->rq_xprt);
1682 conn->cn_xprt = rqstp->rq_xprt;
1683 conn->cn_flags = flags;
1684 INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1685 return conn;
1686 }
1687
1688 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1689 {
1690 conn->cn_session = ses;
1691 list_add(&conn->cn_persession, &ses->se_conns);
1692 }
1693
1694 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1695 {
1696 struct nfs4_client *clp = ses->se_client;
1697
1698 spin_lock(&clp->cl_lock);
1699 __nfsd4_hash_conn(conn, ses);
1700 spin_unlock(&clp->cl_lock);
1701 }
1702
1703 static int nfsd4_register_conn(struct nfsd4_conn *conn)
1704 {
1705 conn->cn_xpt_user.callback = nfsd4_conn_lost;
1706 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
1707 }
1708
1709 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
1710 {
1711 int ret;
1712
1713 nfsd4_hash_conn(conn, ses);
1714 ret = nfsd4_register_conn(conn);
1715 if (ret)
1716
1717 nfsd4_conn_lost(&conn->cn_xpt_user);
1718
1719 nfsd4_probe_callback_sync(ses->se_client);
1720 }
1721
1722 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1723 {
1724 u32 dir = NFS4_CDFC4_FORE;
1725
1726 if (cses->flags & SESSION4_BACK_CHAN)
1727 dir |= NFS4_CDFC4_BACK;
1728 return alloc_conn(rqstp, dir);
1729 }
1730
1731
1732 static void nfsd4_del_conns(struct nfsd4_session *s)
1733 {
1734 struct nfs4_client *clp = s->se_client;
1735 struct nfsd4_conn *c;
1736
1737 spin_lock(&clp->cl_lock);
1738 while (!list_empty(&s->se_conns)) {
1739 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
1740 list_del_init(&c->cn_persession);
1741 spin_unlock(&clp->cl_lock);
1742
1743 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
1744 free_conn(c);
1745
1746 spin_lock(&clp->cl_lock);
1747 }
1748 spin_unlock(&clp->cl_lock);
1749 }
1750
1751 static void __free_session(struct nfsd4_session *ses)
1752 {
1753 free_session_slots(ses);
1754 kfree(ses);
1755 }
1756
1757 static void free_session(struct nfsd4_session *ses)
1758 {
1759 nfsd4_del_conns(ses);
1760 nfsd4_put_drc_mem(&ses->se_fchannel);
1761 __free_session(ses);
1762 }
1763
1764 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1765 {
1766 int idx;
1767 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1768
1769 new->se_client = clp;
1770 gen_sessionid(new);
1771
1772 INIT_LIST_HEAD(&new->se_conns);
1773
1774 new->se_cb_seq_nr = 1;
1775 new->se_flags = cses->flags;
1776 new->se_cb_prog = cses->callback_prog;
1777 new->se_cb_sec = cses->cb_sec;
1778 atomic_set(&new->se_ref, 0);
1779 idx = hash_sessionid(&new->se_sessionid);
1780 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1781 spin_lock(&clp->cl_lock);
1782 list_add(&new->se_perclnt, &clp->cl_sessions);
1783 spin_unlock(&clp->cl_lock);
1784
1785 {
1786 struct sockaddr *sa = svc_addr(rqstp);
1787
1788
1789
1790
1791
1792
1793
1794 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
1795 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
1796 }
1797 }
1798
1799
1800 static struct nfsd4_session *
1801 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
1802 {
1803 struct nfsd4_session *elem;
1804 int idx;
1805 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1806
1807 lockdep_assert_held(&nn->client_lock);
1808
1809 dump_sessionid(__func__, sessionid);
1810 idx = hash_sessionid(sessionid);
1811
1812 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
1813 if (!memcmp(elem->se_sessionid.data, sessionid->data,
1814 NFS4_MAX_SESSIONID_LEN)) {
1815 return elem;
1816 }
1817 }
1818
1819 dprintk("%s: session not found\n", __func__);
1820 return NULL;
1821 }
1822
1823 static struct nfsd4_session *
1824 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
1825 __be32 *ret)
1826 {
1827 struct nfsd4_session *session;
1828 __be32 status = nfserr_badsession;
1829
1830 session = __find_in_sessionid_hashtbl(sessionid, net);
1831 if (!session)
1832 goto out;
1833 status = nfsd4_get_session_locked(session);
1834 if (status)
1835 session = NULL;
1836 out:
1837 *ret = status;
1838 return session;
1839 }
1840
1841
1842 static void
1843 unhash_session(struct nfsd4_session *ses)
1844 {
1845 struct nfs4_client *clp = ses->se_client;
1846 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1847
1848 lockdep_assert_held(&nn->client_lock);
1849
1850 list_del(&ses->se_hash);
1851 spin_lock(&ses->se_client->cl_lock);
1852 list_del(&ses->se_perclnt);
1853 spin_unlock(&ses->se_client->cl_lock);
1854 }
1855
1856
1857 static int
1858 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1859 {
1860
1861
1862
1863
1864
1865 if (clid->cl_boot == (u32)nn->boot_time)
1866 return 0;
1867 dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
1868 clid->cl_boot, clid->cl_id, nn->boot_time);
1869 return 1;
1870 }
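/*
 * A clientid embeds the server instance's boot time in cl_boot, so a
 * mismatch with this net's boot_time means the id was issued by an
 * earlier incarnation of the server and is stale.
 */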
1871
1872
1873
1874
1875
1876
1877 static struct nfs4_client *alloc_client(struct xdr_netobj name)
1878 {
1879 struct nfs4_client *clp;
1880 int i;
1881
1882 clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
1883 if (clp == NULL)
1884 return NULL;
1885 xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
1886 if (clp->cl_name.data == NULL)
1887 goto err_no_name;
1888 clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
1889 sizeof(struct list_head),
1890 GFP_KERNEL);
1891 if (!clp->cl_ownerstr_hashtbl)
1892 goto err_no_hashtbl;
1893 for (i = 0; i < OWNER_HASH_SIZE; i++)
1894 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
1895 INIT_LIST_HEAD(&clp->cl_sessions);
1896 idr_init(&clp->cl_stateids);
1897 atomic_set(&clp->cl_rpc_users, 0);
1898 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1899 INIT_LIST_HEAD(&clp->cl_idhash);
1900 INIT_LIST_HEAD(&clp->cl_openowners);
1901 INIT_LIST_HEAD(&clp->cl_delegations);
1902 INIT_LIST_HEAD(&clp->cl_lru);
1903 INIT_LIST_HEAD(&clp->cl_revoked);
1904 #ifdef CONFIG_NFSD_PNFS
1905 INIT_LIST_HEAD(&clp->cl_lo_states);
1906 #endif
1907 INIT_LIST_HEAD(&clp->async_copies);
1908 spin_lock_init(&clp->async_lock);
1909 spin_lock_init(&clp->cl_lock);
1910 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1911 return clp;
1912 err_no_hashtbl:
1913 kfree(clp->cl_name.data);
1914 err_no_name:
1915 kmem_cache_free(client_slab, clp);
1916 return NULL;
1917 }
1918
1919 static void __free_client(struct kref *k)
1920 {
1921 struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
1922 struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);
1923
1924 free_svc_cred(&clp->cl_cred);
1925 kfree(clp->cl_ownerstr_hashtbl);
1926 kfree(clp->cl_name.data);
1927 kfree(clp->cl_nii_domain.data);
1928 kfree(clp->cl_nii_name.data);
1929 idr_destroy(&clp->cl_stateids);
1930 kmem_cache_free(client_slab, clp);
1931 }
1932
1933 static void drop_client(struct nfs4_client *clp)
1934 {
1935 kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
1936 }
1937
1938 static void
1939 free_client(struct nfs4_client *clp)
1940 {
1941 while (!list_empty(&clp->cl_sessions)) {
1942 struct nfsd4_session *ses;
1943 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
1944 se_perclnt);
1945 list_del(&ses->se_perclnt);
1946 WARN_ON_ONCE(atomic_read(&ses->se_ref));
1947 free_session(ses);
1948 }
1949 rpc_destroy_wait_queue(&clp->cl_cb_waitq);
1950 if (clp->cl_nfsd_dentry) {
1951 nfsd_client_rmdir(clp->cl_nfsd_dentry);
1952 clp->cl_nfsd_dentry = NULL;
1953 wake_up_all(&expiry_wq);
1954 }
1955 drop_client(clp);
1956 }
1957
1958
1959 static void
1960 unhash_client_locked(struct nfs4_client *clp)
1961 {
1962 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1963 struct nfsd4_session *ses;
1964
1965 lockdep_assert_held(&nn->client_lock);
1966
1967
1968 clp->cl_time = 0;
1969
1970 if (!list_empty(&clp->cl_idhash)) {
1971 list_del_init(&clp->cl_idhash);
1972 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
1973 rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
1974 else
1975 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1976 }
1977 list_del_init(&clp->cl_lru);
1978 spin_lock(&clp->cl_lock);
1979 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
1980 list_del_init(&ses->se_hash);
1981 spin_unlock(&clp->cl_lock);
1982 }
1983
1984 static void
1985 unhash_client(struct nfs4_client *clp)
1986 {
1987 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1988
1989 spin_lock(&nn->client_lock);
1990 unhash_client_locked(clp);
1991 spin_unlock(&nn->client_lock);
1992 }
1993
1994 static __be32 mark_client_expired_locked(struct nfs4_client *clp)
1995 {
1996 if (atomic_read(&clp->cl_rpc_users))
1997 return nfserr_jukebox;
1998 unhash_client_locked(clp);
1999 return nfs_ok;
2000 }
2001
2002 static void
2003 __destroy_client(struct nfs4_client *clp)
2004 {
2005 int i;
2006 struct nfs4_openowner *oo;
2007 struct nfs4_delegation *dp;
2008 struct list_head reaplist;
2009
2010 INIT_LIST_HEAD(&reaplist);
2011 spin_lock(&state_lock);
2012 while (!list_empty(&clp->cl_delegations)) {
2013 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
2014 WARN_ON(!unhash_delegation_locked(dp));
2015 list_add(&dp->dl_recall_lru, &reaplist);
2016 }
2017 spin_unlock(&state_lock);
2018 while (!list_empty(&reaplist)) {
2019 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
2020 list_del_init(&dp->dl_recall_lru);
2021 destroy_unhashed_deleg(dp);
2022 }
2023 while (!list_empty(&clp->cl_revoked)) {
2024 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
2025 list_del_init(&dp->dl_recall_lru);
2026 nfs4_put_stid(&dp->dl_stid);
2027 }
2028 while (!list_empty(&clp->cl_openowners)) {
2029 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
2030 nfs4_get_stateowner(&oo->oo_owner);
2031 release_openowner(oo);
2032 }
2033 for (i = 0; i < OWNER_HASH_SIZE; i++) {
2034 struct nfs4_stateowner *so, *tmp;
2035
2036 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
2037 so_strhash) {
2038 /* openowners were all released above, so only lockowners remain */
2039 WARN_ON_ONCE(so->so_is_open_owner);
2040 remove_blocked_locks(lockowner(so));
2041 }
2042 }
2043 nfsd4_return_all_client_layouts(clp);
2044 nfsd4_shutdown_copy(clp);
2045 nfsd4_shutdown_callback(clp);
2046 if (clp->cl_cb_conn.cb_xprt)
2047 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
2048 free_client(clp);
2049 wake_up_all(&expiry_wq);
2050 }
2051
2052 static void
2053 destroy_client(struct nfs4_client *clp)
2054 {
2055 unhash_client(clp);
2056 __destroy_client(clp);
2057 }
2058
2059 static void inc_reclaim_complete(struct nfs4_client *clp)
2060 {
2061 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2062
2063 if (!nn->track_reclaim_completes)
2064 return;
2065 if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
2066 return;
2067 if (atomic_inc_return(&nn->nr_reclaim_complete) ==
2068 nn->reclaim_str_hashtbl_size) {
2069 printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
2070 clp->net->ns.inum);
2071 nfsd4_end_grace(nn);
2072 }
2073 }
2074
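/*
 * Like destroy_client(), but also removes the client's stable-storage
 * record so it cannot reclaim state after the next server reboot.
 */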
2075 static void expire_client(struct nfs4_client *clp)
2076 {
2077 unhash_client(clp);
2078 nfsd4_client_record_remove(clp);
2079 __destroy_client(clp);
2080 }
2081
2082 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
2083 {
2084 memcpy(target->cl_verifier.data, source->data,
2085 sizeof(target->cl_verifier.data));
2086 }
2087
2088 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
2089 {
2090 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
2091 target->cl_clientid.cl_id = source->cl_clientid.cl_id;
2092 }
2093
2094 static int copy_cred(struct svc_cred *target, struct svc_cred *source)
2095 {
2096 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
2097 target->cr_raw_principal = kstrdup(source->cr_raw_principal,
2098 GFP_KERNEL);
2099 target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
2100 if ((source->cr_principal && !target->cr_principal) ||
2101 (source->cr_raw_principal && !target->cr_raw_principal) ||
2102 (source->cr_targ_princ && !target->cr_targ_princ))
2103 return -ENOMEM;
2104
2105 target->cr_flavor = source->cr_flavor;
2106 target->cr_uid = source->cr_uid;
2107 target->cr_gid = source->cr_gid;
2108 target->cr_group_info = source->cr_group_info;
2109 get_group_info(target->cr_group_info);
2110 target->cr_gss_mech = source->cr_gss_mech;
2111 if (source->cr_gss_mech)
2112 gss_mech_get(source->cr_gss_mech);
2113 return 0;
2114 }
2115
2116 static int
2117 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
2118 {
2119 if (o1->len < o2->len)
2120 return -1;
2121 if (o1->len > o2->len)
2122 return 1;
2123 return memcmp(o1->data, o2->data, o1->len);
2124 }
2125
2126 static int
2127 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
2128 {
2129 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
2130 }
2131
2132 static int
2133 same_clid(clientid_t *cl1, clientid_t *cl2)
2134 {
2135 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
2136 }
2137
2138 static bool groups_equal(struct group_info *g1, struct group_info *g2)
2139 {
2140 int i;
2141
2142 if (g1->ngroups != g2->ngroups)
2143 return false;
2144 for (i=0; i<g1->ngroups; i++)
2145 if (!gid_eq(g1->gid[i], g2->gid[i]))
2146 return false;
2147 return true;
2148 }
2149
2150 
2151 
2152 /*
2153  * RFC 3530 requires clid_inuse to be returned when the "principal"
2154  * behind a request differs from the one previously used for the same
2155  * client name.  We approximate "same principal" by comparing uid,
2156  * gids and the gss principal string, and we never treat a gss
2157  * credential and a non-gss credential as the same principal.
2158  */
2159 static bool is_gss_cred(struct svc_cred *cr)
2160 {
2161 /* all RPCSEC_GSS pseudoflavors are greater than RPC_AUTH_MAXFLAVOR */
2162 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
2163 }
2164
2165
2166 static bool
2167 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
2168 {
2169 if ((is_gss_cred(cr1) != is_gss_cred(cr2))
2170 || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
2171 || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
2172 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
2173 return false;
2174
2175 if (cr1->cr_principal == cr2->cr_principal)
2176 return true;
2177 if (!cr1->cr_principal || !cr2->cr_principal)
2178 return false;
2179 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
2180 }
2181
2182 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
2183 {
2184 struct svc_cred *cr = &rqstp->rq_cred;
2185 u32 service;
2186
2187 if (!cr->cr_gss_mech)
2188 return false;
2189 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
2190 return service == RPC_GSS_SVC_INTEGRITY ||
2191 service == RPC_GSS_SVC_PRIVACY;
2192 }
2193
2194 bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
2195 {
2196 struct svc_cred *cr = &rqstp->rq_cred;
2197
2198 if (!cl->cl_mach_cred)
2199 return true;
2200 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
2201 return false;
2202 if (!svc_rqst_integrity_protected(rqstp))
2203 return false;
2204 if (cl->cl_cred.cr_raw_principal)
2205 return 0 == strcmp(cl->cl_cred.cr_raw_principal,
2206 cr->cr_raw_principal);
2207 if (!cr->cr_principal)
2208 return false;
2209 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
2210 }
2211
2212 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
2213 {
2214 __be32 verf[2];
2215
2216 /*
2217  * The verifier is opaque to the client, so no byte-swapping is
2218  * needed; the __force casts just keep sparse quiet.
2219  */
2220 verf[0] = (__force __be32)get_seconds();
2221 verf[1] = (__force __be32)nn->clverifier_counter++;
2222 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
2223 }
2224
2225 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2226 {
2227 clp->cl_clientid.cl_boot = nn->boot_time;
2228 clp->cl_clientid.cl_id = nn->clientid_counter++;
2229 gen_confirm(clp, nn);
2230 }
2231
2232 static struct nfs4_stid *
2233 find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
2234 {
2235 struct nfs4_stid *ret;
2236
2237 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
2238 if (!ret || !ret->sc_type)
2239 return NULL;
2240 return ret;
2241 }
2242
2243 static struct nfs4_stid *
2244 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
2245 {
2246 struct nfs4_stid *s;
2247
2248 spin_lock(&cl->cl_lock);
2249 s = find_stateid_locked(cl, t);
2250 if (s != NULL) {
2251 if (typemask & s->sc_type)
2252 refcount_inc(&s->sc_count);
2253 else
2254 s = NULL;
2255 }
2256 spin_unlock(&cl->cl_lock);
2257 return s;
2258 }
2259
2260 static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
2261 {
2262 struct nfsdfs_client *nc;
2263 nc = get_nfsdfs_client(inode);
2264 if (!nc)
2265 return NULL;
2266 return container_of(nc, struct nfs4_client, cl_nfsdfs);
2267 }
2268
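/* Emit a double-quoted, ASCII-escaped copy of an opaque blob. */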
2269 static void seq_quote_mem(struct seq_file *m, char *data, int len)
2270 {
2271 seq_printf(m, "\"");
2272 seq_escape_mem_ascii(m, data, len);
2273 seq_printf(m, "\"");
2274 }
2275
2276 static int client_info_show(struct seq_file *m, void *v)
2277 {
2278 struct inode *inode = m->private;
2279 struct nfs4_client *clp;
2280 u64 clid;
2281
2282 clp = get_nfsdfs_clp(inode);
2283 if (!clp)
2284 return -ENXIO;
2285 memcpy(&clid, &clp->cl_clientid, sizeof(clid));
2286 seq_printf(m, "clientid: 0x%llx\n", clid);
2287 seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
2288 seq_printf(m, "name: ");
2289 seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
2290 seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
2291 if (clp->cl_nii_domain.data) {
2292 seq_printf(m, "Implementation domain: ");
2293 seq_quote_mem(m, clp->cl_nii_domain.data,
2294 clp->cl_nii_domain.len);
2295 seq_printf(m, "\nImplementation name: ");
2296 seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
2297 seq_printf(m, "\nImplementation time: [%ld, %ld]\n",
2298 clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
2299 }
2300 drop_client(clp);
2301
2302 return 0;
2303 }
2304
2305 static int client_info_open(struct inode *inode, struct file *file)
2306 {
2307 return single_open(file, client_info_show, inode);
2308 }
2309
2310 static const struct file_operations client_info_fops = {
2311 .open = client_info_open,
2312 .read = seq_read,
2313 .llseek = seq_lseek,
2314 .release = single_release,
2315 };
2316
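/*
 * seq_file iterator over a client's stateid idr; cl_lock is held from
 * ->start until ->stop.
 */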
2317 static void *states_start(struct seq_file *s, loff_t *pos)
2318 __acquires(&clp->cl_lock)
2319 {
2320 struct nfs4_client *clp = s->private;
2321 unsigned long id = *pos;
2322 void *ret;
2323
2324 spin_lock(&clp->cl_lock);
2325 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2326 *pos = id;
2327 return ret;
2328 }
2329
2330 static void *states_next(struct seq_file *s, void *v, loff_t *pos)
2331 {
2332 struct nfs4_client *clp = s->private;
2333 unsigned long id = *pos;
2334 void *ret;
2335
2336 id = *pos;
2337 id++;
2338 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2339 *pos = id;
2340 return ret;
2341 }
2342
2343 static void states_stop(struct seq_file *s, void *v)
2344 __releases(&clp->cl_lock)
2345 {
2346 struct nfs4_client *clp = s->private;
2347
2348 spin_unlock(&clp->cl_lock);
2349 }
2350
2351 static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
2352 {
2353 struct inode *inode = f->nf_inode;
2354
2355 seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
2356 MAJOR(inode->i_sb->s_dev),
2357 MINOR(inode->i_sb->s_dev),
2358 inode->i_ino);
2359 }
2360
2361 static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
2362 {
2363 seq_printf(s, "owner: ");
2364 seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
2365 }
2366
2367 static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
2368 {
2369 struct nfs4_ol_stateid *ols;
2370 struct nfs4_file *nf;
2371 struct nfsd_file *file;
2372 struct nfs4_stateowner *oo;
2373 unsigned int access, deny;
2374
2375 if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID)
2376 return 0;
2377 ols = openlockstateid(st);
2378 oo = ols->st_stateowner;
2379 nf = st->sc_file;
2380 file = find_any_file(nf);
2381
2382 seq_printf(s, "- 0x%16phN: { type: open, ", &st->sc_stateid);
2383
2384 access = bmap_to_share_mode(ols->st_access_bmap);
2385 deny = bmap_to_share_mode(ols->st_deny_bmap);
2386
2387 seq_printf(s, "access: \%s\%s, ",
2388 access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2389 access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2390 seq_printf(s, "deny: \%s\%s, ",
2391 deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2392 deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2393
2394 nfs4_show_superblock(s, file);
2395 seq_printf(s, ", ");
2396 nfs4_show_owner(s, oo);
2397 seq_printf(s, " }\n");
2398 nfsd_file_put(file);
2399
2400 return 0;
2401 }
2402
2403 static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
2404 {
2405 struct nfs4_ol_stateid *ols;
2406 struct nfs4_file *nf;
2407 struct nfsd_file *file;
2408 struct nfs4_stateowner *oo;
2409
2410 ols = openlockstateid(st);
2411 oo = ols->st_stateowner;
2412 nf = st->sc_file;
2413 file = find_any_file(nf);
2414
2415 seq_printf(s, "- 0x%16phN: { type: lock, ", &st->sc_stateid);
2416
2417 /*
2418  * Note: a lock stateid is not a single lock; it is the locking state
2419  * held by one owner on a file, and there may be multiple (or no)
2420  * byte ranges associated with it.
2421  *
2422  * The same is largely true of open stateids and opens.
2423  */
2424 nfs4_show_superblock(s, file);
2425
2426 seq_printf(s, ", ");
2427 nfs4_show_owner(s, oo);
2428 seq_printf(s, " }\n");
2429 nfsd_file_put(file);
2430
2431 return 0;
2432 }
2433
2434 static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
2435 {
2436 struct nfs4_delegation *ds;
2437 struct nfs4_file *nf;
2438 struct nfsd_file *file;
2439
2440 ds = delegstateid(st);
2441 nf = st->sc_file;
2442 file = nf->fi_deleg_file;
2443
2444 seq_printf(s, "- 0x%16phN: { type: deleg, ", &st->sc_stateid);
2445
2446
2447 seq_printf(s, "access: %s, ",
2448 ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");
2449
2450
2451
2452 nfs4_show_superblock(s, file);
2453 seq_printf(s, " }\n");
2454
2455 return 0;
2456 }
2457
2458 static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
2459 {
2460 struct nfs4_layout_stateid *ls;
2461 struct nfsd_file *file;
2462
2463 ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
2464 file = ls->ls_file;
2465
2466 seq_printf(s, "- 0x%16phN: { type: layout, ", &st->sc_stateid);
2467
2468
2469
2470 nfs4_show_superblock(s, file);
2471 seq_printf(s, " }\n");
2472
2473 return 0;
2474 }
2475
2476 static int states_show(struct seq_file *s, void *v)
2477 {
2478 struct nfs4_stid *st = v;
2479
2480 switch (st->sc_type) {
2481 case NFS4_OPEN_STID:
2482 return nfs4_show_open(s, st);
2483 case NFS4_LOCK_STID:
2484 return nfs4_show_lock(s, st);
2485 case NFS4_DELEG_STID:
2486 return nfs4_show_deleg(s, st);
2487 case NFS4_LAYOUT_STID:
2488 return nfs4_show_layout(s, st);
2489 default:
2490 return 0;
2491 }
2492
2493 }
2494
2495 static struct seq_operations states_seq_ops = {
2496 .start = states_start,
2497 .next = states_next,
2498 .stop = states_stop,
2499 .show = states_show
2500 };
2501
2502 static int client_states_open(struct inode *inode, struct file *file)
2503 {
2504 struct seq_file *s;
2505 struct nfs4_client *clp;
2506 int ret;
2507
2508 clp = get_nfsdfs_clp(inode);
2509 if (!clp)
2510 return -ENXIO;
2511
2512 ret = seq_open(file, &states_seq_ops);
2513 if (ret)
2514 return ret;
2515 s = file->private_data;
2516 s->private = clp;
2517 return 0;
2518 }
2519
2520 static int client_opens_release(struct inode *inode, struct file *file)
2521 {
2522 struct seq_file *m = file->private_data;
2523 struct nfs4_client *clp = m->private;
2524
2525
2526 drop_client(clp);
2527 return 0;
2528 }
2529
2530 static const struct file_operations client_states_fops = {
2531 .open = client_states_open,
2532 .read = seq_read,
2533 .llseek = seq_lseek,
2534 .release = client_opens_release,
2535 };
2536
2537 /*
2538  * Normally we refuse to destroy clients that are in use, but here the
2539  * administrator is telling us to just do it.  We also wait, so the
2540  * caller has a guarantee that the client has really been destroyed
2541  * before returning.
2542  */
2543 static void force_expire_client(struct nfs4_client *clp)
2544 {
2545 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2546 bool already_expired;
2547
2548 spin_lock(&clp->cl_lock);
2549 clp->cl_time = 0;
2550 spin_unlock(&clp->cl_lock);
2551
2552 wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
2553 spin_lock(&nn->client_lock);
2554 already_expired = list_empty(&clp->cl_lru);
2555 if (!already_expired)
2556 unhash_client_locked(clp);
2557 spin_unlock(&nn->client_lock);
2558
2559 if (!already_expired)
2560 expire_client(clp);
2561 else
2562 wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
2563 }
2564
2565 static ssize_t client_ctl_write(struct file *file, const char __user *buf,
2566 size_t size, loff_t *pos)
2567 {
2568 char *data;
2569 struct nfs4_client *clp;
2570
2571 data = simple_transaction_get(file, buf, size);
2572 if (IS_ERR(data))
2573 return PTR_ERR(data);
2574 if (size != 7 || 0 != memcmp(data, "expire\n", 7))
2575 return -EINVAL;
2576 clp = get_nfsdfs_clp(file_inode(file));
2577 if (!clp)
2578 return -ENXIO;
2579 force_expire_client(clp);
2580 drop_client(clp);
2581 return 7;
2582 }
2583
2584 static const struct file_operations client_ctl_fops = {
2585 .write = client_ctl_write,
2586 .release = simple_transaction_release,
2587 };
2588
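/* Files exposed in each client's directory under the nfsd filesystem. */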
2589 static const struct tree_descr client_files[] = {
2590 [0] = {"info", &client_info_fops, S_IRUSR},
2591 [1] = {"states", &client_states_fops, S_IRUSR},
2592 [2] = {"ctl", &client_ctl_fops, S_IRUSR|S_IWUSR},
2593 [3] = {""},
2594 };
2595
2596 static struct nfs4_client *create_client(struct xdr_netobj name,
2597 struct svc_rqst *rqstp, nfs4_verifier *verf)
2598 {
2599 struct nfs4_client *clp;
2600 struct sockaddr *sa = svc_addr(rqstp);
2601 int ret;
2602 struct net *net = SVC_NET(rqstp);
2603 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2604
2605 clp = alloc_client(name);
2606 if (clp == NULL)
2607 return NULL;
2608
2609 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
2610 if (ret) {
2611 free_client(clp);
2612 return NULL;
2613 }
2614 gen_clid(clp, nn);
2615 kref_init(&clp->cl_nfsdfs.cl_ref);
2616 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
2617 clp->cl_time = get_seconds();
2618 clear_bit(0, &clp->cl_cb_slot_busy);
2619 copy_verf(clp, verf);
2620 memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
2621 clp->cl_cb_session = NULL;
2622 clp->net = net;
2623 clp->cl_nfsd_dentry = nfsd_client_mkdir(nn, &clp->cl_nfsdfs,
2624 clp->cl_clientid.cl_id - nn->clientid_base,
2625 client_files);
2626 if (!clp->cl_nfsd_dentry) {
2627 free_client(clp);
2628 return NULL;
2629 }
2630 return clp;
2631 }
2632
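/*
 * Confirmed and unconfirmed clients are also kept in per-net rbtrees
 * keyed by client name, using compare_blob() ordering.
 */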
2633 static void
2634 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
2635 {
2636 struct rb_node **new = &(root->rb_node), *parent = NULL;
2637 struct nfs4_client *clp;
2638
2639 while (*new) {
2640 clp = rb_entry(*new, struct nfs4_client, cl_namenode);
2641 parent = *new;
2642
2643 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
2644 new = &((*new)->rb_left);
2645 else
2646 new = &((*new)->rb_right);
2647 }
2648
2649 rb_link_node(&new_clp->cl_namenode, parent, new);
2650 rb_insert_color(&new_clp->cl_namenode, root);
2651 }
2652
2653 static struct nfs4_client *
2654 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
2655 {
2656 int cmp;
2657 struct rb_node *node = root->rb_node;
2658 struct nfs4_client *clp;
2659
2660 while (node) {
2661 clp = rb_entry(node, struct nfs4_client, cl_namenode);
2662 cmp = compare_blob(&clp->cl_name, name);
2663 if (cmp > 0)
2664 node = node->rb_left;
2665 else if (cmp < 0)
2666 node = node->rb_right;
2667 else
2668 return clp;
2669 }
2670 return NULL;
2671 }
2672
2673 static void
2674 add_to_unconfirmed(struct nfs4_client *clp)
2675 {
2676 unsigned int idhashval;
2677 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2678
2679 lockdep_assert_held(&nn->client_lock);
2680
2681 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2682 add_clp_to_name_tree(clp, &nn->unconf_name_tree);
2683 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2684 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
2685 renew_client_locked(clp);
2686 }
2687
2688 static void
2689 move_to_confirmed(struct nfs4_client *clp)
2690 {
2691 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2692 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2693
2694 lockdep_assert_held(&nn->client_lock);
2695
2696 dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
2697 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
2698 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2699 add_clp_to_name_tree(clp, &nn->conf_name_tree);
2700 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2701 renew_client_locked(clp);
2702 }
2703
2704 static struct nfs4_client *
2705 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
2706 {
2707 struct nfs4_client *clp;
2708 unsigned int idhashval = clientid_hashval(clid->cl_id);
2709
2710 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
2711 if (same_clid(&clp->cl_clientid, clid)) {
2712 if ((bool)clp->cl_minorversion != sessions)
2713 return NULL;
2714 renew_client_locked(clp);
2715 return clp;
2716 }
2717 }
2718 return NULL;
2719 }
2720
2721 static struct nfs4_client *
2722 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2723 {
2724 struct list_head *tbl = nn->conf_id_hashtbl;
2725
2726 lockdep_assert_held(&nn->client_lock);
2727 return find_client_in_id_table(tbl, clid, sessions);
2728 }
2729
2730 static struct nfs4_client *
2731 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2732 {
2733 struct list_head *tbl = nn->unconf_id_hashtbl;
2734
2735 lockdep_assert_held(&nn->client_lock);
2736 return find_client_in_id_table(tbl, clid, sessions);
2737 }
2738
2739 static bool clp_used_exchangeid(struct nfs4_client *clp)
2740 {
2741 return clp->cl_exchange_flags != 0;
2742 }
2743
2744 static struct nfs4_client *
2745 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2746 {
2747 lockdep_assert_held(&nn->client_lock);
2748 return find_clp_in_name_tree(name, &nn->conf_name_tree);
2749 }
2750
2751 static struct nfs4_client *
2752 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2753 {
2754 lockdep_assert_held(&nn->client_lock);
2755 return find_clp_in_name_tree(name, &nn->unconf_name_tree);
2756 }
2757
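/*
 * Parse the SETCLIENTID callback address (netid plus universal address)
 * into cl_cb_conn; if parsing fails the client simply will not be
 * offered delegations.
 */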
2758 static void
2759 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
2760 {
2761 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
2762 struct sockaddr *sa = svc_addr(rqstp);
2763 u32 scopeid = rpc_get_scope_id(sa);
2764 unsigned short expected_family;
2765
2766 /* Currently, we only support tcp and tcp6 for the callback channel */
2767 if (se->se_callback_netid_len == 3 &&
2768 !memcmp(se->se_callback_netid_val, "tcp", 3))
2769 expected_family = AF_INET;
2770 else if (se->se_callback_netid_len == 4 &&
2771 !memcmp(se->se_callback_netid_val, "tcp6", 4))
2772 expected_family = AF_INET6;
2773 else
2774 goto out_err;
2775
2776 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
2777 se->se_callback_addr_len,
2778 (struct sockaddr *)&conn->cb_addr,
2779 sizeof(conn->cb_addr));
2780
2781 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
2782 goto out_err;
2783
2784 if (conn->cb_addr.ss_family == AF_INET6)
2785 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
2786
2787 conn->cb_prog = se->se_callback_prog;
2788 conn->cb_ident = se->se_callback_ident;
2789 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
2790 return;
2791 out_err:
2792 conn->cb_addr.ss_family = AF_UNSPEC;
2793 conn->cb_addrlen = 0;
2794 dprintk("NFSD: this client (clientid %08x/%08x) "
2795 "will not receive delegations\n",
2796 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
2797
2798 return;
2799 }
2800
2801 /*
2802  * Cache this compound's reply in the session's DRC slot.
2803  */
2804 static void
2805 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
2806 {
2807 struct xdr_buf *buf = resp->xdr.buf;
2808 struct nfsd4_slot *slot = resp->cstate.slot;
2809 unsigned int base;
2810
2811 dprintk("--> %s slot %p\n", __func__, slot);
2812
2813 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
2814 slot->sl_opcnt = resp->opcnt;
2815 slot->sl_status = resp->cstate.status;
2816 free_svc_cred(&slot->sl_cred);
2817 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
2818
2819 if (!nfsd4_cache_this(resp)) {
2820 slot->sl_flags &= ~NFSD4_SLOT_CACHED;
2821 return;
2822 }
2823 slot->sl_flags |= NFSD4_SLOT_CACHED;
2824
2825 base = resp->cstate.data_offset;
2826 slot->sl_datalen = buf->len - base;
2827 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
2828 WARN(1, "%s: sessions DRC could not cache compound\n",
2829 __func__);
2830 return;
2831 }
2832
2833 
2834 
2835 /*
2836  * Encode the SEQUENCE op of a replay from the cached slot values.  If
2837  * the rest of the reply was not cached, also encode an
2838  * NFS4ERR_RETRY_UNCACHED_REP error for the following operation.
2839  */
2840 static __be32
2841 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
2842 struct nfsd4_compoundres *resp)
2843 {
2844 struct nfsd4_op *op;
2845 struct nfsd4_slot *slot = resp->cstate.slot;
2846
2847
2848 op = &args->ops[resp->opcnt - 1];
2849 nfsd4_encode_operation(resp, op);
2850
2851 if (slot->sl_flags & NFSD4_SLOT_CACHED)
2852 return op->status;
2853 if (args->opcnt == 1) {
2854 /*
2855  * The original operation wasn't a solo SEQUENCE (those are always
2856  * cached), so this retry cannot match the original:
2857  */
2858 
2859 op->status = nfserr_seq_false_retry;
2860 } else {
2861 op = &args->ops[resp->opcnt++];
2862 op->status = nfserr_retry_uncached_rep;
2863 nfsd4_encode_operation(resp, op);
2864 }
2865 return op->status;
2866 }
2867
2868 
2869 /* The SEQUENCE op itself is not cached; it is re-encoded from the
2870  * slot and session values.
2871  */
2872 static __be32
2873 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
2874 struct nfsd4_sequence *seq)
2875 {
2876 struct nfsd4_slot *slot = resp->cstate.slot;
2877 struct xdr_stream *xdr = &resp->xdr;
2878 __be32 *p;
2879 __be32 status;
2880
2881 dprintk("--> %s slot %p\n", __func__, slot);
2882
2883 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
2884 if (status)
2885 return status;
2886
2887 p = xdr_reserve_space(xdr, slot->sl_datalen);
2888 if (!p) {
2889 WARN_ON_ONCE(1);
2890 return nfserr_serverfault;
2891 }
2892 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
2893 xdr_commit_encode(xdr);
2894
2895 resp->opcnt = slot->sl_opcnt;
2896 return slot->sl_status;
2897 }
2898
2899 
2900 
2901 /* Set the exchange_id flags that will be returned to the client. */
2902 static void
2903 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
2904 {
2905 #ifdef CONFIG_NFSD_PNFS
2906 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
2907 #else
2908 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
2909 #endif
2910 
2911 /* Referrals are supported, migration is not. */
2912 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
2913 
2914 /* set the wire flags to return to client. */
2915 clid->flags = new->cl_exchange_flags;
2916 }
2917
2918 static bool client_has_openowners(struct nfs4_client *clp)
2919 {
2920 struct nfs4_openowner *oo;
2921
2922 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
2923 if (!list_empty(&oo->oo_owner.so_stateids))
2924 return true;
2925 }
2926 return false;
2927 }
2928
2929 static bool client_has_state(struct nfs4_client *clp)
2930 {
2931 return client_has_openowners(clp)
2932 #ifdef CONFIG_NFSD_PNFS
2933 || !list_empty(&clp->cl_lo_states)
2934 #endif
2935 || !list_empty(&clp->cl_delegations)
2936 || !list_empty(&clp->cl_sessions)
2937 || !list_empty(&clp->async_copies);
2938 }
2939
2940 static __be32 copy_impl_id(struct nfs4_client *clp,
2941 struct nfsd4_exchange_id *exid)
2942 {
2943 if (!exid->nii_domain.data)
2944 return 0;
2945 xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
2946 if (!clp->cl_nii_domain.data)
2947 return nfserr_jukebox;
2948 xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
2949 if (!clp->cl_nii_name.data)
2950 return nfserr_jukebox;
2951 clp->cl_nii_time.tv_sec = exid->nii_time.tv_sec;
2952 clp->cl_nii_time.tv_nsec = exid->nii_time.tv_nsec;
2953 return 0;
2954 }
2955
2956 __be32
2957 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2958 union nfsd4_op_u *u)
2959 {
2960 struct nfsd4_exchange_id *exid = &u->exchange_id;
2961 struct nfs4_client *conf, *new;
2962 struct nfs4_client *unconf = NULL;
2963 __be32 status;
2964 char addr_str[INET6_ADDRSTRLEN];
2965 nfs4_verifier verf = exid->verifier;
2966 struct sockaddr *sa = svc_addr(rqstp);
2967 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
2968 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2969
2970 rpc_ntop(sa, addr_str, sizeof(addr_str));
2971 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
2972 "ip_addr=%s flags %x, spa_how %d\n",
2973 __func__, rqstp, exid, exid->clname.len, exid->clname.data,
2974 addr_str, exid->flags, exid->spa_how);
2975
2976 if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
2977 return nfserr_inval;
2978
2979 new = create_client(exid->clname, rqstp, &verf);
2980 if (new == NULL)
2981 return nfserr_jukebox;
2982 status = copy_impl_id(new, exid);
2983 if (status)
2984 goto out_nolock;
2985
2986 switch (exid->spa_how) {
2987 case SP4_MACH_CRED:
2988 exid->spo_must_enforce[0] = 0;
2989 exid->spo_must_enforce[1] = (
2990 1 << (OP_BIND_CONN_TO_SESSION - 32) |
2991 1 << (OP_EXCHANGE_ID - 32) |
2992 1 << (OP_CREATE_SESSION - 32) |
2993 1 << (OP_DESTROY_SESSION - 32) |
2994 1 << (OP_DESTROY_CLIENTID - 32));
2995
2996 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
2997 1 << (OP_OPEN_DOWNGRADE) |
2998 1 << (OP_LOCKU) |
2999 1 << (OP_DELEGRETURN));
3000
3001 exid->spo_must_allow[1] &= (
3002 1 << (OP_TEST_STATEID - 32) |
3003 1 << (OP_FREE_STATEID - 32));
3004 if (!svc_rqst_integrity_protected(rqstp)) {
3005 status = nfserr_inval;
3006 goto out_nolock;
3007 }
3008
3009 /*
3010  * Sometimes userspace doesn't give us a principal; we cannot enforce
3011  * SP4_MACH_CRED without one, so fail now rather than later:
3012  */
3013 if (!new->cl_cred.cr_principal &&
3014 !new->cl_cred.cr_raw_principal) {
3015 status = nfserr_serverfault;
3016 goto out_nolock;
3017 }
3018 new->cl_mach_cred = true;
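/* fall through */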
3019 case SP4_NONE:
3020 break;
3021 default:
3022 WARN_ON_ONCE(1);
3023 /* fall through */
3024 case SP4_SSV:
3025 status = nfserr_encr_alg_unsupp;
3026 goto out_nolock;
3027 }
3028 
3029 /* The cases below refer to RFC 5661, section 18.35.4: */
3030 spin_lock(&nn->client_lock);
3031 conf = find_confirmed_client_by_name(&exid->clname, nn);
3032 if (conf) {
3033 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
3034 bool verfs_match = same_verf(&verf, &conf->cl_verifier);
3035
3036 if (update) {
3037 if (!clp_used_exchangeid(conf)) {
3038 status = nfserr_inval;
3039 goto out;
3040 }
3041 if (!nfsd4_mach_creds_match(conf, rqstp)) {
3042 status = nfserr_wrong_cred;
3043 goto out;
3044 }
3045 if (!creds_match) {
3046 status = nfserr_perm;
3047 goto out;
3048 }
3049 if (!verfs_match) {
3050 status = nfserr_not_same;
3051 goto out;
3052 }
3053
3054 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
3055 goto out_copy;
3056 }
3057 if (!creds_match) {
3058 if (client_has_state(conf)) {
3059 status = nfserr_clid_inuse;
3060 goto out;
3061 }
3062 goto out_new;
3063 }
3064 if (verfs_match) {
3065 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
3066 goto out_copy;
3067 }
3068
3069 conf = NULL;
3070 goto out_new;
3071 }
3072
3073 if (update) {
3074 status = nfserr_noent;
3075 goto out;
3076 }
3077
3078 unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
3079 if (unconf)
3080 unhash_client_locked(unconf);
3081
3082
3083 out_new:
3084 if (conf) {
3085 status = mark_client_expired_locked(conf);
3086 if (status)
3087 goto out;
3088 }
3089 new->cl_minorversion = cstate->minorversion;
3090 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
3091 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
3092
3093 add_to_unconfirmed(new);
3094 swap(new, conf);
3095 out_copy:
3096 exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
3097 exid->clientid.cl_id = conf->cl_clientid.cl_id;
3098
3099 exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
3100 nfsd4_set_ex_flags(conf, exid);
3101
3102 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
3103 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
3104 status = nfs_ok;
3105
3106 out:
3107 spin_unlock(&nn->client_lock);
3108 out_nolock:
3109 if (new)
3110 expire_client(new);
3111 if (unconf)
3112 expire_client(unconf);
3113 return status;
3114 }
3115
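/*
 * Slot seqid rules: seqid == slot_seqid + 1 is a new request,
 * seqid == slot_seqid is a replay (or jukebox if the slot is still in
 * use), and anything else is misordered.
 */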
3116 static __be32
3117 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
3118 {
3119 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
3120 slot_seqid);
3121
3122
3123 if (slot_inuse) {
3124 if (seqid == slot_seqid)
3125 return nfserr_jukebox;
3126 else
3127 return nfserr_seq_misordered;
3128 }
3129
3130 if (likely(seqid == slot_seqid + 1))
3131 return nfs_ok;
3132 if (seqid == slot_seqid)
3133 return nfserr_replay_cache;
3134 return nfserr_seq_misordered;
3135 }
3136
3137 
3138 /*
3139  * Cache the CREATE_SESSION result in the client's single create-session
3140  * slot so that a retransmitted CREATE_SESSION can be replayed.
3141  */
3142 static void
3143 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
3144 struct nfsd4_clid_slot *slot, __be32 nfserr)
3145 {
3146 slot->sl_status = nfserr;
3147 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
3148 }
3149
3150 static __be32
3151 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
3152 struct nfsd4_clid_slot *slot)
3153 {
3154 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
3155 return slot->sl_status;
3156 }
3157
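/*
 * Minimum on-the-wire sizes of a compound carrying just a SEQUENCE op,
 * used to sanity-check the fore channel's maxreq_sz and maxresp_sz.
 */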
3158 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
3159 2 * 2 + \
3160 1 + \
3161 3 + \
3162 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3163 \
3164 4 ) * sizeof(__be32))
3165
3166 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
3167 2 + \
3168 1 + \
3169 1 + \
3170 3 + \
3171 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3172 \
3173 5 ) * sizeof(__be32))
3174
3175 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
3176 {
3177 u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
3178
3179 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
3180 return nfserr_toosmall;
3181 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
3182 return nfserr_toosmall;
3183 ca->headerpadsz = 0;
3184 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
3185 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
3186 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
3187 ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
3188 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
3189 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
3190 
3191 
3192 
3193 /*
3194  * Shrinking a slot below what the client asked for can make it hard
3195  * for the client to function correctly, whereas shrinking the number
3196  * of slots mostly just affects performance.  So when DRC memory is
3197  * tight, nfsd4_get_drc_mem() prefers to reduce the slot count rather
3198  * than the slot size.
3199  */
3200 ca->maxreqs = nfsd4_get_drc_mem(ca, nn);
3201
3202 return nfs_ok;
3203 }
3204
3205 
3206 
3207 
3208 /* The NFSv4.1 backchannel currently supports only AUTH_NULL and AUTH_UNIX. */
3209 #define RPC_MAX_HEADER_WITH_AUTH_SYS \
3210 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
3211
3212 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
3213 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
3214
3215 #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
3216 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
3217 #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
3218 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
3219 sizeof(__be32))
3220
3221 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
3222 {
3223 ca->headerpadsz = 0;
3224
3225 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
3226 return nfserr_toosmall;
3227 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
3228 return nfserr_toosmall;
3229 ca->maxresp_cached = 0;
3230 if (ca->maxops < 2)
3231 return nfserr_toosmall;
3232
3233 return nfs_ok;
3234 }
3235
3236 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
3237 {
3238 switch (cbs->flavor) {
3239 case RPC_AUTH_NULL:
3240 case RPC_AUTH_UNIX:
3241 return nfs_ok;
3242 default:
3243 /*
3244  * RPCSEC_GSS on the backchannel is not supported yet; fail the
3245  * request rather than silently downgrading callback security.
3246  */
3247 
3248 
3249 
3250 return nfserr_encr_alg_unsupp;
3251 }
3252 }
3253
3254 __be32
3255 nfsd4_create_session(struct svc_rqst *rqstp,
3256 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3257 {
3258 struct nfsd4_create_session *cr_ses = &u->create_session;
3259 struct sockaddr *sa = svc_addr(rqstp);
3260 struct nfs4_client *conf, *unconf;
3261 struct nfs4_client *old = NULL;
3262 struct nfsd4_session *new;
3263 struct nfsd4_conn *conn;
3264 struct nfsd4_clid_slot *cs_slot = NULL;
3265 __be32 status = 0;
3266 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3267
3268 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
3269 return nfserr_inval;
3270 status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
3271 if (status)
3272 return status;
3273 status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
3274 if (status)
3275 return status;
3276 status = check_backchannel_attrs(&cr_ses->back_channel);
3277 if (status)
3278 goto out_release_drc_mem;
3279 status = nfserr_jukebox;
3280 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
3281 if (!new)
3282 goto out_release_drc_mem;
3283 conn = alloc_conn_from_crses(rqstp, cr_ses);
3284 if (!conn)
3285 goto out_free_session;
3286
3287 spin_lock(&nn->client_lock);
3288 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
3289 conf = find_confirmed_client(&cr_ses->clientid, true, nn);
3290 WARN_ON_ONCE(conf && unconf);
3291
3292 if (conf) {
3293 status = nfserr_wrong_cred;
3294 if (!nfsd4_mach_creds_match(conf, rqstp))
3295 goto out_free_conn;
3296 cs_slot = &conf->cl_cs_slot;
3297 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3298 if (status) {
3299 if (status == nfserr_replay_cache)
3300 status = nfsd4_replay_create_session(cr_ses, cs_slot);
3301 goto out_free_conn;
3302 }
3303 } else if (unconf) {
3304 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
3305 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
3306 status = nfserr_clid_inuse;
3307 goto out_free_conn;
3308 }
3309 status = nfserr_wrong_cred;
3310 if (!nfsd4_mach_creds_match(unconf, rqstp))
3311 goto out_free_conn;
3312 cs_slot = &unconf->cl_cs_slot;
3313 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3314 if (status) {
3315 /* an unconfirmed replay returns misordered */
3316 status = nfserr_seq_misordered;
3317 goto out_free_conn;
3318 }
3319 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3320 if (old) {
3321 status = mark_client_expired_locked(old);
3322 if (status) {
3323 old = NULL;
3324 goto out_free_conn;
3325 }
3326 }
3327 move_to_confirmed(unconf);
3328 conf = unconf;
3329 } else {
3330 status = nfserr_stale_clientid;
3331 goto out_free_conn;
3332 }
3333 status = nfs_ok;
3334 /* Persistent sessions are not supported */
3335 cr_ses->flags &= ~SESSION4_PERSIST;
3336 /* Upshifting from TCP to RDMA is not supported */
3337 cr_ses->flags &= ~SESSION4_RDMA;
3338
3339 init_session(rqstp, new, conf, cr_ses);
3340 nfsd4_get_session_locked(new);
3341
3342 memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
3343 NFS4_MAX_SESSIONID_LEN);
3344 cs_slot->sl_seqid++;
3345 cr_ses->seqid = cs_slot->sl_seqid;
3346
3347
3348 nfsd4_cache_create_session(cr_ses, cs_slot, status);
3349 spin_unlock(&nn->client_lock);
3350
3351 nfsd4_init_conn(rqstp, conn, new);
3352 nfsd4_put_session(new);
3353 if (old)
3354 expire_client(old);
3355 return status;
3356 out_free_conn:
3357 spin_unlock(&nn->client_lock);
3358 free_conn(conn);
3359 if (old)
3360 expire_client(old);
3361 out_free_session:
3362 __free_session(new);
3363 out_release_drc_mem:
3364 nfsd4_put_drc_mem(&cr_ses->fore_channel);
3365 return status;
3366 }
3367
3368 static __be32 nfsd4_map_bcts_dir(u32 *dir)
3369 {
3370 switch (*dir) {
3371 case NFS4_CDFC4_FORE:
3372 case NFS4_CDFC4_BACK:
3373 return nfs_ok;
3374 case NFS4_CDFC4_FORE_OR_BOTH:
3375 case NFS4_CDFC4_BACK_OR_BOTH:
3376 *dir = NFS4_CDFC4_BOTH;
3377 return nfs_ok;
3378 }
3379 return nfserr_inval;
3380 }
3381
3382 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
3383 struct nfsd4_compound_state *cstate,
3384 union nfsd4_op_u *u)
3385 {
3386 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
3387 struct nfsd4_session *session = cstate->session;
3388 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3389 __be32 status;
3390
3391 status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
3392 if (status)
3393 return status;
3394 spin_lock(&nn->client_lock);
3395 session->se_cb_prog = bc->bc_cb_program;
3396 session->se_cb_sec = bc->bc_cb_sec;
3397 spin_unlock(&nn->client_lock);
3398
3399 nfsd4_probe_callback(session->se_client);
3400
3401 return nfs_ok;
3402 }
3403
3404 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
3405 struct nfsd4_compound_state *cstate,
3406 union nfsd4_op_u *u)
3407 {
3408 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
3409 __be32 status;
3410 struct nfsd4_conn *conn;
3411 struct nfsd4_session *session;
3412 struct net *net = SVC_NET(rqstp);
3413 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3414
3415 if (!nfsd4_last_compound_op(rqstp))
3416 return nfserr_not_only_op;
3417 spin_lock(&nn->client_lock);
3418 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
3419 spin_unlock(&nn->client_lock);
3420 if (!session)
3421 goto out_no_session;
3422 status = nfserr_wrong_cred;
3423 if (!nfsd4_mach_creds_match(session->se_client, rqstp))
3424 goto out;
3425 status = nfsd4_map_bcts_dir(&bcts->dir);
3426 if (status)
3427 goto out;
3428 conn = alloc_conn(rqstp, bcts->dir);
3429 status = nfserr_jukebox;
3430 if (!conn)
3431 goto out;
3432 nfsd4_init_conn(rqstp, conn, session);
3433 status = nfs_ok;
3434 out:
3435 nfsd4_put_session(session);
3436 out_no_session:
3437 return status;
3438 }
3439
3440 static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
3441 {
3442 if (!cstate->session)
3443 return false;
3444 return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
3445 }
3446
3447 __be32
3448 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
3449 union nfsd4_op_u *u)
3450 {
3451 struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
3452 struct nfsd4_session *ses;
3453 __be32 status;
3454 int ref_held_by_me = 0;
3455 struct net *net = SVC_NET(r);
3456 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3457
3458 status = nfserr_not_only_op;
3459 if (nfsd4_compound_in_session(cstate, sessionid)) {
3460 if (!nfsd4_last_compound_op(r))
3461 goto out;
3462 ref_held_by_me++;
3463 }
3464 dump_sessionid(__func__, sessionid);
3465 spin_lock(&nn->client_lock);
3466 ses = find_in_sessionid_hashtbl(sessionid, net, &status);
3467 if (!ses)
3468 goto out_client_lock;
3469 status = nfserr_wrong_cred;
3470 if (!nfsd4_mach_creds_match(ses->se_client, r))
3471 goto out_put_session;
3472 status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
3473 if (status)
3474 goto out_put_session;
3475 unhash_session(ses);
3476 spin_unlock(&nn->client_lock);
3477
3478 nfsd4_probe_callback_sync(ses->se_client);
3479
3480 spin_lock(&nn->client_lock);
3481 status = nfs_ok;
3482 out_put_session:
3483 nfsd4_put_session_locked(ses);
3484 out_client_lock:
3485 spin_unlock(&nn->client_lock);
3486 out:
3487 return status;
3488 }
3489
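/* Find an existing connection for this transport; caller holds cl_lock. */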
3490 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
3491 {
3492 struct nfsd4_conn *c;
3493
3494 list_for_each_entry(c, &s->se_conns, cn_persession) {
3495 if (c->cn_xprt == xpt) {
3496 return c;
3497 }
3498 }
3499 return NULL;
3500 }
3501
3502 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
3503 {
3504 struct nfs4_client *clp = ses->se_client;
3505 struct nfsd4_conn *c;
3506 __be32 status = nfs_ok;
3507 int ret;
3508
3509 spin_lock(&clp->cl_lock);
3510 c = __nfsd4_find_conn(new->cn_xprt, ses);
3511 if (c)
3512 goto out_free;
3513 status = nfserr_conn_not_bound_to_session;
3514 if (clp->cl_mach_cred)
3515 goto out_free;
3516 __nfsd4_hash_conn(new, ses);
3517 spin_unlock(&clp->cl_lock);
3518 ret = nfsd4_register_conn(new);
3519 if (ret)
3520 /* oops; xprt is already down: */
3521 nfsd4_conn_lost(&new->cn_xpt_user);
3522 return nfs_ok;
3523 out_free:
3524 spin_unlock(&clp->cl_lock);
3525 free_conn(new);
3526 return status;
3527 }
3528
3529 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
3530 {
3531 struct nfsd4_compoundargs *args = rqstp->rq_argp;
3532
3533 return args->opcnt > session->se_fchannel.maxops;
3534 }
3535
3536 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
3537 struct nfsd4_session *session)
3538 {
3539 struct xdr_buf *xb = &rqstp->rq_arg;
3540
3541 return xb->len > session->se_fchannel.maxreq_sz;
3542 }
3543
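/*
 * A retry is only treated as a valid replay if it matches what was
 * cached: the same cachethis setting, a compatible op count, and the
 * same credential.
 */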
3544 static bool replay_matches_cache(struct svc_rqst *rqstp,
3545 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
3546 {
3547 struct nfsd4_compoundargs *argp = rqstp->rq_argp;
3548
3549 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
3550 (bool)seq->cachethis)
3551 return false;
3552
3553
3554
3555
3556 if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
3557 return false;
3558
3559
3560
3561
3562
3563 if (slot->sl_opcnt > argp->opcnt)
3564 return false;
3565
3566 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
3567 return false;
3568
3569
3570
3571
3572
3573
3574 return true;
3575 }
3576
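/*
 * SEQUENCE: bind this connection to the session, validate the slot and
 * seqid, and either replay the cached reply or set the slot up to cache
 * this one.
 */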
3577 __be32
3578 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3579 union nfsd4_op_u *u)
3580 {
3581 struct nfsd4_sequence *seq = &u->sequence;
3582 struct nfsd4_compoundres *resp = rqstp->rq_resp;
3583 struct xdr_stream *xdr = &resp->xdr;
3584 struct nfsd4_session *session;
3585 struct nfs4_client *clp;
3586 struct nfsd4_slot *slot;
3587 struct nfsd4_conn *conn;
3588 __be32 status;
3589 int buflen;
3590 struct net *net = SVC_NET(rqstp);
3591 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3592
3593 if (resp->opcnt != 1)
3594 return nfserr_sequence_pos;
3595 
3596 /*
3597  * Will be either used or freed by nfsd4_sequence_check_conn
3598  * below.
3599  */
3600 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
3601 if (!conn)
3602 return nfserr_jukebox;
3603
3604 spin_lock(&nn->client_lock);
3605 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
3606 if (!session)
3607 goto out_no_session;
3608 clp = session->se_client;
3609
3610 status = nfserr_too_many_ops;
3611 if (nfsd4_session_too_many_ops(rqstp, session))
3612 goto out_put_session;
3613
3614 status = nfserr_req_too_big;
3615 if (nfsd4_request_too_big(rqstp, session))
3616 goto out_put_session;
3617
3618 status = nfserr_badslot;
3619 if (seq->slotid >= session->se_fchannel.maxreqs)
3620 goto out_put_session;
3621
3622 slot = session->se_slots[seq->slotid];
3623 dprintk("%s: slotid %d\n", __func__, seq->slotid);
3624 
3625 /* We do not negotiate the number of slots yet, so set the
3626  * maxslots to the session maxreqs which is used to encode
3627  * sl_maxslots. */
3628 seq->maxslots = session->se_fchannel.maxreqs;
3629
3630 status = check_slot_seqid(seq->seqid, slot->sl_seqid,
3631 slot->sl_flags & NFSD4_SLOT_INUSE);
3632 if (status == nfserr_replay_cache) {
3633 status = nfserr_seq_misordered;
3634 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
3635 goto out_put_session;
3636 status = nfserr_seq_false_retry;
3637 if (!replay_matches_cache(rqstp, seq, slot))
3638 goto out_put_session;
3639 cstate->slot = slot;
3640 cstate->session = session;
3641 cstate->clp = clp;
3642
3643
3644 status = nfsd4_replay_cache_entry(resp, seq);
3645 cstate->status = nfserr_replay_cache;
3646 goto out;
3647 }
3648 if (status)
3649 goto out_put_session;
3650
3651 status = nfsd4_sequence_check_conn(conn, session);
3652 conn = NULL;
3653 if (status)
3654 goto out_put_session;
3655
3656 buflen = (seq->cachethis) ?
3657 session->se_fchannel.maxresp_cached :
3658 session->se_fchannel.maxresp_sz;
3659 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
3660 nfserr_rep_too_big;
3661 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
3662 goto out_put_session;
3663 svc_reserve(rqstp, buflen);
3664
3665 status = nfs_ok;
3666 /* Success! Record the new seqid in the slot. */
3667 slot->sl_seqid = seq->seqid;
3668 slot->sl_flags |= NFSD4_SLOT_INUSE;
3669 if (seq->cachethis)
3670 slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
3671 else
3672 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
3673
3674 cstate->slot = slot;
3675 cstate->session = session;
3676 cstate->clp = clp;
3677
3678 out:
3679 switch (clp->cl_cb_state) {
3680 case NFSD4_CB_DOWN:
3681 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
3682 break;
3683 case NFSD4_CB_FAULT:
3684 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
3685 break;
3686 default:
3687 seq->status_flags = 0;
3688 }
3689 if (!list_empty(&clp->cl_revoked))
3690 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
3691 out_no_session:
3692 if (conn)
3693 free_conn(conn);
3694 spin_unlock(&nn->client_lock);
3695 return status;
3696 out_put_session:
3697 nfsd4_put_session_locked(session);
3698 goto out_no_session;
3699 }
3700
3701 void
3702 nfsd4_sequence_done(struct nfsd4_compoundres *resp)
3703 {
3704 struct nfsd4_compound_state *cs = &resp->cstate;
3705
3706 if (nfsd4_has_session(cs)) {
3707 if (cs->status != nfserr_replay_cache) {
3708 nfsd4_store_cache_entry(resp);
3709 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
3710 
3711 /* Drop the session reference that was taken in nfsd4_sequence(). */
3712 nfsd4_put_session(cs->session);
3713 } else if (cs->clp)
3714 put_client_renew(cs->clp);
3715 }
3716
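/*
 * DESTROY_CLIENTID: a confirmed client may only be destroyed once it no
 * longer holds any state; otherwise nfserr_clientid_busy is returned.
 */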
3717 __be32
3718 nfsd4_destroy_clientid(struct svc_rqst *rqstp,
3719 struct nfsd4_compound_state *cstate,
3720 union nfsd4_op_u *u)
3721 {
3722 struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
3723 struct nfs4_client *conf, *unconf;
3724 struct nfs4_client *clp = NULL;
3725 __be32 status = 0;
3726 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3727
3728 spin_lock(&nn->client_lock);
3729 unconf = find_unconfirmed_client(&dc->clientid, true, nn);
3730 conf = find_confirmed_client(&dc->clientid, true, nn);
3731 WARN_ON_ONCE(conf && unconf);
3732
3733 if (conf) {
3734 if (client_has_state(conf)) {
3735 status = nfserr_clientid_busy;
3736 goto out;
3737 }
3738 status = mark_client_expired_locked(conf);
3739 if (status)
3740 goto out;
3741 clp = conf;
3742 } else if (unconf)
3743 clp = unconf;
3744 else {
3745 status = nfserr_stale_clientid;
3746 goto out;
3747 }
3748 if (!nfsd4_mach_creds_match(clp, rqstp)) {
3749 clp = NULL;
3750 status = nfserr_wrong_cred;
3751 goto out;
3752 }
3753 unhash_client_locked(clp);
3754 out:
3755 spin_unlock(&nn->client_lock);
3756 if (clp)
3757 expire_client(clp);
3758 return status;
3759 }
3760
3761 __be32
3762 nfsd4_reclaim_complete(struct svc_rqst *rqstp,
3763 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3764 {
3765 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
3766 __be32 status = 0;
3767
3768 if (rc->rca_one_fs) {
3769 if (!cstate->current_fh.fh_dentry)
3770 return nfserr_nofilehandle;
3771 /*
3772  * We don't take advantage of the rca_one_fs case; that's OK, it's
3773  * optional and we can safely ignore it.
3774  */
3775 return nfs_ok;
3776 }
3777
3778 status = nfserr_complete_already;
3779 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
3780 &cstate->session->se_client->cl_flags))
3781 goto out;
3782
3783 status = nfserr_stale_clientid;
3784 if (is_client_expired(cstate->session->se_client))
3785 /*
3786  * The client has already been expired, so the particular error
3787  * returned here hardly matters; the client is on its way out
3788  * anyway.
3789  */
3790 
3791 
3792 goto out;
3793
3794 status = nfs_ok;
3795 nfsd4_client_record_create(cstate->session->se_client);
3796 inc_reclaim_complete(cstate->session->se_client);
3797 out:
3798 return status;
3799 }
3800
3801 __be32
3802 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3803 union nfsd4_op_u *u)
3804 {
3805 struct nfsd4_setclientid *setclid = &u->setclientid;
3806 struct xdr_netobj clname = setclid->se_name;
3807 nfs4_verifier clverifier = setclid->se_verf;
3808 struct nfs4_client *conf, *new;
3809 struct nfs4_client *unconf = NULL;
3810 __be32 status;
3811 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3812
3813 new = create_client(clname, rqstp, &clverifier);
3814 if (new == NULL)
3815 return nfserr_jukebox;
3816
3817 spin_lock(&nn->client_lock);
3818 conf = find_confirmed_client_by_name(&clname, nn);
3819 if (conf && client_has_state(conf)) {
3820
3821 status = nfserr_clid_inuse;
3822 if (clp_used_exchangeid(conf))
3823 goto out;
3824 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
3825 char addr_str[INET6_ADDRSTRLEN];
3826 rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
3827 sizeof(addr_str));
3828 dprintk("NFSD: setclientid: string in use by client "
3829 "at %s\n", addr_str);
3830 goto out;
3831 }
3832 }
3833 unconf = find_unconfirmed_client_by_name(&clname, nn);
3834 if (unconf)
3835 unhash_client_locked(unconf);
3836 if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
3837 /* case 1: probable callback update */
3838 copy_clid(new, conf);
3839 gen_confirm(new, nn);
3840 } else /* new client or client reboot: keep the freshly generated clid */
3841 ;
3842 new->cl_minorversion = 0;
3843 gen_callback(new, setclid, rqstp);
3844 add_to_unconfirmed(new);
3845 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
3846 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
3847 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
3848 new = NULL;
3849 status = nfs_ok;
3850 out:
3851 spin_unlock(&nn->client_lock);
3852 if (new)
3853 free_client(new);
3854 if (unconf)
3855 expire_client(unconf);
3856 return status;
3857 }
3858
3859
3860 __be32
3861 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
3862 struct nfsd4_compound_state *cstate,
3863 union nfsd4_op_u *u)
3864 {
3865 struct nfsd4_setclientid_confirm *setclientid_confirm =
3866 &u->setclientid_confirm;
3867 struct nfs4_client *conf, *unconf;
3868 struct nfs4_client *old = NULL;
3869 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
3870 clientid_t * clid = &setclientid_confirm->sc_clientid;
3871 __be32 status;
3872 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3873
3874 if (STALE_CLIENTID(clid, nn))
3875 return nfserr_stale_clientid;
3876
3877 spin_lock(&nn->client_lock);
3878 conf = find_confirmed_client(clid, false, nn);
3879 unconf = find_unconfirmed_client(clid, false, nn);
3880 
3881 /*
3882  * We try hard to give out unique clientids, so if we get an attempt
3883  * to confirm the same clientid with a different cred, the client may
3884  * be buggy; nevertheless, treat that case as "clientid in use":
3885  */
3886 
3887 status = nfserr_clid_inuse;
3888 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
3889 goto out;
3890 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
3891 goto out;
3892
3893 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
3894 if (conf && same_verf(&confirm, &conf->cl_confirm)) {
3895 /* case 2: probable retransmit */
3896 status = nfs_ok;
3897 } else
3898 status = nfserr_stale_clientid;
3899 goto out;
3900 }
3901 status = nfs_ok;
3902 if (conf) {
3903 old = unconf;
3904 unhash_client_locked(old);
3905 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
3906 } else {
3907 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3908 if (old) {
3909 status = nfserr_clid_inuse;
3910 if (client_has_state(old)
3911 && !same_creds(&unconf->cl_cred,
3912 &old->cl_cred))
3913 goto out;
3914 status = mark_client_expired_locked(old);
3915 if (status) {
3916 old = NULL;
3917 goto out;
3918 }
3919 }
3920 move_to_confirmed(unconf);
3921 conf = unconf;
3922 }
3923 get_client_locked(conf);
3924 spin_unlock(&nn->client_lock);
3925 nfsd4_probe_callback(conf);
3926 spin_lock(&nn->client_lock);
3927 put_client_renew_locked(conf);
3928 out:
3929 spin_unlock(&nn->client_lock);
3930 if (old)
3931 expire_client(old);
3932 return status;
3933 }
3934
3935 static struct nfs4_file *nfsd4_alloc_file(void)
3936 {
3937 return kmem_cache_alloc(file_slab, GFP_KERNEL);
3938 }
3939
3940
3941 static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
3942 struct nfs4_file *fp)
3943 {
3944 lockdep_assert_held(&state_lock);
3945
3946 refcount_set(&fp->fi_ref, 1);
3947 spin_lock_init(&fp->fi_lock);
3948 INIT_LIST_HEAD(&fp->fi_stateids);
3949 INIT_LIST_HEAD(&fp->fi_delegations);
3950 INIT_LIST_HEAD(&fp->fi_clnt_odstate);
3951 fh_copy_shallow(&fp->fi_fhandle, fh);
3952 fp->fi_deleg_file = NULL;
3953 fp->fi_had_conflict = false;
3954 fp->fi_share_deny = 0;
3955 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
3956 memset(fp->fi_access, 0, sizeof(fp->fi_access));
3957 #ifdef CONFIG_NFSD_PNFS
3958 INIT_LIST_HEAD(&fp->fi_lo_states);
3959 atomic_set(&fp->fi_lo_recalls, 0);
3960 #endif
3961 hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
3962 }
3963
3964 void
3965 nfsd4_free_slabs(void)
3966 {
3967 kmem_cache_destroy(client_slab);
3968 kmem_cache_destroy(openowner_slab);
3969 kmem_cache_destroy(lockowner_slab);
3970 kmem_cache_destroy(file_slab);
3971 kmem_cache_destroy(stateid_slab);
3972 kmem_cache_destroy(deleg_slab);
3973 kmem_cache_destroy(odstate_slab);
3974 }
3975
3976 int
3977 nfsd4_init_slabs(void)
3978 {
3979 client_slab = kmem_cache_create("nfsd4_clients",
3980 sizeof(struct nfs4_client), 0, 0, NULL);
3981 if (client_slab == NULL)
3982 goto out;
3983 openowner_slab = kmem_cache_create("nfsd4_openowners",
3984 sizeof(struct nfs4_openowner), 0, 0, NULL);
3985 if (openowner_slab == NULL)
3986 goto out_free_client_slab;
3987 lockowner_slab = kmem_cache_create("nfsd4_lockowners",
3988 sizeof(struct nfs4_lockowner), 0, 0, NULL);
3989 if (lockowner_slab == NULL)
3990 goto out_free_openowner_slab;
3991 file_slab = kmem_cache_create("nfsd4_files",
3992 sizeof(struct nfs4_file), 0, 0, NULL);
3993 if (file_slab == NULL)
3994 goto out_free_lockowner_slab;
3995 stateid_slab = kmem_cache_create("nfsd4_stateids",
3996 sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
3997 if (stateid_slab == NULL)
3998 goto out_free_file_slab;
3999 deleg_slab = kmem_cache_create("nfsd4_delegations",
4000 sizeof(struct nfs4_delegation), 0, 0, NULL);
4001 if (deleg_slab == NULL)
4002 goto out_free_stateid_slab;
4003 odstate_slab = kmem_cache_create("nfsd4_odstate",
4004 sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
4005 if (odstate_slab == NULL)
4006 goto out_free_deleg_slab;
4007 return 0;
4008
4009 out_free_deleg_slab:
4010 kmem_cache_destroy(deleg_slab);
4011 out_free_stateid_slab:
4012 kmem_cache_destroy(stateid_slab);
4013 out_free_file_slab:
4014 kmem_cache_destroy(file_slab);
4015 out_free_lockowner_slab:
4016 kmem_cache_destroy(lockowner_slab);
4017 out_free_openowner_slab:
4018 kmem_cache_destroy(openowner_slab);
4019 out_free_client_slab:
4020 kmem_cache_destroy(client_slab);
4021 out:
4022 dprintk("nfsd4: out of memory while initializing nfsv4\n");
4023 return -ENOMEM;
4024 }
4025
4026 static void init_nfs4_replay(struct nfs4_replay *rp)
4027 {
4028 rp->rp_status = nfserr_serverfault;
4029 rp->rp_buflen = 0;
4030 rp->rp_buf = rp->rp_ibuf;
4031 mutex_init(&rp->rp_mutex);
4032 }
4033
4034 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
4035 struct nfs4_stateowner *so)
4036 {
4037 if (!nfsd4_has_session(cstate)) {
4038 mutex_lock(&so->so_replay.rp_mutex);
4039 cstate->replay_owner = nfs4_get_stateowner(so);
4040 }
4041 }
4042
4043 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
4044 {
4045 struct nfs4_stateowner *so = cstate->replay_owner;
4046
4047 if (so != NULL) {
4048 cstate->replay_owner = NULL;
4049 mutex_unlock(&so->so_replay.rp_mutex);
4050 nfs4_put_stateowner(so);
4051 }
4052 }
4053
4054 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
4055 {
4056 struct nfs4_stateowner *sop;
4057
4058 sop = kmem_cache_alloc(slab, GFP_KERNEL);
4059 if (!sop)
4060 return NULL;
4061
4062 xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
4063 if (!sop->so_owner.data) {
4064 kmem_cache_free(slab, sop);
4065 return NULL;
4066 }
4067
4068 INIT_LIST_HEAD(&sop->so_stateids);
4069 sop->so_client = clp;
4070 init_nfs4_replay(&sop->so_replay);
4071 atomic_set(&sop->so_count, 1);
4072 return sop;
4073 }
4074
4075 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
4076 {
4077 lockdep_assert_held(&clp->cl_lock);
4078
4079 list_add(&oo->oo_owner.so_strhash,
4080 &clp->cl_ownerstr_hashtbl[strhashval]);
4081 list_add(&oo->oo_perclient, &clp->cl_openowners);
4082 }
4083
4084 static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
4085 {
4086 unhash_openowner_locked(openowner(so));
4087 }
4088
4089 static void nfs4_free_openowner(struct nfs4_stateowner *so)
4090 {
4091 struct nfs4_openowner *oo = openowner(so);
4092
4093 kmem_cache_free(openowner_slab, oo);
4094 }
4095
4096 static const struct nfs4_stateowner_operations openowner_ops = {
4097 .so_unhash = nfs4_unhash_openowner,
4098 .so_free = nfs4_free_openowner,
4099 };
4100
4101 static struct nfs4_ol_stateid *
4102 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4103 {
4104 struct nfs4_ol_stateid *local, *ret = NULL;
4105 struct nfs4_openowner *oo = open->op_openowner;
4106
4107 lockdep_assert_held(&fp->fi_lock);
4108
4109 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
4110
4111 if (local->st_stateowner->so_is_open_owner == 0)
4112 continue;
4113 if (local->st_stateowner != &oo->oo_owner)
4114 continue;
4115 if (local->st_stid.sc_type == NFS4_OPEN_STID) {
4116 ret = local;
4117 refcount_inc(&ret->st_stid.sc_count);
4118 break;
4119 }
4120 }
4121 return ret;
4122 }
4123
4124 static __be32
4125 nfsd4_verify_open_stid(struct nfs4_stid *s)
4126 {
4127 __be32 ret = nfs_ok;
4128
4129 switch (s->sc_type) {
4130 default:
4131 break;
4132 case 0:
4133 case NFS4_CLOSED_STID:
4134 case NFS4_CLOSED_DELEG_STID:
4135 ret = nfserr_bad_stateid;
4136 break;
4137 case NFS4_REVOKED_DELEG_STID:
4138 ret = nfserr_deleg_revoked;
4139 }
4140 return ret;
4141 }
4142
4143
4144 static __be32
4145 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
4146 {
4147 __be32 ret;
4148
4149 mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
4150 ret = nfsd4_verify_open_stid(&stp->st_stid);
4151 if (ret != nfs_ok)
4152 mutex_unlock(&stp->st_mutex);
4153 return ret;
4154 }
4155
4156 static struct nfs4_ol_stateid *
4157 nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4158 {
4159 struct nfs4_ol_stateid *stp;
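/*
 * Look up the open stateid under fi_lock, then take its mutex outside
 * the lock; if it was closed in the meantime, drop it and retry.
 */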
4160 for (;;) {
4161 spin_lock(&fp->fi_lock);
4162 stp = nfsd4_find_existing_open(fp, open);
4163 spin_unlock(&fp->fi_lock);
4164 if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
4165 break;
4166 nfs4_put_stid(&stp->st_stid);
4167 }
4168 return stp;
4169 }
4170
4171 static struct nfs4_openowner *
4172 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
4173 struct nfsd4_compound_state *cstate)
4174 {
4175 struct nfs4_client *clp = cstate->clp;
4176 struct nfs4_openowner *oo, *ret;
4177
4178 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
4179 if (!oo)
4180 return NULL;
4181 oo->oo_owner.so_ops = &openowner_ops;
4182 oo->oo_owner.so_is_open_owner = 1;
4183 oo->oo_owner.so_seqid = open->op_seqid;
4184 oo->oo_flags = 0;
4185 if (nfsd4_has_session(cstate))
4186 oo->oo_flags |= NFS4_OO_CONFIRMED;
4187 oo->oo_time = 0;
4188 oo->oo_last_closed_stid = NULL;
4189 INIT_LIST_HEAD(&oo->oo_close_lru);
4190 spin_lock(&clp->cl_lock);
4191 ret = find_openstateowner_str_locked(strhashval, open, clp);
4192 if (ret == NULL) {
4193 hash_openowner(oo, clp, strhashval);
4194 ret = oo;
4195 } else
4196 nfs4_free_stateowner(&oo->oo_owner);
4197
4198 spin_unlock(&clp->cl_lock);
4199 return ret;
4200 }
4201
4202 static struct nfs4_ol_stateid *
4203 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
4204 {
4205
4206 struct nfs4_openowner *oo = open->op_openowner;
4207 struct nfs4_ol_stateid *retstp = NULL;
4208 struct nfs4_ol_stateid *stp;
4209
4210 stp = open->op_stp;
4211
4212 mutex_init(&stp->st_mutex);
4213 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
4214
4215 retry:
4216 spin_lock(&oo->oo_owner.so_client->cl_lock);
4217 spin_lock(&fp->fi_lock);
4218
4219 retstp = nfsd4_find_existing_open(fp, open);
4220 if (retstp)
4221 goto out_unlock;
4222
4223 open->op_stp = NULL;
4224 refcount_inc(&stp->st_stid.sc_count);
4225 stp->st_stid.sc_type = NFS4_OPEN_STID;
4226 INIT_LIST_HEAD(&stp->st_locks);
4227 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
4228 get_nfs4_file(fp);
4229 stp->st_stid.sc_file = fp;
4230 stp->st_access_bmap = 0;
4231 stp->st_deny_bmap = 0;
4232 stp->st_openstp = NULL;
4233 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
4234 list_add(&stp->st_perfile, &fp->fi_stateids);
4235
4236 out_unlock:
4237 spin_unlock(&fp->fi_lock);
4238 spin_unlock(&oo->oo_owner.so_client->cl_lock);
4239 if (retstp) {
4240 /* Handle races with CLOSE */
4241 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
4242 nfs4_put_stid(&retstp->st_stid);
4243 goto retry;
4244 }
4245 /* To keep mutex tracking happy */
4246 mutex_unlock(&stp->st_mutex);
4247 stp = retstp;
4248 }
4249 return stp;
4250 }
4251
4252 /*
4253 * In the 4.0 case we need to keep the owners around a little while to
4254 * handle CLOSE replay.  We still do need to release any file access
4255 * that is held by them before returning, however.
4256 */
4257 static void
4258 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
4259 {
4260 struct nfs4_ol_stateid *last;
4261 struct nfs4_openowner *oo = openowner(s->st_stateowner);
4262 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
4263 nfsd_net_id);
4264
4265 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
4266
4267 /*
4268 * We know that we hold one reference via nfsd4_close, and another
4269 * "persistent" reference for the client. If the refcount is higher
4270 * than 2, then there are still calls in progress that are using this
4271 * stateid. We can't put the sc_file reference until they are finished.
4272 * Wait for the refcount to drop to 2. Since it has been unhashed,
4273 * there should be no danger of the refcount going back up again at
4274 * this point.
4275 */
4276 wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
4277
4278 release_all_access(s);
4279 if (s->st_stid.sc_file) {
4280 put_nfs4_file(s->st_stid.sc_file);
4281 s->st_stid.sc_file = NULL;
4282 }
4283
4284 spin_lock(&nn->client_lock);
4285 last = oo->oo_last_closed_stid;
4286 oo->oo_last_closed_stid = s;
4287 list_move_tail(&oo->oo_close_lru, &nn->close_lru);
4288 oo->oo_time = get_seconds();
4289 spin_unlock(&nn->client_lock);
4290 if (last)
4291 nfs4_put_stid(&last->st_stid);
4292 }
4293
4294 /* search file_hashtbl[] for file */
4295 static struct nfs4_file *
4296 find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
4297 {
4298 struct nfs4_file *fp;
4299
4300 hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
4301 if (fh_match(&fp->fi_fhandle, fh)) {
4302 if (refcount_inc_not_zero(&fp->fi_ref))
4303 return fp;
4304 }
4305 }
4306 return NULL;
4307 }
4308
4309 struct nfs4_file *
4310 find_file(struct knfsd_fh *fh)
4311 {
4312 struct nfs4_file *fp;
4313 unsigned int hashval = file_hashval(fh);
4314
4315 rcu_read_lock();
4316 fp = find_file_locked(fh, hashval);
4317 rcu_read_unlock();
4318 return fp;
4319 }
4320
4321 static struct nfs4_file *
4322 find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
4323 {
4324 struct nfs4_file *fp;
4325 unsigned int hashval = file_hashval(fh);
4326
4327 rcu_read_lock();
4328 fp = find_file_locked(fh, hashval);
4329 rcu_read_unlock();
4330 if (fp)
4331 return fp;
4332
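/*
 * Not found by the lockless lookup above; recheck under state_lock and
 * insert the caller's preallocated nfs4_file if it is still absent.
 */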
4333 spin_lock(&state_lock);
4334 fp = find_file_locked(fh, hashval);
4335 if (likely(fp == NULL)) {
4336 nfsd4_init_file(fh, hashval, new);
4337 fp = new;
4338 }
4339 spin_unlock(&state_lock);
4340
4341 return fp;
4342 }
4343
4344 /*
4345 * Called to check deny when READ with all zero stateid or
4346 * WRITE with all zero or all one stateid
4347 */
4348 static __be32
4349 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
4350 {
4351 struct nfs4_file *fp;
4352 __be32 ret = nfs_ok;
4353
4354 fp = find_file(&current_fh->fh_handle);
4355 if (!fp)
4356 return ret;
4357
4358 spin_lock(&fp->fi_lock);
4359 if (fp->fi_share_deny & deny_type)
4360 ret = nfserr_locked;
4361 spin_unlock(&fp->fi_lock);
4362 put_nfs4_file(fp);
4363 return ret;
4364 }
4365
4366 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
4367 {
4368 struct nfs4_delegation *dp = cb_to_delegation(cb);
4369 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
4370 nfsd_net_id);
4371
4372 block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
4373
4374 /*
4375 * We can't queue the recall directly from nfsd_break_deleg_cb
4376 * because that callback runs with the locks-layer spinlock held.
4377 *
4378 * If dl_time != 0, the delegation has already been queued for a
4379 * lease break; don't queue it again.
4380 */
4381 spin_lock(&state_lock);
4382 if (dp->dl_time == 0) {
4383 dp->dl_time = get_seconds();
4384 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
4385 }
4386 spin_unlock(&state_lock);
4387 }
4388
4389 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
4390 struct rpc_task *task)
4391 {
4392 struct nfs4_delegation *dp = cb_to_delegation(cb);
4393
4394 if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID)
4395 return 1;
4396
4397 switch (task->tk_status) {
4398 case 0:
4399 return 1;
4400 case -NFS4ERR_DELAY:
4401 rpc_delay(task, 2 * HZ);
4402 return 0;
4403 case -EBADHANDLE:
4404 case -NFS4ERR_BAD_STATEID:
4405 /*
4406 * Race: client probably got cb_recall before open reply
4407 * granting delegation.
4408 */
4409 if (dp->dl_retries--) {
4410 rpc_delay(task, 2 * HZ);
4411 return 0;
4412 }
4413 /* FALLTHRU */
4414 default:
4415 return 1;
4416 }
4417 }
4418
4419 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
4420 {
4421 struct nfs4_delegation *dp = cb_to_delegation(cb);
4422
4423 nfs4_put_stid(&dp->dl_stid);
4424 }
4425
4426 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
4427 .prepare = nfsd4_cb_recall_prepare,
4428 .done = nfsd4_cb_recall_done,
4429 .release = nfsd4_cb_recall_release,
4430 };
4431
4432 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
4433 {
4434 /*
4435 * The state code never drops its reference to a delegation
4436 * without first removing the lease.  Since we are called from
4437 * the lease-break path, the lease (and hence the delegation)
4438 * still exists, so taking another reference on the stateid
4439 * here is safe.
4440 */
4441 refcount_inc(&dp->dl_stid.sc_count);
4442 nfsd4_run_cb(&dp->dl_recall);
4443 }
4444
4445 /* Called from the locks layer when a lease is being broken */
4446 static bool
4447 nfsd_break_deleg_cb(struct file_lock *fl)
4448 {
4449 bool ret = false;
4450 struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
4451 struct nfs4_file *fp = dp->dl_stid.sc_file;
4452
4453
4454
4455
4456
4457
4458 fl->fl_break_time = 0;
4459
4460 spin_lock(&fp->fi_lock);
4461 fp->fi_had_conflict = true;
4462 nfsd_break_one_deleg(dp);
4463 spin_unlock(&fp->fi_lock);
4464 return ret;
4465 }
4466
4467 static int
4468 nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
4469 struct list_head *dispose)
4470 {
4471 if (arg & F_UNLCK)
4472 return lease_modify(onlist, arg, dispose);
4473 else
4474 return -EAGAIN;
4475 }
4476
4477 static const struct lock_manager_operations nfsd_lease_mng_ops = {
4478 .lm_break = nfsd_break_deleg_cb,
4479 .lm_change = nfsd_change_deleg_cb,
4480 };
4481
4482 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
4483 {
4484 if (nfsd4_has_session(cstate))
4485 return nfs_ok;
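/* A seqid exactly one less than expected indicates a client retransmission */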
4486 if (seqid == so->so_seqid - 1)
4487 return nfserr_replay_me;
4488 if (seqid == so->so_seqid)
4489 return nfs_ok;
4490 return nfserr_bad_seqid;
4491 }
4492
4493 static __be32 lookup_clientid(clientid_t *clid,
4494 struct nfsd4_compound_state *cstate,
4495 struct nfsd_net *nn)
4496 {
4497 struct nfs4_client *found;
4498
4499 if (cstate->clp) {
4500 found = cstate->clp;
4501 if (!same_clid(&found->cl_clientid, clid))
4502 return nfserr_stale_clientid;
4503 return nfs_ok;
4504 }
4505
4506 if (STALE_CLIENTID(clid, nn))
4507 return nfserr_stale_clientid;
4508
4509 /*
4510 * In the v4.1+ case the client was already looked up as part of the
4511 * SEQUENCE op and cstate->clp would have been set above, so if we
4512 * get here this must be a sessionless (v4.0) request.
4513 */
4514 WARN_ON_ONCE(cstate->session);
4515 spin_lock(&nn->client_lock);
4516 found = find_confirmed_client(clid, false, nn);
4517 if (!found) {
4518 spin_unlock(&nn->client_lock);
4519 return nfserr_expired;
4520 }
4521 atomic_inc(&found->cl_rpc_users);
4522 spin_unlock(&nn->client_lock);
4523
4524 /* Cache the nfs4_client in cstate for the rest of the compound */
4525 cstate->clp = found;
4526 return nfs_ok;
4527 }
4528
4529 __be32
4530 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
4531 struct nfsd4_open *open, struct nfsd_net *nn)
4532 {
4533 clientid_t *clientid = &open->op_clientid;
4534 struct nfs4_client *clp = NULL;
4535 unsigned int strhashval;
4536 struct nfs4_openowner *oo = NULL;
4537 __be32 status;
4538
4539 if (STALE_CLIENTID(&open->op_clientid, nn))
4540 return nfserr_stale_clientid;
4541 /*
4542 * In case we need it later, after we've already created the
4543 * file and don't want to risk a further failure:
4544 */
4545 open->op_file = nfsd4_alloc_file();
4546 if (open->op_file == NULL)
4547 return nfserr_jukebox;
4548
4549 status = lookup_clientid(clientid, cstate, nn);
4550 if (status)
4551 return status;
4552 clp = cstate->clp;
4553
4554 strhashval = ownerstr_hashval(&open->op_owner);
4555 oo = find_openstateowner_str(strhashval, open, clp);
4556 open->op_openowner = oo;
4557 if (!oo) {
4558 goto new_owner;
4559 }
4560 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
4561 /* Replace unconfirmed owners without checking for replay. */
4562 release_openowner(oo);
4563 open->op_openowner = NULL;
4564 goto new_owner;
4565 }
4566 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
4567 if (status)
4568 return status;
4569 goto alloc_stateid;
4570 new_owner:
4571 oo = alloc_init_open_stateowner(strhashval, open, cstate);
4572 if (oo == NULL)
4573 return nfserr_jukebox;
4574 open->op_openowner = oo;
4575 alloc_stateid:
4576 open->op_stp = nfs4_alloc_open_stateid(clp);
4577 if (!open->op_stp)
4578 return nfserr_jukebox;
4579
4580 if (nfsd4_has_session(cstate) &&
4581 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
4582 open->op_odstate = alloc_clnt_odstate(clp);
4583 if (!open->op_odstate)
4584 return nfserr_jukebox;
4585 }
4586
4587 return nfs_ok;
4588 }
4589
4590 static inline __be32
4591 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
4592 {
4593 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
4594 return nfserr_openmode;
4595 else
4596 return nfs_ok;
4597 }
4598
4599 static int share_access_to_flags(u32 share_access)
4600 {
4601 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
4602 }
4603
4604 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
4605 {
4606 struct nfs4_stid *ret;
4607
4608 ret = find_stateid_by_type(cl, s,
4609 NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
4610 if (!ret)
4611 return NULL;
4612 return delegstateid(ret);
4613 }
4614
4615 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
4616 {
4617 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
4618 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
4619 }
4620
4621 static __be32
4622 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
4623 struct nfs4_delegation **dp)
4624 {
4625 int flags;
4626 __be32 status = nfserr_bad_stateid;
4627 struct nfs4_delegation *deleg;
4628
4629 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
4630 if (deleg == NULL)
4631 goto out;
4632 if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
4633 nfs4_put_stid(&deleg->dl_stid);
4634 if (cl->cl_minorversion)
4635 status = nfserr_deleg_revoked;
4636 goto out;
4637 }
4638 flags = share_access_to_flags(open->op_share_access);
4639 status = nfs4_check_delegmode(deleg, flags);
4640 if (status) {
4641 nfs4_put_stid(&deleg->dl_stid);
4642 goto out;
4643 }
4644 *dp = deleg;
4645 out:
4646 if (!nfsd4_is_deleg_cur(open))
4647 return nfs_ok;
4648 if (status)
4649 return status;
4650 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
4651 return nfs_ok;
4652 }
4653
4654 static inline int nfs4_access_to_access(u32 nfs4_access)
4655 {
4656 int flags = 0;
4657
4658 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
4659 flags |= NFSD_MAY_READ;
4660 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
4661 flags |= NFSD_MAY_WRITE;
4662 return flags;
4663 }
4664
4665 static inline __be32
4666 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
4667 struct nfsd4_open *open)
4668 {
4669 struct iattr iattr = {
4670 .ia_valid = ATTR_SIZE,
4671 .ia_size = 0,
4672 };
4673 if (!open->op_truncate)
4674 return 0;
4675 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
4676 return nfserr_inval;
4677 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
4678 }
4679
4680 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
4681 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
4682 struct nfsd4_open *open)
4683 {
4684 struct nfsd_file *nf = NULL;
4685 __be32 status;
4686 int oflag = nfs4_access_to_omode(open->op_share_access);
4687 int access = nfs4_access_to_access(open->op_share_access);
4688 unsigned char old_access_bmap, old_deny_bmap;
4689
4690 spin_lock(&fp->fi_lock);
4691
4692 /*
4693 * Are we trying to set a deny mode that would conflict with
4694 * current access?
4695 */
4696 status = nfs4_file_check_deny(fp, open->op_share_deny);
4697 if (status != nfs_ok) {
4698 spin_unlock(&fp->fi_lock);
4699 goto out;
4700 }
4701
4702 /* set access to the file */
4703 status = nfs4_file_get_access(fp, open->op_share_access);
4704 if (status != nfs_ok) {
4705 spin_unlock(&fp->fi_lock);
4706 goto out;
4707 }
4708
4709 /* Set access bits in stateid */
4710 old_access_bmap = stp->st_access_bmap;
4711 set_access(open->op_share_access, stp);
4712
4713 /* Set new deny mask */
4714 old_deny_bmap = stp->st_deny_bmap;
4715 set_deny(open->op_share_deny, stp);
4716 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4717
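/*
 * The struct file for this access mode is cached in fi_fds[]; if it is
 * not there yet, drop fi_lock, open the file, and recheck before caching.
 */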
4718 if (!fp->fi_fds[oflag]) {
4719 spin_unlock(&fp->fi_lock);
4720 status = nfsd_file_acquire(rqstp, cur_fh, access, &nf);
4721 if (status)
4722 goto out_put_access;
4723 spin_lock(&fp->fi_lock);
4724 if (!fp->fi_fds[oflag]) {
4725 fp->fi_fds[oflag] = nf;
4726 nf = NULL;
4727 }
4728 }
4729 spin_unlock(&fp->fi_lock);
4730 if (nf)
4731 nfsd_file_put(nf);
4732
4733 status = nfsd4_truncate(rqstp, cur_fh, open);
4734 if (status)
4735 goto out_put_access;
4736 out:
4737 return status;
4738 out_put_access:
4739 stp->st_access_bmap = old_access_bmap;
4740 nfs4_file_put_access(fp, open->op_share_access);
4741 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
4742 goto out;
4743 }
4744
4745 static __be32
4746 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
4747 {
4748 __be32 status;
4749 unsigned char old_deny_bmap = stp->st_deny_bmap;
4750
4751 if (!test_access(open->op_share_access, stp))
4752 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
4753
4754 /* test and set deny mode */
4755 spin_lock(&fp->fi_lock);
4756 status = nfs4_file_check_deny(fp, open->op_share_deny);
4757 if (status == nfs_ok) {
4758 set_deny(open->op_share_deny, stp);
4759 fp->fi_share_deny |=
4760 (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4761 }
4762 spin_unlock(&fp->fi_lock);
4763
4764 if (status != nfs_ok)
4765 return status;
4766
4767 status = nfsd4_truncate(rqstp, cur_fh, open);
4768 if (status != nfs_ok)
4769 reset_union_bmap_deny(old_deny_bmap, stp);
4770 return status;
4771 }
4772
4773 /* Should we give out recallable (delegation) state to this client? */
4774 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
4775 {
4776 if (clp->cl_cb_state == NFSD4_CB_UP)
4777 return true;
4778 /*
4779 * In the sessions case, since we don't have to establish a
4780 * separate connection for callbacks, we assume it's OK
4781 * until we hear otherwise:
4782 */
4783 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
4784 }
4785
4786 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
4787 int flag)
4788 {
4789 struct file_lock *fl;
4790
4791 fl = locks_alloc_lock();
4792 if (!fl)
4793 return NULL;
4794 fl->fl_lmops = &nfsd_lease_mng_ops;
4795 fl->fl_flags = FL_DELEG;
4796 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
4797 fl->fl_end = OFFSET_MAX;
4798 fl->fl_owner = (fl_owner_t)dp;
4799 fl->fl_pid = current->tgid;
4800 fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
4801 return fl;
4802 }
4803
4804 static struct nfs4_delegation *
4805 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
4806 struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
4807 {
4808 int status = 0;
4809 struct nfs4_delegation *dp;
4810 struct nfsd_file *nf;
4811 struct file_lock *fl;
4812
4813 /*
4814 * The fi_had_conflict and nfs4_delegation_exists checks below are
4815 * only optimizations; both conditions are rechecked under the
4816 * locks after the lease has actually been set.
4817 */
4818 if (fp->fi_had_conflict)
4819 return ERR_PTR(-EAGAIN);
4820
4821 nf = find_readable_file(fp);
4822 if (!nf) {
4823 /* We should always have a readable file here */
4824 WARN_ON_ONCE(1);
4825 return ERR_PTR(-EBADF);
4826 }
4827 spin_lock(&state_lock);
4828 spin_lock(&fp->fi_lock);
4829 if (nfs4_delegation_exists(clp, fp))
4830 status = -EAGAIN;
4831 else if (!fp->fi_deleg_file) {
4832 fp->fi_deleg_file = nf;
4833 /* increment early to prevent fi_deleg_file from being
4834 * cleared */
4835 fp->fi_delegees = 1;
4836 nf = NULL;
4837 } else
4838 fp->fi_delegees++;
4839 spin_unlock(&fp->fi_lock);
4840 spin_unlock(&state_lock);
4841 if (nf)
4842 nfsd_file_put(nf);
4843 if (status)
4844 return ERR_PTR(status);
4845
4846 status = -ENOMEM;
4847 dp = alloc_init_deleg(clp, fp, fh, odstate);
4848 if (!dp)
4849 goto out_delegees;
4850
4851 fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
4852 if (!fl)
4853 goto out_clnt_odstate;
4854
4855 status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
4856 if (fl)
4857 locks_free_lock(fl);
4858 if (status)
4859 goto out_clnt_odstate;
4860
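/* The lease is set; recheck for a conflict that may have raced in */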
4861 spin_lock(&state_lock);
4862 spin_lock(&fp->fi_lock);
4863 if (fp->fi_had_conflict)
4864 status = -EAGAIN;
4865 else
4866 status = hash_delegation_locked(dp, fp);
4867 spin_unlock(&fp->fi_lock);
4868 spin_unlock(&state_lock);
4869
4870 if (status)
4871 goto out_unlock;
4872
4873 return dp;
4874 out_unlock:
4875 vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
4876 out_clnt_odstate:
4877 put_clnt_odstate(dp->dl_clnt_odstate);
4878 nfs4_put_stid(&dp->dl_stid);
4879 out_delegees:
4880 put_deleg_file(fp);
4881 return ERR_PTR(status);
4882 }
4883
4884 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
4885 {
4886 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4887 if (status == -EAGAIN)
4888 open->op_why_no_deleg = WND4_CONTENTION;
4889 else {
4890 open->op_why_no_deleg = WND4_RESOURCE;
4891 switch (open->op_deleg_want) {
4892 case NFS4_SHARE_WANT_READ_DELEG:
4893 case NFS4_SHARE_WANT_WRITE_DELEG:
4894 case NFS4_SHARE_WANT_ANY_DELEG:
4895 break;
4896 case NFS4_SHARE_WANT_CANCEL:
4897 open->op_why_no_deleg = WND4_CANCELLED;
4898 break;
4899 case NFS4_SHARE_WANT_NO_DELEG:
4900 WARN_ON_ONCE(1);
4901 }
4902 }
4903 }
4904
4905 /*
4906 * Attempt to hand out a delegation.
4907 *
4908 * Note we don't support write delegations, and won't until the vfs has
4909 * proper support for them.
4910 */
4911 static void
4912 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
4913 struct nfs4_ol_stateid *stp)
4914 {
4915 struct nfs4_delegation *dp;
4916 struct nfs4_openowner *oo = openowner(stp->st_stateowner);
4917 struct nfs4_client *clp = stp->st_stid.sc_client;
4918 int cb_up;
4919 int status = 0;
4920
4921 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
4922 open->op_recall = 0;
4923 switch (open->op_claim_type) {
4924 case NFS4_OPEN_CLAIM_PREVIOUS:
4925 if (!cb_up)
4926 open->op_recall = 1;
4927 if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
4928 goto out_no_deleg;
4929 break;
4930 case NFS4_OPEN_CLAIM_NULL:
4931 case NFS4_OPEN_CLAIM_FH:
4932 /*
4933 * Let's not give out any delegations till everyone's
4934 * had the chance to reclaim theirs, *and* until
4935 * NLM locks have all been reclaimed:
4936 */
4937 if (locks_in_grace(clp->net))
4938 goto out_no_deleg;
4939 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
4940 goto out_no_deleg;
4941 /*
4942 * Also, if the file was opened for write or
4943 * create, there's a good chance the client's
4944 * about to write to it, resulting in an
4945 * immediate recall (since we don't support
4946 * write delegations):
4947 */
4948 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
4949 goto out_no_deleg;
4950 if (open->op_create == NFS4_OPEN_CREATE)
4951 goto out_no_deleg;
4952 break;
4953 default:
4954 goto out_no_deleg;
4955 }
4956 dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
4957 if (IS_ERR(dp))
4958 goto out_no_deleg;
4959
4960 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
4961
4962 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
4963 STATEID_VAL(&dp->dl_stid.sc_stateid));
4964 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
4965 nfs4_put_stid(&dp->dl_stid);
4966 return;
4967 out_no_deleg:
4968 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
4969 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
4970 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
4971 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
4972 open->op_recall = 1;
4973 }
4974
4975 /* 4.1 client asking for a delegation? */
4976 if (open->op_deleg_want)
4977 nfsd4_open_deleg_none_ext(open, status);
4978 return;
4979 }
4980
4981 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
4982 struct nfs4_delegation *dp)
4983 {
4984 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
4985 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4986 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4987 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
4988 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
4989 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4990 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4991 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
4992 }
4993 /* Otherwise the client must be confused wanting a delegation
4994 * it already has, therefore we don't return
4995 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
4996 */
4997 }
4998
4999 __be32
5000 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
5001 {
5002 struct nfsd4_compoundres *resp = rqstp->rq_resp;
5003 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
5004 struct nfs4_file *fp = NULL;
5005 struct nfs4_ol_stateid *stp = NULL;
5006 struct nfs4_delegation *dp = NULL;
5007 __be32 status;
5008 bool new_stp = false;
5009
5010 /*
5011 * Lookup file; if found, lookup stateid and check open request,
5012 * and check for delegations in the process of being recalled.
5013 * If not found, create the nfs4_file struct
5014 */
5015 fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
5016 if (fp != open->op_file) {
5017 status = nfs4_check_deleg(cl, open, &dp);
5018 if (status)
5019 goto out;
5020 stp = nfsd4_find_and_lock_existing_open(fp, open);
5021 } else {
5022 open->op_file = NULL;
5023 status = nfserr_bad_stateid;
5024 if (nfsd4_is_deleg_cur(open))
5025 goto out;
5026 }
5027
5028 if (!stp) {
5029 stp = init_open_stateid(fp, open);
5030 if (!open->op_stp)
5031 new_stp = true;
5032 }
5033
5034 /*
5035 * OPEN the file, or upgrade an existing OPEN.
5036 * If truncate fails, the OPEN fails.
5037 *
5038 * stp is already locked.
5039 */
5040 if (!new_stp) {
5041 /* Stateid was found, this is an OPEN upgrade */
5042 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
5043 if (status) {
5044 mutex_unlock(&stp->st_mutex);
5045 goto out;
5046 }
5047 } else {
5048 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
5049 if (status) {
5050 stp->st_stid.sc_type = NFS4_CLOSED_STID;
5051 release_open_stateid(stp);
5052 mutex_unlock(&stp->st_mutex);
5053 goto out;
5054 }
5055
5056 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
5057 open->op_odstate);
5058 if (stp->st_clnt_odstate == open->op_odstate)
5059 open->op_odstate = NULL;
5060 }
5061
5062 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
5063 mutex_unlock(&stp->st_mutex);
5064
5065 if (nfsd4_has_session(&resp->cstate)) {
5066 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
5067 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5068 open->op_why_no_deleg = WND4_NOT_WANTED;
5069 goto nodeleg;
5070 }
5071 }
5072
5073 /*
5074 * Attempt to hand out a delegation on successful open.
5075 * Failure to get one is not fatal; the open still succeeds.
5076 */
5077 nfs4_open_delegation(current_fh, open, stp);
5078 nodeleg:
5079 status = nfs_ok;
5080
5081 dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
5082 STATEID_VAL(&stp->st_stid.sc_stateid));
5083 out:
5084 /* 4.1 client trying to upgrade/downgrade a delegation? */
5085 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
5086 open->op_deleg_want)
5087 nfsd4_deleg_xgrade_none_ext(open, dp);
5088
5089 if (fp)
5090 put_nfs4_file(fp);
5091 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
5092 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
5093 /*
5094 * To finish the open response, we just need to set the rflags.
5095 */
5096 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
5097 if (nfsd4_has_session(&resp->cstate))
5098 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
5099 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
5100 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
5101
5102 if (dp)
5103 nfs4_put_stid(&dp->dl_stid);
5104 if (stp)
5105 nfs4_put_stid(&stp->st_stid);
5106
5107 return status;
5108 }
5109
5110 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
5111 struct nfsd4_open *open)
5112 {
5113 if (open->op_openowner) {
5114 struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
5115
5116 nfsd4_cstate_assign_replay(cstate, so);
5117 nfs4_put_stateowner(so);
5118 }
5119 if (open->op_file)
5120 kmem_cache_free(file_slab, open->op_file);
5121 if (open->op_stp)
5122 nfs4_put_stid(&open->op_stp->st_stid);
5123 if (open->op_odstate)
5124 kmem_cache_free(odstate_slab, open->op_odstate);
5125 }
5126
5127 __be32
5128 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5129 union nfsd4_op_u *u)
5130 {
5131 clientid_t *clid = &u->renew;
5132 struct nfs4_client *clp;
5133 __be32 status;
5134 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5135
5136 dprintk("process_renew(%08x/%08x): starting\n",
5137 clid->cl_boot, clid->cl_id);
5138 status = lookup_clientid(clid, cstate, nn);
5139 if (status)
5140 goto out;
5141 clp = cstate->clp;
5142 status = nfserr_cb_path_down;
5143 if (!list_empty(&clp->cl_delegations)
5144 && clp->cl_cb_state != NFSD4_CB_UP)
5145 goto out;
5146 status = nfs_ok;
5147 out:
5148 return status;
5149 }
5150
5151 void
5152 nfsd4_end_grace(struct nfsd_net *nn)
5153 {
5154 /* do nothing if grace period already ended */
5155 if (nn->grace_ended)
5156 return;
5157
5158 nn->grace_ended = true;
5159
5160 /*
5161 * If the server goes down again right now, an NFSv4
5162 * client will still be allowed to reclaim after it comes back up,
5163 * even if it hasn't yet had a chance to reclaim state this time.
5164 */
5165 nfsd4_record_grace_done(nn);
5166
5167 /*
5168 * At this point, NFSv4 clients can still reclaim, but if the server
5169 * crashes, any that have not yet reclaimed will be out of luck on
5170 * the next boot.
5171 *
5172 * (NFSv4.1+ clients are considered to have reclaimed once they call
5173 * RECLAIM_COMPLETE; NFSv4.0 clients, after their first OPEN.)
5174 */
5175 locks_end_grace(&nn->nfsd4_manager);
5176
5177 /*
5178 * Once lockd and any other containerized grace periods also end,
5179 * further acquisitions of file locks and opens for create are allowed.
5180 */
5181 }
5182
5183 /*
5184 * If we've waited a lease period but there are still clients trying to
5185 * reclaim, wait a little longer to give them a chance to finish.
5186 */
5187 static bool clients_still_reclaiming(struct nfsd_net *nn)
5188 {
5189 unsigned long now = get_seconds();
5190 unsigned long double_grace_period_end = nn->boot_time +
5191 2 * nn->nfsd4_lease;
5192
5193 if (nn->track_reclaim_completes &&
5194 atomic_read(&nn->nr_reclaim_complete) ==
5195 nn->reclaim_str_hashtbl_size)
5196 return false;
5197 if (!nn->somebody_reclaimed)
5198 return false;
5199 nn->somebody_reclaimed = false;
5200 /*
5201 * If we've given them *two* lease times to reclaim, and they're
5202 * still not done, give up:
5203 */
5204 if (time_after(now, double_grace_period_end))
5205 return false;
5206 return true;
5207 }
5208
5209 static time_t
5210 nfs4_laundromat(struct nfsd_net *nn)
5211 {
5212 struct nfs4_client *clp;
5213 struct nfs4_openowner *oo;
5214 struct nfs4_delegation *dp;
5215 struct nfs4_ol_stateid *stp;
5216 struct nfsd4_blocked_lock *nbl;
5217 struct list_head *pos, *next, reaplist;
5218 time_t cutoff = get_seconds() - nn->nfsd4_lease;
5219 time_t t, new_timeo = nn->nfsd4_lease;
5220
5221 dprintk("NFSD: laundromat service - starting\n");
5222
5223 if (clients_still_reclaiming(nn)) {
5224 new_timeo = 0;
5225 goto out;
5226 }
5227 dprintk("NFSD: end of grace period\n");
5228 nfsd4_end_grace(nn);
5229 INIT_LIST_HEAD(&reaplist);
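/* Collect expired clients under client_lock, then expire them outside it */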
5230 spin_lock(&nn->client_lock);
5231 list_for_each_safe(pos, next, &nn->client_lru) {
5232 clp = list_entry(pos, struct nfs4_client, cl_lru);
5233 if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
5234 t = clp->cl_time - cutoff;
5235 new_timeo = min(new_timeo, t);
5236 break;
5237 }
5238 if (mark_client_expired_locked(clp)) {
5239 dprintk("NFSD: client in use (clientid %08x)\n",
5240 clp->cl_clientid.cl_id);
5241 continue;
5242 }
5243 list_add(&clp->cl_lru, &reaplist);
5244 }
5245 spin_unlock(&nn->client_lock);
5246 list_for_each_safe(pos, next, &reaplist) {
5247 clp = list_entry(pos, struct nfs4_client, cl_lru);
5248 dprintk("NFSD: purging unused client (clientid %08x)\n",
5249 clp->cl_clientid.cl_id);
5250 list_del_init(&clp->cl_lru);
5251 expire_client(clp);
5252 }
5253 spin_lock(&state_lock);
5254 list_for_each_safe(pos, next, &nn->del_recall_lru) {
5255 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
5256 if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
5257 t = dp->dl_time - cutoff;
5258 new_timeo = min(new_timeo, t);
5259 break;
5260 }
5261 WARN_ON(!unhash_delegation_locked(dp));
5262 list_add(&dp->dl_recall_lru, &reaplist);
5263 }
5264 spin_unlock(&state_lock);
5265 while (!list_empty(&reaplist)) {
5266 dp = list_first_entry(&reaplist, struct nfs4_delegation,
5267 dl_recall_lru);
5268 list_del_init(&dp->dl_recall_lru);
5269 revoke_delegation(dp);
5270 }
5271
5272 spin_lock(&nn->client_lock);
5273 while (!list_empty(&nn->close_lru)) {
5274 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
5275 oo_close_lru);
5276 if (time_after((unsigned long)oo->oo_time,
5277 (unsigned long)cutoff)) {
5278 t = oo->oo_time - cutoff;
5279 new_timeo = min(new_timeo, t);
5280 break;
5281 }
5282 list_del_init(&oo->oo_close_lru);
5283 stp = oo->oo_last_closed_stid;
5284 oo->oo_last_closed_stid = NULL;
5285 spin_unlock(&nn->client_lock);
5286 nfs4_put_stid(&stp->st_stid);
5287 spin_lock(&nn->client_lock);
5288 }
5289 spin_unlock(&nn->client_lock);
5290
5291 /*
5292 * It's possible for a client to try and acquire an already held lock
5293 * that is being held for a long time, and then lose interest in it.
5294 * So, we clean out any un-revisited request after a lease period
5295 * under the assumption that the client is no longer interested.
5296 *
5297 * RFC5661, sec. 9.6 states that the client must not rely on getting
5298 * notifications and must continue to poll for locks, even when the
5299 * server supports them. Thus this shouldn't lead to clients blocking
5300 * indefinitely once the lock does become free.
5301 */
5302 BUG_ON(!list_empty(&reaplist));
5303 spin_lock(&nn->blocked_locks_lock);
5304 while (!list_empty(&nn->blocked_locks_lru)) {
5305 nbl = list_first_entry(&nn->blocked_locks_lru,
5306 struct nfsd4_blocked_lock, nbl_lru);
5307 if (time_after((unsigned long)nbl->nbl_time,
5308 (unsigned long)cutoff)) {
5309 t = nbl->nbl_time - cutoff;
5310 new_timeo = min(new_timeo, t);
5311 break;
5312 }
5313 list_move(&nbl->nbl_lru, &reaplist);
5314 list_del_init(&nbl->nbl_list);
5315 }
5316 spin_unlock(&nn->blocked_locks_lock);
5317
5318 while (!list_empty(&reaplist)) {
5319 nbl = list_first_entry(&reaplist,
5320 struct nfsd4_blocked_lock, nbl_lru);
5321 list_del_init(&nbl->nbl_lru);
5322 free_blocked_lock(nbl);
5323 }
5324 out:
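/* Never reschedule the laundromat sooner than the minimum timeout */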
5325 new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
5326 return new_timeo;
5327 }
5328
5329 static struct workqueue_struct *laundry_wq;
5330 static void laundromat_main(struct work_struct *);
5331
5332 static void
5333 laundromat_main(struct work_struct *laundry)
5334 {
5335 time_t t;
5336 struct delayed_work *dwork = to_delayed_work(laundry);
5337 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
5338 laundromat_work);
5339
5340 t = nfs4_laundromat(nn);
5341 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
5342 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
5343 }
5344
5345 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
5346 {
5347 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
5348 return nfserr_bad_stateid;
5349 return nfs_ok;
5350 }
5351
5352 static inline int
5353 access_permit_read(struct nfs4_ol_stateid *stp)
5354 {
5355 return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
5356 test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
5357 test_access(NFS4_SHARE_ACCESS_WRITE, stp);
5358 }
5359
5360 static inline int
5361 access_permit_write(struct nfs4_ol_stateid *stp)
5362 {
5363 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
5364 test_access(NFS4_SHARE_ACCESS_BOTH, stp);
5365 }
5366
5367 static
5368 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
5369 {
5370 __be32 status = nfserr_openmode;
5371
5372 /* Lock stateids defer to the access/deny modes of their open stateid */
5373 if (stp->st_openstp)
5374 stp = stp->st_openstp;
5375 if ((flags & WR_STATE) && !access_permit_write(stp))
5376 goto out;
5377 if ((flags & RD_STATE) && !access_permit_read(stp))
5378 goto out;
5379 status = nfs_ok;
5380 out:
5381 return status;
5382 }
5383
5384 static inline __be32
5385 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
5386 {
5387 if (ONE_STATEID(stateid) && (flags & RD_STATE))
5388 return nfs_ok;
5389 else if (opens_in_grace(net)) {
5390 /* Answer in remaining cases depends on existence of
5391 * conflicting state; so we must wait out the grace period. */
5392 return nfserr_grace;
5393 } else if (flags & WR_STATE)
5394 return nfs4_share_conflict(current_fh,
5395 NFS4_SHARE_DENY_WRITE);
5396 else
5397 return nfs4_share_conflict(current_fh,
5398 NFS4_SHARE_DENY_READ);
5399 }
5400
5401 /*
5402 * Allow READ/WRITE during grace period on recovered state only for files
5403 * that are not able to provide mandatory locking.
5404 */
5405 static inline int
5406 grace_disallows_io(struct net *net, struct inode *inode)
5407 {
5408 return opens_in_grace(net) && mandatory_lock(inode);
5409 }
5410
5411 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
5412 {
5413 /*
5414 * When sessions are used the stateid generation can be ignored
5415 * when it is zero.
5416 */
5417 if (has_session && in->si_generation == 0)
5418 return nfs_ok;
5419
5420 if (in->si_generation == ref->si_generation)
5421 return nfs_ok;
5422
5423 /* If the client sends us a stateid from the future, it's buggy: */
5424 if (nfsd4_stateid_generation_after(in, ref))
5425 return nfserr_bad_stateid;
5426
5427 /*
5428 * However, we could see a stateid from the past even from a
5429 * non-buggy client.  For example, if the client sends a lock
5430 * while some IO is outstanding, the lock may bump si_generation
5431 * while the IO is still in flight.  The client could avoid that
5432 * situation by waiting for responses on all the IO requests, but
5433 * better performance may result in retrying IO that receives an
5434 * old_stateid error if requests are rarely reordered in flight:
5435 */
5436 return nfserr_old_stateid;
5437 }
5438
5439 static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
5440 {
5441 __be32 ret;
5442
5443 spin_lock(&s->sc_lock);
5444 ret = nfsd4_verify_open_stid(s);
5445 if (ret == nfs_ok)
5446 ret = check_stateid_generation(in, &s->sc_stateid, has_session);
5447 spin_unlock(&s->sc_lock);
5448 return ret;
5449 }
5450
5451 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
5452 {
5453 if (ols->st_stateowner->so_is_open_owner &&
5454 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
5455 return nfserr_bad_stateid;
5456 return nfs_ok;
5457 }
5458
5459 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
5460 {
5461 struct nfs4_stid *s;
5462 __be32 status = nfserr_bad_stateid;
5463
5464 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5465 CLOSE_STATEID(stateid))
5466 return status;
5467
5468 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
5469 char addr_str[INET6_ADDRSTRLEN];
5470 rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
5471 sizeof(addr_str));
5472 pr_warn_ratelimited("NFSD: client %s testing state ID "
5473 "with incorrect client ID\n", addr_str);
5474 return status;
5475 }
5476 spin_lock(&cl->cl_lock);
5477 s = find_stateid_locked(cl, stateid);
5478 if (!s)
5479 goto out_unlock;
5480 status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
5481 if (status)
5482 goto out_unlock;
5483 switch (s->sc_type) {
5484 case NFS4_DELEG_STID:
5485 status = nfs_ok;
5486 break;
5487 case NFS4_REVOKED_DELEG_STID:
5488 status = nfserr_deleg_revoked;
5489 break;
5490 case NFS4_OPEN_STID:
5491 case NFS4_LOCK_STID:
5492 status = nfsd4_check_openowner_confirmed(openlockstateid(s));
5493 break;
5494 default:
5495 printk("unknown stateid type %x\n", s->sc_type);
5496 /* Fallthrough */
5497 case NFS4_CLOSED_STID:
5498 case NFS4_CLOSED_DELEG_STID:
5499 status = nfserr_bad_stateid;
5500 }
5501 out_unlock:
5502 spin_unlock(&cl->cl_lock);
5503 return status;
5504 }
5505
5506 __be32
5507 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
5508 stateid_t *stateid, unsigned char typemask,
5509 struct nfs4_stid **s, struct nfsd_net *nn)
5510 {
5511 __be32 status;
5512 bool return_revoked = false;
5513
5514 /*
5515 * Only return revoked delegations if explicitly asked;
5516 * otherwise we report revoked or bad_stateid status.
5517 */
5518 if (typemask & NFS4_REVOKED_DELEG_STID)
5519 return_revoked = true;
5520 else if (typemask & NFS4_DELEG_STID)
5521 typemask |= NFS4_REVOKED_DELEG_STID;
5522
5523 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5524 CLOSE_STATEID(stateid))
5525 return nfserr_bad_stateid;
5526 status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
5527 if (status == nfserr_stale_clientid) {
5528 if (cstate->session)
5529 return nfserr_bad_stateid;
5530 return nfserr_stale_stateid;
5531 }
5532 if (status)
5533 return status;
5534 *s = find_stateid_by_type(cstate->clp, stateid, typemask);
5535 if (!*s)
5536 return nfserr_bad_stateid;
5537 if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
5538 nfs4_put_stid(*s);
5539 if (cstate->minorversion)
5540 return nfserr_deleg_revoked;
5541 return nfserr_bad_stateid;
5542 }
5543 return nfs_ok;
5544 }
5545
5546 static struct nfsd_file *
5547 nfs4_find_file(struct nfs4_stid *s, int flags)
5548 {
5549 if (!s)
5550 return NULL;
5551
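/*
 * Delegation stateids carry their own cached struct file; open and lock
 * stateids use the per-file descriptor cache instead.
 */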
5552 switch (s->sc_type) {
5553 case NFS4_DELEG_STID:
5554 if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
5555 return NULL;
5556 return nfsd_file_get(s->sc_file->fi_deleg_file);
5557 case NFS4_OPEN_STID:
5558 case NFS4_LOCK_STID:
5559 if (flags & RD_STATE)
5560 return find_readable_file(s->sc_file);
5561 else
5562 return find_writeable_file(s->sc_file);
5563 break;
5564 }
5565
5566 return NULL;
5567 }
5568
5569 static __be32
5570 nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
5571 {
5572 __be32 status;
5573
5574 status = nfsd4_check_openowner_confirmed(ols);
5575 if (status)
5576 return status;
5577 return nfs4_check_openmode(ols, flags);
5578 }
5579
5580 static __be32
5581 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
5582 struct nfsd_file **nfp, int flags)
5583 {
5584 int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
5585 struct nfsd_file *nf;
5586 __be32 status;
5587
5588 nf = nfs4_find_file(s, flags);
5589 if (nf) {
5590 status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
5591 acc | NFSD_MAY_OWNER_OVERRIDE);
5592 if (status) {
5593 nfsd_file_put(nf);
5594 goto out;
5595 }
5596 } else {
5597 status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
5598 if (status)
5599 return status;
5600 }
5601 *nfp = nf;
5602 out:
5603 return status;
5604 }
5605
5606 /*
5607 * Checks for stateid operations
5608 */
5609 __be32
5610 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
5611 struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
5612 stateid_t *stateid, int flags, struct nfsd_file **nfp)
5613 {
5614 struct inode *ino = d_inode(fhp->fh_dentry);
5615 struct net *net = SVC_NET(rqstp);
5616 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5617 struct nfs4_stid *s = NULL;
5618 __be32 status;
5619
5620 if (nfp)
5621 *nfp = NULL;
5622
5623 if (grace_disallows_io(net, ino))
5624 return nfserr_grace;
5625
5626 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
5627 status = check_special_stateids(net, fhp, stateid, flags);
5628 goto done;
5629 }
5630
5631 status = nfsd4_lookup_stateid(cstate, stateid,
5632 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
5633 &s, nn);
5634 if (status)
5635 return status;
5636 status = nfsd4_stid_check_stateid_generation(stateid, s,
5637 nfsd4_has_session(cstate));
5638 if (status)
5639 goto out;
5640
5641 switch (s->sc_type) {
5642 case NFS4_DELEG_STID:
5643 status = nfs4_check_delegmode(delegstateid(s), flags);
5644 break;
5645 case NFS4_OPEN_STID:
5646 case NFS4_LOCK_STID:
5647 status = nfs4_check_olstateid(openlockstateid(s), flags);
5648 break;
5649 default:
5650 status = nfserr_bad_stateid;
5651 break;
5652 }
5653 if (status)
5654 goto out;
5655 status = nfs4_check_fh(fhp, s);
5656
5657 done:
5658 if (status == nfs_ok && nfp)
5659 status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
5660 out:
5661 if (s)
5662 nfs4_put_stid(s);
5663 return status;
5664 }
5665
5666 /*
5667 * Test if the stateid is valid
5668 */
5669 __be32
5670 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5671 union nfsd4_op_u *u)
5672 {
5673 struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
5674 struct nfsd4_test_stateid_id *stateid;
5675 struct nfs4_client *cl = cstate->session->se_client;
5676
5677 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
5678 stateid->ts_id_status =
5679 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
5680
5681 return nfs_ok;
5682 }
5683
5684 static __be32
5685 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
5686 {
5687 struct nfs4_ol_stateid *stp = openlockstateid(s);
5688 __be32 ret;
5689
5690 ret = nfsd4_lock_ol_stateid(stp);
5691 if (ret)
5692 goto out_put_stid;
5693
5694 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5695 if (ret)
5696 goto out;
5697
5698 ret = nfserr_locks_held;
5699 if (check_for_locks(stp->st_stid.sc_file,
5700 lockowner(stp->st_stateowner)))
5701 goto out;
5702
5703 release_lock_stateid(stp);
5704 ret = nfs_ok;
5705
5706 out:
5707 mutex_unlock(&stp->st_mutex);
5708 out_put_stid:
5709 nfs4_put_stid(s);
5710 return ret;
5711 }
5712
5713 __be32
5714 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5715 union nfsd4_op_u *u)
5716 {
5717 struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
5718 stateid_t *stateid = &free_stateid->fr_stateid;
5719 struct nfs4_stid *s;
5720 struct nfs4_delegation *dp;
5721 struct nfs4_client *cl = cstate->session->se_client;
5722 __be32 ret = nfserr_bad_stateid;
5723
5724 spin_lock(&cl->cl_lock);
5725 s = find_stateid_locked(cl, stateid);
5726 if (!s)
5727 goto out_unlock;
5728 spin_lock(&s->sc_lock);
5729 switch (s->sc_type) {
5730 case NFS4_DELEG_STID:
5731 ret = nfserr_locks_held;
5732 break;
5733 case NFS4_OPEN_STID:
5734 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5735 if (ret)
5736 break;
5737 ret = nfserr_locks_held;
5738 break;
5739 case NFS4_LOCK_STID:
5740 spin_unlock(&s->sc_lock);
5741 refcount_inc(&s->sc_count);
5742 spin_unlock(&cl->cl_lock);
5743 ret = nfsd4_free_lock_stateid(stateid, s);
5744 goto out;
5745 case NFS4_REVOKED_DELEG_STID:
5746 spin_unlock(&s->sc_lock);
5747 dp = delegstateid(s);
5748 list_del_init(&dp->dl_recall_lru);
5749 spin_unlock(&cl->cl_lock);
5750 nfs4_put_stid(s);
5751 ret = nfs_ok;
5752 goto out;
5753
5754 }
5755 spin_unlock(&s->sc_lock);
5756 out_unlock:
5757 spin_unlock(&cl->cl_lock);
5758 out:
5759 return ret;
5760 }
5761
5762 static inline int
5763 setlkflg (int type)
5764 {
5765 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
5766 RD_STATE : WR_STATE;
5767 }
5768
5769 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
5770 {
5771 struct svc_fh *current_fh = &cstate->current_fh;
5772 struct nfs4_stateowner *sop = stp->st_stateowner;
5773 __be32 status;
5774
5775 status = nfsd4_check_seqid(cstate, sop, seqid);
5776 if (status)
5777 return status;
5778 status = nfsd4_lock_ol_stateid(stp);
5779 if (status != nfs_ok)
5780 return status;
5781 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
5782 if (status == nfs_ok)
5783 status = nfs4_check_fh(current_fh, &stp->st_stid);
5784 if (status != nfs_ok)
5785 mutex_unlock(&stp->st_mutex);
5786 return status;
5787 }
5788
5789 /*
5790 * Checks for sequence id mutating operations.
5791 */
5792 static __be32
5793 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
5794 stateid_t *stateid, char typemask,
5795 struct nfs4_ol_stateid **stpp,
5796 struct nfsd_net *nn)
5797 {
5798 __be32 status;
5799 struct nfs4_stid *s;
5800 struct nfs4_ol_stateid *stp = NULL;
5801
5802 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
5803 seqid, STATEID_VAL(stateid));
5804
5805 *stpp = NULL;
5806 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
5807 if (status)
5808 return status;
5809 stp = openlockstateid(s);
5810 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
5811
5812 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
5813 if (!status)
5814 *stpp = stp;
5815 else
5816 nfs4_put_stid(&stp->st_stid);
5817 return status;
5818 }
5819
5820 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
5821 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
5822 {
5823 __be32 status;
5824 struct nfs4_openowner *oo;
5825 struct nfs4_ol_stateid *stp;
5826
5827 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
5828 NFS4_OPEN_STID, &stp, nn);
5829 if (status)
5830 return status;
5831 oo = openowner(stp->st_stateowner);
5832 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
5833 mutex_unlock(&stp->st_mutex);
5834 nfs4_put_stid(&stp->st_stid);
5835 return nfserr_bad_stateid;
5836 }
5837 *stpp = stp;
5838 return nfs_ok;
5839 }
5840
5841 __be32
5842 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5843 union nfsd4_op_u *u)
5844 {
5845 struct nfsd4_open_confirm *oc = &u->open_confirm;
5846 __be32 status;
5847 struct nfs4_openowner *oo;
5848 struct nfs4_ol_stateid *stp;
5849 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5850
5851 dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
5852 cstate->current_fh.fh_dentry);
5853
5854 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
5855 if (status)
5856 return status;
5857
5858 status = nfs4_preprocess_seqid_op(cstate,
5859 oc->oc_seqid, &oc->oc_req_stateid,
5860 NFS4_OPEN_STID, &stp, nn);
5861 if (status)
5862 goto out;
5863 oo = openowner(stp->st_stateowner);
5864 status = nfserr_bad_stateid;
5865 if (oo->oo_flags & NFS4_OO_CONFIRMED) {
5866 mutex_unlock(&stp->st_mutex);
5867 goto put_stateid;
5868 }
5869 oo->oo_flags |= NFS4_OO_CONFIRMED;
5870 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
5871 mutex_unlock(&stp->st_mutex);
5872 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
5873 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
5874
5875 nfsd4_client_record_create(oo->oo_owner.so_client);
5876 status = nfs_ok;
5877 put_stateid:
5878 nfs4_put_stid(&stp->st_stid);
5879 out:
5880 nfsd4_bump_seqid(cstate, status);
5881 return status;
5882 }
5883
5884 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
5885 {
5886 if (!test_access(access, stp))
5887 return;
5888 nfs4_file_put_access(stp->st_stid.sc_file, access);
5889 clear_access(access, stp);
5890 }
5891
5892 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
5893 {
5894 switch (to_access) {
5895 case NFS4_SHARE_ACCESS_READ:
5896 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
5897 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
5898 break;
5899 case NFS4_SHARE_ACCESS_WRITE:
5900 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
5901 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
5902 break;
5903 case NFS4_SHARE_ACCESS_BOTH:
5904 break;
5905 default:
5906 WARN_ON_ONCE(1);
5907 }
5908 }
5909
5910 __be32
5911 nfsd4_open_downgrade(struct svc_rqst *rqstp,
5912 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
5913 {
5914 struct nfsd4_open_downgrade *od = &u->open_downgrade;
5915 __be32 status;
5916 struct nfs4_ol_stateid *stp;
5917 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5918
5919 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
5920 cstate->current_fh.fh_dentry);
5921
5922 /* We don't yet support WANT bits: */
5923 if (od->od_deleg_want)
5924 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
5925 od->od_deleg_want);
5926
5927 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
5928 &od->od_stateid, &stp, nn);
5929 if (status)
5930 goto out;
5931 status = nfserr_inval;
5932 if (!test_access(od->od_share_access, stp)) {
5933 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
5934 stp->st_access_bmap, od->od_share_access);
5935 goto put_stateid;
5936 }
5937 if (!test_deny(od->od_share_deny, stp)) {
5938 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
5939 stp->st_deny_bmap, od->od_share_deny);
5940 goto put_stateid;
5941 }
5942 nfs4_stateid_downgrade(stp, od->od_share_access);
5943 reset_union_bmap_deny(od->od_share_deny, stp);
5944 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
5945 status = nfs_ok;
5946 put_stateid:
5947 mutex_unlock(&stp->st_mutex);
5948 nfs4_put_stid(&stp->st_stid);
5949 out:
5950 nfsd4_bump_seqid(cstate, status);
5951 return status;
5952 }
5953
5954 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
5955 {
5956 struct nfs4_client *clp = s->st_stid.sc_client;
5957 bool unhashed;
5958 LIST_HEAD(reaplist);
5959
5960 spin_lock(&clp->cl_lock);
5961 unhashed = unhash_open_stateid(s, &reaplist);
5962
5963 if (clp->cl_minorversion) {
5964 if (unhashed)
5965 put_ol_stateid_locked(s, &reaplist);
5966 spin_unlock(&clp->cl_lock);
5967 free_ol_stateid_reaplist(&reaplist);
5968 } else {
5969 spin_unlock(&clp->cl_lock);
5970 free_ol_stateid_reaplist(&reaplist);
5971 if (unhashed)
5972 move_to_close_lru(s, clp->net);
5973 }
5974 }
5975
5976
5977
5978
5979 __be32
5980 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5981 union nfsd4_op_u *u)
5982 {
5983 struct nfsd4_close *close = &u->close;
5984 __be32 status;
5985 struct nfs4_ol_stateid *stp;
5986 struct net *net = SVC_NET(rqstp);
5987 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5988
5989 dprintk("NFSD: nfsd4_close on file %pd\n",
5990 cstate->current_fh.fh_dentry);
5991
5992 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
5993 &close->cl_stateid,
5994 NFS4_OPEN_STID|NFS4_CLOSED_STID,
5995 &stp, nn);
5996 nfsd4_bump_seqid(cstate, status);
5997 if (status)
5998 goto out;
5999
6000 stp->st_stid.sc_type = NFS4_CLOSED_STID;
6001
6002 /*
6003 * Technically we don't have to increment or copy it, since it is
6004 * about to be gone anyway and we clobber the copied value below,
6005 * but we do so to ensure the returned stateid is always modified
6006 * under st_mutex.
6007 */
6008 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
6009
6010 nfsd4_close_open_stateid(stp);
6011 mutex_unlock(&stp->st_mutex);
6012
6013
6014 /*
6015 * v4.1+ suggests that we send a special stateid in here, since the
6016 * clients should just ignore this anyway.  Since this is not useful
6017 * for v4.0 clients either, we set it to the special close_stateid
6018 * anyway, to save on code.
6019 */
6020 memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));
6021
6022 /* put reference from nfs4_preprocess_seqid_op */
6023 nfs4_put_stid(&stp->st_stid);
6024 out:
6025 return status;
6026 }
6027
6028 __be32
6029 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6030 union nfsd4_op_u *u)
6031 {
6032 struct nfsd4_delegreturn *dr = &u->delegreturn;
6033 struct nfs4_delegation *dp;
6034 stateid_t *stateid = &dr->dr_stateid;
6035 struct nfs4_stid *s;
6036 __be32 status;
6037 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6038
6039 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
6040 return status;
6041
6042 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
6043 if (status)
6044 goto out;
6045 dp = delegstateid(s);
6046 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
6047 if (status)
6048 goto put_stateid;
6049
6050 destroy_delegation(dp);
6051 put_stateid:
6052 nfs4_put_stid(&dp->dl_stid);
6053 out:
6054 return status;
6055 }
6056
6057 static inline u64
6058 end_offset(u64 start, u64 len)
6059 {
6060 u64 end;
6061
6062 end = start + len;
6063 return end >= start ? end: NFS4_MAX_UINT64;
6064 }
6065
6066 /* last octet in a range */
6067 static inline u64
6068 last_byte_offset(u64 start, u64 len)
6069 {
6070 u64 end;
6071
6072 WARN_ON_ONCE(!len);
6073 end = start + len;
6074 return end > start ? end - 1: NFS4_MAX_UINT64;
6075 }
6076
6077 /*
6078 * TODO: Linux file offsets are _signed_ 64-bit quantities, which
6079 * means we can't properly represent lock ranges that extend beyond
6080 * the (2^63 - 1)th byte.
6081 *
6082 * NFSv4 uses the full unsigned 64-bit range, so rather than wrap,
6083 * any negative (overflowed) start or end is clamped to OFFSET_MAX.
6084 */
6085 static inline void
6086 nfs4_transform_lock_offset(struct file_lock *lock)
6087 {
6088 if (lock->fl_start < 0)
6089 lock->fl_start = OFFSET_MAX;
6090 if (lock->fl_end < 0)
6091 lock->fl_end = OFFSET_MAX;
6092 }
6093
6094 static fl_owner_t
6095 nfsd4_fl_get_owner(fl_owner_t owner)
6096 {
6097 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
6098
6099 nfs4_get_stateowner(&lo->lo_owner);
6100 return owner;
6101 }
6102
6103 static void
6104 nfsd4_fl_put_owner(fl_owner_t owner)
6105 {
6106 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
6107
6108 if (lo)
6109 nfs4_put_stateowner(&lo->lo_owner);
6110 }
6111
6112 static void
6113 nfsd4_lm_notify(struct file_lock *fl)
6114 {
6115 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
6116 struct net *net = lo->lo_owner.so_client->net;
6117 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6118 struct nfsd4_blocked_lock *nbl = container_of(fl,
6119 struct nfsd4_blocked_lock, nbl_lock);
6120 bool queue = false;
6121
6122 /* An empty nbl_list means something else is already tearing this block down */
6123 spin_lock(&nn->blocked_locks_lock);
6124 if (!list_empty(&nbl->nbl_list)) {
6125 list_del_init(&nbl->nbl_list);
6126 list_del_init(&nbl->nbl_lru);
6127 queue = true;
6128 }
6129 spin_unlock(&nn->blocked_locks_lock);
6130
6131 if (queue)
6132 nfsd4_run_cb(&nbl->nbl_cb);
6133 }
6134
6135 static const struct lock_manager_operations nfsd_posix_mng_ops = {
6136 .lm_notify = nfsd4_lm_notify,
6137 .lm_get_owner = nfsd4_fl_get_owner,
6138 .lm_put_owner = nfsd4_fl_put_owner,
6139 };
6140
6141 static inline void
6142 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
6143 {
6144 struct nfs4_lockowner *lo;
6145
6146 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
6147 lo = (struct nfs4_lockowner *) fl->fl_owner;
6148 xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
6149 GFP_KERNEL);
6150 if (!deny->ld_owner.data)
6151 /* Allocation failure here is not fatal; just return no owner info */
6152 goto nevermind;
6153 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
6154 } else {
6155 nevermind:
6156 deny->ld_owner.len = 0;
6157 deny->ld_owner.data = NULL;
6158 deny->ld_clientid.cl_boot = 0;
6159 deny->ld_clientid.cl_id = 0;
6160 }
6161 deny->ld_start = fl->fl_start;
6162 deny->ld_length = NFS4_MAX_UINT64;
6163 if (fl->fl_end != NFS4_MAX_UINT64)
6164 deny->ld_length = fl->fl_end - fl->fl_start + 1;
6165 deny->ld_type = NFS4_READ_LT;
6166 if (fl->fl_type != F_RDLCK)
6167 deny->ld_type = NFS4_WRITE_LT;
6168 }
6169
6170 static struct nfs4_lockowner *
6171 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
6172 {
6173 unsigned int strhashval = ownerstr_hashval(owner);
6174 struct nfs4_stateowner *so;
6175
6176 lockdep_assert_held(&clp->cl_lock);
6177
6178 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
6179 so_strhash) {
6180 if (so->so_is_open_owner)
6181 continue;
6182 if (same_owner_str(so, owner))
6183 return lockowner(nfs4_get_stateowner(so));
6184 }
6185 return NULL;
6186 }
6187
6188 static struct nfs4_lockowner *
6189 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
6190 {
6191 struct nfs4_lockowner *lo;
6192
6193 spin_lock(&clp->cl_lock);
6194 lo = find_lockowner_str_locked(clp, owner);
6195 spin_unlock(&clp->cl_lock);
6196 return lo;
6197 }
6198
6199 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
6200 {
6201 unhash_lockowner_locked(lockowner(sop));
6202 }
6203
6204 static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
6205 {
6206 struct nfs4_lockowner *lo = lockowner(sop);
6207
6208 kmem_cache_free(lockowner_slab, lo);
6209 }
6210
6211 static const struct nfs4_stateowner_operations lockowner_ops = {
6212 .so_unhash = nfs4_unhash_lockowner,
6213 .so_free = nfs4_free_lockowner,
6214 };
6215
6216 /*
6217 * Alloc a lock owner structure.
6218 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
6219 * occurred.
6220 *
6221 * strhashval = ownerstr_hashval
6222 */
6223 static struct nfs4_lockowner *
6224 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
6225 struct nfs4_ol_stateid *open_stp,
6226 struct nfsd4_lock *lock)
6227 {
6228 struct nfs4_lockowner *lo, *ret;
6229
6230 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
6231 if (!lo)
6232 return NULL;
6233 INIT_LIST_HEAD(&lo->lo_blocked);
6234 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
6235 lo->lo_owner.so_is_open_owner = 0;
6236 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
6237 lo->lo_owner.so_ops = &lockowner_ops;
6238 spin_lock(&clp->cl_lock);
6239 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
6240 if (ret == NULL) {
6241 list_add(&lo->lo_owner.so_strhash,
6242 &clp->cl_ownerstr_hashtbl[strhashval]);
6243 ret = lo;
6244 } else
6245 nfs4_free_stateowner(&lo->lo_owner);
6246
6247 spin_unlock(&clp->cl_lock);
6248 return ret;
6249 }
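/*
 * Note (added for exposition, not in the original source): the lockowner is
 * allocated before cl_lock is taken, then the hash chain is re-checked under
 * the lock.  If another thread raced us and hashed the same owner string
 * first, the freshly allocated owner is discarded and the existing one (whose
 * reference was already taken by find_lockowner_str_locked) is returned.
 */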
6250
6251 static struct nfs4_ol_stateid *
6252 find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
6253 {
6254 struct nfs4_ol_stateid *lst;
6255 struct nfs4_client *clp = lo->lo_owner.so_client;
6256
6257 lockdep_assert_held(&clp->cl_lock);
6258
6259 list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
6260 if (lst->st_stid.sc_type != NFS4_LOCK_STID)
6261 continue;
6262 if (lst->st_stid.sc_file == fp) {
6263 refcount_inc(&lst->st_stid.sc_count);
6264 return lst;
6265 }
6266 }
6267 return NULL;
6268 }
6269
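/*
 * Note (added for exposition, not in the original source): init_lock_stateid
 * initializes a pre-allocated stateid as a lock stateid and hashes it under
 * cl_lock and fi_lock.  If a racing thread already hashed a lock stateid for
 * the same lockowner/file pair, that one is locked and returned instead; if
 * it is being torn down (nfsd4_lock_ol_stateid() fails), it is dropped and
 * the lookup is retried.
 */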
6270 static struct nfs4_ol_stateid *
6271 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
6272 struct nfs4_file *fp, struct inode *inode,
6273 struct nfs4_ol_stateid *open_stp)
6274 {
6275 struct nfs4_client *clp = lo->lo_owner.so_client;
6276 struct nfs4_ol_stateid *retstp;
6277
6278 mutex_init(&stp->st_mutex);
6279 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
6280 retry:
6281 spin_lock(&clp->cl_lock);
6282 spin_lock(&fp->fi_lock);
6283 retstp = find_lock_stateid(lo, fp);
6284 if (retstp)
6285 goto out_unlock;
6286
6287 refcount_inc(&stp->st_stid.sc_count);
6288 stp->st_stid.sc_type = NFS4_LOCK_STID;
6289 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
6290 get_nfs4_file(fp);
6291 stp->st_stid.sc_file = fp;
6292 stp->st_access_bmap = 0;
6293 stp->st_deny_bmap = open_stp->st_deny_bmap;
6294 stp->st_openstp = open_stp;
6295 list_add(&stp->st_locks, &open_stp->st_locks);
6296 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
6297 list_add(&stp->st_perfile, &fp->fi_stateids);
6298 out_unlock:
6299 spin_unlock(&fp->fi_lock);
6300 spin_unlock(&clp->cl_lock);
6301 if (retstp) {
6302 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
6303 nfs4_put_stid(&retstp->st_stid);
6304 goto retry;
6305 }
6306 /* To keep mutex tracking happy */
6307 mutex_unlock(&stp->st_mutex);
6308 stp = retstp;
6309 }
6310 return stp;
6311 }
6312
6313 static struct nfs4_ol_stateid *
6314 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
6315 struct inode *inode, struct nfs4_ol_stateid *ost,
6316 bool *new)
6317 {
6318 struct nfs4_stid *ns = NULL;
6319 struct nfs4_ol_stateid *lst;
6320 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
6321 struct nfs4_client *clp = oo->oo_owner.so_client;
6322
6323 *new = false;
6324 spin_lock(&clp->cl_lock);
6325 lst = find_lock_stateid(lo, fi);
6326 spin_unlock(&clp->cl_lock);
6327 if (lst != NULL) {
6328 if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
6329 goto out;
6330 nfs4_put_stid(&lst->st_stid);
6331 }
6332 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
6333 if (ns == NULL)
6334 return NULL;
6335
6336 lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
6337 if (lst == openlockstateid(ns))
6338 *new = true;
6339 else
6340 nfs4_put_stid(ns);
6341 out:
6342 return lst;
6343 }
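/*
 * Note (added for exposition, not in the original source): *new is set only
 * when the stateid returned above was created by this call.  nfsd4_lock()
 * uses that to release a never-before-used lock stateid again if the rest of
 * the operation fails.
 */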
6344
6345 static int
6346 check_lock_length(u64 offset, u64 length)
6347 {
6348 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
6349 (length > ~offset)));
6350 }
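/*
 * Illustrative sketch (added for exposition, not part of nfs4state.c): the
 * same range check in plain user-space C, using a hypothetical helper named
 * range_is_invalid().  A zero length is never valid; an all-ones length
 * means "lock to end of file" and is always allowed; otherwise the length
 * must not run past the largest 64-bit offset, which is what
 * "length > ~offset" tests, since ~offset == NFS4_MAX_UINT64 - offset.
 */
#include <stdint.h>
#include <stdio.h>

static int range_is_invalid(uint64_t offset, uint64_t length)
{
	return length == 0 ||
	       (length != UINT64_MAX && length > ~offset);
}

int main(void)
{
	printf("%d\n", range_is_invalid(100, 0));               /* 1: zero length */
	printf("%d\n", range_is_invalid(100, 100));             /* 0: bytes 100..199 */
	printf("%d\n", range_is_invalid(100, UINT64_MAX));      /* 0: "to end of file" */
	printf("%d\n", range_is_invalid(UINT64_MAX - 4, 100));  /* 1: wraps past 2^64 - 1 */
	return 0;
}
/* End of illustrative sketch. */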
6351
6352 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
6353 {
6354 struct nfs4_file *fp = lock_stp->st_stid.sc_file;
6355
6356 lockdep_assert_held(&fp->fi_lock);
6357
6358 if (test_access(access, lock_stp))
6359 return;
6360 __nfs4_file_get_access(fp, access);
6361 set_access(access, lock_stp);
6362 }
6363
6364 static __be32
6365 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
6366 struct nfs4_ol_stateid *ost,
6367 struct nfsd4_lock *lock,
6368 struct nfs4_ol_stateid **plst, bool *new)
6369 {
6370 __be32 status;
6371 struct nfs4_file *fi = ost->st_stid.sc_file;
6372 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
6373 struct nfs4_client *cl = oo->oo_owner.so_client;
6374 struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
6375 struct nfs4_lockowner *lo;
6376 struct nfs4_ol_stateid *lst;
6377 unsigned int strhashval;
6378
6379 lo = find_lockowner_str(cl, &lock->lk_new_owner);
6380 if (!lo) {
6381 strhashval = ownerstr_hashval(&lock->lk_new_owner);
6382 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
6383 if (lo == NULL)
6384 return nfserr_jukebox;
6385 } else {
6386 /* with an existing lockowner, seqids must be the same */
6387 status = nfserr_bad_seqid;
6388 if (!cstate->minorversion &&
6389 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
6390 goto out;
6391 }
6392
6393 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
6394 if (lst == NULL) {
6395 status = nfserr_jukebox;
6396 goto out;
6397 }
6398
6399 status = nfs_ok;
6400 *plst = lst;
6401 out:
6402 nfs4_put_stateowner(&lo->lo_owner);
6403 return status;
6404 }
6405
6406 /*
6407  * LOCK operation
6408  */
6409 __be32
6410 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6411 union nfsd4_op_u *u)
6412 {
6413 struct nfsd4_lock *lock = &u->lock;
6414 struct nfs4_openowner *open_sop = NULL;
6415 struct nfs4_lockowner *lock_sop = NULL;
6416 struct nfs4_ol_stateid *lock_stp = NULL;
6417 struct nfs4_ol_stateid *open_stp = NULL;
6418 struct nfs4_file *fp;
6419 struct nfsd_file *nf = NULL;
6420 struct nfsd4_blocked_lock *nbl = NULL;
6421 struct file_lock *file_lock = NULL;
6422 struct file_lock *conflock = NULL;
6423 __be32 status = 0;
6424 int lkflg;
6425 int err;
6426 bool new = false;
6427 unsigned char fl_type;
6428 unsigned int fl_flags = FL_POSIX;
6429 struct net *net = SVC_NET(rqstp);
6430 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6431
6432 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
6433 (long long) lock->lk_offset,
6434 (long long) lock->lk_length);
6435
6436 if (check_lock_length(lock->lk_offset, lock->lk_length))
6437 return nfserr_inval;
6438
6439 if ((status = fh_verify(rqstp, &cstate->current_fh,
6440 S_IFREG, NFSD_MAY_LOCK))) {
6441 dprintk("NFSD: nfsd4_lock: permission denied!\n");
6442 return status;
6443 }
6444
6445 if (lock->lk_is_new) {
6446 if (nfsd4_has_session(cstate))
6447 /* See rfc 5661 18.10.3: given clientid is ignored: */
6448 memcpy(&lock->lk_new_clientid,
6449 &cstate->session->se_client->cl_clientid,
6450 sizeof(clientid_t));
6451
6452 status = nfserr_stale_clientid;
6453 if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
6454 goto out;
6455
6456 /* validate and update open stateid and open seqid */
6457 status = nfs4_preprocess_confirmed_seqid_op(cstate,
6458 lock->lk_new_open_seqid,
6459 &lock->lk_new_open_stateid,
6460 &open_stp, nn);
6461 if (status)
6462 goto out;
6463 mutex_unlock(&open_stp->st_mutex);
6464 open_sop = openowner(open_stp->st_stateowner);
6465 status = nfserr_bad_stateid;
6466 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
6467 &lock->lk_new_clientid))
6468 goto out;
6469 status = lookup_or_create_lock_state(cstate, open_stp, lock,
6470 &lock_stp, &new);
6471 } else {
6472 status = nfs4_preprocess_seqid_op(cstate,
6473 lock->lk_old_lock_seqid,
6474 &lock->lk_old_lock_stateid,
6475 NFS4_LOCK_STID, &lock_stp, nn);
6476 }
6477 if (status)
6478 goto out;
6479 lock_sop = lockowner(lock_stp->st_stateowner);
6480
6481 lkflg = setlkflg(lock->lk_type);
6482 status = nfs4_check_openmode(lock_stp, lkflg);
6483 if (status)
6484 goto out;
6485
6486 status = nfserr_grace;
6487 if (locks_in_grace(net) && !lock->lk_reclaim)
6488 goto out;
6489 status = nfserr_no_grace;
6490 if (!locks_in_grace(net) && lock->lk_reclaim)
6491 goto out;
6492
6493 fp = lock_stp->st_stid.sc_file;
6494 switch (lock->lk_type) {
6495 case NFS4_READW_LT:
6496 if (nfsd4_has_session(cstate))
6497 fl_flags |= FL_SLEEP;
6498 /* Fallthrough */
6499 case NFS4_READ_LT:
6500 spin_lock(&fp->fi_lock);
6501 nf = find_readable_file_locked(fp);
6502 if (nf)
6503 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
6504 spin_unlock(&fp->fi_lock);
6505 fl_type = F_RDLCK;
6506 break;
6507 case NFS4_WRITEW_LT:
6508 if (nfsd4_has_session(cstate))
6509 fl_flags |= FL_SLEEP;
6510 /* Fallthrough */
6511 case NFS4_WRITE_LT:
6512 spin_lock(&fp->fi_lock);
6513 nf = find_writeable_file_locked(fp);
6514 if (nf)
6515 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
6516 spin_unlock(&fp->fi_lock);
6517 fl_type = F_WRLCK;
6518 break;
6519 default:
6520 status = nfserr_inval;
6521 goto out;
6522 }
6523
6524 if (!nf) {
6525 status = nfserr_openmode;
6526 goto out;
6527 }
6528
6529 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
6530 if (!nbl) {
6531 dprintk("NFSD: %s: unable to allocate block!\n", __func__);
6532 status = nfserr_jukebox;
6533 goto out;
6534 }
6535
6536 file_lock = &nbl->nbl_lock;
6537 file_lock->fl_type = fl_type;
6538 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
6539 file_lock->fl_pid = current->tgid;
6540 file_lock->fl_file = nf->nf_file;
6541 file_lock->fl_flags = fl_flags;
6542 file_lock->fl_lmops = &nfsd_posix_mng_ops;
6543 file_lock->fl_start = lock->lk_offset;
6544 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
6545 nfs4_transform_lock_offset(file_lock);
6546
6547 conflock = locks_alloc_lock();
6548 if (!conflock) {
6549 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6550 status = nfserr_jukebox;
6551 goto out;
6552 }
6553
6554 if (fl_flags & FL_SLEEP) {
6555 nbl->nbl_time = get_seconds();
6556 spin_lock(&nn->blocked_locks_lock);
6557 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
6558 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
6559 spin_unlock(&nn->blocked_locks_lock);
6560 }
6561
6562 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
6563 switch (err) {
6564 case 0:
6565 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
6566 status = 0;
6567 if (lock->lk_reclaim)
6568 nn->somebody_reclaimed = true;
6569 break;
6570 case FILE_LOCK_DEFERRED:
6571 nbl = NULL;
6572 /* Fallthrough */
6573 case -EAGAIN: /* conflock holds conflicting lock */
6574 status = nfserr_denied;
6575 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
6576 nfs4_set_lock_denied(conflock, &lock->lk_denied);
6577 break;
6578 case -EDEADLK:
6579 status = nfserr_deadlock;
6580 break;
6581 default:
6582 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
6583 status = nfserrno(err);
6584 break;
6585 }
6586 out:
6587 if (nbl) {
6588 /* dequeue it if we queued it before */
6589 if (fl_flags & FL_SLEEP) {
6590 spin_lock(&nn->blocked_locks_lock);
6591 list_del_init(&nbl->nbl_list);
6592 list_del_init(&nbl->nbl_lru);
6593 spin_unlock(&nn->blocked_locks_lock);
6594 }
6595 free_blocked_lock(nbl);
6596 }
6597 if (nf)
6598 nfsd_file_put(nf);
6599 if (lock_stp) {
6600 /* Bump seqid manually if the 4.0 replay owner is openowner */
6601 if (cstate->replay_owner &&
6602 cstate->replay_owner != &lock_sop->lo_owner &&
6603 seqid_mutating_err(ntohl(status)))
6604 lock_sop->lo_owner.so_seqid++;
6605
6606 /*
6607  * If this is a new, never-before-used stateid, and we are
6608  * returning an error, then just go ahead and release it.
6609  */
6610 if (status && new)
6611 release_lock_stateid(lock_stp);
6612
6613 mutex_unlock(&lock_stp->st_mutex);
6614
6615 nfs4_put_stid(&lock_stp->st_stid);
6616 }
6617 if (open_stp)
6618 nfs4_put_stid(&open_stp->st_stid);
6619 nfsd4_bump_seqid(cstate, status);
6620 if (conflock)
6621 locks_free_lock(conflock);
6622 return status;
6623 }
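/*
 * Note (added for exposition, not in the original source): for sessions
 * (NFSv4.1+) clients, READW/WRITEW requests set FL_SLEEP, so a conflicting
 * lock leaves the nfsd4_blocked_lock queued on the lockowner's lo_blocked
 * list and on the per-net LRU, and vfs_lock_file() may return
 * FILE_LOCK_DEFERRED (nbl is then set to NULL so the out: path does not free
 * it).  When the conflict clears, nfsd4_lm_notify() above dequeues the block
 * and runs the CB_NOTIFY_LOCK callback; on every other outcome the block is
 * dequeued and freed in the out: path.
 */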
6624
6625 /*
6626  * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
6627  * so we do a temporary open here just to get an open file to pass to
6628  * vfs_test_lock.  (Arguably perhaps lockt could be done with less
6629  * privileges)
6630  */
6631 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
6632 {
6633 struct nfsd_file *nf;
6634 __be32 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
6635 if (!err) {
6636 err = nfserrno(vfs_test_lock(nf->nf_file, lock));
6637 nfsd_file_put(nf);
6638 }
6639 return err;
6640 }
6641
6642 /*
6643  * LOCKT operation
6644  */
6645 __be32
6646 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6647 union nfsd4_op_u *u)
6648 {
6649 struct nfsd4_lockt *lockt = &u->lockt;
6650 struct file_lock *file_lock = NULL;
6651 struct nfs4_lockowner *lo = NULL;
6652 __be32 status;
6653 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6654
6655 if (locks_in_grace(SVC_NET(rqstp)))
6656 return nfserr_grace;
6657
6658 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
6659 return nfserr_inval;
6660
6661 if (!nfsd4_has_session(cstate)) {
6662 status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
6663 if (status)
6664 goto out;
6665 }
6666
6667 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
6668 goto out;
6669
6670 file_lock = locks_alloc_lock();
6671 if (!file_lock) {
6672 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6673 status = nfserr_jukebox;
6674 goto out;
6675 }
6676
6677 switch (lockt->lt_type) {
6678 case NFS4_READ_LT:
6679 case NFS4_READW_LT:
6680 file_lock->fl_type = F_RDLCK;
6681 break;
6682 case NFS4_WRITE_LT:
6683 case NFS4_WRITEW_LT:
6684 file_lock->fl_type = F_WRLCK;
6685 break;
6686 default:
6687 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
6688 status = nfserr_inval;
6689 goto out;
6690 }
6691
6692 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
6693 if (lo)
6694 file_lock->fl_owner = (fl_owner_t)lo;
6695 file_lock->fl_pid = current->tgid;
6696 file_lock->fl_flags = FL_POSIX;
6697
6698 file_lock->fl_start = lockt->lt_offset;
6699 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
6700
6701 nfs4_transform_lock_offset(file_lock);
6702
6703 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
6704 if (status)
6705 goto out;
6706
6707 if (file_lock->fl_type != F_UNLCK) {
6708 status = nfserr_denied;
6709 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
6710 }
6711 out:
6712 if (lo)
6713 nfs4_put_stateowner(&lo->lo_owner);
6714 if (file_lock)
6715 locks_free_lock(file_lock);
6716 return status;
6717 }
6718
6719 __be32
6720 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6721 union nfsd4_op_u *u)
6722 {
6723 struct nfsd4_locku *locku = &u->locku;
6724 struct nfs4_ol_stateid *stp;
6725 struct nfsd_file *nf = NULL;
6726 struct file_lock *file_lock = NULL;
6727 __be32 status;
6728 int err;
6729 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6730
6731 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
6732 (long long) locku->lu_offset,
6733 (long long) locku->lu_length);
6734
6735 if (check_lock_length(locku->lu_offset, locku->lu_length))
6736 return nfserr_inval;
6737
6738 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
6739 &locku->lu_stateid, NFS4_LOCK_STID,
6740 &stp, nn);
6741 if (status)
6742 goto out;
6743 nf = find_any_file(stp->st_stid.sc_file);
6744 if (!nf) {
6745 status = nfserr_lock_range;
6746 goto put_stateid;
6747 }
6748 file_lock = locks_alloc_lock();
6749 if (!file_lock) {
6750 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6751 status = nfserr_jukebox;
6752 goto put_file;
6753 }
6754
6755 file_lock->fl_type = F_UNLCK;
6756 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
6757 file_lock->fl_pid = current->tgid;
6758 file_lock->fl_file = nf->nf_file;
6759 file_lock->fl_flags = FL_POSIX;
6760 file_lock->fl_lmops = &nfsd_posix_mng_ops;
6761 file_lock->fl_start = locku->lu_offset;
6762
6763 file_lock->fl_end = last_byte_offset(locku->lu_offset,
6764 locku->lu_length);
6765 nfs4_transform_lock_offset(file_lock);
6766
6767 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
6768 if (err) {
6769 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
6770 goto out_nfserr;
6771 }
6772 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
6773 put_file:
6774 nfsd_file_put(nf);
6775 put_stateid:
6776 mutex_unlock(&stp->st_mutex);
6777 nfs4_put_stid(&stp->st_stid);
6778 out:
6779 nfsd4_bump_seqid(cstate, status);
6780 if (file_lock)
6781 locks_free_lock(file_lock);
6782 return status;
6783
6784 out_nfserr:
6785 status = nfserrno(err);
6786 goto put_file;
6787 }
6788
6789 /*
6790  * returns
6791  *	true:  locks held by lockowner
6792  *	false: no locks held by lockowner
6793  */
6794 static bool
6795 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
6796 {
6797 struct file_lock *fl;
6798 int status = false;
6799 struct nfsd_file *nf = find_any_file(fp);
6800 struct inode *inode;
6801 struct file_lock_context *flctx;
6802
6803 if (!nf) {
6804 /* Any valid lock stateid should have some sort of access */
6805 WARN_ON_ONCE(1);
6806 return status;
6807 }
6808
6809 inode = locks_inode(nf->nf_file);
6810 flctx = inode->i_flctx;
6811
6812 if (flctx && !list_empty_careful(&flctx->flc_posix)) {
6813 spin_lock(&flctx->flc_lock);
6814 list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
6815 if (fl->fl_owner == (fl_owner_t)lowner) {
6816 status = true;
6817 break;
6818 }
6819 }
6820 spin_unlock(&flctx->flc_lock);
6821 }
6822 nfsd_file_put(nf);
6823 return status;
6824 }
6825
6826 __be32
6827 nfsd4_release_lockowner(struct svc_rqst *rqstp,
6828 struct nfsd4_compound_state *cstate,
6829 union nfsd4_op_u *u)
6830 {
6831 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
6832 clientid_t *clid = &rlockowner->rl_clientid;
6833 struct nfs4_stateowner *sop;
6834 struct nfs4_lockowner *lo = NULL;
6835 struct nfs4_ol_stateid *stp;
6836 struct xdr_netobj *owner = &rlockowner->rl_owner;
6837 unsigned int hashval = ownerstr_hashval(owner);
6838 __be32 status;
6839 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6840 struct nfs4_client *clp;
6841 LIST_HEAD (reaplist);
6842
6843 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
6844 clid->cl_boot, clid->cl_id);
6845
6846 status = lookup_clientid(clid, cstate, nn);
6847 if (status)
6848 return status;
6849
6850 clp = cstate->clp;
6851
6852 spin_lock(&clp->cl_lock);
6853 list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
6854 so_strhash) {
6855
6856 if (sop->so_is_open_owner || !same_owner_str(sop, owner))
6857 continue;
6858
6859 /* see if there are still any locks associated with it */
6860 lo = lockowner(sop);
6861 list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
6862 if (check_for_locks(stp->st_stid.sc_file, lo)) {
6863 status = nfserr_locks_held;
6864 spin_unlock(&clp->cl_lock);
6865 return status;
6866 }
6867 }
6868
6869 nfs4_get_stateowner(sop);
6870 break;
6871 }
6872 if (!lo) {
6873 spin_unlock(&clp->cl_lock);
6874 return status;
6875 }
6876
6877 unhash_lockowner_locked(lo);
6878 while (!list_empty(&lo->lo_owner.so_stateids)) {
6879 stp = list_first_entry(&lo->lo_owner.so_stateids,
6880 struct nfs4_ol_stateid,
6881 st_perstateowner);
6882 WARN_ON(!unhash_lock_stateid(stp));
6883 put_ol_stateid_locked(stp, &reaplist);
6884 }
6885 spin_unlock(&clp->cl_lock);
6886 free_ol_stateid_reaplist(&reaplist);
6887 remove_blocked_locks(lo);
6888 nfs4_put_stateowner(&lo->lo_owner);
6889
6890 return status;
6891 }
6892
6893 static inline struct nfs4_client_reclaim *
6894 alloc_reclaim(void)
6895 {
6896 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
6897 }
6898
6899 bool
6900 nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
6901 {
6902 struct nfs4_client_reclaim *crp;
6903
6904 crp = nfsd4_find_reclaim_client(name, nn);
6905 return (crp && crp->cr_clp);
6906 }
6907
6908 /*
6909  * Add a record for this client name to the per-net reclaim table so that
6910  * it will be allowed to reclaim state after a server restart.  The table
6911  * takes ownership of the name and principal-hash buffers; they are freed
6912  * in nfs4_remove_reclaim_record().
6913  */
6914 struct nfs4_client_reclaim *
6915 nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
6916 struct nfsd_net *nn)
6917 {
6918 unsigned int strhashval;
6919 struct nfs4_client_reclaim *crp;
6920
6921 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", name.len, name.data);
6922 crp = alloc_reclaim();
6923 if (crp) {
6924 strhashval = clientstr_hashval(name);
6925 INIT_LIST_HEAD(&crp->cr_strhash);
6926 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
6927 crp->cr_name.data = name.data;
6928 crp->cr_name.len = name.len;
6929 crp->cr_princhash.data = princhash.data;
6930 crp->cr_princhash.len = princhash.len;
6931 crp->cr_clp = NULL;
6932 nn->reclaim_str_hashtbl_size++;
6933 }
6934 return crp;
6935 }
6936
6937 void
6938 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
6939 {
6940 list_del(&crp->cr_strhash);
6941 kfree(crp->cr_name.data);
6942 kfree(crp->cr_princhash.data);
6943 kfree(crp);
6944 nn->reclaim_str_hashtbl_size--;
6945 }
6946
6947 void
6948 nfs4_release_reclaim(struct nfsd_net *nn)
6949 {
6950 struct nfs4_client_reclaim *crp = NULL;
6951 int i;
6952
6953 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6954 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
6955 crp = list_entry(nn->reclaim_str_hashtbl[i].next,
6956 struct nfs4_client_reclaim, cr_strhash);
6957 nfs4_remove_reclaim_record(crp, nn);
6958 }
6959 }
6960 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
6961 }
6962
6963 /*
6964  * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
6965 struct nfs4_client_reclaim *
6966 nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
6967 {
6968 unsigned int strhashval;
6969 struct nfs4_client_reclaim *crp = NULL;
6970
6971 dprintk("NFSD: nfs4_find_reclaim_client for name %.*s\n", name.len, name.data);
6972
6973 strhashval = clientstr_hashval(name);
6974 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
6975 if (compare_blob(&crp->cr_name, &name) == 0) {
6976 return crp;
6977 }
6978 }
6979 return NULL;
6980 }
6981
6982 /*
6983  * Called from OPEN. Look for clientid in reclaim list.
6984  */
6985 __be32
6986 nfs4_check_open_reclaim(clientid_t *clid,
6987 struct nfsd4_compound_state *cstate,
6988 struct nfsd_net *nn)
6989 {
6990 __be32 status;
6991
6992 /* find clientid in conf_id_hashtbl */
6993 status = lookup_clientid(clid, cstate, nn);
6994 if (status)
6995 return nfserr_reclaim_bad;
6996
6997 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
6998 return nfserr_no_grace;
6999
7000 if (nfsd4_client_record_check(cstate->clp))
7001 return nfserr_reclaim_bad;
7002
7003 return nfs_ok;
7004 }
7005
7006 #ifdef CONFIG_NFSD_FAULT_INJECTION
7007 static inline void
7008 put_client(struct nfs4_client *clp)
7009 {
7010 atomic_dec(&clp->cl_rpc_users);
7011 }
7012
7013 static struct nfs4_client *
7014 nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
7015 {
7016 struct nfs4_client *clp;
7017 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7018 nfsd_net_id);
7019
7020 if (!nfsd_netns_ready(nn))
7021 return NULL;
7022
7023 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
7024 if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
7025 return clp;
7026 }
7027 return NULL;
7028 }
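/*
 * Note (added for exposition, not in the original source): the nfsd_inject_*
 * helpers below back the CONFIG_NFSD_FAULT_INJECTION interface and let tests
 * print, expire, or recall client state.  Each "collect" pass bumps
 * cl_rpc_users for every client it queues so the client cannot be freed
 * before the matching reap function calls put_client().
 */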
7029
7030 u64
7031 nfsd_inject_print_clients(void)
7032 {
7033 struct nfs4_client *clp;
7034 u64 count = 0;
7035 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7036 nfsd_net_id);
7037 char buf[INET6_ADDRSTRLEN];
7038
7039 if (!nfsd_netns_ready(nn))
7040 return 0;
7041
7042 spin_lock(&nn->client_lock);
7043 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
7044 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
7045 pr_info("NFS Client: %s\n", buf);
7046 ++count;
7047 }
7048 spin_unlock(&nn->client_lock);
7049
7050 return count;
7051 }
7052
7053 u64
7054 nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
7055 {
7056 u64 count = 0;
7057 struct nfs4_client *clp;
7058 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7059 nfsd_net_id);
7060
7061 if (!nfsd_netns_ready(nn))
7062 return count;
7063
7064 spin_lock(&nn->client_lock);
7065 clp = nfsd_find_client(addr, addr_size);
7066 if (clp) {
7067 if (mark_client_expired_locked(clp) == nfs_ok)
7068 ++count;
7069 else
7070 clp = NULL;
7071 }
7072 spin_unlock(&nn->client_lock);
7073
7074 if (clp)
7075 expire_client(clp);
7076
7077 return count;
7078 }
7079
7080 u64
7081 nfsd_inject_forget_clients(u64 max)
7082 {
7083 u64 count = 0;
7084 struct nfs4_client *clp, *next;
7085 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7086 nfsd_net_id);
7087 LIST_HEAD(reaplist);
7088
7089 if (!nfsd_netns_ready(nn))
7090 return count;
7091
7092 spin_lock(&nn->client_lock);
7093 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
7094 if (mark_client_expired_locked(clp) == nfs_ok) {
7095 list_add(&clp->cl_lru, &reaplist);
7096 if (max != 0 && ++count >= max)
7097 break;
7098 }
7099 }
7100 spin_unlock(&nn->client_lock);
7101
7102 list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
7103 expire_client(clp);
7104
7105 return count;
7106 }
7107
7108 static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
7109 const char *type)
7110 {
7111 char buf[INET6_ADDRSTRLEN];
7112 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
7113 printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
7114 }
7115
7116 static void
7117 nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
7118 struct list_head *collect)
7119 {
7120 struct nfs4_client *clp = lst->st_stid.sc_client;
7121 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7122 nfsd_net_id);
7123
7124 if (!collect)
7125 return;
7126
7127 lockdep_assert_held(&nn->client_lock);
7128 atomic_inc(&clp->cl_rpc_users);
7129 list_add(&lst->st_locks, collect);
7130 }
7131
7132 static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
7133 struct list_head *collect,
7134 bool (*func)(struct nfs4_ol_stateid *))
7135 {
7136 struct nfs4_openowner *oop;
7137 struct nfs4_ol_stateid *stp, *st_next;
7138 struct nfs4_ol_stateid *lst, *lst_next;
7139 u64 count = 0;
7140
7141 spin_lock(&clp->cl_lock);
7142 list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
7143 list_for_each_entry_safe(stp, st_next,
7144 &oop->oo_owner.so_stateids, st_perstateowner) {
7145 list_for_each_entry_safe(lst, lst_next,
7146 &stp->st_locks, st_locks) {
7147 if (func) {
7148 if (func(lst))
7149 nfsd_inject_add_lock_to_list(lst,
7150 collect);
7151 }
7152 ++count;
7153 /*
7154  * Despite the fact that these functions deal
7155  * with 64-bit integers for "count", we must
7156  * ensure that it doesn't blow up the
7157  * clp->cl_rpc_users. Throw a warning if we
7158  * start to approach INT_MAX here.
7159  */
7160 WARN_ON_ONCE(count == (INT_MAX / 2));
7161 if (count == max)
7162 goto out;
7163 }
7164 }
7165 }
7166 out:
7167 spin_unlock(&clp->cl_lock);
7168
7169 return count;
7170 }
7171
7172 static u64
7173 nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
7174 u64 max)
7175 {
7176 return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
7177 }
7178
7179 static u64
7180 nfsd_print_client_locks(struct nfs4_client *clp)
7181 {
7182 u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
7183 nfsd_print_count(clp, count, "locked files");
7184 return count;
7185 }
7186
7187 u64
7188 nfsd_inject_print_locks(void)
7189 {
7190 struct nfs4_client *clp;
7191 u64 count = 0;
7192 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7193 nfsd_net_id);
7194
7195 if (!nfsd_netns_ready(nn))
7196 return 0;
7197
7198 spin_lock(&nn->client_lock);
7199 list_for_each_entry(clp, &nn->client_lru, cl_lru)
7200 count += nfsd_print_client_locks(clp);
7201 spin_unlock(&nn->client_lock);
7202
7203 return count;
7204 }
7205
7206 static void
7207 nfsd_reap_locks(struct list_head *reaplist)
7208 {
7209 struct nfs4_client *clp;
7210 struct nfs4_ol_stateid *stp, *next;
7211
7212 list_for_each_entry_safe(stp, next, reaplist, st_locks) {
7213 list_del_init(&stp->st_locks);
7214 clp = stp->st_stid.sc_client;
7215 nfs4_put_stid(&stp->st_stid);
7216 put_client(clp);
7217 }
7218 }
7219
7220 u64
7221 nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
7222 {
7223 unsigned int count = 0;
7224 struct nfs4_client *clp;
7225 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7226 nfsd_net_id);
7227 LIST_HEAD(reaplist);
7228
7229 if (!nfsd_netns_ready(nn))
7230 return count;
7231
7232 spin_lock(&nn->client_lock);
7233 clp = nfsd_find_client(addr, addr_size);
7234 if (clp)
7235 count = nfsd_collect_client_locks(clp, &reaplist, 0);
7236 spin_unlock(&nn->client_lock);
7237 nfsd_reap_locks(&reaplist);
7238 return count;
7239 }
7240
7241 u64
7242 nfsd_inject_forget_locks(u64 max)
7243 {
7244 u64 count = 0;
7245 struct nfs4_client *clp;
7246 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7247 nfsd_net_id);
7248 LIST_HEAD(reaplist);
7249
7250 if (!nfsd_netns_ready(nn))
7251 return count;
7252
7253 spin_lock(&nn->client_lock);
7254 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
7255 count += nfsd_collect_client_locks(clp, &reaplist, max - count);
7256 if (max != 0 && count >= max)
7257 break;
7258 }
7259 spin_unlock(&nn->client_lock);
7260 nfsd_reap_locks(&reaplist);
7261 return count;
7262 }
7263
7264 static u64
7265 nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
7266 struct list_head *collect,
7267 void (*func)(struct nfs4_openowner *))
7268 {
7269 struct nfs4_openowner *oop, *next;
7270 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7271 nfsd_net_id);
7272 u64 count = 0;
7273
7274 lockdep_assert_held(&nn->client_lock);
7275
7276 spin_lock(&clp->cl_lock);
7277 list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
7278 if (func) {
7279 func(oop);
7280 if (collect) {
7281 atomic_inc(&clp->cl_rpc_users);
7282 list_add(&oop->oo_perclient, collect);
7283 }
7284 }
7285 ++count;
7286 /*
7287  * Despite the fact that these functions deal with
7288  * 64-bit integers for "count", we must ensure that
7289  * it doesn't blow up the clp->cl_rpc_users. Throw a
7290  * warning if we start to approach INT_MAX here.
7291  */
7292 WARN_ON_ONCE(count == (INT_MAX / 2));
7293 if (count == max)
7294 break;
7295 }
7296 spin_unlock(&clp->cl_lock);
7297
7298 return count;
7299 }
7300
7301 static u64
7302 nfsd_print_client_openowners(struct nfs4_client *clp)
7303 {
7304 u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);
7305
7306 nfsd_print_count(clp, count, "openowners");
7307 return count;
7308 }
7309
7310 static u64
7311 nfsd_collect_client_openowners(struct nfs4_client *clp,
7312 struct list_head *collect, u64 max)
7313 {
7314 return nfsd_foreach_client_openowner(clp, max, collect,
7315 unhash_openowner_locked);
7316 }
7317
7318 u64
7319 nfsd_inject_print_openowners(void)
7320 {
7321 struct nfs4_client *clp;
7322 u64 count = 0;
7323 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7324 nfsd_net_id);
7325
7326 if (!nfsd_netns_ready(nn))
7327 return 0;
7328
7329 spin_lock(&nn->client_lock);
7330 list_for_each_entry(clp, &nn->client_lru, cl_lru)
7331 count += nfsd_print_client_openowners(clp);
7332 spin_unlock(&nn->client_lock);
7333
7334 return count;
7335 }
7336
7337 static void
7338 nfsd_reap_openowners(struct list_head *reaplist)
7339 {
7340 struct nfs4_client *clp;
7341 struct nfs4_openowner *oop, *next;
7342
7343 list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
7344 list_del_init(&oop->oo_perclient);
7345 clp = oop->oo_owner.so_client;
7346 release_openowner(oop);
7347 put_client(clp);
7348 }
7349 }
7350
7351 u64
7352 nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
7353 size_t addr_size)
7354 {
7355 unsigned int count = 0;
7356 struct nfs4_client *clp;
7357 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7358 nfsd_net_id);
7359 LIST_HEAD(reaplist);
7360
7361 if (!nfsd_netns_ready(nn))
7362 return count;
7363
7364 spin_lock(&nn->client_lock);
7365 clp = nfsd_find_client(addr, addr_size);
7366 if (clp)
7367 count = nfsd_collect_client_openowners(clp, &reaplist, 0);
7368 spin_unlock(&nn->client_lock);
7369 nfsd_reap_openowners(&reaplist);
7370 return count;
7371 }
7372
7373 u64
7374 nfsd_inject_forget_openowners(u64 max)
7375 {
7376 u64 count = 0;
7377 struct nfs4_client *clp;
7378 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7379 nfsd_net_id);
7380 LIST_HEAD(reaplist);
7381
7382 if (!nfsd_netns_ready(nn))
7383 return count;
7384
7385 spin_lock(&nn->client_lock);
7386 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
7387 count += nfsd_collect_client_openowners(clp, &reaplist,
7388 max - count);
7389 if (max != 0 && count >= max)
7390 break;
7391 }
7392 spin_unlock(&nn->client_lock);
7393 nfsd_reap_openowners(&reaplist);
7394 return count;
7395 }
7396
7397 static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
7398 struct list_head *victims)
7399 {
7400 struct nfs4_delegation *dp, *next;
7401 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7402 nfsd_net_id);
7403 u64 count = 0;
7404
7405 lockdep_assert_held(&nn->client_lock);
7406
7407 spin_lock(&state_lock);
7408 list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
7409 if (victims) {
7410 /*
7411  * It's not safe to mess with delegations that have a
7412  * non-zero dl_time. They might have already been broken
7413  * and could be processed by the laundromat outside of
7414  * the state_lock. Just leave them be.
7415  */
7416 if (dp->dl_time != 0)
7417 continue;
7418
7419 atomic_inc(&clp->cl_rpc_users);
7420 WARN_ON(!unhash_delegation_locked(dp));
7421 list_add(&dp->dl_recall_lru, victims);
7422 }
7423 ++count;
7424 /*
7425  * Despite the fact that these functions deal with
7426  * 64-bit integers for "count", we must ensure that
7427  * it doesn't blow up the clp->cl_rpc_users. Throw a
7428  * warning if we start to approach INT_MAX here.
7429  */
7430 WARN_ON_ONCE(count == (INT_MAX / 2));
7431 if (count == max)
7432 break;
7433 }
7434 spin_unlock(&state_lock);
7435 return count;
7436 }
7437
7438 static u64
7439 nfsd_print_client_delegations(struct nfs4_client *clp)
7440 {
7441 u64 count = nfsd_find_all_delegations(clp, 0, NULL);
7442
7443 nfsd_print_count(clp, count, "delegations");
7444 return count;
7445 }
7446
7447 u64
7448 nfsd_inject_print_delegations(void)
7449 {
7450 struct nfs4_client *clp;
7451 u64 count = 0;
7452 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7453 nfsd_net_id);
7454
7455 if (!nfsd_netns_ready(nn))
7456 return 0;
7457
7458 spin_lock(&nn->client_lock);
7459 list_for_each_entry(clp, &nn->client_lru, cl_lru)
7460 count += nfsd_print_client_delegations(clp);
7461 spin_unlock(&nn->client_lock);
7462
7463 return count;
7464 }
7465
7466 static void
7467 nfsd_forget_delegations(struct list_head *reaplist)
7468 {
7469 struct nfs4_client *clp;
7470 struct nfs4_delegation *dp, *next;
7471
7472 list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
7473 list_del_init(&dp->dl_recall_lru);
7474 clp = dp->dl_stid.sc_client;
7475 revoke_delegation(dp);
7476 put_client(clp);
7477 }
7478 }
7479
7480 u64
7481 nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
7482 size_t addr_size)
7483 {
7484 u64 count = 0;
7485 struct nfs4_client *clp;
7486 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7487 nfsd_net_id);
7488 LIST_HEAD(reaplist);
7489
7490 if (!nfsd_netns_ready(nn))
7491 return count;
7492
7493 spin_lock(&nn->client_lock);
7494 clp = nfsd_find_client(addr, addr_size);
7495 if (clp)
7496 count = nfsd_find_all_delegations(clp, 0, &reaplist);
7497 spin_unlock(&nn->client_lock);
7498
7499 nfsd_forget_delegations(&reaplist);
7500 return count;
7501 }
7502
7503 u64
7504 nfsd_inject_forget_delegations(u64 max)
7505 {
7506 u64 count = 0;
7507 struct nfs4_client *clp;
7508 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7509 nfsd_net_id);
7510 LIST_HEAD(reaplist);
7511
7512 if (!nfsd_netns_ready(nn))
7513 return count;
7514
7515 spin_lock(&nn->client_lock);
7516 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
7517 count += nfsd_find_all_delegations(clp, max - count, &reaplist);
7518 if (max != 0 && count >= max)
7519 break;
7520 }
7521 spin_unlock(&nn->client_lock);
7522 nfsd_forget_delegations(&reaplist);
7523 return count;
7524 }
7525
7526 static void
7527 nfsd_recall_delegations(struct list_head *reaplist)
7528 {
7529 struct nfs4_client *clp;
7530 struct nfs4_delegation *dp, *next;
7531
7532 list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
7533 list_del_init(&dp->dl_recall_lru);
7534 clp = dp->dl_stid.sc_client;
7535 /*
7536  * We skipped all entries that had a zero dl_time before,
7537  * so we can now reset the dl_time back to 0. If a delegation
7538  * break comes in now, then it won't make any difference since
7539  * we're recalling it either way.
7540  */
7541 spin_lock(&state_lock);
7542 dp->dl_time = 0;
7543 spin_unlock(&state_lock);
7544 nfsd_break_one_deleg(dp);
7545 put_client(clp);
7546 }
7547 }
7548
7549 u64
7550 nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
7551 size_t addr_size)
7552 {
7553 u64 count = 0;
7554 struct nfs4_client *clp;
7555 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7556 nfsd_net_id);
7557 LIST_HEAD(reaplist);
7558
7559 if (!nfsd_netns_ready(nn))
7560 return count;
7561
7562 spin_lock(&nn->client_lock);
7563 clp = nfsd_find_client(addr, addr_size);
7564 if (clp)
7565 count = nfsd_find_all_delegations(clp, 0, &reaplist);
7566 spin_unlock(&nn->client_lock);
7567
7568 nfsd_recall_delegations(&reaplist);
7569 return count;
7570 }
7571
7572 u64
7573 nfsd_inject_recall_delegations(u64 max)
7574 {
7575 u64 count = 0;
7576 struct nfs4_client *clp, *next;
7577 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7578 nfsd_net_id);
7579 LIST_HEAD(reaplist);
7580
7581 if (!nfsd_netns_ready(nn))
7582 return count;
7583
7584 spin_lock(&nn->client_lock);
7585 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
7586 count += nfsd_find_all_delegations(clp, max - count, &reaplist);
7587 if (max != 0 && ++count >= max)
7588 break;
7589 }
7590 spin_unlock(&nn->client_lock);
7591 nfsd_recall_delegations(&reaplist);
7592 return count;
7593 }
7594 #endif
7595
7596 /*
7597  * Since the lifetime of a delegation isn't limited to that of an open, a
7598  * client may quite reasonably hang on to a delegation as long as it has
7599  * the inode cached.  This becomes an obvious problem the first time a
7600  * client's inode cache approaches the size of the server's total memory.
7601  *
7602  * For now we avoid this problem by imposing a hard limit on the number
7603  * of delegations, which varies according to the server's memory size.
7604  */
7605 static void
7606 set_max_delegations(void)
7607 {
7608 /*
7609  * Allow at most 4 delegations per megabyte of RAM.  Quick
7610  * estimates suggest that in the worst case (where every delegation
7611  * is for a different inode), a delegation could take about 1.5K,
7612  * giving a worst case usage of about 6% of memory.
7613  */
7614 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
7615 }
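/*
 * Illustrative sketch (added for exposition, not part of nfs4state.c): the
 * shift above folds "pages -> bytes (<< PAGE_SHIFT), bytes -> MiB (>> 20),
 * times 4 (<< 2)" into one right shift, i.e. roughly four delegations per
 * megabyte of low memory.  The page size and free-page count below are
 * assumptions made for the example.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int page_shift = 12;	/* assume 4 KiB pages */
	unsigned long free_pages = 262144;	/* assume 1 GiB of buffer pages */
	unsigned long max_delegations;

	max_delegations = free_pages >> (20 - 2 - page_shift);
	printf("%lu\n", max_delegations);	/* prints 4096: 4 per MiB over 1024 MiB */
	return 0;
}
/* End of illustrative sketch. */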
7616
7617 static int nfs4_state_create_net(struct net *net)
7618 {
7619 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7620 int i;
7621
7622 nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
7623 sizeof(struct list_head),
7624 GFP_KERNEL);
7625 if (!nn->conf_id_hashtbl)
7626 goto err;
7627 nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
7628 sizeof(struct list_head),
7629 GFP_KERNEL);
7630 if (!nn->unconf_id_hashtbl)
7631 goto err_unconf_id;
7632 nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
7633 sizeof(struct list_head),
7634 GFP_KERNEL);
7635 if (!nn->sessionid_hashtbl)
7636 goto err_sessionid;
7637
7638 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7639 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
7640 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
7641 }
7642 for (i = 0; i < SESSION_HASH_SIZE; i++)
7643 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
7644 nn->conf_name_tree = RB_ROOT;
7645 nn->unconf_name_tree = RB_ROOT;
7646 nn->boot_time = get_seconds();
7647 nn->grace_ended = false;
7648 nn->nfsd4_manager.block_opens = true;
7649 INIT_LIST_HEAD(&nn->nfsd4_manager.list);
7650 INIT_LIST_HEAD(&nn->client_lru);
7651 INIT_LIST_HEAD(&nn->close_lru);
7652 INIT_LIST_HEAD(&nn->del_recall_lru);
7653 spin_lock_init(&nn->client_lock);
7654 spin_lock_init(&nn->s2s_cp_lock);
7655 idr_init(&nn->s2s_cp_stateids);
7656
7657 spin_lock_init(&nn->blocked_locks_lock);
7658 INIT_LIST_HEAD(&nn->blocked_locks_lru);
7659
7660 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
7661 get_net(net);
7662
7663 return 0;
7664
7665 err_sessionid:
7666 kfree(nn->unconf_id_hashtbl);
7667 err_unconf_id:
7668 kfree(nn->conf_id_hashtbl);
7669 err:
7670 return -ENOMEM;
7671 }
7672
7673 static void
7674 nfs4_state_destroy_net(struct net *net)
7675 {
7676 int i;
7677 struct nfs4_client *clp = NULL;
7678 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7679
7680 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7681 while (!list_empty(&nn->conf_id_hashtbl[i])) {
7682 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7683 destroy_client(clp);
7684 }
7685 }
7686
7687 WARN_ON(!list_empty(&nn->blocked_locks_lru));
7688
7689 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7690 while (!list_empty(&nn->unconf_id_hashtbl[i])) {
7691 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7692 destroy_client(clp);
7693 }
7694 }
7695
7696 kfree(nn->sessionid_hashtbl);
7697 kfree(nn->unconf_id_hashtbl);
7698 kfree(nn->conf_id_hashtbl);
7699 put_net(net);
7700 }
7701
7702 int
7703 nfs4_state_start_net(struct net *net)
7704 {
7705 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7706 int ret;
7707
7708 ret = nfs4_state_create_net(net);
7709 if (ret)
7710 return ret;
7711 locks_start_grace(net, &nn->nfsd4_manager);
7712 nfsd4_client_tracking_init(net);
7713 if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
7714 goto skip_grace;
7715 printk(KERN_INFO "NFSD: starting %ld-second grace period (net %x)\n",
7716 nn->nfsd4_grace, net->ns.inum);
7717 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
7718 return 0;
7719
7720 skip_grace:
7721 printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
7722 net->ns.inum);
7723 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
7724 nfsd4_end_grace(nn);
7725 return 0;
7726 }
7727
7728
7729 /* initialization to perform when the nfsd service is started: */
7730 int
7731 nfs4_state_start(void)
7732 {
7733 int ret;
7734
7735 laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
7736 if (laundry_wq == NULL) {
7737 ret = -ENOMEM;
7738 goto out;
7739 }
7740 ret = nfsd4_create_callback_queue();
7741 if (ret)
7742 goto out_free_laundry;
7743
7744 set_max_delegations();
7745 return 0;
7746
7747 out_free_laundry:
7748 destroy_workqueue(laundry_wq);
7749 out:
7750 return ret;
7751 }
7752
7753 void
7754 nfs4_state_shutdown_net(struct net *net)
7755 {
7756 struct nfs4_delegation *dp = NULL;
7757 struct list_head *pos, *next, reaplist;
7758 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7759
7760 cancel_delayed_work_sync(&nn->laundromat_work);
7761 locks_end_grace(&nn->nfsd4_manager);
7762
7763 INIT_LIST_HEAD(&reaplist);
7764 spin_lock(&state_lock);
7765 list_for_each_safe(pos, next, &nn->del_recall_lru) {
7766 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
7767 WARN_ON(!unhash_delegation_locked(dp));
7768 list_add(&dp->dl_recall_lru, &reaplist);
7769 }
7770 spin_unlock(&state_lock);
7771 list_for_each_safe(pos, next, &reaplist) {
7772 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
7773 list_del_init(&dp->dl_recall_lru);
7774 destroy_unhashed_deleg(dp);
7775 }
7776
7777 nfsd4_client_tracking_exit(net);
7778 nfs4_state_destroy_net(net);
7779 }
7780
7781 void
7782 nfs4_state_shutdown(void)
7783 {
7784 destroy_workqueue(laundry_wq);
7785 nfsd4_destroy_callback_queue();
7786 }
7787
7788 static void
7789 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7790 {
7791 if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
7792 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
7793 }
7794
7795 static void
7796 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7797 {
7798 if (cstate->minorversion) {
7799 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
7800 SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
7801 }
7802 }
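/*
 * Note (added for exposition, not in the original source): put_stateid()
 * records an operation's result stateid as the compound's "current stateid"
 * (minor version 1 and above only), and get_stateid() substitutes that saved
 * value when a later operation in the same compound passes the special
 * current-stateid value (see CURRENT_STATEID()).  The nfsd4_set_* and
 * nfsd4_get_* wrappers below just pick the right stateid field out of each
 * operation's arguments or results.
 */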
7803
7804 void
7805 clear_current_stateid(struct nfsd4_compound_state *cstate)
7806 {
7807 CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
7808 }
7809
7810 /*
7811  * functions to set current state id
7812  */
7813 void
7814 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
7815 union nfsd4_op_u *u)
7816 {
7817 put_stateid(cstate, &u->open_downgrade.od_stateid);
7818 }
7819
7820 void
7821 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
7822 union nfsd4_op_u *u)
7823 {
7824 put_stateid(cstate, &u->open.op_stateid);
7825 }
7826
7827 void
7828 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
7829 union nfsd4_op_u *u)
7830 {
7831 put_stateid(cstate, &u->close.cl_stateid);
7832 }
7833
7834 void
7835 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
7836 union nfsd4_op_u *u)
7837 {
7838 put_stateid(cstate, &u->lock.lk_resp_stateid);
7839 }
7840
7841 /*
7842  * functions to consume current state id
7843  */
7844
7845 void
7846 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
7847 union nfsd4_op_u *u)
7848 {
7849 get_stateid(cstate, &u->open_downgrade.od_stateid);
7850 }
7851
7852 void
7853 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
7854 union nfsd4_op_u *u)
7855 {
7856 get_stateid(cstate, &u->delegreturn.dr_stateid);
7857 }
7858
7859 void
7860 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
7861 union nfsd4_op_u *u)
7862 {
7863 get_stateid(cstate, &u->free_stateid.fr_stateid);
7864 }
7865
7866 void
7867 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
7868 union nfsd4_op_u *u)
7869 {
7870 get_stateid(cstate, &u->setattr.sa_stateid);
7871 }
7872
7873 void
7874 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
7875 union nfsd4_op_u *u)
7876 {
7877 get_stateid(cstate, &u->close.cl_stateid);
7878 }
7879
7880 void
7881 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
7882 union nfsd4_op_u *u)
7883 {
7884 get_stateid(cstate, &u->locku.lu_stateid);
7885 }
7886
7887 void
7888 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
7889 union nfsd4_op_u *u)
7890 {
7891 get_stateid(cstate, &u->read.rd_stateid);
7892 }
7893
7894 void
7895 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
7896 union nfsd4_op_u *u)
7897 {
7898 get_stateid(cstate, &u->write.wr_stateid);
7899 }