This source file includes the following definitions:
- xdr_encode_empty_array
- encode_nfs_cb_opnum4
- encode_nfs_fh4
- encode_stateid4
- encode_sessionid4
- nfs_cb_stat_to_errno
- decode_cb_op_status
- encode_cb_compound4args
- encode_cb_nops
- decode_cb_compound4res
- encode_cb_recall4args
- encode_cb_sequence4args
- decode_cb_sequence4resok
- decode_cb_sequence4res
- nfs4_xdr_enc_cb_null
- nfs4_xdr_enc_cb_recall
- nfs4_xdr_dec_cb_null
- nfs4_xdr_dec_cb_recall
- encode_cb_layout4args
- nfs4_xdr_enc_cb_layout
- nfs4_xdr_dec_cb_layout
- encode_stateowner
- nfs4_xdr_enc_cb_notify_lock
- nfs4_xdr_dec_cb_notify_lock
- encode_offload_info4
- encode_cb_offload4args
- nfs4_xdr_enc_cb_offload
- nfs4_xdr_dec_cb_offload
- max_cb_time
- get_backchannel_cred
- setup_callback_client
- warn_no_callback_path
- nfsd4_mark_cb_down
- nfsd4_mark_cb_fault
- nfsd4_cb_probe_done
- nfsd4_probe_callback
- nfsd4_probe_callback_sync
- nfsd4_change_callback
- nfsd41_cb_get_slot
- nfsd4_cb_prepare
- nfsd4_cb_sequence_done
- nfsd4_cb_done
- nfsd4_cb_release
- nfsd4_create_callback_queue
- nfsd4_destroy_callback_queue
- nfsd4_shutdown_callback
- __nfsd4_find_backchannel
- nfsd4_process_cb_update
- nfsd4_run_cb_work
- nfsd4_init_cb
- nfsd4_run_cb
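
For orientation, here is roughly how other nfsd code drives the callback machinery defined below (modeled loosely on the delegation-recall path in nfs4state.c, and assuming the declarations from "state.h"). The example_cb_* functions and example_recall_delegation() are illustrative stand-ins, not code from this file:

	static void example_cb_prepare(struct nfsd4_callback *cb)
	{
		/* take any references or locks needed while the RPC is in flight */
	}

	static int example_cb_done(struct nfsd4_callback *cb, struct rpc_task *task)
	{
		/* return 1 when the callback is finished, 0 to have it retried */
		return 1;
	}

	static void example_cb_release(struct nfsd4_callback *cb)
	{
		/* drop the references taken in ->prepare() */
	}

	static const struct nfsd4_callback_ops example_cb_ops = {
		.prepare	= example_cb_prepare,
		.done		= example_cb_done,
		.release	= example_cb_release,
	};

	static void example_recall_delegation(struct nfs4_delegation *dp)
	{
		/* bind the embedded nfsd4_callback to a client, ops and procedure */
		nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
			      &example_cb_ops, NFSPROC4_CLNT_CB_RECALL);
		nfsd4_run_cb(&dp->dl_recall);	/* queues nfsd4_run_cb_work() */
	}
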
34 #include <linux/sunrpc/clnt.h>
35 #include <linux/sunrpc/xprt.h>
36 #include <linux/sunrpc/svc_xprt.h>
37 #include <linux/slab.h>
38 #include "nfsd.h"
39 #include "state.h"
40 #include "netns.h"
41 #include "xdr4cb.h"
42 #include "xdr4.h"
43
44 #define NFSDDBG_FACILITY NFSDDBG_PROC
45
46 static void nfsd4_mark_cb_fault(struct nfs4_client *, int reason);
47
48 #define NFSPROC4_CB_NULL 0
49 #define NFSPROC4_CB_COMPOUND 1
50
51
52
53 struct nfs4_cb_compound_hdr {
54 /* args */
55 u32 ident;
56 u32 nops;
57 __be32 *nops_p;
58 u32 minorversion;
59 /* res */
60 int status;
61 };
62
63 static __be32 *xdr_encode_empty_array(__be32 *p)
64 {
65 *p++ = xdr_zero;
66 return p;
67 }
68
69 /*
70  * nfs_cb_opnum4
71  *
72  * Operation numbers used in CB_COMPOUND requests, as defined by the
73  * NFSv4.0 and NFSv4.1 callback protocols.
74  */
86 enum nfs_cb_opnum4 {
87 OP_CB_GETATTR = 3,
88 OP_CB_RECALL = 4,
89 OP_CB_LAYOUTRECALL = 5,
90 OP_CB_NOTIFY = 6,
91 OP_CB_PUSH_DELEG = 7,
92 OP_CB_RECALL_ANY = 8,
93 OP_CB_RECALLABLE_OBJ_AVAIL = 9,
94 OP_CB_RECALL_SLOT = 10,
95 OP_CB_SEQUENCE = 11,
96 OP_CB_WANTS_CANCELLED = 12,
97 OP_CB_NOTIFY_LOCK = 13,
98 OP_CB_NOTIFY_DEVICEID = 14,
99 OP_CB_OFFLOAD = 15,
100 OP_CB_ILLEGAL = 10044
101 };
102
103 static void encode_nfs_cb_opnum4(struct xdr_stream *xdr, enum nfs_cb_opnum4 op)
104 {
105 __be32 *p;
106
107 p = xdr_reserve_space(xdr, 4);
108 *p = cpu_to_be32(op);
109 }
110
111 /*
112  * nfs_fh4
113  *
114  *	typedef opaque nfs_fh4<NFS4_FHSIZE>;
115  */
116 static void encode_nfs_fh4(struct xdr_stream *xdr, const struct knfsd_fh *fh)
117 {
118 u32 length = fh->fh_size;
119 __be32 *p;
120
121 BUG_ON(length > NFS4_FHSIZE);
122 p = xdr_reserve_space(xdr, 4 + length);
123 xdr_encode_opaque(p, &fh->fh_base, length);
124 }
125
126 /*
127  * stateid4
128  *
129  *	struct stateid4 {
130  *		uint32_t	seqid;
131  *		opaque		other[NFS4_OTHER_SIZE];
132  *	};
133  */
134 static void encode_stateid4(struct xdr_stream *xdr, const stateid_t *sid)
135 {
136 __be32 *p;
137
138 p = xdr_reserve_space(xdr, NFS4_STATEID_SIZE);
139 *p++ = cpu_to_be32(sid->si_generation);
140 xdr_encode_opaque_fixed(p, &sid->si_opaque, NFS4_STATEID_OTHER_SIZE);
141 }
142
143 /*
144  * sessionid4
145  *
146  *	typedef opaque sessionid4[NFS4_SESSIONID_SIZE];
147  */
148 static void encode_sessionid4(struct xdr_stream *xdr,
149 const struct nfsd4_session *session)
150 {
151 __be32 *p;
152
153 p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN);
154 xdr_encode_opaque_fixed(p, session->se_sessionid.data,
155 NFS4_MAX_SESSIONID_LEN);
156 }
157
158
159
160
161 static const struct {
162 int stat;
163 int errno;
164 } nfs_cb_errtbl[] = {
165 { NFS4_OK, 0 },
166 { NFS4ERR_PERM, -EPERM },
167 { NFS4ERR_NOENT, -ENOENT },
168 { NFS4ERR_IO, -EIO },
169 { NFS4ERR_NXIO, -ENXIO },
170 { NFS4ERR_ACCESS, -EACCES },
171 { NFS4ERR_EXIST, -EEXIST },
172 { NFS4ERR_XDEV, -EXDEV },
173 { NFS4ERR_NOTDIR, -ENOTDIR },
174 { NFS4ERR_ISDIR, -EISDIR },
175 { NFS4ERR_INVAL, -EINVAL },
176 { NFS4ERR_FBIG, -EFBIG },
177 { NFS4ERR_NOSPC, -ENOSPC },
178 { NFS4ERR_ROFS, -EROFS },
179 { NFS4ERR_MLINK, -EMLINK },
180 { NFS4ERR_NAMETOOLONG, -ENAMETOOLONG },
181 { NFS4ERR_NOTEMPTY, -ENOTEMPTY },
182 { NFS4ERR_DQUOT, -EDQUOT },
183 { NFS4ERR_STALE, -ESTALE },
184 { NFS4ERR_BADHANDLE, -EBADHANDLE },
185 { NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
186 { NFS4ERR_NOTSUPP, -ENOTSUPP },
187 { NFS4ERR_TOOSMALL, -ETOOSMALL },
188 { NFS4ERR_SERVERFAULT, -ESERVERFAULT },
189 { NFS4ERR_BADTYPE, -EBADTYPE },
190 { NFS4ERR_LOCKED, -EAGAIN },
191 { NFS4ERR_RESOURCE, -EREMOTEIO },
192 { NFS4ERR_SYMLINK, -ELOOP },
193 { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
194 { NFS4ERR_DEADLOCK, -EDEADLK },
195 { -1, -EIO }
196 };
197
198 /*
199  * Map an NFSv4 callback status code to a local errno.  If we cannot
200  * translate the error, the recovery routines should handle it.
201  */
205 static int nfs_cb_stat_to_errno(int status)
206 {
207 int i;
208
209 for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
210 if (nfs_cb_errtbl[i].stat == status)
211 return nfs_cb_errtbl[i].errno;
212 }
213
214 dprintk("NFSD: Unrecognized NFS CB status value: %u\n", status);
215 return -status;
216 }
217
218 static int decode_cb_op_status(struct xdr_stream *xdr,
219 enum nfs_cb_opnum4 expected, int *status)
220 {
221 __be32 *p;
222 u32 op;
223
224 p = xdr_inline_decode(xdr, 4 + 4);
225 if (unlikely(p == NULL))
226 goto out_overflow;
227 op = be32_to_cpup(p++);
228 if (unlikely(op != expected))
229 goto out_unexpected;
230 *status = nfs_cb_stat_to_errno(be32_to_cpup(p));
231 return 0;
232 out_overflow:
233 return -EIO;
234 out_unexpected:
235 dprintk("NFSD: Callback server returned operation %d but "
236 "we issued a request for %d\n", op, expected);
237 return -EIO;
238 }
239
240 /*
241  * CB_COMPOUND4args
242  *
243  *	struct CB_COMPOUND4args {
244  *		utf8str_cs	tag;
245  *		uint32_t	minorversion;
246  *		uint32_t	callback_ident;
247  *		nfs_cb_argop4	argarray<>;
248  *	};
249  */
250 static void encode_cb_compound4args(struct xdr_stream *xdr,
251 struct nfs4_cb_compound_hdr *hdr)
252 {
253 __be32 * p;
254
255 p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4);
256 p = xdr_encode_empty_array(p);
257 *p++ = cpu_to_be32(hdr->minorversion);
258 *p++ = cpu_to_be32(hdr->ident);
259
260 hdr->nops_p = p;
261 *p = cpu_to_be32(hdr->nops);
262 }
263
264
265
266
267 static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
268 {
269 BUG_ON(hdr->nops > NFS4_MAX_BACK_CHANNEL_OPS);
270 *hdr->nops_p = cpu_to_be32(hdr->nops);
271 }
272
273 /*
274  * CB_COMPOUND4res
275  *
276  *	struct CB_COMPOUND4res {
277  *		nfsstat4	status;
278  *		utf8str_cs	tag;
279  *		nfs_cb_resop4	resarray<>;
280  *	};
281  */
282 static int decode_cb_compound4res(struct xdr_stream *xdr,
283 struct nfs4_cb_compound_hdr *hdr)
284 {
285 u32 length;
286 __be32 *p;
287
288 p = xdr_inline_decode(xdr, 4 + 4);
289 if (unlikely(p == NULL))
290 goto out_overflow;
291 hdr->status = be32_to_cpup(p++);
292
293 length = be32_to_cpup(p++);
294 p = xdr_inline_decode(xdr, length + 4);
295 if (unlikely(p == NULL))
296 goto out_overflow;
297 p += XDR_QUADLEN(length);
298 hdr->nops = be32_to_cpup(p);
299 return 0;
300 out_overflow:
301 return -EIO;
302 }
303
304 /*
305  * CB_RECALL4args
306  *
307  *	struct CB_RECALL4args {
308  *		stateid4	stateid;
309  *		bool		truncate;
310  *		nfs_fh4		fh;
311  *	};
312  */
313 static void encode_cb_recall4args(struct xdr_stream *xdr,
314 const struct nfs4_delegation *dp,
315 struct nfs4_cb_compound_hdr *hdr)
316 {
317 __be32 *p;
318
319 encode_nfs_cb_opnum4(xdr, OP_CB_RECALL);
320 encode_stateid4(xdr, &dp->dl_stid.sc_stateid);
321
322 p = xdr_reserve_space(xdr, 4);
323 *p++ = xdr_zero;			/* truncate: always sent as FALSE */
324
325 encode_nfs_fh4(xdr, &dp->dl_stid.sc_file->fi_fhandle);
326
327 hdr->nops++;
328 }
329
330 /*
331  * CB_SEQUENCE4args
332  *
333  *	struct CB_SEQUENCE4args {
334  *		sessionid4		csa_sessionid;
335  *		sequenceid4		csa_sequenceid;
336  *		slotid4			csa_slotid;
337  *		slotid4			csa_highest_slotid;
338  *		bool			csa_cachethis;
339  *		referring_call_list4	csa_referring_call_lists<>;
340  *	};
341  */
342 static void encode_cb_sequence4args(struct xdr_stream *xdr,
343 const struct nfsd4_callback *cb,
344 struct nfs4_cb_compound_hdr *hdr)
345 {
346 struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
347 __be32 *p;
348
349 if (hdr->minorversion == 0)
350 return;
351
352 encode_nfs_cb_opnum4(xdr, OP_CB_SEQUENCE);
353 encode_sessionid4(xdr, session);
354
355 p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4 + 4);
356 *p++ = cpu_to_be32(session->se_cb_seq_nr);	/* csa_sequenceid */
357 *p++ = xdr_zero;			/* csa_slotid */
358 *p++ = xdr_zero;			/* csa_highest_slotid */
359 *p++ = xdr_zero;			/* csa_cachethis */
360 xdr_encode_empty_array(p);		/* csa_referring_call_lists */
361
362 hdr->nops++;
363 }
364
365 /*
366  * CB_SEQUENCE4resok
367  *
368  *	struct CB_SEQUENCE4resok {
369  *		sessionid4	csr_sessionid;
370  *		sequenceid4	csr_sequenceid;
371  *		slotid4		csr_slotid;
372  *		slotid4		csr_highest_slotid;
373  *		slotid4		csr_target_highest_slotid;
374  *	};
375  *
376  *	union CB_SEQUENCE4res switch (nfsstat4 csr_status) {
377  *	case NFS4_OK:
378  *		CB_SEQUENCE4resok	csr_resok4;
379  *	default:
380  *		void;
381  *	};
382  */
386 static int decode_cb_sequence4resok(struct xdr_stream *xdr,
387 struct nfsd4_callback *cb)
388 {
389 struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
390 int status = -ESERVERFAULT;
391 __be32 *p;
392 u32 dummy;
393
394 /*
395  * The session ID, sequence number and slot ID in the reply must
396  * match what was sent in the CB_SEQUENCE arguments.
397  */
398 p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4 + 4 + 4);
399 if (unlikely(p == NULL))
400 goto out_overflow;
401
402 if (memcmp(p, session->se_sessionid.data, NFS4_MAX_SESSIONID_LEN)) {
403 dprintk("NFS: %s Invalid session id\n", __func__);
404 goto out;
405 }
406 p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
407
408 dummy = be32_to_cpup(p++);
409 if (dummy != session->se_cb_seq_nr) {
410 dprintk("NFS: %s Invalid sequence number\n", __func__);
411 goto out;
412 }
413
414 dummy = be32_to_cpup(p++);
415 if (dummy != 0) {
416 dprintk("NFS: %s Invalid slotid\n", __func__);
417 goto out;
418 }
419
420 /* The highest and target highest slot IDs are currently ignored. */
423 status = 0;
424 out:
425 cb->cb_seq_status = status;
426 return status;
427 out_overflow:
428 status = -EIO;
429 goto out;
430 }
431
432 static int decode_cb_sequence4res(struct xdr_stream *xdr,
433 struct nfsd4_callback *cb)
434 {
435 int status;
436
437 if (cb->cb_clp->cl_minorversion == 0)
438 return 0;
439
440 status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &cb->cb_seq_status);
441 if (unlikely(status || cb->cb_seq_status))
442 return status;
443
444 return decode_cb_sequence4resok(xdr, cb);
445 }
446
447 /*
448  * NFSv4.0 and NFSv4.1 XDR encode functions
449  *
450  * NFSv4.0 callback argument types are defined in RFC 3530; NFSv4.1
451  * callback argument types are defined in section 20 of RFC 5661.
452  */
459 static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
460 const void *__unused)
461 {
462 xdr_reserve_space(xdr, 0);
463 }
464
465
466
467
468 static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
469 const void *data)
470 {
471 const struct nfsd4_callback *cb = data;
472 const struct nfs4_delegation *dp = cb_to_delegation(cb);
473 struct nfs4_cb_compound_hdr hdr = {
474 .ident = cb->cb_clp->cl_cb_ident,
475 .minorversion = cb->cb_clp->cl_minorversion,
476 };
477
478 encode_cb_compound4args(xdr, &hdr);
479 encode_cb_sequence4args(xdr, cb, &hdr);
480 encode_cb_recall4args(xdr, dp, &hdr);
481 encode_cb_nops(&hdr);
482 }
483
484 /*
485  * NFSv4.0 and NFSv4.1 XDR decode functions
486  *
487  * NFSv4.0 callback result types are defined in RFC 3530; NFSv4.1
488  * callback result types are defined in section 20 of RFC 5661.
489  */
494 static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
495 void *__unused)
496 {
497 return 0;
498 }
499
500
501
502
503 static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
504 struct xdr_stream *xdr,
505 void *data)
506 {
507 struct nfsd4_callback *cb = data;
508 struct nfs4_cb_compound_hdr hdr;
509 int status;
510
511 status = decode_cb_compound4res(xdr, &hdr);
512 if (unlikely(status))
513 return status;
514
515 status = decode_cb_sequence4res(xdr, cb);
516 if (unlikely(status || cb->cb_seq_status))
517 return status;
518
519 return decode_cb_op_status(xdr, OP_CB_RECALL, &cb->cb_status);
520 }
521
522 #ifdef CONFIG_NFSD_PNFS
523
524 /*
525  * CB_LAYOUTRECALL4args
526  *
527  *	struct layoutrecall_file4 {
528  *		nfs_fh4		lor_fh;
529  *		offset4		lor_offset;
530  *		length4		lor_length;
531  *		stateid4	lor_stateid;
532  *	};
533  *	union layoutrecall4 switch(layoutrecall_type4 lor_recalltype) {
534  *	case LAYOUTRECALL4_FILE:
535  *		layoutrecall_file4 lor_layout;
536  *	case LAYOUTRECALL4_FSID:
537  *		fsid4		   lor_fsid;
538  *	case LAYOUTRECALL4_ALL:
539  *		void;
540  *	};
541  *	struct CB_LAYOUTRECALL4args {
542  *		layouttype4	clora_type;
543  *		layoutiomode4	clora_iomode;
544  *		bool		clora_changed;
545  *		layoutrecall4	clora_recall;
546  *	};
547  */
549 static void encode_cb_layout4args(struct xdr_stream *xdr,
550 const struct nfs4_layout_stateid *ls,
551 struct nfs4_cb_compound_hdr *hdr)
552 {
553 __be32 *p;
554
555 BUG_ON(hdr->minorversion == 0);
556
557 p = xdr_reserve_space(xdr, 5 * 4);
558 *p++ = cpu_to_be32(OP_CB_LAYOUTRECALL);
559 *p++ = cpu_to_be32(ls->ls_layout_type);
560 *p++ = cpu_to_be32(IOMODE_ANY);
561 *p++ = cpu_to_be32(1);
562 *p = cpu_to_be32(RETURN_FILE);
563
564 encode_nfs_fh4(xdr, &ls->ls_stid.sc_file->fi_fhandle);
565
566 p = xdr_reserve_space(xdr, 2 * 8);
567 p = xdr_encode_hyper(p, 0);
568 xdr_encode_hyper(p, NFS4_MAX_UINT64);
569
570 encode_stateid4(xdr, &ls->ls_recall_sid);
571
572 hdr->nops++;
573 }
574
575 static void nfs4_xdr_enc_cb_layout(struct rpc_rqst *req,
576 struct xdr_stream *xdr,
577 const void *data)
578 {
579 const struct nfsd4_callback *cb = data;
580 const struct nfs4_layout_stateid *ls =
581 container_of(cb, struct nfs4_layout_stateid, ls_recall);
582 struct nfs4_cb_compound_hdr hdr = {
583 .ident = 0,
584 .minorversion = cb->cb_clp->cl_minorversion,
585 };
586
587 encode_cb_compound4args(xdr, &hdr);
588 encode_cb_sequence4args(xdr, cb, &hdr);
589 encode_cb_layout4args(xdr, ls, &hdr);
590 encode_cb_nops(&hdr);
591 }
592
593 static int nfs4_xdr_dec_cb_layout(struct rpc_rqst *rqstp,
594 struct xdr_stream *xdr,
595 void *data)
596 {
597 struct nfsd4_callback *cb = data;
598 struct nfs4_cb_compound_hdr hdr;
599 int status;
600
601 status = decode_cb_compound4res(xdr, &hdr);
602 if (unlikely(status))
603 return status;
604
605 status = decode_cb_sequence4res(xdr, cb);
606 if (unlikely(status || cb->cb_seq_status))
607 return status;
608
609 return decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &cb->cb_status);
610 }
611 #endif
612
613 static void encode_stateowner(struct xdr_stream *xdr, struct nfs4_stateowner *so)
614 {
615 __be32 *p;
616
617 p = xdr_reserve_space(xdr, 8 + 4 + so->so_owner.len);
618 p = xdr_encode_opaque_fixed(p, &so->so_client->cl_clientid, 8);
619 xdr_encode_opaque(p, so->so_owner.data, so->so_owner.len);
620 }
621
622 static void nfs4_xdr_enc_cb_notify_lock(struct rpc_rqst *req,
623 struct xdr_stream *xdr,
624 const void *data)
625 {
626 const struct nfsd4_callback *cb = data;
627 const struct nfsd4_blocked_lock *nbl =
628 container_of(cb, struct nfsd4_blocked_lock, nbl_cb);
629 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)nbl->nbl_lock.fl_owner;
630 struct nfs4_cb_compound_hdr hdr = {
631 .ident = 0,
632 .minorversion = cb->cb_clp->cl_minorversion,
633 };
634
635 __be32 *p;
636
637 BUG_ON(hdr.minorversion == 0);
638
639 encode_cb_compound4args(xdr, &hdr);
640 encode_cb_sequence4args(xdr, cb, &hdr);
641
642 p = xdr_reserve_space(xdr, 4);
643 *p = cpu_to_be32(OP_CB_NOTIFY_LOCK);
644 encode_nfs_fh4(xdr, &nbl->nbl_fh);
645 encode_stateowner(xdr, &lo->lo_owner);
646 hdr.nops++;
647
648 encode_cb_nops(&hdr);
649 }
650
651 static int nfs4_xdr_dec_cb_notify_lock(struct rpc_rqst *rqstp,
652 struct xdr_stream *xdr,
653 void *data)
654 {
655 struct nfsd4_callback *cb = data;
656 struct nfs4_cb_compound_hdr hdr;
657 int status;
658
659 status = decode_cb_compound4res(xdr, &hdr);
660 if (unlikely(status))
661 return status;
662
663 status = decode_cb_sequence4res(xdr, cb);
664 if (unlikely(status || cb->cb_seq_status))
665 return status;
666
667 return decode_cb_op_status(xdr, OP_CB_NOTIFY_LOCK, &cb->cb_status);
668 }
669
670 /*
671  *	struct write_response4 {
672  *		stateid4	wr_callback_id<1>;
673  *		length4		wr_count;
674  *		stable_how4	wr_committed;
675  *		verifier4	wr_writeverf;
676  *	};
677  *	union offload_info4 switch (nfsstat4 coa_status) {
678  *	case NFS4_OK:
679  *		write_response4	coa_resok4;
680  *	default:
681  *		length4		coa_bytes_copied;
682  *	};
683  *	struct CB_OFFLOAD4args {
684  *		nfs_fh4		coa_fh;
685  *		stateid4	coa_stateid;
686  *		offload_info4	coa_offload_info;
687  *	};
688  */
689 static void encode_offload_info4(struct xdr_stream *xdr,
690 __be32 nfserr,
691 const struct nfsd4_copy *cp)
692 {
693 __be32 *p;
694
695 p = xdr_reserve_space(xdr, 4);
696 *p++ = nfserr;
697 if (!nfserr) {
698 p = xdr_reserve_space(xdr, 4 + 8 + 4 + NFS4_VERIFIER_SIZE);
699 p = xdr_encode_empty_array(p);
700 p = xdr_encode_hyper(p, cp->cp_res.wr_bytes_written);
701 *p++ = cpu_to_be32(cp->cp_res.wr_stable_how);
702 p = xdr_encode_opaque_fixed(p, cp->cp_res.wr_verifier.data,
703 NFS4_VERIFIER_SIZE);
704 } else {
705 p = xdr_reserve_space(xdr, 8);
706
707 p = xdr_encode_hyper(p, 0);
708 }
709 }
710
711 static void encode_cb_offload4args(struct xdr_stream *xdr,
712 __be32 nfserr,
713 const struct knfsd_fh *fh,
714 const struct nfsd4_copy *cp,
715 struct nfs4_cb_compound_hdr *hdr)
716 {
717 __be32 *p;
718
719 p = xdr_reserve_space(xdr, 4);
720 *p++ = cpu_to_be32(OP_CB_OFFLOAD);
721 encode_nfs_fh4(xdr, fh);
722 encode_stateid4(xdr, &cp->cp_res.cb_stateid);
723 encode_offload_info4(xdr, nfserr, cp);
724
725 hdr->nops++;
726 }
727
728 static void nfs4_xdr_enc_cb_offload(struct rpc_rqst *req,
729 struct xdr_stream *xdr,
730 const void *data)
731 {
732 const struct nfsd4_callback *cb = data;
733 const struct nfsd4_copy *cp =
734 container_of(cb, struct nfsd4_copy, cp_cb);
735 struct nfs4_cb_compound_hdr hdr = {
736 .ident = 0,
737 .minorversion = cb->cb_clp->cl_minorversion,
738 };
739
740 encode_cb_compound4args(xdr, &hdr);
741 encode_cb_sequence4args(xdr, cb, &hdr);
742 encode_cb_offload4args(xdr, cp->nfserr, &cp->fh, cp, &hdr);
743 encode_cb_nops(&hdr);
744 }
745
746 static int nfs4_xdr_dec_cb_offload(struct rpc_rqst *rqstp,
747 struct xdr_stream *xdr,
748 void *data)
749 {
750 struct nfsd4_callback *cb = data;
751 struct nfs4_cb_compound_hdr hdr;
752 int status;
753
754 status = decode_cb_compound4res(xdr, &hdr);
755 if (unlikely(status))
756 return status;
757
758 status = decode_cb_sequence4res(xdr, cb);
759 if (unlikely(status || cb->cb_seq_status))
760 return status;
761
762 return decode_cb_op_status(xdr, OP_CB_OFFLOAD, &cb->cb_status);
763 }
764
765
766
767 #define PROC(proc, call, argtype, restype) \
768 [NFSPROC4_CLNT_##proc] = { \
769 .p_proc = NFSPROC4_CB_##call, \
770 .p_encode = nfs4_xdr_enc_##argtype, \
771 .p_decode = nfs4_xdr_dec_##restype, \
772 .p_arglen = NFS4_enc_##argtype##_sz, \
773 .p_replen = NFS4_dec_##restype##_sz, \
774 .p_statidx = NFSPROC4_CB_##call, \
775 .p_name = #proc, \
776 }
777
778 static const struct rpc_procinfo nfs4_cb_procedures[] = {
779 PROC(CB_NULL, NULL, cb_null, cb_null),
780 PROC(CB_RECALL, COMPOUND, cb_recall, cb_recall),
781 #ifdef CONFIG_NFSD_PNFS
782 PROC(CB_LAYOUT, COMPOUND, cb_layout, cb_layout),
783 #endif
784 PROC(CB_NOTIFY_LOCK, COMPOUND, cb_notify_lock, cb_notify_lock),
785 PROC(CB_OFFLOAD, COMPOUND, cb_offload, cb_offload),
786 };
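/*
 * For reference, PROC(CB_RECALL, COMPOUND, cb_recall, cb_recall) above
 * expands to roughly the following initializer (the _sz constants come
 * from xdr4cb.h):
 *
 *	[NFSPROC4_CLNT_CB_RECALL] = {
 *		.p_proc    = NFSPROC4_CB_COMPOUND,
 *		.p_encode  = nfs4_xdr_enc_cb_recall,
 *		.p_decode  = nfs4_xdr_dec_cb_recall,
 *		.p_arglen  = NFS4_enc_cb_recall_sz,
 *		.p_replen  = NFS4_dec_cb_recall_sz,
 *		.p_statidx = NFSPROC4_CB_COMPOUND,
 *		.p_name    = "CB_RECALL",
 *	},
 */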
787
788 static unsigned int nfs4_cb_counts[ARRAY_SIZE(nfs4_cb_procedures)];
789 static const struct rpc_version nfs_cb_version4 = {
790 /*
791  * Note on the callback rpc program version number: the original Linux
792  * client implementation required it to be 1, so to interoperate with
793  * such clients the server keeps using version 1 here.
794  */
797 .number = 1,
798 .nrprocs = ARRAY_SIZE(nfs4_cb_procedures),
799 .procs = nfs4_cb_procedures,
800 .counts = nfs4_cb_counts,
801 };
802
803 static const struct rpc_version *nfs_cb_version[2] = {
804 [1] = &nfs_cb_version4,
805 };
806
807 static const struct rpc_program cb_program;
808
809 static struct rpc_stat cb_stats = {
810 .program = &cb_program
811 };
812
813 #define NFS4_CALLBACK 0x40000000
814 static const struct rpc_program cb_program = {
815 .name = "nfs4_cb",
816 .number = NFS4_CALLBACK,
817 .nrvers = ARRAY_SIZE(nfs_cb_version),
818 .version = nfs_cb_version,
819 .stats = &cb_stats,
820 .pipe_dir_name = "nfsd4_cb",
821 };
822
823 static int max_cb_time(struct net *net)
824 {
825 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
826 return max(nn->nfsd4_lease/10, (time_t)1) * HZ;
827 }
828
829 static const struct cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses)
830 {
831 if (clp->cl_minorversion == 0) {
832 client->cl_principal = clp->cl_cred.cr_targ_princ ?
833 clp->cl_cred.cr_targ_princ : "nfs";
834
835 return get_cred(rpc_machine_cred());
836 } else {
837 struct cred *kcred;
838
839 kcred = prepare_kernel_cred(NULL);
840 if (!kcred)
841 return NULL;
842
843 kcred->uid = ses->se_cb_sec.uid;
844 kcred->gid = ses->se_cb_sec.gid;
845 return kcred;
846 }
847 }
848
849 static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
850 {
851 int maxtime = max_cb_time(clp->net);
852 struct rpc_timeout timeparms = {
853 .to_initval = maxtime,
854 .to_retries = 0,
855 .to_maxval = maxtime,
856 };
857 struct rpc_create_args args = {
858 .net = clp->net,
859 .address = (struct sockaddr *) &conn->cb_addr,
860 .addrsize = conn->cb_addrlen,
861 .saddress = (struct sockaddr *) &conn->cb_saddr,
862 .timeout = &timeparms,
863 .program = &cb_program,
864 .version = 1,
865 .flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
866 .cred = current_cred(),
867 };
868 struct rpc_clnt *client;
869 const struct cred *cred;
870
871 if (clp->cl_minorversion == 0) {
872 if (!clp->cl_cred.cr_principal &&
873 (clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5))
874 return -EINVAL;
875 args.client_name = clp->cl_cred.cr_principal;
876 args.prognumber = conn->cb_prog;
877 args.protocol = XPRT_TRANSPORT_TCP;
878 args.authflavor = clp->cl_cred.cr_flavor;
879 clp->cl_cb_ident = conn->cb_ident;
880 } else {
881 if (!conn->cb_xprt)
882 return -EINVAL;
883 clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
884 clp->cl_cb_session = ses;
885 args.bc_xprt = conn->cb_xprt;
886 args.prognumber = clp->cl_cb_session->se_cb_prog;
887 args.protocol = conn->cb_xprt->xpt_class->xcl_ident |
888 XPRT_TRANSPORT_BC;
889 args.authflavor = ses->se_cb_sec.flavor;
890 }
891
892 client = rpc_create(&args);
893 if (IS_ERR(client)) {
894 dprintk("NFSD: couldn't create callback client: %ld\n",
895 PTR_ERR(client));
896 return PTR_ERR(client);
897 }
898 cred = get_backchannel_cred(clp, client, ses);
899 if (!cred) {
900 rpc_shutdown_client(client);
901 return -ENOMEM;
902 }
903 clp->cl_cb_client = client;
904 clp->cl_cb_cred = cred;
905 return 0;
906 }
907
908 static void warn_no_callback_path(struct nfs4_client *clp, int reason)
909 {
910 dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
911 (int)clp->cl_name.len, clp->cl_name.data, reason);
912 }
913
914 static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason)
915 {
916 if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
917 return;
918 clp->cl_cb_state = NFSD4_CB_DOWN;
919 warn_no_callback_path(clp, reason);
920 }
921
922 static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason)
923 {
924 if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
925 return;
926 clp->cl_cb_state = NFSD4_CB_FAULT;
927 warn_no_callback_path(clp, reason);
928 }
929
930 static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
931 {
932 struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
933
934 if (task->tk_status)
935 nfsd4_mark_cb_down(clp, task->tk_status);
936 else
937 clp->cl_cb_state = NFSD4_CB_UP;
938 }
939
940 static const struct rpc_call_ops nfsd4_cb_probe_ops = {
941
942
943 .rpc_call_done = nfsd4_cb_probe_done,
944 };
945
946 static struct workqueue_struct *callback_wq;
947
948 /*
949  * Poke the callback thread to process any updates to the callback
950  * parameters, and send a null probe.
951  */
952 void nfsd4_probe_callback(struct nfs4_client *clp)
953 {
954 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
955 set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
956 nfsd4_run_cb(&clp->cl_cb_null);
957 }
958
959 void nfsd4_probe_callback_sync(struct nfs4_client *clp)
960 {
961 nfsd4_probe_callback(clp);
962 flush_workqueue(callback_wq);
963 }
964
965 void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
966 {
967 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
968 spin_lock(&clp->cl_lock);
969 memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
970 spin_unlock(&clp->cl_lock);
971 }
972
973 /*
974  * There's currently a single callback channel slot.
975  * If the slot is available, then mark it busy.  Otherwise, set the
976  * thread for sleeping on the callback RPC wait queue.
977  */
978 static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task)
979 {
980 if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
981 rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
982 /* Race breaker */
983 if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
984 dprintk("%s slot is busy\n", __func__);
985 return false;
986 }
987 rpc_wake_up_queued_task(&clp->cl_cb_waitq, task);
988 }
989 return true;
990 }
991
992
993
994
995
996 static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
997 {
998 struct nfsd4_callback *cb = calldata;
999 struct nfs4_client *clp = cb->cb_clp;
1000 u32 minorversion = clp->cl_minorversion;
1001
1002 /*
1003  * cb_seq_status is only set in decode_cb_sequence4res,
1004  * and so will remain 1 if an rpc level failure occurs.
1005  */
1006 cb->cb_seq_status = 1;
1007 cb->cb_status = 0;
1008 if (minorversion) {
1009 if (!cb->cb_holds_slot && !nfsd41_cb_get_slot(clp, task))
1010 return;
1011 cb->cb_holds_slot = true;
1012 }
1013 rpc_call_start(task);
1014 }
1015
1016 static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback *cb)
1017 {
1018 struct nfs4_client *clp = cb->cb_clp;
1019 struct nfsd4_session *session = clp->cl_cb_session;
1020 bool ret = true;
1021
1022 if (!clp->cl_minorversion) {
1023 /*
1024  * If the backchannel connection was shut down while this
1025  * task was queued, we need to resubmit it after setting up
1026  * a new backchannel connection.
1027  *
1028  * Note that if we lost our callback connection permanently
1029  * the submission code will error out, so we don't need to
1030  * handle that case here.
1031  */
1032 if (RPC_SIGNALLED(task))
1033 goto need_restart;
1034
1035 return true;
1036 }
1037
1038 if (!cb->cb_holds_slot)
1039 goto need_restart;
1040
1041 switch (cb->cb_seq_status) {
1042 case 0:
1043 /*
1044  * No need for a lock here: access to the slot is serialized by
1045  * nfsd4_cb_prepare() claiming the slot before the RPC starts.
1046  */
1050 ++session->se_cb_seq_nr;
1051 break;
1052 case -ESERVERFAULT:
1053 ++session->se_cb_seq_nr;
1054 /* Fall through */
1055 case 1:
1056 case -NFS4ERR_BADSESSION:
1057 nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status);
1058 ret = false;
1059 break;
1060 case -NFS4ERR_DELAY:
1061 if (!rpc_restart_call(task))
1062 goto out;
1063
1064 rpc_delay(task, 2 * HZ);
1065 return false;
1066 case -NFS4ERR_BADSLOT:
1067 goto retry_nowait;
1068 case -NFS4ERR_SEQ_MISORDERED:
1069 if (session->se_cb_seq_nr != 1) {
1070 session->se_cb_seq_nr = 1;
1071 goto retry_nowait;
1072 }
1073 break;
1074 default:
1075 dprintk("%s: unprocessed error %d\n", __func__,
1076 cb->cb_seq_status);
1077 }
1078
1079 cb->cb_holds_slot = false;
1080 clear_bit(0, &clp->cl_cb_slot_busy);
1081 rpc_wake_up_next(&clp->cl_cb_waitq);
1082 dprintk("%s: freed slot, new seqid=%d\n", __func__,
1083 clp->cl_cb_session->se_cb_seq_nr);
1084
1085 if (RPC_SIGNALLED(task))
1086 goto need_restart;
1087 out:
1088 return ret;
1089 retry_nowait:
1090 if (rpc_restart_call_prepare(task))
1091 ret = false;
1092 goto out;
1093 need_restart:
1094 task->tk_status = 0;
1095 cb->cb_need_restart = true;
1096 return false;
1097 }
1098
1099 static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
1100 {
1101 struct nfsd4_callback *cb = calldata;
1102 struct nfs4_client *clp = cb->cb_clp;
1103
1104 dprintk("%s: minorversion=%d\n", __func__,
1105 clp->cl_minorversion);
1106
1107 if (!nfsd4_cb_sequence_done(task, cb))
1108 return;
1109
1110 if (cb->cb_status) {
1111 WARN_ON_ONCE(task->tk_status);
1112 task->tk_status = cb->cb_status;
1113 }
1114
1115 switch (cb->cb_ops->done(cb, task)) {
1116 case 0:
1117 task->tk_status = 0;
1118 rpc_restart_call_prepare(task);
1119 return;
1120 case 1:
1121 switch (task->tk_status) {
1122 case -EIO:
1123 case -ETIMEDOUT:
1124 nfsd4_mark_cb_down(clp, task->tk_status);
1125 }
1126 break;
1127 default:
1128 BUG();
1129 }
1130 }
1131
1132 static void nfsd4_cb_release(void *calldata)
1133 {
1134 struct nfsd4_callback *cb = calldata;
1135
1136 if (cb->cb_need_restart)
1137 nfsd4_run_cb(cb);
1138 else
1139 cb->cb_ops->release(cb);
1140
1141 }
1142
1143 static const struct rpc_call_ops nfsd4_cb_ops = {
1144 .rpc_call_prepare = nfsd4_cb_prepare,
1145 .rpc_call_done = nfsd4_cb_done,
1146 .rpc_release = nfsd4_cb_release,
1147 };
1148
1149 int nfsd4_create_callback_queue(void)
1150 {
1151 callback_wq = alloc_ordered_workqueue("nfsd4_callbacks", 0);
1152 if (!callback_wq)
1153 return -ENOMEM;
1154 return 0;
1155 }
1156
1157 void nfsd4_destroy_callback_queue(void)
1158 {
1159 destroy_workqueue(callback_wq);
1160 }
1161
1162
1163 void nfsd4_shutdown_callback(struct nfs4_client *clp)
1164 {
1165 set_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags);
1166 /*
1167  * Note this won't actually result in a null callback;
1168  * instead, nfsd4_run_cb_work() will detect the killed
1169  * client, shut down the rpc client, and stop:
1170  */
1171 nfsd4_run_cb(&clp->cl_cb_null);
1172 flush_workqueue(callback_wq);
1173 }
1174
1175 /* requires cl_lock: */
1176 static struct nfsd4_conn * __nfsd4_find_backchannel(struct nfs4_client *clp)
1177 {
1178 struct nfsd4_session *s;
1179 struct nfsd4_conn *c;
1180
1181 list_for_each_entry(s, &clp->cl_sessions, se_perclnt) {
1182 list_for_each_entry(c, &s->se_conns, cn_persession) {
1183 if (c->cn_flags & NFS4_CDFC4_BACK)
1184 return c;
1185 }
1186 }
1187 return NULL;
1188 }
1189
1190 static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
1191 {
1192 struct nfs4_cb_conn conn;
1193 struct nfs4_client *clp = cb->cb_clp;
1194 struct nfsd4_session *ses = NULL;
1195 struct nfsd4_conn *c;
1196 int err;
1197
1198 /*
1199  * This is either an update, or the client dying; in either case,
1200  * kill the old client:
1201  */
1202 if (clp->cl_cb_client) {
1203 rpc_shutdown_client(clp->cl_cb_client);
1204 clp->cl_cb_client = NULL;
1205 put_cred(clp->cl_cb_cred);
1206 clp->cl_cb_cred = NULL;
1207 }
1208 if (clp->cl_cb_conn.cb_xprt) {
1209 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
1210 clp->cl_cb_conn.cb_xprt = NULL;
1211 }
1212 if (test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags))
1213 return;
1214 spin_lock(&clp->cl_lock);
1215 /*
1216  * Only serialized callback code is allowed to clear these
1217  * flags; main nfsd code can only set them:
1218  */
1219 BUG_ON(!(clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK));
1220 clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
1221 memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
1222 c = __nfsd4_find_backchannel(clp);
1223 if (c) {
1224 svc_xprt_get(c->cn_xprt);
1225 conn.cb_xprt = c->cn_xprt;
1226 ses = c->cn_session;
1227 }
1228 spin_unlock(&clp->cl_lock);
1229
1230 err = setup_callback_client(clp, &conn, ses);
1231 if (err) {
1232 nfsd4_mark_cb_down(clp, err);
1233 return;
1234 }
1235 }
1236
1237 static void
1238 nfsd4_run_cb_work(struct work_struct *work)
1239 {
1240 struct nfsd4_callback *cb =
1241 container_of(work, struct nfsd4_callback, cb_work);
1242 struct nfs4_client *clp = cb->cb_clp;
1243 struct rpc_clnt *clnt;
1244 int flags;
1245
1246 if (cb->cb_need_restart) {
1247 cb->cb_need_restart = false;
1248 } else {
1249 if (cb->cb_ops && cb->cb_ops->prepare)
1250 cb->cb_ops->prepare(cb);
1251 }
1252
1253 if (clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK)
1254 nfsd4_process_cb_update(cb);
1255
1256 clnt = clp->cl_cb_client;
1257 if (!clnt) {
1258 /* Callback channel broken, or client killed; give up. */
1259 if (cb->cb_ops && cb->cb_ops->release)
1260 cb->cb_ops->release(cb);
1261 return;
1262 }
1263
1264 /*
1265  * Don't send probe messages for 4.1 or later.
1266  */
1267 if (!cb->cb_ops && clp->cl_minorversion) {
1268 clp->cl_cb_state = NFSD4_CB_UP;
1269 return;
1270 }
1271
1272 cb->cb_msg.rpc_cred = clp->cl_cb_cred;
1273 flags = clp->cl_minorversion ? RPC_TASK_NOCONNECT : RPC_TASK_SOFTCONN;
1274 rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | flags,
1275 cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb);
1276 }
1277
1278 void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
1279 const struct nfsd4_callback_ops *ops, enum nfsd4_cb_op op)
1280 {
1281 cb->cb_clp = clp;
1282 cb->cb_msg.rpc_proc = &nfs4_cb_procedures[op];
1283 cb->cb_msg.rpc_argp = cb;
1284 cb->cb_msg.rpc_resp = cb;
1285 cb->cb_ops = ops;
1286 INIT_WORK(&cb->cb_work, nfsd4_run_cb_work);
1287 cb->cb_seq_status = 1;
1288 cb->cb_status = 0;
1289 cb->cb_need_restart = false;
1290 cb->cb_holds_slot = false;
1291 }
1292
1293 void nfsd4_run_cb(struct nfsd4_callback *cb)
1294 {
1295 queue_work(callback_wq, &cb->cb_work);
1296 }