1/* 2 * fs/nfs/nfs4proc.c 3 * 4 * Client-side procedure declarations for NFSv4. 5 * 6 * Copyright (c) 2002 The Regents of the University of Michigan. 7 * All rights reserved. 8 * 9 * Kendrick Smith <kmsmith@umich.edu> 10 * Andy Adamson <andros@umich.edu> 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. Neither the name of the University nor the names of its 22 * contributors may be used to endorse or promote products derived 23 * from this software without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38#include <linux/mm.h> 39#include <linux/delay.h> 40#include <linux/errno.h> 41#include <linux/file.h> 42#include <linux/string.h> 43#include <linux/ratelimit.h> 44#include <linux/printk.h> 45#include <linux/slab.h> 46#include <linux/sunrpc/clnt.h> 47#include <linux/nfs.h> 48#include <linux/nfs4.h> 49#include <linux/nfs_fs.h> 50#include <linux/nfs_page.h> 51#include <linux/nfs_mount.h> 52#include <linux/namei.h> 53#include <linux/mount.h> 54#include <linux/module.h> 55#include <linux/xattr.h> 56#include <linux/utsname.h> 57#include <linux/freezer.h> 58 59#include "nfs4_fs.h" 60#include "delegation.h" 61#include "internal.h" 62#include "iostat.h" 63#include "callback.h" 64#include "pnfs.h" 65#include "netns.h" 66#include "nfs4idmap.h" 67#include "nfs4session.h" 68#include "fscache.h" 69 70#include "nfs4trace.h" 71 72#define NFSDBG_FACILITY NFSDBG_PROC 73 74#define NFS4_POLL_RETRY_MIN (HZ/10) 75#define NFS4_POLL_RETRY_MAX (15*HZ) 76 77struct nfs4_opendata; 78static int _nfs4_proc_open(struct nfs4_opendata *data); 79static int _nfs4_recover_proc_open(struct nfs4_opendata *data); 80static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); 81static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *, long *); 82static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr); 83static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label); 84static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label); 85static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 86 struct nfs_fattr *fattr, struct iattr *sattr, 87 struct nfs4_state *state, struct nfs4_label *ilabel, 88 
struct nfs4_label *olabel); 89#ifdef CONFIG_NFS_V4_1 90static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *, 91 struct rpc_cred *); 92static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *, 93 struct rpc_cred *); 94#endif 95 96#ifdef CONFIG_NFS_V4_SECURITY_LABEL 97static inline struct nfs4_label * 98nfs4_label_init_security(struct inode *dir, struct dentry *dentry, 99 struct iattr *sattr, struct nfs4_label *label) 100{ 101 int err; 102 103 if (label == NULL) 104 return NULL; 105 106 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0) 107 return NULL; 108 109 err = security_dentry_init_security(dentry, sattr->ia_mode, 110 &dentry->d_name, (void **)&label->label, &label->len); 111 if (err == 0) 112 return label; 113 114 return NULL; 115} 116static inline void 117nfs4_label_release_security(struct nfs4_label *label) 118{ 119 if (label) 120 security_release_secctx(label->label, label->len); 121} 122static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label) 123{ 124 if (label) 125 return server->attr_bitmask; 126 127 return server->attr_bitmask_nl; 128} 129#else 130static inline struct nfs4_label * 131nfs4_label_init_security(struct inode *dir, struct dentry *dentry, 132 struct iattr *sattr, struct nfs4_label *l) 133{ return NULL; } 134static inline void 135nfs4_label_release_security(struct nfs4_label *label) 136{ return; } 137static inline u32 * 138nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label) 139{ return server->attr_bitmask; } 140#endif 141 142/* Prevent leaks of NFSv4 errors into userland */ 143static int nfs4_map_errors(int err) 144{ 145 if (err >= -1000) 146 return err; 147 switch (err) { 148 case -NFS4ERR_RESOURCE: 149 case -NFS4ERR_LAYOUTTRYLATER: 150 case -NFS4ERR_RECALLCONFLICT: 151 return -EREMOTEIO; 152 case -NFS4ERR_WRONGSEC: 153 case -NFS4ERR_WRONG_CRED: 154 return -EPERM; 155 case -NFS4ERR_BADOWNER: 156 case -NFS4ERR_BADNAME: 157 return -EINVAL; 158 case -NFS4ERR_SHARE_DENIED: 159 return -EACCES; 160 case -NFS4ERR_MINOR_VERS_MISMATCH: 161 return -EPROTONOSUPPORT; 162 case -NFS4ERR_FILE_OPEN: 163 return -EBUSY; 164 default: 165 dprintk("%s could not handle NFSv4 error %d\n", 166 __func__, -err); 167 break; 168 } 169 return -EIO; 170} 171 172/* 173 * This is our standard bitmap for GETATTR requests. 
174 */ 175const u32 nfs4_fattr_bitmap[3] = { 176 FATTR4_WORD0_TYPE 177 | FATTR4_WORD0_CHANGE 178 | FATTR4_WORD0_SIZE 179 | FATTR4_WORD0_FSID 180 | FATTR4_WORD0_FILEID, 181 FATTR4_WORD1_MODE 182 | FATTR4_WORD1_NUMLINKS 183 | FATTR4_WORD1_OWNER 184 | FATTR4_WORD1_OWNER_GROUP 185 | FATTR4_WORD1_RAWDEV 186 | FATTR4_WORD1_SPACE_USED 187 | FATTR4_WORD1_TIME_ACCESS 188 | FATTR4_WORD1_TIME_METADATA 189 | FATTR4_WORD1_TIME_MODIFY 190 | FATTR4_WORD1_MOUNTED_ON_FILEID, 191#ifdef CONFIG_NFS_V4_SECURITY_LABEL 192 FATTR4_WORD2_SECURITY_LABEL 193#endif 194}; 195 196static const u32 nfs4_pnfs_open_bitmap[3] = { 197 FATTR4_WORD0_TYPE 198 | FATTR4_WORD0_CHANGE 199 | FATTR4_WORD0_SIZE 200 | FATTR4_WORD0_FSID 201 | FATTR4_WORD0_FILEID, 202 FATTR4_WORD1_MODE 203 | FATTR4_WORD1_NUMLINKS 204 | FATTR4_WORD1_OWNER 205 | FATTR4_WORD1_OWNER_GROUP 206 | FATTR4_WORD1_RAWDEV 207 | FATTR4_WORD1_SPACE_USED 208 | FATTR4_WORD1_TIME_ACCESS 209 | FATTR4_WORD1_TIME_METADATA 210 | FATTR4_WORD1_TIME_MODIFY, 211 FATTR4_WORD2_MDSTHRESHOLD 212}; 213 214static const u32 nfs4_open_noattr_bitmap[3] = { 215 FATTR4_WORD0_TYPE 216 | FATTR4_WORD0_CHANGE 217 | FATTR4_WORD0_FILEID, 218}; 219 220const u32 nfs4_statfs_bitmap[3] = { 221 FATTR4_WORD0_FILES_AVAIL 222 | FATTR4_WORD0_FILES_FREE 223 | FATTR4_WORD0_FILES_TOTAL, 224 FATTR4_WORD1_SPACE_AVAIL 225 | FATTR4_WORD1_SPACE_FREE 226 | FATTR4_WORD1_SPACE_TOTAL 227}; 228 229const u32 nfs4_pathconf_bitmap[3] = { 230 FATTR4_WORD0_MAXLINK 231 | FATTR4_WORD0_MAXNAME, 232 0 233}; 234 235const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE 236 | FATTR4_WORD0_MAXREAD 237 | FATTR4_WORD0_MAXWRITE 238 | FATTR4_WORD0_LEASE_TIME, 239 FATTR4_WORD1_TIME_DELTA 240 | FATTR4_WORD1_FS_LAYOUT_TYPES, 241 FATTR4_WORD2_LAYOUT_BLKSIZE 242}; 243 244const u32 nfs4_fs_locations_bitmap[3] = { 245 FATTR4_WORD0_TYPE 246 | FATTR4_WORD0_CHANGE 247 | FATTR4_WORD0_SIZE 248 | FATTR4_WORD0_FSID 249 | FATTR4_WORD0_FILEID 250 | FATTR4_WORD0_FS_LOCATIONS, 251 FATTR4_WORD1_MODE 252 | FATTR4_WORD1_NUMLINKS 253 | FATTR4_WORD1_OWNER 254 | FATTR4_WORD1_OWNER_GROUP 255 | FATTR4_WORD1_RAWDEV 256 | FATTR4_WORD1_SPACE_USED 257 | FATTR4_WORD1_TIME_ACCESS 258 | FATTR4_WORD1_TIME_METADATA 259 | FATTR4_WORD1_TIME_MODIFY 260 | FATTR4_WORD1_MOUNTED_ON_FILEID, 261}; 262 263static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry, 264 struct nfs4_readdir_arg *readdir) 265{ 266 __be32 *start, *p; 267 268 if (cookie > 2) { 269 readdir->cookie = cookie; 270 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier)); 271 return; 272 } 273 274 readdir->cookie = 0; 275 memset(&readdir->verifier, 0, sizeof(readdir->verifier)); 276 if (cookie == 2) 277 return; 278 279 /* 280 * NFSv4 servers do not return entries for '.' and '..' 281 * Therefore, we fake these entries here. We let '.' 282 * have cookie 0 and '..' have cookie 1. Note that 283 * when talking to the server, we always send cookie 0 284 * instead of 1 or 2. 
285 */ 286 start = p = kmap_atomic(*readdir->pages); 287 288 if (cookie == 0) { 289 *p++ = xdr_one; /* next */ 290 *p++ = xdr_zero; /* cookie, first word */ 291 *p++ = xdr_one; /* cookie, second word */ 292 *p++ = xdr_one; /* entry len */ 293 memcpy(p, ".\0\0\0", 4); /* entry */ 294 p++; 295 *p++ = xdr_one; /* bitmap length */ 296 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */ 297 *p++ = htonl(8); /* attribute buffer length */ 298 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry))); 299 } 300 301 *p++ = xdr_one; /* next */ 302 *p++ = xdr_zero; /* cookie, first word */ 303 *p++ = xdr_two; /* cookie, second word */ 304 *p++ = xdr_two; /* entry len */ 305 memcpy(p, "..\0\0", 4); /* entry */ 306 p++; 307 *p++ = xdr_one; /* bitmap length */ 308 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */ 309 *p++ = htonl(8); /* attribute buffer length */ 310 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent))); 311 312 readdir->pgbase = (char *)p - (char *)start; 313 readdir->count -= readdir->pgbase; 314 kunmap_atomic(start); 315} 316 317static long nfs4_update_delay(long *timeout) 318{ 319 long ret; 320 if (!timeout) 321 return NFS4_POLL_RETRY_MAX; 322 if (*timeout <= 0) 323 *timeout = NFS4_POLL_RETRY_MIN; 324 if (*timeout > NFS4_POLL_RETRY_MAX) 325 *timeout = NFS4_POLL_RETRY_MAX; 326 ret = *timeout; 327 *timeout <<= 1; 328 return ret; 329} 330 331static int nfs4_delay(struct rpc_clnt *clnt, long *timeout) 332{ 333 int res = 0; 334 335 might_sleep(); 336 337 freezable_schedule_timeout_killable_unsafe( 338 nfs4_update_delay(timeout)); 339 if (fatal_signal_pending(current)) 340 res = -ERESTARTSYS; 341 return res; 342} 343 344/* This is the error handling routine for processes that are allowed 345 * to sleep. 346 */ 347int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception) 348{ 349 struct nfs_client *clp = server->nfs_client; 350 struct nfs4_state *state = exception->state; 351 struct inode *inode = exception->inode; 352 int ret = errorcode; 353 354 exception->retry = 0; 355 switch(errorcode) { 356 case 0: 357 return 0; 358 case -NFS4ERR_OPENMODE: 359 if (inode && nfs4_have_delegation(inode, FMODE_READ)) { 360 nfs4_inode_return_delegation(inode); 361 exception->retry = 1; 362 return 0; 363 } 364 if (state == NULL) 365 break; 366 ret = nfs4_schedule_stateid_recovery(server, state); 367 if (ret < 0) 368 break; 369 goto wait_on_recovery; 370 case -NFS4ERR_DELEG_REVOKED: 371 case -NFS4ERR_ADMIN_REVOKED: 372 case -NFS4ERR_BAD_STATEID: 373 if (state == NULL) 374 break; 375 ret = nfs4_schedule_stateid_recovery(server, state); 376 if (ret < 0) 377 break; 378 goto wait_on_recovery; 379 case -NFS4ERR_EXPIRED: 380 if (state != NULL) { 381 ret = nfs4_schedule_stateid_recovery(server, state); 382 if (ret < 0) 383 break; 384 } 385 case -NFS4ERR_STALE_STATEID: 386 case -NFS4ERR_STALE_CLIENTID: 387 nfs4_schedule_lease_recovery(clp); 388 goto wait_on_recovery; 389 case -NFS4ERR_MOVED: 390 ret = nfs4_schedule_migration_recovery(server); 391 if (ret < 0) 392 break; 393 goto wait_on_recovery; 394 case -NFS4ERR_LEASE_MOVED: 395 nfs4_schedule_lease_moved_recovery(clp); 396 goto wait_on_recovery; 397#if defined(CONFIG_NFS_V4_1) 398 case -NFS4ERR_BADSESSION: 399 case -NFS4ERR_BADSLOT: 400 case -NFS4ERR_BAD_HIGH_SLOT: 401 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 402 case -NFS4ERR_DEADSESSION: 403 case -NFS4ERR_SEQ_FALSE_RETRY: 404 case -NFS4ERR_SEQ_MISORDERED: 405 dprintk("%s ERROR: %d Reset session\n", __func__, 406 errorcode); 407 
nfs4_schedule_session_recovery(clp->cl_session, errorcode); 408 goto wait_on_recovery; 409#endif /* defined(CONFIG_NFS_V4_1) */ 410 case -NFS4ERR_FILE_OPEN: 411 if (exception->timeout > HZ) { 412 /* We have retried a decent amount, time to 413 * fail 414 */ 415 ret = -EBUSY; 416 break; 417 } 418 case -NFS4ERR_GRACE: 419 case -NFS4ERR_DELAY: 420 ret = nfs4_delay(server->client, &exception->timeout); 421 if (ret != 0) 422 break; 423 case -NFS4ERR_RETRY_UNCACHED_REP: 424 case -NFS4ERR_OLD_STATEID: 425 exception->retry = 1; 426 break; 427 case -NFS4ERR_BADOWNER: 428 /* The following works around a Linux server bug! */ 429 case -NFS4ERR_BADNAME: 430 if (server->caps & NFS_CAP_UIDGID_NOMAP) { 431 server->caps &= ~NFS_CAP_UIDGID_NOMAP; 432 exception->retry = 1; 433 printk(KERN_WARNING "NFS: v4 server %s " 434 "does not accept raw " 435 "uid/gids. " 436 "Reenabling the idmapper.\n", 437 server->nfs_client->cl_hostname); 438 } 439 } 440 /* We failed to handle the error */ 441 return nfs4_map_errors(ret); 442wait_on_recovery: 443 ret = nfs4_wait_clnt_recover(clp); 444 if (test_bit(NFS_MIG_FAILED, &server->mig_status)) 445 return -EIO; 446 if (ret == 0) 447 exception->retry = 1; 448 return ret; 449} 450 451/* 452 * Return 'true' if 'clp' is using an rpc_client that is integrity protected 453 * or 'false' otherwise. 454 */ 455static bool _nfs4_is_integrity_protected(struct nfs_client *clp) 456{ 457 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor; 458 459 if (flavor == RPC_AUTH_GSS_KRB5I || 460 flavor == RPC_AUTH_GSS_KRB5P) 461 return true; 462 463 return false; 464} 465 466static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp) 467{ 468 spin_lock(&clp->cl_lock); 469 if (time_before(clp->cl_last_renewal,timestamp)) 470 clp->cl_last_renewal = timestamp; 471 spin_unlock(&clp->cl_lock); 472} 473 474static void renew_lease(const struct nfs_server *server, unsigned long timestamp) 475{ 476 do_renew_lease(server->nfs_client, timestamp); 477} 478 479struct nfs4_call_sync_data { 480 const struct nfs_server *seq_server; 481 struct nfs4_sequence_args *seq_args; 482 struct nfs4_sequence_res *seq_res; 483}; 484 485static void nfs4_init_sequence(struct nfs4_sequence_args *args, 486 struct nfs4_sequence_res *res, int cache_reply) 487{ 488 args->sa_slot = NULL; 489 args->sa_cache_this = cache_reply; 490 args->sa_privileged = 0; 491 492 res->sr_slot = NULL; 493} 494 495static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args) 496{ 497 args->sa_privileged = 1; 498} 499 500int nfs40_setup_sequence(struct nfs4_slot_table *tbl, 501 struct nfs4_sequence_args *args, 502 struct nfs4_sequence_res *res, 503 struct rpc_task *task) 504{ 505 struct nfs4_slot *slot; 506 507 /* slot already allocated? 
*/ 508 if (res->sr_slot != NULL) 509 goto out_start; 510 511 spin_lock(&tbl->slot_tbl_lock); 512 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged) 513 goto out_sleep; 514 515 slot = nfs4_alloc_slot(tbl); 516 if (IS_ERR(slot)) { 517 if (slot == ERR_PTR(-ENOMEM)) 518 task->tk_timeout = HZ >> 2; 519 goto out_sleep; 520 } 521 spin_unlock(&tbl->slot_tbl_lock); 522 523 args->sa_slot = slot; 524 res->sr_slot = slot; 525 526out_start: 527 rpc_call_start(task); 528 return 0; 529 530out_sleep: 531 if (args->sa_privileged) 532 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task, 533 NULL, RPC_PRIORITY_PRIVILEGED); 534 else 535 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); 536 spin_unlock(&tbl->slot_tbl_lock); 537 return -EAGAIN; 538} 539EXPORT_SYMBOL_GPL(nfs40_setup_sequence); 540 541static int nfs40_sequence_done(struct rpc_task *task, 542 struct nfs4_sequence_res *res) 543{ 544 struct nfs4_slot *slot = res->sr_slot; 545 struct nfs4_slot_table *tbl; 546 547 if (slot == NULL) 548 goto out; 549 550 tbl = slot->table; 551 spin_lock(&tbl->slot_tbl_lock); 552 if (!nfs41_wake_and_assign_slot(tbl, slot)) 553 nfs4_free_slot(tbl, slot); 554 spin_unlock(&tbl->slot_tbl_lock); 555 556 res->sr_slot = NULL; 557out: 558 return 1; 559} 560 561#if defined(CONFIG_NFS_V4_1) 562 563static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) 564{ 565 struct nfs4_session *session; 566 struct nfs4_slot_table *tbl; 567 struct nfs4_slot *slot = res->sr_slot; 568 bool send_new_highest_used_slotid = false; 569 570 tbl = slot->table; 571 session = tbl->session; 572 573 spin_lock(&tbl->slot_tbl_lock); 574 /* Be nice to the server: try to ensure that the last transmitted 575 * value for highest_user_slotid <= target_highest_slotid 576 */ 577 if (tbl->highest_used_slotid > tbl->target_highest_slotid) 578 send_new_highest_used_slotid = true; 579 580 if (nfs41_wake_and_assign_slot(tbl, slot)) { 581 send_new_highest_used_slotid = false; 582 goto out_unlock; 583 } 584 nfs4_free_slot(tbl, slot); 585 586 if (tbl->highest_used_slotid != NFS4_NO_SLOT) 587 send_new_highest_used_slotid = false; 588out_unlock: 589 spin_unlock(&tbl->slot_tbl_lock); 590 res->sr_slot = NULL; 591 if (send_new_highest_used_slotid) 592 nfs41_server_notify_highest_slotid_update(session->clp); 593} 594 595int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) 596{ 597 struct nfs4_session *session; 598 struct nfs4_slot *slot = res->sr_slot; 599 struct nfs_client *clp; 600 bool interrupted = false; 601 int ret = 1; 602 603 if (slot == NULL) 604 goto out_noaction; 605 /* don't increment the sequence number if the task wasn't sent */ 606 if (!RPC_WAS_SENT(task)) 607 goto out; 608 609 session = slot->table->session; 610 611 if (slot->interrupted) { 612 slot->interrupted = 0; 613 interrupted = true; 614 } 615 616 trace_nfs4_sequence_done(session, res); 617 /* Check the SEQUENCE operation status */ 618 switch (res->sr_status) { 619 case 0: 620 /* Update the slot's sequence and clientid lease timer */ 621 ++slot->seq_nr; 622 clp = session->clp; 623 do_renew_lease(clp, res->sr_timestamp); 624 /* Check sequence flags */ 625 if (res->sr_status_flags != 0) 626 nfs4_schedule_lease_recovery(clp); 627 nfs41_update_target_slotid(slot->table, slot, res); 628 break; 629 case 1: 630 /* 631 * sr_status remains 1 if an RPC level error occurred. 632 * The server may or may not have processed the sequence 633 * operation.. 634 * Mark the slot as having hosted an interrupted RPC call. 
635 */ 636 slot->interrupted = 1; 637 goto out; 638 case -NFS4ERR_DELAY: 639 /* The server detected a resend of the RPC call and 640 * returned NFS4ERR_DELAY as per Section 2.10.6.2 641 * of RFC5661. 642 */ 643 dprintk("%s: slot=%u seq=%u: Operation in progress\n", 644 __func__, 645 slot->slot_nr, 646 slot->seq_nr); 647 goto out_retry; 648 case -NFS4ERR_BADSLOT: 649 /* 650 * The slot id we used was probably retired. Try again 651 * using a different slot id. 652 */ 653 goto retry_nowait; 654 case -NFS4ERR_SEQ_MISORDERED: 655 /* 656 * Was the last operation on this sequence interrupted? 657 * If so, retry after bumping the sequence number. 658 */ 659 if (interrupted) { 660 ++slot->seq_nr; 661 goto retry_nowait; 662 } 663 /* 664 * Could this slot have been previously retired? 665 * If so, then the server may be expecting seq_nr = 1! 666 */ 667 if (slot->seq_nr != 1) { 668 slot->seq_nr = 1; 669 goto retry_nowait; 670 } 671 break; 672 case -NFS4ERR_SEQ_FALSE_RETRY: 673 ++slot->seq_nr; 674 goto retry_nowait; 675 default: 676 /* Just update the slot sequence no. */ 677 ++slot->seq_nr; 678 } 679out: 680 /* The session may be reset by one of the error handlers. */ 681 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status); 682 nfs41_sequence_free_slot(res); 683out_noaction: 684 return ret; 685retry_nowait: 686 if (rpc_restart_call_prepare(task)) { 687 task->tk_status = 0; 688 ret = 0; 689 } 690 goto out; 691out_retry: 692 if (!rpc_restart_call(task)) 693 goto out; 694 rpc_delay(task, NFS4_POLL_RETRY_MAX); 695 return 0; 696} 697EXPORT_SYMBOL_GPL(nfs41_sequence_done); 698 699int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) 700{ 701 if (res->sr_slot == NULL) 702 return 1; 703 if (!res->sr_slot->table->session) 704 return nfs40_sequence_done(task, res); 705 return nfs41_sequence_done(task, res); 706} 707EXPORT_SYMBOL_GPL(nfs4_sequence_done); 708 709int nfs41_setup_sequence(struct nfs4_session *session, 710 struct nfs4_sequence_args *args, 711 struct nfs4_sequence_res *res, 712 struct rpc_task *task) 713{ 714 struct nfs4_slot *slot; 715 struct nfs4_slot_table *tbl; 716 717 dprintk("--> %s\n", __func__); 718 /* slot already allocated? */ 719 if (res->sr_slot != NULL) 720 goto out_success; 721 722 tbl = &session->fc_slot_table; 723 724 task->tk_timeout = 0; 725 726 spin_lock(&tbl->slot_tbl_lock); 727 if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) && 728 !args->sa_privileged) { 729 /* The state manager will wait until the slot table is empty */ 730 dprintk("%s session is draining\n", __func__); 731 goto out_sleep; 732 } 733 734 slot = nfs4_alloc_slot(tbl); 735 if (IS_ERR(slot)) { 736 /* If out of memory, try again in 1/4 second */ 737 if (slot == ERR_PTR(-ENOMEM)) 738 task->tk_timeout = HZ >> 2; 739 dprintk("<-- %s: no free slots\n", __func__); 740 goto out_sleep; 741 } 742 spin_unlock(&tbl->slot_tbl_lock); 743 744 args->sa_slot = slot; 745 746 dprintk("<-- %s slotid=%u seqid=%u\n", __func__, 747 slot->slot_nr, slot->seq_nr); 748 749 res->sr_slot = slot; 750 res->sr_timestamp = jiffies; 751 res->sr_status_flags = 0; 752 /* 753 * sr_status is only set in decode_sequence, and so will remain 754 * set to 1 if an rpc level failure occurs. 
755 */ 756 res->sr_status = 1; 757 trace_nfs4_setup_sequence(session, args); 758out_success: 759 rpc_call_start(task); 760 return 0; 761out_sleep: 762 /* Privileged tasks are queued with top priority */ 763 if (args->sa_privileged) 764 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task, 765 NULL, RPC_PRIORITY_PRIVILEGED); 766 else 767 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); 768 spin_unlock(&tbl->slot_tbl_lock); 769 return -EAGAIN; 770} 771EXPORT_SYMBOL_GPL(nfs41_setup_sequence); 772 773static int nfs4_setup_sequence(const struct nfs_server *server, 774 struct nfs4_sequence_args *args, 775 struct nfs4_sequence_res *res, 776 struct rpc_task *task) 777{ 778 struct nfs4_session *session = nfs4_get_session(server); 779 int ret = 0; 780 781 if (!session) 782 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl, 783 args, res, task); 784 785 dprintk("--> %s clp %p session %p sr_slot %u\n", 786 __func__, session->clp, session, res->sr_slot ? 787 res->sr_slot->slot_nr : NFS4_NO_SLOT); 788 789 ret = nfs41_setup_sequence(session, args, res, task); 790 791 dprintk("<-- %s status=%d\n", __func__, ret); 792 return ret; 793} 794 795static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata) 796{ 797 struct nfs4_call_sync_data *data = calldata; 798 struct nfs4_session *session = nfs4_get_session(data->seq_server); 799 800 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server); 801 802 nfs41_setup_sequence(session, data->seq_args, data->seq_res, task); 803} 804 805static void nfs41_call_sync_done(struct rpc_task *task, void *calldata) 806{ 807 struct nfs4_call_sync_data *data = calldata; 808 809 nfs41_sequence_done(task, data->seq_res); 810} 811 812static const struct rpc_call_ops nfs41_call_sync_ops = { 813 .rpc_call_prepare = nfs41_call_sync_prepare, 814 .rpc_call_done = nfs41_call_sync_done, 815}; 816 817#else /* !CONFIG_NFS_V4_1 */ 818 819static int nfs4_setup_sequence(const struct nfs_server *server, 820 struct nfs4_sequence_args *args, 821 struct nfs4_sequence_res *res, 822 struct rpc_task *task) 823{ 824 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl, 825 args, res, task); 826} 827 828int nfs4_sequence_done(struct rpc_task *task, 829 struct nfs4_sequence_res *res) 830{ 831 return nfs40_sequence_done(task, res); 832} 833EXPORT_SYMBOL_GPL(nfs4_sequence_done); 834 835#endif /* !CONFIG_NFS_V4_1 */ 836 837static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata) 838{ 839 struct nfs4_call_sync_data *data = calldata; 840 nfs4_setup_sequence(data->seq_server, 841 data->seq_args, data->seq_res, task); 842} 843 844static void nfs40_call_sync_done(struct rpc_task *task, void *calldata) 845{ 846 struct nfs4_call_sync_data *data = calldata; 847 nfs4_sequence_done(task, data->seq_res); 848} 849 850static const struct rpc_call_ops nfs40_call_sync_ops = { 851 .rpc_call_prepare = nfs40_call_sync_prepare, 852 .rpc_call_done = nfs40_call_sync_done, 853}; 854 855static int nfs4_call_sync_sequence(struct rpc_clnt *clnt, 856 struct nfs_server *server, 857 struct rpc_message *msg, 858 struct nfs4_sequence_args *args, 859 struct nfs4_sequence_res *res) 860{ 861 int ret; 862 struct rpc_task *task; 863 struct nfs_client *clp = server->nfs_client; 864 struct nfs4_call_sync_data data = { 865 .seq_server = server, 866 .seq_args = args, 867 .seq_res = res, 868 }; 869 struct rpc_task_setup task_setup = { 870 .rpc_client = clnt, 871 .rpc_message = msg, 872 .callback_ops = clp->cl_mvops->call_sync_ops, 873 .callback_data = &data 874 }; 875 876 task = 
rpc_run_task(&task_setup); 877 if (IS_ERR(task)) 878 ret = PTR_ERR(task); 879 else { 880 ret = task->tk_status; 881 rpc_put_task(task); 882 } 883 return ret; 884} 885 886int nfs4_call_sync(struct rpc_clnt *clnt, 887 struct nfs_server *server, 888 struct rpc_message *msg, 889 struct nfs4_sequence_args *args, 890 struct nfs4_sequence_res *res, 891 int cache_reply) 892{ 893 nfs4_init_sequence(args, res, cache_reply); 894 return nfs4_call_sync_sequence(clnt, server, msg, args, res); 895} 896 897static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo) 898{ 899 struct nfs_inode *nfsi = NFS_I(dir); 900 901 spin_lock(&dir->i_lock); 902 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; 903 if (!cinfo->atomic || cinfo->before != dir->i_version) 904 nfs_force_lookup_revalidate(dir); 905 dir->i_version = cinfo->after; 906 nfsi->attr_gencount = nfs_inc_attr_generation_counter(); 907 nfs_fscache_invalidate(dir); 908 spin_unlock(&dir->i_lock); 909} 910 911struct nfs4_opendata { 912 struct kref kref; 913 struct nfs_openargs o_arg; 914 struct nfs_openres o_res; 915 struct nfs_open_confirmargs c_arg; 916 struct nfs_open_confirmres c_res; 917 struct nfs4_string owner_name; 918 struct nfs4_string group_name; 919 struct nfs_fattr f_attr; 920 struct nfs4_label *f_label; 921 struct dentry *dir; 922 struct dentry *dentry; 923 struct nfs4_state_owner *owner; 924 struct nfs4_state *state; 925 struct iattr attrs; 926 unsigned long timestamp; 927 unsigned int rpc_done : 1; 928 unsigned int file_created : 1; 929 unsigned int is_recover : 1; 930 int rpc_status; 931 int cancelled; 932}; 933 934static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server, 935 int err, struct nfs4_exception *exception) 936{ 937 if (err != -EINVAL) 938 return false; 939 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1)) 940 return false; 941 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1; 942 exception->retry = 1; 943 return true; 944} 945 946static u32 947nfs4_map_atomic_open_share(struct nfs_server *server, 948 fmode_t fmode, int openflags) 949{ 950 u32 res = 0; 951 952 switch (fmode & (FMODE_READ | FMODE_WRITE)) { 953 case FMODE_READ: 954 res = NFS4_SHARE_ACCESS_READ; 955 break; 956 case FMODE_WRITE: 957 res = NFS4_SHARE_ACCESS_WRITE; 958 break; 959 case FMODE_READ|FMODE_WRITE: 960 res = NFS4_SHARE_ACCESS_BOTH; 961 } 962 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1)) 963 goto out; 964 /* Want no delegation if we're using O_DIRECT */ 965 if (openflags & O_DIRECT) 966 res |= NFS4_SHARE_WANT_NO_DELEG; 967out: 968 return res; 969} 970 971static enum open_claim_type4 972nfs4_map_atomic_open_claim(struct nfs_server *server, 973 enum open_claim_type4 claim) 974{ 975 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1) 976 return claim; 977 switch (claim) { 978 default: 979 return claim; 980 case NFS4_OPEN_CLAIM_FH: 981 return NFS4_OPEN_CLAIM_NULL; 982 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 983 return NFS4_OPEN_CLAIM_DELEGATE_CUR; 984 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 985 return NFS4_OPEN_CLAIM_DELEGATE_PREV; 986 } 987} 988 989static void nfs4_init_opendata_res(struct nfs4_opendata *p) 990{ 991 p->o_res.f_attr = &p->f_attr; 992 p->o_res.f_label = p->f_label; 993 p->o_res.seqid = p->o_arg.seqid; 994 p->c_res.seqid = p->c_arg.seqid; 995 p->o_res.server = p->o_arg.server; 996 p->o_res.access_request = p->o_arg.access; 997 nfs_fattr_init(&p->f_attr); 998 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name); 999} 1000 1001static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, 1002 struct 
nfs4_state_owner *sp, fmode_t fmode, int flags, 1003 const struct iattr *attrs, 1004 struct nfs4_label *label, 1005 enum open_claim_type4 claim, 1006 gfp_t gfp_mask) 1007{ 1008 struct dentry *parent = dget_parent(dentry); 1009 struct inode *dir = d_inode(parent); 1010 struct nfs_server *server = NFS_SERVER(dir); 1011 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 1012 struct nfs4_opendata *p; 1013 1014 p = kzalloc(sizeof(*p), gfp_mask); 1015 if (p == NULL) 1016 goto err; 1017 1018 p->f_label = nfs4_label_alloc(server, gfp_mask); 1019 if (IS_ERR(p->f_label)) 1020 goto err_free_p; 1021 1022 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 1023 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask); 1024 if (IS_ERR(p->o_arg.seqid)) 1025 goto err_free_label; 1026 nfs_sb_active(dentry->d_sb); 1027 p->dentry = dget(dentry); 1028 p->dir = parent; 1029 p->owner = sp; 1030 atomic_inc(&sp->so_count); 1031 p->o_arg.open_flags = flags; 1032 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE); 1033 p->o_arg.share_access = nfs4_map_atomic_open_share(server, 1034 fmode, flags); 1035 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS 1036 * will return permission denied for all bits until close */ 1037 if (!(flags & O_EXCL)) { 1038 /* ask server to check for all possible rights as results 1039 * are cached */ 1040 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY | 1041 NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE; 1042 } 1043 p->o_arg.clientid = server->nfs_client->cl_clientid; 1044 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time); 1045 p->o_arg.id.uniquifier = sp->so_seqid.owner_id; 1046 p->o_arg.name = &dentry->d_name; 1047 p->o_arg.server = server; 1048 p->o_arg.bitmask = nfs4_bitmask(server, label); 1049 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0]; 1050 p->o_arg.label = label; 1051 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim); 1052 switch (p->o_arg.claim) { 1053 case NFS4_OPEN_CLAIM_NULL: 1054 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 1055 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 1056 p->o_arg.fh = NFS_FH(dir); 1057 break; 1058 case NFS4_OPEN_CLAIM_PREVIOUS: 1059 case NFS4_OPEN_CLAIM_FH: 1060 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 1061 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 1062 p->o_arg.fh = NFS_FH(d_inode(dentry)); 1063 } 1064 if (attrs != NULL && attrs->ia_valid != 0) { 1065 __u32 verf[2]; 1066 1067 p->o_arg.u.attrs = &p->attrs; 1068 memcpy(&p->attrs, attrs, sizeof(p->attrs)); 1069 1070 verf[0] = jiffies; 1071 verf[1] = current->pid; 1072 memcpy(p->o_arg.u.verifier.data, verf, 1073 sizeof(p->o_arg.u.verifier.data)); 1074 } 1075 p->c_arg.fh = &p->o_res.fh; 1076 p->c_arg.stateid = &p->o_res.stateid; 1077 p->c_arg.seqid = p->o_arg.seqid; 1078 nfs4_init_opendata_res(p); 1079 kref_init(&p->kref); 1080 return p; 1081 1082err_free_label: 1083 nfs4_label_free(p->f_label); 1084err_free_p: 1085 kfree(p); 1086err: 1087 dput(parent); 1088 return NULL; 1089} 1090 1091static void nfs4_opendata_free(struct kref *kref) 1092{ 1093 struct nfs4_opendata *p = container_of(kref, 1094 struct nfs4_opendata, kref); 1095 struct super_block *sb = p->dentry->d_sb; 1096 1097 nfs_free_seqid(p->o_arg.seqid); 1098 if (p->state != NULL) 1099 nfs4_put_open_state(p->state); 1100 nfs4_put_state_owner(p->owner); 1101 1102 nfs4_label_free(p->f_label); 1103 1104 dput(p->dir); 1105 dput(p->dentry); 1106 nfs_sb_deactive(sb); 1107 nfs_fattr_free_names(&p->f_attr); 1108 kfree(p->f_attr.mdsthreshold); 1109 kfree(p); 1110} 1111 1112static void nfs4_opendata_put(struct nfs4_opendata 
*p) 1113{ 1114 if (p != NULL) 1115 kref_put(&p->kref, nfs4_opendata_free); 1116} 1117 1118static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task) 1119{ 1120 int ret; 1121 1122 ret = rpc_wait_for_completion_task(task); 1123 return ret; 1124} 1125 1126static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode) 1127{ 1128 int ret = 0; 1129 1130 if (open_mode & (O_EXCL|O_TRUNC)) 1131 goto out; 1132 switch (mode & (FMODE_READ|FMODE_WRITE)) { 1133 case FMODE_READ: 1134 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0 1135 && state->n_rdonly != 0; 1136 break; 1137 case FMODE_WRITE: 1138 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0 1139 && state->n_wronly != 0; 1140 break; 1141 case FMODE_READ|FMODE_WRITE: 1142 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0 1143 && state->n_rdwr != 0; 1144 } 1145out: 1146 return ret; 1147} 1148 1149static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode) 1150{ 1151 if (delegation == NULL) 1152 return 0; 1153 if ((delegation->type & fmode) != fmode) 1154 return 0; 1155 if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) 1156 return 0; 1157 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) 1158 return 0; 1159 nfs_mark_delegation_referenced(delegation); 1160 return 1; 1161} 1162 1163static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode) 1164{ 1165 switch (fmode) { 1166 case FMODE_WRITE: 1167 state->n_wronly++; 1168 break; 1169 case FMODE_READ: 1170 state->n_rdonly++; 1171 break; 1172 case FMODE_READ|FMODE_WRITE: 1173 state->n_rdwr++; 1174 } 1175 nfs4_state_set_mode_locked(state, state->state | fmode); 1176} 1177 1178static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state) 1179{ 1180 struct nfs_client *clp = state->owner->so_server->nfs_client; 1181 bool need_recover = false; 1182 1183 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly) 1184 need_recover = true; 1185 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly) 1186 need_recover = true; 1187 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr) 1188 need_recover = true; 1189 if (need_recover) 1190 nfs4_state_mark_reclaim_nograce(clp, state); 1191} 1192 1193static bool nfs_need_update_open_stateid(struct nfs4_state *state, 1194 nfs4_stateid *stateid) 1195{ 1196 if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0) 1197 return true; 1198 if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) { 1199 nfs_test_and_clear_all_open_stateid(state); 1200 return true; 1201 } 1202 if (nfs4_stateid_is_newer(stateid, &state->open_stateid)) 1203 return true; 1204 return false; 1205} 1206 1207static void nfs_resync_open_stateid_locked(struct nfs4_state *state) 1208{ 1209 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr)) 1210 return; 1211 if (state->n_wronly) 1212 set_bit(NFS_O_WRONLY_STATE, &state->flags); 1213 if (state->n_rdonly) 1214 set_bit(NFS_O_RDONLY_STATE, &state->flags); 1215 if (state->n_rdwr) 1216 set_bit(NFS_O_RDWR_STATE, &state->flags); 1217 set_bit(NFS_OPEN_STATE, &state->flags); 1218} 1219 1220static void nfs_clear_open_stateid_locked(struct nfs4_state *state, 1221 nfs4_stateid *arg_stateid, 1222 nfs4_stateid *stateid, fmode_t fmode) 1223{ 1224 clear_bit(NFS_O_RDWR_STATE, &state->flags); 1225 switch (fmode & (FMODE_READ|FMODE_WRITE)) { 1226 case FMODE_WRITE: 1227 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1228 break; 1229 case FMODE_READ: 1230 clear_bit(NFS_O_WRONLY_STATE, 
&state->flags); 1231 break; 1232 case 0: 1233 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1234 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1235 clear_bit(NFS_OPEN_STATE, &state->flags); 1236 } 1237 if (stateid == NULL) 1238 return; 1239 /* Handle races with OPEN */ 1240 if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) || 1241 (nfs4_stateid_match_other(stateid, &state->open_stateid) && 1242 !nfs4_stateid_is_newer(stateid, &state->open_stateid))) { 1243 nfs_resync_open_stateid_locked(state); 1244 return; 1245 } 1246 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1247 nfs4_stateid_copy(&state->stateid, stateid); 1248 nfs4_stateid_copy(&state->open_stateid, stateid); 1249} 1250 1251static void nfs_clear_open_stateid(struct nfs4_state *state, 1252 nfs4_stateid *arg_stateid, 1253 nfs4_stateid *stateid, fmode_t fmode) 1254{ 1255 write_seqlock(&state->seqlock); 1256 nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode); 1257 write_sequnlock(&state->seqlock); 1258 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) 1259 nfs4_schedule_state_manager(state->owner->so_server->nfs_client); 1260} 1261 1262static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) 1263{ 1264 switch (fmode) { 1265 case FMODE_READ: 1266 set_bit(NFS_O_RDONLY_STATE, &state->flags); 1267 break; 1268 case FMODE_WRITE: 1269 set_bit(NFS_O_WRONLY_STATE, &state->flags); 1270 break; 1271 case FMODE_READ|FMODE_WRITE: 1272 set_bit(NFS_O_RDWR_STATE, &state->flags); 1273 } 1274 if (!nfs_need_update_open_stateid(state, stateid)) 1275 return; 1276 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1277 nfs4_stateid_copy(&state->stateid, stateid); 1278 nfs4_stateid_copy(&state->open_stateid, stateid); 1279} 1280 1281static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode) 1282{ 1283 /* 1284 * Protect the call to nfs4_state_set_mode_locked and 1285 * serialise the stateid update 1286 */ 1287 spin_lock(&state->owner->so_lock); 1288 write_seqlock(&state->seqlock); 1289 if (deleg_stateid != NULL) { 1290 nfs4_stateid_copy(&state->stateid, deleg_stateid); 1291 set_bit(NFS_DELEGATED_STATE, &state->flags); 1292 } 1293 if (open_stateid != NULL) 1294 nfs_set_open_stateid_locked(state, open_stateid, fmode); 1295 write_sequnlock(&state->seqlock); 1296 update_open_stateflags(state, fmode); 1297 spin_unlock(&state->owner->so_lock); 1298} 1299 1300static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode) 1301{ 1302 struct nfs_inode *nfsi = NFS_I(state->inode); 1303 struct nfs_delegation *deleg_cur; 1304 int ret = 0; 1305 1306 fmode &= (FMODE_READ|FMODE_WRITE); 1307 1308 rcu_read_lock(); 1309 deleg_cur = rcu_dereference(nfsi->delegation); 1310 if (deleg_cur == NULL) 1311 goto no_delegation; 1312 1313 spin_lock(&deleg_cur->lock); 1314 if (rcu_dereference(nfsi->delegation) != deleg_cur || 1315 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) || 1316 (deleg_cur->type & fmode) != fmode) 1317 goto no_delegation_unlock; 1318 1319 if (delegation == NULL) 1320 delegation = &deleg_cur->stateid; 1321 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation)) 1322 goto no_delegation_unlock; 1323 1324 nfs_mark_delegation_referenced(deleg_cur); 1325 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode); 1326 ret = 1; 1327no_delegation_unlock: 1328 spin_unlock(&deleg_cur->lock); 1329no_delegation: 1330 
rcu_read_unlock(); 1331 1332 if (!ret && open_stateid != NULL) { 1333 __update_open_stateid(state, open_stateid, NULL, fmode); 1334 ret = 1; 1335 } 1336 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) 1337 nfs4_schedule_state_manager(state->owner->so_server->nfs_client); 1338 1339 return ret; 1340} 1341 1342static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp, 1343 const nfs4_stateid *stateid) 1344{ 1345 struct nfs4_state *state = lsp->ls_state; 1346 bool ret = false; 1347 1348 spin_lock(&state->state_lock); 1349 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid)) 1350 goto out_noupdate; 1351 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid)) 1352 goto out_noupdate; 1353 nfs4_stateid_copy(&lsp->ls_stateid, stateid); 1354 ret = true; 1355out_noupdate: 1356 spin_unlock(&state->state_lock); 1357 return ret; 1358} 1359 1360static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode) 1361{ 1362 struct nfs_delegation *delegation; 1363 1364 rcu_read_lock(); 1365 delegation = rcu_dereference(NFS_I(inode)->delegation); 1366 if (delegation == NULL || (delegation->type & fmode) == fmode) { 1367 rcu_read_unlock(); 1368 return; 1369 } 1370 rcu_read_unlock(); 1371 nfs4_inode_return_delegation(inode); 1372} 1373 1374static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) 1375{ 1376 struct nfs4_state *state = opendata->state; 1377 struct nfs_inode *nfsi = NFS_I(state->inode); 1378 struct nfs_delegation *delegation; 1379 int open_mode = opendata->o_arg.open_flags; 1380 fmode_t fmode = opendata->o_arg.fmode; 1381 nfs4_stateid stateid; 1382 int ret = -EAGAIN; 1383 1384 for (;;) { 1385 spin_lock(&state->owner->so_lock); 1386 if (can_open_cached(state, fmode, open_mode)) { 1387 update_open_stateflags(state, fmode); 1388 spin_unlock(&state->owner->so_lock); 1389 goto out_return_state; 1390 } 1391 spin_unlock(&state->owner->so_lock); 1392 rcu_read_lock(); 1393 delegation = rcu_dereference(nfsi->delegation); 1394 if (!can_open_delegated(delegation, fmode)) { 1395 rcu_read_unlock(); 1396 break; 1397 } 1398 /* Save the delegation */ 1399 nfs4_stateid_copy(&stateid, &delegation->stateid); 1400 rcu_read_unlock(); 1401 nfs_release_seqid(opendata->o_arg.seqid); 1402 if (!opendata->is_recover) { 1403 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); 1404 if (ret != 0) 1405 goto out; 1406 } 1407 ret = -EAGAIN; 1408 1409 /* Try to update the stateid using the delegation */ 1410 if (update_open_stateid(state, NULL, &stateid, fmode)) 1411 goto out_return_state; 1412 } 1413out: 1414 return ERR_PTR(ret); 1415out_return_state: 1416 atomic_inc(&state->count); 1417 return state; 1418} 1419 1420static void 1421nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state) 1422{ 1423 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client; 1424 struct nfs_delegation *delegation; 1425 int delegation_flags = 0; 1426 1427 rcu_read_lock(); 1428 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 1429 if (delegation) 1430 delegation_flags = delegation->flags; 1431 rcu_read_unlock(); 1432 if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) { 1433 pr_err_ratelimited("NFS: Broken NFSv4 server %s is " 1434 "returning a delegation for " 1435 "OPEN(CLAIM_DELEGATE_CUR)\n", 1436 clp->cl_hostname); 1437 } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0) 1438 nfs_inode_set_delegation(state->inode, 1439 data->owner->so_cred, 1440 &data->o_res); 1441 else 1442 
nfs_inode_reclaim_delegation(state->inode, 1443 data->owner->so_cred, 1444 &data->o_res); 1445} 1446 1447/* 1448 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes 1449 * and update the nfs4_state. 1450 */ 1451static struct nfs4_state * 1452_nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data) 1453{ 1454 struct inode *inode = data->state->inode; 1455 struct nfs4_state *state = data->state; 1456 int ret; 1457 1458 if (!data->rpc_done) { 1459 if (data->rpc_status) { 1460 ret = data->rpc_status; 1461 goto err; 1462 } 1463 /* cached opens have already been processed */ 1464 goto update; 1465 } 1466 1467 ret = nfs_refresh_inode(inode, &data->f_attr); 1468 if (ret) 1469 goto err; 1470 1471 if (data->o_res.delegation_type != 0) 1472 nfs4_opendata_check_deleg(data, state); 1473update: 1474 update_open_stateid(state, &data->o_res.stateid, NULL, 1475 data->o_arg.fmode); 1476 atomic_inc(&state->count); 1477 1478 return state; 1479err: 1480 return ERR_PTR(ret); 1481 1482} 1483 1484static struct nfs4_state * 1485_nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 1486{ 1487 struct inode *inode; 1488 struct nfs4_state *state = NULL; 1489 int ret; 1490 1491 if (!data->rpc_done) { 1492 state = nfs4_try_open_cached(data); 1493 goto out; 1494 } 1495 1496 ret = -EAGAIN; 1497 if (!(data->f_attr.valid & NFS_ATTR_FATTR)) 1498 goto err; 1499 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr, data->f_label); 1500 ret = PTR_ERR(inode); 1501 if (IS_ERR(inode)) 1502 goto err; 1503 ret = -ENOMEM; 1504 state = nfs4_get_open_state(inode, data->owner); 1505 if (state == NULL) 1506 goto err_put_inode; 1507 if (data->o_res.delegation_type != 0) 1508 nfs4_opendata_check_deleg(data, state); 1509 update_open_stateid(state, &data->o_res.stateid, NULL, 1510 data->o_arg.fmode); 1511 iput(inode); 1512out: 1513 nfs_release_seqid(data->o_arg.seqid); 1514 return state; 1515err_put_inode: 1516 iput(inode); 1517err: 1518 return ERR_PTR(ret); 1519} 1520 1521static struct nfs4_state * 1522nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 1523{ 1524 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) 1525 return _nfs4_opendata_reclaim_to_nfs4_state(data); 1526 return _nfs4_opendata_to_nfs4_state(data); 1527} 1528 1529static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state) 1530{ 1531 struct nfs_inode *nfsi = NFS_I(state->inode); 1532 struct nfs_open_context *ctx; 1533 1534 spin_lock(&state->inode->i_lock); 1535 list_for_each_entry(ctx, &nfsi->open_files, list) { 1536 if (ctx->state != state) 1537 continue; 1538 get_nfs_open_context(ctx); 1539 spin_unlock(&state->inode->i_lock); 1540 return ctx; 1541 } 1542 spin_unlock(&state->inode->i_lock); 1543 return ERR_PTR(-ENOENT); 1544} 1545 1546static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, 1547 struct nfs4_state *state, enum open_claim_type4 claim) 1548{ 1549 struct nfs4_opendata *opendata; 1550 1551 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, 1552 NULL, NULL, claim, GFP_NOFS); 1553 if (opendata == NULL) 1554 return ERR_PTR(-ENOMEM); 1555 opendata->state = state; 1556 atomic_inc(&state->count); 1557 return opendata; 1558} 1559 1560static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res) 1561{ 1562 struct nfs4_state *newstate; 1563 int ret; 1564 1565 opendata->o_arg.open_flags = 0; 1566 opendata->o_arg.fmode = fmode; 1567 opendata->o_arg.share_access = nfs4_map_atomic_open_share( 1568 
NFS_SB(opendata->dentry->d_sb), 1569 fmode, 0); 1570 memset(&opendata->o_res, 0, sizeof(opendata->o_res)); 1571 memset(&opendata->c_res, 0, sizeof(opendata->c_res)); 1572 nfs4_init_opendata_res(opendata); 1573 ret = _nfs4_recover_proc_open(opendata); 1574 if (ret != 0) 1575 return ret; 1576 newstate = nfs4_opendata_to_nfs4_state(opendata); 1577 if (IS_ERR(newstate)) 1578 return PTR_ERR(newstate); 1579 nfs4_close_state(newstate, fmode); 1580 *res = newstate; 1581 return 0; 1582} 1583 1584static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) 1585{ 1586 struct nfs4_state *newstate; 1587 int ret; 1588 1589 /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */ 1590 clear_bit(NFS_O_RDWR_STATE, &state->flags); 1591 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1592 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1593 /* memory barrier prior to reading state->n_* */ 1594 clear_bit(NFS_DELEGATED_STATE, &state->flags); 1595 clear_bit(NFS_OPEN_STATE, &state->flags); 1596 smp_rmb(); 1597 if (state->n_rdwr != 0) { 1598 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate); 1599 if (ret != 0) 1600 return ret; 1601 if (newstate != state) 1602 return -ESTALE; 1603 } 1604 if (state->n_wronly != 0) { 1605 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate); 1606 if (ret != 0) 1607 return ret; 1608 if (newstate != state) 1609 return -ESTALE; 1610 } 1611 if (state->n_rdonly != 0) { 1612 ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate); 1613 if (ret != 0) 1614 return ret; 1615 if (newstate != state) 1616 return -ESTALE; 1617 } 1618 /* 1619 * We may have performed cached opens for all three recoveries. 1620 * Check if we need to update the current stateid. 1621 */ 1622 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 && 1623 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) { 1624 write_seqlock(&state->seqlock); 1625 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1626 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 1627 write_sequnlock(&state->seqlock); 1628 } 1629 return 0; 1630} 1631 1632/* 1633 * OPEN_RECLAIM: 1634 * reclaim state on the server after a reboot. 
1635 */ 1636static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 1637{ 1638 struct nfs_delegation *delegation; 1639 struct nfs4_opendata *opendata; 1640 fmode_t delegation_type = 0; 1641 int status; 1642 1643 opendata = nfs4_open_recoverdata_alloc(ctx, state, 1644 NFS4_OPEN_CLAIM_PREVIOUS); 1645 if (IS_ERR(opendata)) 1646 return PTR_ERR(opendata); 1647 rcu_read_lock(); 1648 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 1649 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0) 1650 delegation_type = delegation->type; 1651 rcu_read_unlock(); 1652 opendata->o_arg.u.delegation_type = delegation_type; 1653 status = nfs4_open_recover(opendata, state); 1654 nfs4_opendata_put(opendata); 1655 return status; 1656} 1657 1658static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 1659{ 1660 struct nfs_server *server = NFS_SERVER(state->inode); 1661 struct nfs4_exception exception = { }; 1662 int err; 1663 do { 1664 err = _nfs4_do_open_reclaim(ctx, state); 1665 trace_nfs4_open_reclaim(ctx, 0, err); 1666 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 1667 continue; 1668 if (err != -NFS4ERR_DELAY) 1669 break; 1670 nfs4_handle_exception(server, err, &exception); 1671 } while (exception.retry); 1672 return err; 1673} 1674 1675static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state) 1676{ 1677 struct nfs_open_context *ctx; 1678 int ret; 1679 1680 ctx = nfs4_state_find_open_context(state); 1681 if (IS_ERR(ctx)) 1682 return -EAGAIN; 1683 ret = nfs4_do_open_reclaim(ctx, state); 1684 put_nfs_open_context(ctx); 1685 return ret; 1686} 1687 1688static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err) 1689{ 1690 switch (err) { 1691 default: 1692 printk(KERN_ERR "NFS: %s: unhandled error " 1693 "%d.\n", __func__, err); 1694 case 0: 1695 case -ENOENT: 1696 case -ESTALE: 1697 break; 1698 case -NFS4ERR_BADSESSION: 1699 case -NFS4ERR_BADSLOT: 1700 case -NFS4ERR_BAD_HIGH_SLOT: 1701 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 1702 case -NFS4ERR_DEADSESSION: 1703 set_bit(NFS_DELEGATED_STATE, &state->flags); 1704 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err); 1705 return -EAGAIN; 1706 case -NFS4ERR_STALE_CLIENTID: 1707 case -NFS4ERR_STALE_STATEID: 1708 set_bit(NFS_DELEGATED_STATE, &state->flags); 1709 case -NFS4ERR_EXPIRED: 1710 /* Don't recall a delegation if it was lost */ 1711 nfs4_schedule_lease_recovery(server->nfs_client); 1712 return -EAGAIN; 1713 case -NFS4ERR_MOVED: 1714 nfs4_schedule_migration_recovery(server); 1715 return -EAGAIN; 1716 case -NFS4ERR_LEASE_MOVED: 1717 nfs4_schedule_lease_moved_recovery(server->nfs_client); 1718 return -EAGAIN; 1719 case -NFS4ERR_DELEG_REVOKED: 1720 case -NFS4ERR_ADMIN_REVOKED: 1721 case -NFS4ERR_BAD_STATEID: 1722 case -NFS4ERR_OPENMODE: 1723 nfs_inode_find_state_and_recover(state->inode, 1724 stateid); 1725 nfs4_schedule_stateid_recovery(server, state); 1726 return -EAGAIN; 1727 case -NFS4ERR_DELAY: 1728 case -NFS4ERR_GRACE: 1729 set_bit(NFS_DELEGATED_STATE, &state->flags); 1730 ssleep(1); 1731 return -EAGAIN; 1732 case -ENOMEM: 1733 case -NFS4ERR_DENIED: 1734 /* kill_proc(fl->fl_pid, SIGLOST, 1); */ 1735 return 0; 1736 } 1737 return err; 1738} 1739 1740int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) 1741{ 1742 struct nfs_server *server = 
NFS_SERVER(state->inode); 1743 struct nfs4_opendata *opendata; 1744 int err; 1745 1746 opendata = nfs4_open_recoverdata_alloc(ctx, state, 1747 NFS4_OPEN_CLAIM_DELEG_CUR_FH); 1748 if (IS_ERR(opendata)) 1749 return PTR_ERR(opendata); 1750 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); 1751 err = nfs4_open_recover(opendata, state); 1752 nfs4_opendata_put(opendata); 1753 return nfs4_handle_delegation_recall_error(server, state, stateid, err); 1754} 1755 1756static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata) 1757{ 1758 struct nfs4_opendata *data = calldata; 1759 1760 nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl, 1761 &data->c_arg.seq_args, &data->c_res.seq_res, task); 1762} 1763 1764static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) 1765{ 1766 struct nfs4_opendata *data = calldata; 1767 1768 nfs40_sequence_done(task, &data->c_res.seq_res); 1769 1770 data->rpc_status = task->tk_status; 1771 if (data->rpc_status == 0) { 1772 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid); 1773 nfs_confirm_seqid(&data->owner->so_seqid, 0); 1774 renew_lease(data->o_res.server, data->timestamp); 1775 data->rpc_done = 1; 1776 } 1777} 1778 1779static void nfs4_open_confirm_release(void *calldata) 1780{ 1781 struct nfs4_opendata *data = calldata; 1782 struct nfs4_state *state = NULL; 1783 1784 /* If this request hasn't been cancelled, do nothing */ 1785 if (data->cancelled == 0) 1786 goto out_free; 1787 /* In case of error, no cleanup! */ 1788 if (!data->rpc_done) 1789 goto out_free; 1790 state = nfs4_opendata_to_nfs4_state(data); 1791 if (!IS_ERR(state)) 1792 nfs4_close_state(state, data->o_arg.fmode); 1793out_free: 1794 nfs4_opendata_put(data); 1795} 1796 1797static const struct rpc_call_ops nfs4_open_confirm_ops = { 1798 .rpc_call_prepare = nfs4_open_confirm_prepare, 1799 .rpc_call_done = nfs4_open_confirm_done, 1800 .rpc_release = nfs4_open_confirm_release, 1801}; 1802 1803/* 1804 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata 1805 */ 1806static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) 1807{ 1808 struct nfs_server *server = NFS_SERVER(d_inode(data->dir)); 1809 struct rpc_task *task; 1810 struct rpc_message msg = { 1811 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], 1812 .rpc_argp = &data->c_arg, 1813 .rpc_resp = &data->c_res, 1814 .rpc_cred = data->owner->so_cred, 1815 }; 1816 struct rpc_task_setup task_setup_data = { 1817 .rpc_client = server->client, 1818 .rpc_message = &msg, 1819 .callback_ops = &nfs4_open_confirm_ops, 1820 .callback_data = data, 1821 .workqueue = nfsiod_workqueue, 1822 .flags = RPC_TASK_ASYNC, 1823 }; 1824 int status; 1825 1826 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1); 1827 kref_get(&data->kref); 1828 data->rpc_done = 0; 1829 data->rpc_status = 0; 1830 data->timestamp = jiffies; 1831 task = rpc_run_task(&task_setup_data); 1832 if (IS_ERR(task)) 1833 return PTR_ERR(task); 1834 status = nfs4_wait_for_completion_rpc_task(task); 1835 if (status != 0) { 1836 data->cancelled = 1; 1837 smp_wmb(); 1838 } else 1839 status = data->rpc_status; 1840 rpc_put_task(task); 1841 return status; 1842} 1843 1844static void nfs4_open_prepare(struct rpc_task *task, void *calldata) 1845{ 1846 struct nfs4_opendata *data = calldata; 1847 struct nfs4_state_owner *sp = data->owner; 1848 struct nfs_client *clp = sp->so_server->nfs_client; 1849 1850 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) 1851 goto out_wait; 1852 /* 1853 * Check if 
we still need to send an OPEN call, or if we can use 1854 * a delegation instead. 1855 */ 1856 if (data->state != NULL) { 1857 struct nfs_delegation *delegation; 1858 1859 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags)) 1860 goto out_no_action; 1861 rcu_read_lock(); 1862 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation); 1863 if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR && 1864 data->o_arg.claim != NFS4_OPEN_CLAIM_DELEG_CUR_FH && 1865 can_open_delegated(delegation, data->o_arg.fmode)) 1866 goto unlock_no_action; 1867 rcu_read_unlock(); 1868 } 1869 /* Update client id. */ 1870 data->o_arg.clientid = clp->cl_clientid; 1871 switch (data->o_arg.claim) { 1872 case NFS4_OPEN_CLAIM_PREVIOUS: 1873 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 1874 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 1875 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; 1876 case NFS4_OPEN_CLAIM_FH: 1877 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 1878 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh); 1879 } 1880 data->timestamp = jiffies; 1881 if (nfs4_setup_sequence(data->o_arg.server, 1882 &data->o_arg.seq_args, 1883 &data->o_res.seq_res, 1884 task) != 0) 1885 nfs_release_seqid(data->o_arg.seqid); 1886 1887 /* Set the create mode (note dependency on the session type) */ 1888 data->o_arg.createmode = NFS4_CREATE_UNCHECKED; 1889 if (data->o_arg.open_flags & O_EXCL) { 1890 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE; 1891 if (nfs4_has_persistent_session(clp)) 1892 data->o_arg.createmode = NFS4_CREATE_GUARDED; 1893 else if (clp->cl_mvops->minor_version > 0) 1894 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1; 1895 } 1896 return; 1897unlock_no_action: 1898 rcu_read_unlock(); 1899out_no_action: 1900 task->tk_action = NULL; 1901out_wait: 1902 nfs4_sequence_done(task, &data->o_res.seq_res); 1903} 1904 1905static void nfs4_open_done(struct rpc_task *task, void *calldata) 1906{ 1907 struct nfs4_opendata *data = calldata; 1908 1909 data->rpc_status = task->tk_status; 1910 1911 if (!nfs4_sequence_done(task, &data->o_res.seq_res)) 1912 return; 1913 1914 if (task->tk_status == 0) { 1915 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) { 1916 switch (data->o_res.f_attr->mode & S_IFMT) { 1917 case S_IFREG: 1918 break; 1919 case S_IFLNK: 1920 data->rpc_status = -ELOOP; 1921 break; 1922 case S_IFDIR: 1923 data->rpc_status = -EISDIR; 1924 break; 1925 default: 1926 data->rpc_status = -ENOTDIR; 1927 } 1928 } 1929 renew_lease(data->o_res.server, data->timestamp); 1930 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) 1931 nfs_confirm_seqid(&data->owner->so_seqid, 0); 1932 } 1933 data->rpc_done = 1; 1934} 1935 1936static void nfs4_open_release(void *calldata) 1937{ 1938 struct nfs4_opendata *data = calldata; 1939 struct nfs4_state *state = NULL; 1940 1941 /* If this request hasn't been cancelled, do nothing */ 1942 if (data->cancelled == 0) 1943 goto out_free; 1944 /* In case of error, no cleanup! */ 1945 if (data->rpc_status != 0 || !data->rpc_done) 1946 goto out_free; 1947 /* In case we need an open_confirm, no cleanup! 
*/ 1948 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) 1949 goto out_free; 1950 state = nfs4_opendata_to_nfs4_state(data); 1951 if (!IS_ERR(state)) 1952 nfs4_close_state(state, data->o_arg.fmode); 1953out_free: 1954 nfs4_opendata_put(data); 1955} 1956 1957static const struct rpc_call_ops nfs4_open_ops = { 1958 .rpc_call_prepare = nfs4_open_prepare, 1959 .rpc_call_done = nfs4_open_done, 1960 .rpc_release = nfs4_open_release, 1961}; 1962 1963static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover) 1964{ 1965 struct inode *dir = d_inode(data->dir); 1966 struct nfs_server *server = NFS_SERVER(dir); 1967 struct nfs_openargs *o_arg = &data->o_arg; 1968 struct nfs_openres *o_res = &data->o_res; 1969 struct rpc_task *task; 1970 struct rpc_message msg = { 1971 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN], 1972 .rpc_argp = o_arg, 1973 .rpc_resp = o_res, 1974 .rpc_cred = data->owner->so_cred, 1975 }; 1976 struct rpc_task_setup task_setup_data = { 1977 .rpc_client = server->client, 1978 .rpc_message = &msg, 1979 .callback_ops = &nfs4_open_ops, 1980 .callback_data = data, 1981 .workqueue = nfsiod_workqueue, 1982 .flags = RPC_TASK_ASYNC, 1983 }; 1984 int status; 1985 1986 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1); 1987 kref_get(&data->kref); 1988 data->rpc_done = 0; 1989 data->rpc_status = 0; 1990 data->cancelled = 0; 1991 data->is_recover = 0; 1992 if (isrecover) { 1993 nfs4_set_sequence_privileged(&o_arg->seq_args); 1994 data->is_recover = 1; 1995 } 1996 task = rpc_run_task(&task_setup_data); 1997 if (IS_ERR(task)) 1998 return PTR_ERR(task); 1999 status = nfs4_wait_for_completion_rpc_task(task); 2000 if (status != 0) { 2001 data->cancelled = 1; 2002 smp_wmb(); 2003 } else 2004 status = data->rpc_status; 2005 rpc_put_task(task); 2006 2007 return status; 2008} 2009 2010static int _nfs4_recover_proc_open(struct nfs4_opendata *data) 2011{ 2012 struct inode *dir = d_inode(data->dir); 2013 struct nfs_openres *o_res = &data->o_res; 2014 int status; 2015 2016 status = nfs4_run_open_task(data, 1); 2017 if (status != 0 || !data->rpc_done) 2018 return status; 2019 2020 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); 2021 2022 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2023 status = _nfs4_proc_open_confirm(data); 2024 if (status != 0) 2025 return status; 2026 } 2027 2028 return status; 2029} 2030 2031/* 2032 * Additional permission checks in order to distinguish between an 2033 * open for read, and an open for execute. This works around the 2034 * fact that NFSv4 OPEN treats read and execute permissions as being 2035 * the same. 2036 * Note that in the non-execute case, we want to turn off permission 2037 * checking if we just created a new file (POSIX open() semantics). 2038 */ 2039static int nfs4_opendata_access(struct rpc_cred *cred, 2040 struct nfs4_opendata *opendata, 2041 struct nfs4_state *state, fmode_t fmode, 2042 int openflags) 2043{ 2044 struct nfs_access_entry cache; 2045 u32 mask; 2046 2047 /* access call failed or for some reason the server doesn't 2048 * support any access modes -- defer access call until later */ 2049 if (opendata->o_res.access_supported == 0) 2050 return 0; 2051 2052 mask = 0; 2053 /* 2054 * Use openflags to check for exec, because fmode won't 2055 * always have FMODE_EXEC set when file open for exec. 
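	 * (the VFS exec path passes __FMODE_EXEC in the open flags, which
	 *  is why openflags is the reliable indicator here)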
2056 */ 2057 if (openflags & __FMODE_EXEC) { 2058 /* ONLY check for exec rights */ 2059 mask = MAY_EXEC; 2060 } else if ((fmode & FMODE_READ) && !opendata->file_created) 2061 mask = MAY_READ; 2062 2063 cache.cred = cred; 2064 cache.jiffies = jiffies; 2065 nfs_access_set_mask(&cache, opendata->o_res.access_result); 2066 nfs_access_add_cache(state->inode, &cache); 2067 2068 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0) 2069 return 0; 2070 2071 /* even though OPEN succeeded, access is denied. Close the file */ 2072 nfs4_close_state(state, fmode); 2073 return -EACCES; 2074} 2075 2076/* 2077 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata 2078 */ 2079static int _nfs4_proc_open(struct nfs4_opendata *data) 2080{ 2081 struct inode *dir = d_inode(data->dir); 2082 struct nfs_server *server = NFS_SERVER(dir); 2083 struct nfs_openargs *o_arg = &data->o_arg; 2084 struct nfs_openres *o_res = &data->o_res; 2085 int status; 2086 2087 status = nfs4_run_open_task(data, 0); 2088 if (!data->rpc_done) 2089 return status; 2090 if (status != 0) { 2091 if (status == -NFS4ERR_BADNAME && 2092 !(o_arg->open_flags & O_CREAT)) 2093 return -ENOENT; 2094 return status; 2095 } 2096 2097 nfs_fattr_map_and_free_names(server, &data->f_attr); 2098 2099 if (o_arg->open_flags & O_CREAT) { 2100 update_changeattr(dir, &o_res->cinfo); 2101 if (o_arg->open_flags & O_EXCL) 2102 data->file_created = 1; 2103 else if (o_res->cinfo.before != o_res->cinfo.after) 2104 data->file_created = 1; 2105 } 2106 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 2107 server->caps &= ~NFS_CAP_POSIX_LOCK; 2108 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2109 status = _nfs4_proc_open_confirm(data); 2110 if (status != 0) 2111 return status; 2112 } 2113 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) 2114 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label); 2115 return 0; 2116} 2117 2118static int nfs4_recover_expired_lease(struct nfs_server *server) 2119{ 2120 return nfs4_client_recover_expired_lease(server->nfs_client); 2121} 2122 2123/* 2124 * OPEN_EXPIRED: 2125 * reclaim state on the server after a network partition. 
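 * (the open is replayed with claim type NFS4_OPEN_CLAIM_FH so that the
 *  server can hand out a fresh open stateid)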
2126 * Assumes caller holds the appropriate lock 2127 */ 2128static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2129{ 2130 struct nfs4_opendata *opendata; 2131 int ret; 2132 2133 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2134 NFS4_OPEN_CLAIM_FH); 2135 if (IS_ERR(opendata)) 2136 return PTR_ERR(opendata); 2137 ret = nfs4_open_recover(opendata, state); 2138 if (ret == -ESTALE) 2139 d_drop(ctx->dentry); 2140 nfs4_opendata_put(opendata); 2141 return ret; 2142} 2143 2144static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2145{ 2146 struct nfs_server *server = NFS_SERVER(state->inode); 2147 struct nfs4_exception exception = { }; 2148 int err; 2149 2150 do { 2151 err = _nfs4_open_expired(ctx, state); 2152 trace_nfs4_open_expired(ctx, 0, err); 2153 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2154 continue; 2155 switch (err) { 2156 default: 2157 goto out; 2158 case -NFS4ERR_GRACE: 2159 case -NFS4ERR_DELAY: 2160 nfs4_handle_exception(server, err, &exception); 2161 err = 0; 2162 } 2163 } while (exception.retry); 2164out: 2165 return err; 2166} 2167 2168static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2169{ 2170 struct nfs_open_context *ctx; 2171 int ret; 2172 2173 ctx = nfs4_state_find_open_context(state); 2174 if (IS_ERR(ctx)) 2175 return -EAGAIN; 2176 ret = nfs4_do_open_expired(ctx, state); 2177 put_nfs_open_context(ctx); 2178 return ret; 2179} 2180 2181static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state) 2182{ 2183 nfs_remove_bad_delegation(state->inode); 2184 write_seqlock(&state->seqlock); 2185 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 2186 write_sequnlock(&state->seqlock); 2187 clear_bit(NFS_DELEGATED_STATE, &state->flags); 2188} 2189 2190static void nfs40_clear_delegation_stateid(struct nfs4_state *state) 2191{ 2192 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL) 2193 nfs_finish_clear_delegation_stateid(state); 2194} 2195 2196static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2197{ 2198 /* NFSv4.0 doesn't allow for delegation recovery on open expire */ 2199 nfs40_clear_delegation_stateid(state); 2200 return nfs4_open_expired(sp, state); 2201} 2202 2203#if defined(CONFIG_NFS_V4_1) 2204static void nfs41_check_delegation_stateid(struct nfs4_state *state) 2205{ 2206 struct nfs_server *server = NFS_SERVER(state->inode); 2207 nfs4_stateid stateid; 2208 struct nfs_delegation *delegation; 2209 struct rpc_cred *cred; 2210 int status; 2211 2212 /* Get the delegation credential for use by test/free_stateid */ 2213 rcu_read_lock(); 2214 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2215 if (delegation == NULL) { 2216 rcu_read_unlock(); 2217 return; 2218 } 2219 2220 nfs4_stateid_copy(&stateid, &delegation->stateid); 2221 cred = get_rpccred(delegation->cred); 2222 rcu_read_unlock(); 2223 status = nfs41_test_stateid(server, &stateid, cred); 2224 trace_nfs4_test_delegation_stateid(state, NULL, status); 2225 2226 if (status != NFS_OK) { 2227 /* Free the stateid unless the server explicitly 2228 * informs us the stateid is unrecognized. 
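		 * NFS4ERR_BAD_STATEID from TEST_STATEID means the server has
		 * already forgotten this stateid, so a FREE_STATEID call
		 * would be pointless.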
*/ 2229 if (status != -NFS4ERR_BAD_STATEID) 2230 nfs41_free_stateid(server, &stateid, cred); 2231 nfs_finish_clear_delegation_stateid(state); 2232 } 2233 2234 put_rpccred(cred); 2235} 2236 2237/** 2238 * nfs41_check_open_stateid - possibly free an open stateid 2239 * 2240 * @state: NFSv4 state for an inode 2241 * 2242 * Returns NFS_OK if recovery for this stateid is now finished. 2243 * Otherwise a negative NFS4ERR value is returned. 2244 */ 2245static int nfs41_check_open_stateid(struct nfs4_state *state) 2246{ 2247 struct nfs_server *server = NFS_SERVER(state->inode); 2248 nfs4_stateid *stateid = &state->open_stateid; 2249 struct rpc_cred *cred = state->owner->so_cred; 2250 int status; 2251 2252 /* If a state reset has been done, test_stateid is unneeded */ 2253 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) && 2254 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) && 2255 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0)) 2256 return -NFS4ERR_BAD_STATEID; 2257 2258 status = nfs41_test_stateid(server, stateid, cred); 2259 trace_nfs4_test_open_stateid(state, NULL, status); 2260 if (status != NFS_OK) { 2261 /* Free the stateid unless the server explicitly 2262 * informs us the stateid is unrecognized. */ 2263 if (status != -NFS4ERR_BAD_STATEID) 2264 nfs41_free_stateid(server, stateid, cred); 2265 2266 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 2267 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 2268 clear_bit(NFS_O_RDWR_STATE, &state->flags); 2269 clear_bit(NFS_OPEN_STATE, &state->flags); 2270 } 2271 return status; 2272} 2273 2274static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2275{ 2276 int status; 2277 2278 nfs41_check_delegation_stateid(state); 2279 status = nfs41_check_open_stateid(state); 2280 if (status != NFS_OK) 2281 status = nfs4_open_expired(sp, state); 2282 return status; 2283} 2284#endif 2285 2286/* 2287 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* 2288 * fields corresponding to attributes that were used to store the verifier. 2289 * Make sure we clobber those fields in the later setattr call 2290 */ 2291static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr) 2292{ 2293 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) && 2294 !(sattr->ia_valid & ATTR_ATIME_SET)) 2295 sattr->ia_valid |= ATTR_ATIME; 2296 2297 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) && 2298 !(sattr->ia_valid & ATTR_MTIME_SET)) 2299 sattr->ia_valid |= ATTR_MTIME; 2300} 2301 2302static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, 2303 fmode_t fmode, 2304 int flags, 2305 struct nfs_open_context *ctx) 2306{ 2307 struct nfs4_state_owner *sp = opendata->owner; 2308 struct nfs_server *server = sp->so_server; 2309 struct dentry *dentry; 2310 struct nfs4_state *state; 2311 unsigned int seq; 2312 int ret; 2313 2314 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); 2315 2316 ret = _nfs4_proc_open(opendata); 2317 if (ret != 0) 2318 goto out; 2319 2320 state = nfs4_opendata_to_nfs4_state(opendata); 2321 ret = PTR_ERR(state); 2322 if (IS_ERR(state)) 2323 goto out; 2324 if (server->caps & NFS_CAP_POSIX_LOCK) 2325 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 2326 2327 dentry = opendata->dentry; 2328 if (d_really_is_negative(dentry)) { 2329 /* FIXME: Is this d_drop() ever needed? 
*/ 2330 d_drop(dentry); 2331 dentry = d_add_unique(dentry, igrab(state->inode)); 2332 if (dentry == NULL) { 2333 dentry = opendata->dentry; 2334 } else { 2335 dput(ctx->dentry); 2336 ctx->dentry = dentry; 2337 } 2338 nfs_set_verifier(dentry, 2339 nfs_save_change_attribute(d_inode(opendata->dir))); 2340 } 2341 2342 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags); 2343 if (ret != 0) 2344 goto out; 2345 2346 ctx->state = state; 2347 if (d_inode(dentry) == state->inode) { 2348 nfs_inode_attach_open_context(ctx); 2349 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) 2350 nfs4_schedule_stateid_recovery(server, state); 2351 } 2352out: 2353 return ret; 2354} 2355 2356/* 2357 * Returns a referenced nfs4_state 2358 */ 2359static int _nfs4_do_open(struct inode *dir, 2360 struct nfs_open_context *ctx, 2361 int flags, 2362 struct iattr *sattr, 2363 struct nfs4_label *label, 2364 int *opened) 2365{ 2366 struct nfs4_state_owner *sp; 2367 struct nfs4_state *state = NULL; 2368 struct nfs_server *server = NFS_SERVER(dir); 2369 struct nfs4_opendata *opendata; 2370 struct dentry *dentry = ctx->dentry; 2371 struct rpc_cred *cred = ctx->cred; 2372 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold; 2373 fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC); 2374 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL; 2375 struct nfs4_label *olabel = NULL; 2376 int status; 2377 2378 /* Protect against reboot recovery conflicts */ 2379 status = -ENOMEM; 2380 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); 2381 if (sp == NULL) { 2382 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); 2383 goto out_err; 2384 } 2385 status = nfs4_recover_expired_lease(server); 2386 if (status != 0) 2387 goto err_put_state_owner; 2388 if (d_really_is_positive(dentry)) 2389 nfs4_return_incompatible_delegation(d_inode(dentry), fmode); 2390 status = -ENOMEM; 2391 if (d_really_is_positive(dentry)) 2392 claim = NFS4_OPEN_CLAIM_FH; 2393 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, 2394 label, claim, GFP_KERNEL); 2395 if (opendata == NULL) 2396 goto err_put_state_owner; 2397 2398 if (label) { 2399 olabel = nfs4_label_alloc(server, GFP_KERNEL); 2400 if (IS_ERR(olabel)) { 2401 status = PTR_ERR(olabel); 2402 goto err_opendata_put; 2403 } 2404 } 2405 2406 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) { 2407 if (!opendata->f_attr.mdsthreshold) { 2408 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 2409 if (!opendata->f_attr.mdsthreshold) 2410 goto err_free_label; 2411 } 2412 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; 2413 } 2414 if (d_really_is_positive(dentry)) 2415 opendata->state = nfs4_get_open_state(d_inode(dentry), sp); 2416 2417 status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx); 2418 if (status != 0) 2419 goto err_free_label; 2420 state = ctx->state; 2421 2422 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) && 2423 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) { 2424 nfs4_exclusive_attrset(opendata, sattr); 2425 2426 nfs_fattr_init(opendata->o_res.f_attr); 2427 status = nfs4_do_setattr(state->inode, cred, 2428 opendata->o_res.f_attr, sattr, 2429 state, label, olabel); 2430 if (status == 0) { 2431 nfs_setattr_update_inode(state->inode, sattr, 2432 opendata->o_res.f_attr); 2433 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel); 2434 } 2435 } 2436 if (opendata->file_created) 2437 *opened |= FILE_CREATED; 2438 2439 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) { 
2440 *ctx_th = opendata->f_attr.mdsthreshold; 2441 opendata->f_attr.mdsthreshold = NULL; 2442 } 2443 2444 nfs4_label_free(olabel); 2445 2446 nfs4_opendata_put(opendata); 2447 nfs4_put_state_owner(sp); 2448 return 0; 2449err_free_label: 2450 nfs4_label_free(olabel); 2451err_opendata_put: 2452 nfs4_opendata_put(opendata); 2453err_put_state_owner: 2454 nfs4_put_state_owner(sp); 2455out_err: 2456 return status; 2457} 2458 2459 2460static struct nfs4_state *nfs4_do_open(struct inode *dir, 2461 struct nfs_open_context *ctx, 2462 int flags, 2463 struct iattr *sattr, 2464 struct nfs4_label *label, 2465 int *opened) 2466{ 2467 struct nfs_server *server = NFS_SERVER(dir); 2468 struct nfs4_exception exception = { }; 2469 struct nfs4_state *res; 2470 int status; 2471 2472 do { 2473 status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened); 2474 res = ctx->state; 2475 trace_nfs4_open_file(ctx, flags, status); 2476 if (status == 0) 2477 break; 2478 /* NOTE: BAD_SEQID means the server and client disagree about the 2479 * book-keeping w.r.t. state-changing operations 2480 * (OPEN/CLOSE/LOCK/LOCKU...) 2481 * It is actually a sign of a bug on the client or on the server. 2482 * 2483 * If we receive a BAD_SEQID error in the particular case of 2484 * doing an OPEN, we assume that nfs_increment_open_seqid() will 2485 * have unhashed the old state_owner for us, and that we can 2486 * therefore safely retry using a new one. We should still warn 2487 * the user though... 2488 */ 2489 if (status == -NFS4ERR_BAD_SEQID) { 2490 pr_warn_ratelimited("NFS: v4 server %s " 2491 " returned a bad sequence-id error!\n", 2492 NFS_SERVER(dir)->nfs_client->cl_hostname); 2493 exception.retry = 1; 2494 continue; 2495 } 2496 /* 2497 * BAD_STATEID on OPEN means that the server cancelled our 2498 * state before it received the OPEN_CONFIRM. 2499 * Recover by retrying the request as per the discussion 2500 * on Page 181 of RFC3530. 2501 */ 2502 if (status == -NFS4ERR_BAD_STATEID) { 2503 exception.retry = 1; 2504 continue; 2505 } 2506 if (status == -EAGAIN) { 2507 /* We must have found a delegation */ 2508 exception.retry = 1; 2509 continue; 2510 } 2511 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception)) 2512 continue; 2513 res = ERR_PTR(nfs4_handle_exception(server, 2514 status, &exception)); 2515 } while (exception.retry); 2516 return res; 2517} 2518 2519static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2520 struct nfs_fattr *fattr, struct iattr *sattr, 2521 struct nfs4_state *state, struct nfs4_label *ilabel, 2522 struct nfs4_label *olabel) 2523{ 2524 struct nfs_server *server = NFS_SERVER(inode); 2525 struct nfs_setattrargs arg = { 2526 .fh = NFS_FH(inode), 2527 .iap = sattr, 2528 .server = server, 2529 .bitmask = server->attr_bitmask, 2530 .label = ilabel, 2531 }; 2532 struct nfs_setattrres res = { 2533 .fattr = fattr, 2534 .label = olabel, 2535 .server = server, 2536 }; 2537 struct rpc_message msg = { 2538 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 2539 .rpc_argp = &arg, 2540 .rpc_resp = &res, 2541 .rpc_cred = cred, 2542 }; 2543 unsigned long timestamp = jiffies; 2544 fmode_t fmode; 2545 bool truncate; 2546 int status; 2547 2548 arg.bitmask = nfs4_bitmask(server, ilabel); 2549 if (ilabel) 2550 arg.bitmask = nfs4_bitmask(server, olabel); 2551 2552 nfs_fattr_init(fattr); 2553 2554 /* Servers should only apply open mode checks for file size changes */ 2555 truncate = (sattr->ia_valid & ATTR_SIZE) ? true : false; 2556 fmode = truncate ? 
FMODE_WRITE : FMODE_READ; 2557 2558 if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) { 2559 /* Use that stateid */ 2560 } else if (truncate && state != NULL) { 2561 struct nfs_lockowner lockowner = { 2562 .l_owner = current->files, 2563 .l_pid = current->tgid, 2564 }; 2565 if (!nfs4_valid_open_stateid(state)) 2566 return -EBADF; 2567 if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE, 2568 &lockowner) == -EIO) 2569 return -EBADF; 2570 } else 2571 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 2572 2573 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 2574 if (status == 0 && state != NULL) 2575 renew_lease(server, timestamp); 2576 return status; 2577} 2578 2579static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2580 struct nfs_fattr *fattr, struct iattr *sattr, 2581 struct nfs4_state *state, struct nfs4_label *ilabel, 2582 struct nfs4_label *olabel) 2583{ 2584 struct nfs_server *server = NFS_SERVER(inode); 2585 struct nfs4_exception exception = { 2586 .state = state, 2587 .inode = inode, 2588 }; 2589 int err; 2590 do { 2591 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state, ilabel, olabel); 2592 trace_nfs4_setattr(inode, err); 2593 switch (err) { 2594 case -NFS4ERR_OPENMODE: 2595 if (!(sattr->ia_valid & ATTR_SIZE)) { 2596 pr_warn_once("NFSv4: server %s is incorrectly " 2597 "applying open mode checks to " 2598 "a SETATTR that is not " 2599 "changing file size.\n", 2600 server->nfs_client->cl_hostname); 2601 } 2602 if (state && !(state->state & FMODE_WRITE)) { 2603 err = -EBADF; 2604 if (sattr->ia_valid & ATTR_OPEN) 2605 err = -EACCES; 2606 goto out; 2607 } 2608 } 2609 err = nfs4_handle_exception(server, err, &exception); 2610 } while (exception.retry); 2611out: 2612 return err; 2613} 2614 2615struct nfs4_closedata { 2616 struct inode *inode; 2617 struct nfs4_state *state; 2618 struct nfs_closeargs arg; 2619 struct nfs_closeres res; 2620 struct nfs_fattr fattr; 2621 unsigned long timestamp; 2622 bool roc; 2623 u32 roc_barrier; 2624}; 2625 2626static void nfs4_free_closedata(void *data) 2627{ 2628 struct nfs4_closedata *calldata = data; 2629 struct nfs4_state_owner *sp = calldata->state->owner; 2630 struct super_block *sb = calldata->state->inode->i_sb; 2631 2632 if (calldata->roc) 2633 pnfs_roc_release(calldata->state->inode); 2634 nfs4_put_open_state(calldata->state); 2635 nfs_free_seqid(calldata->arg.seqid); 2636 nfs4_put_state_owner(sp); 2637 nfs_sb_deactive(sb); 2638 kfree(calldata); 2639} 2640 2641static void nfs4_close_done(struct rpc_task *task, void *data) 2642{ 2643 struct nfs4_closedata *calldata = data; 2644 struct nfs4_state *state = calldata->state; 2645 struct nfs_server *server = NFS_SERVER(calldata->inode); 2646 nfs4_stateid *res_stateid = NULL; 2647 2648 dprintk("%s: begin!\n", __func__); 2649 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 2650 return; 2651 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status); 2652 /* hmm. we are done with the inode, and in the process of freeing 2653 * the state_owner. 
we keep this around to process errors 2654 */ 2655 switch (task->tk_status) { 2656 case 0: 2657 res_stateid = &calldata->res.stateid; 2658 if (calldata->arg.fmode == 0 && calldata->roc) 2659 pnfs_roc_set_barrier(state->inode, 2660 calldata->roc_barrier); 2661 renew_lease(server, calldata->timestamp); 2662 break; 2663 case -NFS4ERR_ADMIN_REVOKED: 2664 case -NFS4ERR_STALE_STATEID: 2665 case -NFS4ERR_OLD_STATEID: 2666 case -NFS4ERR_BAD_STATEID: 2667 case -NFS4ERR_EXPIRED: 2668 if (!nfs4_stateid_match(&calldata->arg.stateid, 2669 &state->open_stateid)) { 2670 rpc_restart_call_prepare(task); 2671 goto out_release; 2672 } 2673 if (calldata->arg.fmode == 0) 2674 break; 2675 default: 2676 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) { 2677 rpc_restart_call_prepare(task); 2678 goto out_release; 2679 } 2680 } 2681 nfs_clear_open_stateid(state, &calldata->arg.stateid, 2682 res_stateid, calldata->arg.fmode); 2683out_release: 2684 nfs_release_seqid(calldata->arg.seqid); 2685 nfs_refresh_inode(calldata->inode, calldata->res.fattr); 2686 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status); 2687} 2688 2689static void nfs4_close_prepare(struct rpc_task *task, void *data) 2690{ 2691 struct nfs4_closedata *calldata = data; 2692 struct nfs4_state *state = calldata->state; 2693 struct inode *inode = calldata->inode; 2694 bool is_rdonly, is_wronly, is_rdwr; 2695 int call_close = 0; 2696 2697 dprintk("%s: begin!\n", __func__); 2698 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 2699 goto out_wait; 2700 2701 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 2702 spin_lock(&state->owner->so_lock); 2703 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); 2704 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); 2705 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); 2706 nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid); 2707 /* Calculate the change in open mode */ 2708 calldata->arg.fmode = 0; 2709 if (state->n_rdwr == 0) { 2710 if (state->n_rdonly == 0) 2711 call_close |= is_rdonly; 2712 else if (is_rdonly) 2713 calldata->arg.fmode |= FMODE_READ; 2714 if (state->n_wronly == 0) 2715 call_close |= is_wronly; 2716 else if (is_wronly) 2717 calldata->arg.fmode |= FMODE_WRITE; 2718 } else if (is_rdwr) 2719 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; 2720 2721 if (calldata->arg.fmode == 0) 2722 call_close |= is_rdwr; 2723 2724 if (!nfs4_valid_open_stateid(state)) 2725 call_close = 0; 2726 spin_unlock(&state->owner->so_lock); 2727 2728 if (!call_close) { 2729 /* Note: exit _without_ calling nfs4_close_done */ 2730 goto out_no_action; 2731 } 2732 2733 if (calldata->arg.fmode == 0) { 2734 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 2735 if (calldata->roc && 2736 pnfs_roc_drain(inode, &calldata->roc_barrier, task)) { 2737 nfs_release_seqid(calldata->arg.seqid); 2738 goto out_wait; 2739 } 2740 } 2741 calldata->arg.share_access = 2742 nfs4_map_atomic_open_share(NFS_SERVER(inode), 2743 calldata->arg.fmode, 0); 2744 2745 nfs_fattr_init(calldata->res.fattr); 2746 calldata->timestamp = jiffies; 2747 if (nfs4_setup_sequence(NFS_SERVER(inode), 2748 &calldata->arg.seq_args, 2749 &calldata->res.seq_res, 2750 task) != 0) 2751 nfs_release_seqid(calldata->arg.seqid); 2752 dprintk("%s: done!\n", __func__); 2753 return; 2754out_no_action: 2755 task->tk_action = NULL; 2756out_wait: 2757 nfs4_sequence_done(task, &calldata->res.seq_res); 2758} 2759 2760static const struct rpc_call_ops nfs4_close_ops = { 2761 .rpc_call_prepare = 
nfs4_close_prepare, 2762 .rpc_call_done = nfs4_close_done, 2763 .rpc_release = nfs4_free_closedata, 2764}; 2765 2766static bool nfs4_roc(struct inode *inode) 2767{ 2768 if (!nfs_have_layout(inode)) 2769 return false; 2770 return pnfs_roc(inode); 2771} 2772 2773/* 2774 * It is possible for data to be read/written from a mem-mapped file 2775 * after the sys_close call (which hits the vfs layer as a flush). 2776 * This means that we can't safely call nfsv4 close on a file until 2777 * the inode is cleared. This in turn means that we are not good 2778 * NFSv4 citizens - we do not indicate to the server to update the file's 2779 * share state even when we are done with one of the three share 2780 * stateid's in the inode. 2781 * 2782 * NOTE: Caller must be holding the sp->so_owner semaphore! 2783 */ 2784int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) 2785{ 2786 struct nfs_server *server = NFS_SERVER(state->inode); 2787 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 2788 struct nfs4_closedata *calldata; 2789 struct nfs4_state_owner *sp = state->owner; 2790 struct rpc_task *task; 2791 struct rpc_message msg = { 2792 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 2793 .rpc_cred = state->owner->so_cred, 2794 }; 2795 struct rpc_task_setup task_setup_data = { 2796 .rpc_client = server->client, 2797 .rpc_message = &msg, 2798 .callback_ops = &nfs4_close_ops, 2799 .workqueue = nfsiod_workqueue, 2800 .flags = RPC_TASK_ASYNC, 2801 }; 2802 int status = -ENOMEM; 2803 2804 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP, 2805 &task_setup_data.rpc_client, &msg); 2806 2807 calldata = kzalloc(sizeof(*calldata), gfp_mask); 2808 if (calldata == NULL) 2809 goto out; 2810 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1); 2811 calldata->inode = state->inode; 2812 calldata->state = state; 2813 calldata->arg.fh = NFS_FH(state->inode); 2814 /* Serialization for the sequence id */ 2815 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 2816 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask); 2817 if (IS_ERR(calldata->arg.seqid)) 2818 goto out_free_calldata; 2819 calldata->arg.fmode = 0; 2820 calldata->arg.bitmask = server->cache_consistency_bitmask; 2821 calldata->res.fattr = &calldata->fattr; 2822 calldata->res.seqid = calldata->arg.seqid; 2823 calldata->res.server = server; 2824 calldata->roc = nfs4_roc(state->inode); 2825 nfs_sb_active(calldata->inode->i_sb); 2826 2827 msg.rpc_argp = &calldata->arg; 2828 msg.rpc_resp = &calldata->res; 2829 task_setup_data.callback_data = calldata; 2830 task = rpc_run_task(&task_setup_data); 2831 if (IS_ERR(task)) 2832 return PTR_ERR(task); 2833 status = 0; 2834 if (wait) 2835 status = rpc_wait_for_completion_task(task); 2836 rpc_put_task(task); 2837 return status; 2838out_free_calldata: 2839 kfree(calldata); 2840out: 2841 nfs4_put_open_state(state); 2842 nfs4_put_state_owner(sp); 2843 return status; 2844} 2845 2846static struct inode * 2847nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, 2848 int open_flags, struct iattr *attr, int *opened) 2849{ 2850 struct nfs4_state *state; 2851 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL; 2852 2853 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); 2854 2855 /* Protect against concurrent sillydeletes */ 2856 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened); 2857 2858 nfs4_label_release_security(label); 2859 2860 if (IS_ERR(state)) 2861 return ERR_CAST(state); 2862 return state->inode; 
2863} 2864 2865static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 2866{ 2867 if (ctx->state == NULL) 2868 return; 2869 if (is_sync) 2870 nfs4_close_sync(ctx->state, ctx->mode); 2871 else 2872 nfs4_close_state(ctx->state, ctx->mode); 2873} 2874 2875#define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL) 2876#define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL) 2877#define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_SECURITY_LABEL - 1UL) 2878 2879static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 2880{ 2881 struct nfs4_server_caps_arg args = { 2882 .fhandle = fhandle, 2883 }; 2884 struct nfs4_server_caps_res res = {}; 2885 struct rpc_message msg = { 2886 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 2887 .rpc_argp = &args, 2888 .rpc_resp = &res, 2889 }; 2890 int status; 2891 2892 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2893 if (status == 0) { 2894 /* Sanity check the server answers */ 2895 switch (server->nfs_client->cl_minorversion) { 2896 case 0: 2897 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK; 2898 res.attr_bitmask[2] = 0; 2899 break; 2900 case 1: 2901 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK; 2902 break; 2903 case 2: 2904 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK; 2905 } 2906 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 2907 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS| 2908 NFS_CAP_SYMLINKS|NFS_CAP_FILEID| 2909 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER| 2910 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME| 2911 NFS_CAP_CTIME|NFS_CAP_MTIME| 2912 NFS_CAP_SECURITY_LABEL); 2913 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL && 2914 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 2915 server->caps |= NFS_CAP_ACLS; 2916 if (res.has_links != 0) 2917 server->caps |= NFS_CAP_HARDLINKS; 2918 if (res.has_symlinks != 0) 2919 server->caps |= NFS_CAP_SYMLINKS; 2920 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID) 2921 server->caps |= NFS_CAP_FILEID; 2922 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE) 2923 server->caps |= NFS_CAP_MODE; 2924 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS) 2925 server->caps |= NFS_CAP_NLINK; 2926 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER) 2927 server->caps |= NFS_CAP_OWNER; 2928 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP) 2929 server->caps |= NFS_CAP_OWNER_GROUP; 2930 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS) 2931 server->caps |= NFS_CAP_ATIME; 2932 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA) 2933 server->caps |= NFS_CAP_CTIME; 2934 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY) 2935 server->caps |= NFS_CAP_MTIME; 2936#ifdef CONFIG_NFS_V4_SECURITY_LABEL 2937 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL) 2938 server->caps |= NFS_CAP_SECURITY_LABEL; 2939#endif 2940 memcpy(server->attr_bitmask_nl, res.attr_bitmask, 2941 sizeof(server->attr_bitmask)); 2942 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL; 2943 2944 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask)); 2945 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 2946 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 2947 server->cache_consistency_bitmask[2] = 0; 2948 server->acl_bitmask = res.acl_bitmask; 2949 server->fh_expire_type = res.fh_expire_type; 2950 } 2951 2952 return status; 2953} 2954 2955int nfs4_server_capabilities(struct nfs_server *server, struct 
nfs_fh *fhandle) 2956{ 2957 struct nfs4_exception exception = { }; 2958 int err; 2959 do { 2960 err = nfs4_handle_exception(server, 2961 _nfs4_server_capabilities(server, fhandle), 2962 &exception); 2963 } while (exception.retry); 2964 return err; 2965} 2966 2967static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 2968 struct nfs_fsinfo *info) 2969{ 2970 u32 bitmask[3]; 2971 struct nfs4_lookup_root_arg args = { 2972 .bitmask = bitmask, 2973 }; 2974 struct nfs4_lookup_res res = { 2975 .server = server, 2976 .fattr = info->fattr, 2977 .fh = fhandle, 2978 }; 2979 struct rpc_message msg = { 2980 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 2981 .rpc_argp = &args, 2982 .rpc_resp = &res, 2983 }; 2984 2985 bitmask[0] = nfs4_fattr_bitmap[0]; 2986 bitmask[1] = nfs4_fattr_bitmap[1]; 2987 /* 2988 * Process the label in the upcoming getfattr 2989 */ 2990 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL; 2991 2992 nfs_fattr_init(info->fattr); 2993 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2994} 2995 2996static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 2997 struct nfs_fsinfo *info) 2998{ 2999 struct nfs4_exception exception = { }; 3000 int err; 3001 do { 3002 err = _nfs4_lookup_root(server, fhandle, info); 3003 trace_nfs4_lookup_root(server, fhandle, info->fattr, err); 3004 switch (err) { 3005 case 0: 3006 case -NFS4ERR_WRONGSEC: 3007 goto out; 3008 default: 3009 err = nfs4_handle_exception(server, err, &exception); 3010 } 3011 } while (exception.retry); 3012out: 3013 return err; 3014} 3015 3016static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 3017 struct nfs_fsinfo *info, rpc_authflavor_t flavor) 3018{ 3019 struct rpc_auth_create_args auth_args = { 3020 .pseudoflavor = flavor, 3021 }; 3022 struct rpc_auth *auth; 3023 int ret; 3024 3025 auth = rpcauth_create(&auth_args, server->client); 3026 if (IS_ERR(auth)) { 3027 ret = -EACCES; 3028 goto out; 3029 } 3030 ret = nfs4_lookup_root(server, fhandle, info); 3031out: 3032 return ret; 3033} 3034 3035/* 3036 * Retry pseudoroot lookup with various security flavors. We do this when: 3037 * 3038 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC 3039 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation 3040 * 3041 * Returns zero on success, or a negative NFS4ERR value, or a 3042 * negative errno value. 
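 *
 * If the admin specified sec= flavors at mount time, only those flavors
 * are attempted; otherwise the RFC 3530bis recommended list below is used.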
 */
static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
			      struct nfs_fsinfo *info)
{
	/* Per 3530bis 15.33.5 */
	static const rpc_authflavor_t flav_array[] = {
		RPC_AUTH_GSS_KRB5P,
		RPC_AUTH_GSS_KRB5I,
		RPC_AUTH_GSS_KRB5,
		RPC_AUTH_UNIX,			/* courtesy */
		RPC_AUTH_NULL,
	};
	int status = -EPERM;
	size_t i;

	if (server->auth_info.flavor_len > 0) {
		/* try each flavor specified by user */
		for (i = 0; i < server->auth_info.flavor_len; i++) {
			status = nfs4_lookup_root_sec(server, fhandle, info,
						server->auth_info.flavors[i]);
			if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
				continue;
			break;
		}
	} else {
		/* no flavors specified by user, try default list */
		for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
			status = nfs4_lookup_root_sec(server, fhandle, info,
						      flav_array[i]);
			if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
				continue;
			break;
		}
	}

	/*
	 * -EACCES could mean that the user doesn't have correct permissions
	 * to access the mount.  It could also mean that we tried to mount
	 * with a gss auth flavor, but rpc.gssd isn't running.  Either way,
	 * existing mount programs don't handle -EACCES very well so it should
	 * be mapped to -EPERM instead.
	 */
	if (status == -EACCES)
		status = -EPERM;
	return status;
}

static int nfs4_do_find_root_sec(struct nfs_server *server,
				 struct nfs_fh *fhandle, struct nfs_fsinfo *info)
{
	int mv = server->nfs_client->cl_minorversion;
	return nfs_v4_minor_ops[mv]->find_root_sec(server, fhandle, info);
}

/**
 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
 * @server: initialized nfs_server handle
 * @fhandle: we fill in the pseudo-fs root file handle
 * @info: we fill in an FSINFO struct
 * @auth_probe: probe the auth flavours
 *
 * Returns zero on success, or a negative errno.
 */
int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
			 struct nfs_fsinfo *info,
			 bool auth_probe)
{
	int status = 0;

	if (!auth_probe)
		status = nfs4_lookup_root(server, fhandle, info);

	if (auth_probe || status == -NFS4ERR_WRONGSEC)
		status = nfs4_do_find_root_sec(server, fhandle, info);

	if (status == 0)
		status = nfs4_server_capabilities(server, fhandle);
	if (status == 0)
		status = nfs4_do_fsinfo(server, fhandle, info);

	return nfs4_map_errors(status);
}

static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
			      struct nfs_fsinfo *info)
{
	int error;
	struct nfs_fattr *fattr = info->fattr;
	struct nfs4_label *label = NULL;

	error = nfs4_server_capabilities(server, mntfh);
	if (error < 0) {
		dprintk("nfs4_get_root: getcaps error = %d\n", -error);
		return error;
	}

	label = nfs4_label_alloc(server, GFP_KERNEL);
	if (IS_ERR(label))
		return PTR_ERR(label);

	error = nfs4_proc_getattr(server, mntfh, fattr, label);
	if (error < 0) {
		dprintk("nfs4_get_root: getattr error = %d\n", -error);
		goto err_free_label;
	}

	if (fattr->valid & NFS_ATTR_FATTR_FSID &&
	    !nfs_fsid_equal(&server->fsid, &fattr->fsid))
		memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));

err_free_label:
	nfs4_label_free(label);

	return error;
}

/*
 * Get locations and (maybe) other attributes of a referral.
 * Note that we'll actually follow the referral later when
 * we detect fsid mismatch in inode revalidation
 */
static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
			     const struct qstr *name, struct nfs_fattr *fattr,
			     struct nfs_fh *fhandle)
{
	int status = -ENOMEM;
	struct page *page = NULL;
	struct nfs4_fs_locations *locations = NULL;

	page = alloc_page(GFP_KERNEL);
	if (page == NULL)
		goto out;
	locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
	if (locations == NULL)
		goto out;

	status = nfs4_proc_fs_locations(client, dir, name, locations, page);
	if (status != 0)
		goto out;

	/*
	 * If the fsid didn't change, this is a migration event, not a
	 * referral.  Cause us to drop into the exception handler, which
	 * will kick off migration recovery.
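	 * (in that case -NFS4ERR_MOVED is returned unchanged instead of
	 *  fixing up the referral attributes)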
	 */
	if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
		dprintk("%s: server did not return a different fsid for"
			" a referral at %s\n", __func__, name->name);
		status = -NFS4ERR_MOVED;
		goto out;
	}
	/* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
	nfs_fixup_referral_attributes(&locations->fattr);

	/* replace the lookup nfs_fattr with the locations nfs_fattr */
	memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
	memset(fhandle, 0, sizeof(struct nfs_fh));
out:
	if (page)
		__free_page(page);
	kfree(locations);
	return status;
}

static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
			      struct nfs_fattr *fattr, struct nfs4_label *label)
{
	struct nfs4_getattr_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_getattr_res res = {
		.fattr = fattr,
		.label = label,
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	args.bitmask = nfs4_bitmask(server, label);

	nfs_fattr_init(fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}

static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
			     struct nfs_fattr *fattr, struct nfs4_label *label)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_proc_getattr(server, fhandle, fattr, label);
		trace_nfs4_getattr(server, fhandle, fattr, err);
		err = nfs4_handle_exception(server, err,
				&exception);
	} while (exception.retry);
	return err;
}

/* 
 * The file is not closed if it is opened due to a request to change
 * the size of the file. The open call will not be needed once the
 * VFS layer lookup-intents are implemented.
 *
 * Close is called when the inode is destroyed.
 * If we haven't opened the file for O_WRONLY, we need to do so in the
 * size_change case in order to obtain a stateid.
 *
 * Got race?
 * Because OPEN is always done by name in nfsv4, it is
 * possible that we opened a different file by the same
 * name.  We can recognize this race condition, but we
 * can't do anything about it besides returning an error.
 *
 * This will be fixed with VFS changes (lookup-intent).
3261 */ 3262static int 3263nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 3264 struct iattr *sattr) 3265{ 3266 struct inode *inode = d_inode(dentry); 3267 struct rpc_cred *cred = NULL; 3268 struct nfs4_state *state = NULL; 3269 struct nfs4_label *label = NULL; 3270 int status; 3271 3272 if (pnfs_ld_layoutret_on_setattr(inode) && 3273 sattr->ia_valid & ATTR_SIZE && 3274 sattr->ia_size < i_size_read(inode)) 3275 pnfs_commit_and_return_layout(inode); 3276 3277 nfs_fattr_init(fattr); 3278 3279 /* Deal with open(O_TRUNC) */ 3280 if (sattr->ia_valid & ATTR_OPEN) 3281 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME); 3282 3283 /* Optimization: if the end result is no change, don't RPC */ 3284 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0) 3285 return 0; 3286 3287 /* Search for an existing open(O_WRITE) file */ 3288 if (sattr->ia_valid & ATTR_FILE) { 3289 struct nfs_open_context *ctx; 3290 3291 ctx = nfs_file_open_context(sattr->ia_file); 3292 if (ctx) { 3293 cred = ctx->cred; 3294 state = ctx->state; 3295 } 3296 } 3297 3298 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL); 3299 if (IS_ERR(label)) 3300 return PTR_ERR(label); 3301 3302 status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label); 3303 if (status == 0) { 3304 nfs_setattr_update_inode(inode, sattr, fattr); 3305 nfs_setsecurity(inode, fattr, label); 3306 } 3307 nfs4_label_free(label); 3308 return status; 3309} 3310 3311static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, 3312 const struct qstr *name, struct nfs_fh *fhandle, 3313 struct nfs_fattr *fattr, struct nfs4_label *label) 3314{ 3315 struct nfs_server *server = NFS_SERVER(dir); 3316 int status; 3317 struct nfs4_lookup_arg args = { 3318 .bitmask = server->attr_bitmask, 3319 .dir_fh = NFS_FH(dir), 3320 .name = name, 3321 }; 3322 struct nfs4_lookup_res res = { 3323 .server = server, 3324 .fattr = fattr, 3325 .label = label, 3326 .fh = fhandle, 3327 }; 3328 struct rpc_message msg = { 3329 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], 3330 .rpc_argp = &args, 3331 .rpc_resp = &res, 3332 }; 3333 3334 args.bitmask = nfs4_bitmask(server, label); 3335 3336 nfs_fattr_init(fattr); 3337 3338 dprintk("NFS call lookup %s\n", name->name); 3339 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0); 3340 dprintk("NFS reply lookup: %d\n", status); 3341 return status; 3342} 3343 3344static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) 3345{ 3346 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 3347 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; 3348 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 3349 fattr->nlink = 2; 3350} 3351 3352static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, 3353 struct qstr *name, struct nfs_fh *fhandle, 3354 struct nfs_fattr *fattr, struct nfs4_label *label) 3355{ 3356 struct nfs4_exception exception = { }; 3357 struct rpc_clnt *client = *clnt; 3358 int err; 3359 do { 3360 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label); 3361 trace_nfs4_lookup(dir, name, err); 3362 switch (err) { 3363 case -NFS4ERR_BADNAME: 3364 err = -ENOENT; 3365 goto out; 3366 case -NFS4ERR_MOVED: 3367 err = nfs4_get_referral(client, dir, name, fattr, fhandle); 3368 goto out; 3369 case -NFS4ERR_WRONGSEC: 3370 err = -EPERM; 3371 if (client != *clnt) 3372 goto out; 3373 client = nfs4_negotiate_security(client, dir, name); 3374 if (IS_ERR(client)) 3375 return PTR_ERR(client); 3376 3377 exception.retry = 1; 3378 break; 3379 default: 3380 err = 
nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 3381 } 3382 } while (exception.retry); 3383 3384out: 3385 if (err == 0) 3386 *clnt = client; 3387 else if (client != *clnt) 3388 rpc_shutdown_client(client); 3389 3390 return err; 3391} 3392 3393static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, 3394 struct nfs_fh *fhandle, struct nfs_fattr *fattr, 3395 struct nfs4_label *label) 3396{ 3397 int status; 3398 struct rpc_clnt *client = NFS_CLIENT(dir); 3399 3400 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label); 3401 if (client != NFS_CLIENT(dir)) { 3402 rpc_shutdown_client(client); 3403 nfs_fixup_secinfo_attributes(fattr); 3404 } 3405 return status; 3406} 3407 3408struct rpc_clnt * 3409nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name, 3410 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 3411{ 3412 struct rpc_clnt *client = NFS_CLIENT(dir); 3413 int status; 3414 3415 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL); 3416 if (status < 0) 3417 return ERR_PTR(status); 3418 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client; 3419} 3420 3421static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 3422{ 3423 struct nfs_server *server = NFS_SERVER(inode); 3424 struct nfs4_accessargs args = { 3425 .fh = NFS_FH(inode), 3426 .bitmask = server->cache_consistency_bitmask, 3427 }; 3428 struct nfs4_accessres res = { 3429 .server = server, 3430 }; 3431 struct rpc_message msg = { 3432 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 3433 .rpc_argp = &args, 3434 .rpc_resp = &res, 3435 .rpc_cred = entry->cred, 3436 }; 3437 int mode = entry->mask; 3438 int status = 0; 3439 3440 /* 3441 * Determine which access bits we want to ask for... 3442 */ 3443 if (mode & MAY_READ) 3444 args.access |= NFS4_ACCESS_READ; 3445 if (S_ISDIR(inode->i_mode)) { 3446 if (mode & MAY_WRITE) 3447 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE; 3448 if (mode & MAY_EXEC) 3449 args.access |= NFS4_ACCESS_LOOKUP; 3450 } else { 3451 if (mode & MAY_WRITE) 3452 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND; 3453 if (mode & MAY_EXEC) 3454 args.access |= NFS4_ACCESS_EXECUTE; 3455 } 3456 3457 res.fattr = nfs_alloc_fattr(); 3458 if (res.fattr == NULL) 3459 return -ENOMEM; 3460 3461 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3462 if (!status) { 3463 nfs_access_set_mask(entry, res.access); 3464 nfs_refresh_inode(inode, res.fattr); 3465 } 3466 nfs_free_fattr(res.fattr); 3467 return status; 3468} 3469 3470static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 3471{ 3472 struct nfs4_exception exception = { }; 3473 int err; 3474 do { 3475 err = _nfs4_proc_access(inode, entry); 3476 trace_nfs4_access(inode, err); 3477 err = nfs4_handle_exception(NFS_SERVER(inode), err, 3478 &exception); 3479 } while (exception.retry); 3480 return err; 3481} 3482 3483/* 3484 * TODO: For the time being, we don't try to get any attributes 3485 * along with any of the zero-copy operations READ, READDIR, 3486 * READLINK, WRITE. 3487 * 3488 * In the case of the first three, we want to put the GETATTR 3489 * after the read-type operation -- this is because it is hard 3490 * to predict the length of a GETATTR response in v4, and thus 3491 * align the READ data correctly. This means that the GETATTR 3492 * may end up partially falling into the page cache, and we should 3493 * shift it into the 'tail' of the xdr_buf before processing. 
3494 * To do this efficiently, we need to know the total length 3495 * of data received, which doesn't seem to be available outside 3496 * of the RPC layer. 3497 * 3498 * In the case of WRITE, we also want to put the GETATTR after 3499 * the operation -- in this case because we want to make sure 3500 * we get the post-operation mtime and size. 3501 * 3502 * Both of these changes to the XDR layer would in fact be quite 3503 * minor, but I decided to leave them for a subsequent patch. 3504 */ 3505static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 3506 unsigned int pgbase, unsigned int pglen) 3507{ 3508 struct nfs4_readlink args = { 3509 .fh = NFS_FH(inode), 3510 .pgbase = pgbase, 3511 .pglen = pglen, 3512 .pages = &page, 3513 }; 3514 struct nfs4_readlink_res res; 3515 struct rpc_message msg = { 3516 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 3517 .rpc_argp = &args, 3518 .rpc_resp = &res, 3519 }; 3520 3521 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 3522} 3523 3524static int nfs4_proc_readlink(struct inode *inode, struct page *page, 3525 unsigned int pgbase, unsigned int pglen) 3526{ 3527 struct nfs4_exception exception = { }; 3528 int err; 3529 do { 3530 err = _nfs4_proc_readlink(inode, page, pgbase, pglen); 3531 trace_nfs4_readlink(inode, err); 3532 err = nfs4_handle_exception(NFS_SERVER(inode), err, 3533 &exception); 3534 } while (exception.retry); 3535 return err; 3536} 3537 3538/* 3539 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 3540 */ 3541static int 3542nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 3543 int flags) 3544{ 3545 struct nfs4_label l, *ilabel = NULL; 3546 struct nfs_open_context *ctx; 3547 struct nfs4_state *state; 3548 int opened = 0; 3549 int status = 0; 3550 3551 ctx = alloc_nfs_open_context(dentry, FMODE_READ); 3552 if (IS_ERR(ctx)) 3553 return PTR_ERR(ctx); 3554 3555 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); 3556 3557 sattr->ia_mode &= ~current_umask(); 3558 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, &opened); 3559 if (IS_ERR(state)) { 3560 status = PTR_ERR(state); 3561 goto out; 3562 } 3563out: 3564 nfs4_label_release_security(ilabel); 3565 put_nfs_open_context(ctx); 3566 return status; 3567} 3568 3569static int _nfs4_proc_remove(struct inode *dir, struct qstr *name) 3570{ 3571 struct nfs_server *server = NFS_SERVER(dir); 3572 struct nfs_removeargs args = { 3573 .fh = NFS_FH(dir), 3574 .name = *name, 3575 }; 3576 struct nfs_removeres res = { 3577 .server = server, 3578 }; 3579 struct rpc_message msg = { 3580 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 3581 .rpc_argp = &args, 3582 .rpc_resp = &res, 3583 }; 3584 int status; 3585 3586 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 3587 if (status == 0) 3588 update_changeattr(dir, &res.cinfo); 3589 return status; 3590} 3591 3592static int nfs4_proc_remove(struct inode *dir, struct qstr *name) 3593{ 3594 struct nfs4_exception exception = { }; 3595 int err; 3596 do { 3597 err = _nfs4_proc_remove(dir, name); 3598 trace_nfs4_remove(dir, name, err); 3599 err = nfs4_handle_exception(NFS_SERVER(dir), err, 3600 &exception); 3601 } while (exception.retry); 3602 return err; 3603} 3604 3605static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir) 3606{ 3607 struct nfs_server *server = NFS_SERVER(dir); 3608 struct nfs_removeargs *args = msg->rpc_argp; 3609 struct nfs_removeres *res = 
msg->rpc_resp; 3610 3611 res->server = server; 3612 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 3613 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1); 3614 3615 nfs_fattr_init(res->dir_attr); 3616} 3617 3618static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 3619{ 3620 nfs4_setup_sequence(NFS_SERVER(data->dir), 3621 &data->args.seq_args, 3622 &data->res.seq_res, 3623 task); 3624} 3625 3626static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 3627{ 3628 struct nfs_unlinkdata *data = task->tk_calldata; 3629 struct nfs_removeres *res = &data->res; 3630 3631 if (!nfs4_sequence_done(task, &res->seq_res)) 3632 return 0; 3633 if (nfs4_async_handle_error(task, res->server, NULL, 3634 &data->timeout) == -EAGAIN) 3635 return 0; 3636 update_changeattr(dir, &res->cinfo); 3637 return 1; 3638} 3639 3640static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir) 3641{ 3642 struct nfs_server *server = NFS_SERVER(dir); 3643 struct nfs_renameargs *arg = msg->rpc_argp; 3644 struct nfs_renameres *res = msg->rpc_resp; 3645 3646 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 3647 res->server = server; 3648 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1); 3649} 3650 3651static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 3652{ 3653 nfs4_setup_sequence(NFS_SERVER(data->old_dir), 3654 &data->args.seq_args, 3655 &data->res.seq_res, 3656 task); 3657} 3658 3659static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 3660 struct inode *new_dir) 3661{ 3662 struct nfs_renamedata *data = task->tk_calldata; 3663 struct nfs_renameres *res = &data->res; 3664 3665 if (!nfs4_sequence_done(task, &res->seq_res)) 3666 return 0; 3667 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) 3668 return 0; 3669 3670 update_changeattr(old_dir, &res->old_cinfo); 3671 update_changeattr(new_dir, &res->new_cinfo); 3672 return 1; 3673} 3674 3675static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 3676{ 3677 struct nfs_server *server = NFS_SERVER(inode); 3678 struct nfs4_link_arg arg = { 3679 .fh = NFS_FH(inode), 3680 .dir_fh = NFS_FH(dir), 3681 .name = name, 3682 .bitmask = server->attr_bitmask, 3683 }; 3684 struct nfs4_link_res res = { 3685 .server = server, 3686 .label = NULL, 3687 }; 3688 struct rpc_message msg = { 3689 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 3690 .rpc_argp = &arg, 3691 .rpc_resp = &res, 3692 }; 3693 int status = -ENOMEM; 3694 3695 res.fattr = nfs_alloc_fattr(); 3696 if (res.fattr == NULL) 3697 goto out; 3698 3699 res.label = nfs4_label_alloc(server, GFP_KERNEL); 3700 if (IS_ERR(res.label)) { 3701 status = PTR_ERR(res.label); 3702 goto out; 3703 } 3704 arg.bitmask = nfs4_bitmask(server, res.label); 3705 3706 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 3707 if (!status) { 3708 update_changeattr(dir, &res.cinfo); 3709 status = nfs_post_op_update_inode(inode, res.fattr); 3710 if (!status) 3711 nfs_setsecurity(inode, res.fattr, res.label); 3712 } 3713 3714 3715 nfs4_label_free(res.label); 3716 3717out: 3718 nfs_free_fattr(res.fattr); 3719 return status; 3720} 3721 3722static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 3723{ 3724 struct nfs4_exception exception = { }; 3725 int err; 3726 do { 3727 err = nfs4_handle_exception(NFS_SERVER(inode), 3728 _nfs4_proc_link(inode, dir, name), 3729 &exception); 3730 } 
while (exception.retry); 3731 return err; 3732} 3733 3734struct nfs4_createdata { 3735 struct rpc_message msg; 3736 struct nfs4_create_arg arg; 3737 struct nfs4_create_res res; 3738 struct nfs_fh fh; 3739 struct nfs_fattr fattr; 3740 struct nfs4_label *label; 3741}; 3742 3743static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 3744 struct qstr *name, struct iattr *sattr, u32 ftype) 3745{ 3746 struct nfs4_createdata *data; 3747 3748 data = kzalloc(sizeof(*data), GFP_KERNEL); 3749 if (data != NULL) { 3750 struct nfs_server *server = NFS_SERVER(dir); 3751 3752 data->label = nfs4_label_alloc(server, GFP_KERNEL); 3753 if (IS_ERR(data->label)) 3754 goto out_free; 3755 3756 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 3757 data->msg.rpc_argp = &data->arg; 3758 data->msg.rpc_resp = &data->res; 3759 data->arg.dir_fh = NFS_FH(dir); 3760 data->arg.server = server; 3761 data->arg.name = name; 3762 data->arg.attrs = sattr; 3763 data->arg.ftype = ftype; 3764 data->arg.bitmask = nfs4_bitmask(server, data->label); 3765 data->res.server = server; 3766 data->res.fh = &data->fh; 3767 data->res.fattr = &data->fattr; 3768 data->res.label = data->label; 3769 nfs_fattr_init(data->res.fattr); 3770 } 3771 return data; 3772out_free: 3773 kfree(data); 3774 return NULL; 3775} 3776 3777static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 3778{ 3779 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 3780 &data->arg.seq_args, &data->res.seq_res, 1); 3781 if (status == 0) { 3782 update_changeattr(dir, &data->res.dir_cinfo); 3783 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label); 3784 } 3785 return status; 3786} 3787 3788static void nfs4_free_createdata(struct nfs4_createdata *data) 3789{ 3790 nfs4_label_free(data->label); 3791 kfree(data); 3792} 3793 3794static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3795 struct page *page, unsigned int len, struct iattr *sattr, 3796 struct nfs4_label *label) 3797{ 3798 struct nfs4_createdata *data; 3799 int status = -ENAMETOOLONG; 3800 3801 if (len > NFS4_MAXPATHLEN) 3802 goto out; 3803 3804 status = -ENOMEM; 3805 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 3806 if (data == NULL) 3807 goto out; 3808 3809 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 3810 data->arg.u.symlink.pages = &page; 3811 data->arg.u.symlink.len = len; 3812 data->arg.label = label; 3813 3814 status = nfs4_do_create(dir, dentry, data); 3815 3816 nfs4_free_createdata(data); 3817out: 3818 return status; 3819} 3820 3821static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3822 struct page *page, unsigned int len, struct iattr *sattr) 3823{ 3824 struct nfs4_exception exception = { }; 3825 struct nfs4_label l, *label = NULL; 3826 int err; 3827 3828 label = nfs4_label_init_security(dir, dentry, sattr, &l); 3829 3830 do { 3831 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label); 3832 trace_nfs4_symlink(dir, &dentry->d_name, err); 3833 err = nfs4_handle_exception(NFS_SERVER(dir), err, 3834 &exception); 3835 } while (exception.retry); 3836 3837 nfs4_label_release_security(label); 3838 return err; 3839} 3840 3841static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 3842 struct iattr *sattr, struct nfs4_label *label) 3843{ 3844 struct nfs4_createdata *data; 3845 int status = -ENOMEM; 3846 3847 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 3848 if (data == NULL) 
3849 goto out; 3850 3851 data->arg.label = label; 3852 status = nfs4_do_create(dir, dentry, data); 3853 3854 nfs4_free_createdata(data); 3855out: 3856 return status; 3857} 3858 3859static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 3860 struct iattr *sattr) 3861{ 3862 struct nfs4_exception exception = { }; 3863 struct nfs4_label l, *label = NULL; 3864 int err; 3865 3866 label = nfs4_label_init_security(dir, dentry, sattr, &l); 3867 3868 sattr->ia_mode &= ~current_umask(); 3869 do { 3870 err = _nfs4_proc_mkdir(dir, dentry, sattr, label); 3871 trace_nfs4_mkdir(dir, &dentry->d_name, err); 3872 err = nfs4_handle_exception(NFS_SERVER(dir), err, 3873 &exception); 3874 } while (exception.retry); 3875 nfs4_label_release_security(label); 3876 3877 return err; 3878} 3879 3880static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 3881 u64 cookie, struct page **pages, unsigned int count, int plus) 3882{ 3883 struct inode *dir = d_inode(dentry); 3884 struct nfs4_readdir_arg args = { 3885 .fh = NFS_FH(dir), 3886 .pages = pages, 3887 .pgbase = 0, 3888 .count = count, 3889 .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask, 3890 .plus = plus, 3891 }; 3892 struct nfs4_readdir_res res; 3893 struct rpc_message msg = { 3894 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 3895 .rpc_argp = &args, 3896 .rpc_resp = &res, 3897 .rpc_cred = cred, 3898 }; 3899 int status; 3900 3901 dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__, 3902 dentry, 3903 (unsigned long long)cookie); 3904 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args); 3905 res.pgbase = args.pgbase; 3906 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); 3907 if (status >= 0) { 3908 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE); 3909 status += args.pgbase; 3910 } 3911 3912 nfs_invalidate_atime(dir); 3913 3914 dprintk("%s: returns %d\n", __func__, status); 3915 return status; 3916} 3917 3918static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 3919 u64 cookie, struct page **pages, unsigned int count, int plus) 3920{ 3921 struct nfs4_exception exception = { }; 3922 int err; 3923 do { 3924 err = _nfs4_proc_readdir(dentry, cred, cookie, 3925 pages, count, plus); 3926 trace_nfs4_readdir(d_inode(dentry), err); 3927 err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err, 3928 &exception); 3929 } while (exception.retry); 3930 return err; 3931} 3932 3933static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 3934 struct iattr *sattr, struct nfs4_label *label, dev_t rdev) 3935{ 3936 struct nfs4_createdata *data; 3937 int mode = sattr->ia_mode; 3938 int status = -ENOMEM; 3939 3940 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 3941 if (data == NULL) 3942 goto out; 3943 3944 if (S_ISFIFO(mode)) 3945 data->arg.ftype = NF4FIFO; 3946 else if (S_ISBLK(mode)) { 3947 data->arg.ftype = NF4BLK; 3948 data->arg.u.device.specdata1 = MAJOR(rdev); 3949 data->arg.u.device.specdata2 = MINOR(rdev); 3950 } 3951 else if (S_ISCHR(mode)) { 3952 data->arg.ftype = NF4CHR; 3953 data->arg.u.device.specdata1 = MAJOR(rdev); 3954 data->arg.u.device.specdata2 = MINOR(rdev); 3955 } else if (!S_ISSOCK(mode)) { 3956 status = -EINVAL; 3957 goto out_free; 3958 } 3959 3960 data->arg.label = label; 3961 status = nfs4_do_create(dir, dentry, data); 3962out_free: 3963 nfs4_free_createdata(data); 3964out: 3965 return status; 3966} 3967 3968static int nfs4_proc_mknod(struct inode *dir, struct dentry 
*dentry, 3969 struct iattr *sattr, dev_t rdev) 3970{ 3971 struct nfs4_exception exception = { }; 3972 struct nfs4_label l, *label = NULL; 3973 int err; 3974 3975 label = nfs4_label_init_security(dir, dentry, sattr, &l); 3976 3977 sattr->ia_mode &= ~current_umask(); 3978 do { 3979 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev); 3980 trace_nfs4_mknod(dir, &dentry->d_name, err); 3981 err = nfs4_handle_exception(NFS_SERVER(dir), err, 3982 &exception); 3983 } while (exception.retry); 3984 3985 nfs4_label_release_security(label); 3986 3987 return err; 3988} 3989 3990static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 3991 struct nfs_fsstat *fsstat) 3992{ 3993 struct nfs4_statfs_arg args = { 3994 .fh = fhandle, 3995 .bitmask = server->attr_bitmask, 3996 }; 3997 struct nfs4_statfs_res res = { 3998 .fsstat = fsstat, 3999 }; 4000 struct rpc_message msg = { 4001 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 4002 .rpc_argp = &args, 4003 .rpc_resp = &res, 4004 }; 4005 4006 nfs_fattr_init(fsstat->fattr); 4007 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4008} 4009 4010static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 4011{ 4012 struct nfs4_exception exception = { }; 4013 int err; 4014 do { 4015 err = nfs4_handle_exception(server, 4016 _nfs4_proc_statfs(server, fhandle, fsstat), 4017 &exception); 4018 } while (exception.retry); 4019 return err; 4020} 4021 4022static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 4023 struct nfs_fsinfo *fsinfo) 4024{ 4025 struct nfs4_fsinfo_arg args = { 4026 .fh = fhandle, 4027 .bitmask = server->attr_bitmask, 4028 }; 4029 struct nfs4_fsinfo_res res = { 4030 .fsinfo = fsinfo, 4031 }; 4032 struct rpc_message msg = { 4033 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 4034 .rpc_argp = &args, 4035 .rpc_resp = &res, 4036 }; 4037 4038 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4039} 4040 4041static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 4042{ 4043 struct nfs4_exception exception = { }; 4044 unsigned long now = jiffies; 4045 int err; 4046 4047 do { 4048 err = _nfs4_do_fsinfo(server, fhandle, fsinfo); 4049 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err); 4050 if (err == 0) { 4051 struct nfs_client *clp = server->nfs_client; 4052 4053 spin_lock(&clp->cl_lock); 4054 clp->cl_lease_time = fsinfo->lease_time * HZ; 4055 clp->cl_last_renewal = now; 4056 spin_unlock(&clp->cl_lock); 4057 break; 4058 } 4059 err = nfs4_handle_exception(server, err, &exception); 4060 } while (exception.retry); 4061 return err; 4062} 4063 4064static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 4065{ 4066 int error; 4067 4068 nfs_fattr_init(fsinfo->fattr); 4069 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 4070 if (error == 0) { 4071 /* block layout checks this! 
*/ 4072 server->pnfs_blksize = fsinfo->blksize; 4073 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype); 4074 } 4075 4076 return error; 4077} 4078 4079static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 4080 struct nfs_pathconf *pathconf) 4081{ 4082 struct nfs4_pathconf_arg args = { 4083 .fh = fhandle, 4084 .bitmask = server->attr_bitmask, 4085 }; 4086 struct nfs4_pathconf_res res = { 4087 .pathconf = pathconf, 4088 }; 4089 struct rpc_message msg = { 4090 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], 4091 .rpc_argp = &args, 4092 .rpc_resp = &res, 4093 }; 4094 4095 /* None of the pathconf attributes are mandatory to implement */ 4096 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { 4097 memset(pathconf, 0, sizeof(*pathconf)); 4098 return 0; 4099 } 4100 4101 nfs_fattr_init(pathconf->fattr); 4102 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4103} 4104 4105static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 4106 struct nfs_pathconf *pathconf) 4107{ 4108 struct nfs4_exception exception = { }; 4109 int err; 4110 4111 do { 4112 err = nfs4_handle_exception(server, 4113 _nfs4_proc_pathconf(server, fhandle, pathconf), 4114 &exception); 4115 } while (exception.retry); 4116 return err; 4117} 4118 4119int nfs4_set_rw_stateid(nfs4_stateid *stateid, 4120 const struct nfs_open_context *ctx, 4121 const struct nfs_lock_context *l_ctx, 4122 fmode_t fmode) 4123{ 4124 const struct nfs_lockowner *lockowner = NULL; 4125 4126 if (l_ctx != NULL) 4127 lockowner = &l_ctx->lockowner; 4128 return nfs4_select_rw_stateid(stateid, ctx->state, fmode, lockowner); 4129} 4130EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid); 4131 4132static bool nfs4_stateid_is_current(nfs4_stateid *stateid, 4133 const struct nfs_open_context *ctx, 4134 const struct nfs_lock_context *l_ctx, 4135 fmode_t fmode) 4136{ 4137 nfs4_stateid current_stateid; 4138 4139 /* If the current stateid represents a lost lock, then exit */ 4140 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO) 4141 return true; 4142 return nfs4_stateid_match(stateid, &current_stateid); 4143} 4144 4145static bool nfs4_error_stateid_expired(int err) 4146{ 4147 switch (err) { 4148 case -NFS4ERR_DELEG_REVOKED: 4149 case -NFS4ERR_ADMIN_REVOKED: 4150 case -NFS4ERR_BAD_STATEID: 4151 case -NFS4ERR_STALE_STATEID: 4152 case -NFS4ERR_OLD_STATEID: 4153 case -NFS4ERR_OPENMODE: 4154 case -NFS4ERR_EXPIRED: 4155 return true; 4156 } 4157 return false; 4158} 4159 4160void __nfs4_read_done_cb(struct nfs_pgio_header *hdr) 4161{ 4162 nfs_invalidate_atime(hdr->inode); 4163} 4164 4165static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr) 4166{ 4167 struct nfs_server *server = NFS_SERVER(hdr->inode); 4168 4169 trace_nfs4_read(hdr, task->tk_status); 4170 if (nfs4_async_handle_error(task, server, 4171 hdr->args.context->state, 4172 NULL) == -EAGAIN) { 4173 rpc_restart_call_prepare(task); 4174 return -EAGAIN; 4175 } 4176 4177 __nfs4_read_done_cb(hdr); 4178 if (task->tk_status > 0) 4179 renew_lease(server, hdr->timestamp); 4180 return 0; 4181} 4182 4183static bool nfs4_read_stateid_changed(struct rpc_task *task, 4184 struct nfs_pgio_args *args) 4185{ 4186 4187 if (!nfs4_error_stateid_expired(task->tk_status) || 4188 nfs4_stateid_is_current(&args->stateid, 4189 args->context, 4190 args->lock_context, 4191 FMODE_READ)) 4192 return false; 4193 rpc_restart_call_prepare(task); 4194 return true; 4195} 4196 4197static int nfs4_read_done(struct rpc_task *task, struct
nfs_pgio_header *hdr) 4198{ 4199 4200 dprintk("--> %s\n", __func__); 4201 4202 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 4203 return -EAGAIN; 4204 if (nfs4_read_stateid_changed(task, &hdr->args)) 4205 return -EAGAIN; 4206 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) : 4207 nfs4_read_done_cb(task, hdr); 4208} 4209 4210static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr, 4211 struct rpc_message *msg) 4212{ 4213 hdr->timestamp = jiffies; 4214 hdr->pgio_done_cb = nfs4_read_done_cb; 4215 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 4216 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0); 4217} 4218 4219static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, 4220 struct nfs_pgio_header *hdr) 4221{ 4222 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode), 4223 &hdr->args.seq_args, 4224 &hdr->res.seq_res, 4225 task)) 4226 return 0; 4227 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, 4228 hdr->args.lock_context, 4229 hdr->rw_ops->rw_mode) == -EIO) 4230 return -EIO; 4231 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) 4232 return -EIO; 4233 return 0; 4234} 4235 4236static int nfs4_write_done_cb(struct rpc_task *task, 4237 struct nfs_pgio_header *hdr) 4238{ 4239 struct inode *inode = hdr->inode; 4240 4241 trace_nfs4_write(hdr, task->tk_status); 4242 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 4243 hdr->args.context->state, 4244 NULL) == -EAGAIN) { 4245 rpc_restart_call_prepare(task); 4246 return -EAGAIN; 4247 } 4248 if (task->tk_status >= 0) { 4249 renew_lease(NFS_SERVER(inode), hdr->timestamp); 4250 nfs_writeback_update_inode(hdr); 4251 } 4252 return 0; 4253} 4254 4255static bool nfs4_write_stateid_changed(struct rpc_task *task, 4256 struct nfs_pgio_args *args) 4257{ 4258 4259 if (!nfs4_error_stateid_expired(task->tk_status) || 4260 nfs4_stateid_is_current(&args->stateid, 4261 args->context, 4262 args->lock_context, 4263 FMODE_WRITE)) 4264 return false; 4265 rpc_restart_call_prepare(task); 4266 return true; 4267} 4268 4269static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 4270{ 4271 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 4272 return -EAGAIN; 4273 if (nfs4_write_stateid_changed(task, &hdr->args)) 4274 return -EAGAIN; 4275 return hdr->pgio_done_cb ? 
hdr->pgio_done_cb(task, hdr) : 4276 nfs4_write_done_cb(task, hdr); 4277} 4278 4279static 4280bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr) 4281{ 4282 /* Don't request attributes for pNFS or O_DIRECT writes */ 4283 if (hdr->ds_clp != NULL || hdr->dreq != NULL) 4284 return false; 4285 /* Otherwise, request attributes if and only if we don't hold 4286 * a delegation 4287 */ 4288 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0; 4289} 4290 4291static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, 4292 struct rpc_message *msg) 4293{ 4294 struct nfs_server *server = NFS_SERVER(hdr->inode); 4295 4296 if (!nfs4_write_need_cache_consistency_data(hdr)) { 4297 hdr->args.bitmask = NULL; 4298 hdr->res.fattr = NULL; 4299 } else 4300 hdr->args.bitmask = server->cache_consistency_bitmask; 4301 4302 if (!hdr->pgio_done_cb) 4303 hdr->pgio_done_cb = nfs4_write_done_cb; 4304 hdr->res.server = server; 4305 hdr->timestamp = jiffies; 4306 4307 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 4308 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1); 4309} 4310 4311static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 4312{ 4313 nfs4_setup_sequence(NFS_SERVER(data->inode), 4314 &data->args.seq_args, 4315 &data->res.seq_res, 4316 task); 4317} 4318 4319static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 4320{ 4321 struct inode *inode = data->inode; 4322 4323 trace_nfs4_commit(data, task->tk_status); 4324 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 4325 NULL, NULL) == -EAGAIN) { 4326 rpc_restart_call_prepare(task); 4327 return -EAGAIN; 4328 } 4329 return 0; 4330} 4331 4332static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 4333{ 4334 if (!nfs4_sequence_done(task, &data->res.seq_res)) 4335 return -EAGAIN; 4336 return data->commit_done_cb(task, data); 4337} 4338 4339static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg) 4340{ 4341 struct nfs_server *server = NFS_SERVER(data->inode); 4342 4343 if (data->commit_done_cb == NULL) 4344 data->commit_done_cb = nfs4_commit_done_cb; 4345 data->res.server = server; 4346 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 4347 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 4348} 4349 4350struct nfs4_renewdata { 4351 struct nfs_client *client; 4352 unsigned long timestamp; 4353}; 4354 4355/* 4356 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 4357 * standalone procedure for queueing an asynchronous RENEW. 4358 */ 4359static void nfs4_renew_release(void *calldata) 4360{ 4361 struct nfs4_renewdata *data = calldata; 4362 struct nfs_client *clp = data->client; 4363 4364 if (atomic_read(&clp->cl_count) > 1) 4365 nfs4_schedule_state_renewal(clp); 4366 nfs_put_client(clp); 4367 kfree(data); 4368} 4369 4370static void nfs4_renew_done(struct rpc_task *task, void *calldata) 4371{ 4372 struct nfs4_renewdata *data = calldata; 4373 struct nfs_client *clp = data->client; 4374 unsigned long timestamp = data->timestamp; 4375 4376 trace_nfs4_renew_async(clp, task->tk_status); 4377 switch (task->tk_status) { 4378 case 0: 4379 break; 4380 case -NFS4ERR_LEASE_MOVED: 4381 nfs4_schedule_lease_moved_recovery(clp); 4382 break; 4383 default: 4384 /* Unless we're shutting down, schedule state recovery! 
*/ 4385 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 4386 return; 4387 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 4388 nfs4_schedule_lease_recovery(clp); 4389 return; 4390 } 4391 nfs4_schedule_path_down_recovery(clp); 4392 } 4393 do_renew_lease(clp, timestamp); 4394} 4395 4396static const struct rpc_call_ops nfs4_renew_ops = { 4397 .rpc_call_done = nfs4_renew_done, 4398 .rpc_release = nfs4_renew_release, 4399}; 4400 4401static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 4402{ 4403 struct rpc_message msg = { 4404 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 4405 .rpc_argp = clp, 4406 .rpc_cred = cred, 4407 }; 4408 struct nfs4_renewdata *data; 4409 4410 if (renew_flags == 0) 4411 return 0; 4412 if (!atomic_inc_not_zero(&clp->cl_count)) 4413 return -EIO; 4414 data = kmalloc(sizeof(*data), GFP_NOFS); 4415 if (data == NULL) 4416 return -ENOMEM; 4417 data->client = clp; 4418 data->timestamp = jiffies; 4419 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT, 4420 &nfs4_renew_ops, data); 4421} 4422 4423static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) 4424{ 4425 struct rpc_message msg = { 4426 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 4427 .rpc_argp = clp, 4428 .rpc_cred = cred, 4429 }; 4430 unsigned long now = jiffies; 4431 int status; 4432 4433 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 4434 if (status < 0) 4435 return status; 4436 do_renew_lease(clp, now); 4437 return 0; 4438} 4439 4440static inline int nfs4_server_supports_acls(struct nfs_server *server) 4441{ 4442 return server->caps & NFS_CAP_ACLS; 4443} 4444 4445/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that 4446 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on 4447 * the stack. 
4448 */ 4449#define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) 4450 4451static int buf_to_pages_noslab(const void *buf, size_t buflen, 4452 struct page **pages, unsigned int *pgbase) 4453{ 4454 struct page *newpage, **spages; 4455 int rc = 0; 4456 size_t len; 4457 spages = pages; 4458 4459 do { 4460 len = min_t(size_t, PAGE_SIZE, buflen); 4461 newpage = alloc_page(GFP_KERNEL); 4462 4463 if (newpage == NULL) 4464 goto unwind; 4465 memcpy(page_address(newpage), buf, len); 4466 buf += len; 4467 buflen -= len; 4468 *pages++ = newpage; 4469 rc++; 4470 } while (buflen != 0); 4471 4472 return rc; 4473 4474unwind: 4475 for(; rc > 0; rc--) 4476 __free_page(spages[rc-1]); 4477 return -ENOMEM; 4478} 4479 4480struct nfs4_cached_acl { 4481 int cached; 4482 size_t len; 4483 char data[0]; 4484}; 4485 4486static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 4487{ 4488 struct nfs_inode *nfsi = NFS_I(inode); 4489 4490 spin_lock(&inode->i_lock); 4491 kfree(nfsi->nfs4_acl); 4492 nfsi->nfs4_acl = acl; 4493 spin_unlock(&inode->i_lock); 4494} 4495 4496static void nfs4_zap_acl_attr(struct inode *inode) 4497{ 4498 nfs4_set_cached_acl(inode, NULL); 4499} 4500 4501static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen) 4502{ 4503 struct nfs_inode *nfsi = NFS_I(inode); 4504 struct nfs4_cached_acl *acl; 4505 int ret = -ENOENT; 4506 4507 spin_lock(&inode->i_lock); 4508 acl = nfsi->nfs4_acl; 4509 if (acl == NULL) 4510 goto out; 4511 if (buf == NULL) /* user is just asking for length */ 4512 goto out_len; 4513 if (acl->cached == 0) 4514 goto out; 4515 ret = -ERANGE; /* see getxattr(2) man page */ 4516 if (acl->len > buflen) 4517 goto out; 4518 memcpy(buf, acl->data, acl->len); 4519out_len: 4520 ret = acl->len; 4521out: 4522 spin_unlock(&inode->i_lock); 4523 return ret; 4524} 4525 4526static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len) 4527{ 4528 struct nfs4_cached_acl *acl; 4529 size_t buflen = sizeof(*acl) + acl_len; 4530 4531 if (buflen <= PAGE_SIZE) { 4532 acl = kmalloc(buflen, GFP_KERNEL); 4533 if (acl == NULL) 4534 goto out; 4535 acl->cached = 1; 4536 _copy_from_pages(acl->data, pages, pgbase, acl_len); 4537 } else { 4538 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 4539 if (acl == NULL) 4540 goto out; 4541 acl->cached = 0; 4542 } 4543 acl->len = acl_len; 4544out: 4545 nfs4_set_cached_acl(inode, acl); 4546} 4547 4548/* 4549 * The getxattr API returns the required buffer length when called with a 4550 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 4551 * the required buf. On a NULL buf, we send a page of data to the server 4552 * guessing that the ACL request can be serviced by a page. If so, we cache 4553 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 4554 * the cache. If not so, we throw away the page, and cache the required 4555 * length. The next getxattr call will then produce another round trip to 4556 * the server, this time with the input buf of the required size. 
4557 */ 4558static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 4559{ 4560 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, }; 4561 struct nfs_getaclargs args = { 4562 .fh = NFS_FH(inode), 4563 .acl_pages = pages, 4564 .acl_len = buflen, 4565 }; 4566 struct nfs_getaclres res = { 4567 .acl_len = buflen, 4568 }; 4569 struct rpc_message msg = { 4570 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 4571 .rpc_argp = &args, 4572 .rpc_resp = &res, 4573 }; 4574 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 4575 int ret = -ENOMEM, i; 4576 4577 /* As long as we're doing a round trip to the server anyway, 4578 * let's be prepared for a page of acl data. */ 4579 if (npages == 0) 4580 npages = 1; 4581 if (npages > ARRAY_SIZE(pages)) 4582 return -ERANGE; 4583 4584 for (i = 0; i < npages; i++) { 4585 pages[i] = alloc_page(GFP_KERNEL); 4586 if (!pages[i]) 4587 goto out_free; 4588 } 4589 4590 /* for decoding across pages */ 4591 res.acl_scratch = alloc_page(GFP_KERNEL); 4592 if (!res.acl_scratch) 4593 goto out_free; 4594 4595 args.acl_len = npages * PAGE_SIZE; 4596 args.acl_pgbase = 0; 4597 4598 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 4599 __func__, buf, buflen, npages, args.acl_len); 4600 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 4601 &msg, &args.seq_args, &res.seq_res, 0); 4602 if (ret) 4603 goto out_free; 4604 4605 /* Handle the case where the passed-in buffer is too short */ 4606 if (res.acl_flags & NFS4_ACL_TRUNC) { 4607 /* Did the user only issue a request for the acl length? */ 4608 if (buf == NULL) 4609 goto out_ok; 4610 ret = -ERANGE; 4611 goto out_free; 4612 } 4613 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len); 4614 if (buf) { 4615 if (res.acl_len > buflen) { 4616 ret = -ERANGE; 4617 goto out_free; 4618 } 4619 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); 4620 } 4621out_ok: 4622 ret = res.acl_len; 4623out_free: 4624 for (i = 0; i < npages; i++) 4625 if (pages[i]) 4626 __free_page(pages[i]); 4627 if (res.acl_scratch) 4628 __free_page(res.acl_scratch); 4629 return ret; 4630} 4631 4632static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 4633{ 4634 struct nfs4_exception exception = { }; 4635 ssize_t ret; 4636 do { 4637 ret = __nfs4_get_acl_uncached(inode, buf, buflen); 4638 trace_nfs4_get_acl(inode, ret); 4639 if (ret >= 0) 4640 break; 4641 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); 4642 } while (exception.retry); 4643 return ret; 4644} 4645 4646static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) 4647{ 4648 struct nfs_server *server = NFS_SERVER(inode); 4649 int ret; 4650 4651 if (!nfs4_server_supports_acls(server)) 4652 return -EOPNOTSUPP; 4653 ret = nfs_revalidate_inode(server, inode); 4654 if (ret < 0) 4655 return ret; 4656 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 4657 nfs_zap_acl_cache(inode); 4658 ret = nfs4_read_cached_acl(inode, buf, buflen); 4659 if (ret != -ENOENT) 4660 /* -ENOENT is returned if there is no ACL or if there is an ACL 4661 * but no cached acl data, just the acl length */ 4662 return ret; 4663 return nfs4_get_acl_uncached(inode, buf, buflen); 4664} 4665 4666static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 4667{ 4668 struct nfs_server *server = NFS_SERVER(inode); 4669 struct page *pages[NFS4ACL_MAXPAGES]; 4670 struct nfs_setaclargs arg = { 4671 .fh = NFS_FH(inode), 4672 .acl_pages = pages, 4673 .acl_len = buflen, 4674 }; 
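	/*
	 * A minimal userspace sketch of the xattr round trips described
	 * above __nfs4_get_acl_uncached() ("path" is just a stand-in for a
	 * real file; the calls come from <sys/xattr.h>). The first
	 * getxattr() probes the length, the second is normally served from
	 * the cached acl, and setxattr() on the same name ends up here via
	 * nfs4_proc_set_acl():
	 *
	 *	ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
	 *	if (len > 0) {
	 *		void *xdr_acl = malloc(len);
	 *		if (xdr_acl &&
	 *		    getxattr(path, "system.nfs4_acl", xdr_acl, len) == len)
	 *			setxattr(path, "system.nfs4_acl", xdr_acl, len, 0);
	 *		free(xdr_acl);
	 *	}
	 */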
4675 struct nfs_setaclres res; 4676 struct rpc_message msg = { 4677 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], 4678 .rpc_argp = &arg, 4679 .rpc_resp = &res, 4680 }; 4681 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 4682 int ret, i; 4683 4684 if (!nfs4_server_supports_acls(server)) 4685 return -EOPNOTSUPP; 4686 if (npages > ARRAY_SIZE(pages)) 4687 return -ERANGE; 4688 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase); 4689 if (i < 0) 4690 return i; 4691 nfs4_inode_return_delegation(inode); 4692 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4693 4694 /* 4695 * Free each page after tx, so the only ref left is 4696 * held by the network stack 4697 */ 4698 for (; i > 0; i--) 4699 put_page(pages[i-1]); 4700 4701 /* 4702 * Acl update can result in inode attribute update. 4703 * so mark the attribute cache invalid. 4704 */ 4705 spin_lock(&inode->i_lock); 4706 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR; 4707 spin_unlock(&inode->i_lock); 4708 nfs_access_zap_cache(inode); 4709 nfs_zap_acl_cache(inode); 4710 return ret; 4711} 4712 4713static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 4714{ 4715 struct nfs4_exception exception = { }; 4716 int err; 4717 do { 4718 err = __nfs4_proc_set_acl(inode, buf, buflen); 4719 trace_nfs4_set_acl(inode, err); 4720 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4721 &exception); 4722 } while (exception.retry); 4723 return err; 4724} 4725 4726#ifdef CONFIG_NFS_V4_SECURITY_LABEL 4727static int _nfs4_get_security_label(struct inode *inode, void *buf, 4728 size_t buflen) 4729{ 4730 struct nfs_server *server = NFS_SERVER(inode); 4731 struct nfs_fattr fattr; 4732 struct nfs4_label label = {0, 0, buflen, buf}; 4733 4734 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 4735 struct nfs4_getattr_arg arg = { 4736 .fh = NFS_FH(inode), 4737 .bitmask = bitmask, 4738 }; 4739 struct nfs4_getattr_res res = { 4740 .fattr = &fattr, 4741 .label = &label, 4742 .server = server, 4743 }; 4744 struct rpc_message msg = { 4745 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 4746 .rpc_argp = &arg, 4747 .rpc_resp = &res, 4748 }; 4749 int ret; 4750 4751 nfs_fattr_init(&fattr); 4752 4753 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0); 4754 if (ret) 4755 return ret; 4756 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) 4757 return -ENOENT; 4758 if (buflen < label.len) 4759 return -ERANGE; 4760 return 0; 4761} 4762 4763static int nfs4_get_security_label(struct inode *inode, void *buf, 4764 size_t buflen) 4765{ 4766 struct nfs4_exception exception = { }; 4767 int err; 4768 4769 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 4770 return -EOPNOTSUPP; 4771 4772 do { 4773 err = _nfs4_get_security_label(inode, buf, buflen); 4774 trace_nfs4_get_security_label(inode, err); 4775 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4776 &exception); 4777 } while (exception.retry); 4778 return err; 4779} 4780 4781static int _nfs4_do_set_security_label(struct inode *inode, 4782 struct nfs4_label *ilabel, 4783 struct nfs_fattr *fattr, 4784 struct nfs4_label *olabel) 4785{ 4786 4787 struct iattr sattr = {0}; 4788 struct nfs_server *server = NFS_SERVER(inode); 4789 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 4790 struct nfs_setattrargs arg = { 4791 .fh = NFS_FH(inode), 4792 .iap = &sattr, 4793 .server = server, 4794 .bitmask = bitmask, 4795 .label = ilabel, 4796 }; 4797 struct nfs_setattrres res = { 4798 .fattr = 
fattr, 4799 .label = olabel, 4800 .server = server, 4801 }; 4802 struct rpc_message msg = { 4803 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 4804 .rpc_argp = &arg, 4805 .rpc_resp = &res, 4806 }; 4807 int status; 4808 4809 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 4810 4811 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4812 if (status) 4813 dprintk("%s failed: %d\n", __func__, status); 4814 4815 return status; 4816} 4817 4818static int nfs4_do_set_security_label(struct inode *inode, 4819 struct nfs4_label *ilabel, 4820 struct nfs_fattr *fattr, 4821 struct nfs4_label *olabel) 4822{ 4823 struct nfs4_exception exception = { }; 4824 int err; 4825 4826 do { 4827 err = _nfs4_do_set_security_label(inode, ilabel, 4828 fattr, olabel); 4829 trace_nfs4_set_security_label(inode, err); 4830 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4831 &exception); 4832 } while (exception.retry); 4833 return err; 4834} 4835 4836static int 4837nfs4_set_security_label(struct dentry *dentry, const void *buf, size_t buflen) 4838{ 4839 struct nfs4_label ilabel, *olabel = NULL; 4840 struct nfs_fattr fattr; 4841 struct rpc_cred *cred; 4842 struct inode *inode = d_inode(dentry); 4843 int status; 4844 4845 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 4846 return -EOPNOTSUPP; 4847 4848 nfs_fattr_init(&fattr); 4849 4850 ilabel.pi = 0; 4851 ilabel.lfs = 0; 4852 ilabel.label = (char *)buf; 4853 ilabel.len = buflen; 4854 4855 cred = rpc_lookup_cred(); 4856 if (IS_ERR(cred)) 4857 return PTR_ERR(cred); 4858 4859 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL); 4860 if (IS_ERR(olabel)) { 4861 status = -PTR_ERR(olabel); 4862 goto out; 4863 } 4864 4865 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel); 4866 if (status == 0) 4867 nfs_setsecurity(inode, &fattr, olabel); 4868 4869 nfs4_label_free(olabel); 4870out: 4871 put_rpccred(cred); 4872 return status; 4873} 4874#endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 4875 4876 4877static int 4878nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, 4879 struct nfs4_state *state, long *timeout) 4880{ 4881 struct nfs_client *clp = server->nfs_client; 4882 4883 if (task->tk_status >= 0) 4884 return 0; 4885 switch(task->tk_status) { 4886 case -NFS4ERR_DELEG_REVOKED: 4887 case -NFS4ERR_ADMIN_REVOKED: 4888 case -NFS4ERR_BAD_STATEID: 4889 case -NFS4ERR_OPENMODE: 4890 if (state == NULL) 4891 break; 4892 if (nfs4_schedule_stateid_recovery(server, state) < 0) 4893 goto recovery_failed; 4894 goto wait_on_recovery; 4895 case -NFS4ERR_EXPIRED: 4896 if (state != NULL) { 4897 if (nfs4_schedule_stateid_recovery(server, state) < 0) 4898 goto recovery_failed; 4899 } 4900 case -NFS4ERR_STALE_STATEID: 4901 case -NFS4ERR_STALE_CLIENTID: 4902 nfs4_schedule_lease_recovery(clp); 4903 goto wait_on_recovery; 4904 case -NFS4ERR_MOVED: 4905 if (nfs4_schedule_migration_recovery(server) < 0) 4906 goto recovery_failed; 4907 goto wait_on_recovery; 4908 case -NFS4ERR_LEASE_MOVED: 4909 nfs4_schedule_lease_moved_recovery(clp); 4910 goto wait_on_recovery; 4911#if defined(CONFIG_NFS_V4_1) 4912 case -NFS4ERR_BADSESSION: 4913 case -NFS4ERR_BADSLOT: 4914 case -NFS4ERR_BAD_HIGH_SLOT: 4915 case -NFS4ERR_DEADSESSION: 4916 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 4917 case -NFS4ERR_SEQ_FALSE_RETRY: 4918 case -NFS4ERR_SEQ_MISORDERED: 4919 dprintk("%s ERROR %d, Reset session\n", __func__, 4920 task->tk_status); 4921 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); 4922 goto wait_on_recovery; 
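		/*
		 * None of these session errors can be retried as-is: the
		 * state manager recovers the session (typically by resetting
		 * it), and tasks parked on cl_rpcwaitq via wait_on_recovery
		 * below retry once that has completed.
		 */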
4923#endif /* CONFIG_NFS_V4_1 */ 4924 case -NFS4ERR_DELAY: 4925 nfs_inc_server_stats(server, NFSIOS_DELAY); 4926 rpc_delay(task, nfs4_update_delay(timeout)); 4927 goto restart_call; 4928 case -NFS4ERR_GRACE: 4929 rpc_delay(task, NFS4_POLL_RETRY_MAX); 4930 case -NFS4ERR_RETRY_UNCACHED_REP: 4931 case -NFS4ERR_OLD_STATEID: 4932 goto restart_call; 4933 } 4934 task->tk_status = nfs4_map_errors(task->tk_status); 4935 return 0; 4936recovery_failed: 4937 task->tk_status = -EIO; 4938 return 0; 4939wait_on_recovery: 4940 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); 4941 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) 4942 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); 4943 if (test_bit(NFS_MIG_FAILED, &server->mig_status)) 4944 goto recovery_failed; 4945restart_call: 4946 task->tk_status = 0; 4947 return -EAGAIN; 4948} 4949 4950static void nfs4_init_boot_verifier(const struct nfs_client *clp, 4951 nfs4_verifier *bootverf) 4952{ 4953 __be32 verf[2]; 4954 4955 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 4956 /* An impossible timestamp guarantees this value 4957 * will never match a generated boot time. */ 4958 verf[0] = 0; 4959 verf[1] = cpu_to_be32(NSEC_PER_SEC + 1); 4960 } else { 4961 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 4962 verf[0] = cpu_to_be32(nn->boot_time.tv_sec); 4963 verf[1] = cpu_to_be32(nn->boot_time.tv_nsec); 4964 } 4965 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 4966} 4967 4968static unsigned int 4969nfs4_init_nonuniform_client_string(struct nfs_client *clp, 4970 char *buf, size_t len) 4971{ 4972 unsigned int result; 4973 4974 if (clp->cl_owner_id != NULL) 4975 return strlcpy(buf, clp->cl_owner_id, len); 4976 4977 rcu_read_lock(); 4978 result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s", 4979 clp->cl_ipaddr, 4980 rpc_peeraddr2str(clp->cl_rpcclient, 4981 RPC_DISPLAY_ADDR), 4982 rpc_peeraddr2str(clp->cl_rpcclient, 4983 RPC_DISPLAY_PROTO)); 4984 rcu_read_unlock(); 4985 clp->cl_owner_id = kstrdup(buf, GFP_KERNEL); 4986 return result; 4987} 4988 4989static unsigned int 4990nfs4_init_uniform_client_string(struct nfs_client *clp, 4991 char *buf, size_t len) 4992{ 4993 const char *nodename = clp->cl_rpcclient->cl_nodename; 4994 unsigned int result; 4995 4996 if (clp->cl_owner_id != NULL) 4997 return strlcpy(buf, clp->cl_owner_id, len); 4998 4999 if (nfs4_client_id_uniquifier[0] != '\0') 5000 result = scnprintf(buf, len, "Linux NFSv%u.%u %s/%s", 5001 clp->rpc_ops->version, 5002 clp->cl_minorversion, 5003 nfs4_client_id_uniquifier, 5004 nodename); 5005 else 5006 result = scnprintf(buf, len, "Linux NFSv%u.%u %s", 5007 clp->rpc_ops->version, clp->cl_minorversion, 5008 nodename); 5009 clp->cl_owner_id = kstrdup(buf, GFP_KERNEL); 5010 return result; 5011} 5012 5013/* 5014 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback 5015 * services. Advertise one based on the address family of the 5016 * clientaddr. 
5017 */ 5018static unsigned int 5019nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len) 5020{ 5021 if (strchr(clp->cl_ipaddr, ':') != NULL) 5022 return scnprintf(buf, len, "tcp6"); 5023 else 5024 return scnprintf(buf, len, "tcp"); 5025} 5026 5027static void nfs4_setclientid_done(struct rpc_task *task, void *calldata) 5028{ 5029 struct nfs4_setclientid *sc = calldata; 5030 5031 if (task->tk_status == 0) 5032 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred); 5033} 5034 5035static const struct rpc_call_ops nfs4_setclientid_ops = { 5036 .rpc_call_done = nfs4_setclientid_done, 5037}; 5038 5039/** 5040 * nfs4_proc_setclientid - Negotiate client ID 5041 * @clp: state data structure 5042 * @program: RPC program for NFSv4 callback service 5043 * @port: IP port number for NFS4 callback service 5044 * @cred: RPC credential to use for this call 5045 * @res: where to place the result 5046 * 5047 * Returns zero, a negative errno, or a negative NFS4ERR status code. 5048 */ 5049int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 5050 unsigned short port, struct rpc_cred *cred, 5051 struct nfs4_setclientid_res *res) 5052{ 5053 nfs4_verifier sc_verifier; 5054 struct nfs4_setclientid setclientid = { 5055 .sc_verifier = &sc_verifier, 5056 .sc_prog = program, 5057 .sc_cb_ident = clp->cl_cb_ident, 5058 }; 5059 struct rpc_message msg = { 5060 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 5061 .rpc_argp = &setclientid, 5062 .rpc_resp = res, 5063 .rpc_cred = cred, 5064 }; 5065 struct rpc_task *task; 5066 struct rpc_task_setup task_setup_data = { 5067 .rpc_client = clp->cl_rpcclient, 5068 .rpc_message = &msg, 5069 .callback_ops = &nfs4_setclientid_ops, 5070 .callback_data = &setclientid, 5071 .flags = RPC_TASK_TIMEOUT, 5072 }; 5073 int status; 5074 5075 /* nfs_client_id4 */ 5076 nfs4_init_boot_verifier(clp, &sc_verifier); 5077 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags)) 5078 setclientid.sc_name_len = 5079 nfs4_init_uniform_client_string(clp, 5080 setclientid.sc_name, 5081 sizeof(setclientid.sc_name)); 5082 else 5083 setclientid.sc_name_len = 5084 nfs4_init_nonuniform_client_string(clp, 5085 setclientid.sc_name, 5086 sizeof(setclientid.sc_name)); 5087 /* cb_client4 */ 5088 setclientid.sc_netid_len = 5089 nfs4_init_callback_netid(clp, 5090 setclientid.sc_netid, 5091 sizeof(setclientid.sc_netid)); 5092 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, 5093 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 5094 clp->cl_ipaddr, port >> 8, port & 255); 5095 5096 dprintk("NFS call setclientid auth=%s, '%.*s'\n", 5097 clp->cl_rpcclient->cl_auth->au_ops->au_name, 5098 setclientid.sc_name_len, setclientid.sc_name); 5099 task = rpc_run_task(&task_setup_data); 5100 if (IS_ERR(task)) { 5101 status = PTR_ERR(task); 5102 goto out; 5103 } 5104 status = task->tk_status; 5105 if (setclientid.sc_cred) { 5106 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred); 5107 put_rpccred(setclientid.sc_cred); 5108 } 5109 rpc_put_task(task); 5110out: 5111 trace_nfs4_setclientid(clp, status); 5112 dprintk("NFS reply setclientid: %d\n", status); 5113 return status; 5114} 5115 5116/** 5117 * nfs4_proc_setclientid_confirm - Confirm client ID 5118 * @clp: state data structure 5119 * @res: result of a previous SETCLIENTID 5120 * @cred: RPC credential to use for this call 5121 * 5122 * Returns zero, a negative errno, or a negative NFS4ERR status code. 
5123 */ 5124int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 5125 struct nfs4_setclientid_res *arg, 5126 struct rpc_cred *cred) 5127{ 5128 struct rpc_message msg = { 5129 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 5130 .rpc_argp = arg, 5131 .rpc_cred = cred, 5132 }; 5133 int status; 5134 5135 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 5136 clp->cl_rpcclient->cl_auth->au_ops->au_name, 5137 clp->cl_clientid); 5138 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5139 trace_nfs4_setclientid_confirm(clp, status); 5140 dprintk("NFS reply setclientid_confirm: %d\n", status); 5141 return status; 5142} 5143 5144struct nfs4_delegreturndata { 5145 struct nfs4_delegreturnargs args; 5146 struct nfs4_delegreturnres res; 5147 struct nfs_fh fh; 5148 nfs4_stateid stateid; 5149 unsigned long timestamp; 5150 struct nfs_fattr fattr; 5151 int rpc_status; 5152 struct inode *inode; 5153 bool roc; 5154 u32 roc_barrier; 5155}; 5156 5157static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 5158{ 5159 struct nfs4_delegreturndata *data = calldata; 5160 5161 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5162 return; 5163 5164 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status); 5165 switch (task->tk_status) { 5166 case 0: 5167 renew_lease(data->res.server, data->timestamp); 5168 case -NFS4ERR_ADMIN_REVOKED: 5169 case -NFS4ERR_DELEG_REVOKED: 5170 case -NFS4ERR_BAD_STATEID: 5171 case -NFS4ERR_OLD_STATEID: 5172 case -NFS4ERR_STALE_STATEID: 5173 case -NFS4ERR_EXPIRED: 5174 task->tk_status = 0; 5175 if (data->roc) 5176 pnfs_roc_set_barrier(data->inode, data->roc_barrier); 5177 break; 5178 default: 5179 if (nfs4_async_handle_error(task, data->res.server, 5180 NULL, NULL) == -EAGAIN) { 5181 rpc_restart_call_prepare(task); 5182 return; 5183 } 5184 } 5185 data->rpc_status = task->tk_status; 5186} 5187 5188static void nfs4_delegreturn_release(void *calldata) 5189{ 5190 struct nfs4_delegreturndata *data = calldata; 5191 struct inode *inode = data->inode; 5192 5193 if (inode) { 5194 if (data->roc) 5195 pnfs_roc_release(inode); 5196 nfs_iput_and_deactive(inode); 5197 } 5198 kfree(calldata); 5199} 5200 5201static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 5202{ 5203 struct nfs4_delegreturndata *d_data; 5204 5205 d_data = (struct nfs4_delegreturndata *)data; 5206 5207 if (d_data->roc && 5208 pnfs_roc_drain(d_data->inode, &d_data->roc_barrier, task)) 5209 return; 5210 5211 nfs4_setup_sequence(d_data->res.server, 5212 &d_data->args.seq_args, 5213 &d_data->res.seq_res, 5214 task); 5215} 5216 5217static const struct rpc_call_ops nfs4_delegreturn_ops = { 5218 .rpc_call_prepare = nfs4_delegreturn_prepare, 5219 .rpc_call_done = nfs4_delegreturn_done, 5220 .rpc_release = nfs4_delegreturn_release, 5221}; 5222 5223static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 5224{ 5225 struct nfs4_delegreturndata *data; 5226 struct nfs_server *server = NFS_SERVER(inode); 5227 struct rpc_task *task; 5228 struct rpc_message msg = { 5229 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 5230 .rpc_cred = cred, 5231 }; 5232 struct rpc_task_setup task_setup_data = { 5233 .rpc_client = server->client, 5234 .rpc_message = &msg, 5235 .callback_ops = &nfs4_delegreturn_ops, 5236 .flags = RPC_TASK_ASYNC, 5237 }; 5238 int status = 0; 5239 5240 data = kzalloc(sizeof(*data), GFP_NOFS); 5241 if (data == NULL) 5242 return -ENOMEM; 5243 
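	/*
	 * From here on the shape matches the other async calls in this
	 * file: populate the DELEGRETURN arguments, run the task through
	 * rpc_run_task() with nfs4_delegreturn_ops supplying the
	 * prepare/done/release callbacks, and, when issync is set, wait for
	 * the result with nfs4_wait_for_completion_rpc_task().
	 */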
nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 5244 data->args.fhandle = &data->fh; 5245 data->args.stateid = &data->stateid; 5246 data->args.bitmask = server->cache_consistency_bitmask; 5247 nfs_copy_fh(&data->fh, NFS_FH(inode)); 5248 nfs4_stateid_copy(&data->stateid, stateid); 5249 data->res.fattr = &data->fattr; 5250 data->res.server = server; 5251 nfs_fattr_init(data->res.fattr); 5252 data->timestamp = jiffies; 5253 data->rpc_status = 0; 5254 data->inode = nfs_igrab_and_active(inode); 5255 if (data->inode) 5256 data->roc = nfs4_roc(inode); 5257 5258 task_setup_data.callback_data = data; 5259 msg.rpc_argp = &data->args; 5260 msg.rpc_resp = &data->res; 5261 task = rpc_run_task(&task_setup_data); 5262 if (IS_ERR(task)) 5263 return PTR_ERR(task); 5264 if (!issync) 5265 goto out; 5266 status = nfs4_wait_for_completion_rpc_task(task); 5267 if (status != 0) 5268 goto out; 5269 status = data->rpc_status; 5270 if (status == 0) 5271 nfs_post_op_update_inode_force_wcc(inode, &data->fattr); 5272 else 5273 nfs_refresh_inode(inode, &data->fattr); 5274out: 5275 rpc_put_task(task); 5276 return status; 5277} 5278 5279int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 5280{ 5281 struct nfs_server *server = NFS_SERVER(inode); 5282 struct nfs4_exception exception = { }; 5283 int err; 5284 do { 5285 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync); 5286 trace_nfs4_delegreturn(inode, err); 5287 switch (err) { 5288 case -NFS4ERR_STALE_STATEID: 5289 case -NFS4ERR_EXPIRED: 5290 case 0: 5291 return 0; 5292 } 5293 err = nfs4_handle_exception(server, err, &exception); 5294 } while (exception.retry); 5295 return err; 5296} 5297 5298#define NFS4_LOCK_MINTIMEOUT (1 * HZ) 5299#define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 5300 5301/* 5302 * sleep, with exponential backoff, and retry the LOCK operation. 
5303 */ 5304static unsigned long 5305nfs4_set_lock_task_retry(unsigned long timeout) 5306{ 5307 freezable_schedule_timeout_killable_unsafe(timeout); 5308 timeout <<= 1; 5309 if (timeout > NFS4_LOCK_MAXTIMEOUT) 5310 return NFS4_LOCK_MAXTIMEOUT; 5311 return timeout; 5312} 5313 5314static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 5315{ 5316 struct inode *inode = state->inode; 5317 struct nfs_server *server = NFS_SERVER(inode); 5318 struct nfs_client *clp = server->nfs_client; 5319 struct nfs_lockt_args arg = { 5320 .fh = NFS_FH(inode), 5321 .fl = request, 5322 }; 5323 struct nfs_lockt_res res = { 5324 .denied = request, 5325 }; 5326 struct rpc_message msg = { 5327 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 5328 .rpc_argp = &arg, 5329 .rpc_resp = &res, 5330 .rpc_cred = state->owner->so_cred, 5331 }; 5332 struct nfs4_lock_state *lsp; 5333 int status; 5334 5335 arg.lock_owner.clientid = clp->cl_clientid; 5336 status = nfs4_set_lock_state(state, request); 5337 if (status != 0) 5338 goto out; 5339 lsp = request->fl_u.nfs4_fl.owner; 5340 arg.lock_owner.id = lsp->ls_seqid.owner_id; 5341 arg.lock_owner.s_dev = server->s_dev; 5342 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 5343 switch (status) { 5344 case 0: 5345 request->fl_type = F_UNLCK; 5346 break; 5347 case -NFS4ERR_DENIED: 5348 status = 0; 5349 } 5350 request->fl_ops->fl_release_private(request); 5351 request->fl_ops = NULL; 5352out: 5353 return status; 5354} 5355 5356static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 5357{ 5358 struct nfs4_exception exception = { }; 5359 int err; 5360 5361 do { 5362 err = _nfs4_proc_getlk(state, cmd, request); 5363 trace_nfs4_get_lock(request, state, cmd, err); 5364 err = nfs4_handle_exception(NFS_SERVER(state->inode), err, 5365 &exception); 5366 } while (exception.retry); 5367 return err; 5368} 5369 5370static int do_vfs_lock(struct inode *inode, struct file_lock *fl) 5371{ 5372 int res = 0; 5373 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { 5374 case FL_POSIX: 5375 res = posix_lock_inode_wait(inode, fl); 5376 break; 5377 case FL_FLOCK: 5378 res = flock_lock_inode_wait(inode, fl); 5379 break; 5380 default: 5381 BUG(); 5382 } 5383 return res; 5384} 5385 5386struct nfs4_unlockdata { 5387 struct nfs_locku_args arg; 5388 struct nfs_locku_res res; 5389 struct nfs4_lock_state *lsp; 5390 struct nfs_open_context *ctx; 5391 struct file_lock fl; 5392 const struct nfs_server *server; 5393 unsigned long timestamp; 5394}; 5395 5396static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 5397 struct nfs_open_context *ctx, 5398 struct nfs4_lock_state *lsp, 5399 struct nfs_seqid *seqid) 5400{ 5401 struct nfs4_unlockdata *p; 5402 struct inode *inode = lsp->ls_state->inode; 5403 5404 p = kzalloc(sizeof(*p), GFP_NOFS); 5405 if (p == NULL) 5406 return NULL; 5407 p->arg.fh = NFS_FH(inode); 5408 p->arg.fl = &p->fl; 5409 p->arg.seqid = seqid; 5410 p->res.seqid = seqid; 5411 p->lsp = lsp; 5412 atomic_inc(&lsp->ls_count); 5413 /* Ensure we don't close file until we're done freeing locks! 
*/ 5414 p->ctx = get_nfs_open_context(ctx); 5415 memcpy(&p->fl, fl, sizeof(p->fl)); 5416 p->server = NFS_SERVER(inode); 5417 return p; 5418} 5419 5420static void nfs4_locku_release_calldata(void *data) 5421{ 5422 struct nfs4_unlockdata *calldata = data; 5423 nfs_free_seqid(calldata->arg.seqid); 5424 nfs4_put_lock_state(calldata->lsp); 5425 put_nfs_open_context(calldata->ctx); 5426 kfree(calldata); 5427} 5428 5429static void nfs4_locku_done(struct rpc_task *task, void *data) 5430{ 5431 struct nfs4_unlockdata *calldata = data; 5432 5433 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 5434 return; 5435 switch (task->tk_status) { 5436 case 0: 5437 renew_lease(calldata->server, calldata->timestamp); 5438 do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl); 5439 if (nfs4_update_lock_stateid(calldata->lsp, 5440 &calldata->res.stateid)) 5441 break; 5442 case -NFS4ERR_BAD_STATEID: 5443 case -NFS4ERR_OLD_STATEID: 5444 case -NFS4ERR_STALE_STATEID: 5445 case -NFS4ERR_EXPIRED: 5446 if (!nfs4_stateid_match(&calldata->arg.stateid, 5447 &calldata->lsp->ls_stateid)) 5448 rpc_restart_call_prepare(task); 5449 break; 5450 default: 5451 if (nfs4_async_handle_error(task, calldata->server, 5452 NULL, NULL) == -EAGAIN) 5453 rpc_restart_call_prepare(task); 5454 } 5455 nfs_release_seqid(calldata->arg.seqid); 5456} 5457 5458static void nfs4_locku_prepare(struct rpc_task *task, void *data) 5459{ 5460 struct nfs4_unlockdata *calldata = data; 5461 5462 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 5463 goto out_wait; 5464 nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid); 5465 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { 5466 /* Note: exit _without_ running nfs4_locku_done */ 5467 goto out_no_action; 5468 } 5469 calldata->timestamp = jiffies; 5470 if (nfs4_setup_sequence(calldata->server, 5471 &calldata->arg.seq_args, 5472 &calldata->res.seq_res, 5473 task) != 0) 5474 nfs_release_seqid(calldata->arg.seqid); 5475 return; 5476out_no_action: 5477 task->tk_action = NULL; 5478out_wait: 5479 nfs4_sequence_done(task, &calldata->res.seq_res); 5480} 5481 5482static const struct rpc_call_ops nfs4_locku_ops = { 5483 .rpc_call_prepare = nfs4_locku_prepare, 5484 .rpc_call_done = nfs4_locku_done, 5485 .rpc_release = nfs4_locku_release_calldata, 5486}; 5487 5488static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 5489 struct nfs_open_context *ctx, 5490 struct nfs4_lock_state *lsp, 5491 struct nfs_seqid *seqid) 5492{ 5493 struct nfs4_unlockdata *data; 5494 struct rpc_message msg = { 5495 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], 5496 .rpc_cred = ctx->cred, 5497 }; 5498 struct rpc_task_setup task_setup_data = { 5499 .rpc_client = NFS_CLIENT(lsp->ls_state->inode), 5500 .rpc_message = &msg, 5501 .callback_ops = &nfs4_locku_ops, 5502 .workqueue = nfsiod_workqueue, 5503 .flags = RPC_TASK_ASYNC, 5504 }; 5505 5506 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client, 5507 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg); 5508 5509 /* Ensure this is an unlock - when canceling a lock, the 5510 * canceled lock is passed in, and it won't be an unlock. 
5511 */ 5512 fl->fl_type = F_UNLCK; 5513 5514 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 5515 if (data == NULL) { 5516 nfs_free_seqid(seqid); 5517 return ERR_PTR(-ENOMEM); 5518 } 5519 5520 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 5521 msg.rpc_argp = &data->arg; 5522 msg.rpc_resp = &data->res; 5523 task_setup_data.callback_data = data; 5524 return rpc_run_task(&task_setup_data); 5525} 5526 5527static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 5528{ 5529 struct inode *inode = state->inode; 5530 struct nfs4_state_owner *sp = state->owner; 5531 struct nfs_inode *nfsi = NFS_I(inode); 5532 struct nfs_seqid *seqid; 5533 struct nfs4_lock_state *lsp; 5534 struct rpc_task *task; 5535 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 5536 int status = 0; 5537 unsigned char fl_flags = request->fl_flags; 5538 5539 status = nfs4_set_lock_state(state, request); 5540 /* Unlock _before_ we do the RPC call */ 5541 request->fl_flags |= FL_EXISTS; 5542 /* Exclude nfs_delegation_claim_locks() */ 5543 mutex_lock(&sp->so_delegreturn_mutex); 5544 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */ 5545 down_read(&nfsi->rwsem); 5546 if (do_vfs_lock(inode, request) == -ENOENT) { 5547 up_read(&nfsi->rwsem); 5548 mutex_unlock(&sp->so_delegreturn_mutex); 5549 goto out; 5550 } 5551 up_read(&nfsi->rwsem); 5552 mutex_unlock(&sp->so_delegreturn_mutex); 5553 if (status != 0) 5554 goto out; 5555 /* Is this a delegated lock? */ 5556 lsp = request->fl_u.nfs4_fl.owner; 5557 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0) 5558 goto out; 5559 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid; 5560 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 5561 status = -ENOMEM; 5562 if (IS_ERR(seqid)) 5563 goto out; 5564 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid); 5565 status = PTR_ERR(task); 5566 if (IS_ERR(task)) 5567 goto out; 5568 status = nfs4_wait_for_completion_rpc_task(task); 5569 rpc_put_task(task); 5570out: 5571 request->fl_flags = fl_flags; 5572 trace_nfs4_unlock(request, state, F_SETLK, status); 5573 return status; 5574} 5575 5576struct nfs4_lockdata { 5577 struct nfs_lock_args arg; 5578 struct nfs_lock_res res; 5579 struct nfs4_lock_state *lsp; 5580 struct nfs_open_context *ctx; 5581 struct file_lock fl; 5582 unsigned long timestamp; 5583 int rpc_status; 5584 int cancelled; 5585 struct nfs_server *server; 5586}; 5587 5588static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 5589 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 5590 gfp_t gfp_mask) 5591{ 5592 struct nfs4_lockdata *p; 5593 struct inode *inode = lsp->ls_state->inode; 5594 struct nfs_server *server = NFS_SERVER(inode); 5595 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 5596 5597 p = kzalloc(sizeof(*p), gfp_mask); 5598 if (p == NULL) 5599 return NULL; 5600 5601 p->arg.fh = NFS_FH(inode); 5602 p->arg.fl = &p->fl; 5603 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 5604 if (IS_ERR(p->arg.open_seqid)) 5605 goto out_free; 5606 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 5607 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask); 5608 if (IS_ERR(p->arg.lock_seqid)) 5609 goto out_free_seqid; 5610 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 5611 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 5612 p->arg.lock_owner.s_dev = server->s_dev; 5613 p->res.lock_seqid = p->arg.lock_seqid; 5614 p->lsp = 
lsp; 5615 p->server = server; 5616 atomic_inc(&lsp->ls_count); 5617 p->ctx = get_nfs_open_context(ctx); 5618 get_file(fl->fl_file); 5619 memcpy(&p->fl, fl, sizeof(p->fl)); 5620 return p; 5621out_free_seqid: 5622 nfs_free_seqid(p->arg.open_seqid); 5623out_free: 5624 kfree(p); 5625 return NULL; 5626} 5627 5628static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 5629{ 5630 struct nfs4_lockdata *data = calldata; 5631 struct nfs4_state *state = data->lsp->ls_state; 5632 5633 dprintk("%s: begin!\n", __func__); 5634 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 5635 goto out_wait; 5636 /* Do we need to do an open_to_lock_owner? */ 5637 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) { 5638 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { 5639 goto out_release_lock_seqid; 5640 } 5641 nfs4_stateid_copy(&data->arg.open_stateid, 5642 &state->open_stateid); 5643 data->arg.new_lock_owner = 1; 5644 data->res.open_seqid = data->arg.open_seqid; 5645 } else { 5646 data->arg.new_lock_owner = 0; 5647 nfs4_stateid_copy(&data->arg.lock_stateid, 5648 &data->lsp->ls_stateid); 5649 } 5650 if (!nfs4_valid_open_stateid(state)) { 5651 data->rpc_status = -EBADF; 5652 task->tk_action = NULL; 5653 goto out_release_open_seqid; 5654 } 5655 data->timestamp = jiffies; 5656 if (nfs4_setup_sequence(data->server, 5657 &data->arg.seq_args, 5658 &data->res.seq_res, 5659 task) == 0) 5660 return; 5661out_release_open_seqid: 5662 nfs_release_seqid(data->arg.open_seqid); 5663out_release_lock_seqid: 5664 nfs_release_seqid(data->arg.lock_seqid); 5665out_wait: 5666 nfs4_sequence_done(task, &data->res.seq_res); 5667 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status); 5668} 5669 5670static void nfs4_lock_done(struct rpc_task *task, void *calldata) 5671{ 5672 struct nfs4_lockdata *data = calldata; 5673 struct nfs4_lock_state *lsp = data->lsp; 5674 5675 dprintk("%s: begin!\n", __func__); 5676 5677 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5678 return; 5679 5680 data->rpc_status = task->tk_status; 5681 switch (task->tk_status) { 5682 case 0: 5683 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)), 5684 data->timestamp); 5685 if (data->arg.new_lock) { 5686 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS); 5687 if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) { 5688 rpc_restart_call_prepare(task); 5689 break; 5690 } 5691 } 5692 if (data->arg.new_lock_owner != 0) { 5693 nfs_confirm_seqid(&lsp->ls_seqid, 0); 5694 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid); 5695 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 5696 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) 5697 rpc_restart_call_prepare(task); 5698 break; 5699 case -NFS4ERR_BAD_STATEID: 5700 case -NFS4ERR_OLD_STATEID: 5701 case -NFS4ERR_STALE_STATEID: 5702 case -NFS4ERR_EXPIRED: 5703 if (data->arg.new_lock_owner != 0) { 5704 if (!nfs4_stateid_match(&data->arg.open_stateid, 5705 &lsp->ls_state->open_stateid)) 5706 rpc_restart_call_prepare(task); 5707 } else if (!nfs4_stateid_match(&data->arg.lock_stateid, 5708 &lsp->ls_stateid)) 5709 rpc_restart_call_prepare(task); 5710 } 5711 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status); 5712} 5713 5714static void nfs4_lock_release(void *calldata) 5715{ 5716 struct nfs4_lockdata *data = calldata; 5717 5718 dprintk("%s: begin!\n", __func__); 5719 nfs_free_seqid(data->arg.open_seqid); 5720 if (data->cancelled != 0) { 5721 struct rpc_task *task; 5722 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 5723 data->arg.lock_seqid); 5724 if 
(!IS_ERR(task)) 5725 rpc_put_task_async(task); 5726 dprintk("%s: cancelling lock!\n", __func__); 5727 } else 5728 nfs_free_seqid(data->arg.lock_seqid); 5729 nfs4_put_lock_state(data->lsp); 5730 put_nfs_open_context(data->ctx); 5731 fput(data->fl.fl_file); 5732 kfree(data); 5733 dprintk("%s: done!\n", __func__); 5734} 5735 5736static const struct rpc_call_ops nfs4_lock_ops = { 5737 .rpc_call_prepare = nfs4_lock_prepare, 5738 .rpc_call_done = nfs4_lock_done, 5739 .rpc_release = nfs4_lock_release, 5740}; 5741 5742static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 5743{ 5744 switch (error) { 5745 case -NFS4ERR_ADMIN_REVOKED: 5746 case -NFS4ERR_BAD_STATEID: 5747 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 5748 if (new_lock_owner != 0 || 5749 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) 5750 nfs4_schedule_stateid_recovery(server, lsp->ls_state); 5751 break; 5752 case -NFS4ERR_STALE_STATEID: 5753 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 5754 case -NFS4ERR_EXPIRED: 5755 nfs4_schedule_lease_recovery(server->nfs_client); 5756 }; 5757} 5758 5759static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 5760{ 5761 struct nfs4_lockdata *data; 5762 struct rpc_task *task; 5763 struct rpc_message msg = { 5764 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK], 5765 .rpc_cred = state->owner->so_cred, 5766 }; 5767 struct rpc_task_setup task_setup_data = { 5768 .rpc_client = NFS_CLIENT(state->inode), 5769 .rpc_message = &msg, 5770 .callback_ops = &nfs4_lock_ops, 5771 .workqueue = nfsiod_workqueue, 5772 .flags = RPC_TASK_ASYNC, 5773 }; 5774 int ret; 5775 5776 dprintk("%s: begin!\n", __func__); 5777 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file), 5778 fl->fl_u.nfs4_fl.owner, 5779 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS); 5780 if (data == NULL) 5781 return -ENOMEM; 5782 if (IS_SETLKW(cmd)) 5783 data->arg.block = 1; 5784 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 5785 msg.rpc_argp = &data->arg; 5786 msg.rpc_resp = &data->res; 5787 task_setup_data.callback_data = data; 5788 if (recovery_type > NFS_LOCK_NEW) { 5789 if (recovery_type == NFS_LOCK_RECLAIM) 5790 data->arg.reclaim = NFS_LOCK_RECLAIM; 5791 nfs4_set_sequence_privileged(&data->arg.seq_args); 5792 } else 5793 data->arg.new_lock = 1; 5794 task = rpc_run_task(&task_setup_data); 5795 if (IS_ERR(task)) 5796 return PTR_ERR(task); 5797 ret = nfs4_wait_for_completion_rpc_task(task); 5798 if (ret == 0) { 5799 ret = data->rpc_status; 5800 if (ret) 5801 nfs4_handle_setlk_error(data->server, data->lsp, 5802 data->arg.new_lock_owner, ret); 5803 } else 5804 data->cancelled = 1; 5805 rpc_put_task(task); 5806 dprintk("%s: done, ret = %d!\n", __func__, ret); 5807 return ret; 5808} 5809 5810static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 5811{ 5812 struct nfs_server *server = NFS_SERVER(state->inode); 5813 struct nfs4_exception exception = { 5814 .inode = state->inode, 5815 }; 5816 int err; 5817 5818 do { 5819 /* Cache the lock if possible... 
*/ 5820 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 5821 return 0; 5822 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 5823 trace_nfs4_lock_reclaim(request, state, F_SETLK, err); 5824 if (err != -NFS4ERR_DELAY) 5825 break; 5826 nfs4_handle_exception(server, err, &exception); 5827 } while (exception.retry); 5828 return err; 5829} 5830 5831static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 5832{ 5833 struct nfs_server *server = NFS_SERVER(state->inode); 5834 struct nfs4_exception exception = { 5835 .inode = state->inode, 5836 }; 5837 int err; 5838 5839 err = nfs4_set_lock_state(state, request); 5840 if (err != 0) 5841 return err; 5842 if (!recover_lost_locks) { 5843 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags); 5844 return 0; 5845 } 5846 do { 5847 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 5848 return 0; 5849 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED); 5850 trace_nfs4_lock_expired(request, state, F_SETLK, err); 5851 switch (err) { 5852 default: 5853 goto out; 5854 case -NFS4ERR_GRACE: 5855 case -NFS4ERR_DELAY: 5856 nfs4_handle_exception(server, err, &exception); 5857 err = 0; 5858 } 5859 } while (exception.retry); 5860out: 5861 return err; 5862} 5863 5864#if defined(CONFIG_NFS_V4_1) 5865/** 5866 * nfs41_check_expired_locks - possibly free a lock stateid 5867 * 5868 * @state: NFSv4 state for an inode 5869 * 5870 * Returns NFS_OK if recovery for this stateid is now finished. 5871 * Otherwise a negative NFS4ERR value is returned. 5872 */ 5873static int nfs41_check_expired_locks(struct nfs4_state *state) 5874{ 5875 int status, ret = -NFS4ERR_BAD_STATEID; 5876 struct nfs4_lock_state *lsp; 5877 struct nfs_server *server = NFS_SERVER(state->inode); 5878 5879 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 5880 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 5881 struct rpc_cred *cred = lsp->ls_state->owner->so_cred; 5882 5883 status = nfs41_test_stateid(server, 5884 &lsp->ls_stateid, 5885 cred); 5886 trace_nfs4_test_lock_stateid(state, lsp, status); 5887 if (status != NFS_OK) { 5888 /* Free the stateid unless the server 5889 * informs us the stateid is unrecognized. */ 5890 if (status != -NFS4ERR_BAD_STATEID) 5891 nfs41_free_stateid(server, 5892 &lsp->ls_stateid, 5893 cred); 5894 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 5895 ret = status; 5896 } 5897 } 5898 }; 5899 5900 return ret; 5901} 5902 5903static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) 5904{ 5905 int status = NFS_OK; 5906 5907 if (test_bit(LK_STATE_IN_USE, &state->flags)) 5908 status = nfs41_check_expired_locks(state); 5909 if (status != NFS_OK) 5910 status = nfs4_lock_expired(state, request); 5911 return status; 5912} 5913#endif 5914 5915static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 5916{ 5917 struct nfs_inode *nfsi = NFS_I(state->inode); 5918 unsigned char fl_flags = request->fl_flags; 5919 int status = -ENOLCK; 5920 5921 if ((fl_flags & FL_POSIX) && 5922 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 5923 goto out; 5924 /* Is this a delegated open? */ 5925 status = nfs4_set_lock_state(state, request); 5926 if (status != 0) 5927 goto out; 5928 request->fl_flags |= FL_ACCESS; 5929 status = do_vfs_lock(state->inode, request); 5930 if (status < 0) 5931 goto out; 5932 down_read(&nfsi->rwsem); 5933 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 5934 /* Yes: cache locks! */ 5935 /* ...but avoid races with delegation recall... 
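 * (the local lock request below runs with FL_SLEEP cleared, so it
 * cannot block while nfsi->rwsem is held)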
*/ 5936 request->fl_flags = fl_flags & ~FL_SLEEP; 5937 status = do_vfs_lock(state->inode, request); 5938 up_read(&nfsi->rwsem); 5939 goto out; 5940 } 5941 up_read(&nfsi->rwsem); 5942 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 5943out: 5944 request->fl_flags = fl_flags; 5945 return status; 5946} 5947 5948static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 5949{ 5950 struct nfs4_exception exception = { 5951 .state = state, 5952 .inode = state->inode, 5953 }; 5954 int err; 5955 5956 do { 5957 err = _nfs4_proc_setlk(state, cmd, request); 5958 trace_nfs4_set_lock(request, state, cmd, err); 5959 if (err == -NFS4ERR_DENIED) 5960 err = -EAGAIN; 5961 err = nfs4_handle_exception(NFS_SERVER(state->inode), 5962 err, &exception); 5963 } while (exception.retry); 5964 return err; 5965} 5966 5967static int 5968nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 5969{ 5970 struct nfs_open_context *ctx; 5971 struct nfs4_state *state; 5972 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 5973 int status; 5974 5975 /* verify open state */ 5976 ctx = nfs_file_open_context(filp); 5977 state = ctx->state; 5978 5979 if (request->fl_start < 0 || request->fl_end < 0) 5980 return -EINVAL; 5981 5982 if (IS_GETLK(cmd)) { 5983 if (state != NULL) 5984 return nfs4_proc_getlk(state, F_GETLK, request); 5985 return 0; 5986 } 5987 5988 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 5989 return -EINVAL; 5990 5991 if (request->fl_type == F_UNLCK) { 5992 if (state != NULL) 5993 return nfs4_proc_unlck(state, cmd, request); 5994 return 0; 5995 } 5996 5997 if (state == NULL) 5998 return -ENOLCK; 5999 /* 6000 * Don't rely on the VFS having checked the file open mode, 6001 * since it won't do this for flock() locks. 6002 */ 6003 switch (request->fl_type) { 6004 case F_RDLCK: 6005 if (!(filp->f_mode & FMODE_READ)) 6006 return -EBADF; 6007 break; 6008 case F_WRLCK: 6009 if (!(filp->f_mode & FMODE_WRITE)) 6010 return -EBADF; 6011 } 6012 6013 do { 6014 status = nfs4_proc_setlk(state, cmd, request); 6015 if ((status != -EAGAIN) || IS_SETLK(cmd)) 6016 break; 6017 timeout = nfs4_set_lock_task_retry(timeout); 6018 status = -ERESTARTSYS; 6019 if (signalled()) 6020 break; 6021 } while(status < 0); 6022 return status; 6023} 6024 6025int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid) 6026{ 6027 struct nfs_server *server = NFS_SERVER(state->inode); 6028 int err; 6029 6030 err = nfs4_set_lock_state(state, fl); 6031 if (err != 0) 6032 return err; 6033 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 6034 return nfs4_handle_delegation_recall_error(server, state, stateid, err); 6035} 6036 6037struct nfs_release_lockowner_data { 6038 struct nfs4_lock_state *lsp; 6039 struct nfs_server *server; 6040 struct nfs_release_lockowner_args args; 6041 struct nfs_release_lockowner_res res; 6042 unsigned long timestamp; 6043}; 6044 6045static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata) 6046{ 6047 struct nfs_release_lockowner_data *data = calldata; 6048 struct nfs_server *server = data->server; 6049 nfs40_setup_sequence(server->nfs_client->cl_slot_tbl, 6050 &data->args.seq_args, &data->res.seq_res, task); 6051 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 6052 data->timestamp = jiffies; 6053} 6054 6055static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata) 6056{ 6057 struct nfs_release_lockowner_data *data = calldata; 6058 struct nfs_server *server = data->server; 
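	/* Finish the v4.0 call sequencing, then renew the lease on success
	 * or kick off lease recovery / a retry for the errors handled below.
	 */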
6059 6060 nfs40_sequence_done(task, &data->res.seq_res); 6061 6062 switch (task->tk_status) { 6063 case 0: 6064 renew_lease(server, data->timestamp); 6065 break; 6066 case -NFS4ERR_STALE_CLIENTID: 6067 case -NFS4ERR_EXPIRED: 6068 nfs4_schedule_lease_recovery(server->nfs_client); 6069 break; 6070 case -NFS4ERR_LEASE_MOVED: 6071 case -NFS4ERR_DELAY: 6072 if (nfs4_async_handle_error(task, server, 6073 NULL, NULL) == -EAGAIN) 6074 rpc_restart_call_prepare(task); 6075 } 6076} 6077 6078static void nfs4_release_lockowner_release(void *calldata) 6079{ 6080 struct nfs_release_lockowner_data *data = calldata; 6081 nfs4_free_lock_state(data->server, data->lsp); 6082 kfree(calldata); 6083} 6084 6085static const struct rpc_call_ops nfs4_release_lockowner_ops = { 6086 .rpc_call_prepare = nfs4_release_lockowner_prepare, 6087 .rpc_call_done = nfs4_release_lockowner_done, 6088 .rpc_release = nfs4_release_lockowner_release, 6089}; 6090 6091static void 6092nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp) 6093{ 6094 struct nfs_release_lockowner_data *data; 6095 struct rpc_message msg = { 6096 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], 6097 }; 6098 6099 if (server->nfs_client->cl_mvops->minor_version != 0) 6100 return; 6101 6102 data = kmalloc(sizeof(*data), GFP_NOFS); 6103 if (!data) 6104 return; 6105 data->lsp = lsp; 6106 data->server = server; 6107 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 6108 data->args.lock_owner.id = lsp->ls_seqid.owner_id; 6109 data->args.lock_owner.s_dev = server->s_dev; 6110 6111 msg.rpc_argp = &data->args; 6112 msg.rpc_resp = &data->res; 6113 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 6114 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 6115} 6116 6117#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 6118 6119static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key, 6120 const void *buf, size_t buflen, 6121 int flags, int type) 6122{ 6123 if (strcmp(key, "") != 0) 6124 return -EINVAL; 6125 6126 return nfs4_proc_set_acl(d_inode(dentry), buf, buflen); 6127} 6128 6129static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key, 6130 void *buf, size_t buflen, int type) 6131{ 6132 if (strcmp(key, "") != 0) 6133 return -EINVAL; 6134 6135 return nfs4_proc_get_acl(d_inode(dentry), buf, buflen); 6136} 6137 6138static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list, 6139 size_t list_len, const char *name, 6140 size_t name_len, int type) 6141{ 6142 size_t len = sizeof(XATTR_NAME_NFSV4_ACL); 6143 6144 if (!nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry)))) 6145 return 0; 6146 6147 if (list && len <= list_len) 6148 memcpy(list, XATTR_NAME_NFSV4_ACL, len); 6149 return len; 6150} 6151 6152#ifdef CONFIG_NFS_V4_SECURITY_LABEL 6153static inline int nfs4_server_supports_labels(struct nfs_server *server) 6154{ 6155 return server->caps & NFS_CAP_SECURITY_LABEL; 6156} 6157 6158static int nfs4_xattr_set_nfs4_label(struct dentry *dentry, const char *key, 6159 const void *buf, size_t buflen, 6160 int flags, int type) 6161{ 6162 if (security_ismaclabel(key)) 6163 return nfs4_set_security_label(dentry, buf, buflen); 6164 6165 return -EOPNOTSUPP; 6166} 6167 6168static int nfs4_xattr_get_nfs4_label(struct dentry *dentry, const char *key, 6169 void *buf, size_t buflen, int type) 6170{ 6171 if (security_ismaclabel(key)) 6172 return nfs4_get_security_label(d_inode(dentry), buf, buflen); 6173 return -EOPNOTSUPP; 6174} 6175 6176static size_t 
nfs4_xattr_list_nfs4_label(struct dentry *dentry, char *list, 6177 size_t list_len, const char *name, 6178 size_t name_len, int type) 6179{ 6180 size_t len = 0; 6181 6182 if (nfs_server_capable(d_inode(dentry), NFS_CAP_SECURITY_LABEL)) { 6183 len = security_inode_listsecurity(d_inode(dentry), NULL, 0); 6184 if (list && len <= list_len) 6185 security_inode_listsecurity(d_inode(dentry), list, len); 6186 } 6187 return len; 6188} 6189 6190static const struct xattr_handler nfs4_xattr_nfs4_label_handler = { 6191 .prefix = XATTR_SECURITY_PREFIX, 6192 .list = nfs4_xattr_list_nfs4_label, 6193 .get = nfs4_xattr_get_nfs4_label, 6194 .set = nfs4_xattr_set_nfs4_label, 6195}; 6196#endif 6197 6198 6199/* 6200 * nfs_fhget will use either the mounted_on_fileid or the fileid 6201 */ 6202static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 6203{ 6204 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 6205 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 6206 (fattr->valid & NFS_ATTR_FATTR_FSID) && 6207 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 6208 return; 6209 6210 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 6211 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; 6212 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 6213 fattr->nlink = 2; 6214} 6215 6216static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 6217 const struct qstr *name, 6218 struct nfs4_fs_locations *fs_locations, 6219 struct page *page) 6220{ 6221 struct nfs_server *server = NFS_SERVER(dir); 6222 u32 bitmask[3] = { 6223 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6224 }; 6225 struct nfs4_fs_locations_arg args = { 6226 .dir_fh = NFS_FH(dir), 6227 .name = name, 6228 .page = page, 6229 .bitmask = bitmask, 6230 }; 6231 struct nfs4_fs_locations_res res = { 6232 .fs_locations = fs_locations, 6233 }; 6234 struct rpc_message msg = { 6235 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6236 .rpc_argp = &args, 6237 .rpc_resp = &res, 6238 }; 6239 int status; 6240 6241 dprintk("%s: start\n", __func__); 6242 6243 /* Ask for the fileid of the absent filesystem if mounted_on_fileid 6244 * is not supported */ 6245 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 6246 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID; 6247 else 6248 bitmask[0] |= FATTR4_WORD0_FILEID; 6249 6250 nfs_fattr_init(&fs_locations->fattr); 6251 fs_locations->server = server; 6252 fs_locations->nlocations = 0; 6253 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 6254 dprintk("%s: returned status = %d\n", __func__, status); 6255 return status; 6256} 6257 6258int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 6259 const struct qstr *name, 6260 struct nfs4_fs_locations *fs_locations, 6261 struct page *page) 6262{ 6263 struct nfs4_exception exception = { }; 6264 int err; 6265 do { 6266 err = _nfs4_proc_fs_locations(client, dir, name, 6267 fs_locations, page); 6268 trace_nfs4_get_fs_locations(dir, name, err); 6269 err = nfs4_handle_exception(NFS_SERVER(dir), err, 6270 &exception); 6271 } while (exception.retry); 6272 return err; 6273} 6274 6275/* 6276 * This operation also signals the server that this client is 6277 * performing migration recovery. The server can stop returning 6278 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is 6279 * appended to this compound to identify the client ID which is 6280 * performing recovery. 
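 * Because of that RENEW, a successful reply also refreshes the lease:
 * renew_lease() is called below with the timestamp sampled before the
 * RPC was sent.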
6281 */ 6282static int _nfs40_proc_get_locations(struct inode *inode, 6283 struct nfs4_fs_locations *locations, 6284 struct page *page, struct rpc_cred *cred) 6285{ 6286 struct nfs_server *server = NFS_SERVER(inode); 6287 struct rpc_clnt *clnt = server->client; 6288 u32 bitmask[2] = { 6289 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6290 }; 6291 struct nfs4_fs_locations_arg args = { 6292 .clientid = server->nfs_client->cl_clientid, 6293 .fh = NFS_FH(inode), 6294 .page = page, 6295 .bitmask = bitmask, 6296 .migration = 1, /* skip LOOKUP */ 6297 .renew = 1, /* append RENEW */ 6298 }; 6299 struct nfs4_fs_locations_res res = { 6300 .fs_locations = locations, 6301 .migration = 1, 6302 .renew = 1, 6303 }; 6304 struct rpc_message msg = { 6305 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6306 .rpc_argp = &args, 6307 .rpc_resp = &res, 6308 .rpc_cred = cred, 6309 }; 6310 unsigned long now = jiffies; 6311 int status; 6312 6313 nfs_fattr_init(&locations->fattr); 6314 locations->server = server; 6315 locations->nlocations = 0; 6316 6317 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6318 nfs4_set_sequence_privileged(&args.seq_args); 6319 status = nfs4_call_sync_sequence(clnt, server, &msg, 6320 &args.seq_args, &res.seq_res); 6321 if (status) 6322 return status; 6323 6324 renew_lease(server, now); 6325 return 0; 6326} 6327 6328#ifdef CONFIG_NFS_V4_1 6329 6330/* 6331 * This operation also signals the server that this client is 6332 * performing migration recovery. The server can stop asserting 6333 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID 6334 * performing this operation is identified in the SEQUENCE 6335 * operation in this compound. 6336 * 6337 * When the client supports GETATTR(fs_locations_info), it can 6338 * be plumbed in here. 6339 */ 6340static int _nfs41_proc_get_locations(struct inode *inode, 6341 struct nfs4_fs_locations *locations, 6342 struct page *page, struct rpc_cred *cred) 6343{ 6344 struct nfs_server *server = NFS_SERVER(inode); 6345 struct rpc_clnt *clnt = server->client; 6346 u32 bitmask[2] = { 6347 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6348 }; 6349 struct nfs4_fs_locations_arg args = { 6350 .fh = NFS_FH(inode), 6351 .page = page, 6352 .bitmask = bitmask, 6353 .migration = 1, /* skip LOOKUP */ 6354 }; 6355 struct nfs4_fs_locations_res res = { 6356 .fs_locations = locations, 6357 .migration = 1, 6358 }; 6359 struct rpc_message msg = { 6360 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6361 .rpc_argp = &args, 6362 .rpc_resp = &res, 6363 .rpc_cred = cred, 6364 }; 6365 int status; 6366 6367 nfs_fattr_init(&locations->fattr); 6368 locations->server = server; 6369 locations->nlocations = 0; 6370 6371 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6372 nfs4_set_sequence_privileged(&args.seq_args); 6373 status = nfs4_call_sync_sequence(clnt, server, &msg, 6374 &args.seq_args, &res.seq_res); 6375 if (status == NFS4_OK && 6376 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 6377 status = -NFS4ERR_LEASE_MOVED; 6378 return status; 6379} 6380 6381#endif /* CONFIG_NFS_V4_1 */ 6382 6383/** 6384 * nfs4_proc_get_locations - discover locations for a migrated FSID 6385 * @inode: inode on FSID that is migrating 6386 * @locations: result of query 6387 * @page: buffer 6388 * @cred: credential to use for this operation 6389 * 6390 * Returns NFS4_OK on success, a negative NFS4ERR status code if the 6391 * operation failed, or a negative errno if a local error occurred. 
6392 * 6393 * On success, "locations" is filled in, but if the server has 6394 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not 6395 * asserted. 6396 * 6397 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases 6398 * from this client that require migration recovery. 6399 */ 6400int nfs4_proc_get_locations(struct inode *inode, 6401 struct nfs4_fs_locations *locations, 6402 struct page *page, struct rpc_cred *cred) 6403{ 6404 struct nfs_server *server = NFS_SERVER(inode); 6405 struct nfs_client *clp = server->nfs_client; 6406 const struct nfs4_mig_recovery_ops *ops = 6407 clp->cl_mvops->mig_recovery_ops; 6408 struct nfs4_exception exception = { }; 6409 int status; 6410 6411 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 6412 (unsigned long long)server->fsid.major, 6413 (unsigned long long)server->fsid.minor, 6414 clp->cl_hostname); 6415 nfs_display_fhandle(NFS_FH(inode), __func__); 6416 6417 do { 6418 status = ops->get_locations(inode, locations, page, cred); 6419 if (status != -NFS4ERR_DELAY) 6420 break; 6421 nfs4_handle_exception(server, status, &exception); 6422 } while (exception.retry); 6423 return status; 6424} 6425 6426/* 6427 * This operation also signals the server that this client is 6428 * performing "lease moved" recovery. The server can stop 6429 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation 6430 * is appended to this compound to identify the client ID which is 6431 * performing recovery. 6432 */ 6433static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6434{ 6435 struct nfs_server *server = NFS_SERVER(inode); 6436 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 6437 struct rpc_clnt *clnt = server->client; 6438 struct nfs4_fsid_present_arg args = { 6439 .fh = NFS_FH(inode), 6440 .clientid = clp->cl_clientid, 6441 .renew = 1, /* append RENEW */ 6442 }; 6443 struct nfs4_fsid_present_res res = { 6444 .renew = 1, 6445 }; 6446 struct rpc_message msg = { 6447 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 6448 .rpc_argp = &args, 6449 .rpc_resp = &res, 6450 .rpc_cred = cred, 6451 }; 6452 unsigned long now = jiffies; 6453 int status; 6454 6455 res.fh = nfs_alloc_fhandle(); 6456 if (res.fh == NULL) 6457 return -ENOMEM; 6458 6459 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6460 nfs4_set_sequence_privileged(&args.seq_args); 6461 status = nfs4_call_sync_sequence(clnt, server, &msg, 6462 &args.seq_args, &res.seq_res); 6463 nfs_free_fhandle(res.fh); 6464 if (status) 6465 return status; 6466 6467 do_renew_lease(clp, now); 6468 return 0; 6469} 6470 6471#ifdef CONFIG_NFS_V4_1 6472 6473/* 6474 * This operation also signals the server that this client is 6475 * performing "lease moved" recovery. The server can stop asserting 6476 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing 6477 * this operation is identified in the SEQUENCE operation in this 6478 * compound. 
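 * If the reply shows the server still asserting SEQ4_STATUS_LEASE_MOVED
 * in the SEQUENCE status flags, the call below returns
 * -NFS4ERR_LEASE_MOVED to its caller.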
6479 */ 6480 static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6481 { 6482 struct nfs_server *server = NFS_SERVER(inode); 6483 struct rpc_clnt *clnt = server->client; 6484 struct nfs4_fsid_present_arg args = { 6485 .fh = NFS_FH(inode), 6486 }; 6487 struct nfs4_fsid_present_res res = { 6488 }; 6489 struct rpc_message msg = { 6490 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 6491 .rpc_argp = &args, 6492 .rpc_resp = &res, 6493 .rpc_cred = cred, 6494 }; 6495 int status; 6496 6497 res.fh = nfs_alloc_fhandle(); 6498 if (res.fh == NULL) 6499 return -ENOMEM; 6500 6501 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6502 nfs4_set_sequence_privileged(&args.seq_args); 6503 status = nfs4_call_sync_sequence(clnt, server, &msg, 6504 &args.seq_args, &res.seq_res); 6505 nfs_free_fhandle(res.fh); 6506 if (status == NFS4_OK && 6507 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 6508 status = -NFS4ERR_LEASE_MOVED; 6509 return status; 6510} 6511 6512#endif /* CONFIG_NFS_V4_1 */ 6513 6514/** 6515 * nfs4_proc_fsid_present - Is this FSID present or absent on server? 6516 * @inode: inode on FSID to check 6517 * @cred: credential to use for this operation 6518 * 6519 * Server indicates whether the FSID is present, moved, or not 6520 * recognized. This operation is necessary to clear a LEASE_MOVED 6521 * condition for this client ID. 6522 * 6523 * Returns NFS4_OK if the FSID is present on this server, 6524 * -NFS4ERR_MOVED if the FSID is no longer present, a negative 6525 * NFS4ERR code if some error occurred on the server, or a 6526 * negative errno if a local failure occurred. 6527 */ 6528int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6529{ 6530 struct nfs_server *server = NFS_SERVER(inode); 6531 struct nfs_client *clp = server->nfs_client; 6532 const struct nfs4_mig_recovery_ops *ops = 6533 clp->cl_mvops->mig_recovery_ops; 6534 struct nfs4_exception exception = { }; 6535 int status; 6536 6537 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 6538 (unsigned long long)server->fsid.major, 6539 (unsigned long long)server->fsid.minor, 6540 clp->cl_hostname); 6541 nfs_display_fhandle(NFS_FH(inode), __func__); 6542 6543 do { 6544 status = ops->fsid_present(inode, cred); 6545 if (status != -NFS4ERR_DELAY) 6546 break; 6547 nfs4_handle_exception(server, status, &exception); 6548 } while (exception.retry); 6549 return status; 6550} 6551 6552/** 6553 * If 'use_integrity' is true and the state management nfs_client 6554 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient 6555 * and the machine credential as per RFC3530bis and RFC5661 Security 6556 * Considerations sections. Otherwise, just use the user cred with the 6557 * filesystem's rpc_client.
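 * The wrapper nfs4_proc_secinfo() below tries the integrity-protected
 * variant first and falls back to the user credential when that path is
 * unavailable or the server answers NFS4ERR_WRONGSEC.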
6558 */ 6559static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity) 6560{ 6561 int status; 6562 struct nfs4_secinfo_arg args = { 6563 .dir_fh = NFS_FH(dir), 6564 .name = name, 6565 }; 6566 struct nfs4_secinfo_res res = { 6567 .flavors = flavors, 6568 }; 6569 struct rpc_message msg = { 6570 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 6571 .rpc_argp = &args, 6572 .rpc_resp = &res, 6573 }; 6574 struct rpc_clnt *clnt = NFS_SERVER(dir)->client; 6575 struct rpc_cred *cred = NULL; 6576 6577 if (use_integrity) { 6578 clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient; 6579 cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client); 6580 msg.rpc_cred = cred; 6581 } 6582 6583 dprintk("NFS call secinfo %s\n", name->name); 6584 6585 nfs4_state_protect(NFS_SERVER(dir)->nfs_client, 6586 NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg); 6587 6588 status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args, 6589 &res.seq_res, 0); 6590 dprintk("NFS reply secinfo: %d\n", status); 6591 6592 if (cred) 6593 put_rpccred(cred); 6594 6595 return status; 6596} 6597 6598int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 6599 struct nfs4_secinfo_flavors *flavors) 6600{ 6601 struct nfs4_exception exception = { }; 6602 int err; 6603 do { 6604 err = -NFS4ERR_WRONGSEC; 6605 6606 /* try to use integrity protection with machine cred */ 6607 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client)) 6608 err = _nfs4_proc_secinfo(dir, name, flavors, true); 6609 6610 /* 6611 * if unable to use integrity protection, or SECINFO with 6612 * integrity protection returns NFS4ERR_WRONGSEC (which is 6613 * disallowed by spec, but exists in deployed servers) use 6614 * the current filesystem's rpc_client and the user cred. 6615 */ 6616 if (err == -NFS4ERR_WRONGSEC) 6617 err = _nfs4_proc_secinfo(dir, name, flavors, false); 6618 6619 trace_nfs4_secinfo(dir, name, err); 6620 err = nfs4_handle_exception(NFS_SERVER(dir), err, 6621 &exception); 6622 } while (exception.retry); 6623 return err; 6624} 6625 6626#ifdef CONFIG_NFS_V4_1 6627/* 6628 * Check the exchange flags returned by the server for invalid flags, having 6629 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or 6630 * DS flags set. 6631 */ 6632static int nfs4_check_cl_exchange_flags(u32 flags) 6633{ 6634 if (flags & ~EXCHGID4_FLAG_MASK_R) 6635 goto out_inval; 6636 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 6637 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 6638 goto out_inval; 6639 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 6640 goto out_inval; 6641 return NFS_OK; 6642out_inval: 6643 return -NFS4ERR_INVAL; 6644} 6645 6646static bool 6647nfs41_same_server_scope(struct nfs41_server_scope *a, 6648 struct nfs41_server_scope *b) 6649{ 6650 if (a->server_scope_sz == b->server_scope_sz && 6651 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0) 6652 return true; 6653 6654 return false; 6655} 6656 6657/* 6658 * nfs4_proc_bind_conn_to_session() 6659 * 6660 * The 4.1 client currently uses the same TCP connection for the 6661 * fore and backchannel. 
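 * The request asks for NFS4_CDFC4_FORE_OR_BOTH (downgraded to the fore
 * channel only when the session has no backchannel), and the reply's
 * session ID, channel direction and RDMA mode are sanity-checked before
 * the binding is accepted.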
6662 */ 6663int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred) 6664{ 6665 int status; 6666 struct nfs41_bind_conn_to_session_args args = { 6667 .client = clp, 6668 .dir = NFS4_CDFC4_FORE_OR_BOTH, 6669 }; 6670 struct nfs41_bind_conn_to_session_res res; 6671 struct rpc_message msg = { 6672 .rpc_proc = 6673 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 6674 .rpc_argp = &args, 6675 .rpc_resp = &res, 6676 .rpc_cred = cred, 6677 }; 6678 6679 dprintk("--> %s\n", __func__); 6680 6681 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id); 6682 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) 6683 args.dir = NFS4_CDFC4_FORE; 6684 6685 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 6686 trace_nfs4_bind_conn_to_session(clp, status); 6687 if (status == 0) { 6688 if (memcmp(res.sessionid.data, 6689 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 6690 dprintk("NFS: %s: Session ID mismatch\n", __func__); 6691 status = -EIO; 6692 goto out; 6693 } 6694 if ((res.dir & args.dir) != res.dir || res.dir == 0) { 6695 dprintk("NFS: %s: Unexpected direction from server\n", 6696 __func__); 6697 status = -EIO; 6698 goto out; 6699 } 6700 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) { 6701 dprintk("NFS: %s: Server returned RDMA mode = true\n", 6702 __func__); 6703 status = -EIO; 6704 goto out; 6705 } 6706 } 6707out: 6708 dprintk("<-- %s status= %d\n", __func__, status); 6709 return status; 6710} 6711 6712/* 6713 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map 6714 * and operations we'd like to see to enable certain features in the allow map 6715 */ 6716static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = { 6717 .how = SP4_MACH_CRED, 6718 .enforce.u.words = { 6719 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 6720 1 << (OP_EXCHANGE_ID - 32) | 6721 1 << (OP_CREATE_SESSION - 32) | 6722 1 << (OP_DESTROY_SESSION - 32) | 6723 1 << (OP_DESTROY_CLIENTID - 32) 6724 }, 6725 .allow.u.words = { 6726 [0] = 1 << (OP_CLOSE) | 6727 1 << (OP_LOCKU) | 6728 1 << (OP_COMMIT), 6729 [1] = 1 << (OP_SECINFO - 32) | 6730 1 << (OP_SECINFO_NO_NAME - 32) | 6731 1 << (OP_TEST_STATEID - 32) | 6732 1 << (OP_FREE_STATEID - 32) | 6733 1 << (OP_WRITE - 32) 6734 } 6735}; 6736 6737/* 6738 * Select the state protection mode for client `clp' given the server results 6739 * from exchange_id in `sp'. 6740 * 6741 * Returns 0 on success, negative errno otherwise. 
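 * SP4_MACH_CRED is only accepted when the server's enforce list stays
 * within the operations in supported_enforce below; entries in the allow
 * list then switch on the matching NFS_SP4_MACH_CRED_* flags in
 * clp->cl_sp4_flags.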
6742 */ 6743static int nfs4_sp4_select_mode(struct nfs_client *clp, 6744 struct nfs41_state_protection *sp) 6745{ 6746 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = { 6747 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 6748 1 << (OP_EXCHANGE_ID - 32) | 6749 1 << (OP_CREATE_SESSION - 32) | 6750 1 << (OP_DESTROY_SESSION - 32) | 6751 1 << (OP_DESTROY_CLIENTID - 32) 6752 }; 6753 unsigned int i; 6754 6755 if (sp->how == SP4_MACH_CRED) { 6756 /* Print state protect result */ 6757 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n"); 6758 for (i = 0; i <= LAST_NFS4_OP; i++) { 6759 if (test_bit(i, sp->enforce.u.longs)) 6760 dfprintk(MOUNT, " enforce op %d\n", i); 6761 if (test_bit(i, sp->allow.u.longs)) 6762 dfprintk(MOUNT, " allow op %d\n", i); 6763 } 6764 6765 /* make sure nothing is on enforce list that isn't supported */ 6766 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) { 6767 if (sp->enforce.u.words[i] & ~supported_enforce[i]) { 6768 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 6769 return -EINVAL; 6770 } 6771 } 6772 6773 /* 6774 * Minimal mode - state operations are allowed to use machine 6775 * credential. Note this already happens by default, so the 6776 * client doesn't have to do anything more than the negotiation. 6777 * 6778 * NOTE: we don't care if EXCHANGE_ID is in the list - 6779 * we're already using the machine cred for exchange_id 6780 * and will never use a different cred. 6781 */ 6782 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) && 6783 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) && 6784 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) && 6785 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) { 6786 dfprintk(MOUNT, "sp4_mach_cred:\n"); 6787 dfprintk(MOUNT, " minimal mode enabled\n"); 6788 set_bit(NFS_SP4_MACH_CRED_MINIMAL, &clp->cl_sp4_flags); 6789 } else { 6790 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 6791 return -EINVAL; 6792 } 6793 6794 if (test_bit(OP_CLOSE, sp->allow.u.longs) && 6795 test_bit(OP_LOCKU, sp->allow.u.longs)) { 6796 dfprintk(MOUNT, " cleanup mode enabled\n"); 6797 set_bit(NFS_SP4_MACH_CRED_CLEANUP, &clp->cl_sp4_flags); 6798 } 6799 6800 if (test_bit(OP_SECINFO, sp->allow.u.longs) && 6801 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) { 6802 dfprintk(MOUNT, " secinfo mode enabled\n"); 6803 set_bit(NFS_SP4_MACH_CRED_SECINFO, &clp->cl_sp4_flags); 6804 } 6805 6806 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) && 6807 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) { 6808 dfprintk(MOUNT, " stateid mode enabled\n"); 6809 set_bit(NFS_SP4_MACH_CRED_STATEID, &clp->cl_sp4_flags); 6810 } 6811 6812 if (test_bit(OP_WRITE, sp->allow.u.longs)) { 6813 dfprintk(MOUNT, " write mode enabled\n"); 6814 set_bit(NFS_SP4_MACH_CRED_WRITE, &clp->cl_sp4_flags); 6815 } 6816 6817 if (test_bit(OP_COMMIT, sp->allow.u.longs)) { 6818 dfprintk(MOUNT, " commit mode enabled\n"); 6819 set_bit(NFS_SP4_MACH_CRED_COMMIT, &clp->cl_sp4_flags); 6820 } 6821 } 6822 6823 return 0; 6824} 6825 6826/* 6827 * _nfs4_proc_exchange_id() 6828 * 6829 * Wrapper for EXCHANGE_ID operation. 
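 * sp4_how selects the requested state protection (SP4_NONE or
 * SP4_MACH_CRED). On success the clientid, exchange flags, server owner,
 * server scope and implementation id from the reply are cached in the
 * nfs_client.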
6830 */ 6831static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, 6832 u32 sp4_how) 6833{ 6834 nfs4_verifier verifier; 6835 struct nfs41_exchange_id_args args = { 6836 .verifier = &verifier, 6837 .client = clp, 6838#ifdef CONFIG_NFS_V4_1_MIGRATION 6839 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 6840 EXCHGID4_FLAG_BIND_PRINC_STATEID | 6841 EXCHGID4_FLAG_SUPP_MOVED_MIGR, 6842#else 6843 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 6844 EXCHGID4_FLAG_BIND_PRINC_STATEID, 6845#endif 6846 }; 6847 struct nfs41_exchange_id_res res = { 6848 0 6849 }; 6850 int status; 6851 struct rpc_message msg = { 6852 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 6853 .rpc_argp = &args, 6854 .rpc_resp = &res, 6855 .rpc_cred = cred, 6856 }; 6857 6858 nfs4_init_boot_verifier(clp, &verifier); 6859 args.id_len = nfs4_init_uniform_client_string(clp, args.id, 6860 sizeof(args.id)); 6861 dprintk("NFS call exchange_id auth=%s, '%.*s'\n", 6862 clp->cl_rpcclient->cl_auth->au_ops->au_name, 6863 args.id_len, args.id); 6864 6865 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 6866 GFP_NOFS); 6867 if (unlikely(res.server_owner == NULL)) { 6868 status = -ENOMEM; 6869 goto out; 6870 } 6871 6872 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 6873 GFP_NOFS); 6874 if (unlikely(res.server_scope == NULL)) { 6875 status = -ENOMEM; 6876 goto out_server_owner; 6877 } 6878 6879 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 6880 if (unlikely(res.impl_id == NULL)) { 6881 status = -ENOMEM; 6882 goto out_server_scope; 6883 } 6884 6885 switch (sp4_how) { 6886 case SP4_NONE: 6887 args.state_protect.how = SP4_NONE; 6888 break; 6889 6890 case SP4_MACH_CRED: 6891 args.state_protect = nfs4_sp4_mach_cred_request; 6892 break; 6893 6894 default: 6895 /* unsupported! 
*/ 6896 WARN_ON_ONCE(1); 6897 status = -EINVAL; 6898 goto out_server_scope; 6899 } 6900 6901 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 6902 trace_nfs4_exchange_id(clp, status); 6903 if (status == 0) 6904 status = nfs4_check_cl_exchange_flags(res.flags); 6905 6906 if (status == 0) 6907 status = nfs4_sp4_select_mode(clp, &res.state_protect); 6908 6909 if (status == 0) { 6910 clp->cl_clientid = res.clientid; 6911 clp->cl_exchange_flags = res.flags; 6912 /* Client ID is not confirmed */ 6913 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) { 6914 clear_bit(NFS4_SESSION_ESTABLISHED, 6915 &clp->cl_session->session_state); 6916 clp->cl_seqid = res.seqid; 6917 } 6918 6919 kfree(clp->cl_serverowner); 6920 clp->cl_serverowner = res.server_owner; 6921 res.server_owner = NULL; 6922 6923 /* use the most recent implementation id */ 6924 kfree(clp->cl_implid); 6925 clp->cl_implid = res.impl_id; 6926 6927 if (clp->cl_serverscope != NULL && 6928 !nfs41_same_server_scope(clp->cl_serverscope, 6929 res.server_scope)) { 6930 dprintk("%s: server_scope mismatch detected\n", 6931 __func__); 6932 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 6933 kfree(clp->cl_serverscope); 6934 clp->cl_serverscope = NULL; 6935 } 6936 6937 if (clp->cl_serverscope == NULL) { 6938 clp->cl_serverscope = res.server_scope; 6939 goto out; 6940 } 6941 } else 6942 kfree(res.impl_id); 6943 6944out_server_owner: 6945 kfree(res.server_owner); 6946out_server_scope: 6947 kfree(res.server_scope); 6948out: 6949 if (clp->cl_implid != NULL) 6950 dprintk("NFS reply exchange_id: Server Implementation ID: " 6951 "domain: %s, name: %s, date: %llu,%u\n", 6952 clp->cl_implid->domain, clp->cl_implid->name, 6953 clp->cl_implid->date.seconds, 6954 clp->cl_implid->date.nseconds); 6955 dprintk("NFS reply exchange_id: %d\n", status); 6956 return status; 6957} 6958 6959/* 6960 * nfs4_proc_exchange_id() 6961 * 6962 * Returns zero, a negative errno, or a negative NFS4ERR status code. 6963 * 6964 * Since the clientid has expired, all compounds using sessions 6965 * associated with the stale clientid will be returning 6966 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 6967 * be in some phase of session reset. 6968 * 6969 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used. 
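 * If that negotiation fails, the EXCHANGE_ID is simply retried with
 * SP4_NONE.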
6970 */ 6971int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) 6972{ 6973 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor; 6974 int status; 6975 6976 /* try SP4_MACH_CRED if krb5i/p */ 6977 if (authflavor == RPC_AUTH_GSS_KRB5I || 6978 authflavor == RPC_AUTH_GSS_KRB5P) { 6979 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED); 6980 if (!status) 6981 return 0; 6982 } 6983 6984 /* try SP4_NONE */ 6985 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE); 6986} 6987 6988static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, 6989 struct rpc_cred *cred) 6990{ 6991 struct rpc_message msg = { 6992 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID], 6993 .rpc_argp = clp, 6994 .rpc_cred = cred, 6995 }; 6996 int status; 6997 6998 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 6999 trace_nfs4_destroy_clientid(clp, status); 7000 if (status) 7001 dprintk("NFS: Got error %d from the server %s on " 7002 "DESTROY_CLIENTID.", status, clp->cl_hostname); 7003 return status; 7004} 7005 7006static int nfs4_proc_destroy_clientid(struct nfs_client *clp, 7007 struct rpc_cred *cred) 7008{ 7009 unsigned int loop; 7010 int ret; 7011 7012 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { 7013 ret = _nfs4_proc_destroy_clientid(clp, cred); 7014 switch (ret) { 7015 case -NFS4ERR_DELAY: 7016 case -NFS4ERR_CLIENTID_BUSY: 7017 ssleep(1); 7018 break; 7019 default: 7020 return ret; 7021 } 7022 } 7023 return 0; 7024} 7025 7026int nfs4_destroy_clientid(struct nfs_client *clp) 7027{ 7028 struct rpc_cred *cred; 7029 int ret = 0; 7030 7031 if (clp->cl_mvops->minor_version < 1) 7032 goto out; 7033 if (clp->cl_exchange_flags == 0) 7034 goto out; 7035 if (clp->cl_preserve_clid) 7036 goto out; 7037 cred = nfs4_get_clid_cred(clp); 7038 ret = nfs4_proc_destroy_clientid(clp, cred); 7039 if (cred) 7040 put_rpccred(cred); 7041 switch (ret) { 7042 case 0: 7043 case -NFS4ERR_STALE_CLIENTID: 7044 clp->cl_exchange_flags = 0; 7045 } 7046out: 7047 return ret; 7048} 7049 7050struct nfs4_get_lease_time_data { 7051 struct nfs4_get_lease_time_args *args; 7052 struct nfs4_get_lease_time_res *res; 7053 struct nfs_client *clp; 7054}; 7055 7056static void nfs4_get_lease_time_prepare(struct rpc_task *task, 7057 void *calldata) 7058{ 7059 struct nfs4_get_lease_time_data *data = 7060 (struct nfs4_get_lease_time_data *)calldata; 7061 7062 dprintk("--> %s\n", __func__); 7063 /* just setup sequence, do not trigger session recovery 7064 since we're invoked within one */ 7065 nfs41_setup_sequence(data->clp->cl_session, 7066 &data->args->la_seq_args, 7067 &data->res->lr_seq_res, 7068 task); 7069 dprintk("<-- %s\n", __func__); 7070} 7071 7072/* 7073 * Called from nfs4_state_manager thread for session setup, so don't recover 7074 * from sequence operation or clientid errors. 
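 * NFS4ERR_DELAY and NFS4ERR_GRACE are handled locally by delaying for
 * NFS4_POLL_RETRY_MIN and restarting the call.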
7075 */ 7076static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 7077{ 7078 struct nfs4_get_lease_time_data *data = 7079 (struct nfs4_get_lease_time_data *)calldata; 7080 7081 dprintk("--> %s\n", __func__); 7082 if (!nfs41_sequence_done(task, &data->res->lr_seq_res)) 7083 return; 7084 switch (task->tk_status) { 7085 case -NFS4ERR_DELAY: 7086 case -NFS4ERR_GRACE: 7087 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); 7088 rpc_delay(task, NFS4_POLL_RETRY_MIN); 7089 task->tk_status = 0; 7090 /* fall through */ 7091 case -NFS4ERR_RETRY_UNCACHED_REP: 7092 rpc_restart_call_prepare(task); 7093 return; 7094 } 7095 dprintk("<-- %s\n", __func__); 7096} 7097 7098static const struct rpc_call_ops nfs4_get_lease_time_ops = { 7099 .rpc_call_prepare = nfs4_get_lease_time_prepare, 7100 .rpc_call_done = nfs4_get_lease_time_done, 7101}; 7102 7103int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 7104{ 7105 struct rpc_task *task; 7106 struct nfs4_get_lease_time_args args; 7107 struct nfs4_get_lease_time_res res = { 7108 .lr_fsinfo = fsinfo, 7109 }; 7110 struct nfs4_get_lease_time_data data = { 7111 .args = &args, 7112 .res = &res, 7113 .clp = clp, 7114 }; 7115 struct rpc_message msg = { 7116 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 7117 .rpc_argp = &args, 7118 .rpc_resp = &res, 7119 }; 7120 struct rpc_task_setup task_setup = { 7121 .rpc_client = clp->cl_rpcclient, 7122 .rpc_message = &msg, 7123 .callback_ops = &nfs4_get_lease_time_ops, 7124 .callback_data = &data, 7125 .flags = RPC_TASK_TIMEOUT, 7126 }; 7127 int status; 7128 7129 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0); 7130 nfs4_set_sequence_privileged(&args.la_seq_args); 7131 dprintk("--> %s\n", __func__); 7132 task = rpc_run_task(&task_setup); 7133 7134 if (IS_ERR(task)) 7135 status = PTR_ERR(task); 7136 else { 7137 status = task->tk_status; 7138 rpc_put_task(task); 7139 } 7140 dprintk("<-- %s return %d\n", __func__, status); 7141 7142 return status; 7143} 7144 7145/* 7146 * Initialize the values to be used by the client in CREATE_SESSION 7147 * If nfs4_init_session set the fore channel request and response sizes, 7148 * use them. 7149 * 7150 * Set the back channel max_resp_sz_cached to zero to force the client to 7151 * always set csa_cachethis to FALSE because the current implementation 7152 * of the back channel DRC only supports caching the CB_SEQUENCE operation. 
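 * The fore channel is sized for NFS_MAX_FILE_IO_SIZE plus the encoded
 * request/reply overheads; the back channel is limited to one page per
 * message and a single slot.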
7153 */ 7154static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args) 7155{ 7156 unsigned int max_rqst_sz, max_resp_sz; 7157 7158 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead; 7159 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead; 7160 7161 /* Fore channel attributes */ 7162 args->fc_attrs.max_rqst_sz = max_rqst_sz; 7163 args->fc_attrs.max_resp_sz = max_resp_sz; 7164 args->fc_attrs.max_ops = NFS4_MAX_OPS; 7165 args->fc_attrs.max_reqs = max_session_slots; 7166 7167 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " 7168 "max_ops=%u max_reqs=%u\n", 7169 __func__, 7170 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, 7171 args->fc_attrs.max_ops, args->fc_attrs.max_reqs); 7172 7173 /* Back channel attributes */ 7174 args->bc_attrs.max_rqst_sz = PAGE_SIZE; 7175 args->bc_attrs.max_resp_sz = PAGE_SIZE; 7176 args->bc_attrs.max_resp_sz_cached = 0; 7177 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; 7178 args->bc_attrs.max_reqs = 1; 7179 7180 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " 7181 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", 7182 __func__, 7183 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, 7184 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, 7185 args->bc_attrs.max_reqs); 7186} 7187 7188static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, 7189 struct nfs41_create_session_res *res) 7190{ 7191 struct nfs4_channel_attrs *sent = &args->fc_attrs; 7192 struct nfs4_channel_attrs *rcvd = &res->fc_attrs; 7193 7194 if (rcvd->max_resp_sz > sent->max_resp_sz) 7195 return -EINVAL; 7196 /* 7197 * Our requested max_ops is the minimum we need; we're not 7198 * prepared to break up compounds into smaller pieces than that. 7199 * So, no point even trying to continue if the server won't 7200 * cooperate: 7201 */ 7202 if (rcvd->max_ops < sent->max_ops) 7203 return -EINVAL; 7204 if (rcvd->max_reqs == 0) 7205 return -EINVAL; 7206 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) 7207 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; 7208 return 0; 7209} 7210 7211static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, 7212 struct nfs41_create_session_res *res) 7213{ 7214 struct nfs4_channel_attrs *sent = &args->bc_attrs; 7215 struct nfs4_channel_attrs *rcvd = &res->bc_attrs; 7216 7217 if (!(res->flags & SESSION4_BACK_CHAN)) 7218 goto out; 7219 if (rcvd->max_rqst_sz > sent->max_rqst_sz) 7220 return -EINVAL; 7221 if (rcvd->max_resp_sz < sent->max_resp_sz) 7222 return -EINVAL; 7223 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) 7224 return -EINVAL; 7225 /* These would render the backchannel useless: */ 7226 if (rcvd->max_ops != sent->max_ops) 7227 return -EINVAL; 7228 if (rcvd->max_reqs != sent->max_reqs) 7229 return -EINVAL; 7230out: 7231 return 0; 7232} 7233 7234static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, 7235 struct nfs41_create_session_res *res) 7236{ 7237 int ret; 7238 7239 ret = nfs4_verify_fore_channel_attrs(args, res); 7240 if (ret) 7241 return ret; 7242 return nfs4_verify_back_channel_attrs(args, res); 7243} 7244 7245static void nfs4_update_session(struct nfs4_session *session, 7246 struct nfs41_create_session_res *res) 7247{ 7248 nfs4_copy_sessionid(&session->sess_id, &res->sessionid); 7249 /* Mark client id and session as being confirmed */ 7250 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 7251 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state); 7252 session->flags = res->flags; 7253 
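	/* Adopt the channel attributes the server actually granted */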
memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); 7254 if (res->flags & SESSION4_BACK_CHAN) 7255 memcpy(&session->bc_attrs, &res->bc_attrs, 7256 sizeof(session->bc_attrs)); 7257} 7258 7259static int _nfs4_proc_create_session(struct nfs_client *clp, 7260 struct rpc_cred *cred) 7261{ 7262 struct nfs4_session *session = clp->cl_session; 7263 struct nfs41_create_session_args args = { 7264 .client = clp, 7265 .clientid = clp->cl_clientid, 7266 .seqid = clp->cl_seqid, 7267 .cb_program = NFS4_CALLBACK, 7268 }; 7269 struct nfs41_create_session_res res; 7270 7271 struct rpc_message msg = { 7272 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], 7273 .rpc_argp = &args, 7274 .rpc_resp = &res, 7275 .rpc_cred = cred, 7276 }; 7277 int status; 7278 7279 nfs4_init_channel_attrs(&args); 7280 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); 7281 7282 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7283 trace_nfs4_create_session(clp, status); 7284 7285 if (!status) { 7286 /* Verify the session's negotiated channel_attrs values */ 7287 status = nfs4_verify_channel_attrs(&args, &res); 7288 /* Increment the clientid slot sequence id */ 7289 if (clp->cl_seqid == res.seqid) 7290 clp->cl_seqid++; 7291 if (status) 7292 goto out; 7293 nfs4_update_session(session, &res); 7294 } 7295out: 7296 return status; 7297} 7298 7299/* 7300 * Issues a CREATE_SESSION operation to the server. 7301 * It is the responsibility of the caller to verify the session is 7302 * expired before calling this routine. 7303 */ 7304int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred) 7305{ 7306 int status; 7307 unsigned *ptr; 7308 struct nfs4_session *session = clp->cl_session; 7309 7310 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 7311 7312 status = _nfs4_proc_create_session(clp, cred); 7313 if (status) 7314 goto out; 7315 7316 /* Init or reset the session slot tables */ 7317 status = nfs4_setup_session_slot_tables(session); 7318 dprintk("slot table setup returned %d\n", status); 7319 if (status) 7320 goto out; 7321 7322 ptr = (unsigned *)&session->sess_id.data[0]; 7323 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, 7324 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); 7325out: 7326 dprintk("<-- %s\n", __func__); 7327 return status; 7328} 7329 7330/* 7331 * Issue the over-the-wire RPC DESTROY_SESSION. 7332 * The caller must serialize access to this routine. 7333 */ 7334int nfs4_proc_destroy_session(struct nfs4_session *session, 7335 struct rpc_cred *cred) 7336{ 7337 struct rpc_message msg = { 7338 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION], 7339 .rpc_argp = session, 7340 .rpc_cred = cred, 7341 }; 7342 int status = 0; 7343 7344 dprintk("--> nfs4_proc_destroy_session\n"); 7345 7346 /* session is still being setup */ 7347 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state)) 7348 return 0; 7349 7350 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7351 trace_nfs4_destroy_session(session->clp, status); 7352 7353 if (status) 7354 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " 7355 "Session has been destroyed regardless...\n", status); 7356 7357 dprintk("<-- nfs4_proc_destroy_session\n"); 7358 return status; 7359} 7360 7361/* 7362 * Renew the cl_session lease. 
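 * The SEQUENCE call is driven by the callbacks below; when the task is
 * released, nfs41_sequence_release() schedules the next renewal as long
 * as the nfs_client still has other users.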
7363 */ 7364struct nfs4_sequence_data { 7365 struct nfs_client *clp; 7366 struct nfs4_sequence_args args; 7367 struct nfs4_sequence_res res; 7368}; 7369 7370static void nfs41_sequence_release(void *data) 7371{ 7372 struct nfs4_sequence_data *calldata = data; 7373 struct nfs_client *clp = calldata->clp; 7374 7375 if (atomic_read(&clp->cl_count) > 1) 7376 nfs4_schedule_state_renewal(clp); 7377 nfs_put_client(clp); 7378 kfree(calldata); 7379} 7380 7381static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) 7382{ 7383 switch(task->tk_status) { 7384 case -NFS4ERR_DELAY: 7385 rpc_delay(task, NFS4_POLL_RETRY_MAX); 7386 return -EAGAIN; 7387 default: 7388 nfs4_schedule_lease_recovery(clp); 7389 } 7390 return 0; 7391} 7392 7393static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 7394{ 7395 struct nfs4_sequence_data *calldata = data; 7396 struct nfs_client *clp = calldata->clp; 7397 7398 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) 7399 return; 7400 7401 trace_nfs4_sequence(clp, task->tk_status); 7402 if (task->tk_status < 0) { 7403 dprintk("%s ERROR %d\n", __func__, task->tk_status); 7404 if (atomic_read(&clp->cl_count) == 1) 7405 goto out; 7406 7407 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { 7408 rpc_restart_call_prepare(task); 7409 return; 7410 } 7411 } 7412 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 7413out: 7414 dprintk("<-- %s\n", __func__); 7415} 7416 7417static void nfs41_sequence_prepare(struct rpc_task *task, void *data) 7418{ 7419 struct nfs4_sequence_data *calldata = data; 7420 struct nfs_client *clp = calldata->clp; 7421 struct nfs4_sequence_args *args; 7422 struct nfs4_sequence_res *res; 7423 7424 args = task->tk_msg.rpc_argp; 7425 res = task->tk_msg.rpc_resp; 7426 7427 nfs41_setup_sequence(clp->cl_session, args, res, task); 7428} 7429 7430static const struct rpc_call_ops nfs41_sequence_ops = { 7431 .rpc_call_done = nfs41_sequence_call_done, 7432 .rpc_call_prepare = nfs41_sequence_prepare, 7433 .rpc_release = nfs41_sequence_release, 7434}; 7435 7436static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, 7437 struct rpc_cred *cred, 7438 bool is_privileged) 7439{ 7440 struct nfs4_sequence_data *calldata; 7441 struct rpc_message msg = { 7442 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], 7443 .rpc_cred = cred, 7444 }; 7445 struct rpc_task_setup task_setup_data = { 7446 .rpc_client = clp->cl_rpcclient, 7447 .rpc_message = &msg, 7448 .callback_ops = &nfs41_sequence_ops, 7449 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT, 7450 }; 7451 7452 if (!atomic_inc_not_zero(&clp->cl_count)) 7453 return ERR_PTR(-EIO); 7454 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 7455 if (calldata == NULL) { 7456 nfs_put_client(clp); 7457 return ERR_PTR(-ENOMEM); 7458 } 7459 nfs4_init_sequence(&calldata->args, &calldata->res, 0); 7460 if (is_privileged) 7461 nfs4_set_sequence_privileged(&calldata->args); 7462 msg.rpc_argp = &calldata->args; 7463 msg.rpc_resp = &calldata->res; 7464 calldata->clp = clp; 7465 task_setup_data.callback_data = calldata; 7466 7467 return rpc_run_task(&task_setup_data); 7468} 7469 7470static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 7471{ 7472 struct rpc_task *task; 7473 int ret = 0; 7474 7475 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 7476 return -EAGAIN; 7477 task = _nfs41_proc_sequence(clp, cred, false); 7478 if (IS_ERR(task)) 7479 ret = PTR_ERR(task); 7480 else 7481 rpc_put_task_async(task); 7482 dprintk("<-- %s 
status=%d\n", __func__, ret); 7483 return ret; 7484} 7485 7486static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) 7487{ 7488 struct rpc_task *task; 7489 int ret; 7490 7491 task = _nfs41_proc_sequence(clp, cred, true); 7492 if (IS_ERR(task)) { 7493 ret = PTR_ERR(task); 7494 goto out; 7495 } 7496 ret = rpc_wait_for_completion_task(task); 7497 if (!ret) { 7498 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp; 7499 7500 if (task->tk_status == 0) 7501 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); 7502 ret = task->tk_status; 7503 } 7504 rpc_put_task(task); 7505out: 7506 dprintk("<-- %s status=%d\n", __func__, ret); 7507 return ret; 7508} 7509 7510struct nfs4_reclaim_complete_data { 7511 struct nfs_client *clp; 7512 struct nfs41_reclaim_complete_args arg; 7513 struct nfs41_reclaim_complete_res res; 7514}; 7515 7516static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) 7517{ 7518 struct nfs4_reclaim_complete_data *calldata = data; 7519 7520 nfs41_setup_sequence(calldata->clp->cl_session, 7521 &calldata->arg.seq_args, 7522 &calldata->res.seq_res, 7523 task); 7524} 7525 7526static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) 7527{ 7528 switch(task->tk_status) { 7529 case 0: 7530 case -NFS4ERR_COMPLETE_ALREADY: 7531 case -NFS4ERR_WRONG_CRED: /* What to do here? */ 7532 break; 7533 case -NFS4ERR_DELAY: 7534 rpc_delay(task, NFS4_POLL_RETRY_MAX); 7535 /* fall through */ 7536 case -NFS4ERR_RETRY_UNCACHED_REP: 7537 return -EAGAIN; 7538 default: 7539 nfs4_schedule_lease_recovery(clp); 7540 } 7541 return 0; 7542} 7543 7544static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) 7545{ 7546 struct nfs4_reclaim_complete_data *calldata = data; 7547 struct nfs_client *clp = calldata->clp; 7548 struct nfs4_sequence_res *res = &calldata->res.seq_res; 7549 7550 dprintk("--> %s\n", __func__); 7551 if (!nfs41_sequence_done(task, res)) 7552 return; 7553 7554 trace_nfs4_reclaim_complete(clp, task->tk_status); 7555 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { 7556 rpc_restart_call_prepare(task); 7557 return; 7558 } 7559 dprintk("<-- %s\n", __func__); 7560} 7561 7562static void nfs4_free_reclaim_complete_data(void *data) 7563{ 7564 struct nfs4_reclaim_complete_data *calldata = data; 7565 7566 kfree(calldata); 7567} 7568 7569static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = { 7570 .rpc_call_prepare = nfs4_reclaim_complete_prepare, 7571 .rpc_call_done = nfs4_reclaim_complete_done, 7572 .rpc_release = nfs4_free_reclaim_complete_data, 7573}; 7574 7575/* 7576 * Issue a global reclaim complete. 
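 * "Global" here means the one_fs argument is left zero, i.e. reclaim is
 * complete for all filesystems on the server; the SEQUENCE for this call
 * is marked privileged (see nfs4_set_sequence_privileged() below).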
7577 */ 7578 static int nfs41_proc_reclaim_complete(struct nfs_client *clp, 7579 struct rpc_cred *cred) 7580 { 7581 struct nfs4_reclaim_complete_data *calldata; 7582 struct rpc_task *task; 7583 struct rpc_message msg = { 7584 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE], 7585 .rpc_cred = cred, 7586 }; 7587 struct rpc_task_setup task_setup_data = { 7588 .rpc_client = clp->cl_rpcclient, 7589 .rpc_message = &msg, 7590 .callback_ops = &nfs4_reclaim_complete_call_ops, 7591 .flags = RPC_TASK_ASYNC, 7592 }; 7593 int status = -ENOMEM; 7594 7595 dprintk("--> %s\n", __func__); 7596 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 7597 if (calldata == NULL) 7598 goto out; 7599 calldata->clp = clp; 7600 calldata->arg.one_fs = 0; 7601 7602 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0); 7603 nfs4_set_sequence_privileged(&calldata->arg.seq_args); 7604 msg.rpc_argp = &calldata->arg; 7605 msg.rpc_resp = &calldata->res; 7606 task_setup_data.callback_data = calldata; 7607 task = rpc_run_task(&task_setup_data); 7608 if (IS_ERR(task)) { 7609 status = PTR_ERR(task); 7610 goto out; 7611 } 7612 status = nfs4_wait_for_completion_rpc_task(task); 7613 if (status == 0) 7614 status = task->tk_status; 7615 rpc_put_task(task); 7616 return 0; 7617out: 7618 dprintk("<-- %s status=%d\n", __func__, status); 7619 return status; 7620} 7621 7622static void 7623nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) 7624{ 7625 struct nfs4_layoutget *lgp = calldata; 7626 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 7627 struct nfs4_session *session = nfs4_get_session(server); 7628 7629 dprintk("--> %s\n", __func__); 7630 /* Note there is a race here, where a CB_LAYOUTRECALL can come in 7631 * right now covering the LAYOUTGET we are about to send. 7632 * However, that is not so catastrophic, and there seems 7633 * to be no way to prevent it completely. 7634 */ 7635 if (nfs41_setup_sequence(session, &lgp->args.seq_args, 7636 &lgp->res.seq_res, task)) 7637 return; 7638 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid, 7639 NFS_I(lgp->args.inode)->layout, 7640 &lgp->args.range, 7641 lgp->args.ctx->state)) { 7642 rpc_exit(task, NFS4_OK); 7643 } 7644} 7645 7646static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) 7647{ 7648 struct nfs4_layoutget *lgp = calldata; 7649 struct inode *inode = lgp->args.inode; 7650 struct nfs_server *server = NFS_SERVER(inode); 7651 struct pnfs_layout_hdr *lo; 7652 struct nfs4_state *state = NULL; 7653 unsigned long timeo, now, giveup; 7654 7655 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status); 7656 7657 if (!nfs41_sequence_done(task, &lgp->res.seq_res)) 7658 goto out; 7659 7660 switch (task->tk_status) { 7661 case 0: 7662 goto out; 7663 /* 7664 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client 7665 * (or clients) writing to the same RAID stripe 7666 */ 7667 case -NFS4ERR_LAYOUTTRYLATER: 7668 /* 7669 * NFS4ERR_RECALLCONFLICT is when there is a conflict with self (must recall 7670 * existing layout before getting a new one). 7671 */ 7672 case -NFS4ERR_RECALLCONFLICT: 7673 timeo = rpc_get_timeout(task->tk_client); 7674 giveup = lgp->args.timestamp + timeo; 7675 now = jiffies; 7676 if (time_after(giveup, now)) { 7677 unsigned long delay; 7678 7679 /* Delay for: 7680 * - Not less than NFS4_POLL_RETRY_MIN.
			 * - One last time a jiffy before we give up
			 * - exponential backoff (time_now minus start_attempt)
			 */
			delay = max_t(unsigned long, NFS4_POLL_RETRY_MIN,
				      min((giveup - now - 1),
					  now - lgp->args.timestamp));

			dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n",
				__func__, delay);
			rpc_delay(task, delay);
			task->tk_status = 0;
			rpc_restart_call_prepare(task);
			goto out; /* Do not call nfs4_async_handle_error() */
		}
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (!lo || list_empty(&lo->plh_segs)) {
			spin_unlock(&inode->i_lock);
			/* If the open stateid was bad, then recover it. */
			state = lgp->args.ctx->state;
		} else {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);

			task->tk_status = 0;
			rpc_restart_call_prepare(task);
		}
	}
	if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN)
		rpc_restart_call_prepare(task);
out:
	dprintk("<-- %s\n", __func__);
}

static size_t max_response_pages(struct nfs_server *server)
{
	u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	return nfs_page_array_len(0, max_resp_sz);
}

static void nfs4_free_pages(struct page **pages, size_t size)
{
	int i;

	if (!pages)
		return;

	for (i = 0; i < size; i++) {
		if (!pages[i])
			break;
		__free_page(pages[i]);
	}
	kfree(pages);
}

static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
{
	struct page **pages;
	int i;

	pages = kcalloc(size, sizeof(struct page *), gfp_flags);
	if (!pages) {
		dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
		return NULL;
	}

	for (i = 0; i < size; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i]) {
			dprintk("%s: failed to allocate page\n", __func__);
			nfs4_free_pages(pages, size);
			return NULL;
		}
	}

	return pages;
}

static void nfs4_layoutget_release(void *calldata)
{
	struct nfs4_layoutget *lgp = calldata;
	struct inode *inode = lgp->args.inode;
	struct nfs_server *server = NFS_SERVER(inode);
	size_t max_pages = max_response_pages(server);

	dprintk("--> %s\n", __func__);
	nfs4_free_pages(lgp->args.layout.pages, max_pages);
	pnfs_put_layout_hdr(NFS_I(inode)->layout);
	put_nfs_open_context(lgp->args.ctx);
	kfree(calldata);
	dprintk("<-- %s\n", __func__);
}

static const struct rpc_call_ops nfs4_layoutget_call_ops = {
	.rpc_call_prepare = nfs4_layoutget_prepare,
	.rpc_call_done = nfs4_layoutget_done,
	.rpc_release = nfs4_layoutget_release,
};
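/*
 * Issue a synchronous LAYOUTGET.  The reply pages are sized from the
 * session's maximum response size; on success the returned layout body
 * is handed to pnfs_layout_process() to instantiate a layout segment.
 */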
struct pnfs_layout_segment *
nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
{
	struct inode *inode = lgp->args.inode;
	struct nfs_server *server = NFS_SERVER(inode);
	size_t max_pages = max_response_pages(server);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
		.rpc_argp = &lgp->args,
		.rpc_resp = &lgp->res,
		.rpc_cred = lgp->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_layoutget_call_ops,
		.callback_data = lgp,
		.flags = RPC_TASK_ASYNC,
	};
	struct pnfs_layout_segment *lseg = NULL;
	int status = 0;

	dprintk("--> %s\n", __func__);

	/* nfs4_layoutget_release calls pnfs_put_layout_hdr */
	pnfs_get_layout_hdr(NFS_I(inode)->layout);

	lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
	if (!lgp->args.layout.pages) {
		nfs4_layoutget_release(lgp);
		return ERR_PTR(-ENOMEM);
	}
	lgp->args.layout.pglen = max_pages * PAGE_SIZE;
	lgp->args.timestamp = jiffies;

	lgp->res.layoutp = &lgp->args.layout;
	lgp->res.seq_res.sr_slot = NULL;
	nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return ERR_CAST(task);
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status == 0)
		status = task->tk_status;
	trace_nfs4_layoutget(lgp->args.ctx,
			&lgp->args.range,
			&lgp->res.range,
			status);
	/* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
	if (status == 0 && lgp->res.layoutp->len)
		lseg = pnfs_layout_process(lgp);
	rpc_put_task(task);
	dprintk("<-- %s status=%d\n", __func__, status);
	if (status)
		return ERR_PTR(status);
	return lseg;
}

static void
nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutreturn *lrp = calldata;

	dprintk("--> %s\n", __func__);
	nfs41_setup_sequence(lrp->clp->cl_session,
			     &lrp->args.seq_args,
			     &lrp->res.seq_res,
			     task);
}

static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutreturn *lrp = calldata;
	struct nfs_server *server;

	dprintk("--> %s\n", __func__);

	if (!nfs41_sequence_done(task, &lrp->res.seq_res))
		return;

	server = NFS_SERVER(lrp->args.inode);
	switch (task->tk_status) {
	default:
		task->tk_status = 0;
	case 0:
		break;
	case -NFS4ERR_DELAY:
		if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
			break;
		rpc_restart_call_prepare(task);
		return;
	}
	dprintk("<-- %s\n", __func__);
}

static void nfs4_layoutreturn_release(void *calldata)
{
	struct nfs4_layoutreturn *lrp = calldata;
	struct pnfs_layout_hdr *lo = lrp->args.layout;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (lrp->res.lrs_present)
		pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
	pnfs_clear_layoutreturn_waitbit(lo);
	clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
	rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
	lo->plh_block_lgets--;
	spin_unlock(&lo->plh_inode->i_lock);
	pnfs_put_layout_hdr(lrp->args.layout);
	nfs_iput_and_deactive(lrp->inode);
	kfree(calldata);
	dprintk("<-- %s\n", __func__);
}

static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
	.rpc_call_prepare = nfs4_layoutreturn_prepare,
	.rpc_call_done = nfs4_layoutreturn_done,
	.rpc_release = nfs4_layoutreturn_release,
};
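/*
 * Send LAYOUTRETURN.  When sync is false the inode is pinned with
 * nfs_igrab_and_active() and the call completes asynchronously; the
 * layout stateid update and cleanup happen in nfs4_layoutreturn_release().
 */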
int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
		.rpc_argp = &lrp->args,
		.rpc_resp = &lrp->res,
		.rpc_cred = lrp->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_SERVER(lrp->args.inode)->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_layoutreturn_call_ops,
		.callback_data = lrp,
	};
	int status = 0;

	dprintk("--> %s\n", __func__);
	if (!sync) {
		lrp->inode = nfs_igrab_and_active(lrp->args.inode);
		if (!lrp->inode) {
			nfs4_layoutreturn_release(lrp);
			return -EAGAIN;
		}
		task_setup_data.flags |= RPC_TASK_ASYNC;
	}
	nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (sync)
		status = task->tk_status;
	trace_nfs4_layoutreturn(lrp->args.inode, status);
	dprintk("<-- %s status=%d\n", __func__, status);
	rpc_put_task(task);
	return status;
}

static int
_nfs4_proc_getdeviceinfo(struct nfs_server *server,
		struct pnfs_device *pdev,
		struct rpc_cred *cred)
{
	struct nfs4_getdeviceinfo_args args = {
		.pdev = pdev,
		.notify_types = NOTIFY_DEVICEID4_CHANGE |
			NOTIFY_DEVICEID4_DELETE,
	};
	struct nfs4_getdeviceinfo_res res = {
		.pdev = pdev,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = cred,
	};
	int status;

	dprintk("--> %s\n", __func__);
	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
	if (res.notification & ~args.notify_types)
		dprintk("%s: unsupported notification\n", __func__);
	if (res.notification != args.notify_types)
		pdev->nocache = 1;

	dprintk("<-- %s status=%d\n", __func__, status);

	return status;
}

int nfs4_proc_getdeviceinfo(struct nfs_server *server,
		struct pnfs_device *pdev,
		struct rpc_cred *cred)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = nfs4_handle_exception(server,
				_nfs4_proc_getdeviceinfo(server, pdev, cred),
				&exception);
	} while (exception.retry);
	return err;
}
EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);

static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct nfs_server *server = NFS_SERVER(data->args.inode);
	struct nfs4_session *session = nfs4_get_session(server);

	nfs41_setup_sequence(session,
			     &data->args.seq_args,
			     &data->res.seq_res,
			     task);
}

static void
nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct nfs_server *server = NFS_SERVER(data->args.inode);

	if (!nfs41_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) { /* Just ignore these failures */
	case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
	case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
	case -NFS4ERR_BADLAYOUT: /* no layout */
	case -NFS4ERR_GRACE: /* loca_reclaim is always false */
		task->tk_status = 0;
	case 0:
		break;
	default:
		if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
			rpc_restart_call_prepare(task);
			return;
		}
	}
}
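/*
 * Clean up after a LAYOUTCOMMIT call: finish per-lseg commit bookkeeping,
 * apply the post-op attributes, and drop the credential and (for async
 * calls) the inode reference taken in nfs4_proc_layoutcommit().
 */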
static void nfs4_layoutcommit_release(void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;

	pnfs_cleanup_layoutcommit(data);
	nfs_post_op_update_inode_force_wcc(data->args.inode,
					   data->res.fattr);
	put_rpccred(data->cred);
	nfs_iput_and_deactive(data->inode);
	kfree(data);
}

static const struct rpc_call_ops nfs4_layoutcommit_ops = {
	.rpc_call_prepare = nfs4_layoutcommit_prepare,
	.rpc_call_done = nfs4_layoutcommit_done,
	.rpc_release = nfs4_layoutcommit_release,
};

int
nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = NFS_CLIENT(data->args.inode),
		.rpc_message = &msg,
		.callback_ops = &nfs4_layoutcommit_ops,
		.callback_data = data,
	};
	struct rpc_task *task;
	int status = 0;

	dprintk("NFS: %4d initiating layoutcommit call. sync %d "
		"lbw: %llu inode %lu\n",
		data->task.tk_pid, sync,
		data->args.lastbytewritten,
		data->args.inode->i_ino);

	if (!sync) {
		data->inode = nfs_igrab_and_active(data->args.inode);
		if (data->inode == NULL) {
			nfs4_layoutcommit_release(data);
			return -EAGAIN;
		}
		task_setup_data.flags = RPC_TASK_ASYNC;
	}
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (sync)
		status = task->tk_status;
	trace_nfs4_layoutcommit(data->args.inode, status);
	dprintk("%s: status %d\n", __func__, status);
	rpc_put_task(task);
	return status;
}

/**
 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
 * possible) as per RFC3530bis and RFC5661 Security Considerations sections
 */
static int
_nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
			    struct nfs_fsinfo *info,
			    struct nfs4_secinfo_flavors *flavors, bool use_integrity)
{
	struct nfs41_secinfo_no_name_args args = {
		.style = SECINFO_STYLE_CURRENT_FH,
	};
	struct nfs4_secinfo_res res = {
		.flavors = flavors,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	struct rpc_clnt *clnt = server->client;
	struct rpc_cred *cred = NULL;
	int status;

	if (use_integrity) {
		clnt = server->nfs_client->cl_rpcclient;
		cred = nfs4_get_clid_cred(server->nfs_client);
		msg.rpc_cred = cred;
	}

	dprintk("--> %s\n", __func__);
	status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
				&res.seq_res, 0);
	dprintk("<-- %s status=%d\n", __func__, status);

	if (cred)
		put_rpccred(cred);

	return status;
}

static int
nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
			   struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		/* first try using integrity protection */
		err = -NFS4ERR_WRONGSEC;

		/* try to use integrity protection with machine cred */
		if (_nfs4_is_integrity_protected(server->nfs_client))
			err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
							  flavors, true);

		/*
		 * if unable to use integrity protection, or SECINFO with
		 * integrity protection returns NFS4ERR_WRONGSEC (which is
		 * disallowed by spec, but exists in deployed servers) use
		 * the current filesystem's rpc_client and the user cred.
		 */
		if (err == -NFS4ERR_WRONGSEC)
			err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
							  flavors, false);

		switch (err) {
		case 0:
		case -NFS4ERR_WRONGSEC:
		case -ENOTSUPP:
			goto out;
		default:
			err = nfs4_handle_exception(server, err, &exception);
		}
	} while (exception.retry);
out:
	return err;
}

static int
nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
		    struct nfs_fsinfo *info)
{
	int err;
	struct page *page;
	rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
	struct nfs4_secinfo_flavors *flavors;
	struct nfs4_secinfo4 *secinfo;
	int i;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto out;
	}

	flavors = page_address(page);
	err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);

	/*
	 * Fall back on "guess and check" method if
	 * the server doesn't support SECINFO_NO_NAME
	 */
	if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
		err = nfs4_find_root_sec(server, fhandle, info);
		goto out_freepage;
	}
	if (err)
		goto out_freepage;

	for (i = 0; i < flavors->num_flavors; i++) {
		secinfo = &flavors->flavors[i];

		switch (secinfo->flavor) {
		case RPC_AUTH_NULL:
		case RPC_AUTH_UNIX:
		case RPC_AUTH_GSS:
			flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
					&secinfo->flavor_info);
			break;
		default:
			flavor = RPC_AUTH_MAXFLAVOR;
			break;
		}

		if (!nfs_auth_info_match(&server->auth_info, flavor))
			flavor = RPC_AUTH_MAXFLAVOR;

		if (flavor != RPC_AUTH_MAXFLAVOR) {
			err = nfs4_lookup_root_sec(server, fhandle,
						   info, flavor);
			if (!err)
				break;
		}
	}

	if (flavor == RPC_AUTH_MAXFLAVOR)
		err = -EPERM;

out_freepage:
	put_page(page);
	if (err == -EACCES)
		return -EPERM;
out:
	return err;
}

static int _nfs41_test_stateid(struct nfs_server *server,
		nfs4_stateid *stateid,
		struct rpc_cred *cred)
{
	int status;
	struct nfs41_test_stateid_args args = {
		.stateid = stateid,
	};
	struct nfs41_test_stateid_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = cred,
	};
	struct rpc_clnt *rpc_client = server->client;

	nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
		&rpc_client, &msg);

	dprintk("NFS call test_stateid %p\n", stateid);
	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
	nfs4_set_sequence_privileged(&args.seq_args);
	status = nfs4_call_sync_sequence(rpc_client, server, &msg,
			&args.seq_args, &res.seq_res);
	if (status != NFS_OK) {
		dprintk("NFS reply test_stateid: failed, %d\n", status);
		return status;
	}
	dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
	return -res.status;
}

/**
 * nfs41_test_stateid - perform a TEST_STATEID operation
 *
 * @server: server / transport on which to perform the operation
 * @stateid: state ID to test
 * @cred: credential
 *
 * Returns NFS_OK if the server recognizes that "stateid" is valid.
 * Otherwise a negative NFS4ERR value is returned if the operation
 * failed or the state ID is not currently valid.
 */
static int nfs41_test_stateid(struct nfs_server *server,
		nfs4_stateid *stateid,
		struct rpc_cred *cred)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs41_test_stateid(server, stateid, cred);
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}

struct nfs_free_stateid_data {
	struct nfs_server *server;
	struct nfs41_free_stateid_args args;
	struct nfs41_free_stateid_res res;
};

static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_free_stateid_data *data = calldata;
	nfs41_setup_sequence(nfs4_get_session(data->server),
			&data->args.seq_args,
			&data->res.seq_res,
			task);
}

static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
{
	struct nfs_free_stateid_data *data = calldata;

	nfs41_sequence_done(task, &data->res.seq_res);

	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
		if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
			rpc_restart_call_prepare(task);
	}
}

static void nfs41_free_stateid_release(void *calldata)
{
	kfree(calldata);
}

static const struct rpc_call_ops nfs41_free_stateid_ops = {
	.rpc_call_prepare = nfs41_free_stateid_prepare,
	.rpc_call_done = nfs41_free_stateid_done,
	.rpc_release = nfs41_free_stateid_release,
};

static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server,
		nfs4_stateid *stateid,
		struct rpc_cred *cred,
		bool privileged)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs41_free_stateid_ops,
		.flags = RPC_TASK_ASYNC,
	};
	struct nfs_free_stateid_data *data;

	nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
		&task_setup.rpc_client, &msg);

	dprintk("NFS call free_stateid %p\n", stateid);
	data = kmalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		return ERR_PTR(-ENOMEM);
	data->server = server;
	nfs4_stateid_copy(&data->args.stateid, stateid);

	task_setup.callback_data = data;

	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
	if (privileged)
		nfs4_set_sequence_privileged(&data->args.seq_args);

	return rpc_run_task(&task_setup);
}

/**
 * nfs41_free_stateid - perform a FREE_STATEID operation
 *
 * @server: server / transport on which to perform the operation
 * @stateid: state ID to release
 * @cred: credential
 *
 * Returns NFS_OK if the server freed "stateid". Otherwise a
 * negative NFS4ERR value is returned.
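 *
 * The sequence arguments are marked privileged so that the call can be
 * made while the session is drained for state recovery.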
 */
static int nfs41_free_stateid(struct nfs_server *server,
		nfs4_stateid *stateid,
		struct rpc_cred *cred)
{
	struct rpc_task *task;
	int ret;

	task = _nfs41_free_stateid(server, stateid, cred, true);
	if (IS_ERR(task))
		return PTR_ERR(task);
	ret = rpc_wait_for_completion_task(task);
	if (!ret)
		ret = task->tk_status;
	rpc_put_task(task);
	return ret;
}

static void
nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
{
	struct rpc_task *task;
	struct rpc_cred *cred = lsp->ls_state->owner->so_cred;

	task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
	nfs4_free_lock_state(server, lsp);
	if (IS_ERR(task))
		return;
	rpc_put_task(task);
}

static bool nfs41_match_stateid(const nfs4_stateid *s1,
		const nfs4_stateid *s2)
{
	if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
		return false;

	if (s1->seqid == s2->seqid)
		return true;
	if (s1->seqid == 0 || s2->seqid == 0)
		return true;

	return false;
}

#endif /* CONFIG_NFS_V4_1 */

static bool nfs4_match_stateid(const nfs4_stateid *s1,
		const nfs4_stateid *s2)
{
	return nfs4_stateid_match(s1, s2);
}


static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
	.state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
	.recover_open = nfs4_open_reclaim,
	.recover_lock = nfs4_lock_reclaim,
	.establish_clid = nfs4_init_clientid,
	.detect_trunking = nfs40_discover_server_trunking,
};

#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
	.state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
	.recover_open = nfs4_open_reclaim,
	.recover_lock = nfs4_lock_reclaim,
	.establish_clid = nfs41_init_clientid,
	.reclaim_complete = nfs41_proc_reclaim_complete,
	.detect_trunking = nfs41_discover_server_trunking,
};
#endif /* CONFIG_NFS_V4_1 */

static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
	.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
	.recover_open = nfs40_open_expired,
	.recover_lock = nfs4_lock_expired,
	.establish_clid = nfs4_init_clientid,
};

#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
	.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
	.recover_open = nfs41_open_expired,
	.recover_lock = nfs41_lock_expired,
	.establish_clid = nfs41_init_clientid,
};
#endif /* CONFIG_NFS_V4_1 */

static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
	.sched_state_renewal = nfs4_proc_async_renew,
	.get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
	.renew_lease = nfs4_proc_renew,
};

#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
	.sched_state_renewal = nfs41_proc_async_sequence,
	.get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
	.renew_lease = nfs4_proc_sequence,
};
#endif
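/*
 * Operations used to recover from filesystem migration: probe the
 * fs_locations attribute and test whether an fsid is still present on
 * the server.
 */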
static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
	.get_locations = _nfs40_proc_get_locations,
	.fsid_present = _nfs40_proc_fsid_present,
};

#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
	.get_locations = _nfs41_proc_get_locations,
	.fsid_present = _nfs41_proc_fsid_present,
};
#endif /* CONFIG_NFS_V4_1 */

static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
	.minor_version = 0,
	.init_caps = NFS_CAP_READDIRPLUS
		| NFS_CAP_ATOMIC_OPEN
		| NFS_CAP_POSIX_LOCK,
	.init_client = nfs40_init_client,
	.shutdown_client = nfs40_shutdown_client,
	.match_stateid = nfs4_match_stateid,
	.find_root_sec = nfs4_find_root_sec,
	.free_lock_state = nfs4_release_lockowner,
	.alloc_seqid = nfs_alloc_seqid,
	.call_sync_ops = &nfs40_call_sync_ops,
	.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
	.nograce_recovery_ops = &nfs40_nograce_recovery_ops,
	.state_renewal_ops = &nfs40_state_renewal_ops,
	.mig_recovery_ops = &nfs40_mig_recovery_ops,
};

#if defined(CONFIG_NFS_V4_1)
static struct nfs_seqid *
nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
{
	return NULL;
}

static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
	.minor_version = 1,
	.init_caps = NFS_CAP_READDIRPLUS
		| NFS_CAP_ATOMIC_OPEN
		| NFS_CAP_POSIX_LOCK
		| NFS_CAP_STATEID_NFSV41
		| NFS_CAP_ATOMIC_OPEN_V1,
	.init_client = nfs41_init_client,
	.shutdown_client = nfs41_shutdown_client,
	.match_stateid = nfs41_match_stateid,
	.find_root_sec = nfs41_find_root_sec,
	.free_lock_state = nfs41_free_lock_state,
	.alloc_seqid = nfs_alloc_no_seqid,
	.call_sync_ops = &nfs41_call_sync_ops,
	.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
	.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
	.state_renewal_ops = &nfs41_state_renewal_ops,
	.mig_recovery_ops = &nfs41_mig_recovery_ops,
};
#endif

#if defined(CONFIG_NFS_V4_2)
static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
	.minor_version = 2,
	.init_caps = NFS_CAP_READDIRPLUS
		| NFS_CAP_ATOMIC_OPEN
		| NFS_CAP_POSIX_LOCK
		| NFS_CAP_STATEID_NFSV41
		| NFS_CAP_ATOMIC_OPEN_V1
		| NFS_CAP_ALLOCATE
		| NFS_CAP_DEALLOCATE
		| NFS_CAP_SEEK,
	.init_client = nfs41_init_client,
	.shutdown_client = nfs41_shutdown_client,
	.match_stateid = nfs41_match_stateid,
	.find_root_sec = nfs41_find_root_sec,
	.free_lock_state = nfs41_free_lock_state,
	.call_sync_ops = &nfs41_call_sync_ops,
	.alloc_seqid = nfs_alloc_no_seqid,
	.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
	.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
	.state_renewal_ops = &nfs41_state_renewal_ops,
	.mig_recovery_ops = &nfs41_mig_recovery_ops,
};
#endif

const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
	[0] = &nfs_v4_0_minor_ops,
#if defined(CONFIG_NFS_V4_1)
	[1] = &nfs_v4_1_minor_ops,
#endif
#if defined(CONFIG_NFS_V4_2)
	[2] = &nfs_v4_2_minor_ops,
#endif
};
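/*
 * VFS inode operations for NFSv4 directories.  ACLs and security labels
 * are exposed as xattrs through the generic handlers and the
 * nfs4_xattr_handlers[] table below.
 */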
static const struct inode_operations nfs4_dir_inode_operations = {
	.create = nfs_create,
	.lookup = nfs_lookup,
	.atomic_open = nfs_atomic_open,
	.link = nfs_link,
	.unlink = nfs_unlink,
	.symlink = nfs_symlink,
	.mkdir = nfs_mkdir,
	.rmdir = nfs_rmdir,
	.mknod = nfs_mknod,
	.rename = nfs_rename,
	.permission = nfs_permission,
	.getattr = nfs_getattr,
	.setattr = nfs_setattr,
	.getxattr = generic_getxattr,
	.setxattr = generic_setxattr,
	.listxattr = generic_listxattr,
	.removexattr = generic_removexattr,
};

static const struct inode_operations nfs4_file_inode_operations = {
	.permission = nfs_permission,
	.getattr = nfs_getattr,
	.setattr = nfs_setattr,
	.getxattr = generic_getxattr,
	.setxattr = generic_setxattr,
	.listxattr = generic_listxattr,
	.removexattr = generic_removexattr,
};

const struct nfs_rpc_ops nfs_v4_clientops = {
	.version = 4, /* protocol version */
	.dentry_ops = &nfs4_dentry_operations,
	.dir_inode_ops = &nfs4_dir_inode_operations,
	.file_inode_ops = &nfs4_file_inode_operations,
	.file_ops = &nfs4_file_operations,
	.getroot = nfs4_proc_get_root,
	.submount = nfs4_submount,
	.try_mount = nfs4_try_mount,
	.getattr = nfs4_proc_getattr,
	.setattr = nfs4_proc_setattr,
	.lookup = nfs4_proc_lookup,
	.access = nfs4_proc_access,
	.readlink = nfs4_proc_readlink,
	.create = nfs4_proc_create,
	.remove = nfs4_proc_remove,
	.unlink_setup = nfs4_proc_unlink_setup,
	.unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
	.unlink_done = nfs4_proc_unlink_done,
	.rename_setup = nfs4_proc_rename_setup,
	.rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
	.rename_done = nfs4_proc_rename_done,
	.link = nfs4_proc_link,
	.symlink = nfs4_proc_symlink,
	.mkdir = nfs4_proc_mkdir,
	.rmdir = nfs4_proc_remove,
	.readdir = nfs4_proc_readdir,
	.mknod = nfs4_proc_mknod,
	.statfs = nfs4_proc_statfs,
	.fsinfo = nfs4_proc_fsinfo,
	.pathconf = nfs4_proc_pathconf,
	.set_capabilities = nfs4_server_capabilities,
	.decode_dirent = nfs4_decode_dirent,
	.pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
	.read_setup = nfs4_proc_read_setup,
	.read_done = nfs4_read_done,
	.write_setup = nfs4_proc_write_setup,
	.write_done = nfs4_write_done,
	.commit_setup = nfs4_proc_commit_setup,
	.commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
	.commit_done = nfs4_commit_done,
	.lock = nfs4_proc_lock,
	.clear_acl_cache = nfs4_zap_acl_attr,
	.close_context = nfs4_close_context,
	.open_context = nfs4_atomic_open,
	.have_delegation = nfs4_have_delegation,
	.return_delegation = nfs4_inode_return_delegation,
	.alloc_client = nfs4_alloc_client,
	.init_client = nfs4_init_client,
	.free_client = nfs4_free_client,
	.create_server = nfs4_create_server,
	.clone_server = nfs_clone_server,
};

static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
	.prefix = XATTR_NAME_NFSV4_ACL,
	.list = nfs4_xattr_list_nfs4_acl,
	.get = nfs4_xattr_get_nfs4_acl,
	.set = nfs4_xattr_set_nfs4_acl,
};

const struct xattr_handler *nfs4_xattr_handlers[] = {
	&nfs4_xattr_nfs4_acl_handler,
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	&nfs4_xattr_nfs4_label_handler,
#endif
	NULL
};

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */