/*
 * linux/net/sunrpc/xprtsock.c
 *
 * Client-side transport implementation for sockets.
 *
 * TCP callback races fixes (C) 1998 Red Hat
 * TCP send fixes (C) 1998 Red Hat
 * TCP NFS related read + write fixes
 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 *
 * Rewrite of large parts of the code in order to stabilize TCP stuff.
 * Fix behaviour when socket buffer is full.
 *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
 *
 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
 *  <gilles.quillard@bull.net>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/un.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprtsock.h>
#include <linux/file.h>
#ifdef CONFIG_SUNRPC_BACKCHANNEL
#include <linux/sunrpc/bc_xprt.h>
#endif

#include <net/sock.h>
#include <net/checksum.h>
#include <net/udp.h>
#include <net/tcp.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

static void xs_close(struct rpc_xprt *xprt);

/*
 * xprtsock tunables
 */
static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;

static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)

#define XS_TCP_LINGER_TO	(15U * HZ)
static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;

/*
 * We can register our own files under /proc/sys/sunrpc by
 * calling register_sysctl_table() again.  The files in that
 * directory become the union of all files registered there.
 *
 * We simply need to make sure that we don't collide with
 * someone else's file names!
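 *
 * Purely as an illustration (not part of the original comment): once the
 * table below is registered, each entry appears as a writable file under
 * /proc/sys/sunrpc/, e.g. /proc/sys/sunrpc/udp_slot_table_entries, and can
 * be tuned from userspace with something like
 *
 *	echo 32 > /proc/sys/sunrpc/udp_slot_table_entries
 *
 * subject to the min/max limits attached to each entry.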
78 */ 79 80static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE; 81static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE; 82static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT; 83static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT; 84static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT; 85 86static struct ctl_table_header *sunrpc_table_header; 87 88/* 89 * FIXME: changing the UDP slot table size should also resize the UDP 90 * socket buffers for existing UDP transports 91 */ 92static struct ctl_table xs_tunables_table[] = { 93 { 94 .procname = "udp_slot_table_entries", 95 .data = &xprt_udp_slot_table_entries, 96 .maxlen = sizeof(unsigned int), 97 .mode = 0644, 98 .proc_handler = proc_dointvec_minmax, 99 .extra1 = &min_slot_table_size, 100 .extra2 = &max_slot_table_size 101 }, 102 { 103 .procname = "tcp_slot_table_entries", 104 .data = &xprt_tcp_slot_table_entries, 105 .maxlen = sizeof(unsigned int), 106 .mode = 0644, 107 .proc_handler = proc_dointvec_minmax, 108 .extra1 = &min_slot_table_size, 109 .extra2 = &max_slot_table_size 110 }, 111 { 112 .procname = "tcp_max_slot_table_entries", 113 .data = &xprt_max_tcp_slot_table_entries, 114 .maxlen = sizeof(unsigned int), 115 .mode = 0644, 116 .proc_handler = proc_dointvec_minmax, 117 .extra1 = &min_slot_table_size, 118 .extra2 = &max_tcp_slot_table_limit 119 }, 120 { 121 .procname = "min_resvport", 122 .data = &xprt_min_resvport, 123 .maxlen = sizeof(unsigned int), 124 .mode = 0644, 125 .proc_handler = proc_dointvec_minmax, 126 .extra1 = &xprt_min_resvport_limit, 127 .extra2 = &xprt_max_resvport_limit 128 }, 129 { 130 .procname = "max_resvport", 131 .data = &xprt_max_resvport, 132 .maxlen = sizeof(unsigned int), 133 .mode = 0644, 134 .proc_handler = proc_dointvec_minmax, 135 .extra1 = &xprt_min_resvport_limit, 136 .extra2 = &xprt_max_resvport_limit 137 }, 138 { 139 .procname = "tcp_fin_timeout", 140 .data = &xs_tcp_fin_timeout, 141 .maxlen = sizeof(xs_tcp_fin_timeout), 142 .mode = 0644, 143 .proc_handler = proc_dointvec_jiffies, 144 }, 145 { }, 146}; 147 148static struct ctl_table sunrpc_table[] = { 149 { 150 .procname = "sunrpc", 151 .mode = 0555, 152 .child = xs_tunables_table 153 }, 154 { }, 155}; 156 157#endif 158 159/* 160 * Wait duration for a reply from the RPC portmapper. 161 */ 162#define XS_BIND_TO (60U * HZ) 163 164/* 165 * Delay if a UDP socket connect error occurs. This is most likely some 166 * kind of resource problem on the local host. 167 */ 168#define XS_UDP_REEST_TO (2U * HZ) 169 170/* 171 * The reestablish timeout allows clients to delay for a bit before attempting 172 * to reconnect to a server that just dropped our connection. 173 * 174 * We implement an exponential backoff when trying to reestablish a TCP 175 * transport connection with the server. Some servers like to drop a TCP 176 * connection when they are overworked, so we start with a short timeout and 177 * increase over time if the server is down or not responding. 178 */ 179#define XS_TCP_INIT_REEST_TO (3U * HZ) 180#define XS_TCP_MAX_REEST_TO (5U * 60 * HZ) 181 182/* 183 * TCP idle timeout; client drops the transport socket if it is idle 184 * for this long. Note that we also timeout UDP sockets to prevent 185 * holding port numbers when there is no RPC traffic. 
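 *
 * (With the value defined below, 5U * 60 * HZ jiffies, that amounts to
 * five minutes of idle time.)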
186 */ 187#define XS_IDLE_DISC_TO (5U * 60 * HZ) 188 189#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 190# undef RPC_DEBUG_DATA 191# define RPCDBG_FACILITY RPCDBG_TRANS 192#endif 193 194#ifdef RPC_DEBUG_DATA 195static void xs_pktdump(char *msg, u32 *packet, unsigned int count) 196{ 197 u8 *buf = (u8 *) packet; 198 int j; 199 200 dprintk("RPC: %s\n", msg); 201 for (j = 0; j < count && j < 128; j += 4) { 202 if (!(j & 31)) { 203 if (j) 204 dprintk("\n"); 205 dprintk("0x%04x ", j); 206 } 207 dprintk("%02x%02x%02x%02x ", 208 buf[j], buf[j+1], buf[j+2], buf[j+3]); 209 } 210 dprintk("\n"); 211} 212#else 213static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count) 214{ 215 /* NOP */ 216} 217#endif 218 219static inline struct rpc_xprt *xprt_from_sock(struct sock *sk) 220{ 221 return (struct rpc_xprt *) sk->sk_user_data; 222} 223 224static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt) 225{ 226 return (struct sockaddr *) &xprt->addr; 227} 228 229static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt) 230{ 231 return (struct sockaddr_un *) &xprt->addr; 232} 233 234static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt) 235{ 236 return (struct sockaddr_in *) &xprt->addr; 237} 238 239static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt) 240{ 241 return (struct sockaddr_in6 *) &xprt->addr; 242} 243 244static void xs_format_common_peer_addresses(struct rpc_xprt *xprt) 245{ 246 struct sockaddr *sap = xs_addr(xprt); 247 struct sockaddr_in6 *sin6; 248 struct sockaddr_in *sin; 249 struct sockaddr_un *sun; 250 char buf[128]; 251 252 switch (sap->sa_family) { 253 case AF_LOCAL: 254 sun = xs_addr_un(xprt); 255 strlcpy(buf, sun->sun_path, sizeof(buf)); 256 xprt->address_strings[RPC_DISPLAY_ADDR] = 257 kstrdup(buf, GFP_KERNEL); 258 break; 259 case AF_INET: 260 (void)rpc_ntop(sap, buf, sizeof(buf)); 261 xprt->address_strings[RPC_DISPLAY_ADDR] = 262 kstrdup(buf, GFP_KERNEL); 263 sin = xs_addr_in(xprt); 264 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr)); 265 break; 266 case AF_INET6: 267 (void)rpc_ntop(sap, buf, sizeof(buf)); 268 xprt->address_strings[RPC_DISPLAY_ADDR] = 269 kstrdup(buf, GFP_KERNEL); 270 sin6 = xs_addr_in6(xprt); 271 snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr); 272 break; 273 default: 274 BUG(); 275 } 276 277 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL); 278} 279 280static void xs_format_common_peer_ports(struct rpc_xprt *xprt) 281{ 282 struct sockaddr *sap = xs_addr(xprt); 283 char buf[128]; 284 285 snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap)); 286 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL); 287 288 snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap)); 289 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL); 290} 291 292static void xs_format_peer_addresses(struct rpc_xprt *xprt, 293 const char *protocol, 294 const char *netid) 295{ 296 xprt->address_strings[RPC_DISPLAY_PROTO] = protocol; 297 xprt->address_strings[RPC_DISPLAY_NETID] = netid; 298 xs_format_common_peer_addresses(xprt); 299 xs_format_common_peer_ports(xprt); 300} 301 302static void xs_update_peer_port(struct rpc_xprt *xprt) 303{ 304 kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]); 305 kfree(xprt->address_strings[RPC_DISPLAY_PORT]); 306 307 xs_format_common_peer_ports(xprt); 308} 309 310static void xs_free_peer_addresses(struct rpc_xprt *xprt) 311{ 312 unsigned int i; 313 314 for (i = 0; i < RPC_DISPLAY_MAX; i++) 315 switch (i) { 316 case RPC_DISPLAY_PROTO: 317 case 
RPC_DISPLAY_NETID: 318 continue; 319 default: 320 kfree(xprt->address_strings[i]); 321 } 322} 323 324#define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL) 325 326static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more) 327{ 328 struct msghdr msg = { 329 .msg_name = addr, 330 .msg_namelen = addrlen, 331 .msg_flags = XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0), 332 }; 333 struct kvec iov = { 334 .iov_base = vec->iov_base + base, 335 .iov_len = vec->iov_len - base, 336 }; 337 338 if (iov.iov_len != 0) 339 return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); 340 return kernel_sendmsg(sock, &msg, NULL, 0, 0); 341} 342 343static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more, bool zerocopy, int *sent_p) 344{ 345 ssize_t (*do_sendpage)(struct socket *sock, struct page *page, 346 int offset, size_t size, int flags); 347 struct page **ppage; 348 unsigned int remainder; 349 int err; 350 351 remainder = xdr->page_len - base; 352 base += xdr->page_base; 353 ppage = xdr->pages + (base >> PAGE_SHIFT); 354 base &= ~PAGE_MASK; 355 do_sendpage = sock->ops->sendpage; 356 if (!zerocopy) 357 do_sendpage = sock_no_sendpage; 358 for(;;) { 359 unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder); 360 int flags = XS_SENDMSG_FLAGS; 361 362 remainder -= len; 363 if (more) 364 flags |= MSG_MORE; 365 if (remainder != 0) 366 flags |= MSG_SENDPAGE_NOTLAST | MSG_MORE; 367 err = do_sendpage(sock, *ppage, base, len, flags); 368 if (remainder == 0 || err != len) 369 break; 370 *sent_p += err; 371 ppage++; 372 base = 0; 373 } 374 if (err > 0) { 375 *sent_p += err; 376 err = 0; 377 } 378 return err; 379} 380 381/** 382 * xs_sendpages - write pages directly to a socket 383 * @sock: socket to send on 384 * @addr: UDP only -- address of destination 385 * @addrlen: UDP only -- length of destination address 386 * @xdr: buffer containing this request 387 * @base: starting position in the buffer 388 * @zerocopy: true if it is safe to use sendpage() 389 * @sent_p: return the total number of bytes successfully queued for sending 390 * 391 */ 392static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, bool zerocopy, int *sent_p) 393{ 394 unsigned int remainder = xdr->len - base; 395 int err = 0; 396 int sent = 0; 397 398 if (unlikely(!sock)) 399 return -ENOTSOCK; 400 401 if (base != 0) { 402 addr = NULL; 403 addrlen = 0; 404 } 405 406 if (base < xdr->head[0].iov_len || addr != NULL) { 407 unsigned int len = xdr->head[0].iov_len - base; 408 remainder -= len; 409 err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0); 410 if (remainder == 0 || err != len) 411 goto out; 412 *sent_p += err; 413 base = 0; 414 } else 415 base -= xdr->head[0].iov_len; 416 417 if (base < xdr->page_len) { 418 unsigned int len = xdr->page_len - base; 419 remainder -= len; 420 err = xs_send_pagedata(sock, xdr, base, remainder != 0, zerocopy, &sent); 421 *sent_p += sent; 422 if (remainder == 0 || sent != len) 423 goto out; 424 base = 0; 425 } else 426 base -= xdr->page_len; 427 428 if (base >= xdr->tail[0].iov_len) 429 return 0; 430 err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0); 431out: 432 if (err > 0) { 433 *sent_p += err; 434 err = 0; 435 } 436 return err; 437} 438 439static void xs_nospace_callback(struct rpc_task *task) 440{ 441 struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt); 442 443 
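	/* Undo the sk_write_pending increment taken in xs_nospace() below. */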
	transport->inet->sk_write_pending--;
}

/**
 * xs_nospace - place task on wait queue if transmit was incomplete
 * @task: task to put to sleep
 *
 */
static int xs_nospace(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct sock *sk = transport->inet;
	int ret = -EAGAIN;

	dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
			task->tk_pid, req->rq_slen - req->rq_bytes_sent,
			req->rq_slen);

	/* Protect against races with write_space */
	spin_lock_bh(&xprt->transport_lock);

	/* Don't race with disconnect */
	if (xprt_connected(xprt)) {
		/* wait for more buffer space */
		sk->sk_write_pending++;
		xprt_wait_for_buffer_space(task, xs_nospace_callback);
	} else
		ret = -ENOTCONN;

	spin_unlock_bh(&xprt->transport_lock);

	/* Race breaker in case memory is freed before above code is called */
	sk->sk_write_space(sk);
	return ret;
}

/*
 * Construct a stream transport record marker in @buf.
 */
static inline void xs_encode_stream_record_marker(struct xdr_buf *buf)
{
	u32 reclen = buf->len - sizeof(rpc_fraghdr);
	rpc_fraghdr *base = buf->head[0].iov_base;
	*base = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | reclen);
}

/**
 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
 * @task: RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 */
static int xs_local_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int status;
	int sent = 0;

	xs_encode_stream_record_marker(&req->rq_snd_buf);

	xs_pktdump("packet data:",
			req->rq_svec->iov_base, req->rq_svec->iov_len);

	status = xs_sendpages(transport->sock, NULL, 0, xdr, req->rq_bytes_sent,
			      true, &sent);
	dprintk("RPC: %s(%u) = %d\n",
			__func__, xdr->len - req->rq_bytes_sent, status);

	if (status == -EAGAIN && sock_writeable(transport->inet))
		status = -ENOBUFS;

	if (likely(sent > 0) || status == 0) {
		req->rq_bytes_sent += sent;
		req->rq_xmit_bytes_sent += sent;
		if (likely(req->rq_bytes_sent >= req->rq_slen)) {
			req->rq_bytes_sent = 0;
			return 0;
		}
		status = -EAGAIN;
	}

	switch (status) {
	case -ENOBUFS:
		break;
	case -EAGAIN:
		status = xs_nospace(task);
		break;
	default:
		dprintk("RPC: sendmsg returned unrecognized error %d\n",
			-status);
	case -EPIPE:
		xs_close(xprt);
		status = -ENOTCONN;
	}

	return status;
}

/**
 * xs_udp_send_request - write an RPC request to a UDP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
*/ 563static int xs_udp_send_request(struct rpc_task *task) 564{ 565 struct rpc_rqst *req = task->tk_rqstp; 566 struct rpc_xprt *xprt = req->rq_xprt; 567 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 568 struct xdr_buf *xdr = &req->rq_snd_buf; 569 int sent = 0; 570 int status; 571 572 xs_pktdump("packet data:", 573 req->rq_svec->iov_base, 574 req->rq_svec->iov_len); 575 576 if (!xprt_bound(xprt)) 577 return -ENOTCONN; 578 status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen, 579 xdr, req->rq_bytes_sent, true, &sent); 580 581 dprintk("RPC: xs_udp_send_request(%u) = %d\n", 582 xdr->len - req->rq_bytes_sent, status); 583 584 /* firewall is blocking us, don't return -EAGAIN or we end up looping */ 585 if (status == -EPERM) 586 goto process_status; 587 588 if (status == -EAGAIN && sock_writeable(transport->inet)) 589 status = -ENOBUFS; 590 591 if (sent > 0 || status == 0) { 592 req->rq_xmit_bytes_sent += sent; 593 if (sent >= req->rq_slen) 594 return 0; 595 /* Still some bytes left; set up for a retry later. */ 596 status = -EAGAIN; 597 } 598 599process_status: 600 switch (status) { 601 case -ENOTSOCK: 602 status = -ENOTCONN; 603 /* Should we call xs_close() here? */ 604 break; 605 case -EAGAIN: 606 status = xs_nospace(task); 607 break; 608 case -ENETUNREACH: 609 case -ENOBUFS: 610 case -EPIPE: 611 case -ECONNREFUSED: 612 case -EPERM: 613 /* When the server has died, an ICMP port unreachable message 614 * prompts ECONNREFUSED. */ 615 break; 616 default: 617 dprintk("RPC: sendmsg returned unrecognized error %d\n", 618 -status); 619 } 620 621 return status; 622} 623 624/** 625 * xs_tcp_send_request - write an RPC request to a TCP socket 626 * @task: address of RPC task that manages the state of an RPC request 627 * 628 * Return values: 629 * 0: The request has been sent 630 * EAGAIN: The socket was blocked, please call again later to 631 * complete the request 632 * ENOTCONN: Caller needs to invoke connect logic then call again 633 * other: Some other error occurred, the request was not sent 634 * 635 * XXX: In the case of soft timeouts, should we eventually give up 636 * if sendmsg is not able to make progress? 637 */ 638static int xs_tcp_send_request(struct rpc_task *task) 639{ 640 struct rpc_rqst *req = task->tk_rqstp; 641 struct rpc_xprt *xprt = req->rq_xprt; 642 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 643 struct xdr_buf *xdr = &req->rq_snd_buf; 644 bool zerocopy = true; 645 int status; 646 int sent; 647 648 xs_encode_stream_record_marker(&req->rq_snd_buf); 649 650 xs_pktdump("packet data:", 651 req->rq_svec->iov_base, 652 req->rq_svec->iov_len); 653 /* Don't use zero copy if this is a resend. If the RPC call 654 * completes while the socket holds a reference to the pages, 655 * then we may end up resending corrupted data. 656 */ 657 if (task->tk_flags & RPC_TASK_SENT) 658 zerocopy = false; 659 660 /* Continue transmitting the packet/record. We must be careful 661 * to cope with writespace callbacks arriving _after_ we have 662 * called sendmsg(). */ 663 while (1) { 664 sent = 0; 665 status = xs_sendpages(transport->sock, NULL, 0, xdr, 666 req->rq_bytes_sent, zerocopy, &sent); 667 668 dprintk("RPC: xs_tcp_send_request(%u) = %d\n", 669 xdr->len - req->rq_bytes_sent, status); 670 671 /* If we've sent the entire packet, immediately 672 * reset the count of bytes sent. 
*/ 673 req->rq_bytes_sent += sent; 674 req->rq_xmit_bytes_sent += sent; 675 if (likely(req->rq_bytes_sent >= req->rq_slen)) { 676 req->rq_bytes_sent = 0; 677 return 0; 678 } 679 680 if (status < 0) 681 break; 682 if (sent == 0) { 683 status = -EAGAIN; 684 break; 685 } 686 } 687 if (status == -EAGAIN && sk_stream_is_writeable(transport->inet)) 688 status = -ENOBUFS; 689 690 switch (status) { 691 case -ENOTSOCK: 692 status = -ENOTCONN; 693 /* Should we call xs_close() here? */ 694 break; 695 case -EAGAIN: 696 status = xs_nospace(task); 697 break; 698 case -ECONNRESET: 699 case -ECONNREFUSED: 700 case -ENOTCONN: 701 case -EADDRINUSE: 702 case -ENOBUFS: 703 case -EPIPE: 704 break; 705 default: 706 dprintk("RPC: sendmsg returned unrecognized error %d\n", 707 -status); 708 } 709 710 return status; 711} 712 713/** 714 * xs_tcp_release_xprt - clean up after a tcp transmission 715 * @xprt: transport 716 * @task: rpc task 717 * 718 * This cleans up if an error causes us to abort the transmission of a request. 719 * In this case, the socket may need to be reset in order to avoid confusing 720 * the server. 721 */ 722static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task) 723{ 724 struct rpc_rqst *req; 725 726 if (task != xprt->snd_task) 727 return; 728 if (task == NULL) 729 goto out_release; 730 req = task->tk_rqstp; 731 if (req == NULL) 732 goto out_release; 733 if (req->rq_bytes_sent == 0) 734 goto out_release; 735 if (req->rq_bytes_sent == req->rq_snd_buf.len) 736 goto out_release; 737 set_bit(XPRT_CLOSE_WAIT, &xprt->state); 738out_release: 739 xprt_release_xprt(xprt, task); 740} 741 742static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk) 743{ 744 transport->old_data_ready = sk->sk_data_ready; 745 transport->old_state_change = sk->sk_state_change; 746 transport->old_write_space = sk->sk_write_space; 747 transport->old_error_report = sk->sk_error_report; 748} 749 750static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk) 751{ 752 sk->sk_data_ready = transport->old_data_ready; 753 sk->sk_state_change = transport->old_state_change; 754 sk->sk_write_space = transport->old_write_space; 755 sk->sk_error_report = transport->old_error_report; 756} 757 758static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt) 759{ 760 smp_mb__before_atomic(); 761 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); 762 clear_bit(XPRT_CLOSING, &xprt->state); 763 smp_mb__after_atomic(); 764} 765 766static void xs_sock_mark_closed(struct rpc_xprt *xprt) 767{ 768 xs_sock_reset_connection_flags(xprt); 769 /* Mark transport as closed and wake up all pending tasks */ 770 xprt_disconnect_done(xprt); 771} 772 773/** 774 * xs_error_report - callback to handle TCP socket state errors 775 * @sk: socket 776 * 777 * Note: we don't call sock_error() since there may be a rpc_task 778 * using the socket, and so we don't want to clear sk->sk_err. 779 */ 780static void xs_error_report(struct sock *sk) 781{ 782 struct rpc_xprt *xprt; 783 int err; 784 785 read_lock_bh(&sk->sk_callback_lock); 786 if (!(xprt = xprt_from_sock(sk))) 787 goto out; 788 789 err = -sk->sk_err; 790 if (err == 0) 791 goto out; 792 /* Is this a reset event? 
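	 * (A peer RST typically surfaces here as sk_err == ECONNRESET with the
	 * socket already in TCP_CLOSE, in which case the transport is marked
	 * closed below.)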
*/ 793 if (sk->sk_state == TCP_CLOSE) 794 xs_sock_mark_closed(xprt); 795 dprintk("RPC: xs_error_report client %p, error=%d...\n", 796 xprt, -err); 797 trace_rpc_socket_error(xprt, sk->sk_socket, err); 798 xprt_wake_pending_tasks(xprt, err); 799 out: 800 read_unlock_bh(&sk->sk_callback_lock); 801} 802 803static void xs_reset_transport(struct sock_xprt *transport) 804{ 805 struct socket *sock = transport->sock; 806 struct sock *sk = transport->inet; 807 struct rpc_xprt *xprt = &transport->xprt; 808 809 if (sk == NULL) 810 return; 811 812 if (atomic_read(&transport->xprt.swapper)) 813 sk_clear_memalloc(sk); 814 815 kernel_sock_shutdown(sock, SHUT_RDWR); 816 817 mutex_lock(&transport->recv_mutex); 818 write_lock_bh(&sk->sk_callback_lock); 819 transport->inet = NULL; 820 transport->sock = NULL; 821 822 sk->sk_user_data = NULL; 823 824 xs_restore_old_callbacks(transport, sk); 825 xprt_clear_connected(xprt); 826 write_unlock_bh(&sk->sk_callback_lock); 827 xs_sock_reset_connection_flags(xprt); 828 mutex_unlock(&transport->recv_mutex); 829 830 trace_rpc_socket_close(xprt, sock); 831 sock_release(sock); 832} 833 834/** 835 * xs_close - close a socket 836 * @xprt: transport 837 * 838 * This is used when all requests are complete; ie, no DRC state remains 839 * on the server we want to save. 840 * 841 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with 842 * xs_reset_transport() zeroing the socket from underneath a writer. 843 */ 844static void xs_close(struct rpc_xprt *xprt) 845{ 846 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 847 848 dprintk("RPC: xs_close xprt %p\n", xprt); 849 850 xs_reset_transport(transport); 851 xprt->reestablish_timeout = 0; 852 853 xprt_disconnect_done(xprt); 854} 855 856static void xs_inject_disconnect(struct rpc_xprt *xprt) 857{ 858 dprintk("RPC: injecting transport disconnect on xprt=%p\n", 859 xprt); 860 xprt_disconnect_done(xprt); 861} 862 863static void xs_xprt_free(struct rpc_xprt *xprt) 864{ 865 xs_free_peer_addresses(xprt); 866 xprt_free(xprt); 867} 868 869/** 870 * xs_destroy - prepare to shutdown a transport 871 * @xprt: doomed transport 872 * 873 */ 874static void xs_destroy(struct rpc_xprt *xprt) 875{ 876 struct sock_xprt *transport = container_of(xprt, 877 struct sock_xprt, xprt); 878 dprintk("RPC: xs_destroy xprt %p\n", xprt); 879 880 cancel_delayed_work_sync(&transport->connect_worker); 881 xs_close(xprt); 882 cancel_work_sync(&transport->recv_worker); 883 xs_xprt_free(xprt); 884 module_put(THIS_MODULE); 885} 886 887static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) 888{ 889 struct xdr_skb_reader desc = { 890 .skb = skb, 891 .offset = sizeof(rpc_fraghdr), 892 .count = skb->len - sizeof(rpc_fraghdr), 893 }; 894 895 if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0) 896 return -1; 897 if (desc.count) 898 return -1; 899 return 0; 900} 901 902/** 903 * xs_local_data_read_skb 904 * @xprt: transport 905 * @sk: socket 906 * @skb: skbuff 907 * 908 * Currently this assumes we can read the whole reply in a single gulp. 909 */ 910static void xs_local_data_read_skb(struct rpc_xprt *xprt, 911 struct sock *sk, 912 struct sk_buff *skb) 913{ 914 struct rpc_task *task; 915 struct rpc_rqst *rovr; 916 int repsize, copied; 917 u32 _xid; 918 __be32 *xp; 919 920 repsize = skb->len - sizeof(rpc_fraghdr); 921 if (repsize < 4) { 922 dprintk("RPC: impossible RPC reply size %d\n", repsize); 923 return; 924 } 925 926 /* Copy the XID from the skb... 
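	 * (The XID is the first 32-bit word after the record marker; it is
	 * what xprt_lookup_rqst() keys on below.)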
*/ 927 xp = skb_header_pointer(skb, sizeof(rpc_fraghdr), sizeof(_xid), &_xid); 928 if (xp == NULL) 929 return; 930 931 /* Look up and lock the request corresponding to the given XID */ 932 spin_lock_bh(&xprt->transport_lock); 933 rovr = xprt_lookup_rqst(xprt, *xp); 934 if (!rovr) 935 goto out_unlock; 936 task = rovr->rq_task; 937 938 copied = rovr->rq_private_buf.buflen; 939 if (copied > repsize) 940 copied = repsize; 941 942 if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) { 943 dprintk("RPC: sk_buff copy failed\n"); 944 goto out_unlock; 945 } 946 947 xprt_complete_rqst(task, copied); 948 949 out_unlock: 950 spin_unlock_bh(&xprt->transport_lock); 951} 952 953static void xs_local_data_receive(struct sock_xprt *transport) 954{ 955 struct sk_buff *skb; 956 struct sock *sk; 957 int err; 958 959 mutex_lock(&transport->recv_mutex); 960 sk = transport->inet; 961 if (sk == NULL) 962 goto out; 963 for (;;) { 964 skb = skb_recv_datagram(sk, 0, 1, &err); 965 if (skb == NULL) 966 break; 967 xs_local_data_read_skb(&transport->xprt, sk, skb); 968 skb_free_datagram(sk, skb); 969 } 970out: 971 mutex_unlock(&transport->recv_mutex); 972} 973 974static void xs_local_data_receive_workfn(struct work_struct *work) 975{ 976 struct sock_xprt *transport = 977 container_of(work, struct sock_xprt, recv_worker); 978 xs_local_data_receive(transport); 979} 980 981/** 982 * xs_udp_data_read_skb - receive callback for UDP sockets 983 * @xprt: transport 984 * @sk: socket 985 * @skb: skbuff 986 * 987 */ 988static void xs_udp_data_read_skb(struct rpc_xprt *xprt, 989 struct sock *sk, 990 struct sk_buff *skb) 991{ 992 struct rpc_task *task; 993 struct rpc_rqst *rovr; 994 int repsize, copied; 995 u32 _xid; 996 __be32 *xp; 997 998 repsize = skb->len - sizeof(struct udphdr); 999 if (repsize < 4) { 1000 dprintk("RPC: impossible RPC reply size %d!\n", repsize); 1001 return; 1002 } 1003 1004 /* Copy the XID from the skb... */ 1005 xp = skb_header_pointer(skb, sizeof(struct udphdr), 1006 sizeof(_xid), &_xid); 1007 if (xp == NULL) 1008 return; 1009 1010 /* Look up and lock the request corresponding to the given XID */ 1011 spin_lock_bh(&xprt->transport_lock); 1012 rovr = xprt_lookup_rqst(xprt, *xp); 1013 if (!rovr) 1014 goto out_unlock; 1015 task = rovr->rq_task; 1016 1017 if ((copied = rovr->rq_private_buf.buflen) > repsize) 1018 copied = repsize; 1019 1020 /* Suck it into the iovec, verify checksum if not done by hw. 
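	 * (When the device has already validated the checksum, i.e. the skb is
	 * marked CHECKSUM_UNNECESSARY, csum_partial_copy_to_xdr() is expected
	 * to skip the software check and just copy the data.)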
*/ 1021 if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) { 1022 UDPX_INC_STATS_BH(sk, UDP_MIB_INERRORS); 1023 goto out_unlock; 1024 } 1025 1026 UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS); 1027 1028 xprt_adjust_cwnd(xprt, task, copied); 1029 xprt_complete_rqst(task, copied); 1030 1031 out_unlock: 1032 spin_unlock_bh(&xprt->transport_lock); 1033} 1034 1035static void xs_udp_data_receive(struct sock_xprt *transport) 1036{ 1037 struct sk_buff *skb; 1038 struct sock *sk; 1039 int err; 1040 1041 mutex_lock(&transport->recv_mutex); 1042 sk = transport->inet; 1043 if (sk == NULL) 1044 goto out; 1045 for (;;) { 1046 skb = skb_recv_datagram(sk, 0, 1, &err); 1047 if (skb == NULL) 1048 break; 1049 xs_udp_data_read_skb(&transport->xprt, sk, skb); 1050 skb_free_datagram(sk, skb); 1051 } 1052out: 1053 mutex_unlock(&transport->recv_mutex); 1054} 1055 1056static void xs_udp_data_receive_workfn(struct work_struct *work) 1057{ 1058 struct sock_xprt *transport = 1059 container_of(work, struct sock_xprt, recv_worker); 1060 xs_udp_data_receive(transport); 1061} 1062 1063/** 1064 * xs_data_ready - "data ready" callback for UDP sockets 1065 * @sk: socket with data to read 1066 * 1067 */ 1068static void xs_data_ready(struct sock *sk) 1069{ 1070 struct rpc_xprt *xprt; 1071 1072 read_lock_bh(&sk->sk_callback_lock); 1073 dprintk("RPC: xs_data_ready...\n"); 1074 xprt = xprt_from_sock(sk); 1075 if (xprt != NULL) { 1076 struct sock_xprt *transport = container_of(xprt, 1077 struct sock_xprt, xprt); 1078 queue_work(rpciod_workqueue, &transport->recv_worker); 1079 } 1080 read_unlock_bh(&sk->sk_callback_lock); 1081} 1082 1083/* 1084 * Helper function to force a TCP close if the server is sending 1085 * junk and/or it has put us in CLOSE_WAIT 1086 */ 1087static void xs_tcp_force_close(struct rpc_xprt *xprt) 1088{ 1089 xprt_force_disconnect(xprt); 1090} 1091 1092static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc) 1093{ 1094 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 1095 size_t len, used; 1096 char *p; 1097 1098 p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset; 1099 len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset; 1100 used = xdr_skb_read_bits(desc, p, len); 1101 transport->tcp_offset += used; 1102 if (used != len) 1103 return; 1104 1105 transport->tcp_reclen = ntohl(transport->tcp_fraghdr); 1106 if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT) 1107 transport->tcp_flags |= TCP_RCV_LAST_FRAG; 1108 else 1109 transport->tcp_flags &= ~TCP_RCV_LAST_FRAG; 1110 transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK; 1111 1112 transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR; 1113 transport->tcp_offset = 0; 1114 1115 /* Sanity check of the record length */ 1116 if (unlikely(transport->tcp_reclen < 8)) { 1117 dprintk("RPC: invalid TCP record fragment length\n"); 1118 xs_tcp_force_close(xprt); 1119 return; 1120 } 1121 dprintk("RPC: reading TCP record fragment of length %d\n", 1122 transport->tcp_reclen); 1123} 1124 1125static void xs_tcp_check_fraghdr(struct sock_xprt *transport) 1126{ 1127 if (transport->tcp_offset == transport->tcp_reclen) { 1128 transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR; 1129 transport->tcp_offset = 0; 1130 if (transport->tcp_flags & TCP_RCV_LAST_FRAG) { 1131 transport->tcp_flags &= ~TCP_RCV_COPY_DATA; 1132 transport->tcp_flags |= TCP_RCV_COPY_XID; 1133 transport->tcp_copied = 0; 1134 } 1135 } 1136} 1137 1138static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc) 1139{ 1140 
size_t len, used; 1141 char *p; 1142 1143 len = sizeof(transport->tcp_xid) - transport->tcp_offset; 1144 dprintk("RPC: reading XID (%Zu bytes)\n", len); 1145 p = ((char *) &transport->tcp_xid) + transport->tcp_offset; 1146 used = xdr_skb_read_bits(desc, p, len); 1147 transport->tcp_offset += used; 1148 if (used != len) 1149 return; 1150 transport->tcp_flags &= ~TCP_RCV_COPY_XID; 1151 transport->tcp_flags |= TCP_RCV_READ_CALLDIR; 1152 transport->tcp_copied = 4; 1153 dprintk("RPC: reading %s XID %08x\n", 1154 (transport->tcp_flags & TCP_RPC_REPLY) ? "reply for" 1155 : "request with", 1156 ntohl(transport->tcp_xid)); 1157 xs_tcp_check_fraghdr(transport); 1158} 1159 1160static inline void xs_tcp_read_calldir(struct sock_xprt *transport, 1161 struct xdr_skb_reader *desc) 1162{ 1163 size_t len, used; 1164 u32 offset; 1165 char *p; 1166 1167 /* 1168 * We want transport->tcp_offset to be 8 at the end of this routine 1169 * (4 bytes for the xid and 4 bytes for the call/reply flag). 1170 * When this function is called for the first time, 1171 * transport->tcp_offset is 4 (after having already read the xid). 1172 */ 1173 offset = transport->tcp_offset - sizeof(transport->tcp_xid); 1174 len = sizeof(transport->tcp_calldir) - offset; 1175 dprintk("RPC: reading CALL/REPLY flag (%Zu bytes)\n", len); 1176 p = ((char *) &transport->tcp_calldir) + offset; 1177 used = xdr_skb_read_bits(desc, p, len); 1178 transport->tcp_offset += used; 1179 if (used != len) 1180 return; 1181 transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR; 1182 /* 1183 * We don't yet have the XDR buffer, so we will write the calldir 1184 * out after we get the buffer from the 'struct rpc_rqst' 1185 */ 1186 switch (ntohl(transport->tcp_calldir)) { 1187 case RPC_REPLY: 1188 transport->tcp_flags |= TCP_RCV_COPY_CALLDIR; 1189 transport->tcp_flags |= TCP_RCV_COPY_DATA; 1190 transport->tcp_flags |= TCP_RPC_REPLY; 1191 break; 1192 case RPC_CALL: 1193 transport->tcp_flags |= TCP_RCV_COPY_CALLDIR; 1194 transport->tcp_flags |= TCP_RCV_COPY_DATA; 1195 transport->tcp_flags &= ~TCP_RPC_REPLY; 1196 break; 1197 default: 1198 dprintk("RPC: invalid request message type\n"); 1199 xs_tcp_force_close(&transport->xprt); 1200 } 1201 xs_tcp_check_fraghdr(transport); 1202} 1203 1204static inline void xs_tcp_read_common(struct rpc_xprt *xprt, 1205 struct xdr_skb_reader *desc, 1206 struct rpc_rqst *req) 1207{ 1208 struct sock_xprt *transport = 1209 container_of(xprt, struct sock_xprt, xprt); 1210 struct xdr_buf *rcvbuf; 1211 size_t len; 1212 ssize_t r; 1213 1214 rcvbuf = &req->rq_private_buf; 1215 1216 if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) { 1217 /* 1218 * Save the RPC direction in the XDR buffer 1219 */ 1220 memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied, 1221 &transport->tcp_calldir, 1222 sizeof(transport->tcp_calldir)); 1223 transport->tcp_copied += sizeof(transport->tcp_calldir); 1224 transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR; 1225 } 1226 1227 len = desc->count; 1228 if (len > transport->tcp_reclen - transport->tcp_offset) { 1229 struct xdr_skb_reader my_desc; 1230 1231 len = transport->tcp_reclen - transport->tcp_offset; 1232 memcpy(&my_desc, desc, sizeof(my_desc)); 1233 my_desc.count = len; 1234 r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied, 1235 &my_desc, xdr_skb_read_bits); 1236 desc->count -= r; 1237 desc->offset += r; 1238 } else 1239 r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied, 1240 desc, xdr_skb_read_bits); 1241 1242 if (r > 0) { 1243 transport->tcp_copied += r; 1244 transport->tcp_offset += r; 1245 } 
1246 if (r != len) { 1247 /* Error when copying to the receive buffer, 1248 * usually because we weren't able to allocate 1249 * additional buffer pages. All we can do now 1250 * is turn off TCP_RCV_COPY_DATA, so the request 1251 * will not receive any additional updates, 1252 * and time out. 1253 * Any remaining data from this record will 1254 * be discarded. 1255 */ 1256 transport->tcp_flags &= ~TCP_RCV_COPY_DATA; 1257 dprintk("RPC: XID %08x truncated request\n", 1258 ntohl(transport->tcp_xid)); 1259 dprintk("RPC: xprt = %p, tcp_copied = %lu, " 1260 "tcp_offset = %u, tcp_reclen = %u\n", 1261 xprt, transport->tcp_copied, 1262 transport->tcp_offset, transport->tcp_reclen); 1263 return; 1264 } 1265 1266 dprintk("RPC: XID %08x read %Zd bytes\n", 1267 ntohl(transport->tcp_xid), r); 1268 dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, " 1269 "tcp_reclen = %u\n", xprt, transport->tcp_copied, 1270 transport->tcp_offset, transport->tcp_reclen); 1271 1272 if (transport->tcp_copied == req->rq_private_buf.buflen) 1273 transport->tcp_flags &= ~TCP_RCV_COPY_DATA; 1274 else if (transport->tcp_offset == transport->tcp_reclen) { 1275 if (transport->tcp_flags & TCP_RCV_LAST_FRAG) 1276 transport->tcp_flags &= ~TCP_RCV_COPY_DATA; 1277 } 1278} 1279 1280/* 1281 * Finds the request corresponding to the RPC xid and invokes the common 1282 * tcp read code to read the data. 1283 */ 1284static inline int xs_tcp_read_reply(struct rpc_xprt *xprt, 1285 struct xdr_skb_reader *desc) 1286{ 1287 struct sock_xprt *transport = 1288 container_of(xprt, struct sock_xprt, xprt); 1289 struct rpc_rqst *req; 1290 1291 dprintk("RPC: read reply XID %08x\n", ntohl(transport->tcp_xid)); 1292 1293 /* Find and lock the request corresponding to this xid */ 1294 spin_lock_bh(&xprt->transport_lock); 1295 req = xprt_lookup_rqst(xprt, transport->tcp_xid); 1296 if (!req) { 1297 dprintk("RPC: XID %08x request not found!\n", 1298 ntohl(transport->tcp_xid)); 1299 spin_unlock_bh(&xprt->transport_lock); 1300 return -1; 1301 } 1302 1303 xs_tcp_read_common(xprt, desc, req); 1304 1305 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) 1306 xprt_complete_rqst(req->rq_task, transport->tcp_copied); 1307 1308 spin_unlock_bh(&xprt->transport_lock); 1309 return 0; 1310} 1311 1312#if defined(CONFIG_SUNRPC_BACKCHANNEL) 1313/* 1314 * Obtains an rpc_rqst previously allocated and invokes the common 1315 * tcp read code to read the data. The result is placed in the callback 1316 * queue. 1317 * If we're unable to obtain the rpc_rqst we schedule the closing of the 1318 * connection and return -1. 
1319 */ 1320static int xs_tcp_read_callback(struct rpc_xprt *xprt, 1321 struct xdr_skb_reader *desc) 1322{ 1323 struct sock_xprt *transport = 1324 container_of(xprt, struct sock_xprt, xprt); 1325 struct rpc_rqst *req; 1326 1327 /* Look up and lock the request corresponding to the given XID */ 1328 spin_lock_bh(&xprt->transport_lock); 1329 req = xprt_lookup_bc_request(xprt, transport->tcp_xid); 1330 if (req == NULL) { 1331 spin_unlock_bh(&xprt->transport_lock); 1332 printk(KERN_WARNING "Callback slot table overflowed\n"); 1333 xprt_force_disconnect(xprt); 1334 return -1; 1335 } 1336 1337 dprintk("RPC: read callback XID %08x\n", ntohl(req->rq_xid)); 1338 xs_tcp_read_common(xprt, desc, req); 1339 1340 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) 1341 xprt_complete_bc_request(req, transport->tcp_copied); 1342 spin_unlock_bh(&xprt->transport_lock); 1343 1344 return 0; 1345} 1346 1347static inline int _xs_tcp_read_data(struct rpc_xprt *xprt, 1348 struct xdr_skb_reader *desc) 1349{ 1350 struct sock_xprt *transport = 1351 container_of(xprt, struct sock_xprt, xprt); 1352 1353 return (transport->tcp_flags & TCP_RPC_REPLY) ? 1354 xs_tcp_read_reply(xprt, desc) : 1355 xs_tcp_read_callback(xprt, desc); 1356} 1357 1358static int xs_tcp_bc_up(struct svc_serv *serv, struct net *net) 1359{ 1360 int ret; 1361 1362 ret = svc_create_xprt(serv, "tcp-bc", net, PF_INET, 0, 1363 SVC_SOCK_ANONYMOUS); 1364 if (ret < 0) 1365 return ret; 1366 return 0; 1367} 1368#else 1369static inline int _xs_tcp_read_data(struct rpc_xprt *xprt, 1370 struct xdr_skb_reader *desc) 1371{ 1372 return xs_tcp_read_reply(xprt, desc); 1373} 1374#endif /* CONFIG_SUNRPC_BACKCHANNEL */ 1375 1376/* 1377 * Read data off the transport. This can be either an RPC_CALL or an 1378 * RPC_REPLY. Relay the processing to helper functions. 1379 */ 1380static void xs_tcp_read_data(struct rpc_xprt *xprt, 1381 struct xdr_skb_reader *desc) 1382{ 1383 struct sock_xprt *transport = 1384 container_of(xprt, struct sock_xprt, xprt); 1385 1386 if (_xs_tcp_read_data(xprt, desc) == 0) 1387 xs_tcp_check_fraghdr(transport); 1388 else { 1389 /* 1390 * The transport_lock protects the request handling. 1391 * There's no need to hold it to update the tcp_flags. 1392 */ 1393 transport->tcp_flags &= ~TCP_RCV_COPY_DATA; 1394 } 1395} 1396 1397static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc) 1398{ 1399 size_t len; 1400 1401 len = transport->tcp_reclen - transport->tcp_offset; 1402 if (len > desc->count) 1403 len = desc->count; 1404 desc->count -= len; 1405 desc->offset += len; 1406 transport->tcp_offset += len; 1407 dprintk("RPC: discarded %Zu bytes\n", len); 1408 xs_tcp_check_fraghdr(transport); 1409} 1410 1411static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len) 1412{ 1413 struct rpc_xprt *xprt = rd_desc->arg.data; 1414 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 1415 struct xdr_skb_reader desc = { 1416 .skb = skb, 1417 .offset = offset, 1418 .count = len, 1419 }; 1420 1421 dprintk("RPC: xs_tcp_data_recv started\n"); 1422 do { 1423 trace_xs_tcp_data_recv(transport); 1424 /* Read in a new fragment marker if necessary */ 1425 /* Can we ever really expect to get completely empty fragments? 
*/ 1426 if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) { 1427 xs_tcp_read_fraghdr(xprt, &desc); 1428 continue; 1429 } 1430 /* Read in the xid if necessary */ 1431 if (transport->tcp_flags & TCP_RCV_COPY_XID) { 1432 xs_tcp_read_xid(transport, &desc); 1433 continue; 1434 } 1435 /* Read in the call/reply flag */ 1436 if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) { 1437 xs_tcp_read_calldir(transport, &desc); 1438 continue; 1439 } 1440 /* Read in the request data */ 1441 if (transport->tcp_flags & TCP_RCV_COPY_DATA) { 1442 xs_tcp_read_data(xprt, &desc); 1443 continue; 1444 } 1445 /* Skip over any trailing bytes on short reads */ 1446 xs_tcp_read_discard(transport, &desc); 1447 } while (desc.count); 1448 trace_xs_tcp_data_recv(transport); 1449 dprintk("RPC: xs_tcp_data_recv done\n"); 1450 return len - desc.count; 1451} 1452 1453static void xs_tcp_data_receive(struct sock_xprt *transport) 1454{ 1455 struct rpc_xprt *xprt = &transport->xprt; 1456 struct sock *sk; 1457 read_descriptor_t rd_desc = { 1458 .count = 2*1024*1024, 1459 .arg.data = xprt, 1460 }; 1461 unsigned long total = 0; 1462 int read = 0; 1463 1464 mutex_lock(&transport->recv_mutex); 1465 sk = transport->inet; 1466 if (sk == NULL) 1467 goto out; 1468 1469 /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */ 1470 for (;;) { 1471 lock_sock(sk); 1472 read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv); 1473 release_sock(sk); 1474 if (read <= 0) 1475 break; 1476 total += read; 1477 rd_desc.count = 65536; 1478 } 1479out: 1480 mutex_unlock(&transport->recv_mutex); 1481 trace_xs_tcp_data_ready(xprt, read, total); 1482} 1483 1484static void xs_tcp_data_receive_workfn(struct work_struct *work) 1485{ 1486 struct sock_xprt *transport = 1487 container_of(work, struct sock_xprt, recv_worker); 1488 xs_tcp_data_receive(transport); 1489} 1490 1491/** 1492 * xs_tcp_data_ready - "data ready" callback for TCP sockets 1493 * @sk: socket with data to read 1494 * 1495 */ 1496static void xs_tcp_data_ready(struct sock *sk) 1497{ 1498 struct sock_xprt *transport; 1499 struct rpc_xprt *xprt; 1500 1501 dprintk("RPC: xs_tcp_data_ready...\n"); 1502 1503 read_lock_bh(&sk->sk_callback_lock); 1504 if (!(xprt = xprt_from_sock(sk))) 1505 goto out; 1506 transport = container_of(xprt, struct sock_xprt, xprt); 1507 1508 /* Any data means we had a useful conversation, so 1509 * the we don't need to delay the next reconnect 1510 */ 1511 if (xprt->reestablish_timeout) 1512 xprt->reestablish_timeout = 0; 1513 queue_work(rpciod_workqueue, &transport->recv_worker); 1514 1515out: 1516 read_unlock_bh(&sk->sk_callback_lock); 1517} 1518 1519/** 1520 * xs_tcp_state_change - callback to handle TCP socket state changes 1521 * @sk: socket whose state has changed 1522 * 1523 */ 1524static void xs_tcp_state_change(struct sock *sk) 1525{ 1526 struct rpc_xprt *xprt; 1527 struct sock_xprt *transport; 1528 1529 read_lock_bh(&sk->sk_callback_lock); 1530 if (!(xprt = xprt_from_sock(sk))) 1531 goto out; 1532 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); 1533 dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n", 1534 sk->sk_state, xprt_connected(xprt), 1535 sock_flag(sk, SOCK_DEAD), 1536 sock_flag(sk, SOCK_ZAPPED), 1537 sk->sk_shutdown); 1538 1539 transport = container_of(xprt, struct sock_xprt, xprt); 1540 trace_rpc_socket_state_change(xprt, sk->sk_socket); 1541 switch (sk->sk_state) { 1542 case TCP_ESTABLISHED: 1543 spin_lock(&xprt->transport_lock); 1544 if (!xprt_test_and_set_connected(xprt)) { 1545 1546 /* Reset TCP record info */ 1547 
transport->tcp_offset = 0; 1548 transport->tcp_reclen = 0; 1549 transport->tcp_copied = 0; 1550 transport->tcp_flags = 1551 TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID; 1552 xprt->connect_cookie++; 1553 clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); 1554 xprt_clear_connecting(xprt); 1555 1556 xprt_wake_pending_tasks(xprt, -EAGAIN); 1557 } 1558 spin_unlock(&xprt->transport_lock); 1559 break; 1560 case TCP_FIN_WAIT1: 1561 /* The client initiated a shutdown of the socket */ 1562 xprt->connect_cookie++; 1563 xprt->reestablish_timeout = 0; 1564 set_bit(XPRT_CLOSING, &xprt->state); 1565 smp_mb__before_atomic(); 1566 clear_bit(XPRT_CONNECTED, &xprt->state); 1567 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); 1568 smp_mb__after_atomic(); 1569 break; 1570 case TCP_CLOSE_WAIT: 1571 /* The server initiated a shutdown of the socket */ 1572 xprt->connect_cookie++; 1573 clear_bit(XPRT_CONNECTED, &xprt->state); 1574 xs_tcp_force_close(xprt); 1575 case TCP_CLOSING: 1576 /* 1577 * If the server closed down the connection, make sure that 1578 * we back off before reconnecting 1579 */ 1580 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) 1581 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 1582 break; 1583 case TCP_LAST_ACK: 1584 set_bit(XPRT_CLOSING, &xprt->state); 1585 smp_mb__before_atomic(); 1586 clear_bit(XPRT_CONNECTED, &xprt->state); 1587 smp_mb__after_atomic(); 1588 break; 1589 case TCP_CLOSE: 1590 if (test_and_clear_bit(XPRT_SOCK_CONNECTING, 1591 &transport->sock_state)) 1592 xprt_clear_connecting(xprt); 1593 xs_sock_mark_closed(xprt); 1594 } 1595 out: 1596 read_unlock_bh(&sk->sk_callback_lock); 1597} 1598 1599static void xs_write_space(struct sock *sk) 1600{ 1601 struct socket_wq *wq; 1602 struct rpc_xprt *xprt; 1603 1604 if (!sk->sk_socket) 1605 return; 1606 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1607 1608 if (unlikely(!(xprt = xprt_from_sock(sk)))) 1609 return; 1610 rcu_read_lock(); 1611 wq = rcu_dereference(sk->sk_wq); 1612 if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0) 1613 goto out; 1614 1615 xprt_write_space(xprt); 1616out: 1617 rcu_read_unlock(); 1618} 1619 1620/** 1621 * xs_udp_write_space - callback invoked when socket buffer space 1622 * becomes available 1623 * @sk: socket whose state has changed 1624 * 1625 * Called when more output buffer space is available for this socket. 1626 * We try not to wake our writers until they can make "significant" 1627 * progress, otherwise we'll waste resources thrashing kernel_sendmsg 1628 * with a bunch of small requests. 1629 */ 1630static void xs_udp_write_space(struct sock *sk) 1631{ 1632 read_lock_bh(&sk->sk_callback_lock); 1633 1634 /* from net/core/sock.c:sock_def_write_space */ 1635 if (sock_writeable(sk)) 1636 xs_write_space(sk); 1637 1638 read_unlock_bh(&sk->sk_callback_lock); 1639} 1640 1641/** 1642 * xs_tcp_write_space - callback invoked when socket buffer space 1643 * becomes available 1644 * @sk: socket whose state has changed 1645 * 1646 * Called when more output buffer space is available for this socket. 1647 * We try not to wake our writers until they can make "significant" 1648 * progress, otherwise we'll waste resources thrashing kernel_sendmsg 1649 * with a bunch of small requests. 
 */
static void xs_tcp_write_space(struct sock *sk)
{
	read_lock_bh(&sk->sk_callback_lock);

	/* from net/core/stream.c:sk_stream_write_space */
	if (sk_stream_is_writeable(sk))
		xs_write_space(sk);

	read_unlock_bh(&sk->sk_callback_lock);
}

static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct sock *sk = transport->inet;

	if (transport->rcvsize) {
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
	}
	if (transport->sndsize) {
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
		sk->sk_write_space(sk);
	}
}

/**
 * xs_udp_set_buffer_size - set send and receive limits
 * @xprt: generic transport
 * @sndsize: requested size of send buffer, in bytes
 * @rcvsize: requested size of receive buffer, in bytes
 *
 * Set socket send and receive buffer size limits.
 */
static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	transport->sndsize = 0;
	if (sndsize)
		transport->sndsize = sndsize + 1024;
	transport->rcvsize = 0;
	if (rcvsize)
		transport->rcvsize = rcvsize + 1024;

	xs_udp_do_set_buffer_size(xprt);
}

/**
 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
 * @task: task that timed out
 *
 * Adjust the congestion window after a retransmit timeout has occurred.
 */
static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
}

static unsigned short xs_get_random_port(void)
{
	unsigned short range = xprt_max_resvport - xprt_min_resvport;
	unsigned short rand = (unsigned short) prandom_u32() % range;
	return rand + xprt_min_resvport;
}

/**
 * xs_sock_set_reuseport - set the socket's port reuse option
 * @sock: socket
 *
 * Note that this function has to be called on all sockets that share the
 * same port, and it must be called before binding.
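 *
 * (For example, a transport that reconnects from the same source port needs
 * SO_REUSEPORT set on both the old and the new socket before kernel_bind(),
 * otherwise the second bind is liable to fail with -EADDRINUSE.)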
 */
static void xs_sock_set_reuseport(struct socket *sock)
{
	int opt = 1;

	kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEPORT,
			(char *)&opt, sizeof(opt));
}

static unsigned short xs_sock_getport(struct socket *sock)
{
	struct sockaddr_storage buf;
	int buflen;
	unsigned short port = 0;

	if (kernel_getsockname(sock, (struct sockaddr *)&buf, &buflen) < 0)
		goto out;
	switch (buf.ss_family) {
	case AF_INET6:
		port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port);
		break;
	case AF_INET:
		port = ntohs(((struct sockaddr_in *)&buf)->sin_port);
	}
out:
	return port;
}

/**
 * xs_set_port - reset the port number in the remote endpoint address
 * @xprt: generic transport
 * @port: new port number
 *
 */
static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
{
	dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);

	rpc_set_port(xs_addr(xprt), port);
	xs_update_peer_port(xprt);
}

static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
{
	if (transport->srcport == 0)
		transport->srcport = xs_sock_getport(sock);
}

static unsigned short xs_get_srcport(struct sock_xprt *transport)
{
	unsigned short port = transport->srcport;

	if (port == 0 && transport->xprt.resvport)
		port = xs_get_random_port();
	return port;
}

static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
{
	if (transport->srcport != 0)
		transport->srcport = 0;
	if (!transport->xprt.resvport)
		return 0;
	if (port <= xprt_min_resvport || port > xprt_max_resvport)
		return xprt_max_resvport;
	return --port;
}

static int xs_bind(struct sock_xprt *transport, struct socket *sock)
{
	struct sockaddr_storage myaddr;
	int err, nloop = 0;
	unsigned short port = xs_get_srcport(transport);
	unsigned short last;

	/*
	 * If we are asking for any ephemeral port (i.e. port == 0 &&
	 * transport->xprt.resvport == 0), don't bind.  Let the local
	 * port selection happen implicitly when the socket is used
	 * (for example at connect time).
	 *
	 * This ensures that we can continue to establish TCP
	 * connections even when all local ephemeral ports are already
	 * a part of some TCP connection.  This makes no difference
	 * for UDP sockets, but also doesn't harm them.
	 *
	 * If we're asking for any reserved port (i.e. port == 0 &&
	 * transport->xprt.resvport == 1) xs_get_srcport above will
	 * ensure that port is non-zero and we will bind as needed.
	 */
	if (port == 0)
		return 0;

	memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
	do {
		rpc_set_port((struct sockaddr *)&myaddr, port);
		err = kernel_bind(sock, (struct sockaddr *)&myaddr,
				transport->xprt.addrlen);
		if (err == 0) {
			transport->srcport = port;
			break;
		}
		last = port;
		port = xs_next_srcport(transport, port);
		if (port > last)
			nloop++;
	} while (err == -EADDRINUSE && nloop != 2);

	if (myaddr.ss_family == AF_INET)
		dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__,
				&((struct sockaddr_in *)&myaddr)->sin_addr,
				port, err ? "failed" : "ok", err);
	else
		dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__,
				&((struct sockaddr_in6 *)&myaddr)->sin6_addr,
				port, err ?
"failed" : "ok", err); 1839 return err; 1840} 1841 1842/* 1843 * We don't support autobind on AF_LOCAL sockets 1844 */ 1845static void xs_local_rpcbind(struct rpc_task *task) 1846{ 1847 rcu_read_lock(); 1848 xprt_set_bound(rcu_dereference(task->tk_client->cl_xprt)); 1849 rcu_read_unlock(); 1850} 1851 1852static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port) 1853{ 1854} 1855 1856#ifdef CONFIG_DEBUG_LOCK_ALLOC 1857static struct lock_class_key xs_key[2]; 1858static struct lock_class_key xs_slock_key[2]; 1859 1860static inline void xs_reclassify_socketu(struct socket *sock) 1861{ 1862 struct sock *sk = sock->sk; 1863 1864 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC", 1865 &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]); 1866} 1867 1868static inline void xs_reclassify_socket4(struct socket *sock) 1869{ 1870 struct sock *sk = sock->sk; 1871 1872 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC", 1873 &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]); 1874} 1875 1876static inline void xs_reclassify_socket6(struct socket *sock) 1877{ 1878 struct sock *sk = sock->sk; 1879 1880 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC", 1881 &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]); 1882} 1883 1884static inline void xs_reclassify_socket(int family, struct socket *sock) 1885{ 1886 WARN_ON_ONCE(sock_owned_by_user(sock->sk)); 1887 if (sock_owned_by_user(sock->sk)) 1888 return; 1889 1890 switch (family) { 1891 case AF_LOCAL: 1892 xs_reclassify_socketu(sock); 1893 break; 1894 case AF_INET: 1895 xs_reclassify_socket4(sock); 1896 break; 1897 case AF_INET6: 1898 xs_reclassify_socket6(sock); 1899 break; 1900 } 1901} 1902#else 1903static inline void xs_reclassify_socketu(struct socket *sock) 1904{ 1905} 1906 1907static inline void xs_reclassify_socket4(struct socket *sock) 1908{ 1909} 1910 1911static inline void xs_reclassify_socket6(struct socket *sock) 1912{ 1913} 1914 1915static inline void xs_reclassify_socket(int family, struct socket *sock) 1916{ 1917} 1918#endif 1919 1920static void xs_dummy_setup_socket(struct work_struct *work) 1921{ 1922} 1923 1924static struct socket *xs_create_sock(struct rpc_xprt *xprt, 1925 struct sock_xprt *transport, int family, int type, 1926 int protocol, bool reuseport) 1927{ 1928 struct socket *sock; 1929 int err; 1930 1931 err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1); 1932 if (err < 0) { 1933 dprintk("RPC: can't create %d transport socket (%d).\n", 1934 protocol, -err); 1935 goto out; 1936 } 1937 xs_reclassify_socket(family, sock); 1938 1939 if (reuseport) 1940 xs_sock_set_reuseport(sock); 1941 1942 err = xs_bind(transport, sock); 1943 if (err) { 1944 sock_release(sock); 1945 goto out; 1946 } 1947 1948 return sock; 1949out: 1950 return ERR_PTR(err); 1951} 1952 1953static int xs_local_finish_connecting(struct rpc_xprt *xprt, 1954 struct socket *sock) 1955{ 1956 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, 1957 xprt); 1958 1959 if (!transport->inet) { 1960 struct sock *sk = sock->sk; 1961 1962 write_lock_bh(&sk->sk_callback_lock); 1963 1964 xs_save_old_callbacks(transport, sk); 1965 1966 sk->sk_user_data = xprt; 1967 sk->sk_data_ready = xs_data_ready; 1968 sk->sk_write_space = xs_udp_write_space; 1969 sk->sk_error_report = xs_error_report; 1970 sk->sk_allocation = GFP_NOIO; 1971 1972 xprt_clear_connected(xprt); 1973 1974 /* Reset to new socket */ 1975 transport->sock = sock; 1976 transport->inet = sk; 1977 1978 write_unlock_bh(&sk->sk_callback_lock); 1979 } 1980 1981 /* Tell 
the socket layer to start connecting... */ 1982 xprt->stat.connect_count++; 1983 xprt->stat.connect_start = jiffies; 1984 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0); 1985} 1986 1987/** 1988 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint 1989 * @transport: socket transport to connect 1990 */ 1991static int xs_local_setup_socket(struct sock_xprt *transport) 1992{ 1993 struct rpc_xprt *xprt = &transport->xprt; 1994 struct socket *sock; 1995 int status = -EIO; 1996 1997 status = __sock_create(xprt->xprt_net, AF_LOCAL, 1998 SOCK_STREAM, 0, &sock, 1); 1999 if (status < 0) { 2000 dprintk("RPC: can't create AF_LOCAL " 2001 "transport socket (%d).\n", -status); 2002 goto out; 2003 } 2004 xs_reclassify_socketu(sock); 2005 2006 dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n", 2007 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); 2008 2009 status = xs_local_finish_connecting(xprt, sock); 2010 trace_rpc_socket_connect(xprt, sock, status); 2011 switch (status) { 2012 case 0: 2013 dprintk("RPC: xprt %p connected to %s\n", 2014 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); 2015 xprt_set_connected(xprt); 2016 case -ENOBUFS: 2017 break; 2018 case -ENOENT: 2019 dprintk("RPC: xprt %p: socket %s does not exist\n", 2020 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); 2021 break; 2022 case -ECONNREFUSED: 2023 dprintk("RPC: xprt %p: connection refused for %s\n", 2024 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); 2025 break; 2026 default: 2027 printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n", 2028 __func__, -status, 2029 xprt->address_strings[RPC_DISPLAY_ADDR]); 2030 } 2031 2032out: 2033 xprt_clear_connecting(xprt); 2034 xprt_wake_pending_tasks(xprt, status); 2035 return status; 2036} 2037 2038static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task) 2039{ 2040 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2041 int ret; 2042 2043 if (RPC_IS_ASYNC(task)) { 2044 /* 2045 * We want the AF_LOCAL connect to be resolved in the 2046 * filesystem namespace of the process making the rpc 2047 * call. Thus we connect synchronously. 2048 * 2049 * If we want to support asynchronous AF_LOCAL calls, 2050 * we'll need to figure out how to pass a namespace to 2051 * connect. 2052 */ 2053 rpc_exit(task, -ENOTCONN); 2054 return; 2055 } 2056 ret = xs_local_setup_socket(transport); 2057 if (ret && !RPC_IS_SOFTCONN(task)) 2058 msleep_interruptible(15000); 2059} 2060 2061#if IS_ENABLED(CONFIG_SUNRPC_SWAP) 2062/* 2063 * Note that this should be called with XPRT_LOCKED held (or when we otherwise 2064 * know that we have exclusive access to the socket), to guard against 2065 * races with xs_reset_transport. 2066 */ 2067static void xs_set_memalloc(struct rpc_xprt *xprt) 2068{ 2069 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, 2070 xprt); 2071 2072 /* 2073 * If there's no sock, then we have nothing to set. The 2074 * reconnecting process will get it for us. 2075 */ 2076 if (!transport->inet) 2077 return; 2078 if (atomic_read(&xprt->swapper)) 2079 sk_set_memalloc(transport->inet); 2080} 2081 2082/** 2083 * xs_enable_swap - Tag this transport as being used for swap. 2084 * @xprt: transport to tag 2085 * 2086 * Take a reference to this transport on behalf of the rpc_clnt, and 2087 * optionally mark it for swapping if it wasn't already. 
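 *
 * Returns 0 on success, or -ERESTARTSYS if a fatal signal is received
 * while waiting for the transport lock (see the wait_on_bit_lock() call
 * below).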
2088 */ 2089static int 2090xs_enable_swap(struct rpc_xprt *xprt) 2091{ 2092 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt); 2093 2094 if (atomic_inc_return(&xprt->swapper) != 1) 2095 return 0; 2096 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) 2097 return -ERESTARTSYS; 2098 if (xs->inet) 2099 sk_set_memalloc(xs->inet); 2100 xprt_release_xprt(xprt, NULL); 2101 return 0; 2102} 2103 2104/** 2105 * xs_disable_swap - Untag this transport as being used for swap. 2106 * @xprt: transport to tag 2107 * 2108 * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the 2109 * swapper refcount goes to 0, untag the socket as a memalloc socket. 2110 */ 2111static void 2112xs_disable_swap(struct rpc_xprt *xprt) 2113{ 2114 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt); 2115 2116 if (!atomic_dec_and_test(&xprt->swapper)) 2117 return; 2118 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) 2119 return; 2120 if (xs->inet) 2121 sk_clear_memalloc(xs->inet); 2122 xprt_release_xprt(xprt, NULL); 2123} 2124#else 2125static void xs_set_memalloc(struct rpc_xprt *xprt) 2126{ 2127} 2128 2129static int 2130xs_enable_swap(struct rpc_xprt *xprt) 2131{ 2132 return -EINVAL; 2133} 2134 2135static void 2136xs_disable_swap(struct rpc_xprt *xprt) 2137{ 2138} 2139#endif 2140 2141static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) 2142{ 2143 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2144 2145 if (!transport->inet) { 2146 struct sock *sk = sock->sk; 2147 2148 write_lock_bh(&sk->sk_callback_lock); 2149 2150 xs_save_old_callbacks(transport, sk); 2151 2152 sk->sk_user_data = xprt; 2153 sk->sk_data_ready = xs_data_ready; 2154 sk->sk_write_space = xs_udp_write_space; 2155 sk->sk_allocation = GFP_NOIO; 2156 2157 xprt_set_connected(xprt); 2158 2159 /* Reset to new socket */ 2160 transport->sock = sock; 2161 transport->inet = sk; 2162 2163 xs_set_memalloc(xprt); 2164 2165 write_unlock_bh(&sk->sk_callback_lock); 2166 } 2167 xs_udp_do_set_buffer_size(xprt); 2168} 2169 2170static void xs_udp_setup_socket(struct work_struct *work) 2171{ 2172 struct sock_xprt *transport = 2173 container_of(work, struct sock_xprt, connect_worker.work); 2174 struct rpc_xprt *xprt = &transport->xprt; 2175 struct socket *sock = transport->sock; 2176 int status = -EIO; 2177 2178 sock = xs_create_sock(xprt, transport, 2179 xs_addr(xprt)->sa_family, SOCK_DGRAM, 2180 IPPROTO_UDP, false); 2181 if (IS_ERR(sock)) 2182 goto out; 2183 2184 dprintk("RPC: worker connecting xprt %p via %s to " 2185 "%s (port %s)\n", xprt, 2186 xprt->address_strings[RPC_DISPLAY_PROTO], 2187 xprt->address_strings[RPC_DISPLAY_ADDR], 2188 xprt->address_strings[RPC_DISPLAY_PORT]); 2189 2190 xs_udp_finish_connecting(xprt, sock); 2191 trace_rpc_socket_connect(xprt, sock, 0); 2192 status = 0; 2193out: 2194 xprt_unlock_connect(xprt, transport); 2195 xprt_clear_connecting(xprt); 2196 xprt_wake_pending_tasks(xprt, status); 2197} 2198 2199/** 2200 * xs_tcp_shutdown - gracefully shut down a TCP socket 2201 * @xprt: transport 2202 * 2203 * Initiates a graceful shutdown of the TCP socket by calling the 2204 * equivalent of shutdown(SHUT_RDWR); 2205 */ 2206static void xs_tcp_shutdown(struct rpc_xprt *xprt) 2207{ 2208 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2209 struct socket *sock = transport->sock; 2210 2211 if (sock == NULL) 2212 return; 2213 if (xprt_connected(xprt)) { 2214 kernel_sock_shutdown(sock, SHUT_RDWR); 2215 
trace_rpc_socket_shutdown(xprt, sock); 2216 } else 2217 xs_reset_transport(transport); 2218} 2219 2220static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) 2221{ 2222 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2223 int ret = -ENOTCONN; 2224 2225 if (!transport->inet) { 2226 struct sock *sk = sock->sk; 2227 unsigned int keepidle = xprt->timeout->to_initval / HZ; 2228 unsigned int keepcnt = xprt->timeout->to_retries + 1; 2229 unsigned int opt_on = 1; 2230 unsigned int timeo; 2231 2232 /* TCP Keepalive options */ 2233 kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, 2234 (char *)&opt_on, sizeof(opt_on)); 2235 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, 2236 (char *)&keepidle, sizeof(keepidle)); 2237 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL, 2238 (char *)&keepidle, sizeof(keepidle)); 2239 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, 2240 (char *)&keepcnt, sizeof(keepcnt)); 2241 2242 /* TCP user timeout (see RFC5482) */ 2243 timeo = jiffies_to_msecs(xprt->timeout->to_initval) * 2244 (xprt->timeout->to_retries + 1); 2245 kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT, 2246 (char *)&timeo, sizeof(timeo)); 2247 2248 write_lock_bh(&sk->sk_callback_lock); 2249 2250 xs_save_old_callbacks(transport, sk); 2251 2252 sk->sk_user_data = xprt; 2253 sk->sk_data_ready = xs_tcp_data_ready; 2254 sk->sk_state_change = xs_tcp_state_change; 2255 sk->sk_write_space = xs_tcp_write_space; 2256 sk->sk_error_report = xs_error_report; 2257 sk->sk_allocation = GFP_NOIO; 2258 2259 /* socket options */ 2260 sock_reset_flag(sk, SOCK_LINGER); 2261 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF; 2262 2263 xprt_clear_connected(xprt); 2264 2265 /* Reset to new socket */ 2266 transport->sock = sock; 2267 transport->inet = sk; 2268 2269 write_unlock_bh(&sk->sk_callback_lock); 2270 } 2271 2272 if (!xprt_bound(xprt)) 2273 goto out; 2274 2275 xs_set_memalloc(xprt); 2276 2277 /* Tell the socket layer to start connecting... */ 2278 xprt->stat.connect_count++; 2279 xprt->stat.connect_start = jiffies; 2280 set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); 2281 ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); 2282 switch (ret) { 2283 case 0: 2284 xs_set_srcport(transport, sock); 2285 case -EINPROGRESS: 2286 /* SYN_SENT! */ 2287 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) 2288 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 2289 } 2290out: 2291 return ret; 2292} 2293 2294/** 2295 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint 2296 * 2297 * Invoked by a work queue tasklet. 
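 * (More precisely, the connect worker runs as delayed work on the rpciod
 * workqueue; see the queue_delayed_work() calls in xs_connect() below.)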
2298 */ 2299static void xs_tcp_setup_socket(struct work_struct *work) 2300{ 2301 struct sock_xprt *transport = 2302 container_of(work, struct sock_xprt, connect_worker.work); 2303 struct socket *sock = transport->sock; 2304 struct rpc_xprt *xprt = &transport->xprt; 2305 int status = -EIO; 2306 2307 if (!sock) { 2308 sock = xs_create_sock(xprt, transport, 2309 xs_addr(xprt)->sa_family, SOCK_STREAM, 2310 IPPROTO_TCP, true); 2311 if (IS_ERR(sock)) { 2312 status = PTR_ERR(sock); 2313 goto out; 2314 } 2315 } 2316 2317 dprintk("RPC: worker connecting xprt %p via %s to " 2318 "%s (port %s)\n", xprt, 2319 xprt->address_strings[RPC_DISPLAY_PROTO], 2320 xprt->address_strings[RPC_DISPLAY_ADDR], 2321 xprt->address_strings[RPC_DISPLAY_PORT]); 2322 2323 status = xs_tcp_finish_connecting(xprt, sock); 2324 trace_rpc_socket_connect(xprt, sock, status); 2325 dprintk("RPC: %p connect status %d connected %d sock state %d\n", 2326 xprt, -status, xprt_connected(xprt), 2327 sock->sk->sk_state); 2328 switch (status) { 2329 default: 2330 printk("%s: connect returned unhandled error %d\n", 2331 __func__, status); 2332 case -EADDRNOTAVAIL: 2333 /* We're probably in TIME_WAIT. Get rid of existing socket, 2334 * and retry 2335 */ 2336 xs_tcp_force_close(xprt); 2337 break; 2338 case 0: 2339 case -EINPROGRESS: 2340 case -EALREADY: 2341 xprt_unlock_connect(xprt, transport); 2342 return; 2343 case -EINVAL: 2344 /* Happens, for instance, if the user specified a link 2345 * local IPv6 address without a scope-id. 2346 */ 2347 case -ECONNREFUSED: 2348 case -ECONNRESET: 2349 case -ENETUNREACH: 2350 case -EADDRINUSE: 2351 case -ENOBUFS: 2352 /* retry with existing socket, after a delay */ 2353 xs_tcp_force_close(xprt); 2354 goto out; 2355 } 2356 status = -EAGAIN; 2357out: 2358 xprt_unlock_connect(xprt, transport); 2359 xprt_clear_connecting(xprt); 2360 xprt_wake_pending_tasks(xprt, status); 2361} 2362 2363/** 2364 * xs_connect - connect a socket to a remote endpoint 2365 * @xprt: pointer to transport structure 2366 * @task: address of RPC task that manages state of connect request 2367 * 2368 * TCP: If the remote end dropped the connection, delay reconnecting. 2369 * 2370 * UDP socket connects are synchronous, but we use a work queue anyway 2371 * to guarantee that even unprivileged user processes can set up a 2372 * socket on a privileged port. 2373 * 2374 * If a UDP socket connect fails, the delay behavior here prevents 2375 * retry floods (hard mounts). 
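 *
 * When a socket already exists, the reconnect is delayed by
 * xprt->reestablish_timeout, which is doubled after each attempt and kept
 * within the XS_TCP_INIT_REEST_TO..XS_TCP_MAX_REEST_TO range.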
2376 */ 2377static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task) 2378{ 2379 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2380 2381 WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport)); 2382 2383 if (transport->sock != NULL) { 2384 dprintk("RPC: xs_connect delayed xprt %p for %lu " 2385 "seconds\n", 2386 xprt, xprt->reestablish_timeout / HZ); 2387 2388 /* Start by resetting any existing state */ 2389 xs_reset_transport(transport); 2390 2391 queue_delayed_work(rpciod_workqueue, 2392 &transport->connect_worker, 2393 xprt->reestablish_timeout); 2394 xprt->reestablish_timeout <<= 1; 2395 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) 2396 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 2397 if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) 2398 xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; 2399 } else { 2400 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); 2401 queue_delayed_work(rpciod_workqueue, 2402 &transport->connect_worker, 0); 2403 } 2404} 2405 2406/** 2407 * xs_local_print_stats - display AF_LOCAL socket-specific stats 2408 * @xprt: rpc_xprt struct containing statistics 2409 * @seq: output file 2410 * 2411 */ 2412static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 2413{ 2414 long idle_time = 0; 2415 2416 if (xprt_connected(xprt)) 2417 idle_time = (long)(jiffies - xprt->last_used) / HZ; 2418 2419 seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu " 2420 "%llu %llu %lu %llu %llu\n", 2421 xprt->stat.bind_count, 2422 xprt->stat.connect_count, 2423 xprt->stat.connect_time, 2424 idle_time, 2425 xprt->stat.sends, 2426 xprt->stat.recvs, 2427 xprt->stat.bad_xids, 2428 xprt->stat.req_u, 2429 xprt->stat.bklog_u, 2430 xprt->stat.max_slots, 2431 xprt->stat.sending_u, 2432 xprt->stat.pending_u); 2433} 2434 2435/** 2436 * xs_udp_print_stats - display UDP socket-specific stats 2437 * @xprt: rpc_xprt struct containing statistics 2438 * @seq: output file 2439 * 2440 */ 2441static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 2442{ 2443 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2444 2445 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu " 2446 "%lu %llu %llu\n", 2447 transport->srcport, 2448 xprt->stat.bind_count, 2449 xprt->stat.sends, 2450 xprt->stat.recvs, 2451 xprt->stat.bad_xids, 2452 xprt->stat.req_u, 2453 xprt->stat.bklog_u, 2454 xprt->stat.max_slots, 2455 xprt->stat.sending_u, 2456 xprt->stat.pending_u); 2457} 2458 2459/** 2460 * xs_tcp_print_stats - display TCP socket-specific stats 2461 * @xprt: rpc_xprt struct containing statistics 2462 * @seq: output file 2463 * 2464 */ 2465static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 2466{ 2467 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2468 long idle_time = 0; 2469 2470 if (xprt_connected(xprt)) 2471 idle_time = (long)(jiffies - xprt->last_used) / HZ; 2472 2473 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu " 2474 "%llu %llu %lu %llu %llu\n", 2475 transport->srcport, 2476 xprt->stat.bind_count, 2477 xprt->stat.connect_count, 2478 xprt->stat.connect_time, 2479 idle_time, 2480 xprt->stat.sends, 2481 xprt->stat.recvs, 2482 xprt->stat.bad_xids, 2483 xprt->stat.req_u, 2484 xprt->stat.bklog_u, 2485 xprt->stat.max_slots, 2486 xprt->stat.sending_u, 2487 xprt->stat.pending_u); 2488} 2489 2490/* 2491 * Allocate a bunch of pages for a scratch buffer for the rpc code.
The reason 2492 * we allocate pages instead of doing a kmalloc like rpc_malloc is that we want 2493 * to use the server side send routines. 2494 */ 2495static void *bc_malloc(struct rpc_task *task, size_t size) 2496{ 2497 struct page *page; 2498 struct rpc_buffer *buf; 2499 2500 WARN_ON_ONCE(size > PAGE_SIZE - sizeof(struct rpc_buffer)); 2501 if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) 2502 return NULL; 2503 2504 page = alloc_page(GFP_KERNEL); 2505 if (!page) 2506 return NULL; 2507 2508 buf = page_address(page); 2509 buf->len = PAGE_SIZE; 2510 2511 return buf->data; 2512} 2513 2514/* 2515 * Free the space allocated in the bc_malloc routine 2516 */ 2517static void bc_free(void *buffer) 2518{ 2519 struct rpc_buffer *buf; 2520 2521 if (!buffer) 2522 return; 2523 2524 buf = container_of(buffer, struct rpc_buffer, data); 2525 free_page((unsigned long)buf); 2526} 2527 2528/* 2529 * Use the svc_sock to send the callback. Must be called with svsk->sk_mutex 2530 * held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request. 2531 */ 2532static int bc_sendto(struct rpc_rqst *req) 2533{ 2534 int len; 2535 struct xdr_buf *xbufp = &req->rq_snd_buf; 2536 struct rpc_xprt *xprt = req->rq_xprt; 2537 struct sock_xprt *transport = 2538 container_of(xprt, struct sock_xprt, xprt); 2539 struct socket *sock = transport->sock; 2540 unsigned long headoff; 2541 unsigned long tailoff; 2542 2543 xs_encode_stream_record_marker(xbufp); 2544 2545 tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK; 2546 headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK; 2547 len = svc_send_common(sock, xbufp, 2548 virt_to_page(xbufp->head[0].iov_base), headoff, 2549 xbufp->tail[0].iov_base, tailoff); 2550 2551 if (len != xbufp->len) { 2552 printk(KERN_NOTICE "Error sending entire callback!\n"); 2553 len = -EAGAIN; 2554 } 2555 2556 return len; 2557} 2558 2559/* 2560 * The send routine. Borrows from svc_send 2561 */ 2562static int bc_send_request(struct rpc_task *task) 2563{ 2564 struct rpc_rqst *req = task->tk_rqstp; 2565 struct svc_xprt *xprt; 2566 int len; 2567 2568 dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid)); 2569 /* 2570 * Get the server socket associated with this callback xprt 2571 */ 2572 xprt = req->rq_xprt->bc_xprt; 2573 2574 /* 2575 * Grab the mutex to serialize data as the connection is shared 2576 * with the fore channel 2577 */ 2578 if (!mutex_trylock(&xprt->xpt_mutex)) { 2579 rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL); 2580 if (!mutex_trylock(&xprt->xpt_mutex)) 2581 return -EAGAIN; 2582 rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task); 2583 } 2584 if (test_bit(XPT_DEAD, &xprt->xpt_flags)) 2585 len = -ENOTCONN; 2586 else 2587 len = bc_sendto(req); 2588 mutex_unlock(&xprt->xpt_mutex); 2589 2590 if (len > 0) 2591 len = 0; 2592 2593 return len; 2594} 2595 2596/* 2597 * The close routine. Since this is client initiated, we do nothing 2598 */ 2599 2600static void bc_close(struct rpc_xprt *xprt) 2601{ 2602} 2603 2604/* 2605 * The xprt destroy routine.
Again, because this connection is client 2606 * initiated, we do nothing 2607 */ 2608 2609static void bc_destroy(struct rpc_xprt *xprt) 2610{ 2611 dprintk("RPC: bc_destroy xprt %p\n", xprt); 2612 2613 xs_xprt_free(xprt); 2614 module_put(THIS_MODULE); 2615} 2616 2617static struct rpc_xprt_ops xs_local_ops = { 2618 .reserve_xprt = xprt_reserve_xprt, 2619 .release_xprt = xs_tcp_release_xprt, 2620 .alloc_slot = xprt_alloc_slot, 2621 .rpcbind = xs_local_rpcbind, 2622 .set_port = xs_local_set_port, 2623 .connect = xs_local_connect, 2624 .buf_alloc = rpc_malloc, 2625 .buf_free = rpc_free, 2626 .send_request = xs_local_send_request, 2627 .set_retrans_timeout = xprt_set_retrans_timeout_def, 2628 .close = xs_close, 2629 .destroy = xs_destroy, 2630 .print_stats = xs_local_print_stats, 2631 .enable_swap = xs_enable_swap, 2632 .disable_swap = xs_disable_swap, 2633}; 2634 2635static struct rpc_xprt_ops xs_udp_ops = { 2636 .set_buffer_size = xs_udp_set_buffer_size, 2637 .reserve_xprt = xprt_reserve_xprt_cong, 2638 .release_xprt = xprt_release_xprt_cong, 2639 .alloc_slot = xprt_alloc_slot, 2640 .rpcbind = rpcb_getport_async, 2641 .set_port = xs_set_port, 2642 .connect = xs_connect, 2643 .buf_alloc = rpc_malloc, 2644 .buf_free = rpc_free, 2645 .send_request = xs_udp_send_request, 2646 .set_retrans_timeout = xprt_set_retrans_timeout_rtt, 2647 .timer = xs_udp_timer, 2648 .release_request = xprt_release_rqst_cong, 2649 .close = xs_close, 2650 .destroy = xs_destroy, 2651 .print_stats = xs_udp_print_stats, 2652 .enable_swap = xs_enable_swap, 2653 .disable_swap = xs_disable_swap, 2654 .inject_disconnect = xs_inject_disconnect, 2655}; 2656 2657static struct rpc_xprt_ops xs_tcp_ops = { 2658 .reserve_xprt = xprt_reserve_xprt, 2659 .release_xprt = xs_tcp_release_xprt, 2660 .alloc_slot = xprt_lock_and_alloc_slot, 2661 .rpcbind = rpcb_getport_async, 2662 .set_port = xs_set_port, 2663 .connect = xs_connect, 2664 .buf_alloc = rpc_malloc, 2665 .buf_free = rpc_free, 2666 .send_request = xs_tcp_send_request, 2667 .set_retrans_timeout = xprt_set_retrans_timeout_def, 2668 .close = xs_tcp_shutdown, 2669 .destroy = xs_destroy, 2670 .print_stats = xs_tcp_print_stats, 2671 .enable_swap = xs_enable_swap, 2672 .disable_swap = xs_disable_swap, 2673 .inject_disconnect = xs_inject_disconnect, 2674#ifdef CONFIG_SUNRPC_BACKCHANNEL 2675 .bc_setup = xprt_setup_bc, 2676 .bc_up = xs_tcp_bc_up, 2677 .bc_free_rqst = xprt_free_bc_rqst, 2678 .bc_destroy = xprt_destroy_bc, 2679#endif 2680}; 2681 2682/* 2683 * The rpc_xprt_ops for the server backchannel 2684 */ 2685 2686static struct rpc_xprt_ops bc_tcp_ops = { 2687 .reserve_xprt = xprt_reserve_xprt, 2688 .release_xprt = xprt_release_xprt, 2689 .alloc_slot = xprt_alloc_slot, 2690 .buf_alloc = bc_malloc, 2691 .buf_free = bc_free, 2692 .send_request = bc_send_request, 2693 .set_retrans_timeout = xprt_set_retrans_timeout_def, 2694 .close = bc_close, 2695 .destroy = bc_destroy, 2696 .print_stats = xs_tcp_print_stats, 2697 .enable_swap = xs_enable_swap, 2698 .disable_swap = xs_disable_swap, 2699 .inject_disconnect = xs_inject_disconnect, 2700}; 2701 2702static int xs_init_anyaddr(const int family, struct sockaddr *sap) 2703{ 2704 static const struct sockaddr_in sin = { 2705 .sin_family = AF_INET, 2706 .sin_addr.s_addr = htonl(INADDR_ANY), 2707 }; 2708 static const struct sockaddr_in6 sin6 = { 2709 .sin6_family = AF_INET6, 2710 .sin6_addr = IN6ADDR_ANY_INIT, 2711 }; 2712 2713 switch (family) { 2714 case AF_LOCAL: 2715 break; 2716 case AF_INET: 2717 memcpy(sap, &sin, sizeof(sin)); 2718 break; 2719 case 
AF_INET6: 2720 memcpy(sap, &sin6, sizeof(sin6)); 2721 break; 2722 default: 2723 dprintk("RPC: %s: Bad address family\n", __func__); 2724 return -EAFNOSUPPORT; 2725 } 2726 return 0; 2727} 2728 2729static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args, 2730 unsigned int slot_table_size, 2731 unsigned int max_slot_table_size) 2732{ 2733 struct rpc_xprt *xprt; 2734 struct sock_xprt *new; 2735 2736 if (args->addrlen > sizeof(xprt->addr)) { 2737 dprintk("RPC: xs_setup_xprt: address too large\n"); 2738 return ERR_PTR(-EBADF); 2739 } 2740 2741 xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size, 2742 max_slot_table_size); 2743 if (xprt == NULL) { 2744 dprintk("RPC: xs_setup_xprt: couldn't allocate " 2745 "rpc_xprt\n"); 2746 return ERR_PTR(-ENOMEM); 2747 } 2748 2749 new = container_of(xprt, struct sock_xprt, xprt); 2750 mutex_init(&new->recv_mutex); 2751 memcpy(&xprt->addr, args->dstaddr, args->addrlen); 2752 xprt->addrlen = args->addrlen; 2753 if (args->srcaddr) 2754 memcpy(&new->srcaddr, args->srcaddr, args->addrlen); 2755 else { 2756 int err; 2757 err = xs_init_anyaddr(args->dstaddr->sa_family, 2758 (struct sockaddr *)&new->srcaddr); 2759 if (err != 0) { 2760 xprt_free(xprt); 2761 return ERR_PTR(err); 2762 } 2763 } 2764 2765 return xprt; 2766} 2767 2768static const struct rpc_timeout xs_local_default_timeout = { 2769 .to_initval = 10 * HZ, 2770 .to_maxval = 10 * HZ, 2771 .to_retries = 2, 2772}; 2773 2774/** 2775 * xs_setup_local - Set up transport to use an AF_LOCAL socket 2776 * @args: rpc transport creation arguments 2777 * 2778 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP 2779 */ 2780static struct rpc_xprt *xs_setup_local(struct xprt_create *args) 2781{ 2782 struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr; 2783 struct sock_xprt *transport; 2784 struct rpc_xprt *xprt; 2785 struct rpc_xprt *ret; 2786 2787 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, 2788 xprt_max_tcp_slot_table_entries); 2789 if (IS_ERR(xprt)) 2790 return xprt; 2791 transport = container_of(xprt, struct sock_xprt, xprt); 2792 2793 xprt->prot = 0; 2794 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); 2795 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 2796 2797 xprt->bind_timeout = XS_BIND_TO; 2798 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 2799 xprt->idle_timeout = XS_IDLE_DISC_TO; 2800 2801 xprt->ops = &xs_local_ops; 2802 xprt->timeout = &xs_local_default_timeout; 2803 2804 INIT_WORK(&transport->recv_worker, xs_local_data_receive_workfn); 2805 INIT_DELAYED_WORK(&transport->connect_worker, 2806 xs_dummy_setup_socket); 2807 2808 switch (sun->sun_family) { 2809 case AF_LOCAL: 2810 if (sun->sun_path[0] != '/') { 2811 dprintk("RPC: bad AF_LOCAL address: %s\n", 2812 sun->sun_path); 2813 ret = ERR_PTR(-EINVAL); 2814 goto out_err; 2815 } 2816 xprt_set_bound(xprt); 2817 xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL); 2818 ret = ERR_PTR(xs_local_setup_socket(transport)); 2819 if (ret) 2820 goto out_err; 2821 break; 2822 default: 2823 ret = ERR_PTR(-EAFNOSUPPORT); 2824 goto out_err; 2825 } 2826 2827 dprintk("RPC: set up xprt to %s via AF_LOCAL\n", 2828 xprt->address_strings[RPC_DISPLAY_ADDR]); 2829 2830 if (try_module_get(THIS_MODULE)) 2831 return xprt; 2832 ret = ERR_PTR(-EINVAL); 2833out_err: 2834 xs_xprt_free(xprt); 2835 return ret; 2836} 2837 2838static const struct rpc_timeout xs_udp_default_timeout = { 2839 .to_initval = 5 * HZ, 2840 .to_maxval = 30 * HZ, 2841 .to_increment = 5 * HZ, 2842 .to_retries = 5, 2843}; 2844 2845/** 2846 * xs_setup_udp - Set up 
transport to use a UDP socket 2847 * @args: rpc transport creation arguments 2848 * 2849 */ 2850static struct rpc_xprt *xs_setup_udp(struct xprt_create *args) 2851{ 2852 struct sockaddr *addr = args->dstaddr; 2853 struct rpc_xprt *xprt; 2854 struct sock_xprt *transport; 2855 struct rpc_xprt *ret; 2856 2857 xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries, 2858 xprt_udp_slot_table_entries); 2859 if (IS_ERR(xprt)) 2860 return xprt; 2861 transport = container_of(xprt, struct sock_xprt, xprt); 2862 2863 xprt->prot = IPPROTO_UDP; 2864 xprt->tsh_size = 0; 2865 /* XXX: header size can vary due to auth type, IPv6, etc. */ 2866 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); 2867 2868 xprt->bind_timeout = XS_BIND_TO; 2869 xprt->reestablish_timeout = XS_UDP_REEST_TO; 2870 xprt->idle_timeout = XS_IDLE_DISC_TO; 2871 2872 xprt->ops = &xs_udp_ops; 2873 2874 xprt->timeout = &xs_udp_default_timeout; 2875 2876 INIT_WORK(&transport->recv_worker, xs_udp_data_receive_workfn); 2877 INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_setup_socket); 2878 2879 switch (addr->sa_family) { 2880 case AF_INET: 2881 if (((struct sockaddr_in *)addr)->sin_port != htons(0)) 2882 xprt_set_bound(xprt); 2883 2884 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP); 2885 break; 2886 case AF_INET6: 2887 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) 2888 xprt_set_bound(xprt); 2889 2890 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6); 2891 break; 2892 default: 2893 ret = ERR_PTR(-EAFNOSUPPORT); 2894 goto out_err; 2895 } 2896 2897 if (xprt_bound(xprt)) 2898 dprintk("RPC: set up xprt to %s (port %s) via %s\n", 2899 xprt->address_strings[RPC_DISPLAY_ADDR], 2900 xprt->address_strings[RPC_DISPLAY_PORT], 2901 xprt->address_strings[RPC_DISPLAY_PROTO]); 2902 else 2903 dprintk("RPC: set up xprt to %s (autobind) via %s\n", 2904 xprt->address_strings[RPC_DISPLAY_ADDR], 2905 xprt->address_strings[RPC_DISPLAY_PROTO]); 2906 2907 if (try_module_get(THIS_MODULE)) 2908 return xprt; 2909 ret = ERR_PTR(-EINVAL); 2910out_err: 2911 xs_xprt_free(xprt); 2912 return ret; 2913} 2914 2915static const struct rpc_timeout xs_tcp_default_timeout = { 2916 .to_initval = 60 * HZ, 2917 .to_maxval = 60 * HZ, 2918 .to_retries = 2, 2919}; 2920 2921/** 2922 * xs_setup_tcp - Set up transport to use a TCP socket 2923 * @args: rpc transport creation arguments 2924 * 2925 */ 2926static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) 2927{ 2928 struct sockaddr *addr = args->dstaddr; 2929 struct rpc_xprt *xprt; 2930 struct sock_xprt *transport; 2931 struct rpc_xprt *ret; 2932 unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries; 2933 2934 if (args->flags & XPRT_CREATE_INFINITE_SLOTS) 2935 max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT; 2936 2937 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, 2938 max_slot_table_size); 2939 if (IS_ERR(xprt)) 2940 return xprt; 2941 transport = container_of(xprt, struct sock_xprt, xprt); 2942 2943 xprt->prot = IPPROTO_TCP; 2944 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); 2945 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 2946 2947 xprt->bind_timeout = XS_BIND_TO; 2948 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 2949 xprt->idle_timeout = XS_IDLE_DISC_TO; 2950 2951 xprt->ops = &xs_tcp_ops; 2952 xprt->timeout = &xs_tcp_default_timeout; 2953 2954 INIT_WORK(&transport->recv_worker, xs_tcp_data_receive_workfn); 2955 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket); 2956 2957 switch (addr->sa_family) { 2958 case AF_INET: 2959 if (((struct sockaddr_in 
*)addr)->sin_port != htons(0)) 2960 xprt_set_bound(xprt); 2961 2962 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP); 2963 break; 2964 case AF_INET6: 2965 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) 2966 xprt_set_bound(xprt); 2967 2968 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6); 2969 break; 2970 default: 2971 ret = ERR_PTR(-EAFNOSUPPORT); 2972 goto out_err; 2973 } 2974 2975 if (xprt_bound(xprt)) 2976 dprintk("RPC: set up xprt to %s (port %s) via %s\n", 2977 xprt->address_strings[RPC_DISPLAY_ADDR], 2978 xprt->address_strings[RPC_DISPLAY_PORT], 2979 xprt->address_strings[RPC_DISPLAY_PROTO]); 2980 else 2981 dprintk("RPC: set up xprt to %s (autobind) via %s\n", 2982 xprt->address_strings[RPC_DISPLAY_ADDR], 2983 xprt->address_strings[RPC_DISPLAY_PROTO]); 2984 2985 if (try_module_get(THIS_MODULE)) 2986 return xprt; 2987 ret = ERR_PTR(-EINVAL); 2988out_err: 2989 xs_xprt_free(xprt); 2990 return ret; 2991} 2992 2993/** 2994 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket 2995 * @args: rpc transport creation arguments 2996 * 2997 */ 2998static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) 2999{ 3000 struct sockaddr *addr = args->dstaddr; 3001 struct rpc_xprt *xprt; 3002 struct sock_xprt *transport; 3003 struct svc_sock *bc_sock; 3004 struct rpc_xprt *ret; 3005 3006 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, 3007 xprt_tcp_slot_table_entries); 3008 if (IS_ERR(xprt)) 3009 return xprt; 3010 transport = container_of(xprt, struct sock_xprt, xprt); 3011 3012 xprt->prot = IPPROTO_TCP; 3013 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); 3014 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 3015 xprt->timeout = &xs_tcp_default_timeout; 3016 3017 /* backchannel */ 3018 xprt_set_bound(xprt); 3019 xprt->bind_timeout = 0; 3020 xprt->reestablish_timeout = 0; 3021 xprt->idle_timeout = 0; 3022 3023 xprt->ops = &bc_tcp_ops; 3024 3025 switch (addr->sa_family) { 3026 case AF_INET: 3027 xs_format_peer_addresses(xprt, "tcp", 3028 RPCBIND_NETID_TCP); 3029 break; 3030 case AF_INET6: 3031 xs_format_peer_addresses(xprt, "tcp", 3032 RPCBIND_NETID_TCP6); 3033 break; 3034 default: 3035 ret = ERR_PTR(-EAFNOSUPPORT); 3036 goto out_err; 3037 } 3038 3039 dprintk("RPC: set up xprt to %s (port %s) via %s\n", 3040 xprt->address_strings[RPC_DISPLAY_ADDR], 3041 xprt->address_strings[RPC_DISPLAY_PORT], 3042 xprt->address_strings[RPC_DISPLAY_PROTO]); 3043 3044 /* 3045 * Once we've associated a backchannel xprt with a connection, 3046 * we want to keep it around as long as the connection lasts, 3047 * in case we need to start using it for a backchannel again; 3048 * this reference won't be dropped until bc_xprt is destroyed. 
3049 */ 3050 xprt_get(xprt); 3051 args->bc_xprt->xpt_bc_xprt = xprt; 3052 xprt->bc_xprt = args->bc_xprt; 3053 bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt); 3054 transport->sock = bc_sock->sk_sock; 3055 transport->inet = bc_sock->sk_sk; 3056 3057 /* 3058 * Since we don't want connections for the backchannel, we set 3059 * the xprt status to connected 3060 */ 3061 xprt_set_connected(xprt); 3062 3063 if (try_module_get(THIS_MODULE)) 3064 return xprt; 3065 3066 args->bc_xprt->xpt_bc_xprt = NULL; 3067 xprt_put(xprt); 3068 ret = ERR_PTR(-EINVAL); 3069out_err: 3070 xs_xprt_free(xprt); 3071 return ret; 3072} 3073 3074static struct xprt_class xs_local_transport = { 3075 .list = LIST_HEAD_INIT(xs_local_transport.list), 3076 .name = "named UNIX socket", 3077 .owner = THIS_MODULE, 3078 .ident = XPRT_TRANSPORT_LOCAL, 3079 .setup = xs_setup_local, 3080}; 3081 3082static struct xprt_class xs_udp_transport = { 3083 .list = LIST_HEAD_INIT(xs_udp_transport.list), 3084 .name = "udp", 3085 .owner = THIS_MODULE, 3086 .ident = XPRT_TRANSPORT_UDP, 3087 .setup = xs_setup_udp, 3088}; 3089 3090static struct xprt_class xs_tcp_transport = { 3091 .list = LIST_HEAD_INIT(xs_tcp_transport.list), 3092 .name = "tcp", 3093 .owner = THIS_MODULE, 3094 .ident = XPRT_TRANSPORT_TCP, 3095 .setup = xs_setup_tcp, 3096}; 3097 3098static struct xprt_class xs_bc_tcp_transport = { 3099 .list = LIST_HEAD_INIT(xs_bc_tcp_transport.list), 3100 .name = "tcp NFSv4.1 backchannel", 3101 .owner = THIS_MODULE, 3102 .ident = XPRT_TRANSPORT_BC_TCP, 3103 .setup = xs_setup_bc_tcp, 3104}; 3105 3106/** 3107 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client 3108 * 3109 */ 3110int init_socket_xprt(void) 3111{ 3112#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 3113 if (!sunrpc_table_header) 3114 sunrpc_table_header = register_sysctl_table(sunrpc_table); 3115#endif 3116 3117 xprt_register_transport(&xs_local_transport); 3118 xprt_register_transport(&xs_udp_transport); 3119 xprt_register_transport(&xs_tcp_transport); 3120 xprt_register_transport(&xs_bc_tcp_transport); 3121 3122 return 0; 3123} 3124 3125/** 3126 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister 3127 * 3128 */ 3129void cleanup_socket_xprt(void) 3130{ 3131#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 3132 if (sunrpc_table_header) { 3133 unregister_sysctl_table(sunrpc_table_header); 3134 sunrpc_table_header = NULL; 3135 } 3136#endif 3137 3138 xprt_unregister_transport(&xs_local_transport); 3139 xprt_unregister_transport(&xs_udp_transport); 3140 xprt_unregister_transport(&xs_tcp_transport); 3141 xprt_unregister_transport(&xs_bc_tcp_transport); 3142} 3143 3144static int param_set_uint_minmax(const char *val, 3145 const struct kernel_param *kp, 3146 unsigned int min, unsigned int max) 3147{ 3148 unsigned int num; 3149 int ret; 3150 3151 if (!val) 3152 return -EINVAL; 3153 ret = kstrtouint(val, 0, &num); 3154 if (ret == -EINVAL || num < min || num > max) 3155 return -EINVAL; 3156 *((unsigned int *)kp->arg) = num; 3157 return 0; 3158} 3159 3160static int param_set_portnr(const char *val, const struct kernel_param *kp) 3161{ 3162 return param_set_uint_minmax(val, kp, 3163 RPC_MIN_RESVPORT, 3164 RPC_MAX_RESVPORT); 3165} 3166 3167static const struct kernel_param_ops param_ops_portnr = { 3168 .set = param_set_portnr, 3169 .get = param_get_uint, 3170}; 3171 3172#define param_check_portnr(name, p) \ 3173 __param_check(name, p, unsigned int); 3174 3175module_param_named(min_resvport, xprt_min_resvport, portnr, 0644); 3176module_param_named(max_resvport, xprt_max_resvport, 
portnr, 0644); 3177 3178static int param_set_slot_table_size(const char *val, 3179 const struct kernel_param *kp) 3180{ 3181 return param_set_uint_minmax(val, kp, 3182 RPC_MIN_SLOT_TABLE, 3183 RPC_MAX_SLOT_TABLE); 3184} 3185 3186static const struct kernel_param_ops param_ops_slot_table_size = { 3187 .set = param_set_slot_table_size, 3188 .get = param_get_uint, 3189}; 3190 3191#define param_check_slot_table_size(name, p) \ 3192 __param_check(name, p, unsigned int); 3193 3194static int param_set_max_slot_table_size(const char *val, 3195 const struct kernel_param *kp) 3196{ 3197 return param_set_uint_minmax(val, kp, 3198 RPC_MIN_SLOT_TABLE, 3199 RPC_MAX_SLOT_TABLE_LIMIT); 3200} 3201 3202static const struct kernel_param_ops param_ops_max_slot_table_size = { 3203 .set = param_set_max_slot_table_size, 3204 .get = param_get_uint, 3205}; 3206 3207#define param_check_max_slot_table_size(name, p) \ 3208 __param_check(name, p, unsigned int); 3209 3210module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries, 3211 slot_table_size, 0644); 3212module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries, 3213 max_slot_table_size, 0644); 3214module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries, 3215 slot_table_size, 0644); 3216 3217
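/*
 * Illustrative example, assuming the usual sunrpc module build: the
 * parameters defined above can be set at module load time, e.g.
 *
 *	modprobe sunrpc tcp_slot_table_entries=128
 *
 * (or, when sunrpc is built in, as sunrpc.tcp_slot_table_entries=128 on the
 * kernel command line), and adjusted later through
 * /sys/module/sunrpc/parameters/.  The same variables are also exposed as
 * the sysctls registered by init_socket_xprt() when CONFIG_SUNRPC_DEBUG is
 * enabled.
 */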