root/include/linux/sunrpc/svc.h

DEFINITIONS

This source file includes the following definitions.
  1. svc_get
  2. svc_getnl
  3. svc_putnl
  4. svc_getu32
  5. svc_ungetu32
  6. svc_putu32
  7. svc_addr_in
  8. svc_addr_in6
  9. svc_addr
  10. svc_daddr_in
  11. svc_daddr_in6
  12. svc_daddr
  13. xdr_argsize_check
  14. xdr_ressize_check
  15. svc_free_res_pages
  16. svc_reserve_auth

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/include/linux/sunrpc/svc.h
 *
 * RPC server declarations.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */


#ifndef SUNRPC_SVC_H
#define SUNRPC_SVC_H

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/wait.h>
#include <linux/mm.h>

/* statistics for svc_pool structures */
struct svc_pool_stats {
        atomic_long_t   packets;
        unsigned long   sockets_queued;
        atomic_long_t   threads_woken;
        atomic_long_t   threads_timedout;
};

/*
 *
 * RPC service thread pool.
 *
 * Pool of threads and temporary sockets.  Generally there is only
 * a single one of these per RPC service, but on NUMA machines those
 * services that can benefit from it (i.e. nfs but not lockd) will
 * have one pool per NUMA node.  This optimisation reduces cross-
 * node traffic on multi-node NUMA NFS servers.
 */
struct svc_pool {
        unsigned int            sp_id;          /* pool id; also node id on NUMA */
        spinlock_t              sp_lock;        /* protects all fields */
        struct list_head        sp_sockets;     /* pending sockets */
        unsigned int            sp_nrthreads;   /* # of threads in pool */
        struct list_head        sp_all_threads; /* all server threads */
        struct svc_pool_stats   sp_stats;       /* statistics on pool operation */
#define SP_TASK_PENDING         (0)             /* still work to do even if no
                                                 * xprt is queued. */
#define SP_CONGESTED            (1)
        unsigned long           sp_flags;
} ____cacheline_aligned_in_smp;

struct svc_serv;

struct svc_serv_ops {
        /* Callback to use when last thread exits. */
        void            (*svo_shutdown)(struct svc_serv *, struct net *);

        /* function for service threads to run */
        int             (*svo_function)(void *);

        /* queue up a transport for servicing */
        void            (*svo_enqueue_xprt)(struct svc_xprt *);

        /* set up thread (or whatever) execution context */
        int             (*svo_setup)(struct svc_serv *, struct svc_pool *, int);

        /* optional module to count when adding threads (pooled svcs only) */
        struct module   *svo_module;
};
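
/*
 * Illustrative sketch (not part of this header): a service typically
 * supplies its callbacks with a static initializer.  The names
 * example_shutdown and example_thread_fn are hypothetical stand-ins;
 * the remaining symbols are declared by the sunrpc headers:
 *
 *	static const struct svc_serv_ops example_sv_ops = {
 *		.svo_shutdown		= example_shutdown,
 *		.svo_function		= example_thread_fn,
 *		.svo_enqueue_xprt	= svc_xprt_do_enqueue,
 *		.svo_setup		= svc_set_num_threads,
 *		.svo_module		= THIS_MODULE,
 *	};
 */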

/*
 * RPC service.
 *
 * An RPC service is a ``daemon,'' possibly multithreaded, which
 * receives and processes incoming RPC messages.
 * It has one or more transport sockets associated with it, and maintains
 * a list of idle threads waiting for input.
 *
 * We currently do not support more than one RPC program per daemon.
 */
struct svc_serv {
        struct svc_program *    sv_program;     /* RPC program */
        struct svc_stat *       sv_stats;       /* RPC statistics */
        spinlock_t              sv_lock;
        unsigned int            sv_nrthreads;   /* # of server threads */
        unsigned int            sv_maxconn;     /* max connections allowed or
                                                 * '0' causing max to be based
                                                 * on number of threads. */

        unsigned int            sv_max_payload; /* datagram payload size */
        unsigned int            sv_max_mesg;    /* max_payload + 1 page for overheads */
        unsigned int            sv_xdrsize;     /* XDR buffer size */
        struct list_head        sv_permsocks;   /* all permanent sockets */
        struct list_head        sv_tempsocks;   /* all temporary sockets */
        int                     sv_tmpcnt;      /* count of temporary sockets */
        struct timer_list       sv_temptimer;   /* timer for aging temporary sockets */

        char *                  sv_name;        /* service name */

        unsigned int            sv_nrpools;     /* number of thread pools */
        struct svc_pool *       sv_pools;       /* array of thread pools */
        const struct svc_serv_ops *sv_ops;      /* server operations */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
        struct list_head        sv_cb_list;     /* queue for callback requests
                                                 * that arrive over the same
                                                 * connection */
        spinlock_t              sv_cb_lock;     /* protects the svc_cb_list */
        wait_queue_head_t       sv_cb_waitq;    /* sleep here if there are no
                                                 * entries in the svc_cb_list */
        bool                    sv_bc_enabled;  /* service uses backchannel */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};

/*
 * We use sv_nrthreads as a reference count.  svc_destroy() drops
 * this refcount, so we need to bump it up around operations that
 * change the number of threads.  Horrible, but there it is.
 * Should be called with the "service mutex" held.
 */
static inline void svc_get(struct svc_serv *serv)
{
        serv->sv_nrthreads++;
}
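
/*
 * Illustrative sketch (not part of this header): a caller about to
 * change the thread count takes a reference first, then lets
 * svc_destroy() drop it again.  The mutex name is hypothetical:
 *
 *	mutex_lock(&example_mutex);	// the service's "service mutex"
 *	svc_get(serv);
 *	error = svc_set_num_threads(serv, NULL, nrservs);
 *	svc_destroy(serv);		// drops the reference taken above
 *	mutex_unlock(&example_mutex);
 */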

/*
 * Maximum payload size supported by a kernel RPC server.
 * This is used to determine the max number of pages nfsd is
 * willing to return in a single READ operation.
 *
 * These happen to all be powers of 2, which is not strictly
 * necessary but helps enforce the real limitation, which is
 * that they should be multiples of PAGE_SIZE.
 *
 * For UDP transports, a block plus NFS, RPC, and UDP headers
 * has to fit into the IP datagram limit of 64K.  The largest
 * feasible number for all known page sizes is probably 48K,
 * but we choose 32K here.  This is the same as the historical
 * Linux limit; someone who cares more about NFS/UDP performance
 * can test a larger number.
 *
 * For TCP transports we have more freedom.  A size of 1MB is
 * chosen to match the client limit.  Other OSes are known to
 * have larger limits, but those numbers are probably beyond
 * the point of diminishing returns.
 */
#define RPCSVC_MAXPAYLOAD       (1*1024*1024u)
#define RPCSVC_MAXPAYLOAD_TCP   RPCSVC_MAXPAYLOAD
#define RPCSVC_MAXPAYLOAD_UDP   (32*1024u)

extern u32 svc_max_payload(const struct svc_rqst *rqstp);

/*
 * RPC requests and replies are stored in one or more pages.
 * We maintain an array of pages for each server thread.
 * Requests are copied into these pages as they arrive.  Remaining
 * pages are available to write the reply into.
 *
 * Pages are sent using ->sendpage so each server thread needs to
 * allocate more to replace those used in sending.  To help keep track
 * of these pages we have a receive list where all pages initially live,
 * and a send list where pages are moved to when they are to be part
 * of a reply.
 *
 * We use xdr_buf for holding responses as it fits well with NFS
 * read responses (that have a header, and some data pages, and possibly
 * a tail) and means we can share some client side routines.
 *
 * The xdr_buf.head kvec always points to the first page in the rq_*pages
 * list.  The xdr_buf.pages pointer points to the second page on that
 * list.  xdr_buf.tail points to the end of the first page.
 * This assumes that the non-page part of an rpc reply will fit
 * in a page - NFSd ensures this.  lockd also has no trouble.
 *
 * Each request/reply pair can have at most one "payload", plus two pages,
 * one for the request, and one for the reply.
 * When using ->sendfile to return read data, we might need one extra page
 * if the request is not page-aligned.  So add another '1'.
 */
#define RPCSVC_MAXPAGES         ((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE \
                                + 2 + 1)
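
/*
 * Illustrative arithmetic (assuming a 4KB PAGE_SIZE): the 1MB maximum
 * payload rounds up to 1048576 / 4096 = 256 pages, plus one page each
 * for the request and the reply, plus one for a non-page-aligned
 * ->sendfile payload, giving RPCSVC_MAXPAGES = 259.
 */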

static inline u32 svc_getnl(struct kvec *iov)
{
        __be32 val, *vp;
        vp = iov->iov_base;
        val = *vp++;
        iov->iov_base = (void*)vp;
        iov->iov_len -= sizeof(__be32);
        return ntohl(val);
}

static inline void svc_putnl(struct kvec *iov, u32 val)
{
        __be32 *vp = iov->iov_base + iov->iov_len;
        *vp = htonl(val);
        iov->iov_len += sizeof(__be32);
}

static inline __be32 svc_getu32(struct kvec *iov)
{
        __be32 val, *vp;
        vp = iov->iov_base;
        val = *vp++;
        iov->iov_base = (void*)vp;
        iov->iov_len -= sizeof(__be32);
        return val;
}

static inline void svc_ungetu32(struct kvec *iov)
{
        __be32 *vp = (__be32 *)iov->iov_base;
        iov->iov_base = (void *)(vp - 1);
        iov->iov_len += sizeof(*vp);
}

static inline void svc_putu32(struct kvec *iov, __be32 val)
{
        __be32 *vp = iov->iov_base + iov->iov_len;
        *vp = val;
        iov->iov_len += sizeof(__be32);
}
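
/*
 * Illustrative sketch (not part of this header): svc_getnl() consumes a
 * 32-bit word from the front of a kvec and svc_putnl() appends one, so
 * a handler can shuttle values between the argument and result buffers:
 *
 *	struct kvec *argv = &rqstp->rq_arg.head[0];
 *	struct kvec *resv = &rqstp->rq_res.head[0];
 *	u32 cookie;
 *
 *	cookie = svc_getnl(argv);	// read, converting to host order
 *	svc_putnl(resv, cookie);	// write back in network order
 */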

/*
 * The context of a single thread, including the request currently being
 * processed.
 */
struct svc_rqst {
        struct list_head        rq_all;         /* all threads list */
        struct rcu_head         rq_rcu_head;    /* for RCU deferred kfree */
        struct svc_xprt *       rq_xprt;        /* transport ptr */

        struct sockaddr_storage rq_addr;        /* peer address */
        size_t                  rq_addrlen;
        struct sockaddr_storage rq_daddr;       /* dest addr of request
                                                 *  - reply from here */
        size_t                  rq_daddrlen;

        struct svc_serv *       rq_server;      /* RPC service definition */
        struct svc_pool *       rq_pool;        /* thread pool */
        const struct svc_procedure *rq_procinfo;/* procedure info */
        struct auth_ops *       rq_authop;      /* authentication flavour */
        struct svc_cred         rq_cred;        /* auth info */
        void *                  rq_xprt_ctxt;   /* transport specific context ptr */
        struct svc_deferred_req*rq_deferred;    /* deferred request we are replaying */

        size_t                  rq_xprt_hlen;   /* xprt header len */
        struct xdr_buf          rq_arg;
        struct xdr_buf          rq_res;
        struct page             *rq_pages[RPCSVC_MAXPAGES + 1];
        struct page *           *rq_respages;   /* points into rq_pages */
        struct page *           *rq_next_page; /* next reply page to use */
        struct page *           *rq_page_end;  /* one past the last page */

        struct kvec             rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */

        __be32                  rq_xid;         /* transmission id */
        u32                     rq_prog;        /* program number */
        u32                     rq_vers;        /* program version */
        u32                     rq_proc;        /* procedure number */
        u32                     rq_prot;        /* IP protocol */
        int                     rq_cachetype;   /* catering to nfsd */
#define RQ_SECURE       (0)                     /* secure port */
#define RQ_LOCAL        (1)                     /* local request */
#define RQ_USEDEFERRAL  (2)                     /* use deferral */
#define RQ_DROPME       (3)                     /* drop current reply */
#define RQ_SPLICE_OK    (4)                     /* turned off in gss privacy
                                                 * to prevent encrypting page
                                                 * cache pages */
#define RQ_VICTIM       (5)                     /* about to be shut down */
#define RQ_BUSY         (6)                     /* request is busy */
#define RQ_DATA         (7)                     /* request has data */
#define RQ_AUTHERR      (8)                     /* Request status is auth error */
        unsigned long           rq_flags;       /* flags field */
        ktime_t                 rq_qtime;       /* enqueue time */

        void *                  rq_argp;        /* decoded arguments */
        void *                  rq_resp;        /* xdr'd results */
        void *                  rq_auth_data;   /* flavor-specific data */
        int                     rq_auth_slack;  /* extra space xdr code
                                                 * should leave in head
                                                 * for krb5i, krb5p.
                                                 */
        int                     rq_reserved;    /* space on socket outq
                                                 * reserved for this request
                                                 */
        ktime_t                 rq_stime;       /* start time */

        struct cache_req        rq_chandle;     /* handle passed to caches for
                                                 * request delaying
                                                 */
        /* Catering to nfsd */
        struct auth_domain *    rq_client;      /* RPC peer info */
        struct auth_domain *    rq_gssclient;   /* "gss/"-style peer info */
        struct svc_cacherep *   rq_cacherep;    /* cache info */
        struct task_struct      *rq_task;       /* service thread */
        spinlock_t              rq_lock;        /* per-request lock */
        struct net              *rq_bc_net;     /* pointer to backchannel's
                                                 * net namespace
                                                 */
};

#define SVC_NET(rqst) ((rqst)->rq_xprt ? (rqst)->rq_xprt->xpt_net : (rqst)->rq_bc_net)

/*
 * Rigorous type checking on sockaddr type conversions
 */
static inline struct sockaddr_in *svc_addr_in(const struct svc_rqst *rqst)
{
        return (struct sockaddr_in *) &rqst->rq_addr;
}

static inline struct sockaddr_in6 *svc_addr_in6(const struct svc_rqst *rqst)
{
        return (struct sockaddr_in6 *) &rqst->rq_addr;
}

static inline struct sockaddr *svc_addr(const struct svc_rqst *rqst)
{
        return (struct sockaddr *) &rqst->rq_addr;
}

static inline struct sockaddr_in *svc_daddr_in(const struct svc_rqst *rqst)
{
        return (struct sockaddr_in *) &rqst->rq_daddr;
}

static inline struct sockaddr_in6 *svc_daddr_in6(const struct svc_rqst *rqst)
{
        return (struct sockaddr_in6 *) &rqst->rq_daddr;
}

static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
{
        return (struct sockaddr *) &rqst->rq_daddr;
}
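
/*
 * Illustrative sketch (not part of this header): callers switch on the
 * generic sockaddr's family before picking the typed accessor:
 *
 *	unsigned short port = 0;
 *
 *	switch (svc_addr(rqstp)->sa_family) {
 *	case AF_INET:
 *		port = ntohs(svc_addr_in(rqstp)->sin_port);
 *		break;
 *	case AF_INET6:
 *		port = ntohs(svc_addr_in6(rqstp)->sin6_port);
 *		break;
 *	}
 */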

/*
 * Check buffer bounds after decoding arguments
 */
static inline int
xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
{
        char *cp = (char *)p;
        struct kvec *vec = &rqstp->rq_arg.head[0];
        return cp >= (char*)vec->iov_base
                && cp <= (char*)vec->iov_base + vec->iov_len;
}

static inline int
xdr_ressize_check(struct svc_rqst *rqstp, __be32 *p)
{
        struct kvec *vec = &rqstp->rq_res.head[0];
        char *cp = (char*)p;

        vec->iov_len = cp - (char*)vec->iov_base;

        return vec->iov_len <= PAGE_SIZE;
}
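
/*
 * Illustrative sketch (not part of this header): an XDR decode routine
 * consumes its arguments and then verifies the cursor never ran past
 * the received buffer.  The argument struct is hypothetical:
 *
 *	static int example_decode(struct svc_rqst *rqstp, __be32 *p)
 *	{
 *		struct example_args *argp = rqstp->rq_argp;
 *
 *		argp->value = ntohl(*p++);
 *		return xdr_argsize_check(rqstp, p);	// 0 means reject
 *	}
 */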

static inline void svc_free_res_pages(struct svc_rqst *rqstp)
{
        while (rqstp->rq_next_page != rqstp->rq_respages) {
                struct page **pp = --rqstp->rq_next_page;
                if (*pp) {
                        put_page(*pp);
                        *pp = NULL;
                }
        }
}

struct svc_deferred_req {
        u32                     prot;   /* protocol (UDP or TCP) */
        struct svc_xprt         *xprt;
        struct sockaddr_storage addr;   /* where reply must go */
        size_t                  addrlen;
        struct sockaddr_storage daddr;  /* where reply must come from */
        size_t                  daddrlen;
        struct cache_deferred_req handle;
        size_t                  xprt_hlen;
        int                     argslen;
        __be32                  args[0];
};

struct svc_process_info {
        union {
                int  (*dispatch)(struct svc_rqst *, __be32 *);
                struct {
                        unsigned int lovers;
                        unsigned int hivers;
                } mismatch;
        };
};

/*
 * List of RPC programs on the same transport endpoint
 */
struct svc_program {
        struct svc_program *    pg_next;        /* other programs (same xprt) */
        u32                     pg_prog;        /* program number */
        unsigned int            pg_lovers;      /* lowest version */
        unsigned int            pg_hivers;      /* highest version */
        unsigned int            pg_nvers;       /* number of versions */
        const struct svc_version **pg_vers;     /* version array */
        char *                  pg_name;        /* service name */
        char *                  pg_class;       /* class name: services sharing authentication */
        struct svc_stat *       pg_stats;       /* rpc statistics */
        int                     (*pg_authenticate)(struct svc_rqst *);
        __be32                  (*pg_init_request)(struct svc_rqst *,
                                                   const struct svc_program *,
                                                   struct svc_process_info *);
        int                     (*pg_rpcbind_set)(struct net *net,
                                                  const struct svc_program *,
                                                  u32 version, int family,
                                                  unsigned short proto,
                                                  unsigned short port);
};
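
/*
 * Illustrative sketch (not part of this header): a service registers one
 * program with a sparse array of versions; unsupported slots stay NULL.
 * All "example_*" names and the program number are hypothetical:
 *
 *	static const struct svc_version *example_versions[] = {
 *		[2] = &example_version2,
 *		[3] = &example_version3,
 *	};
 *
 *	static struct svc_program example_program = {
 *		.pg_prog	= 100099,	// made-up program number
 *		.pg_nvers	= ARRAY_SIZE(example_versions),
 *		.pg_vers	= example_versions,
 *		.pg_name	= "example",
 *		.pg_class	= "example",
 *		.pg_authenticate = example_authenticate,
 *		.pg_init_request = svc_generic_init_request,
 *		.pg_rpcbind_set	= svc_generic_rpcbind_set,
 *	};
 */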

/*
 * RPC program version
 */
struct svc_version {
        u32                     vs_vers;        /* version number */
        u32                     vs_nproc;       /* number of procedures */
        const struct svc_procedure *vs_proc;    /* per-procedure info */
        unsigned int            *vs_count;      /* call counts */
        u32                     vs_xdrsize;     /* xdrsize needed for this version */

        /* Don't register with rpcbind */
        bool                    vs_hidden;

        /* Don't care if the rpcbind registration fails */
        bool                    vs_rpcb_optnl;

        /* Need xprt with congestion control */
        bool                    vs_need_cong_ctrl;

        /* Override dispatch function (e.g. when caching replies).
         * A return value of 0 means drop the request.
         * vs_dispatch == NULL means use default dispatcher.
         */
        int                     (*vs_dispatch)(struct svc_rqst *, __be32 *);
};

/*
 * RPC procedure info
 */
struct svc_procedure {
        /* process the request: */
        __be32                  (*pc_func)(struct svc_rqst *);
        /* XDR decode args: */
        int                     (*pc_decode)(struct svc_rqst *, __be32 *data);
        /* XDR encode result: */
        int                     (*pc_encode)(struct svc_rqst *, __be32 *data);
        /* XDR free result: */
        void                    (*pc_release)(struct svc_rqst *);
        unsigned int            pc_argsize;     /* argument struct size */
        unsigned int            pc_ressize;     /* result struct size */
        unsigned int            pc_cachetype;   /* cache info (NFS) */
        unsigned int            pc_xdrressize;  /* maximum size of XDR reply */
};
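
/*
 * Illustrative sketch (not part of this header): a version ties its
 * procedure table together; each slot names the handler and its XDR
 * helpers.  All "example_*" names are hypothetical:
 *
 *	static const struct svc_procedure example_procedures[] = {
 *		[0] = {
 *			.pc_func	= example_proc_null,
 *			.pc_decode	= example_decode_void,
 *			.pc_encode	= example_encode_void,
 *			.pc_argsize	= sizeof(struct example_void),
 *			.pc_ressize	= sizeof(struct example_void),
 *			.pc_xdrressize	= 1,	// one XDR word of reply
 *		},
 *	};
 *
 *	static unsigned int example_count[ARRAY_SIZE(example_procedures)];
 *
 *	static const struct svc_version example_version2 = {
 *		.vs_vers	= 2,
 *		.vs_nproc	= ARRAY_SIZE(example_procedures),
 *		.vs_proc	= example_procedures,
 *		.vs_count	= example_count,
 *		.vs_xdrsize	= 2 * sizeof(__be32),
 *	};
 */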

/*
 * Mode for mapping cpus to pools.
 */
enum {
        SVC_POOL_AUTO = -1,     /* choose one of the others */
        SVC_POOL_GLOBAL,        /* no mapping, just a single global pool
                                 * (legacy & UP mode) */
        SVC_POOL_PERCPU,        /* one pool per cpu */
        SVC_POOL_PERNODE        /* one pool per numa node */
};
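
/*
 * Illustrative sketch (not part of this header): pooled services pin
 * the cpu-to-pool map around their lifetime; svc_pool_map_get(),
 * declared below, resolves SVC_POOL_AUTO to a concrete mode and
 * returns the number of pools:
 *
 *	unsigned int npools = svc_pool_map_get();
 *	...
 *	svc_pool_map_put();	// release when the service goes away
 */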

struct svc_pool_map {
        int count;                      /* How many svc_servs use us */
        int mode;                       /* Note: int not enum to avoid
                                         * warnings about "enumeration value
                                         * not handled in switch" */
        unsigned int npools;
        unsigned int *pool_to;          /* maps pool id to cpu or node */
        unsigned int *to_pool;          /* maps cpu or node to pool id */
};

extern struct svc_pool_map svc_pool_map;

/*
 * Function prototypes.
 */
int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
int svc_bind(struct svc_serv *serv, struct net *net);
struct svc_serv *svc_create(struct svc_program *, unsigned int,
                            const struct svc_serv_ops *);
struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
                                        struct svc_pool *pool, int node);
struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
                                        struct svc_pool *pool, int node);
void               svc_rqst_free(struct svc_rqst *);
void               svc_exit_thread(struct svc_rqst *);
unsigned int       svc_pool_map_get(void);
void               svc_pool_map_put(void);
struct svc_serv *  svc_create_pooled(struct svc_program *, unsigned int,
                        const struct svc_serv_ops *);
int                svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
int                svc_set_num_threads_sync(struct svc_serv *, struct svc_pool *, int);
int                svc_pool_stats_open(struct svc_serv *serv, struct file *file);
void               svc_destroy(struct svc_serv *);
void               svc_shutdown_net(struct svc_serv *, struct net *);
int                svc_process(struct svc_rqst *);
int                bc_svc_process(struct svc_serv *, struct rpc_rqst *,
                        struct svc_rqst *);
int                svc_register(const struct svc_serv *, struct net *, const int,
                                const unsigned short, const unsigned short);

void               svc_wake_up(struct svc_serv *);
void               svc_reserve(struct svc_rqst *rqstp, int space);
struct svc_pool *  svc_pool_for_cpu(struct svc_serv *serv, int cpu);
char *             svc_print_addr(struct svc_rqst *, char *, size_t);
unsigned int       svc_fill_write_vector(struct svc_rqst *rqstp,
                                         struct page **pages,
                                         struct kvec *first, size_t total);
char              *svc_fill_symlink_pathname(struct svc_rqst *rqstp,
                                             struct kvec *first, void *p,
                                             size_t total);
__be32             svc_return_autherr(struct svc_rqst *rqstp, __be32 auth_err);
__be32             svc_generic_init_request(struct svc_rqst *rqstp,
                                            const struct svc_program *progp,
                                            struct svc_process_info *procinfo);
int                svc_generic_rpcbind_set(struct net *net,
                                           const struct svc_program *progp,
                                           u32 version, int family,
                                           unsigned short proto,
                                           unsigned short port);
int                svc_rpcbind_set_version(struct net *net,
                                           const struct svc_program *progp,
                                           u32 version, int family,
                                           unsigned short proto,
                                           unsigned short port);
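
/*
 * Illustrative sketch (not part of this header): a typical pooled
 * service is created, bound in a network namespace, and then asked to
 * spawn threads via its svo_setup callback.  Error unwinding is elided
 * and example_program/example_sv_ops are hypothetical:
 *
 *	struct svc_serv *serv;
 *	int error;
 *
 *	serv = svc_create_pooled(&example_program, RPCSVC_MAXPAYLOAD,
 *				 &example_sv_ops);
 *	if (!serv)
 *		return -ENOMEM;
 *	error = svc_bind(serv, net);
 *	if (!error)
 *		error = serv->sv_ops->svo_setup(serv, NULL, nrservs);
 */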

#define RPC_MAX_ADDRBUFLEN      (63U)

/*
 * When we want to reduce the size of the reserved space in the response
 * buffer, we need to take into account the size of any checksum data that
 * may be at the end of the packet. This is difficult to determine exactly
 * for all cases without actually generating the checksum, so we just use a
 * static value.
 */
static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space)
{
        svc_reserve(rqstp, space + rqstp->rq_auth_slack);
}

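/*
 * Illustrative sketch (not part of this header): once a handler knows
 * how large its reply really is, it can give reserved space back to the
 * transport while still leaving rq_auth_slack for a trailing checksum:
 *
 *	svc_reserve_auth(rqstp, resp_header_len + payload_len);
 *					// both lengths are hypothetical
 */
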
#endif /* SUNRPC_SVC_H */
