root/net/sunrpc/socklib.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions:
  1. xdr_skb_read_bits
  2. xdr_skb_read_and_csum_bits
  3. xdr_partial_copy_from_skb
  4. csum_partial_copy_to_xdr

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * linux/net/sunrpc/socklib.c
   4  *
   5  * Common socket helper routines for RPC client and server
   6  *
   7  * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
   8  */
   9 
  10 #include <linux/compiler.h>
  11 #include <linux/netdevice.h>
  12 #include <linux/gfp.h>
  13 #include <linux/skbuff.h>
  14 #include <linux/types.h>
  15 #include <linux/pagemap.h>
  16 #include <linux/udp.h>
  17 #include <linux/sunrpc/xdr.h>
  18 #include <linux/export.h>
  19 
  20 
  21 /**
  22  * xdr_skb_read_bits - copy some data bits from skb to internal buffer
  23  * @desc: sk_buff copy helper
  24  * @to: copy destination
  25  * @len: number of bytes to copy
  26  *
  27  * Possibly called several times to iterate over an sk_buff and copy
  28  * data out of it.
  29  */
  30 static size_t
  31 xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
  32 {
  33         if (len > desc->count)
  34                 len = desc->count;
  35         if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))
  36                 return 0;
  37         desc->count -= len;
  38         desc->offset += len;
  39         return len;
  40 }
  41 
  42 /**
  43  * xdr_skb_read_and_csum_bits - copy and checksum from skb to buffer
  44  * @desc: sk_buff copy helper
  45  * @to: copy destination
  46  * @len: number of bytes to copy
  47  *
  48  * Same as skb_read_bits, but calculate a checksum at the same time.
  49  */
  50 static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to, size_t len)
  51 {
  52         unsigned int pos;
  53         __wsum csum2;
  54 
  55         if (len > desc->count)
  56                 len = desc->count;
  57         pos = desc->offset;
  58         csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0);
  59         desc->csum = csum_block_add(desc->csum, csum2, pos);
  60         desc->count -= len;
  61         desc->offset += len;
  62         return len;
  63 }
  64 
/**
 * xdr_partial_copy_from_skb - copy data out of an skb
 * @xdr: target XDR buffer
 * @base: starting offset
 * @desc: sk_buff copy helper
 * @copy_actor: virtual method for copying data
 *
 * Copies from the skb tracked by @desc into @xdr's head, page array
 * and tail, starting @base bytes into the XDR buffer.  Returns the
 * number of bytes copied, or -ENOMEM when a sparse page cannot be
 * allocated before anything has been copied.
 */
static ssize_t
xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor)
{
	struct page	**ppage = xdr->pages;
	unsigned int	len, pglen = xdr->page_len;
	ssize_t		copied = 0;
	size_t		ret;

	/* First fill whatever part of the request lands in the head iovec. */
	len = xdr->head[0].iov_len;
	if (base < len) {
		len -= base;
		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
		copied += ret;
		/* Short copy or skb exhausted: nothing more to do. */
		if (ret != len || !desc->count)
			goto out;
		base = 0;
	} else
		base -= len;

	if (unlikely(pglen == 0))
		goto copy_tail;
	if (unlikely(base >= pglen)) {
		/* Offset lies entirely beyond the page data. */
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		/* Translate base into a starting page pointer plus an
		 * intra-page offset. */
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_SHIFT;
		base &= ~PAGE_MASK;
	}
	do {
		char *kaddr;

		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge. */
		if ((xdr->flags & XDRBUF_SPARSE_PAGES) && *ppage == NULL) {
			*ppage = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
			if (unlikely(*ppage == NULL)) {
				/* Only report -ENOMEM if no data was
				 * copied yet; otherwise return the
				 * partial count. */
				if (copied == 0)
					copied = -ENOMEM;
				goto out;
			}
		}

		len = PAGE_SIZE;
		kaddr = kmap_atomic(*ppage);
		if (base) {
			/* First page only: honour the intra-page offset. */
			len -= base;
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr + base, len);
			base = 0;
		} else {
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr, len);
		}
		flush_dcache_page(*ppage);
		kunmap_atomic(kaddr);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	/* Whatever remains (if anything) goes into the tail iovec. */
	len = xdr->tail[0].iov_len;
	if (base < len)
		copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
out:
	return copied;
}
 145 
/**
 * csum_partial_copy_to_xdr - checksum and copy data
 * @xdr: target XDR buffer
 * @skb: source skb
 *
 * We have set things up such that we perform the checksum of the UDP
 * packet in parallel with the copies into the RPC client iovec.  -DaveM
 *
 * Returns 0 on success, or -1 when the copy was short or the checksum
 * did not verify.
 */
int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
{
	struct xdr_skb_reader	desc;

	desc.skb = skb;
	desc.offset = 0;
	desc.count = skb->len - desc.offset;

	/* Checksum already verified (by hardware or an earlier layer):
	 * a plain copy is sufficient. */
	if (skb_csum_unnecessary(skb))
		goto no_checksum;

	/* Seed the running checksum; with desc.offset == 0 this is just
	 * skb->csum. */
	desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_and_csum_bits) < 0)
		return -1;
	if (desc.offset != skb->len) {
		/* The XDR buffer was smaller than the packet: checksum
		 * the uncopied remainder so the fold below still covers
		 * the whole skb. */
		__wsum csum2;
		csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
		desc.csum = csum_block_add(desc.csum, csum2, desc.offset);
	}
	if (desc.count)
		return -1;
	if (csum_fold(desc.csum))
		return -1;
	/* Hardware handed us a CHECKSUM_COMPLETE value that we did not
	 * compute in software and that verified OK here, yet the device
	 * had flagged trouble: report the rx checksum fault. */
	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
	    !skb->csum_complete_sw)
		netdev_rx_csum_fault(skb->dev, skb);
	return 0;
no_checksum:
	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
		return -1;
	if (desc.count)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(csum_partial_copy_to_xdr);

/* [<][>][^][v][top][bottom][index][help] */