root/net/ipv4/esp4_offload.c


DEFINITIONS

This source file includes the following definitions:
  1. esp4_gro_receive
  2. esp4_gso_encap
  3. xfrm4_tunnel_gso_segment
  4. xfrm4_transport_gso_segment
  5. xfrm4_outer_mode_gso_segment
  6. esp4_gso_segment
  7. esp_input_tail
  8. esp_xmit
  9. esp4_offload_init
  10. esp4_offload_exit

// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

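/* GRO receive handler for ESP. Parses the SPI and sequence number,
 * attaches the matching xfrm state to the secpath if the crypto was
 * not already done in hardware, and hands the packet to xfrm_input().
 * Returning ERR_PTR(-EINPROGRESS) tells the GRO layer that the skb
 * was consumed.
 */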
static struct sk_buff *esp4_gro_receive(struct list_head *head,
                                        struct sk_buff *skb)
{
        int offset = skb_gro_offset(skb);
        struct xfrm_offload *xo;
        struct xfrm_state *x;
        __be32 seq;
        __be32 spi;
        int err;

        if (!pskb_pull(skb, offset))
                return NULL;

        err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq);
        if (err)
                goto out;

        xo = xfrm_offload(skb);
        if (!xo || !(xo->flags & CRYPTO_DONE)) {
                struct sec_path *sp = secpath_set(skb);

                if (!sp)
                        goto out;

                if (sp->len == XFRM_MAX_DEPTH)
                        goto out_reset;

                x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
                                      (xfrm_address_t *)&ip_hdr(skb)->daddr,
                                      spi, IPPROTO_ESP, AF_INET);
                if (!x)
                        goto out_reset;

                skb->mark = xfrm_smark_get(skb->mark, x);

                sp->xvec[sp->len++] = x;
                sp->olen++;

                xo = xfrm_offload(skb);
                if (!xo)
                        goto out_reset;
        }

        xo->flags |= XFRM_GRO;

        XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
        XFRM_SPI_SKB_CB(skb)->seq = seq;

        /* We don't need to handle errors from xfrm_input, it does all
         * the error handling and frees the resources on error.
         */
        xfrm_input(skb, IPPROTO_ESP, spi, -2);

        return ERR_PTR(-EINPROGRESS);
out_reset:
        secpath_reset(skb);
out:
        skb_push(skb, offset);
        NAPI_GRO_CB(skb)->same_flow = 0;
        NAPI_GRO_CB(skb)->flush = 1;

        return NULL;
}

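/* Build the outer ESP header in front of the payload for GSO: fill in
 * the SPI and the low output sequence number, store IPPROTO_ESP at the
 * mac header position for the outer protocol, and remember the inner
 * protocol in the offload context for later segmentation.
 */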
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct iphdr *iph = ip_hdr(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        int proto = iph->protocol;

        skb_push(skb, -skb_network_offset(skb));
        esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        esph->spi = x->id.spi;
        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        xo->proto = proto;
}

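/* Tunnel mode: the inner packet carries its own MAC/IP headers, so
 * segmentation can be delegated to the generic MAC-layer GSO code.
 */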
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
                                                struct sk_buff *skb,
                                                netdev_features_t features)
{
        __skb_push(skb, skb->mac_len);
        return skb_mac_gso_segment(skb, features);
}

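/* Transport mode: skip over the ESP header and let the offload handler
 * of the inner protocol (e.g. TCP) do the actual segmentation.
 */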
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
                                                   struct sk_buff *skb,
                                                   netdev_features_t features)
{
        const struct net_offload *ops;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct xfrm_offload *xo = xfrm_offload(skb);

        skb->transport_header += x->props.header_len;
        ops = rcu_dereference(inet_offloads[xo->proto]);
        if (likely(ops && ops->callbacks.gso_segment))
                segs = ops->callbacks.gso_segment(skb, features);

        return segs;
}

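/* Dispatch GSO segmentation based on the encapsulation mode of the
 * xfrm state; modes other than tunnel and transport are not supported.
 */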
static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
                                                    struct sk_buff *skb,
                                                    netdev_features_t features)
{
        switch (x->outer_mode.encap) {
        case XFRM_MODE_TUNNEL:
                return xfrm4_tunnel_gso_segment(x, skb, features);
        case XFRM_MODE_TRANSPORT:
                return xfrm4_transport_gso_segment(x, skb, features);
        }

        return ERR_PTR(-EOPNOTSUPP);
}

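/* GSO segmentation handler for ESP. Validates the offload context and
 * the ESP header, strips the ESP header and IV, and masks out SG and
 * checksum features when the device cannot handle ESP segments for
 * this state, forcing a software fallback.
 */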
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        struct xfrm_state *x;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sec_path *sp;

        if (!xo)
                return ERR_PTR(-EINVAL);

        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
                return ERR_PTR(-EINVAL);

        sp = skb_sec_path(skb);
        x = sp->xvec[sp->len - 1];
        aead = x->data;
        esph = ip_esp_hdr(skb);

        if (esph->spi != x->id.spi)
                return ERR_PTR(-EINVAL);

        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
                return ERR_PTR(-EINVAL);

        __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

        skb->encap_hdr_csum = 1;

        if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
             !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
        else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
                 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
                esp_features = features & ~NETIF_F_CSUM_MASK;

        xo->flags |= XFRM_GSO_SEGMENT;

        return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}

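/* Tail of the offload receive path: once the (possibly hardware
 * accelerated) decryption is done, finish ESP processing via
 * esp_input_done2().
 */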
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
        struct crypto_aead *aead = x->data;
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
                return -EINVAL;

        if (!(xo->flags & CRYPTO_DONE))
                skb->ip_summed = CHECKSUM_NONE;

        return esp_input_done2(skb, 0);
}

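/* Transmit handler for ESP offload. Computes the ESP trailer layout,
 * fills in the SPI and sequence numbers, and either leaves encryption
 * to the device (hw_offload) or falls back to the software encryption
 * path via esp_output_tail().
 */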
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb,
                    netdev_features_t features)
{
        int err;
        int alen;
        int blksize;
        struct xfrm_offload *xo;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct esp_info esp;
        bool hw_offload = true;
        __u32 seq;

        esp.inplace = true;

        xo = xfrm_offload(skb);

        if (!xo)
                return -EINVAL;

        if ((!(features & NETIF_F_HW_ESP) &&
             !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
            x->xso.dev != skb->dev) {
                xo->flags |= CRYPTO_FALLBACK;
                hw_offload = false;
        }

        esp.proto = xo->proto;

        /* skb is pure payload to encrypt */

        aead = x->data;
        alen = crypto_aead_authsize(aead);

        esp.tfclen = 0;
        /* XXX: Add support for tfc padding here. */

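        /* ESP trailer layout: the payload plus tfc padding plus the two
         * pad-length/next-header bytes is rounded up to the cipher block
         * size (at least 4 bytes, as required by RFC 4303); plen is the
         * resulting pad length and tailen additionally accounts for the
         * ICV of alen bytes.
         */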
        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
        esp.plen = esp.clen - skb->len - esp.tfclen;
        esp.tailen = esp.tfclen + esp.plen + alen;

        esp.esph = ip_esp_hdr(skb);

        if (!hw_offload || !skb_is_gso(skb)) {
                esp.nfrags = esp_output_head(x, skb, &esp);
                if (esp.nfrags < 0)
                        return esp.nfrags;
        }

        seq = xo->seq.low;

        esph = esp.esph;
        esph->spi = x->id.spi;

        skb_push(skb, -skb_network_offset(skb));

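        /* Each segment produced from this skb consumes one ESP sequence
         * number, so advance the output sequence counter accordingly.
         */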
        if (xo->flags & XFRM_GSO_SEGMENT) {
                esph->seq_no = htonl(seq);

                if (!skb_is_gso(skb))
                        xo->seq.low++;
                else
                        xo->seq.low += skb_shinfo(skb)->gso_segs;
        }

        esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

        ip_hdr(skb)->tot_len = htons(skb->len);
        ip_send_check(ip_hdr(skb));

        if (hw_offload)
                return 0;

        err = esp_output_tail(x, skb, &esp);
        if (err)
                return err;

        secpath_reset(skb);

        return 0;
}

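/* Offload hooks: esp4_offload is registered with the inet offload layer
 * for GRO/GSO, esp_type_offload with the xfrm framework for the
 * ESP-specific encap, receive-tail and transmit handling.
 */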
static const struct net_offload esp4_offload = {
        .callbacks = {
                .gro_receive = esp4_gro_receive,
                .gso_segment = esp4_gso_segment,
        },
};

static const struct xfrm_type_offload esp_type_offload = {
        .description    = "ESP4 OFFLOAD",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .input_tail     = esp_input_tail,
        .xmit           = esp_xmit,
        .encap          = esp4_gso_encap,
};

static int __init esp4_offload_init(void)
{
        if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
                pr_info("%s: can't add xfrm type offload\n", __func__);
                return -EAGAIN;
        }

        return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
        xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
        inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
