root/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c

/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <crypto/internal/geniv.h>
#include <crypto/aead.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/module.h>

#include "en.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"

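/* Resolve the driver-private SA entry that mlx5e_xfrm_add_state() stashed
 * in x->xso.offload_handle; returns NULL for states that were never
 * offloaded.
 */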
static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
{
        struct mlx5e_ipsec_sa_entry *sa;

        if (!x)
                return NULL;

        sa = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
        if (!sa)
                return NULL;

        WARN_ON(sa->x != x);
        return sa;
}

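/* Look up the xfrm state for a hardware SA handle, presumably carried in
 * RX completion metadata. Runs under RCU so it is safe from the RX
 * datapath; on success a reference is taken on the returned state, which
 * the caller must drop with xfrm_state_put().
 */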
struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
                                              unsigned int handle)
{
        struct mlx5e_ipsec_sa_entry *sa_entry;
        struct xfrm_state *ret = NULL;

        rcu_read_lock();
        hash_for_each_possible_rcu(ipsec->sadb_rx, sa_entry, hlist, handle)
                if (sa_entry->handle == handle) {
                        ret = sa_entry->x;
                        xfrm_state_hold(ret);
                        break;
                }
        rcu_read_unlock();

        return ret;
}

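/* Allocate a unique, non-zero handle for the SA and publish the entry in
 * the RX SADB hash, keyed by that handle, so RX completions can resolve
 * it via mlx5e_ipsec_sadb_rx_lookup().
 */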
static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
        unsigned long flags;
        int ret;

        ret = ida_simple_get(&ipsec->halloc, 1, 0, GFP_KERNEL);
        if (ret < 0)
                return ret;

        spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
        sa_entry->handle = ret;
        hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle);
        spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);

        return 0;
}

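/* Unhash the entry so new RX lookups stop matching it. The IDA handle is
 * released separately in mlx5e_ipsec_sadb_rx_free(), after the xfrm layer
 * has synchronized RCU.
 */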
static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
        unsigned long flags;

        spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
        hash_del_rcu(&sa_entry->hlist);
        spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
}

static void mlx5e_ipsec_sadb_rx_free(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5e_ipsec *ipsec = sa_entry->ipsec;

        /* The xfrm layer already synchronizes RCU between the del and
         * free callbacks, so no RX lookup can still hold a reference to
         * this entry by the time the handle is released.
         */
        ida_simple_remove(&ipsec->halloc, sa_entry->handle);
}

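/* Recompute the ESN high bits from the software replay state and track
 * which half of the 32-bit sequence space the replay window sits in
 * (the "overlap" flag). Returns true when the change must be propagated
 * to the hardware.
 */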
static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct xfrm_replay_state_esn *replay_esn;
        u32 seq_bottom = 0;
        u8 overlap;
        u32 *esn;

        if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) {
                sa_entry->esn_state.trigger = 0;
                return false;
        }

        replay_esn = sa_entry->x->replay_esn;
        /* Guard against unsigned underflow while the replay window has
         * not yet filled up, which would corrupt the overlap tracking
         * below.
         */
        if (replay_esn->seq >= replay_esn->replay_window)
                seq_bottom = replay_esn->seq - replay_esn->replay_window + 1;
        overlap = sa_entry->esn_state.overlap;

        sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x,
                                                    htonl(seq_bottom));
        esn = &sa_entry->esn_state.esn;

        sa_entry->esn_state.trigger = 1;
        if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
                ++(*esn);
                sa_entry->esn_state.overlap = 0;
                return true;
        } else if (unlikely(!overlap &&
                            (seq_bottom >= MLX5E_IPSEC_ESN_SCOPE_MID))) {
                sa_entry->esn_state.overlap = 1;
                return true;
        }

        return false;
}

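/* Translate a generic xfrm_state into the accelerator's attribute
 * structure: AES-GCM key material (key, salt, seq_iv, ICV length), ESN
 * state, RX handle, direction (encrypt/decrypt) and transport/tunnel
 * mode.
 */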
static void
mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
                                   struct mlx5_accel_esp_xfrm_attrs *attrs)
{
        struct xfrm_state *x = sa_entry->x;
        struct aes_gcm_keymat *aes_gcm = &attrs->keymat.aes_gcm;
        struct aead_geniv_ctx *geniv_ctx;
        struct crypto_aead *aead;
        unsigned int crypto_data_len, key_len;
        int ivsize;

        memset(attrs, 0, sizeof(*attrs));

        /* key */
        crypto_data_len = (x->aead->alg_key_len + 7) / 8;
        key_len = crypto_data_len - 4; /* 4 bytes salt at end */

        memcpy(aes_gcm->aes_key, x->aead->alg_key, key_len);
        aes_gcm->key_len = key_len * 8;

        /* salt and seq_iv */
        aead = x->data;
        geniv_ctx = crypto_aead_ctx(aead);
        ivsize = crypto_aead_ivsize(aead);
        memcpy(&aes_gcm->seq_iv, &geniv_ctx->salt, ivsize);
        memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
               sizeof(aes_gcm->salt));

        /* icv len */
        aes_gcm->icv_len = x->aead->alg_icv_len;

        /* esn */
        if (sa_entry->esn_state.trigger) {
                attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
                attrs->esn = sa_entry->esn_state.esn;
                if (sa_entry->esn_state.overlap)
                        attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
        }

        /* rx handle */
        attrs->sa_handle = sa_entry->handle;

        /* algo type */
        attrs->keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;

        /* action */
        attrs->action = (!(x->xso.flags & XFRM_OFFLOAD_INBOUND)) ?
                        MLX5_ACCEL_ESP_ACTION_ENCRYPT :
                        MLX5_ACCEL_ESP_ACTION_DECRYPT;
        /* flags */
        attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ?
                        MLX5_ACCEL_ESP_FLAGS_TRANSPORT :
                        MLX5_ACCEL_ESP_FLAGS_TUNNEL;
}

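/* Gate offload requests from the xfrm stack: only plain ESP in transport
 * or tunnel mode, using AES-GCM with a 128-bit ICV and a 128/256-bit key
 * plus 32-bit salt, "seqiv" IV generation, and no encapsulation, TFC
 * padding, or separate auth/compression algorithms. ESN and IPv6 support
 * are additionally gated on device capability bits.
 */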
static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
{
        struct net_device *netdev = x->xso.dev;
        struct mlx5e_priv *priv;

        priv = netdev_priv(netdev);

        if (x->props.aalgo != SADB_AALG_NONE) {
                netdev_info(netdev, "Cannot offload authenticated xfrm states\n");
                return -EINVAL;
        }
        if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
                netdev_info(netdev, "Only AES-GCM-ICV16 xfrm state may be offloaded\n");
                return -EINVAL;
        }
        if (x->props.calgo != SADB_X_CALG_NONE) {
                netdev_info(netdev, "Cannot offload compressed xfrm states\n");
                return -EINVAL;
        }
        if (x->props.flags & XFRM_STATE_ESN &&
            !(mlx5_accel_ipsec_device_caps(priv->mdev) &
            MLX5_ACCEL_IPSEC_CAP_ESN)) {
                netdev_info(netdev, "Cannot offload ESN xfrm states\n");
                return -EINVAL;
        }
        if (x->props.family != AF_INET &&
            x->props.family != AF_INET6) {
                netdev_info(netdev, "Only IPv4/6 xfrm states may be offloaded\n");
                return -EINVAL;
        }
        if (x->props.mode != XFRM_MODE_TRANSPORT &&
            x->props.mode != XFRM_MODE_TUNNEL) {
                netdev_info(netdev, "Only transport and tunnel xfrm states may be offloaded\n");
                return -EINVAL;
        }
        if (x->id.proto != IPPROTO_ESP) {
                netdev_info(netdev, "Only ESP xfrm state may be offloaded\n");
                return -EINVAL;
        }
        if (x->encap) {
                netdev_info(netdev, "Encapsulated xfrm state may not be offloaded\n");
                return -EINVAL;
        }
        if (!x->aead) {
                netdev_info(netdev, "Cannot offload xfrm states without aead\n");
                return -EINVAL;
        }
        if (x->aead->alg_icv_len != 128) {
                netdev_info(netdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
                return -EINVAL;
        }
        if ((x->aead->alg_key_len != 128 + 32) &&
            (x->aead->alg_key_len != 256 + 32)) {
                netdev_info(netdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
                return -EINVAL;
        }
        if (x->tfcpad) {
                netdev_info(netdev, "Cannot offload xfrm states with tfc padding\n");
                return -EINVAL;
        }
        if (!x->geniv) {
                netdev_info(netdev, "Cannot offload xfrm states without geniv\n");
                return -EINVAL;
        }
        if (strcmp(x->geniv, "seqiv")) {
                netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
                return -EINVAL;
        }
        if (x->props.family == AF_INET6 &&
            !(mlx5_accel_ipsec_device_caps(priv->mdev) &
             MLX5_ACCEL_IPSEC_CAP_IPV6)) {
                netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n");
                return -EINVAL;
        }
        return 0;
}

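/* .xdo_dev_state_add: called by the xfrm stack when a state is created
 * with hardware offload requested. Validates the state, publishes RX
 * states in the SADB, then creates the accelerator xfrm object and its
 * hardware context.
 */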
static int mlx5e_xfrm_add_state(struct xfrm_state *x)
{
        struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
        struct net_device *netdev = x->xso.dev;
        struct mlx5_accel_esp_xfrm_attrs attrs;
        struct mlx5e_priv *priv;
        __be32 saddr[4] = {0}, daddr[4] = {0}, spi;
        bool is_ipv6 = false;
        int err;

        priv = netdev_priv(netdev);

        err = mlx5e_xfrm_validate_state(x);
        if (err)
                return err;

        sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
        if (!sa_entry) {
                err = -ENOMEM;
                goto out;
        }

        sa_entry->x = x;
        sa_entry->ipsec = priv->ipsec;

        /* Publish the SA in the RX SADB first, so that incoming packets
         * processed by the hardware before the add-SA completion arrives
         * can still be matched to this state.
         */
        if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
                err = mlx5e_ipsec_sadb_rx_add(sa_entry);
                if (err) {
                        netdev_info(netdev, "Failed adding to SADB_RX: %d\n", err);
                        goto err_entry;
                }
        } else {
                sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
                                mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
        }

        /* check esn */
        mlx5e_ipsec_update_esn_state(sa_entry);

        /* create xfrm */
        mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
        sa_entry->xfrm =
                mlx5_accel_esp_create_xfrm(priv->mdev, &attrs,
                                           MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA);
        if (IS_ERR(sa_entry->xfrm)) {
                err = PTR_ERR(sa_entry->xfrm);
                goto err_sadb_rx;
        }

        /* create hw context */
        if (x->props.family == AF_INET) {
                saddr[3] = x->props.saddr.a4;
                daddr[3] = x->id.daddr.a4;
        } else {
                memcpy(saddr, x->props.saddr.a6, sizeof(saddr));
                memcpy(daddr, x->id.daddr.a6, sizeof(daddr));
                is_ipv6 = true;
        }
        spi = x->id.spi;
        sa_entry->hw_context =
                        mlx5_accel_esp_create_hw_context(priv->mdev,
                                                         sa_entry->xfrm,
                                                         saddr, daddr, spi,
                                                         is_ipv6);
        if (IS_ERR(sa_entry->hw_context)) {
                err = PTR_ERR(sa_entry->hw_context);
                goto err_xfrm;
        }

        x->xso.offload_handle = (unsigned long)sa_entry;
        goto out;

err_xfrm:
        mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
err_sadb_rx:
        if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
                mlx5e_ipsec_sadb_rx_del(sa_entry);
                mlx5e_ipsec_sadb_rx_free(sa_entry);
        }
err_entry:
        kfree(sa_entry);
out:
        return err;
}

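/* .xdo_dev_state_delete: unhash RX states immediately so the datapath
 * stops matching them; the actual teardown is deferred to
 * mlx5e_xfrm_free_state().
 */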
static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
        struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);

        if (!sa_entry)
                return;

        if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
                mlx5e_ipsec_sadb_rx_del(sa_entry);
}

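/* .xdo_dev_state_free: tear down the hardware context and accelerator
 * xfrm object. The ipsec workqueue is flushed first so a pending
 * _update_xfrm_state() work item cannot run against a freed entry.
 */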
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
        struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);

        if (!sa_entry)
                return;

        if (sa_entry->hw_context) {
                flush_workqueue(sa_entry->ipsec->wq);
                mlx5_accel_esp_free_hw_context(sa_entry->hw_context);
                mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
        }

        if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
                mlx5e_ipsec_sadb_rx_free(sa_entry);

        kfree(sa_entry);
}

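/* Set up the per-netdevice IPsec offload context: RX SADB hash and lock,
 * handle allocator, and an ordered workqueue for deferred ESN updates.
 * A no-op on devices without IPsec offload support.
 */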
int mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
        struct mlx5e_ipsec *ipsec = NULL;

        if (!MLX5_IPSEC_DEV(priv->mdev)) {
                netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
                return 0;
        }

        ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
        if (!ipsec)
                return -ENOMEM;

        hash_init(ipsec->sadb_rx);
        spin_lock_init(&ipsec->sadb_rx_lock);
        ida_init(&ipsec->halloc);
        ipsec->en_priv = priv;
        ipsec->en_priv->ipsec = ipsec;
        ipsec->no_trailer = !!(mlx5_accel_ipsec_device_caps(priv->mdev) &
                               MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER);
        ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
                                            priv->netdev->name);
        if (!ipsec->wq) {
                kfree(ipsec);
                return -ENOMEM;
        }
        netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
        return 0;
}

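/* Tear down the per-netdevice IPsec offload context, draining the ESN
 * workqueue before destroying it.
 */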
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
{
        struct mlx5e_ipsec *ipsec = priv->ipsec;

        if (!ipsec)
                return;

        drain_workqueue(ipsec->wq);
        destroy_workqueue(ipsec->wq);

        ida_destroy(&ipsec->halloc);
        kfree(ipsec);
        priv->ipsec = NULL;
}

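/* .xdo_dev_offload_ok: per-packet check that the hardware can handle
 * this skb; IPv4 options and IPv6 extension headers fall back to the
 * software path.
 */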
static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
        if (x->props.family == AF_INET) {
                /* Offload with IPv4 options is not supported yet */
                if (ip_hdr(skb)->ihl > 5)
                        return false;
        } else {
                /* Offload with IPv6 extension headers is not supported yet */
                if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
                        return false;
        }

        return true;
}

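/* mlx5e_xfrm_advance_esn_state() runs in atomic context (note the
 * GFP_ATOMIC allocation there), while pushing the update to the device
 * may sleep, so the new attributes are snapshotted into this work item
 * and applied from the ipsec workqueue.
 */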
struct mlx5e_ipsec_modify_state_work {
        struct work_struct              work;
        struct mlx5_accel_esp_xfrm_attrs attrs;
        struct mlx5e_ipsec_sa_entry     *sa_entry;
};

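/* Work handler: apply the snapshotted attributes to the accelerator
 * xfrm. The ipsec workqueue is ordered, so updates are applied in the
 * order they were queued.
 */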
static void _update_xfrm_state(struct work_struct *work)
{
        int ret;
        struct mlx5e_ipsec_modify_state_work *modify_work =
                container_of(work, struct mlx5e_ipsec_modify_state_work, work);
        struct mlx5e_ipsec_sa_entry *sa_entry = modify_work->sa_entry;

        ret = mlx5_accel_esp_modify_xfrm(sa_entry->xfrm,
                                         &modify_work->attrs);
        if (ret)
                netdev_warn(sa_entry->ipsec->en_priv->netdev,
                            "Failed to modify xfrm state: %d\n", ret);

        kfree(modify_work);
}

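/* .xdo_dev_state_advance_esn: invoked by the xfrm stack as sequence
 * numbers advance. If the software ESN state moved into the other half
 * of the sequence space, queue a work item to mirror it to the hardware.
 */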
static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
        struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
        struct mlx5e_ipsec_modify_state_work *modify_work;
        bool need_update;

        if (!sa_entry)
                return;

        need_update = mlx5e_ipsec_update_esn_state(sa_entry);
        if (!need_update)
                return;

        modify_work = kzalloc(sizeof(*modify_work), GFP_ATOMIC);
        if (!modify_work)
                return;

        mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &modify_work->attrs);
        modify_work->sa_entry = sa_entry;

        INIT_WORK(&modify_work->work, _update_xfrm_state);
        WARN_ON(!queue_work(sa_entry->ipsec->wq, &modify_work->work));
}

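/* Offload callbacks registered with the stack through
 * netdev->xfrmdev_ops in mlx5e_ipsec_build_netdev().
 */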
static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
        .xdo_dev_state_add      = mlx5e_xfrm_add_state,
        .xdo_dev_state_delete   = mlx5e_xfrm_del_state,
        .xdo_dev_state_free     = mlx5e_xfrm_free_state,
        .xdo_dev_offload_ok     = mlx5e_ipsec_offload_ok,
        .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
};

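/* Advertise IPsec offload features on the netdevice, degrading
 * gracefully: ESP offload needs the ESP capability plus SWP; ESP TX
 * checksum and GSO additionally require the swp_csum and swp_lso/LSO
 * capabilities.
 */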
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct net_device *netdev = priv->netdev;

        if (!priv->ipsec)
                return;

        if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
            !MLX5_CAP_ETH(mdev, swp)) {
                mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
                return;
        }

        mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
        netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
        netdev->features |= NETIF_F_HW_ESP;
        netdev->hw_enc_features |= NETIF_F_HW_ESP;

        if (!MLX5_CAP_ETH(mdev, swp_csum)) {
                mlx5_core_dbg(mdev, "mlx5e: SWP checksum not supported\n");
                return;
        }

        netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
        netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;

        if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) ||
            !MLX5_CAP_ETH(mdev, swp_lso)) {
                mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
                return;
        }

        mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
        netdev->features |= NETIF_F_GSO_ESP;
        netdev->hw_features |= NETIF_F_GSO_ESP;
        netdev->hw_enc_features |= NETIF_F_GSO_ESP;
}
