drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c


DEFINITIONS

This source file includes the following definitions:
  1. mlx5e_xsk_wakeup
  2. mlx5e_xsk_tx_post_err
  3. mlx5e_xsk_tx

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "tx.h"
#include "umem.h"
#include "en/xdp.h"
#include "en/params.h"
#include <net/xdp_sock.h>

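/* ndo_xsk_wakeup handler: invoked by the AF_XDP core when the application
 * asks the driver (via poll() or sendto()) to resume processing on a queue
 * that has need_wakeup set.
 */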
int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_params *params = &priv->channels.params;
	struct mlx5e_channel *c;
	u16 ix;

	if (unlikely(!mlx5e_xdp_is_active(priv)))
		return -ENETDOWN;

	if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix)))
		return -EINVAL;

	c = priv->channels.c[ix];

	if (unlikely(!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)))
		return -ENXIO;

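	/* If NAPI is already scheduled, marking it missed is enough: the
	 * in-flight poll will pick up the new Tx work. Otherwise, trigger
	 * an interrupt by posting a NOP on the XSKICOSQ.
	 */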
	if (!napi_if_scheduled_mark_missed(&c->napi)) {
		/* To avoid WQE overrun, don't post a NOP if XSKICOSQ is not
		 * active and not polled by NAPI. Return 0, because the upcoming
		 * activate will trigger the IRQ for us.
		 */
		if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->xskicosq.state)))
			return 0;

		if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state))
			return 0;

		spin_lock(&c->xskicosq_lock);
		mlx5e_trigger_irq(&c->xskicosq);
		spin_unlock(&c->xskicosq_lock);
	}

	return 0;
}

/* When TX fails (because of the size of the packet), we need to get completions
 * in order, so post a NOP to get a CQE. Since AF_XDP doesn't distinguish
 * between successful TX and errors, handling in mlx5e_poll_xdpsq_cq is the
 * same.
 */
static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq,
				  struct mlx5e_xdp_info *xdpi)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi];
	struct mlx5e_tx_wqe *nopwqe;

	wi->num_wqebbs = 1;
	wi->num_pkts = 1;

	nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
	mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
	sq->doorbell_cseg = &nopwqe->ctrl;
}

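/* Polls the XSK Tx ring for up to @budget descriptors and posts them to the
 * XDP SQ. Returns true if the caller should keep polling (the budget was
 * exhausted or the SQ has no room), false if the Tx ring was drained.
 */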
bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
{
	struct xdp_umem *umem = sq->umem;
	struct mlx5e_xdp_info xdpi;
	struct mlx5e_xdp_xmit_data xdptxd;
	bool work_done = true;
	bool flush = false;

	xdpi.mode = MLX5E_XDP_XMIT_MODE_XSK;

	for (; budget; budget--) {
		int check_result = sq->xmit_xdp_frame_check(sq);
		struct xdp_desc desc;

		if (unlikely(check_result < 0)) {
			work_done = false;
			break;
		}

		if (!xsk_umem_consume_tx(umem, &desc)) {
			/* TX will get stuck until something wakes it up by
			 * triggering NAPI. Currently it's expected that the
			 * application calls sendto() if there are consumed, but
			 * not completed frames.
			 */
			break;
		}
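		/* The userspace counterpart, as a minimal sketch using the
		 * libbpf xsk helpers (the socket and ring variables here are
		 * illustrative):
		 *
		 *	if (xsk_ring_prod__needs_wakeup(&tx_ring))
		 *		sendto(xsk_socket__fd(xsk), NULL, 0,
		 *		       MSG_DONTWAIT, NULL, 0);
		 */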

		xdptxd.dma_addr = xdp_umem_get_dma(umem, desc.addr);
		xdptxd.data = xdp_umem_get_data(umem, desc.addr);
		xdptxd.len = desc.len;

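		/* The UMEM is DMA-mapped once at setup time; sync the frame
		 * so its contents are visible to the device before it's sent.
		 */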
		dma_sync_single_for_device(sq->pdev, xdptxd.dma_addr,
					   xdptxd.len, DMA_BIDIRECTIONAL);

		if (unlikely(!sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, check_result))) {
			if (sq->mpwqe.wqe)
				mlx5e_xdp_mpwqe_complete(sq);

			mlx5e_xsk_tx_post_err(sq, &xdpi);
		}

		flush = true;
	}

	if (flush) {
		if (sq->mpwqe.wqe)
			mlx5e_xdp_mpwqe_complete(sq);
		mlx5e_xmit_xdp_doorbell(sq);

		xsk_umem_consume_tx_done(umem);
	}

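	/* Report "busy" if the budget was fully used or the SQ check failed,
	 * so the NAPI poll loop keeps rescheduling; false means the Tx ring
	 * was drained within the budget.
	 */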
	return !(budget && work_done);
}
