drivers/net/ethernet/mellanox/mlx5/core/en_tx.c


DEFINITIONS

This source file includes the following definitions:
  1. mlx5e_dma_unmap_wqe_err
  2. mlx5e_get_dscp_up
  3. mlx5e_select_queue
  4. mlx5e_skb_l2_header_offset
  5. mlx5e_skb_l3_header_offset
  6. mlx5e_calc_min_inline
  7. mlx5e_insert_vlan
  8. mlx5e_txwqe_build_eseg_csum
  9. mlx5e_tx_get_gso_ihs
  10. mlx5e_txwqe_build_dsegs
  11. mlx5e_txwqe_complete
  12. mlx5e_sq_xmit
  13. mlx5e_xmit
  14. mlx5e_dump_error_cqe
  15. mlx5e_poll_tx_cq
  16. mlx5e_free_txqsq_descs
  17. mlx5i_txwqe_build_datagram
  18. mlx5i_sq_xmit

/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
#include "en/txrx.h"
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "en_accel/ktls.h"
#include "lib/clock.h"

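/* Unwind the last @num_dma entries pushed to the SQ's DMA fifo and unmap
 * them. Used to roll back mappings when building a WQE fails midway.
 */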
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
        int i;

        for (i = 0; i < num_dma; i++) {
                struct mlx5e_sq_dma *last_pushed_dma =
                        mlx5e_dma_get(sq, --sq->dma_fifo_pc);

                mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
        }
}

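/* Map the packet's DSCP field to a user priority through the dcbx
 * dscp2prio table. Non-IP packets use DSCP 0.
 */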
#ifdef CONFIG_MLX5_CORE_EN_DCB
static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
{
        int dscp_cp = 0;

        if (skb->protocol == htons(ETH_P_IP))
                dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
        else if (skb->protocol == htons(ETH_P_IPV6))
                dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

        return priv->dcbx_dp.dscp2prio[dscp_cp];
}
#endif

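/* TX queue selection: without TCs just use netdev_pick_tx(); with TCs,
 * derive the user priority from DSCP (when the trust state is DSCP) or
 * from the VLAN PCP, and map (channel, priority) to the real TX queue.
 */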
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                       struct net_device *sb_dev)
{
        int txq_ix = netdev_pick_tx(dev, skb, NULL);
        struct mlx5e_priv *priv = netdev_priv(dev);
        u16 num_channels;
        int up = 0;

        if (!netdev_get_num_tc(dev))
                return txq_ix;

#ifdef CONFIG_MLX5_CORE_EN_DCB
        if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
                up = mlx5e_get_dscp_up(priv, skb);
        else
#endif
                if (skb_vlan_tag_present(skb))
                        up = skb_vlan_tag_get_prio(skb);

        /* txq_ix can be larger than num_channels since
         * dev->num_real_tx_queues = num_channels * num_tc
         */
        num_channels = priv->channels.params.num_channels;
        if (txq_ix >= num_channels)
                txq_ix = priv->txq2sq[txq_ix]->ch_ix;

        return priv->channel_tc2realtxq[txq_ix][up];
}

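/* Helpers for deciding how many header bytes to inline into the WQE:
 * the L2 variant returns at least ETH_HLEN + VLAN_HLEN, the L3 variant
 * uses the transport offset when the transport header has been set.
 */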
static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

        return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
        if (skb_transport_header_was_set(skb))
                return skb_transport_offset(skb);
        else
                return mlx5e_skb_l2_header_offset(skb);
}

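/* Compute the minimal number of header bytes to inline for the given
 * inline mode (none, L2, IP or TCP/UDP), capped at skb_headlen().
 */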
static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
                                        struct sk_buff *skb)
{
        u16 hlen;

        switch (mode) {
        case MLX5_INLINE_MODE_NONE:
                return 0;
        case MLX5_INLINE_MODE_TCP_UDP:
                hlen = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
                if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
                        hlen += VLAN_HLEN;
                break;
        case MLX5_INLINE_MODE_IP:
                hlen = mlx5e_skb_l3_header_offset(skb);
                break;
        case MLX5_INLINE_MODE_L2:
        default:
                hlen = mlx5e_skb_l2_header_offset(skb);
        }
        return min_t(u16, hlen, skb_headlen(skb));
}

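/* Copy the inline headers into the WQE while inserting the VLAN tag
 * right after the MAC addresses, as the tag is carried out of band in
 * the skb.
 */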
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
{
        struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
        int cpy1_sz = 2 * ETH_ALEN;
        int cpy2_sz = ihs - cpy1_sz;

        memcpy(vhdr, skb->data, cpy1_sz);
        vhdr->h_vlan_proto = skb->vlan_proto;
        vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
        memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}

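/* Set the checksum offload flags in the ethernet segment according to
 * skb->ip_summed and whether the packet is encapsulated.
 */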
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
                if (skb->encapsulation) {
                        eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
                                          MLX5_ETH_WQE_L4_INNER_CSUM;
                        sq->stats->csum_partial_inner++;
                } else {
                        eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
                        sq->stats->csum_partial++;
                }
        } else
                sq->stats->csum_none++;
}

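/* Return the inline header size for a GSO skb: headers up to and
 * including the (inner) TCP header, or the UDP header for UDP GSO.
 */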
static inline u16
mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
{
        struct mlx5e_sq_stats *stats = sq->stats;
        u16 ihs;

        if (skb->encapsulation) {
                ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
                stats->tso_inner_packets++;
                stats->tso_inner_bytes += skb->len - ihs;
        } else {
                if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
                        ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
                else
                        ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
                stats->tso_packets++;
                stats->tso_bytes += skb->len - ihs;
        }

        return ihs;
}

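/* DMA-map the skb's linear part and page fragments, writing one data
 * segment per mapping. Returns the number of mappings pushed to the DMA
 * fifo, or -ENOMEM after unwinding them on a mapping error.
 */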
static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                        unsigned char *skb_data, u16 headlen,
                        struct mlx5_wqe_data_seg *dseg)
{
        dma_addr_t dma_addr = 0;
        u8 num_dma          = 0;
        int i;

        if (headlen) {
                dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
                                          DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                        goto dma_unmap_wqe_err;

                dseg->addr       = cpu_to_be64(dma_addr);
                dseg->lkey       = sq->mkey_be;
                dseg->byte_count = cpu_to_be32(headlen);

                mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
                num_dma++;
                dseg++;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                int fsz = skb_frag_size(frag);

                dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
                                            DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                        goto dma_unmap_wqe_err;

                dseg->addr       = cpu_to_be64(dma_addr);
                dseg->lkey       = sq->mkey_be;
                dseg->byte_count = cpu_to_be32(fsz);

                mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
                num_dma++;
                dseg++;
        }

        return num_dma;

dma_unmap_wqe_err:
        mlx5e_dma_unmap_wqe_err(sq, num_dma);
        return -ENOMEM;
}

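/* Finalize a WQE: record it in the wqe_info array, fill the control
 * segment, advance the producer counter, stop the queue when fewer than
 * sq->stop_room WQEBBs remain, and ring the doorbell unless xmit_more
 * defers it.
 */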
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                     u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma,
                     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
                     bool xmit_more)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        bool send_doorbell;

        wi->num_bytes = num_bytes;
        wi->num_dma = num_dma;
        wi->num_wqebbs = num_wqebbs;
        wi->skb = skb;

        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
        cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);

        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

        sq->pc += wi->num_wqebbs;
        if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, sq->stop_room))) {
                netif_tx_stop_queue(sq->txq);
                sq->stats->stopped++;
        }

        send_doorbell = __netdev_tx_sent_queue(sq->txq, num_bytes,
                                               xmit_more);
        if (send_doorbell)
                mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}

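/* Build and post a send WQE for one skb. The WQE is laid out as a
 * control segment, an ethernet segment (optionally carrying inline
 * headers and/or a VLAN insertion request) and the data segments. GSO
 * skbs use MLX5_OPCODE_LSO with the MSS in the ethernet segment; others
 * use MLX5_OPCODE_SEND with the SQ's min-inline policy. If the WQE would
 * not fit contiguously before the ring edge, the remaining room is
 * filled first (mlx5e_fill_sq_frag_edge) and a fresh slot is fetched.
 */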
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                          struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5_wqe_ctrl_seg *cseg;
        struct mlx5_wqe_eth_seg  *eseg;
        struct mlx5_wqe_data_seg *dseg;
        struct mlx5e_tx_wqe_info *wi;

        struct mlx5e_sq_stats *stats = sq->stats;
        u16 headlen, ihs, contig_wqebbs_room;
        u16 ds_cnt, ds_cnt_inl = 0;
        u8 num_wqebbs, opcode;
        u32 num_bytes;
        int num_dma;
        __be16 mss;

        /* Calc ihs and ds cnt, no writes to wqe yet */
        ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
        if (skb_is_gso(skb)) {
                opcode    = MLX5_OPCODE_LSO;
                mss       = cpu_to_be16(skb_shinfo(skb)->gso_size);
                ihs       = mlx5e_tx_get_gso_ihs(sq, skb);
                num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
                stats->packets += skb_shinfo(skb)->gso_segs;
        } else {
                u8 mode = mlx5e_tx_wqe_inline_mode(sq, &wqe->ctrl, skb);

                opcode    = MLX5_OPCODE_SEND;
                mss       = 0;
                ihs       = mlx5e_calc_min_inline(mode, skb);
                num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
                stats->packets++;
        }

        stats->bytes     += num_bytes;
        stats->xmit_more += xmit_more;

        headlen = skb->len - ihs - skb->data_len;
        ds_cnt += !!headlen;
        ds_cnt += skb_shinfo(skb)->nr_frags;

        if (ihs) {
                ihs += !!skb_vlan_tag_present(skb) * VLAN_HLEN;

                ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
                ds_cnt += ds_cnt_inl;
        }

        num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
        contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
        if (unlikely(contig_wqebbs_room < num_wqebbs)) {
#ifdef CONFIG_MLX5_EN_IPSEC
                struct mlx5_wqe_eth_seg cur_eth = wqe->eth;
#endif
#ifdef CONFIG_MLX5_EN_TLS
                struct mlx5_wqe_ctrl_seg cur_ctrl = wqe->ctrl;
#endif
                mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
                wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
#ifdef CONFIG_MLX5_EN_IPSEC
                wqe->eth = cur_eth;
#endif
#ifdef CONFIG_MLX5_EN_TLS
                wqe->ctrl = cur_ctrl;
#endif
        }

        /* fill wqe */
        wi   = &sq->db.wqe_info[pi];
        cseg = &wqe->ctrl;
        eseg = &wqe->eth;
        dseg =  wqe->data;

#if IS_ENABLED(CONFIG_GENEVE)
        if (skb->encapsulation)
                mlx5e_tx_tunnel_accel(skb, eseg);
#endif
        mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

        eseg->mss = mss;

        if (ihs) {
                eseg->inline_hdr.sz = cpu_to_be16(ihs);
                if (skb_vlan_tag_present(skb)) {
                        ihs -= VLAN_HLEN;
                        mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs);
                        stats->added_vlan_packets++;
                } else {
                        memcpy(eseg->inline_hdr.start, skb->data, ihs);
                }
                dseg += ds_cnt_inl;
        } else if (skb_vlan_tag_present(skb)) {
                eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
                if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
                        eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
                eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
                stats->added_vlan_packets++;
        }

        num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
        if (unlikely(num_dma < 0))
                goto err_drop;

        mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
                             num_dma, wi, cseg, xmit_more);

        return NETDEV_TX_OK;

err_drop:
        stats->dropped++;
        dev_kfree_skb_any(skb);

        return NETDEV_TX_OK;
}

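/* ndo_start_xmit: look up the SQ from the skb's queue mapping, fetch a
 * WQE slot, give the acceleration offloads (en_accel) a chance to handle
 * or transform the skb, then build and post the WQE via mlx5e_sq_xmit().
 */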
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_tx_wqe *wqe;
        struct mlx5e_txqsq *sq;
        u16 pi;

        sq = priv->txq2sq[skb_get_queue_mapping(skb)];
        wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);

        /* might send skbs and update wqe and pi */
        skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
        if (unlikely(!skb))
                return NETDEV_TX_OK;

        return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
}

static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
                                 struct mlx5_err_cqe *err_cqe)
{
        struct mlx5_cqwq *wq = &sq->cq.wq;
        u32 ci;

        ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);

        netdev_err(sq->channel->netdev,
                   "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
                   sq->cq.mcq.cqn, ci, sq->sqn,
                   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
                   err_cqe->syndrome, err_cqe->vendor_err_synd);
        mlx5_dump_err_cqe(sq->cq.mdev, err_cqe);
}

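/* NAPI TX completion handler: consume up to MLX5E_TX_CQ_POLL_BUDGET
 * CQEs, unmap the DMA of each completed WQE, free the skbs, report the
 * completed bytes/packets to BQL and wake the queue if it was stopped.
 * Returns true when the budget was exhausted, i.e. more work may remain.
 */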
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
        struct mlx5e_sq_stats *stats;
        struct mlx5e_txqsq *sq;
        struct mlx5_cqe64 *cqe;
        u32 dma_fifo_cc;
        u32 nbytes;
        u16 npkts;
        u16 sqcc;
        int i;

        sq = container_of(cq, struct mlx5e_txqsq, cq);

        if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
                return false;

        cqe = mlx5_cqwq_get_cqe(&cq->wq);
        if (!cqe)
                return false;

        stats = sq->stats;

        npkts = 0;
        nbytes = 0;

        /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
         * otherwise a cq overrun may occur
         */
        sqcc = sq->cc;

        /* avoid dirtying sq cache line every cqe */
        dma_fifo_cc = sq->dma_fifo_cc;

        i = 0;
        do {
                u16 wqe_counter;
                bool last_wqe;

                mlx5_cqwq_pop(&cq->wq);

                wqe_counter = be16_to_cpu(cqe->wqe_counter);

                if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
                        if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
                                              &sq->state)) {
                                mlx5e_dump_error_cqe(sq,
                                                     (struct mlx5_err_cqe *)cqe);
                                queue_work(cq->channel->priv->wq,
                                           &sq->recover_work);
                        }
                        stats->cqe_err++;
                }

                do {
                        struct mlx5e_tx_wqe_info *wi;
                        struct sk_buff *skb;
                        u16 ci;
                        int j;

                        last_wqe = (sqcc == wqe_counter);

                        ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
                        wi = &sq->db.wqe_info[ci];
                        skb = wi->skb;

                        if (unlikely(!skb)) {
                                mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
                                sqcc += wi->num_wqebbs;
                                continue;
                        }

                        if (unlikely(skb_shinfo(skb)->tx_flags &
                                     SKBTX_HW_TSTAMP)) {
                                struct skb_shared_hwtstamps hwts = {};

                                hwts.hwtstamp =
                                        mlx5_timecounter_cyc2time(sq->clock,
                                                                  get_cqe_ts(cqe));
                                skb_tstamp_tx(skb, &hwts);
                        }

                        for (j = 0; j < wi->num_dma; j++) {
                                struct mlx5e_sq_dma *dma =
                                        mlx5e_dma_get(sq, dma_fifo_cc++);

                                mlx5e_tx_dma_unmap(sq->pdev, dma);
                        }

                        npkts++;
                        nbytes += wi->num_bytes;
                        sqcc += wi->num_wqebbs;
                        napi_consume_skb(skb, napi_budget);
                } while (!last_wqe);

        } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

        stats->cqes += i;

        mlx5_cqwq_update_db_record(&cq->wq);

        /* ensure cq space is freed before enabling more cqes */
        wmb();

        sq->dma_fifo_cc = dma_fifo_cc;
        sq->cc = sqcc;

        netdev_tx_completed_queue(sq->txq, npkts, nbytes);

        if (netif_tx_queue_stopped(sq->txq) &&
            mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
            !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
                netif_tx_wake_queue(sq->txq);
                stats->wake++;
        }

        return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

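/* Drop every descriptor still outstanding on the SQ (from cc to pc),
 * unmapping its DMA and freeing its skb. Used when the SQ is being torn
 * down and no further completions are expected.
 */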
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
        struct mlx5e_tx_wqe_info *wi;
        u32 dma_fifo_cc, nbytes = 0;
        u16 ci, sqcc, npkts = 0;
        struct sk_buff *skb;
        int i;

        sqcc = sq->cc;
        dma_fifo_cc = sq->dma_fifo_cc;

        while (sqcc != sq->pc) {
                ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
                wi = &sq->db.wqe_info[ci];
                skb = wi->skb;

                if (!skb) {
                        mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
                        sqcc += wi->num_wqebbs;
                        continue;
                }

                for (i = 0; i < wi->num_dma; i++) {
                        struct mlx5e_sq_dma *dma =
                                mlx5e_dma_get(sq, dma_fifo_cc++);

                        mlx5e_tx_dma_unmap(sq->pdev, dma);
                }

                dev_kfree_skb_any(skb);
                npkts++;
                nbytes += wi->num_bytes;
                sqcc += wi->num_wqebbs;
        }

        sq->dma_fifo_cc = dma_fifo_cc;
        sq->cc = sqcc;

        netdev_tx_completed_queue(sq->txq, npkts, nbytes);
}

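/* IPoIB TX path: fill the WQE datagram segment with the destination
 * address vector, QP number and Q_Key.
 */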
#ifdef CONFIG_MLX5_CORE_IPOIB
static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
                           struct mlx5_wqe_datagram_seg *dseg)
{
        memcpy(&dseg->av, av, sizeof(struct mlx5_av));
        dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
        dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}

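/* Build and post an IPoIB send WQE. Same flow as mlx5e_sq_xmit(), with a
 * datagram segment after the control segment and no VLAN handling.
 */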
netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                          struct mlx5_av *av, u32 dqpn, u32 dqkey,
                          bool xmit_more)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5i_tx_wqe *wqe;

        struct mlx5_wqe_datagram_seg *datagram;
        struct mlx5_wqe_ctrl_seg *cseg;
        struct mlx5_wqe_eth_seg  *eseg;
        struct mlx5_wqe_data_seg *dseg;
        struct mlx5e_tx_wqe_info *wi;

        struct mlx5e_sq_stats *stats = sq->stats;
        u16 headlen, ihs, pi, contig_wqebbs_room;
        u16 ds_cnt, ds_cnt_inl = 0;
        u8 num_wqebbs, opcode;
        u32 num_bytes;
        int num_dma;
        __be16 mss;

        /* Calc ihs and ds cnt, no writes to wqe yet */
        ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
        if (skb_is_gso(skb)) {
                opcode    = MLX5_OPCODE_LSO;
                mss       = cpu_to_be16(skb_shinfo(skb)->gso_size);
                ihs       = mlx5e_tx_get_gso_ihs(sq, skb);
                num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
                stats->packets += skb_shinfo(skb)->gso_segs;
        } else {
                u8 mode = mlx5e_tx_wqe_inline_mode(sq, NULL, skb);

                opcode    = MLX5_OPCODE_SEND;
                mss       = 0;
                ihs       = mlx5e_calc_min_inline(mode, skb);
                num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
                stats->packets++;
        }

        stats->bytes     += num_bytes;
        stats->xmit_more += xmit_more;

        headlen = skb->len - ihs - skb->data_len;
        ds_cnt += !!headlen;
        ds_cnt += skb_shinfo(skb)->nr_frags;

        if (ihs) {
                ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
                ds_cnt += ds_cnt_inl;
        }

        num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
        pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
        if (unlikely(contig_wqebbs_room < num_wqebbs)) {
                mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
                pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        }

        mlx5i_sq_fetch_wqe(sq, &wqe, pi);

        /* fill wqe */
        wi       = &sq->db.wqe_info[pi];
        cseg     = &wqe->ctrl;
        datagram = &wqe->datagram;
        eseg     = &wqe->eth;
        dseg     =  wqe->data;

        mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

        mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

        eseg->mss = mss;

        if (ihs) {
                memcpy(eseg->inline_hdr.start, skb->data, ihs);
                eseg->inline_hdr.sz = cpu_to_be16(ihs);
                dseg += ds_cnt_inl;
        }

        num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
        if (unlikely(num_dma < 0))
                goto err_drop;

        mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
                             num_dma, wi, cseg, xmit_more);

        return NETDEV_TX_OK;

err_drop:
        stats->dropped++;
        dev_kfree_skb_any(skb);

        return NETDEV_TX_OK;
}
#endif
