drivers/net/ethernet/huawei/hinic/hinic_rx.c

DEFINITIONS

This source file includes the following definitions:
  1. hinic_rxq_clean_stats
  2. hinic_rxq_get_stats
  3. rxq_stats_init
  4. rx_csum
  5. rx_alloc_skb
  6. rx_unmap_skb
  7. rx_free_skb
  8. rx_alloc_pkts
  9. free_all_rx_skbs
  10. rx_recv_jumbo_pkt
  11. rxq_recv
  12. rx_poll
  13. rx_add_napi
  14. rx_del_napi
  15. rx_irq
  16. rx_request_irq
  17. rx_free_irq
  18. hinic_init_rxq
  19. hinic_clean_rxq

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Huawei HiNIC PCI Express Linux driver
   4  * Copyright(c) 2017 Huawei Technologies Co., Ltd
   5  */
   6 
   7 #include <linux/kernel.h>
   8 #include <linux/types.h>
   9 #include <linux/errno.h>
  10 #include <linux/pci.h>
  11 #include <linux/device.h>
  12 #include <linux/netdevice.h>
  13 #include <linux/etherdevice.h>
  14 #include <linux/u64_stats_sync.h>
  15 #include <linux/slab.h>
  16 #include <linux/interrupt.h>
  17 #include <linux/skbuff.h>
  18 #include <linux/dma-mapping.h>
  19 #include <linux/prefetch.h>
  20 #include <linux/cpumask.h>
  21 #include <linux/if_vlan.h>
  22 #include <asm/barrier.h>
  23 
  24 #include "hinic_common.h"
  25 #include "hinic_hw_if.h"
  26 #include "hinic_hw_wqe.h"
  27 #include "hinic_hw_wq.h"
  28 #include "hinic_hw_qp.h"
  29 #include "hinic_hw_dev.h"
  30 #include "hinic_rx.h"
  31 #include "hinic_dev.h"
  32 
  33 #define RX_IRQ_NO_PENDING               0
  34 #define RX_IRQ_NO_COALESC               0
  35 #define RX_IRQ_NO_LLI_TIMER             0
  36 #define RX_IRQ_NO_CREDIT                0
  37 #define RX_IRQ_NO_RESEND_TIMER          0
  38 #define HINIC_RX_BUFFER_WRITE           16
  39 
  40 #define HINIC_RX_IPV6_PKT               7
  41 #define LRO_PKT_HDR_LEN_IPV4            66
  42 #define LRO_PKT_HDR_LEN_IPV6            86
  43 #define LRO_REPLENISH_THLD              256
  44 
  45 #define LRO_PKT_HDR_LEN(cqe)            \
  46         (HINIC_GET_RX_PKT_TYPE(be32_to_cpu((cqe)->offload_type)) == \
  47          HINIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4)
  48 
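The two header-length constants appear to correspond to Ethernet plus IP plus TCP-with-timestamps framing (14 + 20 + 32 = 66 bytes for IPv4, 14 + 40 + 32 = 86 bytes for IPv6), and the macro simply selects between them based on the packet type reported in the CQE. A standalone mirror of that selection with plain integers, useful only as an illustration (not driver code), could look like this:

static inline unsigned int example_lro_hdr_len(unsigned int rx_pkt_type)
{
        /* 7 mirrors HINIC_RX_IPV6_PKT above */
        return rx_pkt_type == 7 ? 86 /* 14 + 40 + 32 */
                                : 66 /* 14 + 20 + 32 */;
}
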
  49 /**
   50  * hinic_rxq_clean_stats - Clean the statistics of the specified queue
  51  * @rxq: Logical Rx Queue
  52  **/
  53 void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
  54 {
  55         struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
  56 
  57         u64_stats_update_begin(&rxq_stats->syncp);
  58         rxq_stats->pkts  = 0;
  59         rxq_stats->bytes = 0;
  60         rxq_stats->errors = 0;
  61         rxq_stats->csum_errors = 0;
  62         rxq_stats->other_errors = 0;
  63         u64_stats_update_end(&rxq_stats->syncp);
  64 }
  65 
  66 /**
  67  * hinic_rxq_get_stats - get statistics of Rx Queue
  68  * @rxq: Logical Rx Queue
  69  * @stats: return updated stats here
  70  **/
  71 void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
  72 {
  73         struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
  74         unsigned int start;
  75 
  76         u64_stats_update_begin(&stats->syncp);
  77         do {
  78                 start = u64_stats_fetch_begin(&rxq_stats->syncp);
  79                 stats->pkts = rxq_stats->pkts;
  80                 stats->bytes = rxq_stats->bytes;
  81                 stats->errors = rxq_stats->csum_errors +
  82                                 rxq_stats->other_errors;
  83                 stats->csum_errors = rxq_stats->csum_errors;
  84                 stats->other_errors = rxq_stats->other_errors;
  85         } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
  86         u64_stats_update_end(&stats->syncp);
  87 }
  88 
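hinic_rxq_get_stats() gives callers a torn-read-safe snapshot of a single queue's counters. A minimal sketch of how a ndo_get_stats64 handler might fold those per-queue snapshots into the netdev totals is shown below; the num_rxqs count and rxqs[] array are assumed names used for illustration, not the driver's actual fields.

static void example_get_stats64(struct net_device *netdev,
                                struct rtnl_link_stats64 *stats)
{
        struct hinic_dev *nic_dev = netdev_priv(netdev);
        struct hinic_rxq_stats rx_stats;
        int i;

        u64_stats_init(&rx_stats.syncp);

        for (i = 0; i < nic_dev->num_rxqs; i++) {       /* assumed field */
                /* assumed per-queue array */
                hinic_rxq_get_stats(&nic_dev->rxqs[i], &rx_stats);
                stats->rx_packets += rx_stats.pkts;
                stats->rx_bytes += rx_stats.bytes;
                stats->rx_errors += rx_stats.errors;
        }
}
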
  89 /**
   90  * rxq_stats_init - Initialize the statistics of the specified queue
  91  * @rxq: Logical Rx Queue
  92  **/
  93 static void rxq_stats_init(struct hinic_rxq *rxq)
  94 {
  95         struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
  96 
  97         u64_stats_init(&rxq_stats->syncp);
  98         hinic_rxq_clean_stats(rxq);
  99 }
 100 
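/**
 * rx_csum - set the skb checksum status based on the hw checksum report
 * @rxq: rx queue
 * @status: status field of the rx cqe
 * @skb: skb to set the checksum status on
 **/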
 101 static void rx_csum(struct hinic_rxq *rxq, u32 status,
 102                     struct sk_buff *skb)
 103 {
 104         struct net_device *netdev = rxq->netdev;
 105         u32 csum_err;
 106 
 107         csum_err = HINIC_RQ_CQE_STATUS_GET(status, CSUM_ERR);
 108 
 109         if (!(netdev->features & NETIF_F_RXCSUM))
 110                 return;
 111 
 112         if (!csum_err) {
 113                 skb->ip_summed = CHECKSUM_UNNECESSARY;
 114         } else {
 115                 if (!(csum_err & (HINIC_RX_CSUM_HW_CHECK_NONE |
 116                         HINIC_RX_CSUM_IPSU_OTHER_ERR)))
 117                         rxq->rxq_stats.csum_errors++;
 118                 skb->ip_summed = CHECKSUM_NONE;
 119         }
  120 }

  121 /**
 122  * rx_alloc_skb - allocate skb and map it to dma address
 123  * @rxq: rx queue
 124  * @dma_addr: returned dma address for the skb
 125  *
 126  * Return skb
 127  **/
 128 static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq,
 129                                     dma_addr_t *dma_addr)
 130 {
 131         struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
 132         struct hinic_hwdev *hwdev = nic_dev->hwdev;
 133         struct hinic_hwif *hwif = hwdev->hwif;
 134         struct pci_dev *pdev = hwif->pdev;
 135         struct sk_buff *skb;
 136         dma_addr_t addr;
 137         int err;
 138 
 139         skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);
 140         if (!skb) {
 141                 netdev_err(rxq->netdev, "Failed to allocate Rx SKB\n");
 142                 return NULL;
 143         }
 144 
 145         addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz,
 146                               DMA_FROM_DEVICE);
 147         err = dma_mapping_error(&pdev->dev, addr);
 148         if (err) {
 149                 dev_err(&pdev->dev, "Failed to map Rx DMA, err = %d\n", err);
 150                 goto err_rx_map;
 151         }
 152 
 153         *dma_addr = addr;
 154         return skb;
 155 
 156 err_rx_map:
 157         dev_kfree_skb_any(skb);
 158         return NULL;
 159 }
 160 
 161 /**
 162  * rx_unmap_skb - unmap the dma address of the skb
 163  * @rxq: rx queue
 164  * @dma_addr: dma address of the skb
 165  **/
 166 static void rx_unmap_skb(struct hinic_rxq *rxq, dma_addr_t dma_addr)
 167 {
 168         struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
 169         struct hinic_hwdev *hwdev = nic_dev->hwdev;
 170         struct hinic_hwif *hwif = hwdev->hwif;
 171         struct pci_dev *pdev = hwif->pdev;
 172 
 173         dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz,
 174                          DMA_FROM_DEVICE);
 175 }
 176 
 177 /**
 178  * rx_free_skb - unmap and free skb
 179  * @rxq: rx queue
 180  * @skb: skb to free
 181  * @dma_addr: dma address of the skb
 182  **/
 183 static void rx_free_skb(struct hinic_rxq *rxq, struct sk_buff *skb,
 184                         dma_addr_t dma_addr)
 185 {
 186         rx_unmap_skb(rxq, dma_addr);
 187         dev_kfree_skb_any(skb);
 188 }
 189 
 190 /**
 191  * rx_alloc_pkts - allocate pkts in rx queue
 192  * @rxq: rx queue
 193  *
 194  * Return number of skbs allocated
 195  **/
 196 static int rx_alloc_pkts(struct hinic_rxq *rxq)
 197 {
 198         struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
 199         struct hinic_rq_wqe *rq_wqe;
 200         unsigned int free_wqebbs;
 201         struct hinic_sge sge;
 202         dma_addr_t dma_addr;
 203         struct sk_buff *skb;
 204         u16 prod_idx;
 205         int i;
 206 
 207         free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
 208 
 209         /* Limit the allocation chunks */
 210         if (free_wqebbs > nic_dev->rx_weight)
 211                 free_wqebbs = nic_dev->rx_weight;
 212 
 213         for (i = 0; i < free_wqebbs; i++) {
 214                 skb = rx_alloc_skb(rxq, &dma_addr);
 215                 if (!skb) {
 216                         netdev_err(rxq->netdev, "Failed to alloc Rx skb\n");
 217                         goto skb_out;
 218                 }
 219 
 220                 hinic_set_sge(&sge, dma_addr, skb->len);
 221 
 222                 rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
 223                                           &prod_idx);
 224                 if (!rq_wqe) {
 225                         rx_free_skb(rxq, skb, dma_addr);
 226                         goto skb_out;
 227                 }
 228 
 229                 hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);
 230 
 231                 hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb);
 232         }
 233 
 234 skb_out:
 235         if (i) {
  236                 wmb();  /* write all the wqes before updating the PI */
 237 
 238                 hinic_rq_update(rxq->rq, prod_idx);
 239         }
 240 
 241         return i;
 242 }
 243 
 244 /**
 245  * free_all_rx_skbs - free all skbs in rx queue
 246  * @rxq: rx queue
 247  **/
 248 static void free_all_rx_skbs(struct hinic_rxq *rxq)
 249 {
 250         struct hinic_rq *rq = rxq->rq;
 251         struct hinic_hw_wqe *hw_wqe;
 252         struct hinic_sge sge;
 253         u16 ci;
 254 
 255         while ((hw_wqe = hinic_read_wqe(rq->wq, HINIC_RQ_WQE_SIZE, &ci))) {
 256                 if (IS_ERR(hw_wqe))
 257                         break;
 258 
 259                 hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge);
 260 
 261                 hinic_put_wqe(rq->wq, HINIC_RQ_WQE_SIZE);
 262 
 263                 rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge));
 264         }
 265 }
 266 
 267 /**
 268  * rx_recv_jumbo_pkt - Rx handler for jumbo pkt
 269  * @rxq: rx queue
 270  * @head_skb: the first skb in the list
  271  * @left_pkt_len: remaining length of the pkt, excluding the head skb
 272  * @ci: consumer index
 273  *
  274  * Return number of wqes used for the remainder of the pkt
 275  **/
 276 static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb,
 277                              unsigned int left_pkt_len, u16 ci)
 278 {
 279         struct sk_buff *skb, *curr_skb = head_skb;
 280         struct hinic_rq_wqe *rq_wqe;
 281         unsigned int curr_len;
 282         struct hinic_sge sge;
 283         int num_wqes = 0;
 284 
 285         while (left_pkt_len > 0) {
 286                 rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
 287                                                 &skb, &ci);
 288 
 289                 num_wqes++;
 290 
 291                 hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);
 292 
 293                 rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));
 294 
 295                 prefetch(skb->data);
 296 
 297                 curr_len = (left_pkt_len > HINIC_RX_BUF_SZ) ? HINIC_RX_BUF_SZ :
 298                             left_pkt_len;
 299 
 300                 left_pkt_len -= curr_len;
 301 
 302                 __skb_put(skb, curr_len);
 303 
 304                 if (curr_skb == head_skb)
 305                         skb_shinfo(head_skb)->frag_list = skb;
 306                 else
 307                         curr_skb->next = skb;
 308 
 309                 head_skb->len += skb->len;
 310                 head_skb->data_len += skb->len;
 311                 head_skb->truesize += skb->truesize;
 312 
 313                 curr_skb = skb;
 314         }
 315 
 316         return num_wqes;
 317 }
 318 
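For a frame larger than one Rx buffer, the head skb keeps the first buffer and rx_recv_jumbo_pkt() consumes one further WQE per additional buffer, chaining the extra skbs on the head's frag_list. The continuation count is just a ceiling division of the remaining length by the buffer size; a kernel-style helper that makes the arithmetic explicit (illustrative only, using an example 2048-byte buffer in the comment):

static inline unsigned int example_extra_rx_wqes(unsigned int pkt_len,
                                                 unsigned int buf_sz)
{
        if (pkt_len <= buf_sz)
                return 0;

        /* e.g. pkt_len = 9000, buf_sz = 2048: the head buffer holds 2048
         * bytes, 6952 bytes remain, so 4 continuation WQEs are consumed
         */
        return DIV_ROUND_UP(pkt_len - buf_sz, buf_sz);
}
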
 319 /**
 320  * rxq_recv - Rx handler
 321  * @rxq: rx queue
 322  * @budget: maximum pkts to process
 323  *
 324  * Return number of pkts received
 325  **/
 326 static int rxq_recv(struct hinic_rxq *rxq, int budget)
 327 {
 328         struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq);
 329         struct net_device *netdev = rxq->netdev;
 330         u64 pkt_len = 0, rx_bytes = 0;
 331         struct hinic_rq *rq = rxq->rq;
 332         struct hinic_rq_wqe *rq_wqe;
 333         unsigned int free_wqebbs;
 334         struct hinic_rq_cqe *cqe;
 335         int num_wqes, pkts = 0;
 336         struct hinic_sge sge;
 337         unsigned int status;
 338         struct sk_buff *skb;
 339         u32 offload_type;
 340         u16 ci, num_lro;
 341         u16 num_wqe = 0;
 342         u32 vlan_len;
 343         u16 vid;
 344 
 345         while (pkts < budget) {
 346                 num_wqes = 0;
 347 
 348                 rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb,
 349                                            &ci);
 350                 if (!rq_wqe)
 351                         break;
 352 
 353                 /* make sure we read rx_done before packet length */
 354                 dma_rmb();
 355 
 356                 cqe = rq->cqe[ci];
  357         status = be32_to_cpu(cqe->status);
 358                 hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);
 359 
 360                 rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));
 361 
 362                 rx_csum(rxq, status, skb);
 363 
 364                 prefetch(skb->data);
 365 
 366                 pkt_len = sge.len;
 367 
 368                 if (pkt_len <= HINIC_RX_BUF_SZ) {
 369                         __skb_put(skb, pkt_len);
 370                 } else {
 371                         __skb_put(skb, HINIC_RX_BUF_SZ);
 372                         num_wqes = rx_recv_jumbo_pkt(rxq, skb, pkt_len -
 373                                                      HINIC_RX_BUF_SZ, ci);
 374                 }
 375 
 376                 hinic_rq_put_wqe(rq, ci,
 377                                  (num_wqes + 1) * HINIC_RQ_WQE_SIZE);
 378 
 379                 offload_type = be32_to_cpu(cqe->offload_type);
 380                 vlan_len = be32_to_cpu(cqe->len);
 381                 if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
 382                     HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) {
 383                         vid = HINIC_GET_RX_VLAN_TAG(vlan_len);
 384                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
 385                 }
 386 
 387                 skb_record_rx_queue(skb, qp->q_id);
 388                 skb->protocol = eth_type_trans(skb, rxq->netdev);
 389 
 390                 napi_gro_receive(&rxq->napi, skb);
 391 
 392                 pkts++;
 393                 rx_bytes += pkt_len;
 394 
 395                 num_lro = HINIC_GET_RX_NUM_LRO(status);
 396                 if (num_lro) {
 397                         rx_bytes += ((num_lro - 1) *
 398                                      LRO_PKT_HDR_LEN(cqe));
 399 
 400                         num_wqe +=
 401                         (u16)(pkt_len >> rxq->rx_buff_shift) +
 402                         ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
 403                 }
 404 
 405                 cqe->status = 0;
 406 
 407                 if (num_wqe >= LRO_REPLENISH_THLD)
 408                         break;
 409         }
 410 
 411         free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
 412         if (free_wqebbs > HINIC_RX_BUFFER_WRITE)
 413                 rx_alloc_pkts(rxq);
 414 
 415         u64_stats_update_begin(&rxq->rxq_stats.syncp);
 416         rxq->rxq_stats.pkts += pkts;
 417         rxq->rxq_stats.bytes += rx_bytes;
 418         u64_stats_update_end(&rxq->rxq_stats.syncp);
 419 
 420         return pkts;
 421 }
 422 
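Two bits of bookkeeping above are worth spelling out. For an LRO-coalesced CQE the hardware reports only the merged length, so the bytes of the (num_lro - 1) headers that were folded away appear to be credited back via LRO_PKT_HDR_LEN(); and num_wqe accumulates roughly one count per Rx buffer consumed by LRO traffic so the loop can break at LRO_REPLENISH_THLD and let the ring be refilled. A kernel-style sketch of the byte adjustment, with illustrative numbers (only meaningful when num_lro is non-zero, as in the code above):

static inline u64 example_lro_rx_bytes(u64 merged_len, u16 num_lro,
                                       unsigned int hdr_len)
{
        /* e.g. merged_len = 32000, num_lro = 8, hdr_len = 66 (IPv4):
         * counted bytes = 32000 + 7 * 66 = 32462
         */
        return merged_len + (u64)(num_lro - 1) * hdr_len;
}
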
 423 static int rx_poll(struct napi_struct *napi, int budget)
 424 {
 425         struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi);
 426         struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
 427         struct hinic_rq *rq = rxq->rq;
 428         int pkts;
 429 
 430         pkts = rxq_recv(rxq, budget);
 431         if (pkts >= budget)
 432                 return budget;
 433 
 434         napi_complete(napi);
 435         hinic_hwdev_set_msix_state(nic_dev->hwdev,
 436                                    rq->msix_entry,
 437                                    HINIC_MSIX_ENABLE);
 438 
 439         return pkts;
 440 }
 441 
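rx_poll() follows the usual NAPI contract: consume at most budget packets, return budget while work remains, otherwise complete NAPI and re-arm the MSI-X vector. A sketch of the same poll written with napi_complete_done(), which returns false when interrupts should not yet be re-enabled (for instance under busy polling), is shown below; it is an illustration, not the driver's code:

static int example_rx_poll(struct napi_struct *napi, int budget)
{
        struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi);
        struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
        int pkts = rxq_recv(rxq, budget);

        if (pkts < budget && napi_complete_done(napi, pkts))
                hinic_hwdev_set_msix_state(nic_dev->hwdev,
                                           rxq->rq->msix_entry,
                                           HINIC_MSIX_ENABLE);

        return pkts;
}
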
 442 static void rx_add_napi(struct hinic_rxq *rxq)
 443 {
 444         struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
 445 
 446         netif_napi_add(rxq->netdev, &rxq->napi, rx_poll, nic_dev->rx_weight);
 447         napi_enable(&rxq->napi);
 448 }
 449 
 450 static void rx_del_napi(struct hinic_rxq *rxq)
 451 {
 452         napi_disable(&rxq->napi);
 453         netif_napi_del(&rxq->napi);
 454 }
 455 
 456 static irqreturn_t rx_irq(int irq, void *data)
 457 {
 458         struct hinic_rxq *rxq = (struct hinic_rxq *)data;
 459         struct hinic_rq *rq = rxq->rq;
 460         struct hinic_dev *nic_dev;
 461 
  462         /* Disable the interrupt until NAPI processing is completed */
 463         nic_dev = netdev_priv(rxq->netdev);
 464         hinic_hwdev_set_msix_state(nic_dev->hwdev,
 465                                    rq->msix_entry,
 466                                    HINIC_MSIX_DISABLE);
 467 
 469         hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry);
 470 
 471         napi_schedule(&rxq->napi);
 472         return IRQ_HANDLED;
 473 }
 474 
 475 static int rx_request_irq(struct hinic_rxq *rxq)
 476 {
 477         struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
 478         struct hinic_hwdev *hwdev = nic_dev->hwdev;
 479         struct hinic_rq *rq = rxq->rq;
 480         struct hinic_qp *qp;
 481         int err;
 482 
 483         rx_add_napi(rxq);
 484 
 485         hinic_hwdev_msix_set(hwdev, rq->msix_entry,
 486                              RX_IRQ_NO_PENDING, RX_IRQ_NO_COALESC,
 487                              RX_IRQ_NO_LLI_TIMER, RX_IRQ_NO_CREDIT,
 488                              RX_IRQ_NO_RESEND_TIMER);
 489 
 490         err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);
 491         if (err) {
 492                 rx_del_napi(rxq);
 493                 return err;
 494         }
 495 
  496         qp = container_of(rq, struct hinic_qp, rq);
  497         cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
  498         err = irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
              if (err) {
                      free_irq(rq->irq, rxq);
                      rx_del_napi(rxq);
                      return err;
              }

              return 0;
 499 }
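
The affinity hint above spreads the queues round-robin over the online CPUs, so with 4 online CPUs queues 0..5 get their initial hints on CPUs 0, 1, 2, 3, 0, 1; the hint is advisory, and irqbalance or the administrator may move the vector later. A trivial standalone illustration of the mapping:

static inline unsigned int example_rx_irq_cpu(u16 q_id,
                                              unsigned int online_cpus)
{
        return q_id % online_cpus;      /* same spread as above */
}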
 500 
 501 static void rx_free_irq(struct hinic_rxq *rxq)
 502 {
 503         struct hinic_rq *rq = rxq->rq;
 504 
 505         irq_set_affinity_hint(rq->irq, NULL);
 506         free_irq(rq->irq, rxq);
 507         rx_del_napi(rxq);
 508 }
 509 
 510 /**
 511  * hinic_init_rxq - Initialize the Rx Queue
 512  * @rxq: Logical Rx Queue
 513  * @rq: Hardware Rx Queue to connect the Logical queue with
 514  * @netdev: network device to connect the Logical queue with
 515  *
 516  * Return 0 - Success, negative - Failure
 517  **/
 518 int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
 519                    struct net_device *netdev)
 520 {
 521         struct hinic_qp *qp = container_of(rq, struct hinic_qp, rq);
 522         int err, pkts;
 523 
 524         rxq->netdev = netdev;
 525         rxq->rq = rq;
 526         rxq->buf_len = HINIC_RX_BUF_SZ;
 527         rxq->rx_buff_shift = ilog2(HINIC_RX_BUF_SZ);
 528 
 529         rxq_stats_init(rxq);
 530 
 531         rxq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL,
 532                                        "hinic_rxq%d", qp->q_id);
 533         if (!rxq->irq_name)
 534                 return -ENOMEM;
 535 
 536         pkts = rx_alloc_pkts(rxq);
 537         if (!pkts) {
 538                 err = -ENOMEM;
 539                 goto err_rx_pkts;
 540         }
 541 
 542         err = rx_request_irq(rxq);
 543         if (err) {
 544                 netdev_err(netdev, "Failed to request Rx irq\n");
 545                 goto err_req_rx_irq;
 546         }
 547 
 548         return 0;
 549 
 550 err_req_rx_irq:
 551 err_rx_pkts:
 552         free_all_rx_skbs(rxq);
 553         devm_kfree(&netdev->dev, rxq->irq_name);
 554         return err;
 555 }
 556 
 557 /**
 558  * hinic_clean_rxq - Clean the Rx Queue
 559  * @rxq: Logical Rx Queue
 560  **/
 561 void hinic_clean_rxq(struct hinic_rxq *rxq)
 562 {
 563         struct net_device *netdev = rxq->netdev;
 564 
 565         rx_free_irq(rxq);
 566 
 567         free_all_rx_skbs(rxq);
 568         devm_kfree(&netdev->dev, rxq->irq_name);
 569 }
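
hinic_init_rxq() and hinic_clean_rxq() are meant to be used as a strict pair by the queue bring-up path elsewhere in the driver. A sketch of such a caller, unwinding the already-initialized queues when a later one fails, is shown below; the rxqs/rqs arrays and their count are assumed parameters for illustration, not the driver's real call site.

static int example_bring_up_rxqs(struct net_device *netdev,
                                 struct hinic_rxq *rxqs,
                                 struct hinic_rq *rqs, int num_rxqs)
{
        int i, err;

        for (i = 0; i < num_rxqs; i++) {
                err = hinic_init_rxq(&rxqs[i], &rqs[i], netdev);
                if (err)
                        goto err_init_rxq;
        }

        return 0;

err_init_rxq:
        while (--i >= 0)
                hinic_clean_rxq(&rxqs[i]);
        return err;
}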
