rx_ring 285 drivers/dma/xgene-dma.c struct xgene_dma_ring rx_ring;
rx_ring 686 drivers/dma/xgene-dma.c struct xgene_dma_ring *ring = &chan->rx_ring;
rx_ring 1177 drivers/dma/xgene-dma.c xgene_dma_delete_ring_one(&chan->rx_ring);
rx_ring 1215 drivers/dma/xgene-dma.c struct xgene_dma_ring *rx_ring = &chan->rx_ring;
rx_ring 1220 drivers/dma/xgene-dma.c rx_ring->owner = XGENE_DMA_RING_OWNER_CPU;
rx_ring 1221 drivers/dma/xgene-dma.c rx_ring->buf_num = XGENE_DMA_CPU_BUFNUM + chan->id;
rx_ring 1223 drivers/dma/xgene-dma.c ret = xgene_dma_create_ring_one(chan, rx_ring,
rx_ring 1229 drivers/dma/xgene-dma.c rx_ring->id, rx_ring->num, rx_ring->desc_vaddr);
rx_ring 1238 drivers/dma/xgene-dma.c xgene_dma_delete_ring_one(rx_ring);
rx_ring 1242 drivers/dma/xgene-dma.c tx_ring->dst_ring_num = XGENE_DMA_RING_DST_ID(rx_ring->num);
rx_ring 237 drivers/infiniband/ulp/ipoib/ipoib.h struct ipoib_cm_rx_buf *rx_ring;
rx_ring 377 drivers/infiniband/ulp/ipoib/ipoib.h struct ipoib_rx_buf *rx_ring;
rx_ring 127 drivers/infiniband/ulp/ipoib/ipoib_cm.c sge[i].addr = rx->rx_ring[id].mapping[i];
rx_ring 133 drivers/infiniband/ulp/ipoib/ipoib_cm.c rx->rx_ring[id].mapping);
rx_ring 134 drivers/infiniband/ulp/ipoib/ipoib_cm.c dev_kfree_skb_any(rx->rx_ring[id].skb);
rx_ring 135 drivers/infiniband/ulp/ipoib/ipoib_cm.c rx->rx_ring[id].skb = NULL;
rx_ring 142 drivers/infiniband/ulp/ipoib/ipoib_cm.c struct ipoib_cm_rx_buf *rx_ring,
rx_ring 181 drivers/infiniband/ulp/ipoib/ipoib_cm.c rx_ring[id].skb = skb;
rx_ring 196 drivers/infiniband/ulp/ipoib/ipoib_cm.c struct ipoib_cm_rx_buf *rx_ring)
rx_ring 202 drivers/infiniband/ulp/ipoib/ipoib_cm.c if (rx_ring[i].skb) {
rx_ring 204 drivers/infiniband/ulp/ipoib/ipoib_cm.c rx_ring[i].mapping);
rx_ring 205 drivers/infiniband/ulp/ipoib/ipoib_cm.c dev_kfree_skb_any(rx_ring[i].skb);
rx_ring 208 drivers/infiniband/ulp/ipoib/ipoib_cm.c vfree(rx_ring);
rx_ring 358 drivers/infiniband/ulp/ipoib/ipoib_cm.c rx->rx_ring = vzalloc(array_size(ipoib_recvq_size,
rx_ring 359 drivers/infiniband/ulp/ipoib/ipoib_cm.c sizeof(*rx->rx_ring)));
rx_ring 360 drivers/infiniband/ulp/ipoib/ipoib_cm.c if (!rx->rx_ring)
rx_ring 384 drivers/infiniband/ulp/ipoib/ipoib_cm.c if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
rx_ring 385 drivers/infiniband/ulp/ipoib/ipoib_cm.c rx->rx_ring[i].mapping,
rx_ring 415 drivers/infiniband/ulp/ipoib/ipoib_cm.c ipoib_cm_free_rx_ring(dev, rx->rx_ring);
rx_ring 563 drivers/infiniband/ulp/ipoib/ipoib_cm.c struct ipoib_cm_rx_buf *rx_ring;
rx_ring 592 drivers/infiniband/ulp/ipoib/ipoib_cm.c rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;
rx_ring 594 drivers/infiniband/ulp/ipoib/ipoib_cm.c skb = rx_ring[wr_id].skb;
rx_ring 632 drivers/infiniband/ulp/ipoib/ipoib_cm.c ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
rx_ring 635 drivers/infiniband/ulp/ipoib/ipoib_cm.c ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
rx_ring 647 drivers/infiniband/ulp/ipoib/ipoib_cm.c newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags,
rx_ring 659 drivers/infiniband/ulp/ipoib/ipoib_cm.c ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
rx_ring 660 drivers/infiniband/ulp/ipoib/ipoib_cm.c memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof(*mapping));
rx_ring 918 drivers/infiniband/ulp/ipoib/ipoib_cm.c ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
rx_ring 107 drivers/infiniband/ulp/ipoib/ipoib_ib.c priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
rx_ring 108 drivers/infiniband/ulp/ipoib/ipoib_ib.c priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];
rx_ring 114 drivers/infiniband/ulp/ipoib/ipoib_ib.c ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
rx_ring 115 drivers/infiniband/ulp/ipoib/ipoib_ib.c dev_kfree_skb_any(priv->rx_ring[id].skb);
rx_ring 116 drivers/infiniband/ulp/ipoib/ipoib_ib.c priv->rx_ring[id].skb = NULL;
rx_ring 141 drivers/infiniband/ulp/ipoib/ipoib_ib.c mapping = priv->rx_ring[id].mapping;
rx_ring 147 drivers/infiniband/ulp/ipoib/ipoib_ib.c priv->rx_ring[id].skb = skb;
rx_ring 191 drivers/infiniband/ulp/ipoib/ipoib_ib.c skb = priv->rx_ring[wr_id].skb;
rx_ring 198 drivers/infiniband/ulp/ipoib/ipoib_ib.c ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
rx_ring 200 drivers/infiniband/ulp/ipoib/ipoib_ib.c priv->rx_ring[wr_id].skb = NULL;
rx_ring 204 drivers/infiniband/ulp/ipoib/ipoib_ib.c memcpy(mapping, priv->rx_ring[wr_id].mapping,
rx_ring 730 drivers/infiniband/ulp/ipoib/ipoib_ib.c if (priv->rx_ring[i].skb)
rx_ring 820 drivers/infiniband/ulp/ipoib/ipoib_ib.c rx_req = &priv->rx_ring[i];
rx_ring 824 drivers/infiniband/ulp/ipoib/ipoib_ib.c priv->rx_ring[i].mapping);
rx_ring 1682 drivers/infiniband/ulp/ipoib/ipoib_main.c kfree(priv->rx_ring);
rx_ring 1685 drivers/infiniband/ulp/ipoib/ipoib_main.c priv->rx_ring = NULL;
rx_ring 1696 drivers/infiniband/ulp/ipoib/ipoib_main.c priv->rx_ring = kcalloc(ipoib_recvq_size,
rx_ring 1697 drivers/infiniband/ulp/ipoib/ipoib_main.c sizeof(*priv->rx_ring),
rx_ring 1699 drivers/infiniband/ulp/ipoib/ipoib_main.c if (!priv->rx_ring)
rx_ring 1729 drivers/infiniband/ulp/ipoib/ipoib_main.c kfree(priv->rx_ring);
rx_ring 722 drivers/infiniband/ulp/srp/ib_srp.c if (ch->rx_ring) {
rx_ring 724 drivers/infiniband/ulp/srp/ib_srp.c srp_free_iu(target->srp_host, ch->rx_ring[i]);
rx_ring 725 drivers/infiniband/ulp/srp/ib_srp.c kfree(ch->rx_ring);
rx_ring 726 drivers/infiniband/ulp/srp/ib_srp.c ch->rx_ring = NULL;
rx_ring 2454 drivers/infiniband/ulp/srp/ib_srp.c ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
rx_ring 2456 drivers/infiniband/ulp/srp/ib_srp.c if (!ch->rx_ring)
rx_ring 2464 drivers/infiniband/ulp/srp/ib_srp.c ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
rx_ring 2467 drivers/infiniband/ulp/srp/ib_srp.c if (!ch->rx_ring[i])
rx_ring 2485 drivers/infiniband/ulp/srp/ib_srp.c srp_free_iu(target->srp_host, ch->rx_ring[i]);
rx_ring 2493 drivers/infiniband/ulp/srp/ib_srp.c kfree(ch->rx_ring);
rx_ring 2494 drivers/infiniband/ulp/srp/ib_srp.c ch->rx_ring = NULL;
rx_ring 2567 drivers/infiniband/ulp/srp/ib_srp.c if (!ch->rx_ring) {
rx_ring 2574 drivers/infiniband/ulp/srp/ib_srp.c struct srp_iu *iu = ch->rx_ring[i];
rx_ring 186 drivers/infiniband/ulp/srp/ib_srp.h struct srp_iu **rx_ring;
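
The InfiniBand hits above (ipoib.h, ipoib_cm.c, ipoib_ib.c, ipoib_main.c, ib_srp.c/.h) share one shape: rx_ring is a host-side array sized by the receive-queue depth, one slot per posted receive, each slot pairing a buffer (skb or srp_iu) with its DMA mapping. A minimal sketch of that pattern; the demo_* names are invented here for illustration, not taken from the drivers:

    #include <linux/types.h>
    #include <linux/slab.h>
    #include <linux/skbuff.h>

    /* One slot per outstanding receive, in the spirit of ipoib.h's
     * ipoib_rx_buf (line 377 above). */
    struct demo_rx_buf {
            struct sk_buff *skb;
            dma_addr_t mapping;     /* DMA address of the posted buffer */
    };

    static struct demo_rx_buf *demo_alloc_rx_ring(unsigned int recvq_size)
    {
            /* kcalloc() as in ipoib_main.c:1696; ipoib_cm.c:358 uses
             * vzalloc(array_size(...)) for the same job instead. */
            return kcalloc(recvq_size, sizeof(struct demo_rx_buf),
                           GFP_KERNEL);
    }

    static void demo_free_rx_ring(struct demo_rx_buf *rx_ring,
                                  unsigned int recvq_size)
    {
            unsigned int i;

            /* Free whatever is still posted, then the ring itself,
             * mirroring ipoib_cm_free_rx_ring() in the lines above. */
            for (i = 0; i < recvq_size; i++)
                    if (rx_ring[i].skb)
                            dev_kfree_skb_any(rx_ring[i].skb);
            kfree(rx_ring);
    }

The vzalloc() variant matters for large rings: ipoib_recvq_size slots of multi-fragment bookkeeping can exceed what a physically contiguous kmalloc comfortably provides.
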
rx_ring 305 drivers/net/ethernet/3com/3c515.c struct boom_rx_desc rx_ring[RX_RING_SIZE];
rx_ring 823 drivers/net/ethernet/3com/3c515.c vp->rx_ring[i].next =
rx_ring 824 drivers/net/ethernet/3com/3c515.c isa_virt_to_bus(&vp->rx_ring[i + 1]);
rx_ring 826 drivers/net/ethernet/3com/3c515.c vp->rx_ring[i].next = 0;
rx_ring 827 drivers/net/ethernet/3com/3c515.c vp->rx_ring[i].status = 0; /* Clear complete bit. */
rx_ring 828 drivers/net/ethernet/3com/3c515.c vp->rx_ring[i].length = PKT_BUF_SZ | 0x80000000;
rx_ring 834 drivers/net/ethernet/3com/3c515.c vp->rx_ring[i].addr = isa_virt_to_bus(skb->data);
rx_ring 837 drivers/net/ethernet/3com/3c515.c vp->rx_ring[i - 1].next =
rx_ring 838 drivers/net/ethernet/3com/3c515.c isa_virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */
rx_ring 839 drivers/net/ethernet/3com/3c515.c outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr);
rx_ring 1340 drivers/net/ethernet/3com/3c515.c while ((rx_status = vp->rx_ring[entry].status) & RxDComplete) {
rx_ring 1374 drivers/net/ethernet/3com/3c515.c isa_bus_to_virt(vp->rx_ring[entry].addr),
rx_ring 1384 drivers/net/ethernet/3com/3c515.c if (isa_bus_to_virt(vp->rx_ring[entry].addr) != temp)
rx_ring 1387 drivers/net/ethernet/3com/3c515.c isa_bus_to_virt(vp->rx_ring[entry].addr),
rx_ring 1406 drivers/net/ethernet/3com/3c515.c vp->rx_ring[entry].addr = isa_virt_to_bus(skb->data);
rx_ring 1409 drivers/net/ethernet/3com/3c515.c vp->rx_ring[entry].status = 0; /* Clear complete bit. */
rx_ring 597 drivers/net/ethernet/3com/3c59x.c struct boom_rx_desc* rx_ring;
rx_ring 1210 drivers/net/ethernet/3com/3c59x.c vp->rx_ring = dma_alloc_coherent(gendev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
rx_ring 1214 drivers/net/ethernet/3com/3c59x.c if (!vp->rx_ring)
rx_ring 1217 drivers/net/ethernet/3com/3c59x.c vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
rx_ring 1477 drivers/net/ethernet/3com/3c59x.c vp->rx_ring, vp->rx_ring_dma);
rx_ring 1682 drivers/net/ethernet/3com/3c59x.c vp->rx_ring[i].status = 0;
rx_ring 1736 drivers/net/ethernet/3com/3c59x.c vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
rx_ring 1737 drivers/net/ethernet/3com/3c59x.c vp->rx_ring[i].status = 0; /* Clear complete bit. */
rx_ring 1738 drivers/net/ethernet/3com/3c59x.c vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
rx_ring 1751 drivers/net/ethernet/3com/3c59x.c vp->rx_ring[i].addr = cpu_to_le32(dma);
rx_ring 1759 drivers/net/ethernet/3com/3c59x.c vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
rx_ring 2600 drivers/net/ethernet/3com/3c59x.c while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
rx_ring 2618 drivers/net/ethernet/3com/3c59x.c dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
rx_ring 2656 drivers/net/ethernet/3com/3c59x.c vp->rx_ring[entry].addr = cpu_to_le32(newdma);
rx_ring 2676 drivers/net/ethernet/3com/3c59x.c vp->rx_ring[entry].status = 0; /* Clear complete bit. */
rx_ring 2755 drivers/net/ethernet/3com/3c59x.c dma_unmap_single(vp->gendev, le32_to_cpu(vp->rx_ring[i].addr),
rx_ring 3285 drivers/net/ethernet/3com/3c59x.c vp->rx_ring, vp->rx_ring_dma);
rx_ring 533 drivers/net/ethernet/adaptec/starfire.c struct starfire_rx_desc *rx_ring;
rx_ring 916 drivers/net/ethernet/adaptec/starfire.c np->rx_ring = (void *) np->tx_ring + tx_ring_size;
rx_ring 1162 drivers/net/ethernet/adaptec/starfire.c np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
rx_ring 1169 drivers/net/ethernet/adaptec/starfire.c np->rx_ring[i].rxaddr = 0;
rx_ring 1174 drivers/net/ethernet/adaptec/starfire.c np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);
rx_ring 1613 drivers/net/ethernet/adaptec/starfire.c np->rx_ring[entry].rxaddr =
rx_ring 1617 drivers/net/ethernet/adaptec/starfire.c np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
rx_ring 1972 drivers/net/ethernet/adaptec/starfire.c i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
rx_ring 1980 drivers/net/ethernet/adaptec/starfire.c np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
rx_ring 492 drivers/net/ethernet/agere/et131x.c struct rx_ring rx_ring;
rx_ring 740 drivers/net/ethernet/agere/et131x.c struct rx_ring *rx_ring = &adapter->rx_ring;
rx_ring 742 drivers/net/ethernet/agere/et131x.c if (rx_ring->fbr[1]->buffsize == 4096)
rx_ring 744 drivers/net/ethernet/agere/et131x.c else if (rx_ring->fbr[1]->buffsize == 8192)
rx_ring 746 drivers/net/ethernet/agere/et131x.c else if (rx_ring->fbr[1]->buffsize == 16384)
rx_ring 750 drivers/net/ethernet/agere/et131x.c if (rx_ring->fbr[0]->buffsize == 256)
rx_ring 752 drivers/net/ethernet/agere/et131x.c else if (rx_ring->fbr[0]->buffsize == 512)
rx_ring 754 drivers/net/ethernet/agere/et131x.c else if (rx_ring->fbr[0]->buffsize == 1024)
rx_ring 1543 drivers/net/ethernet/agere/et131x.c struct rx_ring *rx_local = &adapter->rx_ring;
rx_ring 1869 drivers/net/ethernet/agere/et131x.c struct rx_ring *rx_ring = &adapter->rx_ring;
rx_ring 1873 drivers/net/ethernet/agere/et131x.c rx_ring->fbr[0] = kzalloc(sizeof(*fbr), GFP_KERNEL);
rx_ring 1874 drivers/net/ethernet/agere/et131x.c if (rx_ring->fbr[0] == NULL)
rx_ring 1876 drivers/net/ethernet/agere/et131x.c rx_ring->fbr[1] = kzalloc(sizeof(*fbr), GFP_KERNEL);
rx_ring 1877 drivers/net/ethernet/agere/et131x.c if (rx_ring->fbr[1] == NULL)
rx_ring 1898 drivers/net/ethernet/agere/et131x.c rx_ring->fbr[0]->buffsize = 256;
rx_ring 1899 drivers/net/ethernet/agere/et131x.c rx_ring->fbr[0]->num_entries = 512;
rx_ring 1900 drivers/net/ethernet/agere/et131x.c rx_ring->fbr[1]->buffsize = 2048;
rx_ring 1901 drivers/net/ethernet/agere/et131x.c rx_ring->fbr[1]->num_entries = 512;
rx_ring 1903 drivers/net/ethernet/agere/et131x.c rx_ring->fbr[0]->buffsize = 512;
rx_ring 1904 drivers/net/ethernet/agere/et131x.c rx_ring->fbr[0]->num_entries = 1024;
rx_ring 1905 drivers/net/ethernet/agere/et131x.c rx_ring->fbr[1]->buffsize = 4096;
rx_ring 1906 drivers/net/ethernet/agere/et131x.c rx_ring->fbr[1]->num_entries = 512;
rx_ring 1908 drivers/net/ethernet/agere/et131x.c rx_ring->fbr[0]->buffsize = 1024;
rx_ring 1909 drivers/net/ethernet/agere/et131x.c rx_ring->fbr[0]->num_entries = 768;
rx_ring 1910 drivers/net/ethernet/agere/et131x.c rx_ring->fbr[1]->buffsize = 16384;
rx_ring 1911 drivers/net/ethernet/agere/et131x.c rx_ring->fbr[1]->num_entries = 128;
rx_ring 1914 drivers/net/ethernet/agere/et131x.c rx_ring->psr_entries = rx_ring->fbr[0]->num_entries +
rx_ring 1915 drivers/net/ethernet/agere/et131x.c rx_ring->fbr[1]->num_entries;
rx_ring 1918 drivers/net/ethernet/agere/et131x.c fbr = rx_ring->fbr[id];
rx_ring 1934 drivers/net/ethernet/agere/et131x.c fbr = rx_ring->fbr[id];
rx_ring 1974 drivers/net/ethernet/agere/et131x.c psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries;
rx_ring 1976 drivers/net/ethernet/agere/et131x.c rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
rx_ring 1978 drivers/net/ethernet/agere/et131x.c &rx_ring->ps_ring_physaddr,
rx_ring 1981 drivers/net/ethernet/agere/et131x.c if (!rx_ring->ps_ring_virtaddr) {
rx_ring 1988 drivers/net/ethernet/agere/et131x.c rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
rx_ring 1990 drivers/net/ethernet/agere/et131x.c &rx_ring->rx_status_bus,
rx_ring 1992 drivers/net/ethernet/agere/et131x.c if (!rx_ring->rx_status_block) {
rx_ring 1997 drivers/net/ethernet/agere/et131x.c rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;
rx_ring 2002 drivers/net/ethernet/agere/et131x.c INIT_LIST_HEAD(&rx_ring->recv_list);
rx_ring 2013 drivers/net/ethernet/agere/et131x.c struct rx_ring *rx_ring = &adapter->rx_ring;
rx_ring 2017 drivers/net/ethernet/agere/et131x.c WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);
rx_ring 2019 drivers/net/ethernet/agere/et131x.c while (!list_empty(&rx_ring->recv_list)) {
rx_ring 2020 drivers/net/ethernet/agere/et131x.c rfd = list_entry(rx_ring->recv_list.next,
rx_ring 2030 drivers/net/ethernet/agere/et131x.c fbr = rx_ring->fbr[id];
rx_ring 2060 drivers/net/ethernet/agere/et131x.c if (rx_ring->ps_ring_virtaddr) {
rx_ring 2061 drivers/net/ethernet/agere/et131x.c psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries;
rx_ring 2064 drivers/net/ethernet/agere/et131x.c rx_ring->ps_ring_virtaddr,
rx_ring 2065 drivers/net/ethernet/agere/et131x.c rx_ring->ps_ring_physaddr);
rx_ring 2067 drivers/net/ethernet/agere/et131x.c rx_ring->ps_ring_virtaddr = NULL;
rx_ring 2071 drivers/net/ethernet/agere/et131x.c if (rx_ring->rx_status_block) {
rx_ring 2074 drivers/net/ethernet/agere/et131x.c rx_ring->rx_status_block,
rx_ring 2075 drivers/net/ethernet/agere/et131x.c rx_ring->rx_status_bus);
rx_ring 2076 drivers/net/ethernet/agere/et131x.c rx_ring->rx_status_block = NULL;
rx_ring 2080 drivers/net/ethernet/agere/et131x.c kfree(rx_ring->fbr[0]);
rx_ring 2081 drivers/net/ethernet/agere/et131x.c kfree(rx_ring->fbr[1]);
rx_ring 2084 drivers/net/ethernet/agere/et131x.c rx_ring->num_ready_recv = 0;
rx_ring 2092 drivers/net/ethernet/agere/et131x.c struct rx_ring *rx_ring = &adapter->rx_ring;
rx_ring 2095 drivers/net/ethernet/agere/et131x.c for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
rx_ring 2103 drivers/net/ethernet/agere/et131x.c list_add_tail(&rfd->list_node, &rx_ring->recv_list);
rx_ring 2106 drivers/net/ethernet/agere/et131x.c rx_ring->num_ready_recv++;
rx_ring 2129 drivers/net/ethernet/agere/et131x.c struct rx_ring *rx_local = &adapter->rx_ring;
rx_ring 2188 drivers/net/ethernet/agere/et131x.c struct rx_ring *rx_local = &adapter->rx_ring;
rx_ring 2302 drivers/net/ethernet/agere/et131x.c struct rx_ring *rx_ring = &adapter->rx_ring;
rx_ring 2309 drivers/net/ethernet/agere/et131x.c if (list_empty(&rx_ring->recv_list)) {
rx_ring 2310 drivers/net/ethernet/agere/et131x.c WARN_ON(rx_ring->num_ready_recv != 0);
rx_ring 2332 drivers/net/ethernet/agere/et131x.c if (rx_ring->num_ready_recv < RFD_LOW_WATER_MARK)
rx_ring 2339 drivers/net/ethernet/agere/et131x.c rx_ring->unfinished_receives = true;
rx_ring 2344 drivers/net/ethernet/agere/et131x.c rx_ring->unfinished_receives = false;
rx_ring 3369 drivers/net/ethernet/agere/et131x.c struct rx_ring *rx_ring = &adapter->rx_ring;
rx_ring 3403 drivers/net/ethernet/agere/et131x.c if (rx_ring->unfinished_receives)
rx_ring 427 drivers/net/ethernet/altera/altera_tse.h struct tse_buffer *rx_ring;
rx_ring 277 drivers/net/ethernet/altera/altera_tse_main.c priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer),
rx_ring 279 drivers/net/ethernet/altera/altera_tse_main.c if (!priv->rx_ring)
rx_ring 293 drivers/net/ethernet/altera/altera_tse_main.c ret = tse_init_rx_buffer(priv, &priv->rx_ring[i],
rx_ring 305 drivers/net/ethernet/altera/altera_tse_main.c tse_free_rx_buffer(priv, &priv->rx_ring[i]);
rx_ring 308 drivers/net/ethernet/altera/altera_tse_main.c kfree(priv->rx_ring);
rx_ring 322 drivers/net/ethernet/altera/altera_tse_main.c tse_free_rx_buffer(priv, &priv->rx_ring[i]);
rx_ring 341 drivers/net/ethernet/altera/altera_tse_main.c if (likely(priv->rx_ring[entry].skb == NULL)) {
rx_ring 342 drivers/net/ethernet/altera/altera_tse_main.c ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry],
rx_ring 346 drivers/net/ethernet/altera/altera_tse_main.c priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]);
rx_ring 402 drivers/net/ethernet/altera/altera_tse_main.c skb = priv->rx_ring[entry].skb;
rx_ring 410 drivers/net/ethernet/altera/altera_tse_main.c priv->rx_ring[entry].skb = NULL;
rx_ring 414 drivers/net/ethernet/altera/altera_tse_main.c dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr,
rx_ring 415 drivers/net/ethernet/altera/altera_tse_main.c priv->rx_ring[entry].len, DMA_FROM_DEVICE);
rx_ring 1231 drivers/net/ethernet/altera/altera_tse_main.c priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]);
rx_ring 150 drivers/net/ethernet/amazon/ena/ena_ethtool.c ring = &adapter->rx_ring[i];
rx_ring 347 drivers/net/ethernet/amazon/ena/ena_ethtool.c adapter->rx_ring[i].smoothed_interval = val;
rx_ring 420 drivers/net/ethernet/amazon/ena/ena_ethtool.c ring->rx_pending = adapter->rx_ring[0].ring_size;
rx_ring 105 drivers/net/ethernet/amazon/ena/ena_netdev.c adapter->rx_ring[i].mtu = mtu;
rx_ring 177 drivers/net/ethernet/amazon/ena/ena_netdev.c rxr = &adapter->rx_ring[i];
rx_ring 329 drivers/net/ethernet/amazon/ena/ena_netdev.c static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
rx_ring 331 drivers/net/ethernet/amazon/ena/ena_netdev.c if (likely(req_id < rx_ring->ring_size))
rx_ring 334 drivers/net/ethernet/amazon/ena/ena_netdev.c netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
rx_ring 337 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_begin(&rx_ring->syncp);
rx_ring 338 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->rx_stats.bad_req_id++;
rx_ring 339 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_end(&rx_ring->syncp);
rx_ring 342 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
rx_ring 343 drivers/net/ethernet/amazon/ena/ena_netdev.c set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
rx_ring 356 drivers/net/ethernet/amazon/ena/ena_netdev.c struct ena_ring *rx_ring = &adapter->rx_ring[qid];
rx_ring 360 drivers/net/ethernet/amazon/ena/ena_netdev.c if (rx_ring->rx_buffer_info) {
rx_ring 369 drivers/net/ethernet/amazon/ena/ena_netdev.c size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
rx_ring 372 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->rx_buffer_info = vzalloc_node(size, node);
rx_ring 373 drivers/net/ethernet/amazon/ena/ena_netdev.c if (!rx_ring->rx_buffer_info) {
rx_ring 374 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->rx_buffer_info = vzalloc(size);
rx_ring 375 drivers/net/ethernet/amazon/ena/ena_netdev.c if (!rx_ring->rx_buffer_info)
rx_ring 379 drivers/net/ethernet/amazon/ena/ena_netdev.c size = sizeof(u16) * rx_ring->ring_size;
rx_ring 380 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->free_ids = vzalloc_node(size, node);
rx_ring 381 drivers/net/ethernet/amazon/ena/ena_netdev.c if (!rx_ring->free_ids) {
rx_ring 382 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->free_ids = vzalloc(size);
rx_ring 383 drivers/net/ethernet/amazon/ena/ena_netdev.c if (!rx_ring->free_ids) {
rx_ring 384 drivers/net/ethernet/amazon/ena/ena_netdev.c vfree(rx_ring->rx_buffer_info);
rx_ring 385 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->rx_buffer_info = NULL;
rx_ring 391 drivers/net/ethernet/amazon/ena/ena_netdev.c for (i = 0; i < rx_ring->ring_size; i++)
rx_ring 392 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->free_ids[i] = i;
rx_ring 395 drivers/net/ethernet/amazon/ena/ena_netdev.c memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));
rx_ring 397 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->next_to_clean = 0;
rx_ring 398 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->next_to_use = 0;
rx_ring 399 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->cpu = ena_irq->cpu;
rx_ring 413 drivers/net/ethernet/amazon/ena/ena_netdev.c struct ena_ring *rx_ring = &adapter->rx_ring[qid];
rx_ring 415 drivers/net/ethernet/amazon/ena/ena_netdev.c vfree(rx_ring->rx_buffer_info);
rx_ring 416 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->rx_buffer_info = NULL;
rx_ring 418 drivers/net/ethernet/amazon/ena/ena_netdev.c vfree(rx_ring->free_ids);
rx_ring 419 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->free_ids = NULL;
rx_ring 463 drivers/net/ethernet/amazon/ena/ena_netdev.c static int ena_alloc_rx_page(struct ena_ring *rx_ring,
rx_ring 476 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_begin(&rx_ring->syncp);
rx_ring 477 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->rx_stats.page_alloc_fail++;
rx_ring 478 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_end(&rx_ring->syncp);
rx_ring 482 drivers/net/ethernet/amazon/ena/ena_netdev.c dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
rx_ring 484 drivers/net/ethernet/amazon/ena/ena_netdev.c if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
rx_ring 485 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_begin(&rx_ring->syncp);
rx_ring 486 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->rx_stats.dma_mapping_err++;
rx_ring 487 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_end(&rx_ring->syncp);
rx_ring 492 drivers/net/ethernet/amazon/ena/ena_netdev.c netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
rx_ring 504 drivers/net/ethernet/amazon/ena/ena_netdev.c static void ena_free_rx_page(struct ena_ring *rx_ring,
rx_ring 511 drivers/net/ethernet/amazon/ena/ena_netdev.c netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
rx_ring 516 drivers/net/ethernet/amazon/ena/ena_netdev.c dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
rx_ring 523 drivers/net/ethernet/amazon/ena/ena_netdev.c static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
rx_ring 529 drivers/net/ethernet/amazon/ena/ena_netdev.c next_to_use = rx_ring->next_to_use;
rx_ring 534 drivers/net/ethernet/amazon/ena/ena_netdev.c req_id = rx_ring->free_ids[next_to_use];
rx_ring 535 drivers/net/ethernet/amazon/ena/ena_netdev.c rc = validate_rx_req_id(rx_ring, req_id);
rx_ring 539 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_info = &rx_ring->rx_buffer_info[req_id];
rx_ring 542 drivers/net/ethernet/amazon/ena/ena_netdev.c rc = ena_alloc_rx_page(rx_ring, rx_info,
rx_ring 545 drivers/net/ethernet/amazon/ena/ena_netdev.c netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
rx_ring 547 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->qid);
rx_ring 550 drivers/net/ethernet/amazon/ena/ena_netdev.c rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
rx_ring 554 drivers/net/ethernet/amazon/ena/ena_netdev.c netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
rx_ring 556 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->qid);
rx_ring 560 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->ring_size);
rx_ring 564 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_begin(&rx_ring->syncp);
rx_ring 565 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->rx_stats.refil_partial++;
rx_ring 566 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_end(&rx_ring->syncp);
rx_ring 567 drivers/net/ethernet/amazon/ena/ena_netdev.c netdev_warn(rx_ring->netdev,
rx_ring 569 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->qid, i, num);
rx_ring 574 drivers/net/ethernet/amazon/ena/ena_netdev.c ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
rx_ring 576 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->next_to_use = next_to_use;
rx_ring 584 drivers/net/ethernet/amazon/ena/ena_netdev.c struct ena_ring *rx_ring = &adapter->rx_ring[qid];
rx_ring 587 drivers/net/ethernet/amazon/ena/ena_netdev.c for (i = 0; i < rx_ring->ring_size; i++) {
rx_ring 588 drivers/net/ethernet/amazon/ena/ena_netdev.c struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
rx_ring 591 drivers/net/ethernet/amazon/ena/ena_netdev.c ena_free_rx_page(rx_ring, rx_info);
rx_ring 600 drivers/net/ethernet/amazon/ena/ena_netdev.c struct ena_ring *rx_ring;
rx_ring 604 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring = &adapter->rx_ring[i];
rx_ring 605 drivers/net/ethernet/amazon/ena/ena_netdev.c bufs_num = rx_ring->ring_size - 1;
rx_ring 606 drivers/net/ethernet/amazon/ena/ena_netdev.c rc = ena_refill_rx_bufs(rx_ring, bufs_num);
rx_ring 609 drivers/net/ethernet/amazon/ena/ena_netdev.c netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
rx_ring 840 drivers/net/ethernet/amazon/ena/ena_netdev.c static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
rx_ring 845 drivers/net/ethernet/amazon/ena/ena_netdev.c skb = napi_get_frags(rx_ring->napi);
rx_ring 847 drivers/net/ethernet/amazon/ena/ena_netdev.c skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
rx_ring 848 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->rx_copybreak);
rx_ring 851 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_begin(&rx_ring->syncp);
rx_ring 852 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->rx_stats.skb_alloc_fail++;
rx_ring 853 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_end(&rx_ring->syncp);
rx_ring 854 drivers/net/ethernet/amazon/ena/ena_netdev.c netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
rx_ring 862 drivers/net/ethernet/amazon/ena/ena_netdev.c static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
rx_ring 874 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_info = &rx_ring->rx_buffer_info[req_id];
rx_ring 877 drivers/net/ethernet/amazon/ena/ena_netdev.c netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
rx_ring 882 drivers/net/ethernet/amazon/ena/ena_netdev.c netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
rx_ring 890 drivers/net/ethernet/amazon/ena/ena_netdev.c if (len <= rx_ring->rx_copybreak) {
rx_ring 891 drivers/net/ethernet/amazon/ena/ena_netdev.c skb = ena_alloc_skb(rx_ring, false);
rx_ring 895 drivers/net/ethernet/amazon/ena/ena_netdev.c netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
rx_ring 900 drivers/net/ethernet/amazon/ena/ena_netdev.c dma_sync_single_for_cpu(rx_ring->dev,
rx_ring 905 drivers/net/ethernet/amazon/ena/ena_netdev.c dma_sync_single_for_device(rx_ring->dev,
rx_ring 911 drivers/net/ethernet/amazon/ena/ena_netdev.c skb->protocol = eth_type_trans(skb, rx_ring->netdev);
rx_ring 912 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->free_ids[*next_to_clean] = req_id;
rx_ring 914 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->ring_size);
rx_ring 918 drivers/net/ethernet/amazon/ena/ena_netdev.c skb = ena_alloc_skb(rx_ring, true);
rx_ring 923 drivers/net/ethernet/amazon/ena/ena_netdev.c dma_unmap_page(rx_ring->dev,
rx_ring 930 drivers/net/ethernet/amazon/ena/ena_netdev.c netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
rx_ring 936 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->free_ids[*next_to_clean] = req_id;
rx_ring 939 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->ring_size);
rx_ring 946 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_info = &rx_ring->rx_buffer_info[req_id];
rx_ring 957 drivers/net/ethernet/amazon/ena/ena_netdev.c static void ena_rx_checksum(struct ena_ring *rx_ring,
rx_ring 962 drivers/net/ethernet/amazon/ena/ena_netdev.c if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
rx_ring 978 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_begin(&rx_ring->syncp);
rx_ring 979 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->rx_stats.bad_csum++;
rx_ring 980 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_end(&rx_ring->syncp);
rx_ring 981 drivers/net/ethernet/amazon/ena/ena_netdev.c netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
rx_ring 991 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_begin(&rx_ring->syncp);
rx_ring 992 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->rx_stats.bad_csum++;
rx_ring 993 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_end(&rx_ring->syncp);
rx_ring 994 drivers/net/ethernet/amazon/ena/ena_netdev.c netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
rx_ring 1002 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_begin(&rx_ring->syncp);
rx_ring 1003 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->rx_stats.csum_good++;
rx_ring 1004 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_end(&rx_ring->syncp);
rx_ring 1006 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_begin(&rx_ring->syncp);
rx_ring 1007 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->rx_stats.csum_unchecked++;
rx_ring 1008 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_end(&rx_ring->syncp);
rx_ring 1018 drivers/net/ethernet/amazon/ena/ena_netdev.c static void ena_set_rx_hash(struct ena_ring *rx_ring,
rx_ring 1024 drivers/net/ethernet/amazon/ena/ena_netdev.c if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
rx_ring 1047 drivers/net/ethernet/amazon/ena/ena_netdev.c static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
rx_ring 1050 drivers/net/ethernet/amazon/ena/ena_netdev.c u16 next_to_clean = rx_ring->next_to_clean;
rx_ring 1063 drivers/net/ethernet/amazon/ena/ena_netdev.c netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
rx_ring 1064 drivers/net/ethernet/amazon/ena/ena_netdev.c "%s qid %d\n", __func__, rx_ring->qid);
rx_ring 1068 drivers/net/ethernet/amazon/ena/ena_netdev.c ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
rx_ring 1069 drivers/net/ethernet/amazon/ena/ena_netdev.c ena_rx_ctx.max_bufs = rx_ring->sgl_size;
rx_ring 1071 drivers/net/ethernet/amazon/ena/ena_netdev.c rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
rx_ring 1072 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->ena_com_io_sq,
rx_ring 1080 drivers/net/ethernet/amazon/ena/ena_netdev.c netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
rx_ring 1082 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
rx_ring 1086 drivers/net/ethernet/amazon/ena/ena_netdev.c skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
rx_ring 1092 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->free_ids[next_to_clean] =
rx_ring 1093 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->ena_bufs[i].req_id;
rx_ring 1096 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->ring_size);
rx_ring 1101 drivers/net/ethernet/amazon/ena/ena_netdev.c ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);
rx_ring 1103 drivers/net/ethernet/amazon/ena/ena_netdev.c ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);
rx_ring 1105 drivers/net/ethernet/amazon/ena/ena_netdev.c skb_record_rx_queue(skb, rx_ring->qid);
rx_ring 1107 drivers/net/ethernet/amazon/ena/ena_netdev.c if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
rx_ring 1108 drivers/net/ethernet/amazon/ena/ena_netdev.c total_len += rx_ring->ena_bufs[0].len;
rx_ring 1120 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->per_napi_packets += work_done;
rx_ring 1121 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_begin(&rx_ring->syncp);
rx_ring 1122 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->rx_stats.bytes += total_len;
rx_ring 1123 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->rx_stats.cnt += work_done;
rx_ring 1124 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
rx_ring 1125 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_end(&rx_ring->syncp);
rx_ring 1127 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->next_to_clean = next_to_clean;
rx_ring 1129 drivers/net/ethernet/amazon/ena/ena_netdev.c refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq);
rx_ring 1131 drivers/net/ethernet/amazon/ena/ena_netdev.c min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
rx_ring 1136 drivers/net/ethernet/amazon/ena/ena_netdev.c ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
rx_ring 1137 drivers/net/ethernet/amazon/ena/ena_netdev.c ena_refill_rx_bufs(rx_ring, refill_required);
rx_ring 1143 drivers/net/ethernet/amazon/ena/ena_netdev.c adapter = netdev_priv(rx_ring->netdev);
rx_ring 1145 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_begin(&rx_ring->syncp);
rx_ring 1146 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->rx_stats.bad_desc_num++;
rx_ring 1147 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_end(&rx_ring->syncp);
rx_ring 1163 drivers/net/ethernet/amazon/ena/ena_netdev.c ena_napi->rx_ring->smoothed_interval = cur_moder.usec;
rx_ring 1170 drivers/net/ethernet/amazon/ena/ena_netdev.c struct ena_ring *rx_ring = ena_napi->rx_ring;
rx_ring 1172 drivers/net/ethernet/amazon/ena/ena_netdev.c if (!rx_ring->per_napi_packets)
rx_ring 1175 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->non_empty_napi_events++;
rx_ring 1177 drivers/net/ethernet/amazon/ena/ena_netdev.c dim_update_sample(rx_ring->non_empty_napi_events,
rx_ring 1178 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->rx_stats.cnt,
rx_ring 1179 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->rx_stats.bytes,
rx_ring 1184 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->per_napi_packets = 0;
rx_ring 1188 drivers/net/ethernet/amazon/ena/ena_netdev.c struct ena_ring *rx_ring)
rx_ring 1191 drivers/net/ethernet/amazon/ena/ena_netdev.c u32 rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
rx_ring 1192 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->smoothed_interval :
rx_ring 1193 drivers/net/ethernet/amazon/ena/ena_netdev.c ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);
rx_ring 1207 drivers/net/ethernet/amazon/ena/ena_netdev.c ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
rx_ring 1211 drivers/net/ethernet/amazon/ena/ena_netdev.c struct ena_ring *rx_ring)
rx_ring 1225 drivers/net/ethernet/amazon/ena/ena_netdev.c ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node);
rx_ring 1229 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->cpu = cpu;
rx_ring 1239 drivers/net/ethernet/amazon/ena/ena_netdev.c struct ena_ring *tx_ring, *rx_ring;
rx_ring 1248 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring = ena_napi->rx_ring;
rx_ring 1263 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
rx_ring 1283 drivers/net/ethernet/amazon/ena/ena_netdev.c if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
rx_ring 1286 drivers/net/ethernet/amazon/ena/ena_netdev.c ena_unmask_interrupt(tx_ring, rx_ring);
rx_ring 1289 drivers/net/ethernet/amazon/ena/ena_netdev.c ena_update_ring_numa_node(tx_ring, rx_ring);
rx_ring 1326 drivers/net/ethernet/amazon/ena/ena_netdev.c ena_napi->rx_ring->first_interrupt = true;
rx_ring 1552 drivers/net/ethernet/amazon/ena/ena_netdev.c napi->rx_ring = &adapter->rx_ring[i];
rx_ring 1699 drivers/net/ethernet/amazon/ena/ena_netdev.c struct ena_ring *rx_ring;
rx_ring 1706 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring = &adapter->rx_ring[qid];
rx_ring 1716 drivers/net/ethernet/amazon/ena/ena_netdev.c ctx.queue_size = rx_ring->ring_size;
rx_ring 1717 drivers/net/ethernet/amazon/ena/ena_netdev.c ctx.numa_node = cpu_to_node(rx_ring->cpu);
rx_ring 1728 drivers/net/ethernet/amazon/ena/ena_netdev.c &rx_ring->ena_com_io_sq,
rx_ring 1729 drivers/net/ethernet/amazon/ena/ena_netdev.c &rx_ring->ena_com_io_cq);
rx_ring 1738 drivers/net/ethernet/amazon/ena/ena_netdev.c ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
rx_ring 1773 drivers/net/ethernet/amazon/ena/ena_netdev.c adapter->rx_ring[i].ring_size = new_rx_size;
rx_ring 1837 drivers/net/ethernet/amazon/ena/ena_netdev.c cur_rx_ring_size = adapter->rx_ring[0].ring_size;
rx_ring 1911 drivers/net/ethernet/amazon/ena/ena_netdev.c &adapter->rx_ring[i]);
rx_ring 2494 drivers/net/ethernet/amazon/ena/ena_netdev.c struct ena_ring *rx_ring, *tx_ring;
rx_ring 2516 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring = &adapter->rx_ring[i];
rx_ring 2519 drivers/net/ethernet/amazon/ena/ena_netdev.c start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
rx_ring 2520 drivers/net/ethernet/amazon/ena/ena_netdev.c packets = rx_ring->rx_stats.cnt;
rx_ring 2521 drivers/net/ethernet/amazon/ena/ena_netdev.c bytes = rx_ring->rx_stats.bytes;
rx_ring 2522 drivers/net/ethernet/amazon/ena/ena_netdev.c } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
rx_ring 2852 drivers/net/ethernet/amazon/ena/ena_netdev.c struct ena_ring *rx_ring)
rx_ring 2854 drivers/net/ethernet/amazon/ena/ena_netdev.c if (likely(rx_ring->first_interrupt))
rx_ring 2857 drivers/net/ethernet/amazon/ena/ena_netdev.c if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
rx_ring 2860 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->no_interrupt_event_cnt++;
rx_ring 2862 drivers/net/ethernet/amazon/ena/ena_netdev.c if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
rx_ring 2865 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->qid);
rx_ring 2938 drivers/net/ethernet/amazon/ena/ena_netdev.c struct ena_ring *rx_ring;
rx_ring 2957 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring = &adapter->rx_ring[i];
rx_ring 2963 drivers/net/ethernet/amazon/ena/ena_netdev.c rc = check_for_rx_interrupt_queue(adapter, rx_ring);
rx_ring 2993 drivers/net/ethernet/amazon/ena/ena_netdev.c struct ena_ring *rx_ring;
rx_ring 3003 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring = &adapter->rx_ring[i];
rx_ring 3006 drivers/net/ethernet/amazon/ena/ena_netdev.c ena_com_free_desc(rx_ring->ena_com_io_sq);
rx_ring 3007 drivers/net/ethernet/amazon/ena/ena_netdev.c if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
rx_ring 3008 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->empty_rx_queue++;
rx_ring 3010 drivers/net/ethernet/amazon/ena/ena_netdev.c if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
rx_ring 3011 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_begin(&rx_ring->syncp);
rx_ring 3012 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->rx_stats.empty_rx_ring++;
rx_ring 3013 drivers/net/ethernet/amazon/ena/ena_netdev.c u64_stats_update_end(&rx_ring->syncp);
rx_ring 3018 drivers/net/ethernet/amazon/ena/ena_netdev.c napi_schedule(rx_ring->napi);
rx_ring 3019 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->empty_rx_queue = 0;
rx_ring 3022 drivers/net/ethernet/amazon/ena/ena_netdev.c rx_ring->empty_rx_queue = 0;
rx_ring 157 drivers/net/ethernet/amazon/ena/ena_netdev.h struct ena_ring *rx_ring;
rx_ring 359 drivers/net/ethernet/amazon/ena/ena_netdev.h struct ena_ring rx_ring[ENA_MAX_NUM_IO_QUEUES]
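
The ena_netdev.c entries above orbit a two-index discipline: completions are consumed at next_to_clean, fresh buffers are posted at next_to_use, and slot numbers are recycled through the free_ids array so completion order need not match post order. A self-contained toy model of just that bookkeeping (plain C, invented demo_* names, not the ENA API):

    #include <stdint.h>

    #define DEMO_RING_SIZE 16          /* toy depth; real rings are configurable */

    struct demo_ring {
            uint16_t next_to_use;      /* producer: next slot to post a buffer at */
            uint16_t next_to_clean;    /* consumer: next completion to reap */
            uint16_t free_ids[DEMO_RING_SIZE]; /* recycled request ids */
    };

    /* Advance a ring index with wrap-around. */
    static uint16_t demo_ring_inc(uint16_t idx)
    {
            return (uint16_t)((idx + 1) % DEMO_RING_SIZE);
    }

    /* Reap one completion: recycle its req_id and advance the consumer,
     * as ena_clean_rx_irq() does at lines 1092-1096 above. */
    static void demo_complete(struct demo_ring *r, uint16_t req_id)
    {
            r->free_ids[r->next_to_clean] = req_id;
            r->next_to_clean = demo_ring_inc(r->next_to_clean);
    }

    /* Post one buffer: claim the next recycled req_id and advance the
     * producer, as ena_refill_rx_bufs() does at lines 534 and 576 above. */
    static uint16_t demo_post(struct demo_ring *r)
    {
            uint16_t req_id = r->free_ids[r->next_to_use];

            r->next_to_use = demo_ring_inc(r->next_to_use);
            return req_id;  /* caller attaches a freshly mapped buffer */
    }

In the real driver the refill path finishes with ena_com_write_sq_doorbell() (line 574 above) so the device learns about the newly posted buffers.
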
rx_ring 298 drivers/net/ethernet/amd/amd8111e.c if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
rx_ring 325 drivers/net/ethernet/amd/amd8111e.c lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
rx_ring 326 drivers/net/ethernet/amd/amd8111e.c lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2);
rx_ring 328 drivers/net/ethernet/amd/amd8111e.c lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
rx_ring 343 drivers/net/ethernet/amd/amd8111e.c sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring,
rx_ring 615 drivers/net/ethernet/amd/amd8111e.c if(lp->rx_ring){
rx_ring 618 drivers/net/ethernet/amd/amd8111e.c lp->rx_ring, lp->rx_ring_dma_addr);
rx_ring 619 drivers/net/ethernet/amd/amd8111e.c lp->rx_ring = NULL;
rx_ring 692 drivers/net/ethernet/amd/amd8111e.c status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
rx_ring 704 drivers/net/ethernet/amd/amd8111e.c lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
rx_ring 710 drivers/net/ethernet/amd/amd8111e.c lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
rx_ring 713 drivers/net/ethernet/amd/amd8111e.c pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
rx_ring 725 drivers/net/ethernet/amd/amd8111e.c lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
rx_ring 734 drivers/net/ethernet/amd/amd8111e.c lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
rx_ring 754 drivers/net/ethernet/amd/amd8111e.c u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
rx_ring 765 drivers/net/ethernet/amd/amd8111e.c lp->rx_ring[rx_index].buff_phy_addr
rx_ring 767 drivers/net/ethernet/amd/amd8111e.c lp->rx_ring[rx_index].buff_count =
rx_ring 770 drivers/net/ethernet/amd/amd8111e.c lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
rx_ring 737 drivers/net/ethernet/amd/amd8111e.h struct amd8111e_rx_dr* rx_ring;
rx_ring 89 drivers/net/ethernet/amd/ariadne.c volatile struct RDRE *rx_ring[RX_RING_SIZE];
rx_ring 101 drivers/net/ethernet/amd/ariadne.c struct RDRE rx_ring[RX_RING_SIZE];
rx_ring 146 drivers/net/ethernet/amd/ariadne.c volatile struct RDRE *r = &lancedata->rx_ring[i];
rx_ring 154 drivers/net/ethernet/amd/ariadne.c priv->rx_ring[i] = &lancedata->rx_ring[i];
rx_ring 157 drivers/net/ethernet/amd/ariadne.c i, &lancedata->rx_ring[i], lancedata->rx_buff[i]);
rx_ring 168 drivers/net/ethernet/amd/ariadne.c while (!(lowb(priv->rx_ring[entry]->RMD1) & RF_OWN)) {
rx_ring 169 drivers/net/ethernet/amd/ariadne.c int status = lowb(priv->rx_ring[entry]->RMD1);
rx_ring 189 drivers/net/ethernet/amd/ariadne.c priv->rx_ring[entry]->RMD1 &= 0xff00 | RF_STP | RF_ENP;
rx_ring 192 drivers/net/ethernet/amd/ariadne.c short pkt_len = swapw(priv->rx_ring[entry]->RMD3);
rx_ring 198 drivers/net/ethernet/amd/ariadne.c if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN)
rx_ring 203 drivers/net/ethernet/amd/ariadne.c priv->rx_ring[entry]->RMD1 |= RF_OWN;
rx_ring 226 drivers/net/ethernet/amd/ariadne.c priv->rx_ring[entry]->RMD1 |= RF_OWN;
rx_ring 462 drivers/net/ethernet/amd/ariadne.c lance->RDP = swloww(ARIADNE_RAM + offsetof(struct lancedata, rx_ring));
rx_ring 464 drivers/net/ethernet/amd/ariadne.c lance->RDP = swhighw(ARIADNE_RAM + offsetof(struct lancedata, rx_ring));
rx_ring 150 drivers/net/ethernet/amd/atarilance.c struct ringdesc rx_ring;
rx_ring 617 drivers/net/ethernet/amd/atarilance.c MEM->init.rx_ring.adr_lo = offsetof( struct lance_memory, rx_head );
rx_ring 618 drivers/net/ethernet/amd/atarilance.c MEM->init.rx_ring.adr_hi = 0;
rx_ring 619 drivers/net/ethernet/amd/atarilance.c MEM->init.rx_ring.len = RX_RING_LEN_BITS;
rx_ring 231 drivers/net/ethernet/amd/lance.c u32 rx_ring; /* Tx and Rx ring base pointers */
rx_ring 237 drivers/net/ethernet/amd/lance.c struct lance_rx_head rx_ring[RX_RING_SIZE];
rx_ring 574 drivers/net/ethernet/amd/lance.c lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
rx_ring 789 drivers/net/ethernet/amd/lance.c (u32) isa_virt_to_bus(lp->rx_ring),
rx_ring 846 drivers/net/ethernet/amd/lance.c lp->rx_ring[i].base = 0; /* Not owned by LANCE chip. */
rx_ring 880 drivers/net/ethernet/amd/lance.c lp->rx_ring[i].base = 0;
rx_ring 882 drivers/net/ethernet/amd/lance.c lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
rx_ring 883 drivers/net/ethernet/amd/lance.c lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
rx_ring 897 drivers/net/ethernet/amd/lance.c lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
rx_ring 934 drivers/net/ethernet/amd/lance.c lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
rx_ring 935 drivers/net/ethernet/amd/lance.c lp->rx_ring[i].msg_length);
rx_ring 1151 drivers/net/ethernet/amd/lance.c while (lp->rx_ring[entry].base >= 0) {
rx_ring 1152 drivers/net/ethernet/amd/lance.c int status = lp->rx_ring[entry].base >> 24;
rx_ring 1169 drivers/net/ethernet/amd/lance.c lp->rx_ring[entry].base &= 0x03ffffff;
rx_ring 1174 drivers/net/ethernet/amd/lance.c short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
rx_ring 1189 drivers/net/ethernet/amd/lance.c if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
rx_ring 1195 drivers/net/ethernet/amd/lance.c lp->rx_ring[entry].base |= 0x80000000;
rx_ring 1203 drivers/net/ethernet/amd/lance.c (unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
rx_ring 1213 drivers/net/ethernet/amd/lance.c lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
rx_ring 1214 drivers/net/ethernet/amd/lance.c lp->rx_ring[entry].base |= 0x80000000;
rx_ring 240 drivers/net/ethernet/amd/pcnet32.c __le32 rx_ring;
rx_ring 262 drivers/net/ethernet/amd/pcnet32.c struct pcnet32_rx_head *rx_ring;
rx_ring 569 drivers/net/ethernet/amd/pcnet32.c new_rx_ring[new] = lp->rx_ring[new];
rx_ring 617 drivers/net/ethernet/amd/pcnet32.c lp->rx_ring_size, lp->rx_ring,
rx_ring 623 drivers/net/ethernet/amd/pcnet32.c lp->rx_ring = new_rx_ring;
rx_ring 658 drivers/net/ethernet/amd/pcnet32.c lp->rx_ring[i].status = 0; /* CPU owns buffer */
rx_ring 1071 drivers/net/ethernet/amd/pcnet32.c while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
rx_ring 1289 drivers/net/ethernet/amd/pcnet32.c struct pcnet32_rx_head *rxp = &lp->rx_ring[entry];
rx_ring 1304 drivers/net/ethernet/amd/pcnet32.c rxp = &lp->rx_ring[entry];
rx_ring 1912 drivers/net/ethernet/amd/pcnet32.c lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
rx_ring 2029 drivers/net/ethernet/amd/pcnet32.c lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
rx_ring 2033 drivers/net/ethernet/amd/pcnet32.c if (lp->rx_ring == NULL) {
rx_ring 2085 drivers/net/ethernet/amd/pcnet32.c if (lp->rx_ring) {
rx_ring 2088 drivers/net/ethernet/amd/pcnet32.c lp->rx_ring_size, lp->rx_ring,
rx_ring 2090 drivers/net/ethernet/amd/pcnet32.c lp->rx_ring = NULL;
rx_ring 2401 drivers/net/ethernet/amd/pcnet32.c lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
rx_ring 2402 drivers/net/ethernet/amd/pcnet32.c lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
rx_ring 2404 drivers/net/ethernet/amd/pcnet32.c lp->rx_ring[i].status = cpu_to_le16(0x8000);
rx_ring 2419 drivers/net/ethernet/amd/pcnet32.c lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
rx_ring 2478 drivers/net/ethernet/amd/pcnet32.c le32_to_cpu(lp->rx_ring[i].base),
rx_ring 2479 drivers/net/ethernet/amd/pcnet32.c (-le16_to_cpu(lp->rx_ring[i].buf_length)) &
rx_ring 2480 drivers/net/ethernet/amd/pcnet32.c 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length),
rx_ring 2481 drivers/net/ethernet/amd/pcnet32.c le16_to_cpu(lp->rx_ring[i].status));
rx_ring 182 drivers/net/ethernet/amd/xgbe/xgbe-desc.c xgbe_free_ring(pdata, channel->rx_ring);
rx_ring 268 drivers/net/ethernet/amd/xgbe/xgbe-desc.c ret = xgbe_init_ring(pdata, channel->rx_ring,
rx_ring 441 drivers/net/ethernet/amd/xgbe/xgbe-desc.c ring = channel->rx_ring;
rx_ring 199 drivers/net/ethernet/amd/xgbe/xgbe-dev.c if (pdata->channel[i]->rx_ring)
rx_ring 269 drivers/net/ethernet/amd/xgbe/xgbe-dev.c if (!pdata->channel[i]->rx_ring)
rx_ring 289 drivers/net/ethernet/amd/xgbe/xgbe-dev.c if (!pdata->channel[i]->rx_ring)
rx_ring 314 drivers/net/ethernet/amd/xgbe/xgbe-dev.c if (!pdata->channel[i]->rx_ring)
rx_ring 696 drivers/net/ethernet/amd/xgbe/xgbe-dev.c if (channel->rx_ring) {
rx_ring 1489 drivers/net/ethernet/amd/xgbe/xgbe-dev.c struct xgbe_ring *ring = channel->rx_ring;
rx_ring 1907 drivers/net/ethernet/amd/xgbe/xgbe-dev.c struct xgbe_ring *ring = channel->rx_ring;
rx_ring 3359 drivers/net/ethernet/amd/xgbe/xgbe-dev.c if (!pdata->channel[i]->rx_ring)
rx_ring 3397 drivers/net/ethernet/amd/xgbe/xgbe-dev.c if (!pdata->channel[i]->rx_ring)
rx_ring 3446 drivers/net/ethernet/amd/xgbe/xgbe-dev.c if (!pdata->channel[i]->rx_ring)
rx_ring 3459 drivers/net/ethernet/amd/xgbe/xgbe-dev.c if (!pdata->channel[i]->rx_ring)
rx_ring 180 drivers/net/ethernet/amd/xgbe/xgbe-drv.c kfree(pdata->channel[i]->rx_ring);
rx_ring 241 drivers/net/ethernet/amd/xgbe/xgbe-drv.c channel->rx_ring = ring;
rx_ring 250 drivers/net/ethernet/amd/xgbe/xgbe-drv.c channel->tx_ring, channel->rx_ring);
rx_ring 315 drivers/net/ethernet/amd/xgbe/xgbe-drv.c if (channel->tx_ring && channel->rx_ring)
rx_ring 319 drivers/net/ethernet/amd/xgbe/xgbe-drv.c else if (channel->rx_ring)
rx_ring 341 drivers/net/ethernet/amd/xgbe/xgbe-drv.c if (channel->tx_ring && channel->rx_ring)
rx_ring 345 drivers/net/ethernet/amd/xgbe/xgbe-drv.c else if (channel->rx_ring)
rx_ring 1216 drivers/net/ethernet/amd/xgbe/xgbe-drv.c ring = pdata->channel[i]->rx_ring;
rx_ring 2521 drivers/net/ethernet/amd/xgbe/xgbe-drv.c struct xgbe_ring *ring = channel->rx_ring;
rx_ring 2687 drivers/net/ethernet/amd/xgbe/xgbe-drv.c struct xgbe_ring *ring = channel->rx_ring;
rx_ring 520 drivers/net/ethernet/amd/xgbe/xgbe.h struct xgbe_ring *rx_ring;
rx_ring 67 drivers/net/ethernet/apm/xgene-v2/main.c struct xge_desc_ring *ring = pdata->rx_ring;
rx_ring 289 drivers/net/ethernet/apm/xgene-v2/main.c struct xge_desc_ring *rx_ring;
rx_ring 299 drivers/net/ethernet/apm/xgene-v2/main.c rx_ring = pdata->rx_ring;
rx_ring 300 drivers/net/ethernet/apm/xgene-v2/main.c head = rx_ring->head;
rx_ring 307 drivers/net/ethernet/apm/xgene-v2/main.c raw_desc = &rx_ring->raw_desc[head];
rx_ring 314 drivers/net/ethernet/apm/xgene-v2/main.c skb = rx_ring->pkt_info[head].skb;
rx_ring 315 drivers/net/ethernet/apm/xgene-v2/main.c rx_ring->pkt_info[head].skb = NULL;
rx_ring 316 drivers/net/ethernet/apm/xgene-v2/main.c dma_addr = rx_ring->pkt_info[head].dma_addr;
rx_ring 346 drivers/net/ethernet/apm/xgene-v2/main.c rx_ring->head = head;
rx_ring 372 drivers/net/ethernet/apm/xgene-v2/main.c struct xge_desc_ring *ring = pdata->rx_ring;
rx_ring 400 drivers/net/ethernet/apm/xgene-v2/main.c xge_delete_desc_ring(ndev, pdata->rx_ring);
rx_ring 456 drivers/net/ethernet/apm/xgene-v2/main.c pdata->rx_ring = ring;
rx_ring 54 drivers/net/ethernet/apm/xgene-v2/main.h struct xge_desc_ring *rx_ring;
rx_ring 48 drivers/net/ethernet/apm/xgene-v2/ring.c struct xge_desc_ring *ring = pdata->rx_ring;
rx_ring 705 drivers/net/ethernet/apm/xgene/xgene_enet_cle.c pool_id = pdata->rx_ring[idx]->buf_pool->id;
rx_ring 707 drivers/net/ethernet/apm/xgene/xgene_enet_cle.c dstqid = xgene_enet_dst_ring_num(pdata->rx_ring[idx]);
rx_ring 709 drivers/net/ethernet/apm/xgene/xgene_enet_cle.c if (pdata->rx_ring[idx]->page_pool) {
rx_ring 710 drivers/net/ethernet/apm/xgene/xgene_enet_cle.c pool_id = pdata->rx_ring[idx]->page_pool->id;
rx_ring 782 drivers/net/ethernet/apm/xgene/xgene_enet_cle.c def_qid = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
rx_ring 783 drivers/net/ethernet/apm/xgene/xgene_enet_cle.c pool_id = pdata->rx_ring[0]->buf_pool->id;
rx_ring 786 drivers/net/ethernet/apm/xgene/xgene_enet_cle.c if (pdata->rx_ring[0]->page_pool) {
rx_ring 787 drivers/net/ethernet/apm/xgene/xgene_enet_cle.c pool_id = pdata->rx_ring[0]->page_pool->id;
rx_ring 215 drivers/net/ethernet/apm/xgene/xgene_enet_main.c struct xgene_enet_desc_ring *rx_ring = data;
rx_ring 217 drivers/net/ethernet/apm/xgene/xgene_enet_main.c if (napi_schedule_prep(&rx_ring->napi)) {
rx_ring 219 drivers/net/ethernet/apm/xgene/xgene_enet_main.c __napi_schedule(&rx_ring->napi);
rx_ring 668 drivers/net/ethernet/apm/xgene/xgene_enet_main.c static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
rx_ring 686 drivers/net/ethernet/apm/xgene/xgene_enet_main.c ndev = rx_ring->ndev;
rx_ring 688 drivers/net/ethernet/apm/xgene/xgene_enet_main.c dev = ndev_to_dev(rx_ring->ndev);
rx_ring 689 drivers/net/ethernet/apm/xgene/xgene_enet_main.c buf_pool = rx_ring->buf_pool;
rx_ring 690 drivers/net/ethernet/apm/xgene/xgene_enet_main.c page_pool = rx_ring->page_pool;
rx_ring 714 drivers/net/ethernet/apm/xgene/xgene_enet_main.c xgene_enet_parse_error(rx_ring, status);
rx_ring 715 drivers/net/ethernet/apm/xgene/xgene_enet_main.c rx_ring->rx_dropped++;
rx_ring 750 drivers/net/ethernet/apm/xgene/xgene_enet_main.c rx_ring->npagepool -= skb_shinfo(skb)->nr_frags;
rx_ring 756 drivers/net/ethernet/apm/xgene/xgene_enet_main.c rx_ring->rx_packets++;
rx_ring 757 drivers/net/ethernet/apm/xgene/xgene_enet_main.c rx_ring->rx_bytes += datalen;
rx_ring 758 drivers/net/ethernet/apm/xgene/xgene_enet_main.c napi_gro_receive(&rx_ring->napi, skb);
rx_ring 761 drivers/net/ethernet/apm/xgene/xgene_enet_main.c if (rx_ring->npagepool <= 0) {
rx_ring 763 drivers/net/ethernet/apm/xgene/xgene_enet_main.c rx_ring->npagepool = NUM_NXTBUFPOOL;
rx_ring 768 drivers/net/ethernet/apm/xgene/xgene_enet_main.c if (--rx_ring->nbufpool == 0) {
rx_ring 770 drivers/net/ethernet/apm/xgene/xgene_enet_main.c rx_ring->nbufpool = NUM_BUFPOOL;
rx_ring 884 drivers/net/ethernet/apm/xgene/xgene_enet_main.c ring = pdata->rx_ring[i];
rx_ring 910 drivers/net/ethernet/apm/xgene/xgene_enet_main.c ring = pdata->rx_ring[i];
rx_ring 945 drivers/net/ethernet/apm/xgene/xgene_enet_main.c ring = pdata->rx_ring[i];
rx_ring 963 drivers/net/ethernet/apm/xgene/xgene_enet_main.c napi = &pdata->rx_ring[i]->napi;
rx_ring 979 drivers/net/ethernet/apm/xgene/xgene_enet_main.c napi = &pdata->rx_ring[i]->napi;
rx_ring 1040 drivers/net/ethernet/apm/xgene/xgene_enet_main.c xgene_enet_process_ring(pdata->rx_ring[i], -1);
rx_ring 1075 drivers/net/ethernet/apm/xgene/xgene_enet_main.c ring = pdata->rx_ring[i];
rx_ring 1090 drivers/net/ethernet/apm/xgene/xgene_enet_main.c pdata->rx_ring[i] = NULL;
rx_ring 1166 drivers/net/ethernet/apm/xgene/xgene_enet_main.c ring = pdata->rx_ring[i];
rx_ring 1303 drivers/net/ethernet/apm/xgene/xgene_enet_main.c struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
rx_ring 1324 drivers/net/ethernet/apm/xgene/xgene_enet_main.c rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
rx_ring 1327 drivers/net/ethernet/apm/xgene/xgene_enet_main.c if (!rx_ring) {
rx_ring 1343 drivers/net/ethernet/apm/xgene/xgene_enet_main.c rx_ring->nbufpool = NUM_BUFPOOL;
rx_ring 1344 drivers/net/ethernet/apm/xgene/xgene_enet_main.c rx_ring->npagepool = NUM_NXTBUFPOOL;
rx_ring 1345 drivers/net/ethernet/apm/xgene/xgene_enet_main.c rx_ring->irq = pdata->irqs[i];
rx_ring 1355 drivers/net/ethernet/apm/xgene/xgene_enet_main.c rx_ring->buf_pool = buf_pool;
rx_ring 1356 drivers/net/ethernet/apm/xgene/xgene_enet_main.c pdata->rx_ring[i] = rx_ring;
rx_ring 1392 drivers/net/ethernet/apm/xgene/xgene_enet_main.c rx_ring->page_pool = page_pool;
rx_ring 1419 drivers/net/ethernet/apm/xgene/xgene_enet_main.c cp_ring = pdata->rx_ring[i];
rx_ring 1487 drivers/net/ethernet/apm/xgene/xgene_enet_main.c ring = pdata->rx_ring[i];
rx_ring 1829 drivers/net/ethernet/apm/xgene/xgene_enet_main.c buf_pool = pdata->rx_ring[i]->buf_pool;
rx_ring 1831 drivers/net/ethernet/apm/xgene/xgene_enet_main.c page_pool = pdata->rx_ring[i]->page_pool;
rx_ring 1845 drivers/net/ethernet/apm/xgene/xgene_enet_main.c dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
rx_ring 1846 drivers/net/ethernet/apm/xgene/xgene_enet_main.c buf_pool = pdata->rx_ring[0]->buf_pool;
rx_ring 1863 drivers/net/ethernet/apm/xgene/xgene_enet_main.c dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
rx_ring 1864 drivers/net/ethernet/apm/xgene/xgene_enet_main.c buf_pool = pdata->rx_ring[0]->buf_pool;
rx_ring 1865 drivers/net/ethernet/apm/xgene/xgene_enet_main.c page_pool = pdata->rx_ring[0]->page_pool;
rx_ring 1977 drivers/net/ethernet/apm/xgene/xgene_enet_main.c napi = &pdata->rx_ring[i]->napi;
rx_ring 192 drivers/net/ethernet/apm/xgene/xgene_enet_main.h struct xgene_enet_desc_ring *rx_ring[XGENE_NUM_RX_RING];
rx_ring 60 drivers/net/ethernet/apple/macmace.c unsigned char *rx_ring;
rx_ring 390 drivers/net/ethernet/apple/macmace.c mp->rx_ring = dma_alloc_coherent(mp->device,
rx_ring 393 drivers/net/ethernet/apple/macmace.c if (mp->rx_ring == NULL)
rx_ring 702 drivers/net/ethernet/apple/macmace.c mace_dma_rx_frame(dev, (struct mace_frame*) (mp->rx_ring
rx_ring 749 drivers/net/ethernet/apple/macmace.c mp->rx_ring, mp->rx_ring_phys);
rx_ring 298 drivers/net/ethernet/atheros/ag71xx.c struct ag71xx_ring rx_ring ____cacheline_aligned;
rx_ring 1014 drivers/net/ethernet/atheros/ag71xx.c struct ag71xx_ring *ring = &ag->rx_ring;
rx_ring 1040 drivers/net/ethernet/atheros/ag71xx.c struct ag71xx_ring *ring = &ag->rx_ring;
rx_ring 1059 drivers/net/ethernet/atheros/ag71xx.c struct ag71xx_ring *ring = &ag->rx_ring;
rx_ring 1100 drivers/net/ethernet/atheros/ag71xx.c struct ag71xx_ring *ring = &ag->rx_ring;
rx_ring 1134 drivers/net/ethernet/atheros/ag71xx.c struct ag71xx_ring *rx = &ag->rx_ring;
rx_ring 1164 drivers/net/ethernet/atheros/ag71xx.c struct ag71xx_ring *rx = &ag->rx_ring;
rx_ring 1217 drivers/net/ethernet/atheros/ag71xx.c ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
rx_ring 1444 drivers/net/ethernet/atheros/ag71xx.c ring = &ag->rx_ring;
rx_ring 1520 drivers/net/ethernet/atheros/ag71xx.c struct ag71xx_ring *rx_ring = &ag->rx_ring;
rx_ring 1521 drivers/net/ethernet/atheros/ag71xx.c int rx_ring_size = BIT(rx_ring->order);
rx_ring 1531 drivers/net/ethernet/atheros/ag71xx.c if (!rx_ring->buf[rx_ring->dirty % rx_ring_size].rx.rx_buf)
rx_ring 1712 drivers/net/ethernet/atheros/ag71xx.c ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);
rx_ring 445 drivers/net/ethernet/atheros/atl1e/atl1e.h struct atl1e_rx_ring rx_ring;
rx_ring 690 drivers/net/ethernet/atheros/atl1e/atl1e_main.c struct atl1e_rx_ring *rx_ring =
rx_ring 691 drivers/net/ethernet/atheros/atl1e/atl1e_main.c &adapter->rx_ring;
rx_ring 692 drivers/net/ethernet/atheros/atl1e/atl1e_main.c struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc;
rx_ring 703 drivers/net/ethernet/atheros/atl1e/atl1e_main.c rx_ring->real_page_size);
rx_ring 714 drivers/net/ethernet/atheros/atl1e/atl1e_main.c + adapter->rx_ring.real_page_size * AT_PAGE_NUM_PER_QUEUE *
rx_ring 724 drivers/net/ethernet/atheros/atl1e/atl1e_main.c struct atl1e_rx_ring *rx_ring = NULL;
rx_ring 726 drivers/net/ethernet/atheros/atl1e/atl1e_main.c rx_ring = &adapter->rx_ring;
rx_ring 728 drivers/net/ethernet/atheros/atl1e/atl1e_main.c rx_ring->real_page_size = adapter->rx_ring.page_size
rx_ring 732 drivers/net/ethernet/atheros/atl1e/atl1e_main.c rx_ring->real_page_size = roundup(rx_ring->real_page_size, 32);
rx_ring 736 drivers/net/ethernet/atheros/atl1e/atl1e_main.c adapter->rx_ring.desc = NULL;
rx_ring 746 drivers/net/ethernet/atheros/atl1e/atl1e_main.c struct atl1e_rx_ring *rx_ring = NULL;
rx_ring 751 drivers/net/ethernet/atheros/atl1e/atl1e_main.c rx_ring = &adapter->rx_ring;
rx_ring 752 drivers/net/ethernet/atheros/atl1e/atl1e_main.c rx_page_desc = rx_ring->rx_page_desc;
rx_ring 802 drivers/net/ethernet/atheros/atl1e/atl1e_main.c struct atl1e_rx_ring *rx_ring;
rx_ring 812 drivers/net/ethernet/atheros/atl1e/atl1e_main.c rx_ring = &adapter->rx_ring;
rx_ring 825 drivers/net/ethernet/atheros/atl1e/atl1e_main.c rx_page_desc = rx_ring->rx_page_desc;
rx_ring 848 drivers/net/ethernet/atheros/atl1e/atl1e_main.c offset += rx_ring->real_page_size;
rx_ring 888 drivers/net/ethernet/atheros/atl1e/atl1e_main.c struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
rx_ring 901 drivers/net/ethernet/atheros/atl1e/atl1e_main.c rx_page_desc = rx_ring->rx_page_desc;
rx_ring 923 drivers/net/ethernet/atheros/atl1e/atl1e_main.c AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size);
rx_ring 1386 drivers/net/ethernet/atheros/atl1e/atl1e_main.c (struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc;
rx_ring 1396 drivers/net/ethernet/atheros/atl1e/atl1e_main.c struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
rx_ring 1398 drivers/net/ethernet/atheros/atl1e/atl1e_main.c (struct atl1e_rx_page_desc *) rx_ring->rx_page_desc;
rx_ring 1475 drivers/net/ethernet/atheros/atl1e/atl1e_main.c if (rx_page->read_offset >= rx_ring->page_size) {
rx_ring 212 drivers/net/ethernet/atheros/atl1e/atl1e_param.c adapter->rx_ring.page_size = (u32)val * 1024;
rx_ring 214 drivers/net/ethernet/atheros/atl1e/atl1e_param.c adapter->rx_ring.page_size = (u32)opt.def * 1024;
rx_ring 721 drivers/net/ethernet/broadcom/b44.c dp = &bp->rx_ring[dest_idx];
rx_ring 742 drivers/net/ethernet/broadcom/b44.c dest_desc = &bp->rx_ring[dest_idx];
rx_ring 744 drivers/net/ethernet/broadcom/b44.c src_desc = &bp->rx_ring[src_idx];
rx_ring 1135 drivers/net/ethernet/broadcom/b44.c memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
rx_ring 1162 drivers/net/ethernet/broadcom/b44.c if (bp->rx_ring) {
rx_ring 1166 drivers/net/ethernet/broadcom/b44.c kfree(bp->rx_ring);
rx_ring 1169 drivers/net/ethernet/broadcom/b44.c bp->rx_ring, bp->rx_ring_dma);
rx_ring 1170 drivers/net/ethernet/broadcom/b44.c bp->rx_ring = NULL;
rx_ring 1205 drivers/net/ethernet/broadcom/b44.c bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
rx_ring 1207 drivers/net/ethernet/broadcom/b44.c if (!bp->rx_ring) {
rx_ring 1211 drivers/net/ethernet/broadcom/b44.c struct dma_desc *rx_ring;
rx_ring 1214 drivers/net/ethernet/broadcom/b44.c rx_ring = kzalloc(size, gfp);
rx_ring 1215 drivers/net/ethernet/broadcom/b44.c if (!rx_ring)
rx_ring 1218 drivers/net/ethernet/broadcom/b44.c rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
rx_ring 1224 drivers/net/ethernet/broadcom/b44.c kfree(rx_ring);
rx_ring 1228 drivers/net/ethernet/broadcom/b44.c bp->rx_ring = rx_ring;
rx_ring 363 drivers/net/ethernet/broadcom/b44.h struct dma_desc *rx_ring, *tx_ring;
rx_ring 596 drivers/net/ethernet/broadcom/bgmac.c bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]);
rx_ring 608 drivers/net/ethernet/broadcom/bgmac.c bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i],
rx_ring 657 drivers/net/ethernet/broadcom/bgmac.c ring = &bgmac->rx_ring[i];
rx_ring 710 drivers/net/ethernet/broadcom/bgmac.c ring = &bgmac->rx_ring[i];
rx_ring 924 drivers/net/ethernet/broadcom/bgmac.c bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);
rx_ring 1157 drivers/net/ethernet/broadcom/bgmac.c handled += bgmac_dma_rx_read(bgmac, &bgmac->rx_ring[0], weight);
rx_ring 503 drivers/net/ethernet/broadcom/bgmac.h struct bgmac_dma_ring rx_ring[BGMAC_MAX_RX_RINGS];
rx_ring 723 drivers/net/ethernet/broadcom/bnx2.c struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
rx_ring 777 drivers/net/ethernet/broadcom/bnx2.c struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
rx_ring 3150 drivers/net/ethernet/broadcom/bnx2.c struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
rx_ring 3395 drivers/net/ethernet/broadcom/bnx2.c struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
rx_ring 3489 drivers/net/ethernet/broadcom/bnx2.c struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
rx_ring 5141 drivers/net/ethernet/broadcom/bnx2.c rxr = &bnapi->rx_ring;
rx_ring 5216 drivers/net/ethernet/broadcom/bnx2.c bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
rx_ring 5225 drivers/net/ethernet/broadcom/bnx2.c rxbd = &rx_ring[i][0];
rx_ring 5246 drivers/net/ethernet/broadcom/bnx2.c struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
rx_ring 5484 drivers/net/ethernet/broadcom/bnx2.c struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
rx_ring 5826 drivers/net/ethernet/broadcom/bnx2.c rxr = &bnapi->rx_ring;
rx_ring 6781 drivers/net/ethernet/broadcom/bnx2.h struct bnx2_rx_ring_info rx_ring;
rx_ring 871 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
rx_ring 1006 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
rx_ring 1509 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
rx_ring 1675 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
rx_ring 2247 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
rx_ring 2279 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
rx_ring 2546 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (!bp->rx_ring)
rx_ring 2552 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
rx_ring 2720 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
rx_ring 2746 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
rx_ring 2774 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (!bp->rx_ring)
rx_ring 2779 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
rx_ring 2826 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (!bp->rx_ring)
rx_ring 2833 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
rx_ring 3103
drivers/net/ethernet/broadcom/bnxt/bnxt.c rxr = bnapi->rx_ring; rx_ring 3177 drivers/net/ethernet/broadcom/bnxt/bnxt.c rxr = &bp->rx_ring[ring_nr]; rx_ring 3874 drivers/net/ethernet/broadcom/bnxt/bnxt.c rxr = bnapi->rx_ring; rx_ring 3951 drivers/net/ethernet/broadcom/bnxt/bnxt.c kfree(bp->rx_ring); rx_ring 3952 drivers/net/ethernet/broadcom/bnxt/bnxt.c bp->rx_ring = NULL; rx_ring 3991 drivers/net/ethernet/broadcom/bnxt/bnxt.c bp->rx_ring = kcalloc(bp->rx_nr_rings, rx_ring 3994 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (!bp->rx_ring) rx_ring 3998 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; rx_ring 4007 drivers/net/ethernet/broadcom/bnxt/bnxt.c bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; rx_ring 4873 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; rx_ring 4904 drivers/net/ethernet/broadcom/bnxt/bnxt.c rxr = &bp->rx_ring[0]; rx_ring 4998 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; rx_ring 5040 drivers/net/ethernet/broadcom/bnxt/bnxt.c grp_idx = bp->rx_ring[ring].bnapi->index; rx_ring 5100 drivers/net/ethernet/broadcom/bnxt/bnxt.c grp_idx = bp->rx_ring[i].bnapi->index; rx_ring 5170 drivers/net/ethernet/broadcom/bnxt/bnxt.c unsigned int grp_idx = bp->rx_ring[i].bnapi->index; rx_ring 5466 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; rx_ring 5499 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; rx_ring 5573 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; rx_ring 5595 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; rx_ring 6236 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (!bnapi->rx_ring) rx_ring 6244 drivers/net/ethernet/broadcom/bnxt/bnxt.c req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); rx_ring 6271 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (!bnapi->rx_ring) { rx_ring 6275 drivers/net/ethernet/broadcom/bnxt/bnxt.c ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); rx_ring 6287 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (bnapi->rx_ring && bnapi->tx_ring) { rx_ring 6296 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (bnapi->rx_ring) rx_ring 8222 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (rmap && bp->bnapi[i]->rx_ring) { rx_ring 8312 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (bp->bnapi[i]->rx_ring) rx_ring 8327 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (bp->bnapi[i]->rx_ring) { rx_ring 9898 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; rx_ring 946 drivers/net/ethernet/broadcom/bnxt/bnxt.h struct bnxt_rx_ring_info *rx_ring; rx_ring 1557 drivers/net/ethernet/broadcom/bnxt/bnxt.h struct bnxt_rx_ring_info *rx_ring; rx_ring 86 drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c if (cpr && bp->bnapi[i]->rx_ring) rx_ring 2756 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c rxr = bnapi->rx_ring; rx_ring 2815 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; rx_ring 74 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; rx_ring 5239 drivers/net/ethernet/broadcom/cnic.c void *rx_ring; rx_ring 5276 drivers/net/ethernet/broadcom/cnic.c rx_ring = udev->l2_ring + CNIC_PAGE_SIZE; rx_ring 5277 drivers/net/ethernet/broadcom/cnic.c memset(rx_ring, 0, CNIC_PAGE_SIZE); rx_ring 2633 drivers/net/ethernet/broadcom/genet/bcmgenet.c struct 
bcmgenet_rx_ring *rx_ring; rx_ring 2652 drivers/net/ethernet/broadcom/genet/bcmgenet.c rx_ring = &priv->rx_rings[index]; rx_ring 2653 drivers/net/ethernet/broadcom/genet/bcmgenet.c rx_ring->dim.event_ctr++; rx_ring 2655 drivers/net/ethernet/broadcom/genet/bcmgenet.c if (likely(napi_schedule_prep(&rx_ring->napi))) { rx_ring 2656 drivers/net/ethernet/broadcom/genet/bcmgenet.c rx_ring->int_disable(rx_ring); rx_ring 2657 drivers/net/ethernet/broadcom/genet/bcmgenet.c __napi_schedule_irqoff(&rx_ring->napi); rx_ring 2681 drivers/net/ethernet/broadcom/genet/bcmgenet.c struct bcmgenet_rx_ring *rx_ring; rx_ring 2697 drivers/net/ethernet/broadcom/genet/bcmgenet.c rx_ring = &priv->rx_rings[DESC_INDEX]; rx_ring 2698 drivers/net/ethernet/broadcom/genet/bcmgenet.c rx_ring->dim.event_ctr++; rx_ring 2700 drivers/net/ethernet/broadcom/genet/bcmgenet.c if (likely(napi_schedule_prep(&rx_ring->napi))) { rx_ring 2701 drivers/net/ethernet/broadcom/genet/bcmgenet.c rx_ring->int_disable(rx_ring); rx_ring 2702 drivers/net/ethernet/broadcom/genet/bcmgenet.c __napi_schedule_irqoff(&rx_ring->napi); rx_ring 3178 drivers/net/ethernet/broadcom/genet/bcmgenet.c struct bcmgenet_rx_ring *rx_ring; rx_ring 3191 drivers/net/ethernet/broadcom/genet/bcmgenet.c rx_ring = &priv->rx_rings[q]; rx_ring 3193 drivers/net/ethernet/broadcom/genet/bcmgenet.c rx_bytes += rx_ring->bytes; rx_ring 3194 drivers/net/ethernet/broadcom/genet/bcmgenet.c rx_packets += rx_ring->packets; rx_ring 3195 drivers/net/ethernet/broadcom/genet/bcmgenet.c rx_errors += rx_ring->errors; rx_ring 3196 drivers/net/ethernet/broadcom/genet/bcmgenet.c rx_dropped += rx_ring->dropped; rx_ring 3198 drivers/net/ethernet/broadcom/genet/bcmgenet.c rx_ring = &priv->rx_rings[DESC_INDEX]; rx_ring 3199 drivers/net/ethernet/broadcom/genet/bcmgenet.c rx_bytes += rx_ring->bytes; rx_ring 3200 drivers/net/ethernet/broadcom/genet/bcmgenet.c rx_packets += rx_ring->packets; rx_ring 3201 drivers/net/ethernet/broadcom/genet/bcmgenet.c rx_errors += rx_ring->errors; rx_ring 3202 drivers/net/ethernet/broadcom/genet/bcmgenet.c rx_dropped += rx_ring->dropped; rx_ring 1132 drivers/net/ethernet/cadence/macb.h struct macb_dma_desc *rx_ring; rx_ring 216 drivers/net/ethernet/cadence/macb_main.c return &queue->rx_ring[index]; rx_ring 1955 drivers/net/ethernet/cadence/macb_main.c if (queue->rx_ring) { rx_ring 1958 drivers/net/ethernet/cadence/macb_main.c queue->rx_ring, queue->rx_ring_dma); rx_ring 1959 drivers/net/ethernet/cadence/macb_main.c queue->rx_ring = NULL; rx_ring 2024 drivers/net/ethernet/cadence/macb_main.c queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, rx_ring 2026 drivers/net/ethernet/cadence/macb_main.c if (!queue->rx_ring) rx_ring 2030 drivers/net/ethernet/cadence/macb_main.c size, (unsigned long)queue->rx_ring_dma, queue->rx_ring); rx_ring 3646 drivers/net/ethernet/cadence/macb_main.c q->rx_ring = dma_alloc_coherent(&lp->pdev->dev, rx_ring 3650 drivers/net/ethernet/cadence/macb_main.c if (!q->rx_ring) rx_ring 3661 drivers/net/ethernet/cadence/macb_main.c q->rx_ring, q->rx_ring_dma); rx_ring 3662 drivers/net/ethernet/cadence/macb_main.c q->rx_ring = NULL; rx_ring 3755 drivers/net/ethernet/cadence/macb_main.c q->rx_ring, q->rx_ring_dma); rx_ring 3756 drivers/net/ethernet/cadence/macb_main.c q->rx_ring = NULL; rx_ring 135 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c u64 *rx_ring; rx_ring 237 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c p->rx_ring[p->rx_next_fill] = re.d64; rx_ring 374 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c re.d64 = p->rx_ring[p->rx_next]; 
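A minimal sketch, not taken from any driver above, of the allocation shape the b44 and macb hits show: the RX descriptor ring lives in coherent DMA memory, and the driver keeps the CPU pointer (rx_ring) alongside the bus address (rx_ring_dma) later programmed into the controller. Every name here (my_desc, my_priv, MY_RX_RING_SIZE, my_alloc_rx_ring) is a hypothetical stand-in for the per-driver equivalents.

/*
 * Sketch only -- illustrative names, not any one driver's code.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct my_desc {
	__le32 status;
	__le32 length;
	__le32 buffer1;
	__le32 buffer2;
};

#define MY_RX_RING_SIZE		128
#define MY_RX_RING_BYTES	(MY_RX_RING_SIZE * sizeof(struct my_desc))

struct my_priv {
	struct device *dev;
	struct my_desc *rx_ring;	/* CPU view of the descriptors */
	dma_addr_t rx_ring_dma;		/* bus address handed to the NIC */
};

static int my_alloc_rx_ring(struct my_priv *priv)
{
	priv->rx_ring = dma_alloc_coherent(priv->dev, MY_RX_RING_BYTES,
					   &priv->rx_ring_dma, GFP_KERNEL);
	if (!priv->rx_ring)
		return -ENOMEM;
	return 0;
}

The b44 hits above also show a fallback path that kzalloc()s the ring and dma_map_single()s it when the coherent buffer does not suit its DMA engine; the octeon_mgmt hits that follow use that streaming variant exclusively. Either way the driver ends up holding both a CPU pointer and a bus address for the same ring.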
rx_ring 994 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), rx_ring 996 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c if (!p->rx_ring) rx_ring 999 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c dma_map_single(p->dev, p->rx_ring, rx_ring 1232 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c kfree(p->rx_ring); rx_ring 1264 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c kfree(p->rx_ring); rx_ring 306 drivers/net/ethernet/dec/tulip/de2104x.c struct de_desc *rx_ring; rx_ring 416 drivers/net/ethernet/dec/tulip/de2104x.c status = le32_to_cpu(de->rx_ring[rx_tail].opts1); rx_ring 479 drivers/net/ethernet/dec/tulip/de2104x.c de->rx_ring[rx_tail].opts2 = rx_ring 482 drivers/net/ethernet/dec/tulip/de2104x.c de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz); rx_ring 483 drivers/net/ethernet/dec/tulip/de2104x.c de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping); rx_ring 485 drivers/net/ethernet/dec/tulip/de2104x.c de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn); rx_ring 1289 drivers/net/ethernet/dec/tulip/de2104x.c de->rx_ring[i].opts1 = cpu_to_le32(DescOwn); rx_ring 1291 drivers/net/ethernet/dec/tulip/de2104x.c de->rx_ring[i].opts2 = rx_ring 1294 drivers/net/ethernet/dec/tulip/de2104x.c de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz); rx_ring 1295 drivers/net/ethernet/dec/tulip/de2104x.c de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping); rx_ring 1296 drivers/net/ethernet/dec/tulip/de2104x.c de->rx_ring[i].addr2 = 0; rx_ring 1319 drivers/net/ethernet/dec/tulip/de2104x.c de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma); rx_ring 1320 drivers/net/ethernet/dec/tulip/de2104x.c if (!de->rx_ring) rx_ring 1322 drivers/net/ethernet/dec/tulip/de2104x.c de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE]; rx_ring 1330 drivers/net/ethernet/dec/tulip/de2104x.c memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE); rx_ring 1331 drivers/net/ethernet/dec/tulip/de2104x.c de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd); rx_ring 1370 drivers/net/ethernet/dec/tulip/de2104x.c pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma); rx_ring 1371 drivers/net/ethernet/dec/tulip/de2104x.c de->rx_ring = NULL; rx_ring 790 drivers/net/ethernet/dec/tulip/de4x5.c struct de4x5_desc *rx_ring; /* RX descriptor ring */ rx_ring 1172 drivers/net/ethernet/dec/tulip/de4x5.c lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size, rx_ring 1174 drivers/net/ethernet/dec/tulip/de4x5.c if (lp->rx_ring == NULL) { rx_ring 1178 drivers/net/ethernet/dec/tulip/de4x5.c lp->tx_ring = lp->rx_ring + NUM_RX_DESC; rx_ring 1186 drivers/net/ethernet/dec/tulip/de4x5.c lp->rx_ring[i].status = 0; rx_ring 1187 drivers/net/ethernet/dec/tulip/de4x5.c lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ); rx_ring 1188 drivers/net/ethernet/dec/tulip/de4x5.c lp->rx_ring[i].buf = 0; rx_ring 1189 drivers/net/ethernet/dec/tulip/de4x5.c lp->rx_ring[i].next = 0; rx_ring 1200 drivers/net/ethernet/dec/tulip/de4x5.c lp->rx_bufs = (char *)(((long)(lp->rx_ring + NUM_RX_DESC rx_ring 1203 drivers/net/ethernet/dec/tulip/de4x5.c lp->rx_ring[i].status = 0; rx_ring 1204 drivers/net/ethernet/dec/tulip/de4x5.c lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ); rx_ring 1205 drivers/net/ethernet/dec/tulip/de4x5.c lp->rx_ring[i].buf = rx_ring 1207 drivers/net/ethernet/dec/tulip/de4x5.c lp->rx_ring[i].next = 0; rx_ring 1220 drivers/net/ethernet/dec/tulip/de4x5.c lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER); rx_ring 1246 
drivers/net/ethernet/dec/tulip/de4x5.c lp->rx_ring, lp->dma_rings); rx_ring 1277 drivers/net/ethernet/dec/tulip/de4x5.c lp->rx_ring, lp->dma_rings); rx_ring 1418 drivers/net/ethernet/dec/tulip/de4x5.c lp->rx_ring[i].status = cpu_to_le32(R_OWN); rx_ring 1608 drivers/net/ethernet/dec/tulip/de4x5.c for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0; rx_ring 1610 drivers/net/ethernet/dec/tulip/de4x5.c status = (s32)le32_to_cpu(lp->rx_ring[entry].status); rx_ring 1637 drivers/net/ethernet/dec/tulip/de4x5.c short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status) rx_ring 1660 drivers/net/ethernet/dec/tulip/de4x5.c lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN); rx_ring 1663 drivers/net/ethernet/dec/tulip/de4x5.c lp->rx_ring[entry].status = cpu_to_le32(R_OWN); rx_ring 1802 drivers/net/ethernet/dec/tulip/de4x5.c for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) { rx_ring 1803 drivers/net/ethernet/dec/tulip/de4x5.c lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN); rx_ring 3611 drivers/net/ethernet/dec/tulip/de4x5.c lp->rx_ring[index].buf = cpu_to_le32(tmp + i); rx_ring 3651 drivers/net/ethernet/dec/tulip/de4x5.c lp->rx_ring[i].status = 0; rx_ring 3716 drivers/net/ethernet/dec/tulip/de4x5.c lp->rx_ring[i].status = cpu_to_le32(R_OWN); rx_ring 5230 drivers/net/ethernet/dec/tulip/de4x5.c printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring); rx_ring 5234 drivers/net/ethernet/dec/tulip/de4x5.c printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status); rx_ring 5237 drivers/net/ethernet/dec/tulip/de4x5.c printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status); rx_ring 5248 drivers/net/ethernet/dec/tulip/de4x5.c printk("0x%8.8x ",le32_to_cpu(lp->rx_ring[i].buf)); rx_ring 5251 drivers/net/ethernet/dec/tulip/de4x5.c printk("...0x%8.8x\n",le32_to_cpu(lp->rx_ring[i].buf)); rx_ring 87 drivers/net/ethernet/dec/tulip/interrupt.c tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping); rx_ring 90 drivers/net/ethernet/dec/tulip/interrupt.c tp->rx_ring[entry].status = cpu_to_le32(DescOwned); rx_ring 132 drivers/net/ethernet/dec/tulip/interrupt.c entry, tp->rx_ring[entry].status); rx_ring 144 drivers/net/ethernet/dec/tulip/interrupt.c while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { rx_ring 145 drivers/net/ethernet/dec/tulip/interrupt.c s32 status = le32_to_cpu(tp->rx_ring[entry].status); rx_ring 234 drivers/net/ethernet/dec/tulip/interrupt.c le32_to_cpu(tp->rx_ring[entry].buffer1)) { rx_ring 237 drivers/net/ethernet/dec/tulip/interrupt.c le32_to_cpu(tp->rx_ring[entry].buffer1), rx_ring 374 drivers/net/ethernet/dec/tulip/interrupt.c entry, tp->rx_ring[entry].status); rx_ring 376 drivers/net/ethernet/dec/tulip/interrupt.c while ( ! 
(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
rx_ring 377 drivers/net/ethernet/dec/tulip/interrupt.c s32 status = le32_to_cpu(tp->rx_ring[entry].status);
rx_ring 460 drivers/net/ethernet/dec/tulip/interrupt.c le32_to_cpu(tp->rx_ring[entry].buffer1)) {
rx_ring 463 drivers/net/ethernet/dec/tulip/interrupt.c le32_to_cpu(tp->rx_ring[entry].buffer1),
rx_ring 404 drivers/net/ethernet/dec/tulip/tulip.h struct tulip_rx_desc *rx_ring;
rx_ring 579 drivers/net/ethernet/dec/tulip/tulip_core.c u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
rx_ring 584 drivers/net/ethernet/dec/tulip/tulip_core.c (unsigned int)tp->rx_ring[i].status,
rx_ring 585 drivers/net/ethernet/dec/tulip/tulip_core.c (unsigned int)tp->rx_ring[i].length,
rx_ring 586 drivers/net/ethernet/dec/tulip/tulip_core.c (unsigned int)tp->rx_ring[i].buffer1,
rx_ring 587 drivers/net/ethernet/dec/tulip/tulip_core.c (unsigned int)tp->rx_ring[i].buffer2,
rx_ring 594 drivers/net/ethernet/dec/tulip/tulip_core.c printk(KERN_DEBUG " Rx ring %p: ", tp->rx_ring);
rx_ring 596 drivers/net/ethernet/dec/tulip/tulip_core.c pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
rx_ring 624 drivers/net/ethernet/dec/tulip/tulip_core.c tp->rx_ring[i].status = 0x00000000;
rx_ring 625 drivers/net/ethernet/dec/tulip/tulip_core.c tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
rx_ring 626 drivers/net/ethernet/dec/tulip/tulip_core.c tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
rx_ring 631 drivers/net/ethernet/dec/tulip/tulip_core.c tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
rx_ring 632 drivers/net/ethernet/dec/tulip/tulip_core.c tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
rx_ring 647 drivers/net/ethernet/dec/tulip/tulip_core.c tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */
rx_ring 648 drivers/net/ethernet/dec/tulip/tulip_core.c tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
rx_ring 804 drivers/net/ethernet/dec/tulip/tulip_core.c tp->rx_ring[i].status = 0; /* Not owned by Tulip chip. */
rx_ring 805 drivers/net/ethernet/dec/tulip/tulip_core.c tp->rx_ring[i].length = 0;
rx_ring 807 drivers/net/ethernet/dec/tulip/tulip_core.c tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
rx_ring 1442 drivers/net/ethernet/dec/tulip/tulip_core.c tp->rx_ring = pci_alloc_consistent(pdev,
rx_ring 1446 drivers/net/ethernet/dec/tulip/tulip_core.c if (!tp->rx_ring)
rx_ring 1448 drivers/net/ethernet/dec/tulip/tulip_core.c tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
rx_ring 1780 drivers/net/ethernet/dec/tulip/tulip_core.c tp->rx_ring, tp->rx_ring_dma);
rx_ring 1928 drivers/net/ethernet/dec/tulip/tulip_core.c tp->rx_ring, tp->rx_ring_dma);
rx_ring 295 drivers/net/ethernet/dec/tulip/winbond-840.c struct w840_rx_desc *rx_ring;
rx_ring 797 drivers/net/ethernet/dec/tulip/winbond-840.c np->rx_head_desc = &np->rx_ring[0];
rx_ring 798 drivers/net/ethernet/dec/tulip/winbond-840.c np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];
rx_ring 802 drivers/net/ethernet/dec/tulip/winbond-840.c np->rx_ring[i].length = np->rx_buf_sz;
rx_ring 803 drivers/net/ethernet/dec/tulip/winbond-840.c np->rx_ring[i].status = 0;
rx_ring 807 drivers/net/ethernet/dec/tulip/winbond-840.c np->rx_ring[i-1].length |= DescEndRing;
rx_ring 818 drivers/net/ethernet/dec/tulip/winbond-840.c np->rx_ring[i].buffer1 = np->rx_addr[i];
rx_ring 819 drivers/net/ethernet/dec/tulip/winbond-840.c np->rx_ring[i].status = DescOwned;
rx_ring 844 drivers/net/ethernet/dec/tulip/winbond-840.c np->rx_ring[i].status = 0;
rx_ring 935 drivers/net/ethernet/dec/tulip/winbond-840.c printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
rx_ring 937 drivers/net/ethernet/dec/tulip/winbond-840.c printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
rx_ring 977 drivers/net/ethernet/dec/tulip/winbond-840.c np->rx_ring = pci_alloc_consistent(np->pci_dev,
rx_ring 981 drivers/net/ethernet/dec/tulip/winbond-840.c if(!np->rx_ring)
rx_ring 992 drivers/net/ethernet/dec/tulip/winbond-840.c np->rx_ring, np->ring_dma_addr);
rx_ring 1183 drivers/net/ethernet/dec/tulip/winbond-840.c entry, np->rx_ring[entry].status);
rx_ring 1259 drivers/net/ethernet/dec/tulip/winbond-840.c np->rx_head_desc = &np->rx_ring[entry];
rx_ring 1274 drivers/net/ethernet/dec/tulip/winbond-840.c np->rx_ring[entry].buffer1 = np->rx_addr[entry];
rx_ring 1277 drivers/net/ethernet/dec/tulip/winbond-840.c np->rx_ring[entry].status = DescOwned;
rx_ring 1514 drivers/net/ethernet/dec/tulip/winbond-840.c printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
rx_ring 1517 drivers/net/ethernet/dec/tulip/winbond-840.c i, np->rx_ring[i].length,
rx_ring 1518 drivers/net/ethernet/dec/tulip/winbond-840.c np->rx_ring[i].status, np->rx_ring[i].buffer1);
rx_ring 243 drivers/net/ethernet/dlink/dl2k.c np->rx_ring = ring_space;
rx_ring 291 drivers/net/ethernet/dlink/dl2k.c pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
rx_ring 447 drivers/net/ethernet/dlink/dl2k.c pci_unmap_single(np->pdev, desc_to_dma(&np->rx_ring[i]),
rx_ring 452 drivers/net/ethernet/dlink/dl2k.c np->rx_ring[i].status = 0;
rx_ring 453 drivers/net/ethernet/dlink/dl2k.c np->rx_ring[i].fraginfo = 0;
rx_ring 479 drivers/net/ethernet/dlink/dl2k.c np->rx_ring[i].status = 0;
rx_ring 511 drivers/net/ethernet/dlink/dl2k.c np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma +
rx_ring 515 drivers/net/ethernet/dlink/dl2k.c np->rx_ring[i].fraginfo =
rx_ring 519 drivers/net/ethernet/dlink/dl2k.c np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
rx_ring 676 drivers/net/ethernet/dlink/dl2k.c
np->rx_ring[entry].fraginfo = 0; rx_ring 683 drivers/net/ethernet/dlink/dl2k.c np->rx_ring[entry].fraginfo = rx_ring 688 drivers/net/ethernet/dlink/dl2k.c np->rx_ring[entry].fraginfo |= rx_ring 690 drivers/net/ethernet/dlink/dl2k.c np->rx_ring[entry].status = 0; rx_ring 931 drivers/net/ethernet/dlink/dl2k.c struct netdev_desc *desc = &np->rx_ring[entry]; rx_ring 1003 drivers/net/ethernet/dlink/dl2k.c np->rx_ring[entry].fraginfo = 0; rx_ring 1011 drivers/net/ethernet/dlink/dl2k.c np->rx_ring[entry].fraginfo = rx_ring 1016 drivers/net/ethernet/dlink/dl2k.c np->rx_ring[entry].fraginfo |= rx_ring 1018 drivers/net/ethernet/dlink/dl2k.c np->rx_ring[entry].status = 0; rx_ring 1809 drivers/net/ethernet/dlink/dl2k.c pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, rx_ring 366 drivers/net/ethernet/dlink/dl2k.h struct netdev_desc *rx_ring; rx_ring 371 drivers/net/ethernet/dlink/sundance.c struct netdev_desc *rx_ring; rx_ring 569 drivers/net/ethernet/dlink/sundance.c np->rx_ring = (struct netdev_desc *)ring_space; rx_ring 705 drivers/net/ethernet/dlink/sundance.c np->rx_ring, np->rx_ring_dma); rx_ring 1038 drivers/net/ethernet/dlink/sundance.c np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma + rx_ring 1039 drivers/net/ethernet/dlink/sundance.c ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring)); rx_ring 1040 drivers/net/ethernet/dlink/sundance.c np->rx_ring[i].status = 0; rx_ring 1041 drivers/net/ethernet/dlink/sundance.c np->rx_ring[i].frag[0].length = 0; rx_ring 1053 drivers/net/ethernet/dlink/sundance.c np->rx_ring[i].frag[0].addr = cpu_to_le32( rx_ring 1057 drivers/net/ethernet/dlink/sundance.c np->rx_ring[i].frag[0].addr)) { rx_ring 1062 drivers/net/ethernet/dlink/sundance.c np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag); rx_ring 1341 drivers/net/ethernet/dlink/sundance.c struct netdev_desc *desc = &(np->rx_ring[entry]); rx_ring 1442 drivers/net/ethernet/dlink/sundance.c np->rx_ring[entry].frag[0].addr = cpu_to_le32( rx_ring 1446 drivers/net/ethernet/dlink/sundance.c np->rx_ring[entry].frag[0].addr)) { rx_ring 1453 drivers/net/ethernet/dlink/sundance.c np->rx_ring[entry].frag[0].length = rx_ring 1455 drivers/net/ethernet/dlink/sundance.c np->rx_ring[entry].status = 0; rx_ring 1892 drivers/net/ethernet/dlink/sundance.c i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr, rx_ring 1893 drivers/net/ethernet/dlink/sundance.c np->rx_ring[i].frag[0].length); rx_ring 1904 drivers/net/ethernet/dlink/sundance.c np->rx_ring[i].status = 0; rx_ring 1908 drivers/net/ethernet/dlink/sundance.c le32_to_cpu(np->rx_ring[i].frag[0].addr), rx_ring 1913 drivers/net/ethernet/dlink/sundance.c np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */ rx_ring 1938 drivers/net/ethernet/dlink/sundance.c np->rx_ring, np->rx_ring_dma); rx_ring 377 drivers/net/ethernet/fealnx.c struct fealnx_desc *rx_ring; rx_ring 565 drivers/net/ethernet/fealnx.c np->rx_ring = ring_space; rx_ring 675 drivers/net/ethernet/fealnx.c pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); rx_ring 695 drivers/net/ethernet/fealnx.c pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, rx_ring 1154 drivers/net/ethernet/fealnx.c iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring), rx_ring 1206 drivers/net/ethernet/fealnx.c printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring); rx_ring 1209 drivers/net/ethernet/fealnx.c (unsigned int) np->rx_ring[i].status); rx_ring 1239 drivers/net/ethernet/fealnx.c np->cur_rx = &np->rx_ring[0]; rx_ring 1240 drivers/net/ethernet/fealnx.c np->lack_rxbuf 
= np->rx_ring; rx_ring 1245 drivers/net/ethernet/fealnx.c np->rx_ring[i].status = 0; rx_ring 1246 drivers/net/ethernet/fealnx.c np->rx_ring[i].control = np->rx_buf_sz << RBSShift; rx_ring 1247 drivers/net/ethernet/fealnx.c np->rx_ring[i].next_desc = np->rx_ring_dma + rx_ring 1249 drivers/net/ethernet/fealnx.c np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1]; rx_ring 1250 drivers/net/ethernet/fealnx.c np->rx_ring[i].skbuff = NULL; rx_ring 1254 drivers/net/ethernet/fealnx.c np->rx_ring[i - 1].next_desc = np->rx_ring_dma; rx_ring 1255 drivers/net/ethernet/fealnx.c np->rx_ring[i - 1].next_desc_logical = np->rx_ring; rx_ring 1262 drivers/net/ethernet/fealnx.c np->lack_rxbuf = &np->rx_ring[i]; rx_ring 1267 drivers/net/ethernet/fealnx.c np->rx_ring[i].skbuff = skb; rx_ring 1268 drivers/net/ethernet/fealnx.c np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data, rx_ring 1270 drivers/net/ethernet/fealnx.c np->rx_ring[i].status = RXOWN; rx_ring 1271 drivers/net/ethernet/fealnx.c np->rx_ring[i].control |= RXIC; rx_ring 1421 drivers/net/ethernet/fealnx.c iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring), rx_ring 1910 drivers/net/ethernet/fealnx.c struct sk_buff *skb = np->rx_ring[i].skbuff; rx_ring 1912 drivers/net/ethernet/fealnx.c np->rx_ring[i].status = 0; rx_ring 1914 drivers/net/ethernet/fealnx.c pci_unmap_single(np->pci_dev, np->rx_ring[i].buffer, rx_ring 1917 drivers/net/ethernet/fealnx.c np->rx_ring[i].skbuff = NULL; rx_ring 266 drivers/net/ethernet/freescale/enetc/enetc.c static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, rx_ring 281 drivers/net/ethernet/freescale/enetc/enetc.c work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget); rx_ring 410 drivers/net/ethernet/freescale/enetc/enetc.c static bool enetc_new_page(struct enetc_bdr *rx_ring, rx_ring 420 drivers/net/ethernet/freescale/enetc/enetc.c addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); rx_ring 421 drivers/net/ethernet/freescale/enetc/enetc.c if (unlikely(dma_mapping_error(rx_ring->dev, addr))) { rx_ring 434 drivers/net/ethernet/freescale/enetc/enetc.c static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt) rx_ring 440 drivers/net/ethernet/freescale/enetc/enetc.c i = rx_ring->next_to_use; rx_ring 441 drivers/net/ethernet/freescale/enetc/enetc.c rx_swbd = &rx_ring->rx_swbd[i]; rx_ring 442 drivers/net/ethernet/freescale/enetc/enetc.c rxbd = ENETC_RXBD(*rx_ring, i); rx_ring 447 drivers/net/ethernet/freescale/enetc/enetc.c if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) { rx_ring 448 drivers/net/ethernet/freescale/enetc/enetc.c rx_ring->stats.rx_alloc_errs++; rx_ring 462 drivers/net/ethernet/freescale/enetc/enetc.c if (unlikely(i == rx_ring->bd_count)) { rx_ring 464 drivers/net/ethernet/freescale/enetc/enetc.c rx_swbd = rx_ring->rx_swbd; rx_ring 465 drivers/net/ethernet/freescale/enetc/enetc.c rxbd = ENETC_RXBD(*rx_ring, 0); rx_ring 470 drivers/net/ethernet/freescale/enetc/enetc.c rx_ring->next_to_alloc = i; /* keep track from page reuse */ rx_ring 471 drivers/net/ethernet/freescale/enetc/enetc.c rx_ring->next_to_use = i; rx_ring 473 drivers/net/ethernet/freescale/enetc/enetc.c enetc_wr_reg(rx_ring->rcir, i); rx_ring 504 drivers/net/ethernet/freescale/enetc/enetc.c static void enetc_get_offloads(struct enetc_bdr *rx_ring, rx_ring 508 drivers/net/ethernet/freescale/enetc/enetc.c struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev); rx_ring 511 drivers/net/ethernet/freescale/enetc/enetc.c if (rx_ring->ndev->features & NETIF_F_RXCSUM) { rx_ring 
526 drivers/net/ethernet/freescale/enetc/enetc.c enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb); rx_ring 530 drivers/net/ethernet/freescale/enetc/enetc.c static void enetc_process_skb(struct enetc_bdr *rx_ring, rx_ring 533 drivers/net/ethernet/freescale/enetc/enetc.c skb_record_rx_queue(skb, rx_ring->index); rx_ring 534 drivers/net/ethernet/freescale/enetc/enetc.c skb->protocol = eth_type_trans(skb, rx_ring->ndev); rx_ring 542 drivers/net/ethernet/freescale/enetc/enetc.c static void enetc_reuse_page(struct enetc_bdr *rx_ring, rx_ring 547 drivers/net/ethernet/freescale/enetc/enetc.c new = &rx_ring->rx_swbd[rx_ring->next_to_alloc]; rx_ring 550 drivers/net/ethernet/freescale/enetc/enetc.c enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc); rx_ring 556 drivers/net/ethernet/freescale/enetc/enetc.c static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring, rx_ring 559 drivers/net/ethernet/freescale/enetc/enetc.c struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; rx_ring 561 drivers/net/ethernet/freescale/enetc/enetc.c dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma, rx_ring 567 drivers/net/ethernet/freescale/enetc/enetc.c static void enetc_put_rx_buff(struct enetc_bdr *rx_ring, rx_ring 574 drivers/net/ethernet/freescale/enetc/enetc.c enetc_reuse_page(rx_ring, rx_swbd); rx_ring 577 drivers/net/ethernet/freescale/enetc/enetc.c dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma, rx_ring 582 drivers/net/ethernet/freescale/enetc/enetc.c dma_unmap_page(rx_ring->dev, rx_swbd->dma, rx_ring 589 drivers/net/ethernet/freescale/enetc/enetc.c static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring, rx_ring 592 drivers/net/ethernet/freescale/enetc/enetc.c struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); rx_ring 599 drivers/net/ethernet/freescale/enetc/enetc.c rx_ring->stats.rx_alloc_errs++; rx_ring 606 drivers/net/ethernet/freescale/enetc/enetc.c enetc_put_rx_buff(rx_ring, rx_swbd); rx_ring 611 drivers/net/ethernet/freescale/enetc/enetc.c static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i, rx_ring 614 drivers/net/ethernet/freescale/enetc/enetc.c struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); rx_ring 619 drivers/net/ethernet/freescale/enetc/enetc.c enetc_put_rx_buff(rx_ring, rx_swbd); rx_ring 624 drivers/net/ethernet/freescale/enetc/enetc.c static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, rx_ring 630 drivers/net/ethernet/freescale/enetc/enetc.c cleaned_cnt = enetc_bd_unused(rx_ring); rx_ring 632 drivers/net/ethernet/freescale/enetc/enetc.c i = rx_ring->next_to_clean; rx_ring 641 drivers/net/ethernet/freescale/enetc/enetc.c int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt); rx_ring 646 drivers/net/ethernet/freescale/enetc/enetc.c rxbd = ENETC_RXBD(*rx_ring, i); rx_ring 651 drivers/net/ethernet/freescale/enetc/enetc.c enetc_wr_reg(rx_ring->idr, BIT(rx_ring->index)); rx_ring 654 drivers/net/ethernet/freescale/enetc/enetc.c skb = enetc_map_rx_buff_to_skb(rx_ring, i, size); rx_ring 658 drivers/net/ethernet/freescale/enetc/enetc.c enetc_get_offloads(rx_ring, rxbd, skb); rx_ring 663 drivers/net/ethernet/freescale/enetc/enetc.c if (unlikely(i == rx_ring->bd_count)) { rx_ring 665 drivers/net/ethernet/freescale/enetc/enetc.c rxbd = ENETC_RXBD(*rx_ring, 0); rx_ring 676 drivers/net/ethernet/freescale/enetc/enetc.c if (unlikely(i == rx_ring->bd_count)) { rx_ring 678 drivers/net/ethernet/freescale/enetc/enetc.c rxbd = ENETC_RXBD(*rx_ring, 0); rx_ring 682 
drivers/net/ethernet/freescale/enetc/enetc.c rx_ring->ndev->stats.rx_dropped++; rx_ring 683 drivers/net/ethernet/freescale/enetc/enetc.c rx_ring->ndev->stats.rx_errors++; rx_ring 698 drivers/net/ethernet/freescale/enetc/enetc.c enetc_add_rx_buff_to_skb(rx_ring, i, size, skb); rx_ring 703 drivers/net/ethernet/freescale/enetc/enetc.c if (unlikely(i == rx_ring->bd_count)) { rx_ring 705 drivers/net/ethernet/freescale/enetc/enetc.c rxbd = ENETC_RXBD(*rx_ring, 0); rx_ring 711 drivers/net/ethernet/freescale/enetc/enetc.c enetc_process_skb(rx_ring, skb); rx_ring 718 drivers/net/ethernet/freescale/enetc/enetc.c rx_ring->next_to_clean = i; rx_ring 720 drivers/net/ethernet/freescale/enetc/enetc.c rx_ring->stats.packets += rx_frm_cnt; rx_ring 721 drivers/net/ethernet/freescale/enetc/enetc.c rx_ring->stats.bytes += rx_byte_cnt; rx_ring 870 drivers/net/ethernet/freescale/enetc/enetc.c err = enetc_alloc_rxbdr(priv->rx_ring[i]); rx_ring 880 drivers/net/ethernet/freescale/enetc/enetc.c enetc_free_rxbdr(priv->rx_ring[i]); rx_ring 890 drivers/net/ethernet/freescale/enetc/enetc.c enetc_free_rxbdr(priv->rx_ring[i]); rx_ring 910 drivers/net/ethernet/freescale/enetc/enetc.c static void enetc_free_rx_ring(struct enetc_bdr *rx_ring) rx_ring 914 drivers/net/ethernet/freescale/enetc/enetc.c if (!rx_ring->rx_swbd) rx_ring 917 drivers/net/ethernet/freescale/enetc/enetc.c for (i = 0; i < rx_ring->bd_count; i++) { rx_ring 918 drivers/net/ethernet/freescale/enetc/enetc.c struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; rx_ring 923 drivers/net/ethernet/freescale/enetc/enetc.c dma_unmap_page(rx_ring->dev, rx_swbd->dma, rx_ring 929 drivers/net/ethernet/freescale/enetc/enetc.c rx_ring->next_to_clean = 0; rx_ring 930 drivers/net/ethernet/freescale/enetc/enetc.c rx_ring->next_to_use = 0; rx_ring 931 drivers/net/ethernet/freescale/enetc/enetc.c rx_ring->next_to_alloc = 0; rx_ring 939 drivers/net/ethernet/freescale/enetc/enetc.c enetc_free_rx_ring(priv->rx_ring[i]); rx_ring 1137 drivers/net/ethernet/freescale/enetc/enetc.c static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) rx_ring 1139 drivers/net/ethernet/freescale/enetc/enetc.c int idx = rx_ring->index; rx_ring 1143 drivers/net/ethernet/freescale/enetc/enetc.c lower_32_bits(rx_ring->bd_dma_base)); rx_ring 1146 drivers/net/ethernet/freescale/enetc/enetc.c upper_32_bits(rx_ring->bd_dma_base)); rx_ring 1148 drivers/net/ethernet/freescale/enetc/enetc.c WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */ rx_ring 1150 drivers/net/ethernet/freescale/enetc/enetc.c ENETC_RTBLENR_LEN(rx_ring->bd_count)); rx_ring 1163 drivers/net/ethernet/freescale/enetc/enetc.c if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) rx_ring 1166 drivers/net/ethernet/freescale/enetc/enetc.c rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR); rx_ring 1167 drivers/net/ethernet/freescale/enetc/enetc.c rx_ring->idr = hw->reg + ENETC_SIRXIDR; rx_ring 1169 drivers/net/ethernet/freescale/enetc/enetc.c enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring)); rx_ring 1183 drivers/net/ethernet/freescale/enetc/enetc.c enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]); rx_ring 1186 drivers/net/ethernet/freescale/enetc/enetc.c static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) rx_ring 1188 drivers/net/ethernet/freescale/enetc/enetc.c int idx = rx_ring->index; rx_ring 1222 drivers/net/ethernet/freescale/enetc/enetc.c enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]); rx_ring 1494 drivers/net/ethernet/freescale/enetc/enetc.c packets += 
priv->rx_ring[i]->stats.packets; rx_ring 1495 drivers/net/ethernet/freescale/enetc/enetc.c bytes += priv->rx_ring[i]->stats.bytes; rx_ring 1661 drivers/net/ethernet/freescale/enetc/enetc.c bdr = &v->rx_ring; rx_ring 1666 drivers/net/ethernet/freescale/enetc/enetc.c priv->rx_ring[i] = bdr; rx_ring 1693 drivers/net/ethernet/freescale/enetc/enetc.c priv->rx_ring[i] = NULL; rx_ring 161 drivers/net/ethernet/freescale/enetc/enetc.h struct enetc_bdr rx_ring ____cacheline_aligned_in_smp; rx_ring 192 drivers/net/ethernet/freescale/enetc/enetc.h struct enetc_bdr *rx_ring[16]; rx_ring 262 drivers/net/ethernet/freescale/enetc/enetc_ethtool.c data[o++] = priv->rx_ring[i]->stats.packets; rx_ring 263 drivers/net/ethernet/freescale/enetc/enetc_ethtool.c data[o++] = priv->rx_ring[i]->stats.rx_alloc_errs; rx_ring 377 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { rx_ring 390 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP)); rx_ring 395 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c for (; i < fep->rx_ring; i++, bdp++) { rx_ring 397 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP); rx_ring 436 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { rx_ring 948 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c fpi->rx_ring = RX_RING_SIZE; rx_ring 986 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c (fpi->rx_ring + fpi->tx_ring) + rx_ring 1009 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring; rx_ring 1010 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c fep->mapped_as_page = (char *)(fep->rx_skbuff + fpi->rx_ring + rx_ring 1025 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring; rx_ring 1028 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c fep->rx_ring = fpi->rx_ring; rx_ring 130 drivers/net/ethernet/freescale/fs_enet/fs_enet.h int rx_ring, tx_ring; rx_ring 151 drivers/net/ethernet/freescale/fs_enet/mac-fcc.c (fpi->tx_ring + fpi->rx_ring) * rx_ring 167 drivers/net/ethernet/freescale/fs_enet/mac-fcc.c (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), rx_ring 264 drivers/net/ethernet/freescale/fs_enet/mac-fcc.c tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring; rx_ring 135 drivers/net/ethernet/freescale/fs_enet/mac-fec.c (fpi->tx_ring + fpi->rx_ring) * rx_ring 150 drivers/net/ethernet/freescale/fs_enet/mac-fec.c dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) rx_ring 273 drivers/net/ethernet/freescale/fs_enet/mac-fec.c tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring; rx_ring 137 drivers/net/ethernet/freescale/fs_enet/mac-scc.c fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) * rx_ring 253 drivers/net/ethernet/freescale/fs_enet/mac-scc.c fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring); rx_ring 249 drivers/net/ethernet/hisilicon/hns/hnae.c ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR); rx_ring 270 drivers/net/ethernet/hisilicon/hns/hnae.c hnae_fini_ring(&q->rx_ring); rx_ring 356 drivers/net/ethernet/hisilicon/hns/hnae.h struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp; rx_ring 671 drivers/net/ethernet/hisilicon/hns/hnae.h ring = &h->qs[i]->rx_ring; rx_ring 686 drivers/net/ethernet/hisilicon/hns/hnae.h ring = &h->qs[i]->rx_ring; rx_ring 116 
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i]; rx_ring 320 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c q->rx_ring.buf_size = rx_buf_size; rx_ring 668 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c rx_bytes += queue->rx_ring.stats.rx_bytes; rx_ring 669 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c rx_packets += queue->rx_ring.stats.rx_pkts; rx_ring 671 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c rx_errors += queue->rx_ring.stats.err_pkt_len rx_ring 672 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c + queue->rx_ring.stats.l2_err rx_ring 673 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c + queue->rx_ring.stats.l3l4_csum_err; rx_ring 250 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c (ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring; rx_ring 451 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c ring = &q->rx_ring; rx_ring 878 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c regs_buff[16] = queue->rx_ring.stats.rx_pkts; rx_ring 879 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c regs_buff[17] = queue->rx_ring.stats.rx_bytes; rx_ring 880 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c regs_buff[18] = queue->rx_ring.stats.rx_err_cnt; rx_ring 881 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c regs_buff[19] = queue->rx_ring.stats.io_err_cnt; rx_ring 882 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c regs_buff[20] = queue->rx_ring.stats.sw_err_cnt; rx_ring 883 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c regs_buff[21] = queue->rx_ring.stats.seg_pkt_cnt; rx_ring 884 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c regs_buff[22] = queue->rx_ring.stats.reuse_pg_cnt; rx_ring 885 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c regs_buff[23] = queue->rx_ring.stats.err_pkt_len; rx_ring 886 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c regs_buff[24] = queue->rx_ring.stats.non_vld_descs; rx_ring 887 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c regs_buff[25] = queue->rx_ring.stats.err_bd_num; rx_ring 888 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c regs_buff[26] = queue->rx_ring.stats.l2_err; rx_ring 889 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err; rx_ring 1655 drivers/net/ethernet/hisilicon/hns/hns_enet.c ring = &h->qs[i]->rx_ring; rx_ring 1908 drivers/net/ethernet/hisilicon/hns/hns_enet.c rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes; rx_ring 1909 drivers/net/ethernet/hisilicon/hns/hns_enet.c rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts; rx_ring 2011 drivers/net/ethernet/hisilicon/hns/hns_enet.c i, h->qs[i]->rx_ring.next_to_clean); rx_ring 2013 drivers/net/ethernet/hisilicon/hns/hns_enet.c i, h->qs[i]->rx_ring.next_to_use); rx_ring 2134 drivers/net/ethernet/hisilicon/hns/hns_enet.c rd->ring = &h->qs[i - h->q_num]->rx_ring; rx_ring 680 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c param->rx_pending = queue->rx_ring.desc_num; rx_ring 3196 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hns3_enet_ring *rx_ring; rx_ring 3229 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c rx_ring = tqp_vector->rx_group.ring; rx_ring 3230 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (!tx_ring && rx_ring) { rx_ring 3232 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c cur_chain->tqp_index = rx_ring->tqp->tqp_index; rx_ring 3238 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c rx_ring = rx_ring->next; rx_ring 3241 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c while (rx_ring) { rx_ring 3247 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 
chain->tqp_index = rx_ring->tqp->tqp_index; rx_ring 3255 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c rx_ring = rx_ring->next; rx_ring 4178 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hns3_enet_ring *rx_ring; rx_ring 4196 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring; rx_ring 4197 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c hns3_init_ring_hw(rx_ring); rx_ring 4198 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ret = hns3_clear_rx_ring(rx_ring); rx_ring 4205 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c for (j = 0; j < rx_ring->desc_num; j++) rx_ring 4206 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c hns3_reuse_buffer(rx_ring, j); rx_ring 4208 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c rx_ring->next_to_clean = 0; rx_ring 4209 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c rx_ring->next_to_use = 0; rx_ring 250 drivers/net/ethernet/intel/e1000/e1000.h struct e1000_rx_ring *rx_ring, rx_ring 253 drivers/net/ethernet/intel/e1000/e1000.h struct e1000_rx_ring *rx_ring, rx_ring 255 drivers/net/ethernet/intel/e1000/e1000.h struct e1000_rx_ring *rx_ring; /* One per active queue */ rx_ring 550 drivers/net/ethernet/intel/e1000/e1000_ethtool.c struct e1000_rx_ring *rxdr = adapter->rx_ring; rx_ring 580 drivers/net/ethernet/intel/e1000/e1000_ethtool.c rx_old = adapter->rx_ring; rx_ring 594 drivers/net/ethernet/intel/e1000/e1000_ethtool.c adapter->rx_ring = rxdr; rx_ring 624 drivers/net/ethernet/intel/e1000/e1000_ethtool.c adapter->rx_ring = rx_old; rx_ring 628 drivers/net/ethernet/intel/e1000/e1000_ethtool.c adapter->rx_ring = rxdr; rx_ring 641 drivers/net/ethernet/intel/e1000/e1000_ethtool.c adapter->rx_ring = rx_old; rx_ring 83 drivers/net/ethernet/intel/e1000/e1000_main.c struct e1000_rx_ring *rx_ring); rx_ring 102 drivers/net/ethernet/intel/e1000/e1000_main.c struct e1000_rx_ring *rx_ring); rx_ring 116 drivers/net/ethernet/intel/e1000/e1000_main.c struct e1000_rx_ring *rx_ring, rx_ring 119 drivers/net/ethernet/intel/e1000/e1000_main.c struct e1000_rx_ring *rx_ring, rx_ring 122 drivers/net/ethernet/intel/e1000/e1000_main.c struct e1000_rx_ring *rx_ring, rx_ring 127 drivers/net/ethernet/intel/e1000/e1000_main.c struct e1000_rx_ring *rx_ring, rx_ring 130 drivers/net/ethernet/intel/e1000/e1000_main.c struct e1000_rx_ring *rx_ring, rx_ring 382 drivers/net/ethernet/intel/e1000/e1000_main.c struct e1000_rx_ring *ring = &adapter->rx_ring[i]; rx_ring 1231 drivers/net/ethernet/intel/e1000/e1000_main.c kfree(adapter->rx_ring); rx_ring 1272 drivers/net/ethernet/intel/e1000/e1000_main.c kfree(adapter->rx_ring); rx_ring 1331 drivers/net/ethernet/intel/e1000/e1000_main.c adapter->rx_ring = kcalloc(adapter->num_rx_queues, rx_ring 1333 drivers/net/ethernet/intel/e1000/e1000_main.c if (!adapter->rx_ring) { rx_ring 1760 drivers/net/ethernet/intel/e1000/e1000_main.c err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); rx_ring 1765 drivers/net/ethernet/intel/e1000/e1000_main.c &adapter->rx_ring[i]); rx_ring 1853 drivers/net/ethernet/intel/e1000/e1000_main.c rdlen = adapter->rx_ring[0].count * rx_ring 1858 drivers/net/ethernet/intel/e1000/e1000_main.c rdlen = adapter->rx_ring[0].count * rx_ring 1883 drivers/net/ethernet/intel/e1000/e1000_main.c rdba = adapter->rx_ring[0].dma; rx_ring 1889 drivers/net/ethernet/intel/e1000/e1000_main.c adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? rx_ring 1891 drivers/net/ethernet/intel/e1000/e1000_main.c adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? 
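The e1000 hits just above program the ring's bus address and length into the RDBA/RDLEN registers and track the hardware head and tail through rdh/rdt; the cleaning and refill hits that follow advance next_to_clean and next_to_use around the same ring. A minimal sketch of that producer/consumer indexing, assuming hypothetical names (my_ring, my_post_rx_buffer and the field names) rather than the driver's own:

/*
 * Sketch only -- not the e1000 code itself.  next_to_clean chases
 * descriptors the NIC has completed; next_to_use marks where fresh
 * buffers are posted; writing the tail register hands them to HW.
 */
#include <linux/io.h>

struct my_ring {
	unsigned int count;		/* number of descriptors */
	unsigned int next_to_use;	/* next slot to hand to HW */
	unsigned int next_to_clean;	/* next slot HW will complete */
	void __iomem *tail;		/* mapped tail (RDT) register */
};

static void my_post_rx_buffer(struct my_ring *ring, unsigned int i)
{
	/* map a fresh receive buffer and write descriptor i (elided) */
}

static void my_refill_rx(struct my_ring *ring, unsigned int to_fill)
{
	unsigned int i = ring->next_to_use;

	while (to_fill--) {
		my_post_rx_buffer(ring, i);
		if (++i == ring->count)
			i = 0;			/* descriptor rings wrap */
	}
	if (ring->next_to_use != i) {
		ring->next_to_use = i;
		writel(i, ring->tail);		/* expose buffers to the NIC */
	}
}

The e1000_alloc_rx_buffers() hits further down show this same wrap-and-write-tail shape, and the e1000e entries route the tail write through e1000e_update_rdt_wa() as a hardware workaround.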
rx_ring 2026 drivers/net/ethernet/intel/e1000/e1000_main.c struct e1000_rx_ring *rx_ring) rx_ring 2030 drivers/net/ethernet/intel/e1000/e1000_main.c e1000_clean_rx_ring(adapter, rx_ring); rx_ring 2032 drivers/net/ethernet/intel/e1000/e1000_main.c vfree(rx_ring->buffer_info); rx_ring 2033 drivers/net/ethernet/intel/e1000/e1000_main.c rx_ring->buffer_info = NULL; rx_ring 2035 drivers/net/ethernet/intel/e1000/e1000_main.c dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, rx_ring 2036 drivers/net/ethernet/intel/e1000/e1000_main.c rx_ring->dma); rx_ring 2038 drivers/net/ethernet/intel/e1000/e1000_main.c rx_ring->desc = NULL; rx_ring 2052 drivers/net/ethernet/intel/e1000/e1000_main.c e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); rx_ring 2078 drivers/net/ethernet/intel/e1000/e1000_main.c struct e1000_rx_ring *rx_ring) rx_ring 2087 drivers/net/ethernet/intel/e1000/e1000_main.c for (i = 0; i < rx_ring->count; i++) { rx_ring 2088 drivers/net/ethernet/intel/e1000/e1000_main.c buffer_info = &rx_ring->buffer_info[i]; rx_ring 2114 drivers/net/ethernet/intel/e1000/e1000_main.c rx_ring->rx_skb_top = NULL; rx_ring 2116 drivers/net/ethernet/intel/e1000/e1000_main.c size = sizeof(struct e1000_rx_buffer) * rx_ring->count; rx_ring 2117 drivers/net/ethernet/intel/e1000/e1000_main.c memset(rx_ring->buffer_info, 0, size); rx_ring 2120 drivers/net/ethernet/intel/e1000/e1000_main.c memset(rx_ring->desc, 0, rx_ring->size); rx_ring 2122 drivers/net/ethernet/intel/e1000/e1000_main.c rx_ring->next_to_clean = 0; rx_ring 2123 drivers/net/ethernet/intel/e1000/e1000_main.c rx_ring->next_to_use = 0; rx_ring 2125 drivers/net/ethernet/intel/e1000/e1000_main.c writel(0, hw->hw_addr + rx_ring->rdh); rx_ring 2126 drivers/net/ethernet/intel/e1000/e1000_main.c writel(0, hw->hw_addr + rx_ring->rdt); rx_ring 2138 drivers/net/ethernet/intel/e1000/e1000_main.c e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); rx_ring 2179 drivers/net/ethernet/intel/e1000/e1000_main.c struct e1000_rx_ring *ring = &adapter->rx_ring[0]; rx_ring 3356 drivers/net/ethernet/intel/e1000/e1000_main.c struct e1000_rx_ring *rx_ring = adapter->rx_ring; rx_ring 3443 drivers/net/ethernet/intel/e1000/e1000_main.c for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { rx_ring 3444 drivers/net/ethernet/intel/e1000/e1000_main.c struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); rx_ring 3445 drivers/net/ethernet/intel/e1000/e1000_main.c struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i]; rx_ring 3450 drivers/net/ethernet/intel/e1000/e1000_main.c if (i == rx_ring->next_to_use) rx_ring 3452 drivers/net/ethernet/intel/e1000/e1000_main.c else if (i == rx_ring->next_to_clean) rx_ring 3798 drivers/net/ethernet/intel/e1000/e1000_main.c adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); rx_ring 4119 drivers/net/ethernet/intel/e1000/e1000_main.c struct e1000_rx_ring *rx_ring, rx_ring 4132 drivers/net/ethernet/intel/e1000/e1000_main.c i = rx_ring->next_to_clean; rx_ring 4133 drivers/net/ethernet/intel/e1000/e1000_main.c rx_desc = E1000_RX_DESC(*rx_ring, i); rx_ring 4134 drivers/net/ethernet/intel/e1000/e1000_main.c buffer_info = &rx_ring->buffer_info[i]; rx_ring 4147 drivers/net/ethernet/intel/e1000/e1000_main.c if (++i == rx_ring->count) rx_ring 4150 drivers/net/ethernet/intel/e1000/e1000_main.c next_rxd = E1000_RX_DESC(*rx_ring, i); rx_ring 4153 drivers/net/ethernet/intel/e1000/e1000_main.c next_buffer = &rx_ring->buffer_info[i]; rx_ring 4178 drivers/net/ethernet/intel/e1000/e1000_main.c 
dev_kfree_skb(rx_ring->rx_skb_top); rx_ring 4179 drivers/net/ethernet/intel/e1000/e1000_main.c rx_ring->rx_skb_top = NULL; rx_ring 4184 drivers/net/ethernet/intel/e1000/e1000_main.c #define rxtop rx_ring->rx_skb_top rx_ring 4287 drivers/net/ethernet/intel/e1000/e1000_main.c adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); rx_ring 4295 drivers/net/ethernet/intel/e1000/e1000_main.c rx_ring->next_to_clean = i; rx_ring 4297 drivers/net/ethernet/intel/e1000/e1000_main.c cleaned_count = E1000_DESC_UNUSED(rx_ring); rx_ring 4299 drivers/net/ethernet/intel/e1000/e1000_main.c adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); rx_ring 4340 drivers/net/ethernet/intel/e1000/e1000_main.c struct e1000_rx_ring *rx_ring, rx_ring 4353 drivers/net/ethernet/intel/e1000/e1000_main.c i = rx_ring->next_to_clean; rx_ring 4354 drivers/net/ethernet/intel/e1000/e1000_main.c rx_desc = E1000_RX_DESC(*rx_ring, i); rx_ring 4355 drivers/net/ethernet/intel/e1000/e1000_main.c buffer_info = &rx_ring->buffer_info[i]; rx_ring 4390 drivers/net/ethernet/intel/e1000/e1000_main.c if (++i == rx_ring->count) rx_ring 4393 drivers/net/ethernet/intel/e1000/e1000_main.c next_rxd = E1000_RX_DESC(*rx_ring, i); rx_ring 4396 drivers/net/ethernet/intel/e1000/e1000_main.c next_buffer = &rx_ring->buffer_info[i]; rx_ring 4460 drivers/net/ethernet/intel/e1000/e1000_main.c adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); rx_ring 4468 drivers/net/ethernet/intel/e1000/e1000_main.c rx_ring->next_to_clean = i; rx_ring 4470 drivers/net/ethernet/intel/e1000/e1000_main.c cleaned_count = E1000_DESC_UNUSED(rx_ring); rx_ring 4472 drivers/net/ethernet/intel/e1000/e1000_main.c adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); rx_ring 4489 drivers/net/ethernet/intel/e1000/e1000_main.c struct e1000_rx_ring *rx_ring, int cleaned_count) rx_ring 4496 drivers/net/ethernet/intel/e1000/e1000_main.c i = rx_ring->next_to_use; rx_ring 4497 drivers/net/ethernet/intel/e1000/e1000_main.c buffer_info = &rx_ring->buffer_info[i]; rx_ring 4523 drivers/net/ethernet/intel/e1000/e1000_main.c rx_desc = E1000_RX_DESC(*rx_ring, i); rx_ring 4526 drivers/net/ethernet/intel/e1000/e1000_main.c if (unlikely(++i == rx_ring->count)) rx_ring 4528 drivers/net/ethernet/intel/e1000/e1000_main.c buffer_info = &rx_ring->buffer_info[i]; rx_ring 4531 drivers/net/ethernet/intel/e1000/e1000_main.c if (likely(rx_ring->next_to_use != i)) { rx_ring 4532 drivers/net/ethernet/intel/e1000/e1000_main.c rx_ring->next_to_use = i; rx_ring 4534 drivers/net/ethernet/intel/e1000/e1000_main.c i = (rx_ring->count - 1); rx_ring 4542 drivers/net/ethernet/intel/e1000/e1000_main.c writel(i, adapter->hw.hw_addr + rx_ring->rdt); rx_ring 4551 drivers/net/ethernet/intel/e1000/e1000_main.c struct e1000_rx_ring *rx_ring, rx_ring 4561 drivers/net/ethernet/intel/e1000/e1000_main.c i = rx_ring->next_to_use; rx_ring 4562 drivers/net/ethernet/intel/e1000/e1000_main.c buffer_info = &rx_ring->buffer_info[i]; rx_ring 4638 drivers/net/ethernet/intel/e1000/e1000_main.c rx_desc = E1000_RX_DESC(*rx_ring, i); rx_ring 4641 drivers/net/ethernet/intel/e1000/e1000_main.c if (unlikely(++i == rx_ring->count)) rx_ring 4643 drivers/net/ethernet/intel/e1000/e1000_main.c buffer_info = &rx_ring->buffer_info[i]; rx_ring 4646 drivers/net/ethernet/intel/e1000/e1000_main.c if (likely(rx_ring->next_to_use != i)) { rx_ring 4647 drivers/net/ethernet/intel/e1000/e1000_main.c rx_ring->next_to_use = i; rx_ring 4649 drivers/net/ethernet/intel/e1000/e1000_main.c i = (rx_ring->count - 1); rx_ring 4657 
drivers/net/ethernet/intel/e1000/e1000_main.c writel(i, hw->hw_addr + rx_ring->rdt); rx_ring 284 drivers/net/ethernet/intel/e1000/e1000_param.c struct e1000_rx_ring *rx_ring = adapter->rx_ring; rx_ring 302 drivers/net/ethernet/intel/e1000/e1000_param.c rx_ring->count = RxDescriptors[bd]; rx_ring 303 drivers/net/ethernet/intel/e1000/e1000_param.c e1000_validate_option(&rx_ring->count, &opt, adapter); rx_ring 304 drivers/net/ethernet/intel/e1000/e1000_param.c rx_ring->count = ALIGN(rx_ring->count, rx_ring 307 drivers/net/ethernet/intel/e1000/e1000_param.c rx_ring->count = opt.def; rx_ring 310 drivers/net/ethernet/intel/e1000/e1000_param.c rx_ring[i].count = rx_ring->count; rx_ring 255 drivers/net/ethernet/intel/e1000e/e1000.h struct e1000_ring *rx_ring; rx_ring 694 drivers/net/ethernet/intel/e1000e/ethtool.c adapter->rx_ring->count = new_rx_count; rx_ring 735 drivers/net/ethernet/intel/e1000e/ethtool.c memcpy(temp_rx, adapter->rx_ring, size); rx_ring 749 drivers/net/ethernet/intel/e1000e/ethtool.c e1000e_free_rx_resources(adapter->rx_ring); rx_ring 750 drivers/net/ethernet/intel/e1000e/ethtool.c memcpy(adapter->rx_ring, temp_rx, size); rx_ring 1115 drivers/net/ethernet/intel/e1000e/ethtool.c struct e1000_ring *rx_ring = &adapter->test_rx_ring; rx_ring 1133 drivers/net/ethernet/intel/e1000e/ethtool.c if (rx_ring->desc && rx_ring->buffer_info) { rx_ring 1134 drivers/net/ethernet/intel/e1000e/ethtool.c for (i = 0; i < rx_ring->count; i++) { rx_ring 1135 drivers/net/ethernet/intel/e1000e/ethtool.c buffer_info = &rx_ring->buffer_info[i]; rx_ring 1150 drivers/net/ethernet/intel/e1000e/ethtool.c if (rx_ring->desc) { rx_ring 1151 drivers/net/ethernet/intel/e1000e/ethtool.c dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, rx_ring 1152 drivers/net/ethernet/intel/e1000e/ethtool.c rx_ring->dma); rx_ring 1153 drivers/net/ethernet/intel/e1000e/ethtool.c rx_ring->desc = NULL; rx_ring 1158 drivers/net/ethernet/intel/e1000e/ethtool.c kfree(rx_ring->buffer_info); rx_ring 1159 drivers/net/ethernet/intel/e1000e/ethtool.c rx_ring->buffer_info = NULL; rx_ring 1165 drivers/net/ethernet/intel/e1000e/ethtool.c struct e1000_ring *rx_ring = &adapter->test_rx_ring; rx_ring 1235 drivers/net/ethernet/intel/e1000e/ethtool.c if (!rx_ring->count) rx_ring 1236 drivers/net/ethernet/intel/e1000e/ethtool.c rx_ring->count = E1000_DEFAULT_RXD; rx_ring 1238 drivers/net/ethernet/intel/e1000e/ethtool.c rx_ring->buffer_info = kcalloc(rx_ring->count, rx_ring 1240 drivers/net/ethernet/intel/e1000e/ethtool.c if (!rx_ring->buffer_info) { rx_ring 1245 drivers/net/ethernet/intel/e1000e/ethtool.c rx_ring->size = rx_ring->count * sizeof(union e1000_rx_desc_extended); rx_ring 1246 drivers/net/ethernet/intel/e1000e/ethtool.c rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, rx_ring 1247 drivers/net/ethernet/intel/e1000e/ethtool.c &rx_ring->dma, GFP_KERNEL); rx_ring 1248 drivers/net/ethernet/intel/e1000e/ethtool.c if (!rx_ring->desc) { rx_ring 1252 drivers/net/ethernet/intel/e1000e/ethtool.c rx_ring->next_to_use = 0; rx_ring 1253 drivers/net/ethernet/intel/e1000e/ethtool.c rx_ring->next_to_clean = 0; rx_ring 1258 drivers/net/ethernet/intel/e1000e/ethtool.c ew32(RDBAL(0), ((u64)rx_ring->dma & 0xFFFFFFFF)); rx_ring 1259 drivers/net/ethernet/intel/e1000e/ethtool.c ew32(RDBAH(0), ((u64)rx_ring->dma >> 32)); rx_ring 1260 drivers/net/ethernet/intel/e1000e/ethtool.c ew32(RDLEN(0), rx_ring->size); rx_ring 1270 drivers/net/ethernet/intel/e1000e/ethtool.c for (i = 0; i < rx_ring->count; i++) { rx_ring 1280 
drivers/net/ethernet/intel/e1000e/ethtool.c rx_ring->buffer_info[i].skb = skb;
rx_ring 1281 drivers/net/ethernet/intel/e1000e/ethtool.c rx_ring->buffer_info[i].dma =
rx_ring 1285 drivers/net/ethernet/intel/e1000e/ethtool.c rx_ring->buffer_info[i].dma)) {
rx_ring 1289 drivers/net/ethernet/intel/e1000e/ethtool.c rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
rx_ring 1291 drivers/net/ethernet/intel/e1000e/ethtool.c cpu_to_le64(rx_ring->buffer_info[i].dma);
rx_ring 1628 drivers/net/ethernet/intel/e1000e/ethtool.c struct e1000_ring *rx_ring = &adapter->test_rx_ring;
rx_ring 1638 drivers/net/ethernet/intel/e1000e/ethtool.c ew32(RDT(0), rx_ring->count - 1);
rx_ring 1645 drivers/net/ethernet/intel/e1000e/ethtool.c if (rx_ring->count <= tx_ring->count)
rx_ring 1648 drivers/net/ethernet/intel/e1000e/ethtool.c lc = ((rx_ring->count / 64) * 2) + 1;
rx_ring 1674 drivers/net/ethernet/intel/e1000e/ethtool.c buffer_info = &rx_ring->buffer_info[l];
rx_ring 1685 drivers/net/ethernet/intel/e1000e/ethtool.c if (l == rx_ring->count)
rx_ring 208 drivers/net/ethernet/intel/e1000e/netdev.c struct e1000_ring *rx_ring = adapter->rx_ring;
rx_ring 324 drivers/net/ethernet/intel/e1000e/netdev.c 0, rx_ring->next_to_use, rx_ring->next_to_clean);
rx_ring 360 drivers/net/ethernet/intel/e1000e/netdev.c for (i = 0; i < rx_ring->count; i++) {
rx_ring 362 drivers/net/ethernet/intel/e1000e/netdev.c buffer_info = &rx_ring->buffer_info[i];
rx_ring 363 drivers/net/ethernet/intel/e1000e/netdev.c rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
rx_ring 368 drivers/net/ethernet/intel/e1000e/netdev.c if (i == rx_ring->next_to_use)
rx_ring 370 drivers/net/ethernet/intel/e1000e/netdev.c else if (i == rx_ring->next_to_clean)
rx_ring 426 drivers/net/ethernet/intel/e1000e/netdev.c for (i = 0; i < rx_ring->count; i++) {
rx_ring 429 drivers/net/ethernet/intel/e1000e/netdev.c buffer_info = &rx_ring->buffer_info[i];
rx_ring 430 drivers/net/ethernet/intel/e1000e/netdev.c rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
rx_ring 434 drivers/net/ethernet/intel/e1000e/netdev.c if (i == rx_ring->next_to_use)
rx_ring 436 drivers/net/ethernet/intel/e1000e/netdev.c else if (i == rx_ring->next_to_clean)
rx_ring 606 drivers/net/ethernet/intel/e1000e/netdev.c static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
rx_ring 608 drivers/net/ethernet/intel/e1000e/netdev.c struct e1000_adapter *adapter = rx_ring->adapter;
rx_ring 612 drivers/net/ethernet/intel/e1000e/netdev.c writel(i, rx_ring->tail);
rx_ring 614 drivers/net/ethernet/intel/e1000e/netdev.c if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
rx_ring 644 drivers/net/ethernet/intel/e1000e/netdev.c static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
rx_ring 647 drivers/net/ethernet/intel/e1000e/netdev.c struct e1000_adapter *adapter = rx_ring->adapter;
rx_ring 656 drivers/net/ethernet/intel/e1000e/netdev.c i = rx_ring->next_to_use;
rx_ring 657 drivers/net/ethernet/intel/e1000e/netdev.c buffer_info = &rx_ring->buffer_info[i];
rx_ring 684 drivers/net/ethernet/intel/e1000e/netdev.c rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
rx_ring 695 drivers/net/ethernet/intel/e1000e/netdev.c e1000e_update_rdt_wa(rx_ring, i);
rx_ring 697 drivers/net/ethernet/intel/e1000e/netdev.c writel(i, rx_ring->tail);
rx_ring 700 drivers/net/ethernet/intel/e1000e/netdev.c if (i == rx_ring->count)
rx_ring 702 drivers/net/ethernet/intel/e1000e/netdev.c buffer_info = &rx_ring->buffer_info[i];
rx_ring 705 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->next_to_use = i;
rx_ring 712 drivers/net/ethernet/intel/e1000e/netdev.c static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
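
The e1000e entries just above (netdev.c 644-705) trace the refill discipline shared by every driver in this listing: start at next_to_use, fill descriptors one by one, wrap the index at count, and only then publish the new position to the hardware tail (writel(i, rx_ring->tail), or e1000e_update_rdt_wa() on the workaround path). A minimal userspace sketch of that cursor arithmetic follows; the toy_* types and helpers are illustrative stand-ins, not the driver's structures.

/*
 * Sketch of the refill pattern visible in the e1000_alloc_rx_buffers()
 * entries above: advance a next_to_use cursor, wrap at count, and
 * publish the new position to the (modelled) tail register only after
 * the descriptors are written. Toy types, not the driver's.
 */
#include <stdio.h>

struct toy_ring {
	unsigned int count;        /* number of descriptors */
	unsigned int next_to_use;  /* first slot software may fill */
	unsigned int tail;         /* models the RDT register */
};

static void toy_fill_slot(struct toy_ring *ring, unsigned int i)
{
	/* a real driver would map a buffer and write desc[i] here */
	printf("fill slot %u\n", i);
}

static void toy_alloc_rx_buffers(struct toy_ring *ring, int cleaned_count)
{
	unsigned int i = ring->next_to_use;

	while (cleaned_count--) {
		toy_fill_slot(ring, i);
		if (++i == ring->count)	/* wrap, as the ++i == count tests do */
			i = 0;
	}

	if (ring->next_to_use != i) {
		ring->next_to_use = i;
		/* the driver orders the descriptor writes before the tail
		 * bump (wmb() before writel()); plain assignment here */
		ring->tail = i;
	}
}

int main(void)
{
	struct toy_ring ring = { .count = 8, .next_to_use = 6 };

	toy_alloc_rx_buffers(&ring, 4);	/* fills 6,7,0,1; tail becomes 2 */
	printf("next_to_use=%u tail=%u\n", ring.next_to_use, ring.tail);
	return 0;
}
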
rx_ring 715 drivers/net/ethernet/intel/e1000e/netdev.c struct e1000_adapter *adapter = rx_ring->adapter;
rx_ring 724 drivers/net/ethernet/intel/e1000e/netdev.c i = rx_ring->next_to_use;
rx_ring 725 drivers/net/ethernet/intel/e1000e/netdev.c buffer_info = &rx_ring->buffer_info[i];
rx_ring 728 drivers/net/ethernet/intel/e1000e/netdev.c rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
rx_ring 795 drivers/net/ethernet/intel/e1000e/netdev.c e1000e_update_rdt_wa(rx_ring, i << 1);
rx_ring 797 drivers/net/ethernet/intel/e1000e/netdev.c writel(i << 1, rx_ring->tail);
rx_ring 801 drivers/net/ethernet/intel/e1000e/netdev.c if (i == rx_ring->count)
rx_ring 803 drivers/net/ethernet/intel/e1000e/netdev.c buffer_info = &rx_ring->buffer_info[i];
rx_ring 807 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->next_to_use = i;
rx_ring 816 drivers/net/ethernet/intel/e1000e/netdev.c static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
rx_ring 819 drivers/net/ethernet/intel/e1000e/netdev.c struct e1000_adapter *adapter = rx_ring->adapter;
rx_ring 828 drivers/net/ethernet/intel/e1000e/netdev.c i = rx_ring->next_to_use;
rx_ring 829 drivers/net/ethernet/intel/e1000e/netdev.c buffer_info = &rx_ring->buffer_info[i];
rx_ring 867 drivers/net/ethernet/intel/e1000e/netdev.c rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
rx_ring 870 drivers/net/ethernet/intel/e1000e/netdev.c if (unlikely(++i == rx_ring->count))
rx_ring 872 drivers/net/ethernet/intel/e1000e/netdev.c buffer_info = &rx_ring->buffer_info[i];
rx_ring 875 drivers/net/ethernet/intel/e1000e/netdev.c if (likely(rx_ring->next_to_use != i)) {
rx_ring 876 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->next_to_use = i;
rx_ring 878 drivers/net/ethernet/intel/e1000e/netdev.c i = (rx_ring->count - 1);
rx_ring 887 drivers/net/ethernet/intel/e1000e/netdev.c e1000e_update_rdt_wa(rx_ring, i);
rx_ring 889 drivers/net/ethernet/intel/e1000e/netdev.c writel(i, rx_ring->tail);
rx_ring 907 drivers/net/ethernet/intel/e1000e/netdev.c static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
rx_ring 910 drivers/net/ethernet/intel/e1000e/netdev.c struct e1000_adapter *adapter = rx_ring->adapter;
rx_ring 922 drivers/net/ethernet/intel/e1000e/netdev.c i = rx_ring->next_to_clean;
rx_ring 923 drivers/net/ethernet/intel/e1000e/netdev.c rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
rx_ring 925 drivers/net/ethernet/intel/e1000e/netdev.c buffer_info = &rx_ring->buffer_info[i];
rx_ring 941 drivers/net/ethernet/intel/e1000e/netdev.c if (i == rx_ring->count)
rx_ring 943 drivers/net/ethernet/intel/e1000e/netdev.c next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
rx_ring 946 drivers/net/ethernet/intel/e1000e/netdev.c next_buffer = &rx_ring->buffer_info[i];
rx_ring 1033 drivers/net/ethernet/intel/e1000e/netdev.c adapter->alloc_rx_buf(rx_ring, cleaned_count,
rx_ring 1044 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->next_to_clean = i;
rx_ring 1046 drivers/net/ethernet/intel/e1000e/netdev.c cleaned_count = e1000_desc_unused(rx_ring);
rx_ring 1048 drivers/net/ethernet/intel/e1000e/netdev.c adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
rx_ring 1301 drivers/net/ethernet/intel/e1000e/netdev.c static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
rx_ring 1304 drivers/net/ethernet/intel/e1000e/netdev.c struct e1000_adapter *adapter = rx_ring->adapter;
rx_ring 1318 drivers/net/ethernet/intel/e1000e/netdev.c i = rx_ring->next_to_clean; rx_ring
1319 drivers/net/ethernet/intel/e1000e/netdev.c rx_desc = E1000_RX_DESC_PS(*rx_ring, i); rx_ring 1321 drivers/net/ethernet/intel/e1000e/netdev.c buffer_info = &rx_ring->buffer_info[i]; rx_ring 1334 drivers/net/ethernet/intel/e1000e/netdev.c if (i == rx_ring->count) rx_ring 1336 drivers/net/ethernet/intel/e1000e/netdev.c next_rxd = E1000_RX_DESC_PS(*rx_ring, i); rx_ring 1339 drivers/net/ethernet/intel/e1000e/netdev.c next_buffer = &rx_ring->buffer_info[i]; rx_ring 1465 drivers/net/ethernet/intel/e1000e/netdev.c adapter->alloc_rx_buf(rx_ring, cleaned_count, rx_ring 1476 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->next_to_clean = i; rx_ring 1478 drivers/net/ethernet/intel/e1000e/netdev.c cleaned_count = e1000_desc_unused(rx_ring); rx_ring 1480 drivers/net/ethernet/intel/e1000e/netdev.c adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); rx_ring 1506 drivers/net/ethernet/intel/e1000e/netdev.c static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, rx_ring 1509 drivers/net/ethernet/intel/e1000e/netdev.c struct e1000_adapter *adapter = rx_ring->adapter; rx_ring 1521 drivers/net/ethernet/intel/e1000e/netdev.c i = rx_ring->next_to_clean; rx_ring 1522 drivers/net/ethernet/intel/e1000e/netdev.c rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); rx_ring 1524 drivers/net/ethernet/intel/e1000e/netdev.c buffer_info = &rx_ring->buffer_info[i]; rx_ring 1538 drivers/net/ethernet/intel/e1000e/netdev.c if (i == rx_ring->count) rx_ring 1540 drivers/net/ethernet/intel/e1000e/netdev.c next_rxd = E1000_RX_DESC_EXT(*rx_ring, i); rx_ring 1543 drivers/net/ethernet/intel/e1000e/netdev.c next_buffer = &rx_ring->buffer_info[i]; rx_ring 1560 drivers/net/ethernet/intel/e1000e/netdev.c if (rx_ring->rx_skb_top) rx_ring 1561 drivers/net/ethernet/intel/e1000e/netdev.c dev_kfree_skb_irq(rx_ring->rx_skb_top); rx_ring 1562 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->rx_skb_top = NULL; rx_ring 1565 drivers/net/ethernet/intel/e1000e/netdev.c #define rxtop (rx_ring->rx_skb_top) rx_ring 1647 drivers/net/ethernet/intel/e1000e/netdev.c adapter->alloc_rx_buf(rx_ring, cleaned_count, rx_ring 1658 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->next_to_clean = i; rx_ring 1660 drivers/net/ethernet/intel/e1000e/netdev.c cleaned_count = e1000_desc_unused(rx_ring); rx_ring 1662 drivers/net/ethernet/intel/e1000e/netdev.c adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); rx_ring 1673 drivers/net/ethernet/intel/e1000e/netdev.c static void e1000_clean_rx_ring(struct e1000_ring *rx_ring) rx_ring 1675 drivers/net/ethernet/intel/e1000e/netdev.c struct e1000_adapter *adapter = rx_ring->adapter; rx_ring 1682 drivers/net/ethernet/intel/e1000e/netdev.c for (i = 0; i < rx_ring->count; i++) { rx_ring 1683 drivers/net/ethernet/intel/e1000e/netdev.c buffer_info = &rx_ring->buffer_info[i]; rx_ring 1722 drivers/net/ethernet/intel/e1000e/netdev.c if (rx_ring->rx_skb_top) { rx_ring 1723 drivers/net/ethernet/intel/e1000e/netdev.c dev_kfree_skb(rx_ring->rx_skb_top); rx_ring 1724 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->rx_skb_top = NULL; rx_ring 1728 drivers/net/ethernet/intel/e1000e/netdev.c memset(rx_ring->desc, 0, rx_ring->size); rx_ring 1730 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->next_to_clean = 0; rx_ring 1731 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->next_to_use = 0; rx_ring 1941 drivers/net/ethernet/intel/e1000e/netdev.c struct e1000_ring *rx_ring = adapter->rx_ring; rx_ring 1946 drivers/net/ethernet/intel/e1000e/netdev.c if (rx_ring->set_itr) { rx_ring 1947 
drivers/net/ethernet/intel/e1000e/netdev.c u32 itr = rx_ring->itr_val ? rx_ring 1948 drivers/net/ethernet/intel/e1000e/netdev.c 1000000000 / (rx_ring->itr_val * 256) : 0; rx_ring 1950 drivers/net/ethernet/intel/e1000e/netdev.c writel(itr, rx_ring->itr_register); rx_ring 1951 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->set_itr = 0; rx_ring 1971 drivers/net/ethernet/intel/e1000e/netdev.c struct e1000_ring *rx_ring = adapter->rx_ring; rx_ring 1987 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->ims_val = E1000_IMS_RXQ0; rx_ring 1988 drivers/net/ethernet/intel/e1000e/netdev.c adapter->eiac_mask |= rx_ring->ims_val; rx_ring 1989 drivers/net/ethernet/intel/e1000e/netdev.c if (rx_ring->itr_val) rx_ring 1990 drivers/net/ethernet/intel/e1000e/netdev.c writel(1000000000 / (rx_ring->itr_val * 256), rx_ring 1991 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->itr_register); rx_ring 1993 drivers/net/ethernet/intel/e1000e/netdev.c writel(1, rx_ring->itr_register); rx_ring 2010 drivers/net/ethernet/intel/e1000e/netdev.c if (rx_ring->itr_val) rx_ring 2011 drivers/net/ethernet/intel/e1000e/netdev.c writel(1000000000 / (rx_ring->itr_val * 256), rx_ring 2107 drivers/net/ethernet/intel/e1000e/netdev.c snprintf(adapter->rx_ring->name, rx_ring 2108 drivers/net/ethernet/intel/e1000e/netdev.c sizeof(adapter->rx_ring->name) - 1, rx_ring 2111 drivers/net/ethernet/intel/e1000e/netdev.c memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); rx_ring 2113 drivers/net/ethernet/intel/e1000e/netdev.c e1000_intr_msix_rx, 0, adapter->rx_ring->name, rx_ring 2117 drivers/net/ethernet/intel/e1000e/netdev.c adapter->rx_ring->itr_register = adapter->hw.hw_addr + rx_ring 2119 drivers/net/ethernet/intel/e1000e/netdev.c adapter->rx_ring->itr_val = adapter->itr; rx_ring 2356 drivers/net/ethernet/intel/e1000e/netdev.c int e1000e_setup_rx_resources(struct e1000_ring *rx_ring) rx_ring 2358 drivers/net/ethernet/intel/e1000e/netdev.c struct e1000_adapter *adapter = rx_ring->adapter; rx_ring 2362 drivers/net/ethernet/intel/e1000e/netdev.c size = sizeof(struct e1000_buffer) * rx_ring->count; rx_ring 2363 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->buffer_info = vzalloc(size); rx_ring 2364 drivers/net/ethernet/intel/e1000e/netdev.c if (!rx_ring->buffer_info) rx_ring 2367 drivers/net/ethernet/intel/e1000e/netdev.c for (i = 0; i < rx_ring->count; i++) { rx_ring 2368 drivers/net/ethernet/intel/e1000e/netdev.c buffer_info = &rx_ring->buffer_info[i]; rx_ring 2379 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->size = rx_ring->count * desc_len; rx_ring 2380 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring 2382 drivers/net/ethernet/intel/e1000e/netdev.c err = e1000_alloc_ring_dma(adapter, rx_ring); rx_ring 2386 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->next_to_clean = 0; rx_ring 2387 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->next_to_use = 0; rx_ring 2388 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->rx_skb_top = NULL; rx_ring 2393 drivers/net/ethernet/intel/e1000e/netdev.c for (i = 0; i < rx_ring->count; i++) { rx_ring 2394 drivers/net/ethernet/intel/e1000e/netdev.c buffer_info = &rx_ring->buffer_info[i]; rx_ring 2398 drivers/net/ethernet/intel/e1000e/netdev.c vfree(rx_ring->buffer_info); rx_ring 2456 drivers/net/ethernet/intel/e1000e/netdev.c void e1000e_free_rx_resources(struct e1000_ring *rx_ring) rx_ring 2458 drivers/net/ethernet/intel/e1000e/netdev.c struct e1000_adapter *adapter = rx_ring->adapter; rx_ring 2462 
drivers/net/ethernet/intel/e1000e/netdev.c e1000_clean_rx_ring(rx_ring); rx_ring 2464 drivers/net/ethernet/intel/e1000e/netdev.c for (i = 0; i < rx_ring->count; i++) rx_ring 2465 drivers/net/ethernet/intel/e1000e/netdev.c kfree(rx_ring->buffer_info[i].ps_pages); rx_ring 2467 drivers/net/ethernet/intel/e1000e/netdev.c vfree(rx_ring->buffer_info); rx_ring 2468 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->buffer_info = NULL; rx_ring 2470 drivers/net/ethernet/intel/e1000e/netdev.c dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, rx_ring 2471 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->dma); rx_ring 2472 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->desc = NULL; rx_ring 2591 drivers/net/ethernet/intel/e1000e/netdev.c adapter->rx_ring->itr_val = new_itr; rx_ring 2593 drivers/net/ethernet/intel/e1000e/netdev.c adapter->rx_ring->set_itr = 1; rx_ring 2637 drivers/net/ethernet/intel/e1000e/netdev.c adapter->rx_ring = kzalloc(size, GFP_KERNEL); rx_ring 2638 drivers/net/ethernet/intel/e1000e/netdev.c if (!adapter->rx_ring) rx_ring 2640 drivers/net/ethernet/intel/e1000e/netdev.c adapter->rx_ring->count = adapter->rx_ring_count; rx_ring 2641 drivers/net/ethernet/intel/e1000e/netdev.c adapter->rx_ring->adapter = adapter; rx_ring 2646 drivers/net/ethernet/intel/e1000e/netdev.c kfree(adapter->rx_ring); rx_ring 2667 drivers/net/ethernet/intel/e1000e/netdev.c (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) rx_ring 2670 drivers/net/ethernet/intel/e1000e/netdev.c adapter->clean_rx(adapter->rx_ring, &work_done, budget); rx_ring 2683 drivers/net/ethernet/intel/e1000e/netdev.c ew32(IMS, adapter->rx_ring->ims_val); rx_ring 3186 drivers/net/ethernet/intel/e1000e/netdev.c struct e1000_ring *rx_ring = adapter->rx_ring; rx_ring 3192 drivers/net/ethernet/intel/e1000e/netdev.c rdlen = rx_ring->count * rx_ring 3197 drivers/net/ethernet/intel/e1000e/netdev.c rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); rx_ring 3201 drivers/net/ethernet/intel/e1000e/netdev.c rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); rx_ring 3244 drivers/net/ethernet/intel/e1000e/netdev.c rdba = rx_ring->dma; rx_ring 3250 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0); rx_ring 3251 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0); rx_ring 3253 drivers/net/ethernet/intel/e1000e/netdev.c writel(0, rx_ring->head); rx_ring 3255 drivers/net/ethernet/intel/e1000e/netdev.c e1000e_update_rdt_wa(rx_ring, 0); rx_ring 3257 drivers/net/ethernet/intel/e1000e/netdev.c writel(0, rx_ring->tail); rx_ring 3744 drivers/net/ethernet/intel/e1000e/netdev.c struct e1000_ring *rx_ring = adapter->rx_ring; rx_ring 3757 drivers/net/ethernet/intel/e1000e/netdev.c adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL); rx_ring 4306 drivers/net/ethernet/intel/e1000e/netdev.c e1000_clean_rx_ring(adapter->rx_ring); rx_ring 4617 drivers/net/ethernet/intel/e1000e/netdev.c err = e1000e_setup_rx_resources(adapter->rx_ring); rx_ring 4682 drivers/net/ethernet/intel/e1000e/netdev.c e1000e_free_rx_resources(adapter->rx_ring); rx_ring 4727 drivers/net/ethernet/intel/e1000e/netdev.c e1000e_free_rx_resources(adapter->rx_ring); rx_ring 5387 drivers/net/ethernet/intel/e1000e/netdev.c ew32(ICS, adapter->rx_ring->ims_val); rx_ring 7380 drivers/net/ethernet/intel/e1000e/netdev.c kfree(adapter->rx_ring); rx_ring 7446 drivers/net/ethernet/intel/e1000e/netdev.c kfree(adapter->rx_ring); rx_ring 340 
drivers/net/ethernet/intel/fm10k/fm10k.h struct fm10k_ring *rx_ring[MAX_QUEUES]; rx_ring 488 drivers/net/ethernet/intel/fm10k/fm10k.h void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count); rx_ring 289 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c ring = interface->rx_ring[i]; rx_ring 552 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c interface->rx_ring[i]->count = new_rx_count; rx_ring 603 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c memcpy(&temp_ring[i], interface->rx_ring[i], rx_ring 618 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c fm10k_free_rx_resources(interface->rx_ring[i]); rx_ring 620 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c memcpy(interface->rx_ring[i], &temp_ring[i], rx_ring 70 drivers/net/ethernet/intel/fm10k/fm10k_main.c static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring, rx_ring 83 drivers/net/ethernet/intel/fm10k/fm10k_main.c rx_ring->rx_stats.alloc_failed++; rx_ring 88 drivers/net/ethernet/intel/fm10k/fm10k_main.c dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); rx_ring 93 drivers/net/ethernet/intel/fm10k/fm10k_main.c if (dma_mapping_error(rx_ring->dev, dma)) { rx_ring 96 drivers/net/ethernet/intel/fm10k/fm10k_main.c rx_ring->rx_stats.alloc_failed++; rx_ring 112 drivers/net/ethernet/intel/fm10k/fm10k_main.c void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count) rx_ring 116 drivers/net/ethernet/intel/fm10k/fm10k_main.c u16 i = rx_ring->next_to_use; rx_ring 122 drivers/net/ethernet/intel/fm10k/fm10k_main.c rx_desc = FM10K_RX_DESC(rx_ring, i); rx_ring 123 drivers/net/ethernet/intel/fm10k/fm10k_main.c bi = &rx_ring->rx_buffer[i]; rx_ring 124 drivers/net/ethernet/intel/fm10k/fm10k_main.c i -= rx_ring->count; rx_ring 127 drivers/net/ethernet/intel/fm10k/fm10k_main.c if (!fm10k_alloc_mapped_page(rx_ring, bi)) rx_ring 139 drivers/net/ethernet/intel/fm10k/fm10k_main.c rx_desc = FM10K_RX_DESC(rx_ring, 0); rx_ring 140 drivers/net/ethernet/intel/fm10k/fm10k_main.c bi = rx_ring->rx_buffer; rx_ring 141 drivers/net/ethernet/intel/fm10k/fm10k_main.c i -= rx_ring->count; rx_ring 150 drivers/net/ethernet/intel/fm10k/fm10k_main.c i += rx_ring->count; rx_ring 152 drivers/net/ethernet/intel/fm10k/fm10k_main.c if (rx_ring->next_to_use != i) { rx_ring 154 drivers/net/ethernet/intel/fm10k/fm10k_main.c rx_ring->next_to_use = i; rx_ring 157 drivers/net/ethernet/intel/fm10k/fm10k_main.c rx_ring->next_to_alloc = i; rx_ring 167 drivers/net/ethernet/intel/fm10k/fm10k_main.c writel(i, rx_ring->tail); rx_ring 178 drivers/net/ethernet/intel/fm10k/fm10k_main.c static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring, rx_ring 182 drivers/net/ethernet/intel/fm10k/fm10k_main.c u16 nta = rx_ring->next_to_alloc; rx_ring 184 drivers/net/ethernet/intel/fm10k/fm10k_main.c new_buff = &rx_ring->rx_buffer[nta]; rx_ring 188 drivers/net/ethernet/intel/fm10k/fm10k_main.c rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0;
rx_ring 194 drivers/net/ethernet/intel/fm10k/fm10k_main.c dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
rx_ring 299 drivers/net/ethernet/intel/fm10k/fm10k_main.c static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
rx_ring 307 drivers/net/ethernet/intel/fm10k/fm10k_main.c rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];
rx_ring 322 drivers/net/ethernet/intel/fm10k/fm10k_main.c skb = napi_alloc_skb(&rx_ring->q_vector->napi,
rx_ring 325 drivers/net/ethernet/intel/fm10k/fm10k_main.c rx_ring->rx_stats.alloc_failed++;
rx_ring 337 drivers/net/ethernet/intel/fm10k/fm10k_main.c dma_sync_single_range_for_cpu(rx_ring->dev,
rx_ring 346 drivers/net/ethernet/intel/fm10k/fm10k_main.c fm10k_reuse_rx_page(rx_ring, rx_buffer);
rx_ring 349 drivers/net/ethernet/intel/fm10k/fm10k_main.c dma_unmap_page(rx_ring->dev, rx_buffer->dma,
rx_ring 414 drivers/net/ethernet/intel/fm10k/fm10k_main.c static void fm10k_type_trans(struct fm10k_ring *rx_ring,
rx_ring 418 drivers/net/ethernet/intel/fm10k/fm10k_main.c struct net_device *dev = rx_ring->netdev;
rx_ring 419 drivers/net/ethernet/intel/fm10k/fm10k_main.c struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel);
rx_ring 434 drivers/net/ethernet/intel/fm10k/fm10k_main.c skb_record_rx_queue(skb, rx_ring->queue_index);
rx_ring 452 drivers/net/ethernet/intel/fm10k/fm10k_main.c static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
rx_ring 458 drivers/net/ethernet/intel/fm10k/fm10k_main.c fm10k_rx_hash(rx_ring, rx_desc, skb);
rx_ring 460 drivers/net/ethernet/intel/fm10k/fm10k_main.c fm10k_rx_checksum(rx_ring, rx_desc, skb);
rx_ring 471 drivers/net/ethernet/intel/fm10k/fm10k_main.c if ((vid & VLAN_VID_MASK) != rx_ring->vid)
rx_ring 478 drivers/net/ethernet/intel/fm10k/fm10k_main.c fm10k_type_trans(rx_ring, rx_desc, skb);
rx_ring 493 drivers/net/ethernet/intel/fm10k/fm10k_main.c static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring,
rx_ring 496 drivers/net/ethernet/intel/fm10k/fm10k_main.c u32 ntc = rx_ring->next_to_clean + 1;
rx_ring 499 drivers/net/ethernet/intel/fm10k/fm10k_main.c ntc = (ntc < rx_ring->count) ? ntc : 0;
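
fm10k_is_non_eop() above advances next_to_clean with a compare-and-reset wrap (ntc = (ntc < rx_ring->count) ? ntc : 0;) and prefetches the next descriptor; refill then happens once the gap between the two cursors, fm10k_desc_unused(), is large enough. A small sketch of that arithmetic under simplified types; toy_desc_unused() mirrors the clean-to-use gap idea rather than the exact kernel helper.

/*
 * Sketch of the cursor arithmetic behind the fm10k entries above:
 * wrap-around advance of next_to_clean, plus the refill budget as the
 * gap between the cleaner and allocator cursors. Toy names throughout.
 */
#include <stdio.h>

struct toy_ring {
	unsigned int count;
	unsigned int next_to_use;
	unsigned int next_to_clean;
};

/* mirrors: ntc = (ntc < rx_ring->count) ? ntc : 0; */
static unsigned int toy_advance(const struct toy_ring *r, unsigned int ntc)
{
	ntc++;
	return (ntc < r->count) ? ntc : 0;
}

/* slots the cleaner has freed but the allocator has not yet refilled */
static unsigned int toy_desc_unused(const struct toy_ring *r)
{
	if (r->next_to_clean > r->next_to_use)
		return r->next_to_clean - r->next_to_use - 1;
	return r->count + r->next_to_clean - r->next_to_use - 1;
}

int main(void)
{
	struct toy_ring r = { .count = 8, .next_to_use = 5, .next_to_clean = 7 };

	r.next_to_clean = toy_advance(&r, r.next_to_clean); /* 7 -> 0 */
	printf("next_to_clean=%u unused=%u\n",
	       r.next_to_clean, toy_desc_unused(&r));
	return 0;
}
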
rx_ring 500 drivers/net/ethernet/intel/fm10k/fm10k_main.c rx_ring->next_to_clean = ntc;
rx_ring 502 drivers/net/ethernet/intel/fm10k/fm10k_main.c prefetch(FM10K_RX_DESC(rx_ring, ntc));
rx_ring 524 drivers/net/ethernet/intel/fm10k/fm10k_main.c static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
rx_ring 533 drivers/net/ethernet/intel/fm10k/fm10k_main.c rx_ring->rx_stats.switch_errors++;
rx_ring 535 drivers/net/ethernet/intel/fm10k/fm10k_main.c rx_ring->rx_stats.drops++;
rx_ring 537 drivers/net/ethernet/intel/fm10k/fm10k_main.c rx_ring->rx_stats.pp_errors++;
rx_ring 539 drivers/net/ethernet/intel/fm10k/fm10k_main.c rx_ring->rx_stats.link_errors++;
rx_ring 541 drivers/net/ethernet/intel/fm10k/fm10k_main.c rx_ring->rx_stats.length_errors++;
rx_ring 543 drivers/net/ethernet/intel/fm10k/fm10k_main.c rx_ring->rx_stats.errors++;
rx_ring 566 drivers/net/ethernet/intel/fm10k/fm10k_main.c struct fm10k_ring *rx_ring,
rx_ring 569 drivers/net/ethernet/intel/fm10k/fm10k_main.c struct sk_buff *skb = rx_ring->skb;
rx_ring 571 drivers/net/ethernet/intel/fm10k/fm10k_main.c u16 cleaned_count = fm10k_desc_unused(rx_ring);
rx_ring 578 drivers/net/ethernet/intel/fm10k/fm10k_main.c fm10k_alloc_rx_buffers(rx_ring, cleaned_count);
rx_ring 582 drivers/net/ethernet/intel/fm10k/fm10k_main.c rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean);
rx_ring 594 drivers/net/ethernet/intel/fm10k/fm10k_main.c skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb);
rx_ring 603 drivers/net/ethernet/intel/fm10k/fm10k_main.c if (fm10k_is_non_eop(rx_ring, rx_desc))
rx_ring 607 drivers/net/ethernet/intel/fm10k/fm10k_main.c if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) {
rx_ring 613 drivers/net/ethernet/intel/fm10k/fm10k_main.c total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb);
rx_ring 625 drivers/net/ethernet/intel/fm10k/fm10k_main.c rx_ring->skb = skb;
rx_ring 627 drivers/net/ethernet/intel/fm10k/fm10k_main.c u64_stats_update_begin(&rx_ring->syncp);
rx_ring 628 drivers/net/ethernet/intel/fm10k/fm10k_main.c rx_ring->stats.packets += total_packets;
rx_ring 629 drivers/net/ethernet/intel/fm10k/fm10k_main.c rx_ring->stats.bytes += total_bytes;
rx_ring 630 drivers/net/ethernet/intel/fm10k/fm10k_main.c u64_stats_update_end(&rx_ring->syncp);
rx_ring 1677 drivers/net/ethernet/intel/fm10k/fm10k_main.c interface->rx_ring[rxr_idx] = ring;
rx_ring 1712 drivers/net/ethernet/intel/fm10k/fm10k_main.c interface->rx_ring[ring->queue_index] = NULL;
rx_ring 1890 drivers/net/ethernet/intel/fm10k/fm10k_main.c interface->rx_ring[offset + i]->reg_idx = q_idx;
rx_ring 1891 drivers/net/ethernet/intel/fm10k/fm10k_main.c interface->rx_ring[offset + i]->qos_pc = pc;
rx_ring 1910 drivers/net/ethernet/intel/fm10k/fm10k_main.c interface->rx_ring[i]->reg_idx = i;
rx_ring 83 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c int fm10k_setup_rx_resources(struct fm10k_ring *rx_ring)
rx_ring 85 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c struct device *dev = rx_ring->dev;
rx_ring 88 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c size = sizeof(struct fm10k_rx_buffer) * rx_ring->count;
rx_ring 90 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c rx_ring->rx_buffer = vzalloc(size);
rx_ring 91 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c if (!rx_ring->rx_buffer)
rx_ring 94 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c u64_stats_init(&rx_ring->syncp);
rx_ring 97 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c rx_ring->size = rx_ring->count * sizeof(union fm10k_rx_desc);
rx_ring 98 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring 100 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, rx_ring 101 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c &rx_ring->dma, GFP_KERNEL); rx_ring 102 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c if (!rx_ring->desc) rx_ring 107 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c vfree(rx_ring->rx_buffer); rx_ring 108 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c rx_ring->rx_buffer = NULL; rx_ring 127 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c err = fm10k_setup_rx_resources(interface->rx_ring[i]); rx_ring 140 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c fm10k_free_rx_resources(interface->rx_ring[i]); rx_ring 248 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c static void fm10k_clean_rx_ring(struct fm10k_ring *rx_ring) rx_ring 253 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c if (!rx_ring->rx_buffer) rx_ring 256 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c dev_kfree_skb(rx_ring->skb); rx_ring 257 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c rx_ring->skb = NULL; rx_ring 260 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c for (i = 0; i < rx_ring->count; i++) { rx_ring 261 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c struct fm10k_rx_buffer *buffer = &rx_ring->rx_buffer[i]; rx_ring 266 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c dma_unmap_page(rx_ring->dev, buffer->dma, rx_ring 273 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c size = sizeof(struct fm10k_rx_buffer) * rx_ring->count; rx_ring 274 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c memset(rx_ring->rx_buffer, 0, size); rx_ring 277 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c memset(rx_ring->desc, 0, rx_ring->size); rx_ring 279 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c rx_ring->next_to_alloc = 0; rx_ring 280 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c rx_ring->next_to_clean = 0; rx_ring 281 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c rx_ring->next_to_use = 0; rx_ring 290 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c void fm10k_free_rx_resources(struct fm10k_ring *rx_ring) rx_ring 292 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c fm10k_clean_rx_ring(rx_ring); rx_ring 294 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c vfree(rx_ring->rx_buffer); rx_ring 295 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c rx_ring->rx_buffer = NULL; rx_ring 298 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c if (!rx_ring->desc) rx_ring 301 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring 302 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c rx_ring->desc, rx_ring->dma); rx_ring 304 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c rx_ring->desc = NULL; rx_ring 316 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c fm10k_clean_rx_ring(interface->rx_ring[i]); rx_ring 330 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c fm10k_free_rx_resources(interface->rx_ring[i]); rx_ring 937 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c struct fm10k_ring *rx_ring = interface->rx_ring[i]; rx_ring 938 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c u16 rx_vid = rx_ring->vid & (VLAN_N_VID - 1); rx_ring 941 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c rx_ring->vid |= FM10K_VLAN_CLEAR; rx_ring 943 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c rx_ring->vid &= ~FM10K_VLAN_CLEAR; rx_ring 1339 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c ring = READ_ONCE(interface->rx_ring[i]); rx_ring 1449 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c struct fm10k_ring *ring = 
interface->rx_ring[i]; rx_ring 582 drivers/net/ethernet/intel/fm10k/fm10k_pci.c struct fm10k_ring *rx_ring = READ_ONCE(interface->rx_ring[i]); rx_ring 584 drivers/net/ethernet/intel/fm10k/fm10k_pci.c if (!rx_ring) rx_ring 587 drivers/net/ethernet/intel/fm10k/fm10k_pci.c bytes += rx_ring->stats.bytes; rx_ring 588 drivers/net/ethernet/intel/fm10k/fm10k_pci.c pkts += rx_ring->stats.packets; rx_ring 589 drivers/net/ethernet/intel/fm10k/fm10k_pci.c alloc_failed += rx_ring->rx_stats.alloc_failed; rx_ring 590 drivers/net/ethernet/intel/fm10k/fm10k_pci.c rx_csum_errors += rx_ring->rx_stats.csum_err; rx_ring 591 drivers/net/ethernet/intel/fm10k/fm10k_pci.c rx_errors += rx_ring->rx_stats.errors; rx_ring 592 drivers/net/ethernet/intel/fm10k/fm10k_pci.c hw_csum_rx_good += rx_ring->rx_stats.csum_good; rx_ring 593 drivers/net/ethernet/intel/fm10k/fm10k_pci.c rx_switch_errors += rx_ring->rx_stats.switch_errors; rx_ring 594 drivers/net/ethernet/intel/fm10k/fm10k_pci.c rx_drops += rx_ring->rx_stats.drops; rx_ring 595 drivers/net/ethernet/intel/fm10k/fm10k_pci.c rx_pp_errors += rx_ring->rx_stats.pp_errors; rx_ring 596 drivers/net/ethernet/intel/fm10k/fm10k_pci.c rx_link_errors += rx_ring->rx_stats.link_errors; rx_ring 597 drivers/net/ethernet/intel/fm10k/fm10k_pci.c rx_length_errors += rx_ring->rx_stats.length_errors; rx_ring 1073 drivers/net/ethernet/intel/fm10k/fm10k_pci.c struct fm10k_ring *ring = interface->rx_ring[i]; rx_ring 1167 drivers/net/ethernet/intel/fm10k/fm10k_pci.c fm10k_configure_rx_ring(interface, interface->rx_ring[i]); rx_ring 242 drivers/net/ethernet/intel/i40e/i40e_debugfs.c struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]); rx_ring 244 drivers/net/ethernet/intel/i40e/i40e_debugfs.c if (!rx_ring) rx_ring 249 drivers/net/ethernet/intel/i40e/i40e_debugfs.c i, *rx_ring->state, rx_ring 250 drivers/net/ethernet/intel/i40e/i40e_debugfs.c rx_ring->queue_index, rx_ring 251 drivers/net/ethernet/intel/i40e/i40e_debugfs.c rx_ring->reg_idx); rx_ring 254 drivers/net/ethernet/intel/i40e/i40e_debugfs.c i, rx_ring->rx_buf_len); rx_ring 258 drivers/net/ethernet/intel/i40e/i40e_debugfs.c rx_ring->next_to_use, rx_ring 259 drivers/net/ethernet/intel/i40e/i40e_debugfs.c rx_ring->next_to_clean, rx_ring 260 drivers/net/ethernet/intel/i40e/i40e_debugfs.c rx_ring->ring_active); rx_ring 263 drivers/net/ethernet/intel/i40e/i40e_debugfs.c i, rx_ring->stats.packets, rx_ring 264 drivers/net/ethernet/intel/i40e/i40e_debugfs.c rx_ring->stats.bytes, rx_ring 265 drivers/net/ethernet/intel/i40e/i40e_debugfs.c rx_ring->rx_stats.non_eop_descs); rx_ring 269 drivers/net/ethernet/intel/i40e/i40e_debugfs.c rx_ring->rx_stats.alloc_page_failed, rx_ring 270 drivers/net/ethernet/intel/i40e/i40e_debugfs.c rx_ring->rx_stats.alloc_buff_failed); rx_ring 274 drivers/net/ethernet/intel/i40e/i40e_debugfs.c rx_ring->rx_stats.realloc_count, rx_ring 275 drivers/net/ethernet/intel/i40e/i40e_debugfs.c rx_ring->rx_stats.page_reuse_count); rx_ring 278 drivers/net/ethernet/intel/i40e/i40e_debugfs.c i, rx_ring->size); rx_ring 281 drivers/net/ethernet/intel/i40e/i40e_debugfs.c i, rx_ring->itr_setting, rx_ring 282 drivers/net/ethernet/intel/i40e/i40e_debugfs.c ITR_IS_DYNAMIC(rx_ring->itr_setting) ? 
"dynamic" : "fixed"); rx_ring 2734 drivers/net/ethernet/intel/i40e/i40e_ethtool.c struct i40e_ring *rx_ring, *tx_ring; rx_ring 2748 drivers/net/ethernet/intel/i40e/i40e_ethtool.c rx_ring = vsi->rx_rings[queue]; rx_ring 2751 drivers/net/ethernet/intel/i40e/i40e_ethtool.c if (ITR_IS_DYNAMIC(rx_ring->itr_setting)) rx_ring 2757 drivers/net/ethernet/intel/i40e/i40e_ethtool.c ec->rx_coalesce_usecs = rx_ring->itr_setting & ~I40E_ITR_DYNAMIC; rx_ring 2813 drivers/net/ethernet/intel/i40e/i40e_ethtool.c struct i40e_ring *rx_ring = vsi->rx_rings[queue]; rx_ring 2822 drivers/net/ethernet/intel/i40e/i40e_ethtool.c rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); rx_ring 2826 drivers/net/ethernet/intel/i40e/i40e_ethtool.c rx_ring->itr_setting |= I40E_ITR_DYNAMIC; rx_ring 2828 drivers/net/ethernet/intel/i40e/i40e_ethtool.c rx_ring->itr_setting &= ~I40E_ITR_DYNAMIC; rx_ring 2835 drivers/net/ethernet/intel/i40e/i40e_ethtool.c q_vector = rx_ring->q_vector; rx_ring 2836 drivers/net/ethernet/intel/i40e/i40e_ethtool.c q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); rx_ring 3425 drivers/net/ethernet/intel/i40e/i40e_main.c struct i40e_ring *tx_ring, *rx_ring; rx_ring 3432 drivers/net/ethernet/intel/i40e/i40e_main.c rx_ring = vsi->rx_rings[i]; rx_ring 3434 drivers/net/ethernet/intel/i40e/i40e_main.c rx_ring->dcb_tc = 0; rx_ring 3447 drivers/net/ethernet/intel/i40e/i40e_main.c rx_ring = vsi->rx_rings[i]; rx_ring 3449 drivers/net/ethernet/intel/i40e/i40e_main.c rx_ring->dcb_tc = n; rx_ring 4165 drivers/net/ethernet/intel/i40e/i40e_main.c struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; rx_ring 4182 drivers/net/ethernet/intel/i40e/i40e_main.c rx_ring->q_vector = q_vector; rx_ring 4183 drivers/net/ethernet/intel/i40e/i40e_main.c rx_ring->next = q_vector->rx.ring; rx_ring 4184 drivers/net/ethernet/intel/i40e/i40e_main.c q_vector->rx.ring = rx_ring; rx_ring 5583 drivers/net/ethernet/intel/i40e/i40e_main.c struct i40e_ring *tx_ring, *rx_ring; rx_ring 5590 drivers/net/ethernet/intel/i40e/i40e_main.c rx_ring = vsi->rx_rings[pf_q]; rx_ring 5591 drivers/net/ethernet/intel/i40e/i40e_main.c rx_ring->ch = NULL; rx_ring 5958 drivers/net/ethernet/intel/i40e/i40e_main.c struct i40e_ring *tx_ring, *rx_ring; rx_ring 5970 drivers/net/ethernet/intel/i40e/i40e_main.c rx_ring = vsi->rx_rings[pf_q]; rx_ring 5971 drivers/net/ethernet/intel/i40e/i40e_main.c rx_ring->ch = ch; rx_ring 6953 drivers/net/ethernet/intel/i40e/i40e_main.c struct i40e_ring *tx_ring, *rx_ring; rx_ring 6961 drivers/net/ethernet/intel/i40e/i40e_main.c rx_ring = vsi->rx_rings[pf_q]; rx_ring 6962 drivers/net/ethernet/intel/i40e/i40e_main.c rx_ring->ch = NULL; rx_ring 7041 drivers/net/ethernet/intel/i40e/i40e_main.c struct i40e_ring *tx_ring, *rx_ring; rx_ring 7051 drivers/net/ethernet/intel/i40e/i40e_main.c rx_ring = vsi->rx_rings[pf_q]; rx_ring 7052 drivers/net/ethernet/intel/i40e/i40e_main.c rx_ring->ch = ch; rx_ring 7069 drivers/net/ethernet/intel/i40e/i40e_main.c struct i40e_ring *rx_ring; rx_ring 7073 drivers/net/ethernet/intel/i40e/i40e_main.c rx_ring = vsi->rx_rings[pf_q]; rx_ring 7074 drivers/net/ethernet/intel/i40e/i40e_main.c rx_ring->netdev = NULL; rx_ring 530 drivers/net/ethernet/intel/i40e/i40e_txrx.c void i40e_fd_handle_status(struct i40e_ring *rx_ring, rx_ring 533 drivers/net/ethernet/intel/i40e/i40e_txrx.c struct i40e_pf *pf = rx_ring->vsi->back; rx_ring 1205 drivers/net/ethernet/intel/i40e/i40e_txrx.c static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, rx_ring 1209 drivers/net/ethernet/intel/i40e/i40e_txrx.c u16 nta = 
rx_ring->next_to_alloc; rx_ring 1211 drivers/net/ethernet/intel/i40e/i40e_txrx.c new_buff = &rx_ring->rx_bi[nta]; rx_ring 1215 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; rx_ring 1223 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->rx_stats.page_reuse_count++; rx_ring 1261 drivers/net/ethernet/intel/i40e/i40e_txrx.c struct i40e_ring *rx_ring, rx_ring 1272 drivers/net/ethernet/intel/i40e/i40e_txrx.c ntc = rx_ring->next_to_clean; rx_ring 1275 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_buffer = &rx_ring->rx_bi[ntc++]; rx_ring 1276 drivers/net/ethernet/intel/i40e/i40e_txrx.c ntc = (ntc < rx_ring->count) ? ntc : 0; rx_ring 1277 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->next_to_clean = ntc; rx_ring 1279 drivers/net/ethernet/intel/i40e/i40e_txrx.c prefetch(I40E_RX_DESC(rx_ring, ntc)); rx_ring 1285 drivers/net/ethernet/intel/i40e/i40e_txrx.c i40e_fd_handle_status(rx_ring, rx_desc, id); rx_ring 1343 drivers/net/ethernet/intel/i40e/i40e_txrx.c void i40e_clean_rx_ring(struct i40e_ring *rx_ring) rx_ring 1349 drivers/net/ethernet/intel/i40e/i40e_txrx.c if (!rx_ring->rx_bi) rx_ring 1352 drivers/net/ethernet/intel/i40e/i40e_txrx.c if (rx_ring->skb) { rx_ring 1353 drivers/net/ethernet/intel/i40e/i40e_txrx.c dev_kfree_skb(rx_ring->skb); rx_ring 1354 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->skb = NULL; rx_ring 1357 drivers/net/ethernet/intel/i40e/i40e_txrx.c if (rx_ring->xsk_umem) { rx_ring 1358 drivers/net/ethernet/intel/i40e/i40e_txrx.c i40e_xsk_clean_rx_ring(rx_ring); rx_ring 1363 drivers/net/ethernet/intel/i40e/i40e_txrx.c for (i = 0; i < rx_ring->count; i++) { rx_ring 1364 drivers/net/ethernet/intel/i40e/i40e_txrx.c struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; rx_ring 1372 drivers/net/ethernet/intel/i40e/i40e_txrx.c dma_sync_single_range_for_cpu(rx_ring->dev, rx_ring 1375 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->rx_buf_len, rx_ring 1379 drivers/net/ethernet/intel/i40e/i40e_txrx.c dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, rx_ring 1380 drivers/net/ethernet/intel/i40e/i40e_txrx.c i40e_rx_pg_size(rx_ring), rx_ring 1391 drivers/net/ethernet/intel/i40e/i40e_txrx.c bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; rx_ring 1392 drivers/net/ethernet/intel/i40e/i40e_txrx.c memset(rx_ring->rx_bi, 0, bi_size); rx_ring 1395 drivers/net/ethernet/intel/i40e/i40e_txrx.c memset(rx_ring->desc, 0, rx_ring->size); rx_ring 1397 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->next_to_alloc = 0; rx_ring 1398 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->next_to_clean = 0; rx_ring 1399 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->next_to_use = 0; rx_ring 1408 drivers/net/ethernet/intel/i40e/i40e_txrx.c void i40e_free_rx_resources(struct i40e_ring *rx_ring) rx_ring 1410 drivers/net/ethernet/intel/i40e/i40e_txrx.c i40e_clean_rx_ring(rx_ring); rx_ring 1411 drivers/net/ethernet/intel/i40e/i40e_txrx.c if (rx_ring->vsi->type == I40E_VSI_MAIN) rx_ring 1412 drivers/net/ethernet/intel/i40e/i40e_txrx.c xdp_rxq_info_unreg(&rx_ring->xdp_rxq); rx_ring 1413 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->xdp_prog = NULL; rx_ring 1414 drivers/net/ethernet/intel/i40e/i40e_txrx.c kfree(rx_ring->rx_bi); rx_ring 1415 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->rx_bi = NULL; rx_ring 1417 drivers/net/ethernet/intel/i40e/i40e_txrx.c if (rx_ring->desc) { rx_ring 1418 drivers/net/ethernet/intel/i40e/i40e_txrx.c dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring 1419 
drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->desc, rx_ring->dma); rx_ring 1420 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->desc = NULL; rx_ring 1430 drivers/net/ethernet/intel/i40e/i40e_txrx.c int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) rx_ring 1432 drivers/net/ethernet/intel/i40e/i40e_txrx.c struct device *dev = rx_ring->dev; rx_ring 1437 drivers/net/ethernet/intel/i40e/i40e_txrx.c WARN_ON(rx_ring->rx_bi); rx_ring 1438 drivers/net/ethernet/intel/i40e/i40e_txrx.c bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; rx_ring 1439 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); rx_ring 1440 drivers/net/ethernet/intel/i40e/i40e_txrx.c if (!rx_ring->rx_bi) rx_ring 1443 drivers/net/ethernet/intel/i40e/i40e_txrx.c u64_stats_init(&rx_ring->syncp); rx_ring 1446 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc); rx_ring 1447 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring 1448 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, rx_ring 1449 drivers/net/ethernet/intel/i40e/i40e_txrx.c &rx_ring->dma, GFP_KERNEL); rx_ring 1451 drivers/net/ethernet/intel/i40e/i40e_txrx.c if (!rx_ring->desc) { rx_ring 1453 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->size); rx_ring 1457 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->next_to_alloc = 0; rx_ring 1458 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->next_to_clean = 0; rx_ring 1459 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->next_to_use = 0; rx_ring 1462 drivers/net/ethernet/intel/i40e/i40e_txrx.c if (rx_ring->vsi->type == I40E_VSI_MAIN) { rx_ring 1463 drivers/net/ethernet/intel/i40e/i40e_txrx.c err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring 1464 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->queue_index); rx_ring 1469 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->xdp_prog = rx_ring->vsi->xdp_prog; rx_ring 1473 drivers/net/ethernet/intel/i40e/i40e_txrx.c kfree(rx_ring->rx_bi); rx_ring 1474 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->rx_bi = NULL; rx_ring 1483 drivers/net/ethernet/intel/i40e/i40e_txrx.c void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) rx_ring 1485 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->next_to_use = val; rx_ring 1488 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->next_to_alloc = val; rx_ring 1496 drivers/net/ethernet/intel/i40e/i40e_txrx.c writel(val, rx_ring->tail); rx_ring 1505 drivers/net/ethernet/intel/i40e/i40e_txrx.c static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring) rx_ring 1507 drivers/net/ethernet/intel/i40e/i40e_txrx.c return ring_uses_build_skb(rx_ring) ? 
I40E_SKB_PAD : 0;
rx_ring 1518 drivers/net/ethernet/intel/i40e/i40e_txrx.c static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
rx_ring 1526 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->rx_stats.page_reuse_count++;
rx_ring 1531 drivers/net/ethernet/intel/i40e/i40e_txrx.c page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
rx_ring 1533 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->rx_stats.alloc_page_failed++;
rx_ring 1538 drivers/net/ethernet/intel/i40e/i40e_txrx.c dma = dma_map_page_attrs(rx_ring->dev, page, 0,
rx_ring 1539 drivers/net/ethernet/intel/i40e/i40e_txrx.c i40e_rx_pg_size(rx_ring),
rx_ring 1546 drivers/net/ethernet/intel/i40e/i40e_txrx.c if (dma_mapping_error(rx_ring->dev, dma)) {
rx_ring 1547 drivers/net/ethernet/intel/i40e/i40e_txrx.c __free_pages(page, i40e_rx_pg_order(rx_ring));
rx_ring 1548 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->rx_stats.alloc_page_failed++;
rx_ring 1554 drivers/net/ethernet/intel/i40e/i40e_txrx.c bi->page_offset = i40e_rx_offset(rx_ring);
rx_ring 1568 drivers/net/ethernet/intel/i40e/i40e_txrx.c bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
rx_ring 1570 drivers/net/ethernet/intel/i40e/i40e_txrx.c u16 ntu = rx_ring->next_to_use;
rx_ring 1575 drivers/net/ethernet/intel/i40e/i40e_txrx.c if (!rx_ring->netdev || !cleaned_count)
rx_ring 1578 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_desc = I40E_RX_DESC(rx_ring, ntu);
rx_ring 1579 drivers/net/ethernet/intel/i40e/i40e_txrx.c bi = &rx_ring->rx_bi[ntu];
rx_ring 1582 drivers/net/ethernet/intel/i40e/i40e_txrx.c if (!i40e_alloc_mapped_page(rx_ring, bi))
rx_ring 1586 drivers/net/ethernet/intel/i40e/i40e_txrx.c dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
rx_ring 1588 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->rx_buf_len,
rx_ring 1599 drivers/net/ethernet/intel/i40e/i40e_txrx.c if (unlikely(ntu == rx_ring->count)) {
rx_ring 1600 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_desc = I40E_RX_DESC(rx_ring, 0);
rx_ring 1601 drivers/net/ethernet/intel/i40e/i40e_txrx.c bi = rx_ring->rx_bi;
rx_ring 1611 drivers/net/ethernet/intel/i40e/i40e_txrx.c if (rx_ring->next_to_use != ntu)
rx_ring 1612 drivers/net/ethernet/intel/i40e/i40e_txrx.c i40e_release_rx_desc(rx_ring, ntu);
rx_ring 1617 drivers/net/ethernet/intel/i40e/i40e_txrx.c if (rx_ring->next_to_use != ntu)
rx_ring 1618 drivers/net/ethernet/intel/i40e/i40e_txrx.c i40e_release_rx_desc(rx_ring, ntu);
rx_ring 1777 drivers/net/ethernet/intel/i40e/i40e_txrx.c void i40e_process_skb_fields(struct i40e_ring *rx_ring,
rx_ring 1790 drivers/net/ethernet/intel/i40e/i40e_txrx.c i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
rx_ring 1792 drivers/net/ethernet/intel/i40e/i40e_txrx.c i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
rx_ring 1794 drivers/net/ethernet/intel/i40e/i40e_txrx.c i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
rx_ring 1796 drivers/net/ethernet/intel/i40e/i40e_txrx.c skb_record_rx_queue(skb, rx_ring->queue_index);
rx_ring 1806 drivers/net/ethernet/intel/i40e/i40e_txrx.c skb->protocol = eth_type_trans(skb, rx_ring->netdev);
rx_ring 1823 drivers/net/ethernet/intel/i40e/i40e_txrx.c static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
rx_ring 1933 drivers/net/ethernet/intel/i40e/i40e_txrx.c static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
rx_ring 1939 drivers/net/ethernet/intel/i40e/i40e_txrx.c unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
rx_ring 1941 drivers/net/ethernet/intel/i40e/i40e_txrx.c unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
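
The i40e entries around this point sketch the page-recycling scheme: i40e_alloc_mapped_page() maps a full page, each received frame consumes only half of it (truesize = i40e_rx_pg_size(rx_ring) / 2), and i40e_rx_buffer_flip() below toggles page_offset to the unused half so the page is re-armed without a fresh allocation. An illustrative toy version, with stand-in types rather than the driver's:

/*
 * Sketch of the half-page "flip" implied by the i40e entries above:
 * a 4 KiB page is split into two 2 KiB halves, and after one half is
 * handed to the stack the buffer's page_offset is toggled to the
 * other half. Simplified stand-in types, not the driver's.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_PG_SIZE 4096u
#define TOY_TRUESIZE (TOY_PG_SIZE / 2)	/* i40e_rx_pg_size(rx_ring) / 2 */

struct toy_rx_buffer {
	void *page;                /* stands in for struct page * + dma */
	unsigned int page_offset;  /* which half is armed for the NIC */
};

/* mirrors i40e_rx_buffer_flip(): give out one half, arm the other */
static void toy_buffer_flip(struct toy_rx_buffer *b)
{
	b->page_offset ^= TOY_TRUESIZE;
}

int main(void)
{
	static uint8_t page[TOY_PG_SIZE];
	struct toy_rx_buffer b = { .page = page, .page_offset = 0 };

	toy_buffer_flip(&b);
	printf("armed half at offset %u\n", b.page_offset);	/* 2048 */
	toy_buffer_flip(&b);
	printf("armed half at offset %u\n", b.page_offset);	/* 0 */
	return 0;
}
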
rx_ring 1963 drivers/net/ethernet/intel/i40e/i40e_txrx.c static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
rx_ring 1968 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
rx_ring 1972 drivers/net/ethernet/intel/i40e/i40e_txrx.c dma_sync_single_range_for_cpu(rx_ring->dev,
rx_ring 1994 drivers/net/ethernet/intel/i40e/i40e_txrx.c static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
rx_ring 2000 drivers/net/ethernet/intel/i40e/i40e_txrx.c unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
rx_ring 2029 drivers/net/ethernet/intel/i40e/i40e_txrx.c skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
rx_ring 2075 drivers/net/ethernet/intel/i40e/i40e_txrx.c static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
rx_ring 2081 drivers/net/ethernet/intel/i40e/i40e_txrx.c unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
rx_ring 2127 drivers/net/ethernet/intel/i40e/i40e_txrx.c static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
rx_ring 2132 drivers/net/ethernet/intel/i40e/i40e_txrx.c i40e_reuse_rx_page(rx_ring, rx_buffer);
rx_ring 2135 drivers/net/ethernet/intel/i40e/i40e_txrx.c dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
rx_ring 2136 drivers/net/ethernet/intel/i40e/i40e_txrx.c i40e_rx_pg_size(rx_ring),
rx_ring 2156 drivers/net/ethernet/intel/i40e/i40e_txrx.c static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
rx_ring 2160 drivers/net/ethernet/intel/i40e/i40e_txrx.c u32 ntc = rx_ring->next_to_clean + 1;
rx_ring 2163 drivers/net/ethernet/intel/i40e/i40e_txrx.c ntc = (ntc < rx_ring->count) ? ntc : 0;
rx_ring 2164 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->next_to_clean = ntc;
rx_ring 2166 drivers/net/ethernet/intel/i40e/i40e_txrx.c prefetch(I40E_RX_DESC(rx_ring, ntc));
rx_ring 2173 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->rx_stats.non_eop_descs++;
rx_ring 2196 drivers/net/ethernet/intel/i40e/i40e_txrx.c static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
rx_ring 2205 drivers/net/ethernet/intel/i40e/i40e_txrx.c xdp_prog = READ_ONCE(rx_ring->xdp_prog);
rx_ring 2217 drivers/net/ethernet/intel/i40e/i40e_txrx.c xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
rx_ring 2221 drivers/net/ethernet/intel/i40e/i40e_txrx.c err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
rx_ring 2228 drivers/net/ethernet/intel/i40e/i40e_txrx.c trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
rx_ring 2245 drivers/net/ethernet/intel/i40e/i40e_txrx.c static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
rx_ring 2250 drivers/net/ethernet/intel/i40e/i40e_txrx.c unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
rx_ring 2254 drivers/net/ethernet/intel/i40e/i40e_txrx.c unsigned int truesize = SKB_DATA_ALIGN(i40e_rx_offset(rx_ring) + size);
rx_ring 2283 drivers/net/ethernet/intel/i40e/i40e_txrx.c void i40e_update_rx_stats(struct i40e_ring *rx_ring,
rx_ring 2287 drivers/net/ethernet/intel/i40e/i40e_txrx.c u64_stats_update_begin(&rx_ring->syncp);
rx_ring 2288 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->stats.packets += total_rx_packets;
rx_ring 2289 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->stats.bytes += total_rx_bytes;
rx_ring 2290 drivers/net/ethernet/intel/i40e/i40e_txrx.c u64_stats_update_end(&rx_ring->syncp);
rx_ring 2291 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring 2292 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->q_vector->rx.total_bytes +=
total_rx_bytes; rx_ring 2304 drivers/net/ethernet/intel/i40e/i40e_txrx.c void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res) rx_ring 2311 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->vsi->xdp_rings[rx_ring->queue_index]; rx_ring 2329 drivers/net/ethernet/intel/i40e/i40e_txrx.c static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) rx_ring 2332 drivers/net/ethernet/intel/i40e/i40e_txrx.c struct sk_buff *skb = rx_ring->skb; rx_ring 2333 drivers/net/ethernet/intel/i40e/i40e_txrx.c u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); rx_ring 2338 drivers/net/ethernet/intel/i40e/i40e_txrx.c xdp.rxq = &rx_ring->xdp_rxq; rx_ring 2349 drivers/net/ethernet/intel/i40e/i40e_txrx.c i40e_alloc_rx_buffers(rx_ring, cleaned_count); rx_ring 2353 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean); rx_ring 2368 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_buffer = i40e_clean_programming_status(rx_ring, rx_desc, rx_ring 2371 drivers/net/ethernet/intel/i40e/i40e_txrx.c i40e_reuse_rx_page(rx_ring, rx_buffer); rx_ring 2381 drivers/net/ethernet/intel/i40e/i40e_txrx.c i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb); rx_ring 2382 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_buffer = i40e_get_rx_buffer(rx_ring, size); rx_ring 2390 drivers/net/ethernet/intel/i40e/i40e_txrx.c i40e_rx_offset(rx_ring); rx_ring 2393 drivers/net/ethernet/intel/i40e/i40e_txrx.c skb = i40e_run_xdp(rx_ring, &xdp); rx_ring 2401 drivers/net/ethernet/intel/i40e/i40e_txrx.c i40e_rx_buffer_flip(rx_ring, rx_buffer, size); rx_ring 2408 drivers/net/ethernet/intel/i40e/i40e_txrx.c i40e_add_rx_frag(rx_ring, rx_buffer, skb, size); rx_ring 2409 drivers/net/ethernet/intel/i40e/i40e_txrx.c } else if (ring_uses_build_skb(rx_ring)) { rx_ring 2410 drivers/net/ethernet/intel/i40e/i40e_txrx.c skb = i40e_build_skb(rx_ring, rx_buffer, &xdp); rx_ring 2412 drivers/net/ethernet/intel/i40e/i40e_txrx.c skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp); rx_ring 2417 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->rx_stats.alloc_buff_failed++; rx_ring 2422 drivers/net/ethernet/intel/i40e/i40e_txrx.c i40e_put_rx_buffer(rx_ring, rx_buffer); rx_ring 2425 drivers/net/ethernet/intel/i40e/i40e_txrx.c if (i40e_is_non_eop(rx_ring, rx_desc, skb)) rx_ring 2428 drivers/net/ethernet/intel/i40e/i40e_txrx.c if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) { rx_ring 2437 drivers/net/ethernet/intel/i40e/i40e_txrx.c i40e_process_skb_fields(rx_ring, rx_desc, skb); rx_ring 2439 drivers/net/ethernet/intel/i40e/i40e_txrx.c i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); rx_ring 2440 drivers/net/ethernet/intel/i40e/i40e_txrx.c napi_gro_receive(&rx_ring->q_vector->napi, skb); rx_ring 2447 drivers/net/ethernet/intel/i40e/i40e_txrx.c i40e_finalize_xdp_rx(rx_ring, xdp_xmit); rx_ring 2448 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_ring->skb = skb; rx_ring 2450 drivers/net/ethernet/intel/i40e/i40e_txrx.c i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); rx_ring 485 drivers/net/ethernet/intel/i40e/i40e_txrx.h void i40e_clean_rx_ring(struct i40e_ring *rx_ring); rx_ring 487 drivers/net/ethernet/intel/i40e/i40e_txrx.h int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring); rx_ring 489 drivers/net/ethernet/intel/i40e/i40e_txrx.h void i40e_free_rx_resources(struct i40e_ring *rx_ring); rx_ring 7 drivers/net/ethernet/intel/i40e/i40e_txrx_common.h void i40e_fd_handle_status(struct i40e_ring *rx_ring, rx_ring 11 drivers/net/ethernet/intel/i40e/i40e_txrx_common.h struct i40e_ring 
*rx_ring, rx_ring 14 drivers/net/ethernet/intel/i40e/i40e_txrx_common.h void i40e_process_skb_fields(struct i40e_ring *rx_ring, rx_ring 17 drivers/net/ethernet/intel/i40e/i40e_txrx_common.h void i40e_update_rx_stats(struct i40e_ring *rx_ring, rx_ring 20 drivers/net/ethernet/intel/i40e/i40e_txrx_common.h void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res); rx_ring 21 drivers/net/ethernet/intel/i40e/i40e_txrx_common.h void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val); rx_ring 87 drivers/net/ethernet/intel/i40e/i40e_txrx_common.h void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring); rx_ring 191 drivers/net/ethernet/intel/i40e/i40e_xsk.c static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) rx_ring 193 drivers/net/ethernet/intel/i40e/i40e_xsk.c struct xdp_umem *umem = rx_ring->xsk_umem; rx_ring 204 drivers/net/ethernet/intel/i40e/i40e_xsk.c xdp_prog = READ_ONCE(rx_ring->xdp_prog); rx_ring 214 drivers/net/ethernet/intel/i40e/i40e_xsk.c xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; rx_ring 218 drivers/net/ethernet/intel/i40e/i40e_xsk.c err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); rx_ring 225 drivers/net/ethernet/intel/i40e/i40e_xsk.c trace_xdp_exception(rx_ring->netdev, xdp_prog, act); rx_ring 245 drivers/net/ethernet/intel/i40e/i40e_xsk.c static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring, rx_ring 248 drivers/net/ethernet/intel/i40e/i40e_xsk.c struct xdp_umem *umem = rx_ring->xsk_umem; rx_ring 253 drivers/net/ethernet/intel/i40e/i40e_xsk.c rx_ring->rx_stats.page_reuse_count++; rx_ring 258 drivers/net/ethernet/intel/i40e/i40e_xsk.c rx_ring->rx_stats.alloc_page_failed++; rx_ring 286 drivers/net/ethernet/intel/i40e/i40e_xsk.c static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring, rx_ring 289 drivers/net/ethernet/intel/i40e/i40e_xsk.c struct xdp_umem *umem = rx_ring->xsk_umem; rx_ring 293 drivers/net/ethernet/intel/i40e/i40e_xsk.c rx_ring->rx_stats.alloc_page_failed++; rx_ring 297 drivers/net/ethernet/intel/i40e/i40e_xsk.c handle &= rx_ring->xsk_umem->chunk_mask; rx_ring 314 drivers/net/ethernet/intel/i40e/i40e_xsk.c __i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count, rx_ring 315 drivers/net/ethernet/intel/i40e/i40e_xsk.c bool alloc(struct i40e_ring *rx_ring, rx_ring 318 drivers/net/ethernet/intel/i40e/i40e_xsk.c u16 ntu = rx_ring->next_to_use; rx_ring 323 drivers/net/ethernet/intel/i40e/i40e_xsk.c rx_desc = I40E_RX_DESC(rx_ring, ntu); rx_ring 324 drivers/net/ethernet/intel/i40e/i40e_xsk.c bi = &rx_ring->rx_bi[ntu]; rx_ring 326 drivers/net/ethernet/intel/i40e/i40e_xsk.c if (!alloc(rx_ring, bi)) { rx_ring 331 drivers/net/ethernet/intel/i40e/i40e_xsk.c dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0, rx_ring 332 drivers/net/ethernet/intel/i40e/i40e_xsk.c rx_ring->rx_buf_len, rx_ring 341 drivers/net/ethernet/intel/i40e/i40e_xsk.c if (unlikely(ntu == rx_ring->count)) { rx_ring 342 drivers/net/ethernet/intel/i40e/i40e_xsk.c rx_desc = I40E_RX_DESC(rx_ring, 0); rx_ring 343 drivers/net/ethernet/intel/i40e/i40e_xsk.c bi = rx_ring->rx_bi; rx_ring 352 drivers/net/ethernet/intel/i40e/i40e_xsk.c if (rx_ring->next_to_use != ntu) rx_ring 353 drivers/net/ethernet/intel/i40e/i40e_xsk.c i40e_release_rx_desc(rx_ring, ntu); rx_ring 368 drivers/net/ethernet/intel/i40e/i40e_xsk.c bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count) rx_ring 370 drivers/net/ethernet/intel/i40e/i40e_xsk.c return __i40e_alloc_rx_buffers_zc(rx_ring, count, rx_ring 384 
drivers/net/ethernet/intel/i40e/i40e_xsk.c static bool i40e_alloc_rx_buffers_fast_zc(struct i40e_ring *rx_ring, u16 count) rx_ring 386 drivers/net/ethernet/intel/i40e/i40e_xsk.c return __i40e_alloc_rx_buffers_zc(rx_ring, count, rx_ring 400 drivers/net/ethernet/intel/i40e/i40e_xsk.c static struct i40e_rx_buffer *i40e_get_rx_buffer_zc(struct i40e_ring *rx_ring, rx_ring 405 drivers/net/ethernet/intel/i40e/i40e_xsk.c bi = &rx_ring->rx_bi[rx_ring->next_to_clean]; rx_ring 408 drivers/net/ethernet/intel/i40e/i40e_xsk.c dma_sync_single_range_for_cpu(rx_ring->dev, rx_ring 424 drivers/net/ethernet/intel/i40e/i40e_xsk.c static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring, rx_ring 427 drivers/net/ethernet/intel/i40e/i40e_xsk.c struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc]; rx_ring 428 drivers/net/ethernet/intel/i40e/i40e_xsk.c u16 nta = rx_ring->next_to_alloc; rx_ring 432 drivers/net/ethernet/intel/i40e/i40e_xsk.c rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; rx_ring 450 drivers/net/ethernet/intel/i40e/i40e_xsk.c struct i40e_ring *rx_ring; rx_ring 454 drivers/net/ethernet/intel/i40e/i40e_xsk.c rx_ring = container_of(alloc, struct i40e_ring, zca); rx_ring 455 drivers/net/ethernet/intel/i40e/i40e_xsk.c hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; rx_ring 456 drivers/net/ethernet/intel/i40e/i40e_xsk.c mask = rx_ring->xsk_umem->chunk_mask; rx_ring 458 drivers/net/ethernet/intel/i40e/i40e_xsk.c nta = rx_ring->next_to_alloc; rx_ring 459 drivers/net/ethernet/intel/i40e/i40e_xsk.c bi = &rx_ring->rx_bi[nta]; rx_ring 462 drivers/net/ethernet/intel/i40e/i40e_xsk.c rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; rx_ring 466 drivers/net/ethernet/intel/i40e/i40e_xsk.c bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle); rx_ring 469 drivers/net/ethernet/intel/i40e/i40e_xsk.c bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle); rx_ring 472 drivers/net/ethernet/intel/i40e/i40e_xsk.c bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle, rx_ring 473 drivers/net/ethernet/intel/i40e/i40e_xsk.c rx_ring->xsk_umem->headroom); rx_ring 486 drivers/net/ethernet/intel/i40e/i40e_xsk.c static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring, rx_ring 495 drivers/net/ethernet/intel/i40e/i40e_xsk.c skb = __napi_alloc_skb(&rx_ring->q_vector->napi, rx_ring 506 drivers/net/ethernet/intel/i40e/i40e_xsk.c i40e_reuse_rx_buffer_zc(rx_ring, bi); rx_ring 514 drivers/net/ethernet/intel/i40e/i40e_xsk.c static void i40e_inc_ntc(struct i40e_ring *rx_ring) rx_ring 516 drivers/net/ethernet/intel/i40e/i40e_xsk.c u32 ntc = rx_ring->next_to_clean + 1; rx_ring 518 drivers/net/ethernet/intel/i40e/i40e_xsk.c ntc = (ntc < rx_ring->count) ? 
ntc : 0; rx_ring 519 drivers/net/ethernet/intel/i40e/i40e_xsk.c rx_ring->next_to_clean = ntc; rx_ring 520 drivers/net/ethernet/intel/i40e/i40e_xsk.c prefetch(I40E_RX_DESC(rx_ring, ntc)); rx_ring 530 drivers/net/ethernet/intel/i40e/i40e_xsk.c int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) rx_ring 533 drivers/net/ethernet/intel/i40e/i40e_xsk.c u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); rx_ring 539 drivers/net/ethernet/intel/i40e/i40e_xsk.c xdp.rxq = &rx_ring->xdp_rxq; rx_ring 549 drivers/net/ethernet/intel/i40e/i40e_xsk.c !i40e_alloc_rx_buffers_fast_zc(rx_ring, rx_ring 554 drivers/net/ethernet/intel/i40e/i40e_xsk.c rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean); rx_ring 563 drivers/net/ethernet/intel/i40e/i40e_xsk.c bi = i40e_clean_programming_status(rx_ring, rx_desc, rx_ring 566 drivers/net/ethernet/intel/i40e/i40e_xsk.c i40e_reuse_rx_buffer_zc(rx_ring, bi); rx_ring 576 drivers/net/ethernet/intel/i40e/i40e_xsk.c bi = i40e_get_rx_buffer_zc(rx_ring, size); rx_ring 583 drivers/net/ethernet/intel/i40e/i40e_xsk.c xdp_res = i40e_run_xdp_zc(rx_ring, &xdp); rx_ring 589 drivers/net/ethernet/intel/i40e/i40e_xsk.c i40e_reuse_rx_buffer_zc(rx_ring, bi); rx_ring 596 drivers/net/ethernet/intel/i40e/i40e_xsk.c i40e_inc_ntc(rx_ring); rx_ring 607 drivers/net/ethernet/intel/i40e/i40e_xsk.c skb = i40e_construct_skb_zc(rx_ring, bi, &xdp); rx_ring 609 drivers/net/ethernet/intel/i40e/i40e_xsk.c rx_ring->rx_stats.alloc_buff_failed++; rx_ring 614 drivers/net/ethernet/intel/i40e/i40e_xsk.c i40e_inc_ntc(rx_ring); rx_ring 622 drivers/net/ethernet/intel/i40e/i40e_xsk.c i40e_process_skb_fields(rx_ring, rx_desc, skb); rx_ring 623 drivers/net/ethernet/intel/i40e/i40e_xsk.c napi_gro_receive(&rx_ring->q_vector->napi, skb); rx_ring 626 drivers/net/ethernet/intel/i40e/i40e_xsk.c i40e_finalize_xdp_rx(rx_ring, xdp_xmit); rx_ring 627 drivers/net/ethernet/intel/i40e/i40e_xsk.c i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); rx_ring 629 drivers/net/ethernet/intel/i40e/i40e_xsk.c if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) { rx_ring 630 drivers/net/ethernet/intel/i40e/i40e_xsk.c if (failure || rx_ring->next_to_clean == rx_ring->next_to_use) rx_ring 631 drivers/net/ethernet/intel/i40e/i40e_xsk.c xsk_set_rx_need_wakeup(rx_ring->xsk_umem); rx_ring 633 drivers/net/ethernet/intel/i40e/i40e_xsk.c xsk_clear_rx_need_wakeup(rx_ring->xsk_umem); rx_ring 822 drivers/net/ethernet/intel/i40e/i40e_xsk.c void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring) rx_ring 826 drivers/net/ethernet/intel/i40e/i40e_xsk.c for (i = 0; i < rx_ring->count; i++) { rx_ring 827 drivers/net/ethernet/intel/i40e/i40e_xsk.c struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; rx_ring 832 drivers/net/ethernet/intel/i40e/i40e_xsk.c xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_bi->handle); rx_ring 16 drivers/net/ethernet/intel/i40e/i40e_xsk.h bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 cleaned_count); rx_ring 17 drivers/net/ethernet/intel/i40e/i40e_xsk.h int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget); rx_ring 646 drivers/net/ethernet/intel/iavf/iavf_ethtool.c struct iavf_ring *rx_ring, *tx_ring; rx_ring 659 drivers/net/ethernet/intel/iavf/iavf_ethtool.c rx_ring = &adapter->rx_rings[queue]; rx_ring 662 drivers/net/ethernet/intel/iavf/iavf_ethtool.c if (ITR_IS_DYNAMIC(rx_ring->itr_setting)) rx_ring 668 drivers/net/ethernet/intel/iavf/iavf_ethtool.c ec->rx_coalesce_usecs = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; rx_ring 715 drivers/net/ethernet/intel/iavf/iavf_ethtool.c struct iavf_ring 
*rx_ring = &adapter->rx_rings[queue]; rx_ring 719 drivers/net/ethernet/intel/iavf/iavf_ethtool.c rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); rx_ring 722 drivers/net/ethernet/intel/iavf/iavf_ethtool.c rx_ring->itr_setting |= IAVF_ITR_DYNAMIC; rx_ring 724 drivers/net/ethernet/intel/iavf/iavf_ethtool.c rx_ring->itr_setting ^= IAVF_ITR_DYNAMIC; rx_ring 730 drivers/net/ethernet/intel/iavf/iavf_ethtool.c q_vector = rx_ring->q_vector; rx_ring 731 drivers/net/ethernet/intel/iavf/iavf_ethtool.c q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); rx_ring 305 drivers/net/ethernet/intel/iavf/iavf_main.c struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx]; rx_ring 308 drivers/net/ethernet/intel/iavf/iavf_main.c rx_ring->q_vector = q_vector; rx_ring 309 drivers/net/ethernet/intel/iavf/iavf_main.c rx_ring->next = q_vector->rx.ring; rx_ring 310 drivers/net/ethernet/intel/iavf/iavf_main.c rx_ring->vsi = &adapter->vsi; rx_ring 311 drivers/net/ethernet/intel/iavf/iavf_main.c q_vector->rx.ring = rx_ring; rx_ring 314 drivers/net/ethernet/intel/iavf/iavf_main.c q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); rx_ring 1125 drivers/net/ethernet/intel/iavf/iavf_main.c struct iavf_ring *rx_ring; rx_ring 1137 drivers/net/ethernet/intel/iavf/iavf_main.c rx_ring = &adapter->rx_rings[i]; rx_ring 1138 drivers/net/ethernet/intel/iavf/iavf_main.c rx_ring->queue_index = i; rx_ring 1139 drivers/net/ethernet/intel/iavf/iavf_main.c rx_ring->netdev = adapter->netdev; rx_ring 1140 drivers/net/ethernet/intel/iavf/iavf_main.c rx_ring->dev = &adapter->pdev->dev; rx_ring 1141 drivers/net/ethernet/intel/iavf/iavf_main.c rx_ring->count = adapter->rx_desc_count; rx_ring 1142 drivers/net/ethernet/intel/iavf/iavf_main.c rx_ring->itr_setting = IAVF_ITR_RX_DEF; rx_ring 655 drivers/net/ethernet/intel/iavf/iavf_txrx.c void iavf_clean_rx_ring(struct iavf_ring *rx_ring) rx_ring 661 drivers/net/ethernet/intel/iavf/iavf_txrx.c if (!rx_ring->rx_bi) rx_ring 664 drivers/net/ethernet/intel/iavf/iavf_txrx.c if (rx_ring->skb) { rx_ring 665 drivers/net/ethernet/intel/iavf/iavf_txrx.c dev_kfree_skb(rx_ring->skb); rx_ring 666 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->skb = NULL; rx_ring 670 drivers/net/ethernet/intel/iavf/iavf_txrx.c for (i = 0; i < rx_ring->count; i++) { rx_ring 671 drivers/net/ethernet/intel/iavf/iavf_txrx.c struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; rx_ring 679 drivers/net/ethernet/intel/iavf/iavf_txrx.c dma_sync_single_range_for_cpu(rx_ring->dev, rx_ring 682 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->rx_buf_len, rx_ring 686 drivers/net/ethernet/intel/iavf/iavf_txrx.c dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, rx_ring 687 drivers/net/ethernet/intel/iavf/iavf_txrx.c iavf_rx_pg_size(rx_ring), rx_ring 697 drivers/net/ethernet/intel/iavf/iavf_txrx.c bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count; rx_ring 698 drivers/net/ethernet/intel/iavf/iavf_txrx.c memset(rx_ring->rx_bi, 0, bi_size); rx_ring 701 drivers/net/ethernet/intel/iavf/iavf_txrx.c memset(rx_ring->desc, 0, rx_ring->size); rx_ring 703 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->next_to_alloc = 0; rx_ring 704 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->next_to_clean = 0; rx_ring 705 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->next_to_use = 0; rx_ring 714 drivers/net/ethernet/intel/iavf/iavf_txrx.c void iavf_free_rx_resources(struct iavf_ring *rx_ring) rx_ring 716 drivers/net/ethernet/intel/iavf/iavf_txrx.c iavf_clean_rx_ring(rx_ring); rx_ring 717 
drivers/net/ethernet/intel/iavf/iavf_txrx.c kfree(rx_ring->rx_bi); rx_ring 718 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->rx_bi = NULL; rx_ring 720 drivers/net/ethernet/intel/iavf/iavf_txrx.c if (rx_ring->desc) { rx_ring 721 drivers/net/ethernet/intel/iavf/iavf_txrx.c dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring 722 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->desc, rx_ring->dma); rx_ring 723 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->desc = NULL; rx_ring 733 drivers/net/ethernet/intel/iavf/iavf_txrx.c int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring) rx_ring 735 drivers/net/ethernet/intel/iavf/iavf_txrx.c struct device *dev = rx_ring->dev; rx_ring 739 drivers/net/ethernet/intel/iavf/iavf_txrx.c WARN_ON(rx_ring->rx_bi); rx_ring 740 drivers/net/ethernet/intel/iavf/iavf_txrx.c bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count; rx_ring 741 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); rx_ring 742 drivers/net/ethernet/intel/iavf/iavf_txrx.c if (!rx_ring->rx_bi) rx_ring 745 drivers/net/ethernet/intel/iavf/iavf_txrx.c u64_stats_init(&rx_ring->syncp); rx_ring 748 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc); rx_ring 749 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring 750 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, rx_ring 751 drivers/net/ethernet/intel/iavf/iavf_txrx.c &rx_ring->dma, GFP_KERNEL); rx_ring 753 drivers/net/ethernet/intel/iavf/iavf_txrx.c if (!rx_ring->desc) { rx_ring 755 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->size); rx_ring 759 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->next_to_alloc = 0; rx_ring 760 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->next_to_clean = 0; rx_ring 761 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->next_to_use = 0; rx_ring 765 drivers/net/ethernet/intel/iavf/iavf_txrx.c kfree(rx_ring->rx_bi); rx_ring 766 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->rx_bi = NULL; rx_ring 775 drivers/net/ethernet/intel/iavf/iavf_txrx.c static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val) rx_ring 777 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->next_to_use = val; rx_ring 780 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->next_to_alloc = val; rx_ring 788 drivers/net/ethernet/intel/iavf/iavf_txrx.c writel(val, rx_ring->tail); rx_ring 797 drivers/net/ethernet/intel/iavf/iavf_txrx.c static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring) rx_ring 799 drivers/net/ethernet/intel/iavf/iavf_txrx.c return ring_uses_build_skb(rx_ring) ? 
IAVF_SKB_PAD : 0; rx_ring 810 drivers/net/ethernet/intel/iavf/iavf_txrx.c static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring, rx_ring 818 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->rx_stats.page_reuse_count++; rx_ring 823 drivers/net/ethernet/intel/iavf/iavf_txrx.c page = dev_alloc_pages(iavf_rx_pg_order(rx_ring)); rx_ring 825 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->rx_stats.alloc_page_failed++; rx_ring 830 drivers/net/ethernet/intel/iavf/iavf_txrx.c dma = dma_map_page_attrs(rx_ring->dev, page, 0, rx_ring 831 drivers/net/ethernet/intel/iavf/iavf_txrx.c iavf_rx_pg_size(rx_ring), rx_ring 838 drivers/net/ethernet/intel/iavf/iavf_txrx.c if (dma_mapping_error(rx_ring->dev, dma)) { rx_ring 839 drivers/net/ethernet/intel/iavf/iavf_txrx.c __free_pages(page, iavf_rx_pg_order(rx_ring)); rx_ring 840 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->rx_stats.alloc_page_failed++; rx_ring 846 drivers/net/ethernet/intel/iavf/iavf_txrx.c bi->page_offset = iavf_rx_offset(rx_ring); rx_ring 860 drivers/net/ethernet/intel/iavf/iavf_txrx.c static void iavf_receive_skb(struct iavf_ring *rx_ring, rx_ring 863 drivers/net/ethernet/intel/iavf/iavf_txrx.c struct iavf_q_vector *q_vector = rx_ring->q_vector; rx_ring 865 drivers/net/ethernet/intel/iavf/iavf_txrx.c if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && rx_ring 879 drivers/net/ethernet/intel/iavf/iavf_txrx.c bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count) rx_ring 881 drivers/net/ethernet/intel/iavf/iavf_txrx.c u16 ntu = rx_ring->next_to_use; rx_ring 886 drivers/net/ethernet/intel/iavf/iavf_txrx.c if (!rx_ring->netdev || !cleaned_count) rx_ring 889 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_desc = IAVF_RX_DESC(rx_ring, ntu); rx_ring 890 drivers/net/ethernet/intel/iavf/iavf_txrx.c bi = &rx_ring->rx_bi[ntu]; rx_ring 893 drivers/net/ethernet/intel/iavf/iavf_txrx.c if (!iavf_alloc_mapped_page(rx_ring, bi)) rx_ring 897 drivers/net/ethernet/intel/iavf/iavf_txrx.c dma_sync_single_range_for_device(rx_ring->dev, bi->dma, rx_ring 899 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->rx_buf_len, rx_ring 910 drivers/net/ethernet/intel/iavf/iavf_txrx.c if (unlikely(ntu == rx_ring->count)) { rx_ring 911 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_desc = IAVF_RX_DESC(rx_ring, 0); rx_ring 912 drivers/net/ethernet/intel/iavf/iavf_txrx.c bi = rx_ring->rx_bi; rx_ring 922 drivers/net/ethernet/intel/iavf/iavf_txrx.c if (rx_ring->next_to_use != ntu) rx_ring 923 drivers/net/ethernet/intel/iavf/iavf_txrx.c iavf_release_rx_desc(rx_ring, ntu); rx_ring 928 drivers/net/ethernet/intel/iavf/iavf_txrx.c if (rx_ring->next_to_use != ntu) rx_ring 929 drivers/net/ethernet/intel/iavf/iavf_txrx.c iavf_release_rx_desc(rx_ring, ntu); rx_ring 1082 drivers/net/ethernet/intel/iavf/iavf_txrx.c void iavf_process_skb_fields(struct iavf_ring *rx_ring, rx_ring 1086 drivers/net/ethernet/intel/iavf/iavf_txrx.c iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype); rx_ring 1088 drivers/net/ethernet/intel/iavf/iavf_txrx.c iavf_rx_checksum(rx_ring->vsi, skb, rx_desc); rx_ring 1090 drivers/net/ethernet/intel/iavf/iavf_txrx.c skb_record_rx_queue(skb, rx_ring->queue_index); rx_ring 1093 drivers/net/ethernet/intel/iavf/iavf_txrx.c skb->protocol = eth_type_trans(skb, rx_ring->netdev); rx_ring 1109 drivers/net/ethernet/intel/iavf/iavf_txrx.c static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb) rx_ring 1125 drivers/net/ethernet/intel/iavf/iavf_txrx.c static void iavf_reuse_rx_page(struct iavf_ring *rx_ring, rx_ring 
1129 drivers/net/ethernet/intel/iavf/iavf_txrx.c u16 nta = rx_ring->next_to_alloc; rx_ring 1131 drivers/net/ethernet/intel/iavf/iavf_txrx.c new_buff = &rx_ring->rx_bi[nta]; rx_ring 1135 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; rx_ring 1228 drivers/net/ethernet/intel/iavf/iavf_txrx.c static void iavf_add_rx_frag(struct iavf_ring *rx_ring, rx_ring 1234 drivers/net/ethernet/intel/iavf/iavf_txrx.c unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; rx_ring 1236 drivers/net/ethernet/intel/iavf/iavf_txrx.c unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring)); rx_ring 1261 drivers/net/ethernet/intel/iavf/iavf_txrx.c static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring, rx_ring 1269 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; rx_ring 1273 drivers/net/ethernet/intel/iavf/iavf_txrx.c dma_sync_single_range_for_cpu(rx_ring->dev, rx_ring 1295 drivers/net/ethernet/intel/iavf/iavf_txrx.c static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring, rx_ring 1301 drivers/net/ethernet/intel/iavf/iavf_txrx.c unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; rx_ring 1318 drivers/net/ethernet/intel/iavf/iavf_txrx.c skb = __napi_alloc_skb(&rx_ring->q_vector->napi, rx_ring 1362 drivers/net/ethernet/intel/iavf/iavf_txrx.c static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring, rx_ring 1368 drivers/net/ethernet/intel/iavf/iavf_txrx.c unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; rx_ring 1410 drivers/net/ethernet/intel/iavf/iavf_txrx.c static void iavf_put_rx_buffer(struct iavf_ring *rx_ring, rx_ring 1418 drivers/net/ethernet/intel/iavf/iavf_txrx.c iavf_reuse_rx_page(rx_ring, rx_buffer); rx_ring 1419 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->rx_stats.page_reuse_count++; rx_ring 1422 drivers/net/ethernet/intel/iavf/iavf_txrx.c dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, rx_ring 1423 drivers/net/ethernet/intel/iavf/iavf_txrx.c iavf_rx_pg_size(rx_ring), rx_ring 1444 drivers/net/ethernet/intel/iavf/iavf_txrx.c static bool iavf_is_non_eop(struct iavf_ring *rx_ring, rx_ring 1448 drivers/net/ethernet/intel/iavf/iavf_txrx.c u32 ntc = rx_ring->next_to_clean + 1; rx_ring 1451 drivers/net/ethernet/intel/iavf/iavf_txrx.c ntc = (ntc < rx_ring->count) ? 
ntc : 0; rx_ring 1452 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->next_to_clean = ntc; rx_ring 1454 drivers/net/ethernet/intel/iavf/iavf_txrx.c prefetch(IAVF_RX_DESC(rx_ring, ntc)); rx_ring 1461 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->rx_stats.non_eop_descs++; rx_ring 1478 drivers/net/ethernet/intel/iavf/iavf_txrx.c static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) rx_ring 1481 drivers/net/ethernet/intel/iavf/iavf_txrx.c struct sk_buff *skb = rx_ring->skb; rx_ring 1482 drivers/net/ethernet/intel/iavf/iavf_txrx.c u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring); rx_ring 1496 drivers/net/ethernet/intel/iavf/iavf_txrx.c iavf_alloc_rx_buffers(rx_ring, cleaned_count); rx_ring 1500 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean); rx_ring 1521 drivers/net/ethernet/intel/iavf/iavf_txrx.c iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb); rx_ring 1522 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_buffer = iavf_get_rx_buffer(rx_ring, size); rx_ring 1526 drivers/net/ethernet/intel/iavf/iavf_txrx.c iavf_add_rx_frag(rx_ring, rx_buffer, skb, size); rx_ring 1527 drivers/net/ethernet/intel/iavf/iavf_txrx.c else if (ring_uses_build_skb(rx_ring)) rx_ring 1528 drivers/net/ethernet/intel/iavf/iavf_txrx.c skb = iavf_build_skb(rx_ring, rx_buffer, size); rx_ring 1530 drivers/net/ethernet/intel/iavf/iavf_txrx.c skb = iavf_construct_skb(rx_ring, rx_buffer, size); rx_ring 1534 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->rx_stats.alloc_buff_failed++; rx_ring 1540 drivers/net/ethernet/intel/iavf/iavf_txrx.c iavf_put_rx_buffer(rx_ring, rx_buffer); rx_ring 1543 drivers/net/ethernet/intel/iavf/iavf_txrx.c if (iavf_is_non_eop(rx_ring, rx_desc, skb)) rx_ring 1557 drivers/net/ethernet/intel/iavf/iavf_txrx.c if (iavf_cleanup_headers(rx_ring, skb)) { rx_ring 1570 drivers/net/ethernet/intel/iavf/iavf_txrx.c iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); rx_ring 1576 drivers/net/ethernet/intel/iavf/iavf_txrx.c iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); rx_ring 1577 drivers/net/ethernet/intel/iavf/iavf_txrx.c iavf_receive_skb(rx_ring, skb, vlan_tag); rx_ring 1584 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->skb = skb; rx_ring 1586 drivers/net/ethernet/intel/iavf/iavf_txrx.c u64_stats_update_begin(&rx_ring->syncp); rx_ring 1587 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->stats.packets += total_rx_packets; rx_ring 1588 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->stats.bytes += total_rx_bytes; rx_ring 1589 drivers/net/ethernet/intel/iavf/iavf_txrx.c u64_stats_update_end(&rx_ring->syncp); rx_ring 1590 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring 1591 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_ring->q_vector->rx.total_bytes += total_rx_bytes; rx_ring 442 drivers/net/ethernet/intel/iavf/iavf_txrx.h void iavf_clean_rx_ring(struct iavf_ring *rx_ring); rx_ring 444 drivers/net/ethernet/intel/iavf/iavf_txrx.h int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring); rx_ring 446 drivers/net/ethernet/intel/iavf/iavf_txrx.h void iavf_free_rx_resources(struct iavf_ring *rx_ring); rx_ring 108 drivers/net/ethernet/intel/ice/ice_dcb_lib.c struct ice_ring *tx_ring, *rx_ring; rx_ring 119 drivers/net/ethernet/intel/ice/ice_dcb_lib.c rx_ring = vsi->rx_rings[i]; rx_ring 120 drivers/net/ethernet/intel/ice/ice_dcb_lib.c rx_ring->dcb_tc = 0; rx_ring 133 drivers/net/ethernet/intel/ice/ice_dcb_lib.c rx_ring = vsi->rx_rings[i]; rx_ring 135 
drivers/net/ethernet/intel/ice/ice_dcb_lib.c rx_ring->dcb_tc = n; rx_ring 608 drivers/net/ethernet/intel/ice/ice_ethtool.c static int ice_lbtest_receive_frames(struct ice_ring *rx_ring) rx_ring 616 drivers/net/ethernet/intel/ice/ice_ethtool.c for (i = 0; i < rx_ring->count; i++) { rx_ring 619 drivers/net/ethernet/intel/ice/ice_ethtool.c rx_desc = ICE_RX_DESC(rx_ring, i); rx_ring 625 drivers/net/ethernet/intel/ice/ice_ethtool.c rx_buf = &rx_ring->rx_buf[i]; rx_ring 647 drivers/net/ethernet/intel/ice/ice_ethtool.c struct ice_ring *tx_ring, *rx_ring; rx_ring 664 drivers/net/ethernet/intel/ice/ice_ethtool.c rx_ring = test_vsi->rx_rings[0]; rx_ring 671 drivers/net/ethernet/intel/ice/ice_ethtool.c if (ice_alloc_rx_bufs(rx_ring, rx_ring->count)) { rx_ring 707 drivers/net/ethernet/intel/ice/ice_ethtool.c valid_frames = ice_lbtest_receive_frames(rx_ring); rx_ring 1393 drivers/net/ethernet/intel/ice/ice_lib.c struct ice_ring *rx_ring = vsi->rx_rings[q_id]; rx_ring 1395 drivers/net/ethernet/intel/ice/ice_lib.c rx_ring->q_vector = q_vector; rx_ring 1396 drivers/net/ethernet/intel/ice/ice_lib.c rx_ring->next = q_vector->rx.ring; rx_ring 1397 drivers/net/ethernet/intel/ice/ice_lib.c q_vector->rx.ring = rx_ring; rx_ring 267 drivers/net/ethernet/intel/ice/ice_txrx.c void ice_clean_rx_ring(struct ice_ring *rx_ring) rx_ring 269 drivers/net/ethernet/intel/ice/ice_txrx.c struct device *dev = rx_ring->dev; rx_ring 273 drivers/net/ethernet/intel/ice/ice_txrx.c if (!rx_ring->rx_buf) rx_ring 277 drivers/net/ethernet/intel/ice/ice_txrx.c for (i = 0; i < rx_ring->count; i++) { rx_ring 278 drivers/net/ethernet/intel/ice/ice_txrx.c struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; rx_ring 303 drivers/net/ethernet/intel/ice/ice_txrx.c memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count); rx_ring 306 drivers/net/ethernet/intel/ice/ice_txrx.c memset(rx_ring->desc, 0, rx_ring->size); rx_ring 308 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->next_to_alloc = 0; rx_ring 309 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->next_to_clean = 0; rx_ring 310 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->next_to_use = 0; rx_ring 319 drivers/net/ethernet/intel/ice/ice_txrx.c void ice_free_rx_ring(struct ice_ring *rx_ring) rx_ring 321 drivers/net/ethernet/intel/ice/ice_txrx.c ice_clean_rx_ring(rx_ring); rx_ring 322 drivers/net/ethernet/intel/ice/ice_txrx.c devm_kfree(rx_ring->dev, rx_ring->rx_buf); rx_ring 323 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->rx_buf = NULL; rx_ring 325 drivers/net/ethernet/intel/ice/ice_txrx.c if (rx_ring->desc) { rx_ring 326 drivers/net/ethernet/intel/ice/ice_txrx.c dmam_free_coherent(rx_ring->dev, rx_ring->size, rx_ring 327 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->desc, rx_ring->dma); rx_ring 328 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->desc = NULL; rx_ring 338 drivers/net/ethernet/intel/ice/ice_txrx.c int ice_setup_rx_ring(struct ice_ring *rx_ring) rx_ring 340 drivers/net/ethernet/intel/ice/ice_txrx.c struct device *dev = rx_ring->dev; rx_ring 346 drivers/net/ethernet/intel/ice/ice_txrx.c WARN_ON(rx_ring->rx_buf); rx_ring 347 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->rx_buf = rx_ring 348 drivers/net/ethernet/intel/ice/ice_txrx.c devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count, rx_ring 350 drivers/net/ethernet/intel/ice/ice_txrx.c if (!rx_ring->rx_buf) rx_ring 354 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), rx_ring 356 
drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, rx_ring 358 drivers/net/ethernet/intel/ice/ice_txrx.c if (!rx_ring->desc) { rx_ring 360 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->size); rx_ring 364 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->next_to_use = 0; rx_ring 365 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->next_to_clean = 0; rx_ring 369 drivers/net/ethernet/intel/ice/ice_txrx.c devm_kfree(dev, rx_ring->rx_buf); rx_ring 370 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->rx_buf = NULL; rx_ring 379 drivers/net/ethernet/intel/ice/ice_txrx.c static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val) rx_ring 381 drivers/net/ethernet/intel/ice/ice_txrx.c u16 prev_ntu = rx_ring->next_to_use; rx_ring 383 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->next_to_use = val; rx_ring 386 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->next_to_alloc = val; rx_ring 401 drivers/net/ethernet/intel/ice/ice_txrx.c writel(val, rx_ring->tail); rx_ring 414 drivers/net/ethernet/intel/ice/ice_txrx.c ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi) rx_ring 421 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->rx_stats.page_reuse_count++; rx_ring 428 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->rx_stats.alloc_page_failed++; rx_ring 433 drivers/net/ethernet/intel/ice/ice_txrx.c dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE, rx_ring 439 drivers/net/ethernet/intel/ice/ice_txrx.c if (dma_mapping_error(rx_ring->dev, dma)) { rx_ring 441 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->rx_stats.alloc_page_failed++; rx_ring 467 drivers/net/ethernet/intel/ice/ice_txrx.c bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) rx_ring 470 drivers/net/ethernet/intel/ice/ice_txrx.c u16 ntu = rx_ring->next_to_use; rx_ring 474 drivers/net/ethernet/intel/ice/ice_txrx.c if (!rx_ring->netdev || !cleaned_count) rx_ring 478 drivers/net/ethernet/intel/ice/ice_txrx.c rx_desc = ICE_RX_DESC(rx_ring, ntu); rx_ring 479 drivers/net/ethernet/intel/ice/ice_txrx.c bi = &rx_ring->rx_buf[ntu]; rx_ring 483 drivers/net/ethernet/intel/ice/ice_txrx.c if (!ice_alloc_mapped_page(rx_ring, bi)) rx_ring 487 drivers/net/ethernet/intel/ice/ice_txrx.c dma_sync_single_range_for_device(rx_ring->dev, bi->dma, rx_ring 500 drivers/net/ethernet/intel/ice/ice_txrx.c if (unlikely(ntu == rx_ring->count)) { rx_ring 501 drivers/net/ethernet/intel/ice/ice_txrx.c rx_desc = ICE_RX_DESC(rx_ring, 0); rx_ring 502 drivers/net/ethernet/intel/ice/ice_txrx.c bi = rx_ring->rx_buf; rx_ring 512 drivers/net/ethernet/intel/ice/ice_txrx.c if (rx_ring->next_to_use != ntu) rx_ring 513 drivers/net/ethernet/intel/ice/ice_txrx.c ice_release_rx_desc(rx_ring, ntu); rx_ring 628 drivers/net/ethernet/intel/ice/ice_txrx.c ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf) rx_ring 630 drivers/net/ethernet/intel/ice/ice_txrx.c u16 nta = rx_ring->next_to_alloc; rx_ring 633 drivers/net/ethernet/intel/ice/ice_txrx.c new_buf = &rx_ring->rx_buf[nta]; rx_ring 637 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; rx_ring 659 drivers/net/ethernet/intel/ice/ice_txrx.c ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb, rx_ring 664 drivers/net/ethernet/intel/ice/ice_txrx.c rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; rx_ring 671 drivers/net/ethernet/intel/ice/ice_txrx.c dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, rx_ring 692 drivers/net/ethernet/intel/ice/ice_txrx.c ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, rx_ring 706 drivers/net/ethernet/intel/ice/ice_txrx.c skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE, rx_ring 711 drivers/net/ethernet/intel/ice/ice_txrx.c skb_record_rx_queue(skb, rx_ring->q_index); rx_ring 751 drivers/net/ethernet/intel/ice/ice_txrx.c static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) rx_ring 758 drivers/net/ethernet/intel/ice/ice_txrx.c ice_reuse_rx_page(rx_ring, rx_buf); rx_ring 759 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->rx_stats.page_reuse_count++; rx_ring 762 drivers/net/ethernet/intel/ice/ice_txrx.c dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE, rx_ring 822 drivers/net/ethernet/intel/ice/ice_txrx.c ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, rx_ring 825 drivers/net/ethernet/intel/ice/ice_txrx.c u32 ntc = rx_ring->next_to_clean + 1; rx_ring 828 drivers/net/ethernet/intel/ice/ice_txrx.c ntc = (ntc < rx_ring->count) ? ntc : 0; rx_ring 829 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->next_to_clean = ntc; rx_ring 831 drivers/net/ethernet/intel/ice/ice_txrx.c prefetch(ICE_RX_DESC(rx_ring, ntc)); rx_ring 839 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->rx_buf[ntc].skb = skb; rx_ring 840 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->rx_stats.non_eop_descs++; rx_ring 864 drivers/net/ethernet/intel/ice/ice_txrx.c ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, rx_ring 870 drivers/net/ethernet/intel/ice/ice_txrx.c if (!(rx_ring->netdev->features & NETIF_F_RXHASH)) rx_ring 963 drivers/net/ethernet/intel/ice/ice_txrx.c ice_process_skb_fields(struct ice_ring *rx_ring, rx_ring 967 drivers/net/ethernet/intel/ice/ice_txrx.c ice_rx_hash(rx_ring, rx_desc, skb, ptype); rx_ring 970 drivers/net/ethernet/intel/ice/ice_txrx.c skb->protocol = eth_type_trans(skb, rx_ring->netdev); rx_ring 972 drivers/net/ethernet/intel/ice/ice_txrx.c ice_rx_csum(rx_ring, skb, rx_desc, ptype); rx_ring 985 drivers/net/ethernet/intel/ice/ice_txrx.c ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) rx_ring 987 drivers/net/ethernet/intel/ice/ice_txrx.c if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && rx_ring 990 drivers/net/ethernet/intel/ice/ice_txrx.c napi_gro_receive(&rx_ring->q_vector->napi, skb); rx_ring 1005 drivers/net/ethernet/intel/ice/ice_txrx.c static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) rx_ring 1008 drivers/net/ethernet/intel/ice/ice_txrx.c u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); rx_ring 1022 drivers/net/ethernet/intel/ice/ice_txrx.c rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); rx_ring 1043 drivers/net/ethernet/intel/ice/ice_txrx.c rx_buf = ice_get_rx_buf(rx_ring, &skb, size); rx_ring 1048 drivers/net/ethernet/intel/ice/ice_txrx.c skb = ice_construct_skb(rx_ring, rx_buf, size); rx_ring 1052 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->rx_stats.alloc_buf_failed++; rx_ring 1058 drivers/net/ethernet/intel/ice/ice_txrx.c ice_put_rx_buf(rx_ring, rx_buf); rx_ring 1062 drivers/net/ethernet/intel/ice/ice_txrx.c if 
(ice_is_non_eop(rx_ring, rx_desc, skb)) rx_ring 1090 drivers/net/ethernet/intel/ice/ice_txrx.c ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); rx_ring 1093 drivers/net/ethernet/intel/ice/ice_txrx.c ice_receive_skb(rx_ring, skb, vlan_tag); rx_ring 1100 drivers/net/ethernet/intel/ice/ice_txrx.c failure = ice_alloc_rx_bufs(rx_ring, cleaned_count); rx_ring 1103 drivers/net/ethernet/intel/ice/ice_txrx.c u64_stats_update_begin(&rx_ring->syncp); rx_ring 1104 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->stats.pkts += total_rx_pkts; rx_ring 1105 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->stats.bytes += total_rx_bytes; rx_ring 1106 drivers/net/ethernet/intel/ice/ice_txrx.c u64_stats_update_end(&rx_ring->syncp); rx_ring 1107 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->q_vector->rx.total_pkts += total_rx_pkts; rx_ring 1108 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ring->q_vector->rx.total_bytes += total_rx_bytes; rx_ring 236 drivers/net/ethernet/intel/ice/ice_txrx.h void ice_clean_rx_ring(struct ice_ring *rx_ring); rx_ring 238 drivers/net/ethernet/intel/ice/ice_txrx.h int ice_setup_rx_ring(struct ice_ring *rx_ring); rx_ring 240 drivers/net/ethernet/intel/ice/ice_txrx.h void ice_free_rx_ring(struct ice_ring *rx_ring); rx_ring 491 drivers/net/ethernet/intel/igb/igb.h struct igb_ring *rx_ring[16]; rx_ring 901 drivers/net/ethernet/intel/igb/igb_ethtool.c adapter->rx_ring[i]->count = new_rx_count; rx_ring 953 drivers/net/ethernet/intel/igb/igb_ethtool.c memcpy(&temp_ring[i], adapter->rx_ring[i], rx_ring 969 drivers/net/ethernet/intel/igb/igb_ethtool.c igb_free_rx_resources(adapter->rx_ring[i]); rx_ring 971 drivers/net/ethernet/intel/igb/igb_ethtool.c memcpy(adapter->rx_ring[i], &temp_ring[i], rx_ring 1548 drivers/net/ethernet/intel/igb/igb_ethtool.c struct igb_ring *rx_ring = &adapter->test_rx_ring; rx_ring 1567 drivers/net/ethernet/intel/igb/igb_ethtool.c rx_ring->count = IGB_DEFAULT_RXD; rx_ring 1568 drivers/net/ethernet/intel/igb/igb_ethtool.c rx_ring->dev = &adapter->pdev->dev; rx_ring 1569 drivers/net/ethernet/intel/igb/igb_ethtool.c rx_ring->netdev = adapter->netdev; rx_ring 1570 drivers/net/ethernet/intel/igb/igb_ethtool.c rx_ring->reg_idx = adapter->vfs_allocated_count; rx_ring 1572 drivers/net/ethernet/intel/igb/igb_ethtool.c if (igb_setup_rx_resources(rx_ring)) { rx_ring 1582 drivers/net/ethernet/intel/igb/igb_ethtool.c igb_configure_rx_ring(adapter, rx_ring); rx_ring 1584 drivers/net/ethernet/intel/igb/igb_ethtool.c igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring)); rx_ring 1801 drivers/net/ethernet/intel/igb/igb_ethtool.c static int igb_clean_test_rings(struct igb_ring *rx_ring, rx_ring 1811 drivers/net/ethernet/intel/igb/igb_ethtool.c rx_ntc = rx_ring->next_to_clean; rx_ring 1813 drivers/net/ethernet/intel/igb/igb_ethtool.c rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); rx_ring 1817 drivers/net/ethernet/intel/igb/igb_ethtool.c rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; rx_ring 1820 drivers/net/ethernet/intel/igb/igb_ethtool.c dma_sync_single_for_cpu(rx_ring->dev, rx_ring 1830 drivers/net/ethernet/intel/igb/igb_ethtool.c dma_sync_single_for_device(rx_ring->dev, rx_ring 1850 drivers/net/ethernet/intel/igb/igb_ethtool.c if (rx_ntc == rx_ring->count) rx_ring 1857 drivers/net/ethernet/intel/igb/igb_ethtool.c rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); rx_ring 1863 drivers/net/ethernet/intel/igb/igb_ethtool.c igb_alloc_rx_buffers(rx_ring, count); rx_ring 1864 drivers/net/ethernet/intel/igb/igb_ethtool.c rx_ring->next_to_clean = rx_ntc; rx_ring 1873 
drivers/net/ethernet/intel/igb/igb_ethtool.c struct igb_ring *rx_ring = &adapter->test_rx_ring; rx_ring 1894 drivers/net/ethernet/intel/igb/igb_ethtool.c if (rx_ring->count <= tx_ring->count) rx_ring 1897 drivers/net/ethernet/intel/igb/igb_ethtool.c lc = ((rx_ring->count / 64) * 2) + 1; rx_ring 1919 drivers/net/ethernet/intel/igb/igb_ethtool.c good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size); rx_ring 2333 drivers/net/ethernet/intel/igb/igb_ethtool.c ring = adapter->rx_ring[j]; rx_ring 366 drivers/net/ethernet/intel/igb/igb_main.c struct igb_ring *rx_ring; rx_ring 471 drivers/net/ethernet/intel/igb/igb_main.c rx_ring = adapter->rx_ring[n]; rx_ring 473 drivers/net/ethernet/intel/igb/igb_main.c n, rx_ring->next_to_use, rx_ring->next_to_clean); rx_ring 504 drivers/net/ethernet/intel/igb/igb_main.c rx_ring = adapter->rx_ring[n]; rx_ring 506 drivers/net/ethernet/intel/igb/igb_main.c pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); rx_ring 511 drivers/net/ethernet/intel/igb/igb_main.c for (i = 0; i < rx_ring->count; i++) { rx_ring 514 drivers/net/ethernet/intel/igb/igb_main.c buffer_info = &rx_ring->rx_buffer_info[i]; rx_ring 515 drivers/net/ethernet/intel/igb/igb_main.c rx_desc = IGB_RX_DESC(rx_ring, i); rx_ring 519 drivers/net/ethernet/intel/igb/igb_main.c if (i == rx_ring->next_to_use) rx_ring 521 drivers/net/ethernet/intel/igb/igb_main.c else if (i == rx_ring->next_to_clean) rx_ring 548 drivers/net/ethernet/intel/igb/igb_main.c igb_rx_bufsz(rx_ring), true); rx_ring 720 drivers/net/ethernet/intel/igb/igb_main.c adapter->rx_ring[i]->reg_idx = rbase_offset + rx_ring 733 drivers/net/ethernet/intel/igb/igb_main.c adapter->rx_ring[i]->reg_idx = rbase_offset + i; rx_ring 1035 drivers/net/ethernet/intel/igb/igb_main.c adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; rx_ring 1309 drivers/net/ethernet/intel/igb/igb_main.c adapter->rx_ring[rxr_idx] = ring; rx_ring 2015 drivers/net/ethernet/intel/igb/igb_main.c struct igb_ring *ring = adapter->rx_ring[i]; rx_ring 4187 drivers/net/ethernet/intel/igb/igb_main.c int igb_setup_rx_resources(struct igb_ring *rx_ring) rx_ring 4189 drivers/net/ethernet/intel/igb/igb_main.c struct device *dev = rx_ring->dev; rx_ring 4192 drivers/net/ethernet/intel/igb/igb_main.c size = sizeof(struct igb_rx_buffer) * rx_ring->count; rx_ring 4194 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->rx_buffer_info = vmalloc(size); rx_ring 4195 drivers/net/ethernet/intel/igb/igb_main.c if (!rx_ring->rx_buffer_info) rx_ring 4199 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); rx_ring 4200 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring 4202 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, rx_ring 4203 drivers/net/ethernet/intel/igb/igb_main.c &rx_ring->dma, GFP_KERNEL); rx_ring 4204 drivers/net/ethernet/intel/igb/igb_main.c if (!rx_ring->desc) rx_ring 4207 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->next_to_alloc = 0; rx_ring 4208 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->next_to_clean = 0; rx_ring 4209 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->next_to_use = 0; rx_ring 4214 drivers/net/ethernet/intel/igb/igb_main.c vfree(rx_ring->rx_buffer_info); rx_ring 4215 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->rx_buffer_info = NULL; rx_ring 4233 drivers/net/ethernet/intel/igb/igb_main.c err = igb_setup_rx_resources(adapter->rx_ring[i]); rx_ring 4238 drivers/net/ethernet/intel/igb/igb_main.c 
igb_free_rx_resources(adapter->rx_ring[i]); rx_ring 4535 drivers/net/ethernet/intel/igb/igb_main.c struct igb_ring *rx_ring) rx_ring 4538 drivers/net/ethernet/intel/igb/igb_main.c clear_ring_build_skb_enabled(rx_ring); rx_ring 4539 drivers/net/ethernet/intel/igb/igb_main.c clear_ring_uses_large_buffer(rx_ring); rx_ring 4544 drivers/net/ethernet/intel/igb/igb_main.c set_ring_build_skb_enabled(rx_ring); rx_ring 4550 drivers/net/ethernet/intel/igb/igb_main.c set_ring_uses_large_buffer(rx_ring); rx_ring 4571 drivers/net/ethernet/intel/igb/igb_main.c struct igb_ring *rx_ring = adapter->rx_ring[i]; rx_ring 4573 drivers/net/ethernet/intel/igb/igb_main.c igb_set_rx_buffer_len(adapter, rx_ring); rx_ring 4574 drivers/net/ethernet/intel/igb/igb_main.c igb_configure_rx_ring(adapter, rx_ring); rx_ring 4696 drivers/net/ethernet/intel/igb/igb_main.c void igb_free_rx_resources(struct igb_ring *rx_ring) rx_ring 4698 drivers/net/ethernet/intel/igb/igb_main.c igb_clean_rx_ring(rx_ring); rx_ring 4700 drivers/net/ethernet/intel/igb/igb_main.c vfree(rx_ring->rx_buffer_info); rx_ring 4701 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->rx_buffer_info = NULL; rx_ring 4704 drivers/net/ethernet/intel/igb/igb_main.c if (!rx_ring->desc) rx_ring 4707 drivers/net/ethernet/intel/igb/igb_main.c dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring 4708 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->desc, rx_ring->dma); rx_ring 4710 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->desc = NULL; rx_ring 4724 drivers/net/ethernet/intel/igb/igb_main.c if (adapter->rx_ring[i]) rx_ring 4725 drivers/net/ethernet/intel/igb/igb_main.c igb_free_rx_resources(adapter->rx_ring[i]); rx_ring 4732 drivers/net/ethernet/intel/igb/igb_main.c static void igb_clean_rx_ring(struct igb_ring *rx_ring) rx_ring 4734 drivers/net/ethernet/intel/igb/igb_main.c u16 i = rx_ring->next_to_clean; rx_ring 4736 drivers/net/ethernet/intel/igb/igb_main.c dev_kfree_skb(rx_ring->skb); rx_ring 4737 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->skb = NULL; rx_ring 4740 drivers/net/ethernet/intel/igb/igb_main.c while (i != rx_ring->next_to_alloc) { rx_ring 4741 drivers/net/ethernet/intel/igb/igb_main.c struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; rx_ring 4746 drivers/net/ethernet/intel/igb/igb_main.c dma_sync_single_range_for_cpu(rx_ring->dev, rx_ring 4749 drivers/net/ethernet/intel/igb/igb_main.c igb_rx_bufsz(rx_ring), rx_ring 4753 drivers/net/ethernet/intel/igb/igb_main.c dma_unmap_page_attrs(rx_ring->dev, rx_ring 4755 drivers/net/ethernet/intel/igb/igb_main.c igb_rx_pg_size(rx_ring), rx_ring 4762 drivers/net/ethernet/intel/igb/igb_main.c if (i == rx_ring->count) rx_ring 4766 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->next_to_alloc = 0; rx_ring 4767 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->next_to_clean = 0; rx_ring 4768 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->next_to_use = 0; rx_ring 4780 drivers/net/ethernet/intel/igb/igb_main.c if (adapter->rx_ring[i]) rx_ring 4781 drivers/net/ethernet/intel/igb/igb_main.c igb_clean_rx_ring(adapter->rx_ring[i]); rx_ring 6286 drivers/net/ethernet/intel/igb/igb_main.c struct igb_ring *ring = adapter->rx_ring[i]; rx_ring 6615 drivers/net/ethernet/intel/igb/igb_main.c struct igb_ring *rx_ring, rx_ring 6631 drivers/net/ethernet/intel/igb/igb_main.c wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl); rx_ring 7916 drivers/net/ethernet/intel/igb/igb_main.c static void igb_reuse_rx_page(struct igb_ring *rx_ring, rx_ring 7920 drivers/net/ethernet/intel/igb/igb_main.c u16 nta 
= rx_ring->next_to_alloc; rx_ring 7922 drivers/net/ethernet/intel/igb/igb_main.c new_buff = &rx_ring->rx_buffer_info[nta]; rx_ring 7926 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; rx_ring 7985 drivers/net/ethernet/intel/igb/igb_main.c static void igb_add_rx_frag(struct igb_ring *rx_ring, rx_ring 7991 drivers/net/ethernet/intel/igb/igb_main.c unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; rx_ring 7993 drivers/net/ethernet/intel/igb/igb_main.c unsigned int truesize = ring_uses_build_skb(rx_ring) ? rx_ring 8006 drivers/net/ethernet/intel/igb/igb_main.c static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring, rx_ring 8013 drivers/net/ethernet/intel/igb/igb_main.c unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; rx_ring 8027 drivers/net/ethernet/intel/igb/igb_main.c skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN); rx_ring 8032 drivers/net/ethernet/intel/igb/igb_main.c igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); rx_ring 8063 drivers/net/ethernet/intel/igb/igb_main.c static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, rx_ring 8070 drivers/net/ethernet/intel/igb/igb_main.c unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; rx_ring 8094 drivers/net/ethernet/intel/igb/igb_main.c igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb); rx_ring 8169 drivers/net/ethernet/intel/igb/igb_main.c static bool igb_is_non_eop(struct igb_ring *rx_ring, rx_ring 8172 drivers/net/ethernet/intel/igb/igb_main.c u32 ntc = rx_ring->next_to_clean + 1; rx_ring 8175 drivers/net/ethernet/intel/igb/igb_main.c ntc = (ntc < rx_ring->count) ? ntc : 0; rx_ring 8176 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->next_to_clean = ntc; rx_ring 8178 drivers/net/ethernet/intel/igb/igb_main.c prefetch(IGB_RX_DESC(rx_ring, ntc)); rx_ring 8200 drivers/net/ethernet/intel/igb/igb_main.c static bool igb_cleanup_headers(struct igb_ring *rx_ring, rx_ring 8206 drivers/net/ethernet/intel/igb/igb_main.c struct net_device *netdev = rx_ring->netdev; rx_ring 8230 drivers/net/ethernet/intel/igb/igb_main.c static void igb_process_skb_fields(struct igb_ring *rx_ring, rx_ring 8234 drivers/net/ethernet/intel/igb/igb_main.c struct net_device *dev = rx_ring->netdev; rx_ring 8236 drivers/net/ethernet/intel/igb/igb_main.c igb_rx_hash(rx_ring, rx_desc, skb); rx_ring 8238 drivers/net/ethernet/intel/igb/igb_main.c igb_rx_checksum(rx_ring, rx_desc, skb); rx_ring 8242 drivers/net/ethernet/intel/igb/igb_main.c igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); rx_ring 8249 drivers/net/ethernet/intel/igb/igb_main.c test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) rx_ring 8257 drivers/net/ethernet/intel/igb/igb_main.c skb_record_rx_queue(skb, rx_ring->queue_index); rx_ring 8259 drivers/net/ethernet/intel/igb/igb_main.c skb->protocol = eth_type_trans(skb, rx_ring->netdev); rx_ring 8262 drivers/net/ethernet/intel/igb/igb_main.c static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring, rx_ring 8267 drivers/net/ethernet/intel/igb/igb_main.c rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; rx_ring 8271 drivers/net/ethernet/intel/igb/igb_main.c dma_sync_single_range_for_cpu(rx_ring->dev, rx_ring 8282 drivers/net/ethernet/intel/igb/igb_main.c static void igb_put_rx_buffer(struct igb_ring *rx_ring, rx_ring 8287 drivers/net/ethernet/intel/igb/igb_main.c igb_reuse_rx_page(rx_ring, rx_buffer); rx_ring 8292 drivers/net/ethernet/intel/igb/igb_main.c dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, rx_ring 8293 
drivers/net/ethernet/intel/igb/igb_main.c igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE, rx_ring 8305 drivers/net/ethernet/intel/igb/igb_main.c struct igb_ring *rx_ring = q_vector->rx.ring; rx_ring 8306 drivers/net/ethernet/intel/igb/igb_main.c struct sk_buff *skb = rx_ring->skb; rx_ring 8308 drivers/net/ethernet/intel/igb/igb_main.c u16 cleaned_count = igb_desc_unused(rx_ring); rx_ring 8317 drivers/net/ethernet/intel/igb/igb_main.c igb_alloc_rx_buffers(rx_ring, cleaned_count); rx_ring 8321 drivers/net/ethernet/intel/igb/igb_main.c rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean); rx_ring 8332 drivers/net/ethernet/intel/igb/igb_main.c rx_buffer = igb_get_rx_buffer(rx_ring, size); rx_ring 8336 drivers/net/ethernet/intel/igb/igb_main.c igb_add_rx_frag(rx_ring, rx_buffer, skb, size); rx_ring 8337 drivers/net/ethernet/intel/igb/igb_main.c else if (ring_uses_build_skb(rx_ring)) rx_ring 8338 drivers/net/ethernet/intel/igb/igb_main.c skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size); rx_ring 8340 drivers/net/ethernet/intel/igb/igb_main.c skb = igb_construct_skb(rx_ring, rx_buffer, rx_ring 8345 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->rx_stats.alloc_failed++; rx_ring 8350 drivers/net/ethernet/intel/igb/igb_main.c igb_put_rx_buffer(rx_ring, rx_buffer); rx_ring 8354 drivers/net/ethernet/intel/igb/igb_main.c if (igb_is_non_eop(rx_ring, rx_desc)) rx_ring 8358 drivers/net/ethernet/intel/igb/igb_main.c if (igb_cleanup_headers(rx_ring, rx_desc, skb)) { rx_ring 8367 drivers/net/ethernet/intel/igb/igb_main.c igb_process_skb_fields(rx_ring, rx_desc, skb); rx_ring 8379 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->skb = skb; rx_ring 8381 drivers/net/ethernet/intel/igb/igb_main.c u64_stats_update_begin(&rx_ring->rx_syncp); rx_ring 8382 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->rx_stats.packets += total_packets; rx_ring 8383 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->rx_stats.bytes += total_bytes; rx_ring 8384 drivers/net/ethernet/intel/igb/igb_main.c u64_stats_update_end(&rx_ring->rx_syncp); rx_ring 8389 drivers/net/ethernet/intel/igb/igb_main.c igb_alloc_rx_buffers(rx_ring, cleaned_count); rx_ring 8394 drivers/net/ethernet/intel/igb/igb_main.c static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring) rx_ring 8396 drivers/net/ethernet/intel/igb/igb_main.c return ring_uses_build_skb(rx_ring) ? 
IGB_SKB_PAD : 0; rx_ring 8399 drivers/net/ethernet/intel/igb/igb_main.c static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, rx_ring 8410 drivers/net/ethernet/intel/igb/igb_main.c page = dev_alloc_pages(igb_rx_pg_order(rx_ring)); rx_ring 8412 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->rx_stats.alloc_failed++; rx_ring 8417 drivers/net/ethernet/intel/igb/igb_main.c dma = dma_map_page_attrs(rx_ring->dev, page, 0, rx_ring 8418 drivers/net/ethernet/intel/igb/igb_main.c igb_rx_pg_size(rx_ring), rx_ring 8425 drivers/net/ethernet/intel/igb/igb_main.c if (dma_mapping_error(rx_ring->dev, dma)) { rx_ring 8426 drivers/net/ethernet/intel/igb/igb_main.c __free_pages(page, igb_rx_pg_order(rx_ring)); rx_ring 8428 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->rx_stats.alloc_failed++; rx_ring 8434 drivers/net/ethernet/intel/igb/igb_main.c bi->page_offset = igb_rx_offset(rx_ring); rx_ring 8444 drivers/net/ethernet/intel/igb/igb_main.c void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) rx_ring 8448 drivers/net/ethernet/intel/igb/igb_main.c u16 i = rx_ring->next_to_use; rx_ring 8455 drivers/net/ethernet/intel/igb/igb_main.c rx_desc = IGB_RX_DESC(rx_ring, i); rx_ring 8456 drivers/net/ethernet/intel/igb/igb_main.c bi = &rx_ring->rx_buffer_info[i]; rx_ring 8457 drivers/net/ethernet/intel/igb/igb_main.c i -= rx_ring->count; rx_ring 8459 drivers/net/ethernet/intel/igb/igb_main.c bufsz = igb_rx_bufsz(rx_ring); rx_ring 8462 drivers/net/ethernet/intel/igb/igb_main.c if (!igb_alloc_mapped_page(rx_ring, bi)) rx_ring 8466 drivers/net/ethernet/intel/igb/igb_main.c dma_sync_single_range_for_device(rx_ring->dev, bi->dma, rx_ring 8479 drivers/net/ethernet/intel/igb/igb_main.c rx_desc = IGB_RX_DESC(rx_ring, 0); rx_ring 8480 drivers/net/ethernet/intel/igb/igb_main.c bi = rx_ring->rx_buffer_info; rx_ring 8481 drivers/net/ethernet/intel/igb/igb_main.c i -= rx_ring->count; rx_ring 8490 drivers/net/ethernet/intel/igb/igb_main.c i += rx_ring->count; rx_ring 8492 drivers/net/ethernet/intel/igb/igb_main.c if (rx_ring->next_to_use != i) { rx_ring 8494 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->next_to_use = i; rx_ring 8497 drivers/net/ethernet/intel/igb/igb_main.c rx_ring->next_to_alloc = i; rx_ring 8505 drivers/net/ethernet/intel/igb/igb_main.c writel(i, rx_ring->tail); rx_ring 184 drivers/net/ethernet/intel/igbvf/ethtool.c struct igbvf_ring *rx_ring = adapter->rx_ring; rx_ring 188 drivers/net/ethernet/intel/igbvf/ethtool.c ring->rx_pending = rx_ring->count; rx_ring 212 drivers/net/ethernet/intel/igbvf/ethtool.c (new_rx_count == adapter->rx_ring->count)) { rx_ring 222 drivers/net/ethernet/intel/igbvf/ethtool.c adapter->rx_ring->count = new_rx_count; rx_ring 251 drivers/net/ethernet/intel/igbvf/ethtool.c if (new_rx_count != adapter->rx_ring->count) { rx_ring 252 drivers/net/ethernet/intel/igbvf/ethtool.c memcpy(temp_ring, adapter->rx_ring, sizeof(struct igbvf_ring)); rx_ring 259 drivers/net/ethernet/intel/igbvf/ethtool.c igbvf_free_rx_resources(adapter->rx_ring); rx_ring 261 drivers/net/ethernet/intel/igbvf/ethtool.c memcpy(adapter->rx_ring, temp_ring, sizeof(struct igbvf_ring)); rx_ring 361 drivers/net/ethernet/intel/igbvf/ethtool.c hw->hw_addr + adapter->rx_ring->itr_register); rx_ring 204 drivers/net/ethernet/intel/igbvf/igbvf.h struct igbvf_ring *rx_ring; rx_ring 100 drivers/net/ethernet/intel/igbvf/netdev.c napi_gro_receive(&adapter->rx_ring->napi, skb); rx_ring 133 drivers/net/ethernet/intel/igbvf/netdev.c static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, rx_ring 136 
drivers/net/ethernet/intel/igbvf/netdev.c struct igbvf_adapter *adapter = rx_ring->adapter;
rx_ring 145 drivers/net/ethernet/intel/igbvf/netdev.c i = rx_ring->next_to_use;
rx_ring 146 drivers/net/ethernet/intel/igbvf/netdev.c buffer_info = &rx_ring->buffer_info[i];
rx_ring 154 drivers/net/ethernet/intel/igbvf/netdev.c rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
rx_ring 212 drivers/net/ethernet/intel/igbvf/netdev.c if (i == rx_ring->count)
rx_ring 214 drivers/net/ethernet/intel/igbvf/netdev.c buffer_info = &rx_ring->buffer_info[i];
rx_ring 218 drivers/net/ethernet/intel/igbvf/netdev.c if (rx_ring->next_to_use != i) {
rx_ring 219 drivers/net/ethernet/intel/igbvf/netdev.c rx_ring->next_to_use = i;
rx_ring 221 drivers/net/ethernet/intel/igbvf/netdev.c i = (rx_ring->count - 1);
rx_ring 231 drivers/net/ethernet/intel/igbvf/netdev.c writel(i, adapter->hw.hw_addr + rx_ring->tail);
rx_ring 245 drivers/net/ethernet/intel/igbvf/netdev.c struct igbvf_ring *rx_ring = adapter->rx_ring;
rx_ring 257 drivers/net/ethernet/intel/igbvf/netdev.c i = rx_ring->next_to_clean;
rx_ring 258 drivers/net/ethernet/intel/igbvf/netdev.c rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
rx_ring 267 drivers/net/ethernet/intel/igbvf/netdev.c buffer_info = &rx_ring->buffer_info[i];
rx_ring 327 drivers/net/ethernet/intel/igbvf/netdev.c if (i == rx_ring->count)
rx_ring 329 drivers/net/ethernet/intel/igbvf/netdev.c next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
rx_ring 331 drivers/net/ethernet/intel/igbvf/netdev.c next_buffer = &rx_ring->buffer_info[i];
rx_ring 361 drivers/net/ethernet/intel/igbvf/netdev.c igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
rx_ring 372 drivers/net/ethernet/intel/igbvf/netdev.c rx_ring->next_to_clean = i;
rx_ring 373 drivers/net/ethernet/intel/igbvf/netdev.c cleaned_count = igbvf_desc_unused(rx_ring);
rx_ring 376 drivers/net/ethernet/intel/igbvf/netdev.c igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
rx_ring 453 drivers/net/ethernet/intel/igbvf/netdev.c struct igbvf_ring *rx_ring)
rx_ring 458 drivers/net/ethernet/intel/igbvf/netdev.c size = sizeof(struct igbvf_buffer) * rx_ring->count;
rx_ring 459 drivers/net/ethernet/intel/igbvf/netdev.c rx_ring->buffer_info = vzalloc(size);
rx_ring 460 drivers/net/ethernet/intel/igbvf/netdev.c if (!rx_ring->buffer_info)
rx_ring 466 drivers/net/ethernet/intel/igbvf/netdev.c rx_ring->size = rx_ring->count * desc_len;
rx_ring 467 drivers/net/ethernet/intel/igbvf/netdev.c rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring 469 drivers/net/ethernet/intel/igbvf/netdev.c rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
rx_ring 470 drivers/net/ethernet/intel/igbvf/netdev.c &rx_ring->dma, GFP_KERNEL);
rx_ring 471 drivers/net/ethernet/intel/igbvf/netdev.c if (!rx_ring->desc)
rx_ring 474 drivers/net/ethernet/intel/igbvf/netdev.c rx_ring->next_to_clean = 0;
rx_ring 475 drivers/net/ethernet/intel/igbvf/netdev.c rx_ring->next_to_use = 0;
rx_ring 477 drivers/net/ethernet/intel/igbvf/netdev.c rx_ring->adapter = adapter;
rx_ring 482 drivers/net/ethernet/intel/igbvf/netdev.c vfree(rx_ring->buffer_info);
rx_ring 483 drivers/net/ethernet/intel/igbvf/netdev.c rx_ring->buffer_info = NULL;
rx_ring 547 drivers/net/ethernet/intel/igbvf/netdev.c static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
rx_ring 549 drivers/net/ethernet/intel/igbvf/netdev.c struct igbvf_adapter *adapter = rx_ring->adapter;
rx_ring 555 drivers/net/ethernet/intel/igbvf/netdev.c if (!rx_ring->buffer_info)
rx_ring 559 drivers/net/ethernet/intel/igbvf/netdev.c for (i = 0; i < rx_ring->count; i++) {
rx_ring 560 drivers/net/ethernet/intel/igbvf/netdev.c buffer_info = &rx_ring->buffer_info[i];
rx_ring 592 drivers/net/ethernet/intel/igbvf/netdev.c size = sizeof(struct igbvf_buffer) * rx_ring->count;
rx_ring 593 drivers/net/ethernet/intel/igbvf/netdev.c memset(rx_ring->buffer_info, 0, size);
rx_ring 596 drivers/net/ethernet/intel/igbvf/netdev.c memset(rx_ring->desc, 0, rx_ring->size);
rx_ring 598 drivers/net/ethernet/intel/igbvf/netdev.c rx_ring->next_to_clean = 0;
rx_ring 599 drivers/net/ethernet/intel/igbvf/netdev.c rx_ring->next_to_use = 0;
rx_ring 601 drivers/net/ethernet/intel/igbvf/netdev.c writel(0, adapter->hw.hw_addr + rx_ring->head);
rx_ring 602 drivers/net/ethernet/intel/igbvf/netdev.c writel(0, adapter->hw.hw_addr + rx_ring->tail);
rx_ring 612 drivers/net/ethernet/intel/igbvf/netdev.c void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
rx_ring 614 drivers/net/ethernet/intel/igbvf/netdev.c struct pci_dev *pdev = rx_ring->adapter->pdev;
rx_ring 616 drivers/net/ethernet/intel/igbvf/netdev.c igbvf_clean_rx_ring(rx_ring);
rx_ring 618 drivers/net/ethernet/intel/igbvf/netdev.c vfree(rx_ring->buffer_info);
rx_ring 619 drivers/net/ethernet/intel/igbvf/netdev.c rx_ring->buffer_info = NULL;
rx_ring 621 drivers/net/ethernet/intel/igbvf/netdev.c dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
rx_ring 622 drivers/net/ethernet/intel/igbvf/netdev.c rx_ring->dma);
rx_ring 623 drivers/net/ethernet/intel/igbvf/netdev.c rx_ring->desc = NULL;
rx_ring 741 drivers/net/ethernet/intel/igbvf/netdev.c adapter->rx_ring->itr_range =
rx_ring 742 drivers/net/ethernet/intel/igbvf/netdev.c igbvf_update_itr(adapter, adapter->rx_ring->itr_val,
rx_ring 746 drivers/net/ethernet/intel/igbvf/netdev.c adapter->rx_ring->itr_range == lowest_latency)
rx_ring 747 drivers/net/ethernet/intel/igbvf/netdev.c adapter->rx_ring->itr_range = low_latency;
rx_ring 749 drivers/net/ethernet/intel/igbvf/netdev.c new_itr = igbvf_range_to_itr(adapter->rx_ring->itr_range);
rx_ring 751 drivers/net/ethernet/intel/igbvf/netdev.c if (new_itr != adapter->rx_ring->itr_val) {
rx_ring 752 drivers/net/ethernet/intel/igbvf/netdev.c u32 current_itr = adapter->rx_ring->itr_val;
rx_ring 757 drivers/net/ethernet/intel/igbvf/netdev.c adapter->rx_ring->itr_val = new_itr;
rx_ring 759 drivers/net/ethernet/intel/igbvf/netdev.c adapter->rx_ring->set_itr = 1;
rx_ring 904 drivers/net/ethernet/intel/igbvf/netdev.c if (adapter->rx_ring->set_itr) {
rx_ring 905 drivers/net/ethernet/intel/igbvf/netdev.c writel(adapter->rx_ring->itr_val,
rx_ring 906 drivers/net/ethernet/intel/igbvf/netdev.c adapter->hw.hw_addr + adapter->rx_ring->itr_register);
rx_ring 907 drivers/net/ethernet/intel/igbvf/netdev.c adapter->rx_ring->set_itr = 0;
rx_ring 910 drivers/net/ethernet/intel/igbvf/netdev.c if (napi_schedule_prep(&adapter->rx_ring->napi)) {
rx_ring 913 drivers/net/ethernet/intel/igbvf/netdev.c __napi_schedule(&adapter->rx_ring->napi);
rx_ring 944 drivers/net/ethernet/intel/igbvf/netdev.c adapter->rx_ring[rx_queue].eims_value = BIT(msix_vector);
rx_ring 976 drivers/net/ethernet/intel/igbvf/netdev.c struct igbvf_ring *rx_ring = adapter->rx_ring;
rx_ring 985 drivers/net/ethernet/intel/igbvf/netdev.c adapter->eims_enable_mask |= rx_ring->eims_value;
rx_ring 986 drivers/net/ethernet/intel/igbvf/netdev.c writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register);
rx_ring 1053 drivers/net/ethernet/intel/igbvf/netdev.c sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
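The igbvf_setup_rx_resources() and igbvf_free_rx_resources() hits above trace the two-part ring allocation every driver in this listing repeats: a vzalloc()'d software bookkeeping array beside a 4 KiB-aligned DMA-coherent descriptor block, unwound in reverse order on failure. A minimal sketch of that pattern, assuming a hypothetical struct demo_ring rather than the driver's own types:

#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>

struct demo_buffer {
	struct sk_buff *skb;
	dma_addr_t dma;
};

struct demo_ring {
	unsigned int count;		/* descriptors in the ring */
	unsigned int size;		/* bytes of descriptor memory */
	void *desc;			/* descriptor base address */
	dma_addr_t dma;			/* bus address of desc */
	struct demo_buffer *buffer_info;
	u16 next_to_use, next_to_clean;
};

static int demo_setup_rx_resources(struct device *dev,
				   struct demo_ring *rx_ring, size_t desc_len)
{
	/* one software bookkeeping slot per hardware descriptor */
	rx_ring->buffer_info = vzalloc(sizeof(*rx_ring->buffer_info) *
				       rx_ring->count);
	if (!rx_ring->buffer_info)
		return -ENOMEM;

	/* descriptor memory must be DMA-coherent and 4 KiB aligned */
	rx_ring->size = ALIGN(rx_ring->count * desc_len, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc) {
		vfree(rx_ring->buffer_info);	/* unwind the first half */
		rx_ring->buffer_info = NULL;
		return -ENOMEM;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;
	return 0;
}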
rx_ring 1056 drivers/net/ethernet/intel/igbvf/netdev.c memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
rx_ring 1070 drivers/net/ethernet/intel/igbvf/netdev.c igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
rx_ring 1075 drivers/net/ethernet/intel/igbvf/netdev.c adapter->rx_ring->itr_register = E1000_EITR(vector);
rx_ring 1076 drivers/net/ethernet/intel/igbvf/netdev.c adapter->rx_ring->itr_val = adapter->current_itr;
rx_ring 1102 drivers/net/ethernet/intel/igbvf/netdev.c adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
rx_ring 1103 drivers/net/ethernet/intel/igbvf/netdev.c if (!adapter->rx_ring) {
rx_ring 1108 drivers/net/ethernet/intel/igbvf/netdev.c netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);
rx_ring 1182 drivers/net/ethernet/intel/igbvf/netdev.c struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi);
rx_ring 1183 drivers/net/ethernet/intel/igbvf/netdev.c struct igbvf_adapter *adapter = rx_ring->adapter;
rx_ring 1200 drivers/net/ethernet/intel/igbvf/netdev.c ew32(EIMS, adapter->rx_ring->eims_value);
rx_ring 1365 drivers/net/ethernet/intel/igbvf/netdev.c struct igbvf_ring *rx_ring = adapter->rx_ring;
rx_ring 1378 drivers/net/ethernet/intel/igbvf/netdev.c rdba = rx_ring->dma;
rx_ring 1381 drivers/net/ethernet/intel/igbvf/netdev.c ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
rx_ring 1382 drivers/net/ethernet/intel/igbvf/netdev.c rx_ring->head = E1000_RDH(0);
rx_ring 1383 drivers/net/ethernet/intel/igbvf/netdev.c rx_ring->tail = E1000_RDT(0);
rx_ring 1497 drivers/net/ethernet/intel/igbvf/netdev.c igbvf_alloc_rx_buffers(adapter->rx_ring,
rx_ring 1498 drivers/net/ethernet/intel/igbvf/netdev.c igbvf_desc_unused(adapter->rx_ring));
rx_ring 1544 drivers/net/ethernet/intel/igbvf/netdev.c napi_enable(&adapter->rx_ring->napi);
rx_ring 1585 drivers/net/ethernet/intel/igbvf/netdev.c napi_disable(&adapter->rx_ring->napi);
rx_ring 1599 drivers/net/ethernet/intel/igbvf/netdev.c igbvf_clean_rx_ring(adapter->rx_ring);
rx_ring 1718 drivers/net/ethernet/intel/igbvf/netdev.c err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
rx_ring 1736 drivers/net/ethernet/intel/igbvf/netdev.c napi_enable(&adapter->rx_ring->napi);
rx_ring 1750 drivers/net/ethernet/intel/igbvf/netdev.c igbvf_free_rx_resources(adapter->rx_ring);
rx_ring 1780 drivers/net/ethernet/intel/igbvf/netdev.c igbvf_free_rx_resources(adapter->rx_ring);
rx_ring 1957 drivers/net/ethernet/intel/igbvf/netdev.c ew32(EICS, adapter->rx_ring->eims_value);
rx_ring 2864 drivers/net/ethernet/intel/igbvf/netdev.c adapter->rx_ring->count = 1024;
rx_ring 2891 drivers/net/ethernet/intel/igbvf/netdev.c kfree(adapter->rx_ring);
rx_ring 2937 drivers/net/ethernet/intel/igbvf/netdev.c netif_napi_del(&adapter->rx_ring->napi);
rx_ring 2939 drivers/net/ethernet/intel/igbvf/netdev.c kfree(adapter->rx_ring);
rx_ring 367 drivers/net/ethernet/intel/igc/igc.h struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES];
rx_ring 516 drivers/net/ethernet/intel/igc/igc_ethtool.c adapter->rx_ring[i]->count = new_rx_count;
rx_ring 568 drivers/net/ethernet/intel/igc/igc_ethtool.c memcpy(&temp_ring[i], adapter->rx_ring[i],
rx_ring 583 drivers/net/ethernet/intel/igc/igc_ethtool.c igc_free_rx_resources(adapter->rx_ring[i]);
rx_ring 585 drivers/net/ethernet/intel/igc/igc_ethtool.c memcpy(adapter->rx_ring[i], &temp_ring[i],
rx_ring 765 drivers/net/ethernet/intel/igc/igc_ethtool.c ring = adapter->rx_ring[j];
rx_ring 69 drivers/net/ethernet/intel/igc/igc_main.c static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
rx_ring 356 drivers/net/ethernet/intel/igc/igc_main.c static void igc_clean_rx_ring(struct igc_ring *rx_ring)
rx_ring 358 drivers/net/ethernet/intel/igc/igc_main.c u16 i = rx_ring->next_to_clean;
rx_ring 360 drivers/net/ethernet/intel/igc/igc_main.c dev_kfree_skb(rx_ring->skb);
rx_ring 361 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->skb = NULL;
rx_ring 364 drivers/net/ethernet/intel/igc/igc_main.c while (i != rx_ring->next_to_alloc) {
rx_ring 365 drivers/net/ethernet/intel/igc/igc_main.c struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
rx_ring 370 drivers/net/ethernet/intel/igc/igc_main.c dma_sync_single_range_for_cpu(rx_ring->dev,
rx_ring 373 drivers/net/ethernet/intel/igc/igc_main.c igc_rx_bufsz(rx_ring),
rx_ring 377 drivers/net/ethernet/intel/igc/igc_main.c dma_unmap_page_attrs(rx_ring->dev,
rx_ring 379 drivers/net/ethernet/intel/igc/igc_main.c igc_rx_pg_size(rx_ring),
rx_ring 386 drivers/net/ethernet/intel/igc/igc_main.c if (i == rx_ring->count)
rx_ring 390 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->next_to_alloc = 0;
rx_ring 391 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->next_to_clean = 0;
rx_ring 392 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->next_to_use = 0;
rx_ring 404 drivers/net/ethernet/intel/igc/igc_main.c if (adapter->rx_ring[i])
rx_ring 405 drivers/net/ethernet/intel/igc/igc_main.c igc_clean_rx_ring(adapter->rx_ring[i]);
rx_ring 414 drivers/net/ethernet/intel/igc/igc_main.c void igc_free_rx_resources(struct igc_ring *rx_ring)
rx_ring 416 drivers/net/ethernet/intel/igc/igc_main.c igc_clean_rx_ring(rx_ring);
rx_ring 418 drivers/net/ethernet/intel/igc/igc_main.c vfree(rx_ring->rx_buffer_info);
rx_ring 419 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->rx_buffer_info = NULL;
rx_ring 422 drivers/net/ethernet/intel/igc/igc_main.c if (!rx_ring->desc)
rx_ring 425 drivers/net/ethernet/intel/igc/igc_main.c dma_free_coherent(rx_ring->dev, rx_ring->size,
rx_ring 426 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->desc, rx_ring->dma);
rx_ring 428 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->desc = NULL;
rx_ring 442 drivers/net/ethernet/intel/igc/igc_main.c igc_free_rx_resources(adapter->rx_ring[i]);
rx_ring 451 drivers/net/ethernet/intel/igc/igc_main.c int igc_setup_rx_resources(struct igc_ring *rx_ring)
rx_ring 453 drivers/net/ethernet/intel/igc/igc_main.c struct device *dev = rx_ring->dev;
rx_ring 456 drivers/net/ethernet/intel/igc/igc_main.c size = sizeof(struct igc_rx_buffer) * rx_ring->count;
rx_ring 457 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->rx_buffer_info = vzalloc(size);
rx_ring 458 drivers/net/ethernet/intel/igc/igc_main.c if (!rx_ring->rx_buffer_info)
rx_ring 464 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->size = rx_ring->count * desc_len;
rx_ring 465 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring 467 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
rx_ring 468 drivers/net/ethernet/intel/igc/igc_main.c &rx_ring->dma, GFP_KERNEL);
rx_ring 470 drivers/net/ethernet/intel/igc/igc_main.c if (!rx_ring->desc)
rx_ring 473 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->next_to_alloc = 0;
rx_ring 474 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->next_to_clean = 0;
rx_ring 475 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->next_to_use = 0;
rx_ring 480 drivers/net/ethernet/intel/igc/igc_main.c vfree(rx_ring->rx_buffer_info);
rx_ring 481 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->rx_buffer_info = NULL;
rx_ring 500 drivers/net/ethernet/intel/igc/igc_main.c err = igc_setup_rx_resources(adapter->rx_ring[i]);
rx_ring 505 drivers/net/ethernet/intel/igc/igc_main.c igc_free_rx_resources(adapter->rx_ring[i]);
rx_ring 590 drivers/net/ethernet/intel/igc/igc_main.c igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
rx_ring 1186 drivers/net/ethernet/intel/igc/igc_main.c static void igc_process_skb_fields(struct igc_ring *rx_ring,
rx_ring 1190 drivers/net/ethernet/intel/igc/igc_main.c igc_rx_hash(rx_ring, rx_desc, skb);
rx_ring 1192 drivers/net/ethernet/intel/igc/igc_main.c skb_record_rx_queue(skb, rx_ring->queue_index);
rx_ring 1194 drivers/net/ethernet/intel/igc/igc_main.c skb->protocol = eth_type_trans(skb, rx_ring->netdev);
rx_ring 1197 drivers/net/ethernet/intel/igc/igc_main.c static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
rx_ring 1202 drivers/net/ethernet/intel/igc/igc_main.c rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
rx_ring 1206 drivers/net/ethernet/intel/igc/igc_main.c dma_sync_single_range_for_cpu(rx_ring->dev,
rx_ring 1226 drivers/net/ethernet/intel/igc/igc_main.c static void igc_add_rx_frag(struct igc_ring *rx_ring,
rx_ring 1232 drivers/net/ethernet/intel/igc/igc_main.c unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
rx_ring 1238 drivers/net/ethernet/intel/igc/igc_main.c unsigned int truesize = ring_uses_build_skb(rx_ring) ?
rx_ring 1247 drivers/net/ethernet/intel/igc/igc_main.c static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
rx_ring 1254 drivers/net/ethernet/intel/igc/igc_main.c unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
rx_ring 1286 drivers/net/ethernet/intel/igc/igc_main.c static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
rx_ring 1293 drivers/net/ethernet/intel/igc/igc_main.c unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
rx_ring 1307 drivers/net/ethernet/intel/igc/igc_main.c skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
rx_ring 1344 drivers/net/ethernet/intel/igc/igc_main.c static void igc_reuse_rx_page(struct igc_ring *rx_ring,
rx_ring 1347 drivers/net/ethernet/intel/igc/igc_main.c u16 nta = rx_ring->next_to_alloc;
rx_ring 1350 drivers/net/ethernet/intel/igc/igc_main.c new_buff = &rx_ring->rx_buffer_info[nta];
rx_ring 1354 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
rx_ring 1415 drivers/net/ethernet/intel/igc/igc_main.c static bool igc_is_non_eop(struct igc_ring *rx_ring,
rx_ring 1418 drivers/net/ethernet/intel/igc/igc_main.c u32 ntc = rx_ring->next_to_clean + 1;
rx_ring 1421 drivers/net/ethernet/intel/igc/igc_main.c ntc = (ntc < rx_ring->count) ? ntc : 0;
rx_ring 1422 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->next_to_clean = ntc;
rx_ring 1424 drivers/net/ethernet/intel/igc/igc_main.c prefetch(IGC_RX_DESC(rx_ring, ntc));
rx_ring 1446 drivers/net/ethernet/intel/igc/igc_main.c static bool igc_cleanup_headers(struct igc_ring *rx_ring,
rx_ring 1452 drivers/net/ethernet/intel/igc/igc_main.c struct net_device *netdev = rx_ring->netdev;
rx_ring 1467 drivers/net/ethernet/intel/igc/igc_main.c static void igc_put_rx_buffer(struct igc_ring *rx_ring,
rx_ring 1472 drivers/net/ethernet/intel/igc/igc_main.c igc_reuse_rx_page(rx_ring, rx_buffer);
rx_ring 1477 drivers/net/ethernet/intel/igc/igc_main.c dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
rx_ring 1478 drivers/net/ethernet/intel/igc/igc_main.c igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
rx_ring 1492 drivers/net/ethernet/intel/igc/igc_main.c static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
rx_ring 1495 drivers/net/ethernet/intel/igc/igc_main.c u16 i = rx_ring->next_to_use;
rx_ring 1503 drivers/net/ethernet/intel/igc/igc_main.c rx_desc = IGC_RX_DESC(rx_ring, i);
rx_ring 1504 drivers/net/ethernet/intel/igc/igc_main.c bi = &rx_ring->rx_buffer_info[i];
rx_ring 1505 drivers/net/ethernet/intel/igc/igc_main.c i -= rx_ring->count;
rx_ring 1507 drivers/net/ethernet/intel/igc/igc_main.c bufsz = igc_rx_bufsz(rx_ring);
rx_ring 1510 drivers/net/ethernet/intel/igc/igc_main.c if (!igc_alloc_mapped_page(rx_ring, bi))
rx_ring 1514 drivers/net/ethernet/intel/igc/igc_main.c dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
rx_ring 1527 drivers/net/ethernet/intel/igc/igc_main.c rx_desc = IGC_RX_DESC(rx_ring, 0);
rx_ring 1528 drivers/net/ethernet/intel/igc/igc_main.c bi = rx_ring->rx_buffer_info;
rx_ring 1529 drivers/net/ethernet/intel/igc/igc_main.c i -= rx_ring->count;
rx_ring 1538 drivers/net/ethernet/intel/igc/igc_main.c i += rx_ring->count;
rx_ring 1540 drivers/net/ethernet/intel/igc/igc_main.c if (rx_ring->next_to_use != i) {
rx_ring 1542 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->next_to_use = i;
rx_ring 1545 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->next_to_alloc = i;
rx_ring 1553 drivers/net/ethernet/intel/igc/igc_main.c writel(i, rx_ring->tail);
rx_ring 1560 drivers/net/ethernet/intel/igc/igc_main.c struct igc_ring *rx_ring = q_vector->rx.ring;
rx_ring 1561 drivers/net/ethernet/intel/igc/igc_main.c struct sk_buff *skb = rx_ring->skb;
rx_ring 1562 drivers/net/ethernet/intel/igc/igc_main.c u16 cleaned_count = igc_desc_unused(rx_ring);
rx_ring 1571 drivers/net/ethernet/intel/igc/igc_main.c igc_alloc_rx_buffers(rx_ring, cleaned_count);
rx_ring 1575 drivers/net/ethernet/intel/igc/igc_main.c rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
rx_ring 1586 drivers/net/ethernet/intel/igc/igc_main.c rx_buffer = igc_get_rx_buffer(rx_ring, size);
rx_ring 1590 drivers/net/ethernet/intel/igc/igc_main.c igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
rx_ring 1591 drivers/net/ethernet/intel/igc/igc_main.c else if (ring_uses_build_skb(rx_ring))
rx_ring 1592 drivers/net/ethernet/intel/igc/igc_main.c skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
rx_ring 1594 drivers/net/ethernet/intel/igc/igc_main.c skb = igc_construct_skb(rx_ring, rx_buffer,
rx_ring 1599 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->rx_stats.alloc_failed++;
rx_ring 1604 drivers/net/ethernet/intel/igc/igc_main.c igc_put_rx_buffer(rx_ring, rx_buffer);
rx_ring 1608 drivers/net/ethernet/intel/igc/igc_main.c if (igc_is_non_eop(rx_ring, rx_desc))
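igc_is_non_eop() above advances next_to_clean with a compare instead of a modulo; the same wrap appears in igc_reuse_rx_page() and, later in this listing, in ixgbe_inc_ntc(). A standalone model of that index arithmetic (plain userspace C, not driver code):

#include <stdio.h>

struct ring {
	unsigned int count;
	unsigned int next_to_clean;
};

static unsigned int advance_ntc(struct ring *r)
{
	unsigned int ntc = r->next_to_clean + 1;

	ntc = (ntc < r->count) ? ntc : 0;	/* wrap without '%' */
	r->next_to_clean = ntc;
	return ntc;
}

int main(void)
{
	struct ring r = { .count = 4, .next_to_clean = 2 };

	printf("%u\n", advance_ntc(&r));	/* prints 3 */
	printf("%u\n", advance_ntc(&r));	/* prints 0: wrapped */
	return 0;
}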
rx_ring 1612 drivers/net/ethernet/intel/igc/igc_main.c if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
rx_ring 1621 drivers/net/ethernet/intel/igc/igc_main.c igc_process_skb_fields(rx_ring, rx_desc, skb);
rx_ring 1633 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->skb = skb;
rx_ring 1635 drivers/net/ethernet/intel/igc/igc_main.c u64_stats_update_begin(&rx_ring->rx_syncp);
rx_ring 1636 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->rx_stats.packets += total_packets;
rx_ring 1637 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->rx_stats.bytes += total_bytes;
rx_ring 1638 drivers/net/ethernet/intel/igc/igc_main.c u64_stats_update_end(&rx_ring->rx_syncp);
rx_ring 1643 drivers/net/ethernet/intel/igc/igc_main.c igc_alloc_rx_buffers(rx_ring, cleaned_count);
rx_ring 1648 drivers/net/ethernet/intel/igc/igc_main.c static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
rx_ring 1650 drivers/net/ethernet/intel/igc/igc_main.c return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;
rx_ring 1653 drivers/net/ethernet/intel/igc/igc_main.c static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
rx_ring 1664 drivers/net/ethernet/intel/igc/igc_main.c page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
rx_ring 1666 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->rx_stats.alloc_failed++;
rx_ring 1671 drivers/net/ethernet/intel/igc/igc_main.c dma = dma_map_page_attrs(rx_ring->dev, page, 0,
rx_ring 1672 drivers/net/ethernet/intel/igc/igc_main.c igc_rx_pg_size(rx_ring),
rx_ring 1679 drivers/net/ethernet/intel/igc/igc_main.c if (dma_mapping_error(rx_ring->dev, dma)) {
rx_ring 1682 drivers/net/ethernet/intel/igc/igc_main.c rx_ring->rx_stats.alloc_failed++;
rx_ring 1688 drivers/net/ethernet/intel/igc/igc_main.c bi->page_offset = igc_rx_offset(rx_ring);
rx_ring 1926 drivers/net/ethernet/intel/igc/igc_main.c struct igc_ring *ring = adapter->rx_ring[i];
rx_ring 2349 drivers/net/ethernet/intel/igc/igc_main.c struct igc_ring *ring = adapter->rx_ring[i];
rx_ring 2764 drivers/net/ethernet/intel/igc/igc_main.c adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
rx_ring 3597 drivers/net/ethernet/intel/igc/igc_main.c adapter->rx_ring[rxr_idx] = ring;
rx_ring 3678 drivers/net/ethernet/intel/igc/igc_main.c adapter->rx_ring[i]->reg_idx = i;
rx_ring 135 drivers/net/ethernet/intel/ixgb/ixgb.h struct ixgb_desc_ring rx_ring;
rx_ring 473 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
rx_ring 487 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
rx_ring 492 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c rx_old = adapter->rx_ring;
rx_ring 518 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c rx_new = adapter->rx_ring;
rx_ring 520 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c adapter->rx_ring = rx_old;
rx_ring 524 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c adapter->rx_ring = rx_new;
rx_ring 535 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c adapter->rx_ring = rx_old;
rx_ring 193 drivers/net/ethernet/intel/ixgb/ixgb_main.c ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));
rx_ring 752 drivers/net/ethernet/intel/ixgb/ixgb_main.c struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
rx_ring 823 drivers/net/ethernet/intel/ixgb/ixgb_main.c u64 rdba = adapter->rx_ring.dma;
rx_ring 824 drivers/net/ethernet/intel/ixgb/ixgb_main.c u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
rx_ring 959 drivers/net/ethernet/intel/ixgb/ixgb_main.c struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
rx_ring 964 drivers/net/ethernet/intel/ixgb/ixgb_main.c vfree(rx_ring->buffer_info);
rx_ring 965 drivers/net/ethernet/intel/ixgb/ixgb_main.c rx_ring->buffer_info = NULL;
rx_ring 967 drivers/net/ethernet/intel/ixgb/ixgb_main.c dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
rx_ring 968 drivers/net/ethernet/intel/ixgb/ixgb_main.c rx_ring->dma);
rx_ring 970 drivers/net/ethernet/intel/ixgb/ixgb_main.c rx_ring->desc = NULL;
rx_ring 981 drivers/net/ethernet/intel/ixgb/ixgb_main.c struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
rx_ring 989 drivers/net/ethernet/intel/ixgb/ixgb_main.c for (i = 0; i < rx_ring->count; i++) {
rx_ring 990 drivers/net/ethernet/intel/ixgb/ixgb_main.c buffer_info = &rx_ring->buffer_info[i];
rx_ring 1006 drivers/net/ethernet/intel/ixgb/ixgb_main.c size = sizeof(struct ixgb_buffer) * rx_ring->count;
rx_ring 1007 drivers/net/ethernet/intel/ixgb/ixgb_main.c memset(rx_ring->buffer_info, 0, size);
rx_ring 1011 drivers/net/ethernet/intel/ixgb/ixgb_main.c memset(rx_ring->desc, 0, rx_ring->size);
rx_ring 1013 drivers/net/ethernet/intel/ixgb/ixgb_main.c rx_ring->next_to_clean = 0;
rx_ring 1014 drivers/net/ethernet/intel/ixgb/ixgb_main.c rx_ring->next_to_use = 0;
rx_ring 1935 drivers/net/ethernet/intel/ixgb/ixgb_main.c struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
rx_ring 1945 drivers/net/ethernet/intel/ixgb/ixgb_main.c i = rx_ring->next_to_clean;
rx_ring 1946 drivers/net/ethernet/intel/ixgb/ixgb_main.c rx_desc = IXGB_RX_DESC(*rx_ring, i);
rx_ring 1947 drivers/net/ethernet/intel/ixgb/ixgb_main.c buffer_info = &rx_ring->buffer_info[i];
rx_ring 1964 drivers/net/ethernet/intel/ixgb/ixgb_main.c if (++i == rx_ring->count)
rx_ring 1966 drivers/net/ethernet/intel/ixgb/ixgb_main.c next_rxd = IXGB_RX_DESC(*rx_ring, i);
rx_ring 1970 drivers/net/ethernet/intel/ixgb/ixgb_main.c if (j == rx_ring->count)
rx_ring 1972 drivers/net/ethernet/intel/ixgb/ixgb_main.c next2_buffer = &rx_ring->buffer_info[j];
rx_ring 1975 drivers/net/ethernet/intel/ixgb/ixgb_main.c next_buffer = &rx_ring->buffer_info[i];
rx_ring 2037 drivers/net/ethernet/intel/ixgb/ixgb_main.c rx_ring->next_to_clean = i;
rx_ring 2039 drivers/net/ethernet/intel/ixgb/ixgb_main.c cleaned_count = IXGB_DESC_UNUSED(rx_ring);
rx_ring 2054 drivers/net/ethernet/intel/ixgb/ixgb_main.c struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
rx_ring 2063 drivers/net/ethernet/intel/ixgb/ixgb_main.c i = rx_ring->next_to_use;
rx_ring 2064 drivers/net/ethernet/intel/ixgb/ixgb_main.c buffer_info = &rx_ring->buffer_info[i];
rx_ring 2065 drivers/net/ethernet/intel/ixgb/ixgb_main.c cleancount = IXGB_DESC_UNUSED(rx_ring);
rx_ring 2096 drivers/net/ethernet/intel/ixgb/ixgb_main.c rx_desc = IXGB_RX_DESC(*rx_ring, i);
rx_ring 2104 drivers/net/ethernet/intel/ixgb/ixgb_main.c if (++i == rx_ring->count)
rx_ring 2106 drivers/net/ethernet/intel/ixgb/ixgb_main.c buffer_info = &rx_ring->buffer_info[i];
rx_ring 2109 drivers/net/ethernet/intel/ixgb/ixgb_main.c if (likely(rx_ring->next_to_use != i)) {
rx_ring 2110 drivers/net/ethernet/intel/ixgb/ixgb_main.c rx_ring->next_to_use = i;
rx_ring 2112 drivers/net/ethernet/intel/ixgb/ixgb_main.c i = (rx_ring->count - 1);
rx_ring 272 drivers/net/ethernet/intel/ixgb/ixgb_param.c struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
rx_ring 275 drivers/net/ethernet/intel/ixgb/ixgb_param.c rx_ring->count = RxDescriptors[bd];
rx_ring 276 drivers/net/ethernet/intel/ixgb/ixgb_param.c ixgb_validate_option(&rx_ring->count, &opt);
rx_ring 278 drivers/net/ethernet/intel/ixgb/ixgb_param.c rx_ring->count = opt.def;
rx_ring 280 drivers/net/ethernet/intel/ixgb/ixgb_param.c rx_ring->count = ALIGN(rx_ring->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
rx_ring 646 drivers/net/ethernet/intel/ixgbe/ixgbe.h struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
rx_ring 970 drivers/net/ethernet/intel/ixgbe/ixgbe.h static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
rx_ring 975 drivers/net/ethernet/intel/ixgbe/ixgbe.h ixgbe_ptp_rx_pktstamp(rx_ring->q_vector, skb);
rx_ring 982 drivers/net/ethernet/intel/ixgbe/ixgbe.h ixgbe_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
rx_ring 987 drivers/net/ethernet/intel/ixgbe/ixgbe.h rx_ring->last_rx_timestamp = jiffies;
rx_ring 1011 drivers/net/ethernet/intel/ixgbe/ixgbe.h void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
rx_ring 1023 drivers/net/ethernet/intel/ixgbe/ixgbe.h static inline void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
rx_ring 1024 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
rx_ring 1028 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c ring->rx_pending = rx_ring->count;
rx_ring 1066 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c adapter->rx_ring[i]->count = new_rx_count;
rx_ring 1141 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c memcpy(&temp_ring[i], adapter->rx_ring[i],
rx_ring 1161 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c ixgbe_free_rx_resources(adapter->rx_ring[i]);
rx_ring 1163 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c memcpy(adapter->rx_ring[i], &temp_ring[i],
rx_ring 1240 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c ring = adapter->rx_ring[j];
rx_ring 1724 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
rx_ring 1758 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c rx_ring->count = IXGBE_DEFAULT_RXD;
rx_ring 1759 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c rx_ring->queue_index = 0;
rx_ring 1760 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c rx_ring->dev = &adapter->pdev->dev;
rx_ring 1761 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c rx_ring->netdev = adapter->netdev;
rx_ring 1762 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
rx_ring 1764 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c err = ixgbe_setup_rx_resources(adapter, rx_ring);
rx_ring 1772 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c ixgbe_configure_rx_ring(adapter, rx_ring);
rx_ring 1886 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
rx_ring 1894 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c rx_ntc = rx_ring->next_to_clean;
rx_ring 1896 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
rx_ring 1931 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
rx_ring 1934 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c dma_sync_single_for_cpu(rx_ring->dev,
rx_ring 1936 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c ixgbe_rx_bufsz(rx_ring),
rx_ring 1946 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c dma_sync_single_for_device(rx_ring->dev,
rx_ring 1948 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c ixgbe_rx_bufsz(rx_ring),
rx_ring 1953 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c if (rx_ntc == rx_ring->count)
rx_ring 1957 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
rx_ring 1963 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c ixgbe_alloc_rx_buffers(rx_ring, count);
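The ixgb entries above lean on IXGB_DESC_UNUSED(rx_ring) to size each refill batch. The listing never shows the macro body, but the arithmetic implied by the next_to_use/next_to_clean pairs is the usual one-slot-reserved ring computation; a reconstruction of it, as runnable C (from the usage pattern shown here, not copied from ixgb.h):

#include <stdio.h>

/* Descriptors software may refill before next_to_use would collide
 * with next_to_clean; one slot stays empty so "full" and "empty"
 * remain distinguishable. */
static unsigned int desc_unused(unsigned int count, unsigned int ntu,
				unsigned int ntc)
{
	return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
}

int main(void)
{
	printf("%u\n", desc_unused(256, 10, 10));	/* 255: whole ring minus reserved slot */
	printf("%u\n", desc_unused(256, 250, 10));	/* 15: refill window after wrap */
	return 0;
}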
rx_ring 1964 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c rx_ring->next_to_clean = rx_ntc;
rx_ring 1973 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
rx_ring 1998 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c if (rx_ring->count <= tx_ring->count)
rx_ring 2001 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c lc = ((rx_ring->count / 64) * 2) + 1;
rx_ring 2025 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
rx_ring 2706 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c queue = adapter->rx_ring[ring]->reg_idx;
rx_ring 671 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx;
rx_ring 678 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
rx_ring 693 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx;
rx_ring 1151 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
rx_ring 1155 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
rx_ring 43 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c adapter->rx_ring[i]->reg_idx = reg_idx;
rx_ring 44 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
rx_ring 72 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c adapter->rx_ring[i]->reg_idx = reg_idx;
rx_ring 73 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c adapter->rx_ring[i]->netdev = adapter->netdev;
rx_ring 163 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c adapter->rx_ring[offset + i]->reg_idx = rx_idx;
rx_ring 164 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c adapter->rx_ring[offset + i]->netdev = adapter->netdev;
rx_ring 166 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c adapter->rx_ring[offset + i]->dcb_tc = tc;
rx_ring 210 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c adapter->rx_ring[i]->reg_idx = reg_idx;
rx_ring 211 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
rx_ring 217 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c adapter->rx_ring[i]->reg_idx = reg_idx;
rx_ring 218 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c adapter->rx_ring[i]->netdev = adapter->netdev;
rx_ring 257 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c adapter->rx_ring[i]->reg_idx = i;
rx_ring 258 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c adapter->rx_ring[i]->netdev = adapter->netdev;
rx_ring 282 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c adapter->rx_ring[0]->reg_idx = 0;
rx_ring 996 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c adapter->rx_ring[rxr_idx] = ring;
rx_ring 1031 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c adapter->rx_ring[ring->queue_index] = NULL;
rx_ring 1101 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c if (adapter->rx_ring[i])
rx_ring 1102 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c adapter->rx_ring[i]->ring_idx = i;
rx_ring 576 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct ixgbe_ring *rx_ring;
rx_ring 715 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring = adapter->rx_ring[n];
rx_ring 717 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c n, rx_ring->next_to_use, rx_ring->next_to_clean);
rx_ring 772 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring = adapter->rx_ring[n];
rx_ring 774 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
rx_ring 785 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c for (i = 0; i < rx_ring->count; i++) {
rx_ring 788 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (i == rx_ring->next_to_use)
rx_ring 790 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c else if (i == rx_ring->next_to_clean)
rx_ring 795 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_buffer_info = &rx_ring->rx_buffer_info[i];
rx_ring 796 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_desc = IXGBE_RX_DESC(rx_ring, i);
rx_ring 821 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_rx_bufsz(rx_ring), true);
rx_ring 1312 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct ixgbe_ring *rx_ring,
rx_ring 1317 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u8 reg_idx = rx_ring->reg_idx;
rx_ring 1320 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rxctrl = dca3_get_tag(rx_ring->dev, cpu);
rx_ring 1525 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
rx_ring 1527 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
rx_ring 1530 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
rx_ring 1541 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
rx_ring 1543 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->rx_stats.alloc_rx_page_failed++;
rx_ring 1548 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c dma = dma_map_page_attrs(rx_ring->dev, page, 0,
rx_ring 1549 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_rx_pg_size(rx_ring),
rx_ring 1557 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (dma_mapping_error(rx_ring->dev, dma)) {
rx_ring 1558 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c __free_pages(page, ixgbe_rx_pg_order(rx_ring));
rx_ring 1560 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->rx_stats.alloc_rx_page_failed++;
rx_ring 1566 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c bi->page_offset = ixgbe_rx_offset(rx_ring);
rx_ring 1569 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->rx_stats.alloc_rx_page++;
rx_ring 1579 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
rx_ring 1583 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u16 i = rx_ring->next_to_use;
rx_ring 1590 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_desc = IXGBE_RX_DESC(rx_ring, i);
rx_ring 1591 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c bi = &rx_ring->rx_buffer_info[i];
rx_ring 1592 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c i -= rx_ring->count;
rx_ring 1594 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c bufsz = ixgbe_rx_bufsz(rx_ring);
rx_ring 1597 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (!ixgbe_alloc_mapped_page(rx_ring, bi))
rx_ring 1601 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
rx_ring 1615 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_desc = IXGBE_RX_DESC(rx_ring, 0);
rx_ring 1616 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c bi = rx_ring->rx_buffer_info;
rx_ring 1617 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c i -= rx_ring->count;
rx_ring 1626 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c i += rx_ring->count;
rx_ring 1628 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (rx_ring->next_to_use != i) {
rx_ring 1629 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->next_to_use = i;
rx_ring 1632 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->next_to_alloc = i;
rx_ring 1640 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c writel(i, rx_ring->tail);
rx_ring 1655 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
rx_ring 1662 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
rx_ring 1663 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->rx_stats.rsc_flush++;
rx_ring 1665 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_set_rsc_gso_size(rx_ring, skb);
rx_ring 1681 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
rx_ring 1685 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct net_device *dev = rx_ring->netdev;
rx_ring 1686 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u32 flags = rx_ring->q_vector->adapter->flags;
rx_ring 1688 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_update_rsc_stats(rx_ring, skb);
rx_ring 1690 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_rx_hash(rx_ring, rx_desc, skb);
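ixgbe_alloc_rx_buffers() above ends the same way the igbvf, igc, and ixgb refill paths do: bump next_to_use/next_to_alloc for the whole batch, then ring the tail doorbell once. The listing omits the barrier between the two steps, but one has to sit there, since the NIC may fetch descriptors the instant the tail register moves. A sketch of that epilogue, again with a hypothetical type rather than the driver's own:

#include <linux/io.h>
#include <asm/barrier.h>

struct demo_ring_regs {
	u16 next_to_use, next_to_alloc;
	u8 __iomem *tail;	/* mapped RDT register */
};

static void demo_publish_rx_descs(struct demo_ring_regs *rx_ring, u16 i)
{
	if (rx_ring->next_to_use == i)
		return;			/* nothing new was filled */

	rx_ring->next_to_use = i;
	rx_ring->next_to_alloc = i;

	/* force descriptor writes to memory before the doorbell */
	wmb();
	writel(i, rx_ring->tail);
}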
rx_ring 1692 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_rx_checksum(rx_ring, rx_desc, skb);
rx_ring 1695 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
rx_ring 1704 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_ipsec_rx(rx_ring, rx_desc, skb);
rx_ring 1708 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c skb_record_rx_queue(skb, rx_ring->queue_index);
rx_ring 1733 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
rx_ring 1737 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u32 ntc = rx_ring->next_to_clean + 1;
rx_ring 1740 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ntc = (ntc < rx_ring->count) ? ntc : 0;
rx_ring 1741 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->next_to_clean = ntc;
rx_ring 1743 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c prefetch(IXGBE_RX_DESC(rx_ring, ntc));
rx_ring 1746 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (ring_is_rsc_enabled(rx_ring)) {
rx_ring 1768 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->rx_buffer_info[ntc].skb = skb;
rx_ring 1769 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->rx_stats.non_eop_descs++;
rx_ring 1786 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
rx_ring 1826 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
rx_ring 1829 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (ring_uses_build_skb(rx_ring)) {
rx_ring 1832 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c dma_sync_single_range_for_cpu(rx_ring->dev,
rx_ring 1840 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c dma_sync_single_range_for_cpu(rx_ring->dev,
rx_ring 1849 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
rx_ring 1850 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_rx_pg_size(rx_ring),
rx_ring 1878 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
rx_ring 1882 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct net_device *netdev = rx_ring->netdev;
rx_ring 1901 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_pull_tail(rx_ring, skb);
rx_ring 1905 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
rx_ring 1923 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
rx_ring 1927 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u16 nta = rx_ring->next_to_alloc;
rx_ring 1929 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c new_buff = &rx_ring->rx_buffer_info[nta];
rx_ring 1933 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
rx_ring 2002 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
rx_ring 2008 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
rx_ring 2010 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c unsigned int truesize = ring_uses_build_skb(rx_ring) ?
rx_ring 2023 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
rx_ring 2030 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
rx_ring 2043 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_dma_sync_frag(rx_ring, *skb);
rx_ring 2047 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c dma_sync_single_range_for_cpu(rx_ring->dev,
rx_ring 2058 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
rx_ring 2064 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_reuse_rx_page(rx_ring, rx_buffer);
rx_ring 2071 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
rx_ring 2072 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_rx_pg_size(rx_ring),
rx_ring 2085 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
rx_ring 2092 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
rx_ring 2121 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
rx_ring 2146 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
rx_ring 2153 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
rx_ring 2197 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct ixgbe_ring *rx_ring,
rx_ring 2206 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c xdp_prog = READ_ONCE(rx_ring->xdp_prog);
rx_ring 2236 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
rx_ring 2247 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
rx_ring 2252 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
rx_ring 2256 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c unsigned int truesize = ring_uses_build_skb(rx_ring) ?
rx_ring 2278 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct ixgbe_ring *rx_ring,
rx_ring 2287 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u16 cleaned_count = ixgbe_desc_unused(rx_ring);
rx_ring 2291 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c xdp.rxq = &rx_ring->xdp_rxq;
rx_ring 2301 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
rx_ring 2305 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
rx_ring 2316 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
rx_ring 2324 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_rx_offset(rx_ring);
rx_ring 2327 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
rx_ring 2335 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
rx_ring 2342 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
rx_ring 2343 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c } else if (ring_uses_build_skb(rx_ring)) {
rx_ring 2344 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c skb = ixgbe_build_skb(rx_ring, rx_buffer,
rx_ring 2347 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c skb = ixgbe_construct_skb(rx_ring, rx_buffer,
rx_ring 2353 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->rx_stats.alloc_rx_buff_failed++;
rx_ring 2358 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
rx_ring 2362 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
rx_ring 2366 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
rx_ring 2373 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
rx_ring 2377 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
rx_ring 2382 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c mss = rx_ring->netdev->mtu -
rx_ring 2419 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u64_stats_update_begin(&rx_ring->syncp);
rx_ring 2420 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->stats.packets += total_rx_packets;
rx_ring 2421 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->stats.bytes += total_rx_bytes;
rx_ring 2422 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u64_stats_update_end(&rx_ring->syncp);
rx_ring 3698 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
rx_ring 3701 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
rx_ring 3708 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct ixgbe_ring *rx_ring)
rx_ring 3712 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u8 reg_idx = rx_ring->reg_idx;
rx_ring 3728 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (rx_ring->xsk_umem) {
rx_ring 3729 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u32 xsk_buf_len = rx_ring->xsk_umem->chunk_size_nohr -
rx_ring 3744 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) {
rx_ring 4267 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct ixgbe_ring *rx_ring;
rx_ring 4301 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring = adapter->rx_ring[i];
rx_ring 4303 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c clear_ring_rsc_enabled(rx_ring);
rx_ring 4304 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
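The repeated "truesize = ixgbe_rx_pg_size(rx_ring) / 2" lines above are the half-page recycling scheme: on 4 KiB-page systems each RX page is split in two, and ixgbe_rx_buffer_flip()/ixgbe_reuse_rx_page() alternate between the halves while the page is re-queued at next_to_alloc. The flip itself reduces to an XOR of the offset; a userspace model of that arithmetic, not the driver code:

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u

int main(void)
{
	unsigned int truesize = DEMO_PAGE_SIZE / 2;	/* two buffers per page */
	unsigned int page_offset = 0;

	page_offset ^= truesize;	/* hand out the upper half next */
	printf("%u\n", page_offset);	/* prints 2048 */
	page_offset ^= truesize;	/* back to the lower half */
	printf("%u\n", page_offset);	/* prints 0 */
	return 0;
}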
rx_ring 4305 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
rx_ring 4308 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c set_ring_rsc_enabled(rx_ring);
rx_ring 4310 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
rx_ring 4311 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
rx_ring 4316 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
rx_ring 4320 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
rx_ring 4324 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
rx_ring 4411 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
rx_ring 4519 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct ixgbe_ring *ring = adapter->rx_ring[i];
rx_ring 4557 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct ixgbe_ring *ring = adapter->rx_ring[i];
rx_ring 5271 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c queue = adapter->rx_ring[ring]->reg_idx;
rx_ring 5288 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
rx_ring 5290 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u16 i = rx_ring->next_to_clean;
rx_ring 5291 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
rx_ring 5293 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (rx_ring->xsk_umem) {
rx_ring 5294 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_xsk_clean_rx_ring(rx_ring);
rx_ring 5299 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c while (i != rx_ring->next_to_alloc) {
rx_ring 5303 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c dma_unmap_page_attrs(rx_ring->dev,
rx_ring 5305 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_rx_pg_size(rx_ring),
rx_ring 5314 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c dma_sync_single_range_for_cpu(rx_ring->dev,
rx_ring 5317 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_rx_bufsz(rx_ring),
rx_ring 5321 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
rx_ring 5322 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_rx_pg_size(rx_ring),
rx_ring 5330 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (i == rx_ring->count) {
rx_ring 5332 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_buffer = rx_ring->rx_buffer_info;
rx_ring 5337 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->next_to_alloc = 0;
rx_ring 5338 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->next_to_clean = 0;
rx_ring 5339 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->next_to_use = 0;
rx_ring 5364 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c adapter->rx_ring[baseq + i]->netdev = vdev;
rx_ring 5383 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c adapter->rx_ring[baseq + i]->netdev = NULL;
rx_ring 5755 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct ixgbe_ring *ring = adapter->rx_ring[i];
rx_ring 5798 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct ixgbe_ring *ring = adapter->rx_ring[i];
rx_ring 6052 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_clean_rx_ring(adapter->rx_ring[i]);
rx_ring 6537 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct ixgbe_ring *rx_ring)
rx_ring 6539 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct device *dev = rx_ring->dev;
rx_ring 6544 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
rx_ring 6546 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (rx_ring->q_vector)
rx_ring 6547 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ring_node = rx_ring->q_vector->numa_node;
rx_ring 6549 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->rx_buffer_info = vmalloc_node(size, ring_node);
rx_ring 6550 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (!rx_ring->rx_buffer_info)
rx_ring 6551 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->rx_buffer_info = vmalloc(size);
rx_ring 6552 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (!rx_ring->rx_buffer_info)
rx_ring 6556 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring 6557 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring 6560 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->desc = dma_alloc_coherent(dev,
rx_ring 6561 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->size,
rx_ring 6562 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c &rx_ring->dma,
rx_ring 6565 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (!rx_ring->desc)
rx_ring 6566 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
rx_ring 6567 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c &rx_ring->dma, GFP_KERNEL);
rx_ring 6568 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (!rx_ring->desc)
rx_ring 6571 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->next_to_clean = 0;
rx_ring 6572 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->next_to_use = 0;
rx_ring 6575 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
rx_ring 6576 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->queue_index) < 0)
rx_ring 6579 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->xdp_prog = adapter->xdp_prog;
rx_ring 6583 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c vfree(rx_ring->rx_buffer_info);
rx_ring 6584 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->rx_buffer_info = NULL;
rx_ring 6604 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
rx_ring 6620 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_free_rx_resources(adapter->rx_ring[i]);
rx_ring 6671 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
rx_ring 6673 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_clean_rx_ring(rx_ring);
rx_ring 6675 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->xdp_prog = NULL;
rx_ring 6676 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
rx_ring 6677 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c vfree(rx_ring->rx_buffer_info);
rx_ring 6678 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->rx_buffer_info = NULL;
rx_ring 6681 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (!rx_ring->desc)
rx_ring 6684 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c dma_free_coherent(rx_ring->dev, rx_ring->size,
rx_ring 6685 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->desc, rx_ring->dma);
rx_ring 6687 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring->desc = NULL;
rx_ring 6705 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (adapter->rx_ring[i]->desc)
rx_ring 6706 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_free_rx_resources(adapter->rx_ring[i]);
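ixgbe_setup_rx_resources() above adds a wrinkle the igbvf and igc versions lack: it tries vmalloc_node() on the queue vector's NUMA node first and only falls back to plain vmalloc() if that fails (the dma_alloc_coherent() call is likewise retried). A sketch of the fallback half, with a hypothetical helper name:

#include <linux/vmalloc.h>

static void *demo_alloc_buffer_info(size_t size, int preferred_node)
{
	/* keep the per-buffer bookkeeping local to the queue's node... */
	void *p = vmalloc_node(size, preferred_node);

	/* ...but remote memory beats no memory */
	if (!p)
		p = vmalloc(size);
	return p;
}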
adapter->rx_ring[i]; rx_ring 7058 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; rx_ring 7059 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; rx_ring 7066 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; rx_ring 7067 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c non_eop_descs += rx_ring->rx_stats.non_eop_descs; rx_ring 7068 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; rx_ring 7069 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; rx_ring 7070 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; rx_ring 7071 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c hw_csum_rx_error += rx_ring->rx_stats.csum_err; rx_ring 7072 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c bytes += rx_ring->stats.bytes; rx_ring 7073 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c packets += rx_ring->stats.packets; rx_ring 8941 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]); rx_ring 9268 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx; rx_ring 10177 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i]; rx_ring 10251 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct ixgbe_ring *ring = adapter->rx_ring[i]; rx_ring 10281 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c (void)xchg(&adapter->rx_ring[i]->xdp_prog, rx_ring 10454 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct ixgbe_ring *rx_ring) rx_ring 10458 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u8 reg_idx = rx_ring->reg_idx; rx_ring 10498 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring) rx_ring 10500 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c memset(&rx_ring->stats, 0, sizeof(rx_ring->stats)); rx_ring 10501 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats)); rx_ring 10514 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring; rx_ring 10516 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring = adapter->rx_ring[ring]; rx_ring 10523 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_disable_rxr_hw(adapter, rx_ring); rx_ring 10529 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c napi_disable(&rx_ring->q_vector->napi); rx_ring 10534 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_clean_rx_ring(rx_ring); rx_ring 10539 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_reset_rxr_stats(rx_ring); rx_ring 10552 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring; rx_ring 10554 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring = adapter->rx_ring[ring]; rx_ring 10559 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c napi_enable(&rx_ring->q_vector->napi); rx_ring 10564 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_configure_rx_ring(adapter, rx_ring); rx_ring 11051 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u64_stats_init(&adapter->rx_ring[i]->syncp); rx_ring 727 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c struct ixgbe_ring *rx_ring; rx_ring 742 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c rx_ring = adapter->rx_ring[n]; rx_ring 743 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c if 
(time_after(rx_ring->last_rx_timestamp, rx_event)) rx_ring 744 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c rx_event = rx_ring->last_rx_timestamp; rx_ring 17 drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, rx_ring 20 drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, rx_ring 38 drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count); rx_ring 40 drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h struct ixgbe_ring *rx_ring, rx_ring 42 drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring); rx_ring 81 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count); rx_ring 143 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c struct ixgbe_ring *rx_ring, rx_ring 146 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c struct xdp_umem *umem = rx_ring->xsk_umem; rx_ring 154 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c xdp_prog = READ_ONCE(rx_ring->xdp_prog); rx_ring 172 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); rx_ring 179 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c trace_xdp_exception(rx_ring->netdev, xdp_prog, act); rx_ring 190 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring, rx_ring 195 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; rx_ring 198 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c dma_sync_single_range_for_cpu(rx_ring->dev, rx_ring 206 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring, rx_ring 209 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c u16 nta = rx_ring->next_to_alloc; rx_ring 212 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c nbi = &rx_ring->rx_buffer_info[rx_ring->next_to_alloc]; rx_ring 215 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; rx_ring 229 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c struct ixgbe_ring *rx_ring; rx_ring 233 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c rx_ring = container_of(alloc, struct ixgbe_ring, zca); rx_ring 234 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; rx_ring 235 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c mask = rx_ring->xsk_umem->chunk_mask; rx_ring 237 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c nta = rx_ring->next_to_alloc; rx_ring 238 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c bi = rx_ring->rx_buffer_info; rx_ring 241 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
rx_ring 245 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
rx_ring 248 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
rx_ring 251 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle,
rx_ring 252 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c rx_ring->xsk_umem->headroom);
rx_ring 255 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring,
rx_ring 258 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c struct xdp_umem *umem = rx_ring->xsk_umem;
rx_ring 266 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c rx_ring->rx_stats.alloc_rx_page_failed++;
rx_ring 284 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring,
rx_ring 287 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c struct xdp_umem *umem = rx_ring->xsk_umem;
rx_ring 291 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c rx_ring->rx_stats.alloc_rx_page_failed++;
rx_ring 295 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c handle &= rx_ring->xsk_umem->chunk_mask;
rx_ring 312 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c __ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count,
rx_ring 313 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c bool alloc(struct ixgbe_ring *rx_ring,
rx_ring 318 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c u16 i = rx_ring->next_to_use;
rx_ring 325 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c rx_desc = IXGBE_RX_DESC(rx_ring, i);
rx_ring 326 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c bi = &rx_ring->rx_buffer_info[i];
rx_ring 327 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c i -= rx_ring->count;
rx_ring 330 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c if (!alloc(rx_ring, bi)) {
rx_ring 336 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
rx_ring 338 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c rx_ring->rx_buf_len,
rx_ring 350 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c rx_desc = IXGBE_RX_DESC(rx_ring, 0);
rx_ring 351 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c bi = rx_ring->rx_buffer_info;
rx_ring 352 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c i -= rx_ring->count;
rx_ring 361 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c i += rx_ring->count;
rx_ring 363 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c if (rx_ring->next_to_use != i) {
rx_ring 364 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c rx_ring->next_to_use = i;
rx_ring 367 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c rx_ring->next_to_alloc = i;
rx_ring 375 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c writel(i, rx_ring->tail);
rx_ring 381 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
rx_ring 383 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
rx_ring 387 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c static bool ixgbe_alloc_rx_buffers_fast_zc(struct ixgbe_ring *rx_ring,
rx_ring 390 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c return __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
rx_ring 394 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
rx_ring 403 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
rx_ring 414 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
rx_ring 418 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
rx_ring 420 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c u32 ntc = rx_ring->next_to_clean + 1;
rx_ring 422 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c ntc = (ntc < rx_ring->count) ? ntc : 0;
rx_ring 423 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c rx_ring->next_to_clean = ntc;
rx_ring 424 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c prefetch(IXGBE_RX_DESC(rx_ring, ntc));
rx_ring 428 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c struct ixgbe_ring *rx_ring,
rx_ring 433 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c u16 cleaned_count = ixgbe_desc_unused(rx_ring);
rx_ring 439 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c xdp.rxq = &rx_ring->xdp_rxq;
rx_ring 449 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c !ixgbe_alloc_rx_buffers_fast_zc(rx_ring,
rx_ring 454 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
rx_ring 465 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c bi = ixgbe_get_rx_buffer_zc(rx_ring, size);
rx_ring 471 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
rx_ring 472 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c ixgbe_inc_ntc(rx_ring);
rx_ring 474 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
rx_ring 480 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
rx_ring 481 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c ixgbe_inc_ntc(rx_ring);
rx_ring 491 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp);
rx_ring 499 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
rx_ring 505 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c ixgbe_inc_ntc(rx_ring);
rx_ring 510 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp);
rx_ring 512 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c rx_ring->rx_stats.alloc_rx_buff_failed++;
rx_ring 517 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c ixgbe_inc_ntc(rx_ring);
rx_ring 525 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
rx_ring 542 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c u64_stats_update_begin(&rx_ring->syncp);
rx_ring 543 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c rx_ring->stats.packets += total_rx_packets;
rx_ring 544 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c rx_ring->stats.bytes += total_rx_bytes;
rx_ring 545 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c u64_stats_update_end(&rx_ring->syncp);
rx_ring 549 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
rx_ring 550 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
rx_ring 551 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
rx_ring 553 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
rx_ring 560 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
rx_ring 562 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c u16 i = rx_ring->next_to_clean;
rx_ring 563 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c struct ixgbe_rx_buffer *bi = &rx_ring->rx_buffer_info[i];
rx_ring 565 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c while (i != rx_ring->next_to_alloc) {
rx_ring 566 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c xsk_umem_fq_reuse(rx_ring->xsk_umem, bi->handle);
rx_ring 569 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c if (i == rx_ring->count) {
rx_ring 571 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c bi = rx_ring->rx_buffer_info;
rx_ring 244 drivers/net/ethernet/intel/ixgbevf/ethtool.c struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
rx_ring 273 drivers/net/ethernet/intel/ixgbevf/ethtool.c adapter->rx_ring[i]->count = new_rx_count;
rx_ring 327 drivers/net/ethernet/intel/ixgbevf/ethtool.c rx_ring = vmalloc(array_size(sizeof(*rx_ring),
rx_ring 329 drivers/net/ethernet/intel/ixgbevf/ethtool.c if (!rx_ring) {
rx_ring 336 drivers/net/ethernet/intel/ixgbevf/ethtool.c rx_ring[i] = *adapter->rx_ring[i];
rx_ring 339 drivers/net/ethernet/intel/ixgbevf/ethtool.c memset(&rx_ring[i].xdp_rxq, 0,
rx_ring 340 drivers/net/ethernet/intel/ixgbevf/ethtool.c sizeof(rx_ring[i].xdp_rxq));
rx_ring 342 drivers/net/ethernet/intel/ixgbevf/ethtool.c rx_ring[i].count = new_rx_count;
rx_ring 343 drivers/net/ethernet/intel/ixgbevf/ethtool.c err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
rx_ring 347 drivers/net/ethernet/intel/ixgbevf/ethtool.c ixgbevf_free_rx_resources(&rx_ring[i]);
rx_ring 350 drivers/net/ethernet/intel/ixgbevf/ethtool.c vfree(rx_ring);
rx_ring 351 drivers/net/ethernet/intel/ixgbevf/ethtool.c rx_ring = NULL;
rx_ring 380 drivers/net/ethernet/intel/ixgbevf/ethtool.c if (rx_ring) {
rx_ring 382 drivers/net/ethernet/intel/ixgbevf/ethtool.c ixgbevf_free_rx_resources(adapter->rx_ring[i]);
rx_ring 383 drivers/net/ethernet/intel/ixgbevf/ethtool.c *adapter->rx_ring[i] = rx_ring[i];
rx_ring 387 drivers/net/ethernet/intel/ixgbevf/ethtool.c vfree(rx_ring);
rx_ring 388 drivers/net/ethernet/intel/ixgbevf/ethtool.c rx_ring = NULL;
rx_ring 489 drivers/net/ethernet/intel/ixgbevf/ethtool.c ring = adapter->rx_ring[j];
rx_ring 538 drivers/net/ethernet/intel/ixgbevf/ipsec.c void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
rx_ring 542 drivers/net/ethernet/intel/ixgbevf/ipsec.c struct ixgbevf_adapter *adapter = netdev_priv(rx_ring->netdev);
rx_ring 347 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
rx_ring 453 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h struct ixgbevf_ring *rx_ring);
rx_ring 466 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
rx_ring 478 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h static inline void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
rx_ring 115 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
rx_ring 508 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
rx_ring 512 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_rx_hash(rx_ring, rx_desc, skb);
rx_ring 513 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
rx_ring 517 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c unsigned long *active_vlans = netdev_priv(rx_ring->netdev);
rx_ring 524 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_ipsec_rx(rx_ring, rx_desc, skb);
rx_ring 526 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c skb->protocol = eth_type_trans(skb, rx_ring->netdev);
rx_ring 530 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring,
rx_ring 535 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
rx_ring 539 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c dma_sync_single_range_for_cpu(rx_ring->dev,
rx_ring 550 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring,
rx_ring 556 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
rx_ring 562 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
rx_ring 563 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_rx_pg_size(rx_ring),
rx_ring 584 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
rx_ring 587 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c u32 ntc = rx_ring->next_to_clean + 1;
rx_ring 590 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ntc = (ntc < rx_ring->count) ? ntc : 0;
rx_ring 591 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->next_to_clean = ntc;
rx_ring 593 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));
rx_ring 601 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring)
rx_ring 603 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c return ring_uses_build_skb(rx_ring) ? IXGBEVF_SKB_PAD : 0;
rx_ring 606 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
rx_ring 617 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring));
rx_ring 619 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->rx_stats.alloc_rx_page_failed++;
rx_ring 624 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c dma = dma_map_page_attrs(rx_ring->dev, page, 0,
rx_ring 625 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_rx_pg_size(rx_ring),
rx_ring 631 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c if (dma_mapping_error(rx_ring->dev, dma)) {
rx_ring 632 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c __free_pages(page, ixgbevf_rx_pg_order(rx_ring));
rx_ring 634 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->rx_stats.alloc_rx_page_failed++;
rx_ring 640 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c bi->page_offset = ixgbevf_rx_offset(rx_ring);
rx_ring 642 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->rx_stats.alloc_rx_page++;
rx_ring 652 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
rx_ring 657 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c unsigned int i = rx_ring->next_to_use;
rx_ring 660 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c if (!cleaned_count || !rx_ring->netdev)
rx_ring 663 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
rx_ring 664 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c bi = &rx_ring->rx_buffer_info[i];
rx_ring 665 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c i -= rx_ring->count;
rx_ring 668 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
rx_ring 672 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
rx_ring 674 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_rx_bufsz(rx_ring),
rx_ring 686 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
rx_ring 687 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c bi = rx_ring->rx_buffer_info;
rx_ring 688 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c i -= rx_ring->count;
rx_ring 697 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c i += rx_ring->count;
rx_ring 699 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c if (rx_ring->next_to_use != i) {
rx_ring 701 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->next_to_use = i;
rx_ring 704 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->next_to_alloc = i;
rx_ring 712 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_write_tail(rx_ring, i);
rx_ring 734 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
rx_ring 745 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c struct net_device *netdev = rx_ring->netdev;
rx_ring 767 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
rx_ring 771 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c u16 nta = rx_ring->next_to_alloc;
rx_ring 773 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c new_buff = &rx_ring->rx_buffer_info[nta];
rx_ring 777 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
rx_ring 834 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
rx_ring 840 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
rx_ring 842 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c unsigned int truesize = ring_uses_build_skb(rx_ring) ?
rx_ring 856 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
rx_ring 863 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
rx_ring 892 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE);
rx_ring 933 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
rx_ring 940 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
rx_ring 1061 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c struct ixgbevf_ring *rx_ring,
rx_ring 1070 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c xdp_prog = READ_ONCE(rx_ring->xdp_prog);
rx_ring 1080 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
rx_ring 1087 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
rx_ring 1098 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring,
rx_ring 1103 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
rx_ring 1107 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c unsigned int truesize = ring_uses_build_skb(rx_ring) ?
rx_ring 1116 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c struct ixgbevf_ring *rx_ring,
rx_ring 1121 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
rx_ring 1122 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c struct sk_buff *skb = rx_ring->skb;
rx_ring 1126 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c xdp.rxq = &rx_ring->xdp_rxq;
rx_ring 1135 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
rx_ring 1139 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
rx_ring 1150 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size);
rx_ring 1158 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_rx_offset(rx_ring);
rx_ring 1161 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
rx_ring 1167 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
rx_ring 1175 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size);
rx_ring 1176 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c } else if (ring_uses_build_skb(rx_ring)) {
rx_ring 1177 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c skb = ixgbevf_build_skb(rx_ring, rx_buffer,
rx_ring 1180 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c skb = ixgbevf_construct_skb(rx_ring, rx_buffer,
rx_ring 1186 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->rx_stats.alloc_rx_buff_failed++;
rx_ring 1191 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_put_rx_buffer(rx_ring, rx_buffer, skb);
rx_ring 1195 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c if (ixgbevf_is_non_eop(rx_ring, rx_desc))
rx_ring 1199 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
rx_ring 1212 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ether_addr_equal(rx_ring->netdev->dev_addr,
rx_ring 1219 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);
rx_ring 1231 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->skb = skb;
rx_ring 1235 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c adapter->xdp_ring[rx_ring->queue_index];
rx_ring 1244 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c u64_stats_update_begin(&rx_ring->syncp);
rx_ring 1245 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->stats.packets += total_rx_packets;
rx_ring 1246 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->stats.bytes += total_rx_bytes;
rx_ring 1247 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c u64_stats_update_end(&rx_ring->syncp);
rx_ring 1967 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c struct ixgbevf_ring *rx_ring)
rx_ring 1973 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c clear_ring_build_skb_enabled(rx_ring);
rx_ring 1974 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c clear_ring_uses_large_buffer(rx_ring);
rx_ring 1979 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c set_ring_build_skb_enabled(rx_ring);
rx_ring 1985 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c set_ring_uses_large_buffer(rx_ring);
rx_ring 2017 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
rx_ring 2019 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_set_rx_buffer_len(adapter, rx_ring);
rx_ring 2020 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_configure_rx_ring(adapter, rx_ring);
rx_ring 2327 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
rx_ring 2329 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c u16 i = rx_ring->next_to_clean;
rx_ring 2332 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c if (rx_ring->skb) {
rx_ring 2333 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c dev_kfree_skb(rx_ring->skb);
rx_ring 2334 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->skb = NULL;
rx_ring 2338 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c while (i != rx_ring->next_to_alloc) {
rx_ring 2341 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_buffer = &rx_ring->rx_buffer_info[i];
rx_ring 2346 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c dma_sync_single_range_for_cpu(rx_ring->dev,
rx_ring 2349 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_rx_bufsz(rx_ring),
rx_ring 2353 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c dma_unmap_page_attrs(rx_ring->dev,
rx_ring 2355 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_rx_pg_size(rx_ring),
rx_ring 2363 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c if (i == rx_ring->count)
rx_ring 2367 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->next_to_alloc = 0;
rx_ring 2368 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->next_to_clean = 0;
rx_ring 2369 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->next_to_use = 0;
rx_ring 2443 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
rx_ring 2472 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
rx_ring 2797 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c adapter->rx_ring[rxr_idx] = ring;
rx_ring 2832 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c adapter->rx_ring[ring->queue_index] = NULL;
rx_ring 3126 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
rx_ring 3128 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c hw_csum_rx_error += rx_ring->rx_stats.csum_err;
rx_ring 3129 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
rx_ring 3130 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
rx_ring 3131 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
rx_ring 3462 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c struct ixgbevf_ring *rx_ring)
rx_ring 3466 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
rx_ring 3467 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->rx_buffer_info = vmalloc(size);
rx_ring 3468 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c if (!rx_ring->rx_buffer_info)
rx_ring 3471 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c u64_stats_init(&rx_ring->syncp);
rx_ring 3474 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring 3475 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring 3477 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
rx_ring 3478 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c &rx_ring->dma, GFP_KERNEL);
rx_ring 3480 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c if (!rx_ring->desc)
rx_ring 3484 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
rx_ring 3485 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->queue_index) < 0)
rx_ring 3488 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->xdp_prog = adapter->xdp_prog;
rx_ring 3492 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c vfree(rx_ring->rx_buffer_info);
rx_ring 3493 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->rx_buffer_info = NULL;
rx_ring 3494 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
rx_ring 3513 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]);
rx_ring 3524 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_free_rx_resources(adapter->rx_ring[i]);
rx_ring 3534 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
rx_ring 3536 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_clean_rx_ring(rx_ring);
rx_ring 3538 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->xdp_prog = NULL;
rx_ring 3539 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
rx_ring 3540 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c vfree(rx_ring->rx_buffer_info);
rx_ring 3541 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->rx_buffer_info = NULL;
rx_ring 3543 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
rx_ring 3544 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->dma);
rx_ring 3546 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_ring->desc = NULL;
rx_ring 3560 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c if (adapter->rx_ring[i]->desc)
rx_ring 3561 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_free_rx_resources(adapter->rx_ring[i]);
rx_ring 4389 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ring = adapter->rx_ring[i];
rx_ring 4453 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c struct ixgbevf_ring *ring = adapter->rx_ring[i];
rx_ring 4477 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
rx_ring 502 drivers/net/ethernet/marvell/skge.c p->rx_pending = skge->rx_ring.count;
rx_ring 516 drivers/net/ethernet/marvell/skge.c skge->rx_ring.count = p->rx_pending;
rx_ring 984 drivers/net/ethernet/marvell/skge.c struct skge_ring *ring = &skge->rx_ring;
rx_ring 1009 drivers/net/ethernet/marvell/skge.c struct skge_ring *ring = &skge->rx_ring;
rx_ring 2546 drivers/net/ethernet/marvell/skge.c rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
rx_ring 2561 drivers/net/ethernet/marvell/skge.c err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
rx_ring 2598 drivers/net/ethernet/marvell/skge.c skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);
rx_ring 2625 drivers/net/ethernet/marvell/skge.c kfree(skge->rx_ring.start);
rx_ring 2715 drivers/net/ethernet/marvell/skge.c kfree(skge->rx_ring.start);
rx_ring 3055 drivers/net/ethernet/marvell/skge.c e - skge->rx_ring.start, status, len);
rx_ring 3122 drivers/net/ethernet/marvell/skge.c e - skge->rx_ring.start, control, status);
rx_ring 3197 drivers/net/ethernet/marvell/skge.c struct skge_ring *ring = &skge->rx_ring;
rx_ring 3710 drivers/net/ethernet/marvell/skge.c for (e = skge->rx_ring.to_clean; ; e = e->next) {
rx_ring 3835 drivers/net/ethernet/marvell/skge.c skge->rx_ring.count = DEFAULT_RX_RING_SIZE;
rx_ring 2459 drivers/net/ethernet/marvell/skge.h struct skge_ring rx_ring ____cacheline_aligned_in_smp;
rx_ring 1353 drivers/net/ethernet/marvell/sky2.c struct rx_ring_info *re = sky2->rx_ring + i;
rx_ring 1492 drivers/net/ethernet/marvell/sky2.c struct rx_ring_info *re = sky2->rx_ring + i;
rx_ring 1546 drivers/net/ethernet/marvell/sky2.c re = sky2->rx_ring + i;
rx_ring 1612 drivers/net/ethernet/marvell/sky2.c sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct rx_ring_info),
rx_ring 1614 drivers/net/ethernet/marvell/sky2.c if (!sky2->rx_ring)
rx_ring 1640 drivers/net/ethernet/marvell/sky2.c kfree(sky2->rx_ring);
rx_ring 1643 drivers/net/ethernet/marvell/sky2.c sky2->rx_ring = NULL;
rx_ring 2559 drivers/net/ethernet/marvell/sky2.c struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next;
rx_ring 2568 drivers/net/ethernet/marvell/sky2.c prefetch(sky2->rx_ring + sky2->rx_next);
rx_ring 2667 drivers/net/ethernet/marvell/sky2.c struct sk_buff *skb = sky2->rx_ring[sky2->rx_next].skb;
rx_ring 2689 drivers/net/ethernet/marvell/sky2.c skb = sky2->rx_ring[sky2->rx_next].skb;
rx_ring 2697 drivers/net/ethernet/marvell/sky2.c skb = sky2->rx_ring[sky2->rx_next].skb;
rx_ring 2241 drivers/net/ethernet/marvell/sky2.h struct rx_ring_info *rx_ring ____cacheline_aligned_in_smp;
rx_ring 1192 drivers/net/ethernet/mediatek/mtk_eth_soc.c return &eth->rx_ring[0];
rx_ring 1195 drivers/net/ethernet/mediatek/mtk_eth_soc.c ring = &eth->rx_ring[i];
rx_ring 1212 drivers/net/ethernet/mediatek/mtk_eth_soc.c ring = &eth->rx_ring[0];
rx_ring 1216 drivers/net/ethernet/mediatek/mtk_eth_soc.c ring = &eth->rx_ring[i];
rx_ring 1650 drivers/net/ethernet/mediatek/mtk_eth_soc.c ring = &eth->rx_ring[ring_no];
rx_ring 2096 drivers/net/ethernet/mediatek/mtk_eth_soc.c mtk_rx_clean(eth, &eth->rx_ring[0]);
rx_ring 2102 drivers/net/ethernet/mediatek/mtk_eth_soc.c mtk_rx_clean(eth, &eth->rx_ring[i]);
rx_ring 877 drivers/net/ethernet/mediatek/mtk_eth_soc.h struct mtk_rx_ring rx_ring[MTK_MAX_RX_RING_NUM];
rx_ring 107 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);
rx_ring 134 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->size = priv->rx_ring[cq->ring]->actual_size;
rx_ring 427 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c data[index++] = priv->rx_ring[i]->packets;
rx_ring 428 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c data[index++] = priv->rx_ring[i]->bytes;
rx_ring 429 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c data[index++] = priv->rx_ring[i]->dropped;
rx_ring 430 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c data[index++] = priv->rx_ring[i]->xdp_drop;
rx_ring 431 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c data[index++] = priv->rx_ring[i]->xdp_tx;
rx_ring 432 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c data[index++] = priv->rx_ring[i]->xdp_tx_full;
rx_ring 1148 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
rx_ring 1149 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c priv->rx_ring[0]->size) &&
rx_ring 1194 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
rx_ring 1470 drivers/net/ethernet/mellanox/mlx4/en_netdev.c rx_packets = READ_ONCE(priv->rx_ring[ring]->packets);
rx_ring 1471 drivers/net/ethernet/mellanox/mlx4/en_netdev.c rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes);
rx_ring 1588 drivers/net/ethernet/mellanox/mlx4/en_netdev.c struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
rx_ring 1601 drivers/net/ethernet/mellanox/mlx4/en_netdev.c free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
rx_ring 1611 drivers/net/ethernet/mellanox/mlx4/en_netdev.c tx_ring->recycle_ring = priv->rx_ring[rr_index];
rx_ring 1682 drivers/net/ethernet/mellanox/mlx4/en_netdev.c priv->rx_ring[i]->cqn = cq->mcq.cqn;
rx_ring 1867 drivers/net/ethernet/mellanox/mlx4/en_netdev.c mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
rx_ring 1997 drivers/net/ethernet/mellanox/mlx4/en_netdev.c mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
rx_ring 2058 drivers/net/ethernet/mellanox/mlx4/en_netdev.c priv->rx_ring[i]->bytes = 0;
rx_ring 2059 drivers/net/ethernet/mellanox/mlx4/en_netdev.c priv->rx_ring[i]->packets = 0;
rx_ring 2060 drivers/net/ethernet/mellanox/mlx4/en_netdev.c priv->rx_ring[i]->csum_ok = 0;
rx_ring 2061 drivers/net/ethernet/mellanox/mlx4/en_netdev.c priv->rx_ring[i]->csum_none = 0;
rx_ring 2062 drivers/net/ethernet/mellanox/mlx4/en_netdev.c priv->rx_ring[i]->csum_complete = 0;
rx_ring 2130 drivers/net/ethernet/mellanox/mlx4/en_netdev.c if (priv->rx_ring[i])
rx_ring 2131 drivers/net/ethernet/mellanox/mlx4/en_netdev.c mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
rx_ring 2167 drivers/net/ethernet/mellanox/mlx4/en_netdev.c if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
rx_ring 2183 drivers/net/ethernet/mellanox/mlx4/en_netdev.c if (priv->rx_ring[i])
rx_ring 2184 drivers/net/ethernet/mellanox/mlx4/en_netdev.c mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
rx_ring 2255 drivers/net/ethernet/mellanox/mlx4/en_netdev.c memcpy(dst->rx_ring, src->rx_ring,
rx_ring 2294 drivers/net/ethernet/mellanox/mlx4/en_netdev.c priv->rx_ring[0]->xdp_prog,
rx_ring 2304 drivers/net/ethernet/mellanox/mlx4/en_netdev.c rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
rx_ring 2802 drivers/net/ethernet/mellanox/mlx4/en_netdev.c priv->rx_ring[i]->xdp_prog,
rx_ring 2804 drivers/net/ethernet/mellanox/mlx4/en_netdev.c rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
rx_ring 2856 drivers/net/ethernet/mellanox/mlx4/en_netdev.c priv->rx_ring[i]->xdp_prog,
rx_ring 2858 drivers/net/ethernet/mellanox/mlx4/en_netdev.c rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
rx_ring 2891 drivers/net/ethernet/mellanox/mlx4/en_netdev.c priv->rx_ring[0]->xdp_prog,
rx_ring 163 drivers/net/ethernet/mellanox/mlx4/en_port.c const struct mlx4_en_rx_ring *ring = priv->rx_ring[i];
rx_ring 250 drivers/net/ethernet/mellanox/mlx4/en_port.c const struct mlx4_en_rx_ring *ring = priv->rx_ring[i];
rx_ring 192 drivers/net/ethernet/mellanox/mlx4/en_rx.c ring = priv->rx_ring[ring_ind];
rx_ring 215 drivers/net/ethernet/mellanox/mlx4/en_rx.c ring = priv->rx_ring[ring_ind];
rx_ring 336 drivers/net/ethernet/mellanox/mlx4/en_rx.c ring = priv->rx_ring[ring_ind];
rx_ring 368 drivers/net/ethernet/mellanox/mlx4/en_rx.c ring = priv->rx_ring[ring_ind];
rx_ring 378 drivers/net/ethernet/mellanox/mlx4/en_rx.c mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);
rx_ring 382 drivers/net/ethernet/mellanox/mlx4/en_rx.c if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
rx_ring 383 drivers/net/ethernet/mellanox/mlx4/en_rx.c priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
rx_ring 401 drivers/net/ethernet/mellanox/mlx4/en_rx.c if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
rx_ring 680 drivers/net/ethernet/mellanox/mlx4/en_rx.c ring = priv->rx_ring[cq_ring];
rx_ring 1164 drivers/net/ethernet/mellanox/mlx4/en_rx.c err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
rx_ring 1195 drivers/net/ethernet/mellanox/mlx4/en_rx.c priv->rx_ring[0]->cqn, -1, &context);
rx_ring 1121 drivers/net/ethernet/mellanox/mlx4/en_tx.c netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
rx_ring 1172 drivers/net/ethernet/mellanox/mlx4/en_tx.c rx_ring->xdp_tx++;
rx_ring 1189 drivers/net/ethernet/mellanox/mlx4/en_tx.c rx_ring->xdp_tx_full++;
rx_ring 593 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
rx_ring 703 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
rx_ring 541 drivers/net/ethernet/natsemi/natsemi.c struct netdev_desc *rx_ring;
rx_ring 1874 drivers/net/ethernet/natsemi/natsemi.c printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
rx_ring 1877 drivers/net/ethernet/natsemi/natsemi.c i, np->rx_ring[i].next_desc,
rx_ring 1878 drivers/net/ethernet/natsemi/natsemi.c np->rx_ring[i].cmd_status,
rx_ring 1879 drivers/net/ethernet/natsemi/natsemi.c np->rx_ring[i].addr);
rx_ring 1919 drivers/net/ethernet/natsemi/natsemi.c np->rx_ring = pci_alloc_consistent(np->pci_dev,
rx_ring 1922 drivers/net/ethernet/natsemi/natsemi.c if (!np->rx_ring)
rx_ring 1924 drivers/net/ethernet/natsemi/natsemi.c np->tx_ring = &np->rx_ring[RX_RING_SIZE];
rx_ring 1950 drivers/net/ethernet/natsemi/natsemi.c np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
rx_ring 1952 drivers/net/ethernet/natsemi/natsemi.c np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
rx_ring 1992 drivers/net/ethernet/natsemi/natsemi.c np->rx_head_desc = &np->rx_ring[0];
rx_ring 1999 drivers/net/ethernet/natsemi/natsemi.c np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma
rx_ring 2002 drivers/net/ethernet/natsemi/natsemi.c np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
rx_ring 2034 drivers/net/ethernet/natsemi/natsemi.c np->rx_ring[i].cmd_status = 0;
rx_ring 2035 drivers/net/ethernet/natsemi/natsemi.c np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
rx_ring 2057 drivers/net/ethernet/natsemi/natsemi.c np->rx_ring, np->ring_dma);
rx_ring 2068 drivers/net/ethernet/natsemi/natsemi.c np->rx_head_desc = &np->rx_ring[0];
rx_ring 2071 drivers/net/ethernet/natsemi/natsemi.c np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
rx_ring 2386 drivers/net/ethernet/natsemi/natsemi.c np->rx_head_desc = &np->rx_ring[entry];
rx_ring 409 drivers/net/ethernet/netronome/nfp/nfp_net.h struct nfp_net_rx_ring *rx_ring;
rx_ring 594 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
rx_ring 599 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->idx = idx;
rx_ring 600 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->r_vec = r_vec;
rx_ring 601 drivers/net/ethernet/netronome/nfp/nfp_net_common.c u64_stats_init(&rx_ring->r_vec->rx_sync);
rx_ring 603 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
rx_ring 604 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
rx_ring 1437 drivers/net/ethernet/netronome/nfp/nfp_net_common.c struct nfp_net_rx_ring *rx_ring,
rx_ring 1442 drivers/net/ethernet/netronome/nfp/nfp_net_common.c wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
rx_ring 1447 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->rxbufs[wr_idx].frag = frag;
rx_ring 1448 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
rx_ring 1451 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->rxds[wr_idx].fld.reserved = 0;
rx_ring 1452 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
rx_ring 1453 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
rx_ring 1456 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->wr_p++;
rx_ring 1457 drivers/net/ethernet/netronome/nfp/nfp_net_common.c if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
rx_ring 1462 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
rx_ring 1472 drivers/net/ethernet/netronome/nfp/nfp_net_common.c static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
rx_ring 1479 drivers/net/ethernet/netronome/nfp/nfp_net_common.c if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
rx_ring 1483 drivers/net/ethernet/netronome/nfp/nfp_net_common.c wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
rx_ring 1484 drivers/net/ethernet/netronome/nfp/nfp_net_common.c last_idx = rx_ring->cnt - 1;
rx_ring 1485 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
rx_ring 1486 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->rxbufs[wr_idx].frag = rx_ring->rxbufs[last_idx].frag;
rx_ring 1487 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->rxbufs[last_idx].dma_addr = 0;
rx_ring 1488 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->rxbufs[last_idx].frag = NULL;
rx_ring 1490 drivers/net/ethernet/netronome/nfp/nfp_net_common.c memset(rx_ring->rxds, 0, rx_ring->size);
rx_ring 1491 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->wr_p = 0;
rx_ring 1492 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->rd_p = 0;
rx_ring 1506 drivers/net/ethernet/netronome/nfp/nfp_net_common.c struct nfp_net_rx_ring *rx_ring)
rx_ring 1510 drivers/net/ethernet/netronome/nfp/nfp_net_common.c for (i = 0; i < rx_ring->cnt - 1; i++) {
rx_ring 1515 drivers/net/ethernet/netronome/nfp/nfp_net_common.c if (!rx_ring->rxbufs[i].frag)
rx_ring 1518 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
rx_ring 1519 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
rx_ring 1520 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->rxbufs[i].dma_addr = 0;
rx_ring 1521 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->rxbufs[i].frag = NULL;
rx_ring 1532 drivers/net/ethernet/netronome/nfp/nfp_net_common.c struct nfp_net_rx_ring *rx_ring)
rx_ring 1537 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rxbufs = rx_ring->rxbufs;
rx_ring 1539 drivers/net/ethernet/netronome/nfp/nfp_net_common.c for (i = 0; i < rx_ring->cnt - 1; i++) {
rx_ring 1542 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_rx_ring_bufs_free(dp, rx_ring);
rx_ring 1557 drivers/net/ethernet/netronome/nfp/nfp_net_common.c struct nfp_net_rx_ring *rx_ring)
rx_ring 1561 drivers/net/ethernet/netronome/nfp/nfp_net_common.c for (i = 0; i < rx_ring->cnt - 1; i++)
rx_ring 1562 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
rx_ring 1563 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->rxbufs[i].dma_addr);
rx_ring 1713 drivers/net/ethernet/netronome/nfp/nfp_net_common.c struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
rx_ring 1731 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
rx_ring 1737 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
rx_ring 1753 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
rx_ring 1764 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);
rx_ring 1802 drivers/net/ethernet/netronome/nfp/nfp_net_common.c static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
rx_ring 1804 drivers/net/ethernet/netronome/nfp/nfp_net_common.c struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
rx_ring 1818 drivers/net/ethernet/netronome/nfp/nfp_net_common.c xdp.rxq = &rx_ring->xdp_rxq;
rx_ring 1832 drivers/net/ethernet/netronome/nfp/nfp_net_common.c idx = D_IDX(rx_ring, rx_ring->rd_p);
rx_ring 1834 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rxd = &rx_ring->rxds[idx];
rx_ring 1845 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->rd_p++;
rx_ring 1848 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rxbuf = &rx_ring->rxbufs[idx];
rx_ring 1882 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
rx_ring 1900 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
rx_ring 1927 drivers/net/ethernet/netronome/nfp/nfp_net_common.c if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
rx_ring 1942 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
rx_ring 1955 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
rx_ring 1965 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
rx_ring 1976 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
rx_ring 1981 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
rx_ring 1987 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
rx_ring 1995 drivers/net/ethernet/netronome/nfp/nfp_net_common.c skb_record_rx_queue(skb, rx_ring->idx);
rx_ring 2016 drivers/net/ethernet/netronome/nfp/nfp_net_common.c napi_gro_receive(&rx_ring->r_vec->napi, skb);
rx_ring 2053 drivers/net/ethernet/netronome/nfp/nfp_net_common.c if (r_vec->rx_ring)
rx_ring 2054 drivers/net/ethernet/netronome/nfp/nfp_net_common.c pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
rx_ring 2198 drivers/net/ethernet/netronome/nfp/nfp_net_common.c struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
rx_ring 2208 drivers/net/ethernet/netronome/nfp/nfp_net_common.c idx = D_IDX(rx_ring, rx_ring->rd_p);
rx_ring 2210 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rxd = &rx_ring->rxds[idx];
rx_ring 2219 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->rd_p++;
rx_ring 2221 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rxbuf = &rx_ring->rxbufs[idx];
rx_ring 2244 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
rx_ring 2250 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
rx_ring 2255 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
rx_ring 2261 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
rx_ring 2273 drivers/net/ethernet/netronome/nfp/nfp_net_common.c struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
rx_ring 2278 drivers/net/ethernet/netronome/nfp/nfp_net_common.c while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
rx_ring 2497 drivers/net/ethernet/netronome/nfp/nfp_net_common.c static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
rx_ring 2499 drivers/net/ethernet/netronome/nfp/nfp_net_common.c struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
rx_ring 2503 drivers/net/ethernet/netronome/nfp/nfp_net_common.c xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
rx_ring 2504 drivers/net/ethernet/netronome/nfp/nfp_net_common.c kvfree(rx_ring->rxbufs);
rx_ring 2506 drivers/net/ethernet/netronome/nfp/nfp_net_common.c if (rx_ring->rxds)
rx_ring 2507 drivers/net/ethernet/netronome/nfp/nfp_net_common.c dma_free_coherent(dp->dev, rx_ring->size,
rx_ring 2508 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->rxds, rx_ring->dma);
rx_ring 2510 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->cnt = 0;
rx_ring 2511 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->rxbufs = NULL;
rx_ring 2512 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->rxds = NULL;
rx_ring 2513 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->dma = 0;
rx_ring 2514 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->size = 0;
rx_ring 2525 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
rx_ring 2530 drivers/net/ethernet/netronome/nfp/nfp_net_common.c err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
rx_ring 2531 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->idx);
rx_ring 2536 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->cnt = dp->rxd_cnt;
rx_ring 2537 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
rx_ring 2538 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
rx_ring 2539 drivers/net/ethernet/netronome/nfp/nfp_net_common.c &rx_ring->dma,
rx_ring 2541 drivers/net/ethernet/netronome/nfp/nfp_net_common.c if (!rx_ring->rxds) {
rx_ring 2543 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->cnt);
rx_ring 2547 drivers/net/ethernet/netronome/nfp/nfp_net_common.c rx_ring->rxbufs = kvcalloc(rx_ring->cnt, sizeof(*rx_ring->rxbufs),
rx_ring 2549 drivers/net/ethernet/netronome/nfp/nfp_net_common.c if (!rx_ring->rxbufs)
rx_ring 2555 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_rx_ring_free(rx_ring);
rx_ring 2606 drivers/net/ethernet/netronome/nfp/nfp_net_common.c r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL;
rx_ring 2784 drivers/net/ethernet/netronome/nfp/nfp_net_common.c struct nfp_net_rx_ring *rx_ring, unsigned int idx)
rx_ring 2787 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
rx_ring 2788 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
rx_ring 2789 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
rx_ring 14 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c struct nfp_net_rx_ring *rx_ring;
rx_ring 23 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c if (!r_vec->nfp_net || !r_vec->rx_ring)
rx_ring 26 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c rx_ring = r_vec->rx_ring;
rx_ring 30 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c rxd_cnt = rx_ring->cnt;
rx_ring 32 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c fl_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_fl);
rx_ring 33 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c fl_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_fl);
rx_ring 36 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c rx_ring->idx, rx_ring->fl_qcidx,
rx_ring 37 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c rx_ring->cnt, &rx_ring->dma, rx_ring->rxds,
rx_ring 38 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c rx_ring->rd_p, rx_ring->wr_p, fl_rd_p, fl_wr_p);
rx_ring 41 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c rxd = &rx_ring->rxds[i];
rx_ring 45 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c frag = READ_ONCE(rx_ring->rxbufs[i].frag);
rx_ring 49 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c if (rx_ring->rxbufs[i].dma_addr)
rx_ring 51 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c &rx_ring->rxbufs[i].dma_addr);
rx_ring 53 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c if (i == rx_ring->rd_p % rxd_cnt)
rx_ring 55 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c if (i == rx_ring->wr_p % rxd_cnt)
rx_ring 804 drivers/net/ethernet/nvidia/forcedeth.c union ring_type rx_ring;
rx_ring 1023 drivers/net/ethernet/nvidia/forcedeth.c if (np->rx_ring.orig)
rx_ring 1028 drivers/net/ethernet/nvidia/forcedeth.c np->rx_ring.orig, np->ring_addr);
rx_ring 1030 drivers/net/ethernet/nvidia/forcedeth.c if (np->rx_ring.ex)
rx_ring 1035 drivers/net/ethernet/nvidia/forcedeth.c np->rx_ring.ex, np->ring_addr);
rx_ring 1833 drivers/net/ethernet/nvidia/forcedeth.c if (less_rx-- == np->rx_ring.orig)
rx_ring 1854 drivers/net/ethernet/nvidia/forcedeth.c np->put_rx.orig = np->rx_ring.orig;
rx_ring 1874 drivers/net/ethernet/nvidia/forcedeth.c if (less_rx-- == np->rx_ring.ex)
rx_ring 1896 drivers/net/ethernet/nvidia/forcedeth.c np->put_rx.ex = np->rx_ring.ex;
rx_ring 1924 drivers/net/ethernet/nvidia/forcedeth.c np->get_rx = np->rx_ring;
rx_ring 1925 drivers/net/ethernet/nvidia/forcedeth.c np->put_rx = np->rx_ring;
rx_ring 1928 drivers/net/ethernet/nvidia/forcedeth.c np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
rx_ring 1930 drivers/net/ethernet/nvidia/forcedeth.c np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
rx_ring 1937 drivers/net/ethernet/nvidia/forcedeth.c np->rx_ring.orig[i].flaglen = 0;
rx_ring 1938 drivers/net/ethernet/nvidia/forcedeth.c np->rx_ring.orig[i].buf = 0;
rx_ring 1940 drivers/net/ethernet/nvidia/forcedeth.c np->rx_ring.ex[i].flaglen = 0;
rx_ring 1941 drivers/net/ethernet/nvidia/forcedeth.c np->rx_ring.ex[i].txvlan = 0;
rx_ring 1942 drivers/net/ethernet/nvidia/forcedeth.c np->rx_ring.ex[i].bufhigh = 0;
rx_ring 1943 drivers/net/ethernet/nvidia/forcedeth.c np->rx_ring.ex[i].buflow = 0;
rx_ring 2067 drivers/net/ethernet/nvidia/forcedeth.c np->rx_ring.orig[i].flaglen = 0;
rx_ring 2068 drivers/net/ethernet/nvidia/forcedeth.c np->rx_ring.orig[i].buf = 0;
rx_ring 2070 drivers/net/ethernet/nvidia/forcedeth.c np->rx_ring.ex[i].flaglen = 0;
rx_ring 2071 drivers/net/ethernet/nvidia/forcedeth.c np->rx_ring.ex[i].txvlan = 0;
rx_ring 2072 drivers/net/ethernet/nvidia/forcedeth.c np->rx_ring.ex[i].bufhigh = 0;
rx_ring 2073 drivers/net/ethernet/nvidia/forcedeth.c np->rx_ring.ex[i].buflow = 0;
rx_ring 2948 drivers/net/ethernet/nvidia/forcedeth.c np->get_rx.orig = np->rx_ring.orig;
rx_ring 3037 drivers/net/ethernet/nvidia/forcedeth.c np->get_rx.ex = np->rx_ring.ex;
rx_ring 4712 drivers/net/ethernet/nvidia/forcedeth.c np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
rx_ring 4713 drivers/net/ethernet/nvidia/forcedeth.c np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
rx_ring 4715 drivers/net/ethernet/nvidia/forcedeth.c np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
rx_ring 4716 drivers/net/ethernet/nvidia/forcedeth.c np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
rx_ring 5171 drivers/net/ethernet/nvidia/forcedeth.c flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
rx_ring 5172 drivers/net/ethernet/nvidia/forcedeth.c len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
rx_ring 5175 drivers/net/ethernet/nvidia/forcedeth.c flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
rx_ring 5176 drivers/net/ethernet/nvidia/forcedeth.c len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
rx_ring 5813 drivers/net/ethernet/nvidia/forcedeth.c np->rx_ring.orig = dma_alloc_coherent(&pci_dev->dev,
rx_ring 5819 drivers/net/ethernet/nvidia/forcedeth.c if (!np->rx_ring.orig)
rx_ring 5821 drivers/net/ethernet/nvidia/forcedeth.c np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
rx_ring 5823 drivers/net/ethernet/nvidia/forcedeth.c np->rx_ring.ex = dma_alloc_coherent(&pci_dev->dev,
rx_ring 5828 drivers/net/ethernet/nvidia/forcedeth.c if (!np->rx_ring.ex)
rx_ring 5830 drivers/net/ethernet/nvidia/forcedeth.c np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
rx_ring 587 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h struct pch_gbe_rx_ring *rx_ring;
rx_ring 613 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h struct pch_gbe_rx_ring *rx_ring);
rx_ring 277 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c struct pch_gbe_rx_ring *rxdr = adapter->rx_ring;
rx_ring 310 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c rx_old = adapter->rx_ring;
rx_ring 323 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c adapter->rx_ring = rxdr;
rx_ring 335 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
rx_ring 345 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c adapter->rx_ring = rxdr;
rx_ring 352 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
rx_ring 354 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c adapter->rx_ring = rx_old;
rx_ring 579 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c adapter->rx_ring = devm_kzalloc(&adapter->pdev->dev,
rx_ring 580 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c sizeof(*adapter->rx_ring), GFP_KERNEL);
rx_ring 581 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c if (!adapter->rx_ring)
rx_ring 855 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c (unsigned long long)adapter->rx_ring->dma,
rx_ring 856 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c adapter->rx_ring->size);
rx_ring 874 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c rdba = adapter->rx_ring->dma;
rx_ring 875 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c rdlen = adapter->rx_ring->size - 0x10;
rx_ring 959 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c struct pch_gbe_rx_ring *rx_ring)
rx_ring 967 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c for (i = 0; i < rx_ring->count; i++) {
rx_ring 968 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c buffer_info = &rx_ring->buffer_info[i];
rx_ring 973 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
rx_ring 974 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c memset(rx_ring->buffer_info, 0, size);
rx_ring 977 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c memset(rx_ring->desc, 0, rx_ring->size);
rx_ring 978 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c rx_ring->next_to_clean = 0;
rx_ring 979 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c rx_ring->next_to_use = 0;
rx_ring 980 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
rx_ring 981 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
rx_ring 1354 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
rx_ring 1366 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c i = rx_ring->next_to_use;
rx_ring 1369 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c buffer_info = &rx_ring->buffer_info[i];
rx_ring 1392 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
rx_ring 1401 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c if (unlikely(++i == rx_ring->count))
rx_ring 1404 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c if (likely(rx_ring->next_to_use != i)) {
rx_ring 1405 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c rx_ring->next_to_use = i;
rx_ring 1407 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c i = (rx_ring->count - 1);
rx_ring 1408 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c iowrite32(rx_ring->dma +
rx_ring 1417 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
rx_ring 1427 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
rx_ring 1428 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c rx_ring->rx_buff_pool =
rx_ring 1430 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
rx_ring 1431 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c if (!rx_ring->rx_buff_pool)
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c if (!rx_ring->rx_buff_pool) rx_ring 1434 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c rx_ring->rx_buff_pool_size = size; rx_ring 1435 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c for (i = 0; i < rx_ring->count; i++) { rx_ring 1436 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c buffer_info = &rx_ring->buffer_info[i]; rx_ring 1437 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i; rx_ring 1617 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c struct pch_gbe_rx_ring *rx_ring, rx_ring 1633 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c i = rx_ring->next_to_clean; rx_ring 1637 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c rx_desc = PCH_GBE_RX_DESC(*rx_ring, i); rx_ring 1647 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c buffer_info = &rx_ring->buffer_info[i]; rx_ring 1708 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring 1712 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c if (++i == rx_ring->count) rx_ring 1715 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c rx_ring->next_to_clean = i; rx_ring 1717 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); rx_ring 1774 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c struct pch_gbe_rx_ring *rx_ring) rx_ring 1781 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count; rx_ring 1782 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c rx_ring->buffer_info = vzalloc(size); rx_ring 1783 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c if (!rx_ring->buffer_info) rx_ring 1786 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); rx_ring 1787 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, rx_ring 1788 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c &rx_ring->dma, GFP_KERNEL); rx_ring 1789 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c if (!rx_ring->desc) { rx_ring 1790 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c vfree(rx_ring->buffer_info); rx_ring 1793 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c rx_ring->next_to_clean = 0; rx_ring 1794 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c rx_ring->next_to_use = 0; rx_ring 1795 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c for (desNo = 0; desNo < rx_ring->count; desNo++) { rx_ring 1796 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo); rx_ring 1801 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c rx_ring->desc, (unsigned long long)rx_ring->dma, rx_ring 1802 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c rx_ring->next_to_clean, rx_ring->next_to_use); rx_ring 1829 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c struct pch_gbe_rx_ring *rx_ring) rx_ring 1833 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c pch_gbe_clean_rx_ring(adapter, rx_ring); rx_ring 1834 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c vfree(rx_ring->buffer_info); rx_ring 1835 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c rx_ring->buffer_info = NULL; rx_ring 1836 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); rx_ring 1837 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 
rx_ring->desc = NULL; rx_ring 1879 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring; rx_ring 1902 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count); rx_ring 1909 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count); rx_ring 1936 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring; rx_ring 1955 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c pch_gbe_clean_rx_ring(adapter, adapter->rx_ring); rx_ring 1957 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size, rx_ring 1958 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic); rx_ring 1959 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c rx_ring->rx_buff_pool_logic = 0; rx_ring 1960 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c rx_ring->rx_buff_pool_size = 0; rx_ring 1961 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c rx_ring->rx_buff_pool = NULL; rx_ring 2018 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring); rx_ring 2031 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c pch_gbe_free_rx_resources(adapter, adapter->rx_ring); rx_ring 2055 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c pch_gbe_free_rx_resources(adapter, adapter->rx_ring); rx_ring 2301 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); rx_ring 464 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring; rx_ring 465 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c rx_ring->count = RxDescriptors; rx_ring 466 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c pch_gbe_validate_option(&rx_ring->count, &opt, adapter); rx_ring 467 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c rx_ring->count = roundup(rx_ring->count, rx_ring 484 drivers/net/ethernet/packetengines/hamachi.c struct hamachi_desc *rx_ring; rx_ring 656 drivers/net/ethernet/packetengines/hamachi.c hmp->rx_ring = ring_space; rx_ring 776 drivers/net/ethernet/packetengines/hamachi.c pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring, rx_ring 1055 drivers/net/ethernet/packetengines/hamachi.c printk(KERN_DEBUG " Rx ring %p: ", hmp->rx_ring); rx_ring 1058 drivers/net/ethernet/packetengines/hamachi.c le32_to_cpu(hmp->rx_ring[i].status_n_length)); rx_ring 1079 drivers/net/ethernet/packetengines/hamachi.c hmp->rx_ring[i].status_n_length &= cpu_to_le32(~DescOwn); rx_ring 1119 drivers/net/ethernet/packetengines/hamachi.c leXX_to_cpu(hmp->rx_ring[i].addr), rx_ring 1134 drivers/net/ethernet/packetengines/hamachi.c hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, rx_ring 1136 drivers/net/ethernet/packetengines/hamachi.c hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn | rx_ring 1141 drivers/net/ethernet/packetengines/hamachi.c hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing); rx_ring 1176 drivers/net/ethernet/packetengines/hamachi.c hmp->rx_ring[i].status_n_length = 0; rx_ring 1186 drivers/net/ethernet/packetengines/hamachi.c hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, rx_ring 1189 drivers/net/ethernet/packetengines/hamachi.c hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn | rx_ring 1193 
drivers/net/ethernet/packetengines/hamachi.c hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing); rx_ring 1403 drivers/net/ethernet/packetengines/hamachi.c entry, hmp->rx_ring[entry].status_n_length); rx_ring 1408 drivers/net/ethernet/packetengines/hamachi.c struct hamachi_desc *desc = &(hmp->rx_ring[entry]); rx_ring 1432 drivers/net/ethernet/packetengines/hamachi.c dev->name, desc, &hmp->rx_ring[hmp->cur_rx % RX_RING_SIZE]); rx_ring 1435 drivers/net/ethernet/packetengines/hamachi.c le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0xffff0000, rx_ring 1436 drivers/net/ethernet/packetengines/hamachi.c le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0x0000ffff, rx_ring 1437 drivers/net/ethernet/packetengines/hamachi.c le32_to_cpu(hmp->rx_ring[(hmp->cur_rx-1) % RX_RING_SIZE].status_n_length)); rx_ring 1487 drivers/net/ethernet/packetengines/hamachi.c leXX_to_cpu(hmp->rx_ring[entry].addr), rx_ring 1500 drivers/net/ethernet/packetengines/hamachi.c leXX_to_cpu(hmp->rx_ring[entry].addr), rx_ring 1505 drivers/net/ethernet/packetengines/hamachi.c leXX_to_cpu(hmp->rx_ring[entry].addr), rx_ring 1581 drivers/net/ethernet/packetengines/hamachi.c desc = &(hmp->rx_ring[entry]); rx_ring 1681 drivers/net/ethernet/packetengines/hamachi.c readl(ioaddr + RxCurPtr) == (long)&hmp->rx_ring[i] ? '>' : ' ', rx_ring 1682 drivers/net/ethernet/packetengines/hamachi.c i, hmp->rx_ring[i].status_n_length, hmp->rx_ring[i].addr); rx_ring 1705 drivers/net/ethernet/packetengines/hamachi.c hmp->rx_ring[i].status_n_length = 0; rx_ring 1708 drivers/net/ethernet/packetengines/hamachi.c leXX_to_cpu(hmp->rx_ring[i].addr), rx_ring 1713 drivers/net/ethernet/packetengines/hamachi.c hmp->rx_ring[i].addr = cpu_to_leXX(0xBADF00D0); /* An invalid address. 
*/ rx_ring 1902 drivers/net/ethernet/packetengines/hamachi.c pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring, rx_ring 309 drivers/net/ethernet/packetengines/yellowfin.c struct yellowfin_desc *rx_ring; rx_ring 446 drivers/net/ethernet/packetengines/yellowfin.c np->rx_ring = ring_space; rx_ring 511 drivers/net/ethernet/packetengines/yellowfin.c pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); rx_ring 693 drivers/net/ethernet/packetengines/yellowfin.c pr_warn(" Rx ring %p: ", yp->rx_ring); rx_ring 695 drivers/net/ethernet/packetengines/yellowfin.c pr_cont(" %08x", yp->rx_ring[i].result_status); rx_ring 731 drivers/net/ethernet/packetengines/yellowfin.c yp->rx_ring[i].dbdma_cmd = rx_ring 733 drivers/net/ethernet/packetengines/yellowfin.c yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma + rx_ring 743 drivers/net/ethernet/packetengines/yellowfin.c yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev, rx_ring 751 drivers/net/ethernet/packetengines/yellowfin.c yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP); rx_ring 1041 drivers/net/ethernet/packetengines/yellowfin.c entry, yp->rx_ring[entry].result_status); rx_ring 1043 drivers/net/ethernet/packetengines/yellowfin.c entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr, rx_ring 1044 drivers/net/ethernet/packetengines/yellowfin.c yp->rx_ring[entry].result_status); rx_ring 1049 drivers/net/ethernet/packetengines/yellowfin.c struct yellowfin_desc *desc = &yp->rx_ring[entry]; rx_ring 1125 drivers/net/ethernet/packetengines/yellowfin.c le32_to_cpu(yp->rx_ring[entry].addr), rx_ring 1158 drivers/net/ethernet/packetengines/yellowfin.c yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev, rx_ring 1161 drivers/net/ethernet/packetengines/yellowfin.c yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP); rx_ring 1162 drivers/net/ethernet/packetengines/yellowfin.c yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */ rx_ring 1164 drivers/net/ethernet/packetengines/yellowfin.c yp->rx_ring[entry - 1].dbdma_cmd = rx_ring 1167 drivers/net/ethernet/packetengines/yellowfin.c yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd = rx_ring 1231 drivers/net/ethernet/packetengines/yellowfin.c ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ', rx_ring 1232 drivers/net/ethernet/packetengines/yellowfin.c i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr, rx_ring 1233 drivers/net/ethernet/packetengines/yellowfin.c yp->rx_ring[i].result_status); rx_ring 1235 drivers/net/ethernet/packetengines/yellowfin.c if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) { rx_ring 1241 drivers/net/ethernet/packetengines/yellowfin.c get_unaligned(((u16*)yp->rx_ring[i].addr) + j)); rx_ring 1253 drivers/net/ethernet/packetengines/yellowfin.c yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP); rx_ring 1254 drivers/net/ethernet/packetengines/yellowfin.c yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. 
*/ rx_ring 1384 drivers/net/ethernet/packetengines/yellowfin.c pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); rx_ring 544 drivers/net/ethernet/pasemi/pasemi_mac.c struct pasemi_mac_rxring *rx = rx_ring(mac); rx_ring 570 drivers/net/ethernet/pasemi/pasemi_mac.c rx_ring(mac)->buffers, rx_ring(mac)->buf_dma); rx_ring 572 drivers/net/ethernet/pasemi/pasemi_mac.c kfree(rx_ring(mac)->ring_info); rx_ring 573 drivers/net/ethernet/pasemi/pasemi_mac.c pasemi_dma_free_chan(&rx_ring(mac)->chan); rx_ring 581 drivers/net/ethernet/pasemi/pasemi_mac.c struct pasemi_mac_rxring *rx = rx_ring(mac); rx_ring 587 drivers/net/ethernet/pasemi/pasemi_mac.c fill = rx_ring(mac)->next_to_fill; rx_ring 622 drivers/net/ethernet/pasemi/pasemi_mac.c rx_ring(mac)->next_to_fill = (rx_ring(mac)->next_to_fill + count) & rx_ring 628 drivers/net/ethernet/pasemi/pasemi_mac.c struct pasemi_mac_rxring *rx = rx_ring(mac); rx_ring 661 drivers/net/ethernet/pasemi/pasemi_mac.c struct pasemi_dmachan *chan = &rx_ring(mac)->chan; rx_ring 793 drivers/net/ethernet/pasemi/pasemi_mac.c rx_ring(mac)->next_to_clean = n; rx_ring 806 drivers/net/ethernet/pasemi/pasemi_mac.c spin_unlock(&rx_ring(mac)->lock); rx_ring 1120 drivers/net/ethernet/pasemi/pasemi_mac.c pasemi_dma_start_chan(&rx_ring(mac)->chan, PAS_DMA_RXCHAN_CCMDSTA_DU | rx_ring 1133 drivers/net/ethernet/pasemi/pasemi_mac.c write_dma_reg(PAS_DMA_RXCHAN_INCR(rx_ring(mac)->chan.chno), rx_ring 1237 drivers/net/ethernet/pasemi/pasemi_mac.c int rxch = rx_ring(mac)->chan.chno; rx_ring 1279 drivers/net/ethernet/pasemi/pasemi_mac.c rxch = rx_ring(mac)->chan.chno; rx_ring 1312 drivers/net/ethernet/pasemi/pasemi_mac.c pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE); rx_ring 1562 drivers/net/ethernet/pasemi/pasemi_mac.c pkts = pasemi_mac_clean_rx(rx_ring(mac), budget); rx_ring 1616 drivers/net/ethernet/pasemi/pasemi_mac.c pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE); rx_ring 1647 drivers/net/ethernet/pasemi/pasemi_mac.c rx_ring(mac)->next_to_fill = 0; rx_ring 697 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c u8 rx_ring, u8 tx_ring) rx_ring 699 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c if (rx_ring == 0 || tx_ring == 0) rx_ring 702 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c if (rx_ring != 0) { rx_ring 703 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c if (rx_ring > adapter->max_sds_rings) { rx_ring 706 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c rx_ring, adapter->max_sds_rings); rx_ring 179 drivers/net/ethernet/rdc/r6040.c struct r6040_descriptor *rx_ring; rx_ring 326 drivers/net/ethernet/rdc/r6040.c lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring; rx_ring 327 drivers/net/ethernet/rdc/r6040.c r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT); rx_ring 330 drivers/net/ethernet/rdc/r6040.c desc = lp->rx_ring; rx_ring 343 drivers/net/ethernet/rdc/r6040.c } while (desc != lp->rx_ring); rx_ring 486 drivers/net/ethernet/rdc/r6040.c if (lp->rx_ring) { rx_ring 488 drivers/net/ethernet/rdc/r6040.c RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma); rx_ring 489 drivers/net/ethernet/rdc/r6040.c lp->rx_ring = NULL; rx_ring 760 drivers/net/ethernet/rdc/r6040.c lp->rx_ring = rx_ring 762 drivers/net/ethernet/rdc/r6040.c if (!lp->rx_ring) { rx_ring 787 drivers/net/ethernet/rdc/r6040.c pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring, rx_ring 338 drivers/net/ethernet/realtek/8139cp.c struct cp_desc *rx_ring; rx_ring 482 drivers/net/ethernet/realtek/8139cp.c desc = &cp->rx_ring[rx_tail]; rx_ring 542 drivers/net/ethernet/realtek/8139cp.c 
cp->rx_ring[rx_tail].opts2 = 0; rx_ring 543 drivers/net/ethernet/realtek/8139cp.c cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping); rx_ring 1079 drivers/net/ethernet/realtek/8139cp.c cp->rx_ring[i].opts2 = 0; rx_ring 1080 drivers/net/ethernet/realtek/8139cp.c cp->rx_ring[i].addr = cpu_to_le64(mapping); rx_ring 1082 drivers/net/ethernet/realtek/8139cp.c cp->rx_ring[i].opts1 = rx_ring 1085 drivers/net/ethernet/realtek/8139cp.c cp->rx_ring[i].opts1 = rx_ring 1123 drivers/net/ethernet/realtek/8139cp.c cp->rx_ring = mem; rx_ring 1124 drivers/net/ethernet/realtek/8139cp.c cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE]; rx_ring 1128 drivers/net/ethernet/realtek/8139cp.c dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma); rx_ring 1140 drivers/net/ethernet/realtek/8139cp.c desc = cp->rx_ring + i; rx_ring 1162 drivers/net/ethernet/realtek/8139cp.c memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE); rx_ring 1173 drivers/net/ethernet/realtek/8139cp.c dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring, rx_ring 1175 drivers/net/ethernet/realtek/8139cp.c cp->rx_ring = NULL; rx_ring 587 drivers/net/ethernet/realtek/8139too.c unsigned char *rx_ring; rx_ring 1332 drivers/net/ethernet/realtek/8139too.c tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, rx_ring 1334 drivers/net/ethernet/realtek/8139too.c if (tp->tx_bufs == NULL || tp->rx_ring == NULL) { rx_ring 1340 drivers/net/ethernet/realtek/8139too.c if (tp->rx_ring) rx_ring 1342 drivers/net/ethernet/realtek/8139too.c tp->rx_ring, tp->rx_ring_dma); rx_ring 1951 drivers/net/ethernet/realtek/8139too.c unsigned char *rx_ring = tp->rx_ring; rx_ring 1969 drivers/net/ethernet/realtek/8139too.c rx_status = le32_to_cpu (*(__le32 *) (rx_ring + ring_offset)); rx_ring 1981 drivers/net/ethernet/realtek/8139too.c &rx_ring[ring_offset], 70, true); rx_ring 2042 drivers/net/ethernet/realtek/8139too.c wrap_copy(skb, rx_ring, ring_offset+4, pkt_size); rx_ring 2044 drivers/net/ethernet/realtek/8139too.c skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size); rx_ring 2286 drivers/net/ethernet/realtek/8139too.c tp->rx_ring, tp->rx_ring_dma); rx_ring 2289 drivers/net/ethernet/realtek/8139too.c tp->rx_ring = NULL; rx_ring 1005 drivers/net/ethernet/renesas/ravb.h struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE]; rx_ring 225 drivers/net/ethernet/renesas/ravb_main.c if (priv->rx_ring[q]) { rx_ring 227 drivers/net/ethernet/renesas/ravb_main.c struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; rx_ring 238 drivers/net/ethernet/renesas/ravb_main.c dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q], rx_ring 240 drivers/net/ethernet/renesas/ravb_main.c priv->rx_ring[q] = NULL; rx_ring 291 drivers/net/ethernet/renesas/ravb_main.c memset(priv->rx_ring[q], 0, rx_ring_size); rx_ring 295 drivers/net/ethernet/renesas/ravb_main.c rx_desc = &priv->rx_ring[q][i]; rx_ring 308 drivers/net/ethernet/renesas/ravb_main.c rx_desc = &priv->rx_ring[q][i]; rx_ring 371 drivers/net/ethernet/renesas/ravb_main.c priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, rx_ring 374 drivers/net/ethernet/renesas/ravb_main.c if (!priv->rx_ring[q]) rx_ring 550 drivers/net/ethernet/renesas/ravb_main.c desc = &priv->rx_ring[q][entry]; rx_ring 610 drivers/net/ethernet/renesas/ravb_main.c desc = &priv->rx_ring[q][entry]; rx_ring 616 drivers/net/ethernet/renesas/ravb_main.c desc = &priv->rx_ring[q][entry]; rx_ring 1313 drivers/net/ethernet/renesas/sh_eth.c if (mdp->rx_ring) { rx_ring 1316 drivers/net/ethernet/renesas/sh_eth.c struct 
sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i]; rx_ring 1325 drivers/net/ethernet/renesas/sh_eth.c dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring, rx_ring 1327 drivers/net/ethernet/renesas/sh_eth.c mdp->rx_ring = NULL; rx_ring 1371 drivers/net/ethernet/renesas/sh_eth.c memset(mdp->rx_ring, 0, rx_ringsize); rx_ring 1393 drivers/net/ethernet/renesas/sh_eth.c rxdesc = &mdp->rx_ring[i]; rx_ring 1460 drivers/net/ethernet/renesas/sh_eth.c mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize, rx_ring 1462 drivers/net/ethernet/renesas/sh_eth.c if (!mdp->rx_ring) rx_ring 1636 drivers/net/ethernet/renesas/sh_eth.c rxdesc = &mdp->rx_ring[entry]; rx_ring 1701 drivers/net/ethernet/renesas/sh_eth.c rxdesc = &mdp->rx_ring[entry]; rx_ring 1707 drivers/net/ethernet/renesas/sh_eth.c rxdesc = &mdp->rx_ring[entry]; rx_ring 2502 drivers/net/ethernet/renesas/sh_eth.c rxdesc = &mdp->rx_ring[i]; rx_ring 524 drivers/net/ethernet/renesas/sh_eth.h struct sh_eth_rxdesc *rx_ring; rx_ring 49 drivers/net/ethernet/rocker/rocker.h struct rocker_dma_ring_info rx_ring; rx_ring 755 drivers/net/ethernet/rocker/rocker_main.c const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; rx_ring 760 drivers/net/ethernet/rocker/rocker_main.c for (i = 0; i < rx_ring->size; i++) { rx_ring 762 drivers/net/ethernet/rocker/rocker_main.c &rx_ring->desc_info[i]); rx_ring 770 drivers/net/ethernet/rocker/rocker_main.c rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); rx_ring 776 drivers/net/ethernet/rocker/rocker_main.c const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; rx_ring 780 drivers/net/ethernet/rocker/rocker_main.c for (i = 0; i < rx_ring->size; i++) rx_ring 781 drivers/net/ethernet/rocker/rocker_main.c rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); rx_ring 809 drivers/net/ethernet/rocker/rocker_main.c &rocker_port->rx_ring); rx_ring 815 drivers/net/ethernet/rocker/rocker_main.c err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring, rx_ring 828 drivers/net/ethernet/rocker/rocker_main.c rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring); rx_ring 833 drivers/net/ethernet/rocker/rocker_main.c rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, rx_ring 836 drivers/net/ethernet/rocker/rocker_main.c rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); rx_ring 850 drivers/net/ethernet/rocker/rocker_main.c rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, rx_ring 852 drivers/net/ethernet/rocker/rocker_main.c rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); rx_ring 2506 drivers/net/ethernet/rocker/rocker_main.c (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) { rx_ring 2523 drivers/net/ethernet/rocker/rocker_main.c rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info); rx_ring 2530 drivers/net/ethernet/rocker/rocker_main.c rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits); rx_ring 340 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c struct sxgbe_rx_queue *rx_ring) rx_ring 349 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c rx_ring->rx_skbuff[i] = skb; rx_ring 350 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c rx_ring->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, rx_ring 353 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c if (dma_mapping_error(priv->device, rx_ring->rx_skbuff_dma[i])) { rx_ring 359 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c p->rdes23.rx_rd_des23.buf2_addr = rx_ring->rx_skbuff_dma[i]; rx_ring 374 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c struct sxgbe_rx_queue 
*rx_ring) rx_ring 378 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c kfree_skb(rx_ring->rx_skbuff[i]); rx_ring 379 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c dma_unmap_single(priv->device, rx_ring->rx_skbuff_dma[i], rx_ring 440 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c static void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring, rx_ring 444 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c rx_ring->dma_rx, rx_ring->dma_rx_phy); rx_ring 445 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c kfree(rx_ring->rx_skbuff_dma); rx_ring 446 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c kfree(rx_ring->rx_skbuff); rx_ring 457 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c struct sxgbe_rx_queue *rx_ring, int rx_rsize) rx_ring 470 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c if (rx_ring == NULL) { rx_ring 476 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c rx_ring->queue_no = queue_no; rx_ring 479 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c rx_ring->dma_rx = dma_alloc_coherent(priv->device, rx_ring 481 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c &rx_ring->dma_rx_phy, GFP_KERNEL); rx_ring 483 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c if (rx_ring->dma_rx == NULL) rx_ring 487 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize, rx_ring 489 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c if (!rx_ring->rx_skbuff_dma) { rx_ring 494 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c rx_ring->rx_skbuff = kmalloc_array(rx_rsize, rx_ring 496 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c if (!rx_ring->rx_skbuff) { rx_ring 504 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c p = rx_ring->dma_rx + desc_index; rx_ring 506 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c bfsize, rx_ring); rx_ring 512 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c rx_ring->cur_rx = 0; rx_ring 513 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize); rx_ring 522 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c p = rx_ring->dma_rx + desc_index; rx_ring 523 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c sxgbe_free_rx_buffers(dev, p, desc_index, bfsize, rx_ring); rx_ring 525 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c kfree(rx_ring->rx_skbuff); rx_ring 527 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c kfree(rx_ring->rx_skbuff_dma); rx_ring 531 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c rx_ring->dma_rx, rx_ring->dma_rx_phy); rx_ring 82 drivers/net/ethernet/sgi/meth.c rx_packet *rx_ring[RX_RING_ENTRIES]; rx_ring 234 drivers/net/ethernet/sgi/meth.c priv->rx_ring[i]=(rx_packet*)(priv->rx_skbs[i]->head); rx_ring 237 drivers/net/ethernet/sgi/meth.c dma_map_single(&priv->pdev->dev, priv->rx_ring[i], rx_ring 265 drivers/net/ethernet/sgi/meth.c priv->rx_ring[i] = 0; rx_ring 396 drivers/net/ethernet/sgi/meth.c status = priv->rx_ring[priv->rx_write]->status.raw; rx_ring 408 drivers/net/ethernet/sgi/meth.c priv->rx_ring[priv->rx_write]->status.raw); rx_ring 452 drivers/net/ethernet/sgi/meth.c priv->rx_ring[priv->rx_write] = (rx_packet*)skb->head; rx_ring 453 drivers/net/ethernet/sgi/meth.c priv->rx_ring[priv->rx_write]->status.raw = 0; rx_ring 456 drivers/net/ethernet/sgi/meth.c priv->rx_ring[priv->rx_write], rx_ring 273 drivers/net/ethernet/silan/sc92031.c void *rx_ring; rx_ring 727 drivers/net/ethernet/silan/sc92031.c void *rx_ring = priv->rx_ring; rx_ring 762 drivers/net/ethernet/silan/sc92031.c rx_status = le32_to_cpup((__le32 *)(rx_ring + rx_ring_offset)); rx_ring 796 
drivers/net/ethernet/silan/sc92031.c skb_put_data(skb, rx_ring + rx_ring_offset, rx_ring 798 drivers/net/ethernet/silan/sc92031.c skb_put_data(skb, rx_ring, rx_ring 801 drivers/net/ethernet/silan/sc92031.c skb_put_data(skb, rx_ring + rx_ring_offset, pkt_size); rx_ring 996 drivers/net/ethernet/silan/sc92031.c priv->rx_ring = pci_alloc_consistent(pdev, RX_BUF_LEN, rx_ring 998 drivers/net/ethernet/silan/sc92031.c if (unlikely(!priv->rx_ring)) { rx_ring 1037 drivers/net/ethernet/silan/sc92031.c pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring, rx_ring 1063 drivers/net/ethernet/silan/sc92031.c pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring, rx_ring 186 drivers/net/ethernet/sis/sis900.c BufferDesc *rx_ring; rx_ring 497 drivers/net/ethernet/sis/sis900.c sis_priv->rx_ring = ring_space; rx_ring 575 drivers/net/ethernet/sis/sis900.c pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring, rx_ring 1172 drivers/net/ethernet/sis/sis900.c sis_priv->rx_ring[i].link = sis_priv->rx_ring_dma + rx_ring 1174 drivers/net/ethernet/sis/sis900.c sis_priv->rx_ring[i].cmdsts = 0; rx_ring 1175 drivers/net/ethernet/sis/sis900.c sis_priv->rx_ring[i].bufptr = 0; rx_ring 1190 drivers/net/ethernet/sis/sis900.c sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE; rx_ring 1191 drivers/net/ethernet/sis/sis900.c sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev, rx_ring 1194 drivers/net/ethernet/sis/sis900.c sis_priv->rx_ring[i].bufptr))) { rx_ring 1735 drivers/net/ethernet/sis/sis900.c u32 rx_status = sis_priv->rx_ring[entry].cmdsts; rx_ring 1776 drivers/net/ethernet/sis/sis900.c sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; rx_ring 1782 drivers/net/ethernet/sis/sis900.c sis_priv->rx_ring[entry].bufptr, RX_BUF_SIZE, rx_ring 1827 drivers/net/ethernet/sis/sis900.c sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; rx_ring 1828 drivers/net/ethernet/sis/sis900.c sis_priv->rx_ring[entry].bufptr = rx_ring 1832 drivers/net/ethernet/sis/sis900.c sis_priv->rx_ring[entry].bufptr))) { rx_ring 1840 drivers/net/ethernet/sis/sis900.c rx_status = sis_priv->rx_ring[entry].cmdsts; rx_ring 1861 drivers/net/ethernet/sis/sis900.c sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; rx_ring 1862 drivers/net/ethernet/sis/sis900.c sis_priv->rx_ring[entry].bufptr = rx_ring 1866 drivers/net/ethernet/sis/sis900.c sis_priv->rx_ring[entry].bufptr))) { rx_ring 1982 drivers/net/ethernet/sis/sis900.c pci_unmap_single(pdev, sis_priv->rx_ring[i].bufptr, rx_ring 2487 drivers/net/ethernet/sis/sis900.c pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring, rx_ring 253 drivers/net/ethernet/smsc/epic100.c struct epic_rx_desc *rx_ring; rx_ring 385 drivers/net/ethernet/smsc/epic100.c ep->rx_ring = ring_space; rx_ring 495 drivers/net/ethernet/smsc/epic100.c pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma); rx_ring 904 drivers/net/ethernet/smsc/epic100.c ep->rx_ring[i].rxstatus = 0; rx_ring 905 drivers/net/ethernet/smsc/epic100.c ep->rx_ring[i].buflength = ep->rx_buf_sz; rx_ring 906 drivers/net/ethernet/smsc/epic100.c ep->rx_ring[i].next = ep->rx_ring_dma + rx_ring 911 drivers/net/ethernet/smsc/epic100.c ep->rx_ring[i-1].next = ep->rx_ring_dma; rx_ring 920 drivers/net/ethernet/smsc/epic100.c ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev, rx_ring 922 drivers/net/ethernet/smsc/epic100.c ep->rx_ring[i].rxstatus = DescOwn; rx_ring 1139 drivers/net/ethernet/smsc/epic100.c ep->rx_ring[entry].rxstatus); rx_ring 1145 drivers/net/ethernet/smsc/epic100.c while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) { rx_ring 1146 
drivers/net/ethernet/smsc/epic100.c int status = ep->rx_ring[entry].rxstatus; rx_ring 1181 drivers/net/ethernet/smsc/epic100.c ep->rx_ring[entry].bufaddr, rx_ring 1187 drivers/net/ethernet/smsc/epic100.c ep->rx_ring[entry].bufaddr, rx_ring 1192 drivers/net/ethernet/smsc/epic100.c ep->rx_ring[entry].bufaddr, rx_ring 1215 drivers/net/ethernet/smsc/epic100.c ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, rx_ring 1220 drivers/net/ethernet/smsc/epic100.c ep->rx_ring[entry].rxstatus = DescOwn; rx_ring 1293 drivers/net/ethernet/smsc/epic100.c ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */ rx_ring 1294 drivers/net/ethernet/smsc/epic100.c ep->rx_ring[i].buflength = 0; rx_ring 1296 drivers/net/ethernet/smsc/epic100.c pci_unmap_single(pdev, ep->rx_ring[i].bufaddr, rx_ring 1300 drivers/net/ethernet/smsc/epic100.c ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */ rx_ring 1503 drivers/net/ethernet/smsc/epic100.c pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma); rx_ring 49 drivers/net/ethernet/smsc/smsc9420.c struct smsc9420_dma_desc *rx_ring; rx_ring 532 drivers/net/ethernet/smsc/smsc9420.c BUG_ON(!pd->rx_ring); rx_ring 545 drivers/net/ethernet/smsc/smsc9420.c pd->rx_ring[i].status = 0; rx_ring 546 drivers/net/ethernet/smsc/smsc9420.c pd->rx_ring[i].length = 0; rx_ring 547 drivers/net/ethernet/smsc/smsc9420.c pd->rx_ring[i].buffer1 = 0; rx_ring 548 drivers/net/ethernet/smsc/smsc9420.c pd->rx_ring[i].buffer2 = 0; rx_ring 804 drivers/net/ethernet/smsc/smsc9420.c pd->rx_ring[index].buffer1 = mapping + NET_IP_ALIGN; rx_ring 805 drivers/net/ethernet/smsc/smsc9420.c pd->rx_ring[index].status = RDES0_OWN_; rx_ring 831 drivers/net/ethernet/smsc/smsc9420.c status = pd->rx_ring[pd->rx_ring_head].status; rx_ring 1216 drivers/net/ethernet/smsc/smsc9420.c BUG_ON(!pd->rx_ring); rx_ring 1226 drivers/net/ethernet/smsc/smsc9420.c pd->rx_ring[i].status = 0; rx_ring 1227 drivers/net/ethernet/smsc/smsc9420.c pd->rx_ring[i].length = PKT_BUF_SZ; rx_ring 1228 drivers/net/ethernet/smsc/smsc9420.c pd->rx_ring[i].buffer2 = 0; rx_ring 1232 drivers/net/ethernet/smsc/smsc9420.c pd->rx_ring[RX_RING_SIZE - 1].length = (PKT_BUF_SZ | RDES1_RER_); rx_ring 1569 drivers/net/ethernet/smsc/smsc9420.c pd->rx_ring = pci_alloc_consistent(pdev, rx_ring 1574 drivers/net/ethernet/smsc/smsc9420.c if (!pd->rx_ring) rx_ring 1578 drivers/net/ethernet/smsc/smsc9420.c pd->tx_ring = (pd->rx_ring + RX_RING_SIZE); rx_ring 1629 drivers/net/ethernet/smsc/smsc9420.c (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr); rx_ring 1659 drivers/net/ethernet/smsc/smsc9420.c BUG_ON(!pd->rx_ring); rx_ring 1662 drivers/net/ethernet/smsc/smsc9420.c (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr); rx_ring 845 drivers/net/ethernet/socionext/netsec.c struct netsec_desc_ring *rx_ring = rx_ring 848 drivers/net/ethernet/socionext/netsec.c page_pool_get_dma_dir(rx_ring->page_pool); rx_ring 169 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c xlgmac_free_ring(pdata, channel->rx_ring); rx_ring 196 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c ret = xlgmac_init_ring(pdata, channel->rx_ring, rx_ring 221 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c kfree(pdata->channel_head->rx_ring); rx_ring 222 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c pdata->channel_head->rx_ring = NULL; rx_ring 233 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c struct xlgmac_ring *tx_ring, *rx_ring; rx_ring 250 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct 
xlgmac_ring), rx_ring 252 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c if (!rx_ring) rx_ring 279 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c channel->rx_ring = rx_ring++; rx_ring 284 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c channel->tx_ring, channel->rx_ring); rx_ring 292 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c kfree(rx_ring); rx_ring 470 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c ring = channel->rx_ring; rx_ring 610 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c if (!channel->rx_ring) rx_ring 666 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c if (!channel->rx_ring) rx_ring 1142 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c struct xlgmac_ring *ring = channel->rx_ring; rx_ring 1308 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c if (!channel->rx_ring) rx_ring 1366 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c if (!channel->rx_ring) rx_ring 1405 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c if (!channel->rx_ring) rx_ring 1827 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c if (!channel->rx_ring) rx_ring 2488 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c if (channel->rx_ring) { rx_ring 2641 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c struct xlgmac_ring *ring = channel->rx_ring; rx_ring 216 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c if (channel->tx_ring && channel->rx_ring) rx_ring 220 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c else if (channel->rx_ring) rx_ring 238 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c if (channel->tx_ring && channel->rx_ring) rx_ring 242 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c else if (channel->rx_ring) rx_ring 550 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c ring = channel->rx_ring; rx_ring 954 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c struct xlgmac_ring *ring = channel->rx_ring; rx_ring 1110 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c struct xlgmac_ring *ring = channel->rx_ring; rx_ring 378 drivers/net/ethernet/synopsys/dwc-xlgmac.h struct xlgmac_ring *rx_ring; rx_ring 442 drivers/net/ethernet/via/via-rhine.c struct rx_desc *rx_ring; rx_ring 1182 drivers/net/ethernet/via/via-rhine.c rp->rx_ring = ring; rx_ring 1198 drivers/net/ethernet/via/via-rhine.c rp->rx_ring, rp->rx_ring_dma); rx_ring 1242 drivers/net/ethernet/via/via-rhine.c rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn); rx_ring 1251 drivers/net/ethernet/via/via-rhine.c rp->rx_ring[entry].addr = cpu_to_le32(sd->dma); rx_ring 1268 drivers/net/ethernet/via/via-rhine.c rp->rx_ring[i].rx_status = 0; rx_ring 1269 drivers/net/ethernet/via/via-rhine.c rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz); rx_ring 1271 drivers/net/ethernet/via/via-rhine.c rp->rx_ring[i].next_desc = cpu_to_le32(next); rx_ring 1275 drivers/net/ethernet/via/via-rhine.c rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma); rx_ring 1303 drivers/net/ethernet/via/via-rhine.c rp->rx_ring[i].rx_status = 0; rx_ring 1304 drivers/net/ethernet/via/via-rhine.c rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. 
*/ rx_ring 2049 drivers/net/ethernet/via/via-rhine.c entry, le32_to_cpu(rp->rx_ring[entry].rx_status)); rx_ring 2053 drivers/net/ethernet/via/via-rhine.c struct rx_desc *desc = rp->rx_ring + entry; rx_ring 164 drivers/net/hippi/rrunner.c rrpriv->rx_ring = tmpptr; rx_ring 203 drivers/net/hippi/rrunner.c if (rrpriv->rx_ring) rx_ring 204 drivers/net/hippi/rrunner.c pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring, rx_ring 233 drivers/net/hippi/rrunner.c pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring, rx_ring 642 drivers/net/hippi/rrunner.c rrpriv->rx_ring[i].mode = 0; rx_ring 660 drivers/net/hippi/rrunner.c set_rraddr(&rrpriv->rx_ring[i].addr, addr); rx_ring 661 drivers/net/hippi/rrunner.c rrpriv->rx_ring[i].size = dev->mtu + HIPPI_HLEN; rx_ring 703 drivers/net/hippi/rrunner.c rrpriv->rx_ring[i].addr.addrlo, rx_ring 706 drivers/net/hippi/rrunner.c rrpriv->rx_ring[i].size = 0; rx_ring 707 drivers/net/hippi/rrunner.c set_rraddr(&rrpriv->rx_ring[i].addr, 0); rx_ring 907 drivers/net/hippi/rrunner.c rrpriv->rx_ring[index].mode |= rx_ring 933 drivers/net/hippi/rrunner.c desc = &(rrpriv->rx_ring[index]); rx_ring 939 drivers/net/hippi/rrunner.c if ( (rrpriv->rx_ring[index].mode & PACKET_BAD) == PACKET_BAD){ rx_ring 1133 drivers/net/hippi/rrunner.c struct rx_desc *desc = &(rrpriv->rx_ring[i]); rx_ring 802 drivers/net/hippi/rrunner.h struct rx_desc *rx_ring; rx_ring 194 drivers/net/thunderbolt.c struct tbnet_ring rx_ring; rx_ring 375 drivers/net/thunderbolt.c tb_ring_stop(net->rx_ring.ring); rx_ring 377 drivers/net/thunderbolt.c tbnet_free_buffers(&net->rx_ring); rx_ring 468 drivers/net/thunderbolt.c struct tbnet_ring *ring = &net->rx_ring; rx_ring 604 drivers/net/thunderbolt.c net->rx_ring.ring->hop, rx_ring 613 drivers/net/thunderbolt.c tb_ring_start(net->rx_ring.ring); rx_ring 628 drivers/net/thunderbolt.c tbnet_free_buffers(&net->rx_ring); rx_ring 630 drivers/net/thunderbolt.c tb_ring_stop(net->rx_ring.ring); rx_ring 743 drivers/net/thunderbolt.c unsigned int cleaned_count = tbnet_available_buffers(&net->rx_ring); rx_ring 744 drivers/net/thunderbolt.c struct device *dma_dev = tb_ring_dma_device(net->rx_ring.ring); rx_ring 766 drivers/net/thunderbolt.c frame = tb_ring_poll(net->rx_ring.ring); rx_ring 777 drivers/net/thunderbolt.c net->rx_ring.cons++; rx_ring 836 drivers/net/thunderbolt.c tb_ring_poll_complete(net->rx_ring.ring); rx_ring 877 drivers/net/thunderbolt.c net->rx_ring.ring = ring; rx_ring 894 drivers/net/thunderbolt.c tb_ring_free(net->rx_ring.ring); rx_ring 895 drivers/net/thunderbolt.c net->rx_ring.ring = NULL; rx_ring 571 drivers/net/vmxnet3/vmxnet3_drv.c struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx]; rx_ring 1320 drivers/net/vmxnet3/vmxnet3_drv.c ring = rq->rx_ring + ring_idx; rx_ring 1321 drivers/net/vmxnet3/vmxnet3_drv.c vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd, rx_ring 1545 drivers/net/vmxnet3/vmxnet3_drv.c ring = rq->rx_ring + ring_idx; rx_ring 1587 drivers/net/vmxnet3/vmxnet3_drv.c for (i = 0; i < rq->rx_ring[ring_idx].size; i++) { rx_ring 1592 drivers/net/vmxnet3/vmxnet3_drv.c &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc); rx_ring 1609 drivers/net/vmxnet3/vmxnet3_drv.c rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN; rx_ring 1610 drivers/net/vmxnet3/vmxnet3_drv.c rq->rx_ring[ring_idx].next2fill = rx_ring 1611 drivers/net/vmxnet3/vmxnet3_drv.c rq->rx_ring[ring_idx].next2comp = 0; rx_ring 1638 drivers/net/vmxnet3/vmxnet3_drv.c for (j = 0; j < rq->rx_ring[i].size; j++) rx_ring 1645 drivers/net/vmxnet3/vmxnet3_drv.c if (rq->rx_ring[i].base) { 
rx_ring 1647 drivers/net/vmxnet3/vmxnet3_drv.c rq->rx_ring[i].size rx_ring 1649 drivers/net/vmxnet3/vmxnet3_drv.c rq->rx_ring[i].base, rx_ring 1650 drivers/net/vmxnet3/vmxnet3_drv.c rq->rx_ring[i].basePA); rx_ring 1651 drivers/net/vmxnet3/vmxnet3_drv.c rq->rx_ring[i].base = NULL; rx_ring 1657 drivers/net/vmxnet3/vmxnet3_drv.c rq->rx_ring[0].size * rq->data_ring.desc_size, rx_ring 1671 drivers/net/vmxnet3/vmxnet3_drv.c (rq->rx_ring[0].size + rq->rx_ring[1].size); rx_ring 1688 drivers/net/vmxnet3/vmxnet3_drv.c (rq->rx_ring[0].size * rx_ring 1705 drivers/net/vmxnet3/vmxnet3_drv.c for (i = 0; i < rq->rx_ring[0].size; i++) { rx_ring 1716 drivers/net/vmxnet3/vmxnet3_drv.c for (i = 0; i < rq->rx_ring[1].size; i++) { rx_ring 1723 drivers/net/vmxnet3/vmxnet3_drv.c rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0; rx_ring 1725 drivers/net/vmxnet3/vmxnet3_drv.c memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size * rx_ring 1727 drivers/net/vmxnet3/vmxnet3_drv.c rq->rx_ring[i].gen = VMXNET3_INIT_GEN; rx_ring 1729 drivers/net/vmxnet3/vmxnet3_drv.c if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1, rx_ring 1734 drivers/net/vmxnet3/vmxnet3_drv.c vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter); rx_ring 1778 drivers/net/vmxnet3/vmxnet3_drv.c sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc); rx_ring 1779 drivers/net/vmxnet3/vmxnet3_drv.c rq->rx_ring[i].base = dma_alloc_coherent( rx_ring 1781 drivers/net/vmxnet3/vmxnet3_drv.c &rq->rx_ring[i].basePA, rx_ring 1783 drivers/net/vmxnet3/vmxnet3_drv.c if (!rq->rx_ring[i].base) { rx_ring 1791 drivers/net/vmxnet3/vmxnet3_drv.c sz = rq->rx_ring[0].size * rq->data_ring.desc_size; rx_ring 1815 drivers/net/vmxnet3/vmxnet3_drv.c sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size + rx_ring 1816 drivers/net/vmxnet3/vmxnet3_drv.c rq->rx_ring[1].size); rx_ring 1823 drivers/net/vmxnet3/vmxnet3_drv.c rq->buf_info[1] = bi + rq->rx_ring[0].size; rx_ring 2466 drivers/net/vmxnet3/vmxnet3_drv.c rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA); rx_ring 2467 drivers/net/vmxnet3/vmxnet3_drv.c rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA); rx_ring 2470 drivers/net/vmxnet3/vmxnet3_drv.c rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size); rx_ring 2471 drivers/net/vmxnet3/vmxnet3_drv.c rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size); rx_ring 2571 drivers/net/vmxnet3/vmxnet3_drv.c adapter->rx_queue[0].rx_ring[0].size, rx_ring 2572 drivers/net/vmxnet3/vmxnet3_drv.c adapter->rx_queue[0].rx_ring[1].size); rx_ring 2613 drivers/net/vmxnet3/vmxnet3_drv.c adapter->rx_queue[i].rx_ring[0].next2fill); rx_ring 2616 drivers/net/vmxnet3/vmxnet3_drv.c adapter->rx_queue[i].rx_ring[1].next2fill); rx_ring 2799 drivers/net/vmxnet3/vmxnet3_drv.c ring0_size = adapter->rx_queue[0].rx_ring[0].size; rx_ring 2803 drivers/net/vmxnet3/vmxnet3_drv.c ring1_size = adapter->rx_queue[0].rx_ring[1].size; rx_ring 2812 drivers/net/vmxnet3/vmxnet3_drv.c rq->rx_ring[0].size = ring0_size; rx_ring 2813 drivers/net/vmxnet3/vmxnet3_drv.c rq->rx_ring[1].size = ring1_size; rx_ring 2845 drivers/net/vmxnet3/vmxnet3_drv.c adapter->rx_queue[0].rx_ring[0].size = rx_ring_size; rx_ring 2846 drivers/net/vmxnet3/vmxnet3_drv.c adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size; rx_ring 427 drivers/net/vmxnet3/vmxnet3_ethtool.c buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[0].basePA); rx_ring 428 drivers/net/vmxnet3/vmxnet3_ethtool.c buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[0].basePA); rx_ring 429 drivers/net/vmxnet3/vmxnet3_ethtool.c buf[j++] = 
rq->rx_ring[0].size; rx_ring 430 drivers/net/vmxnet3/vmxnet3_ethtool.c buf[j++] = rq->rx_ring[0].next2fill; rx_ring 431 drivers/net/vmxnet3/vmxnet3_ethtool.c buf[j++] = rq->rx_ring[0].next2comp; rx_ring 432 drivers/net/vmxnet3/vmxnet3_ethtool.c buf[j++] = rq->rx_ring[0].gen; rx_ring 434 drivers/net/vmxnet3/vmxnet3_ethtool.c buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[1].basePA); rx_ring 435 drivers/net/vmxnet3/vmxnet3_ethtool.c buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[1].basePA); rx_ring 436 drivers/net/vmxnet3/vmxnet3_ethtool.c buf[j++] = rq->rx_ring[1].size; rx_ring 437 drivers/net/vmxnet3/vmxnet3_ethtool.c buf[j++] = rq->rx_ring[1].next2fill; rx_ring 438 drivers/net/vmxnet3/vmxnet3_ethtool.c buf[j++] = rq->rx_ring[1].next2comp; rx_ring 439 drivers/net/vmxnet3/vmxnet3_ethtool.c buf[j++] = rq->rx_ring[1].gen; rx_ring 443 drivers/net/vmxnet3/vmxnet3_ethtool.c buf[j++] = rq->rx_ring[0].size; rx_ring 286 drivers/net/vmxnet3/vmxnet3_int.h struct vmxnet3_cmd_ring rx_ring[2]; rx_ring 406 drivers/net/vmxnet3/vmxnet3_int.h ((rq)->rx_ring[ring_idx].size >> 3) rx_ring 361 drivers/net/wireless/admtek/adm8211.c while (!(priv->rx_ring[entry].status & cpu_to_le32(RDES0_STATUS_OWN))) { rx_ring 365 drivers/net/wireless/admtek/adm8211.c status = le32_to_cpu(priv->rx_ring[entry].status); rx_ring 367 drivers/net/wireless/admtek/adm8211.c rssi = le32_to_cpu(priv->rx_ring[entry].length) & rx_ring 424 drivers/net/wireless/admtek/adm8211.c priv->rx_ring[entry].buffer1 = rx_ring 428 drivers/net/wireless/admtek/adm8211.c priv->rx_ring[entry].status = cpu_to_le32(RDES0_STATUS_OWN | rx_ring 430 drivers/net/wireless/admtek/adm8211.c priv->rx_ring[entry].length = rx_ring 1436 drivers/net/wireless/admtek/adm8211.c desc = &priv->rx_ring[i]; rx_ring 1446 drivers/net/wireless/admtek/adm8211.c desc = &priv->rx_ring[i]; rx_ring 1748 drivers/net/wireless/admtek/adm8211.c priv->rx_ring = pci_alloc_consistent(priv->pdev, ring_size, rx_ring 1751 drivers/net/wireless/admtek/adm8211.c if (!priv->rx_ring) { rx_ring 1758 drivers/net/wireless/admtek/adm8211.c priv->tx_ring = priv->rx_ring + priv->rx_ring_size; rx_ring 1937 drivers/net/wireless/admtek/adm8211.c priv->rx_ring, priv->rx_ring_dma); rx_ring 1970 drivers/net/wireless/admtek/adm8211.c priv->rx_ring, priv->rx_ring_dma); rx_ring 541 drivers/net/wireless/admtek/adm8211.h struct adm8211_desc *rx_ring; rx_ring 2609 drivers/net/wireless/ath/ath10k/core.c ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER, rx_ring 1959 drivers/net/wireless/ath/ath10k/htt.h } rx_ring; rx_ring 31 drivers/net/wireless/ath/ath10k/htt_rx.c hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr) rx_ring 46 drivers/net/wireless/ath/ath10k/htt_rx.c if (htt->rx_ring.in_ord_rx) { rx_ring 47 drivers/net/wireless/ath/ath10k/htt_rx.c hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) { rx_ring 56 drivers/net/wireless/ath/ath10k/htt_rx.c for (i = 0; i < htt->rx_ring.size; i++) { rx_ring 57 drivers/net/wireless/ath/ath10k/htt_rx.c skb = htt->rx_ring.netbufs_ring[i]; rx_ring 69 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.fill_cnt = 0; rx_ring 70 drivers/net/wireless/ath/ath10k/htt_rx.c hash_init(htt->rx_ring.skb_table); rx_ring 71 drivers/net/wireless/ath/ath10k/htt_rx.c memset(htt->rx_ring.netbufs_ring, 0, rx_ring 72 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0])); rx_ring 77 drivers/net/wireless/ath/ath10k/htt_rx.c return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32); rx_ring 82 
drivers/net/wireless/ath/ath10k/htt_rx.c return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64); rx_ring 88 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.paddrs_ring_32 = vaddr; rx_ring 94 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.paddrs_ring_64 = vaddr; rx_ring 100 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr); rx_ring 106 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr); rx_ring 111 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.paddrs_ring_32[idx] = 0; rx_ring 116 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.paddrs_ring_64[idx] = 0; rx_ring 121 drivers/net/wireless/ath/ath10k/htt_rx.c return (void *)htt->rx_ring.paddrs_ring_32; rx_ring 126 drivers/net/wireless/ath/ath10k/htt_rx.c return (void *)htt->rx_ring.paddrs_ring_64; rx_ring 144 drivers/net/wireless/ath/ath10k/htt_rx.c idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); rx_ring 173 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.netbufs_ring[idx] = skb; rx_ring 175 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.fill_cnt++; rx_ring 177 drivers/net/wireless/ath/ath10k/htt_rx.c if (htt->rx_ring.in_ord_rx) { rx_ring 178 drivers/net/wireless/ath/ath10k/htt_rx.c hash_add(htt->rx_ring.skb_table, rx_ring 185 drivers/net/wireless/ath/ath10k/htt_rx.c idx &= htt->rx_ring.size_mask; rx_ring 194 drivers/net/wireless/ath/ath10k/htt_rx.c *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx); rx_ring 200 drivers/net/wireless/ath/ath10k/htt_rx.c lockdep_assert_held(&htt->rx_ring.lock); rx_ring 224 drivers/net/wireless/ath/ath10k/htt_rx.c spin_lock_bh(&htt->rx_ring.lock); rx_ring 225 drivers/net/wireless/ath/ath10k/htt_rx.c num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt; rx_ring 236 drivers/net/wireless/ath/ath10k/htt_rx.c mod_timer(&htt->rx_ring.refill_retry_timer, jiffies + rx_ring 239 drivers/net/wireless/ath/ath10k/htt_rx.c mod_timer(&htt->rx_ring.refill_retry_timer, jiffies + rx_ring 242 drivers/net/wireless/ath/ath10k/htt_rx.c spin_unlock_bh(&htt->rx_ring.lock); rx_ring 247 drivers/net/wireless/ath/ath10k/htt_rx.c struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer); rx_ring 260 drivers/net/wireless/ath/ath10k/htt_rx.c spin_lock_bh(&htt->rx_ring.lock); rx_ring 261 drivers/net/wireless/ath/ath10k/htt_rx.c ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level - rx_ring 262 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.fill_cnt)); rx_ring 267 drivers/net/wireless/ath/ath10k/htt_rx.c spin_unlock_bh(&htt->rx_ring.lock); rx_ring 277 drivers/net/wireless/ath/ath10k/htt_rx.c del_timer_sync(&htt->rx_ring.refill_retry_timer); rx_ring 283 drivers/net/wireless/ath/ath10k/htt_rx.c spin_lock_bh(&htt->rx_ring.lock); rx_ring 285 drivers/net/wireless/ath/ath10k/htt_rx.c spin_unlock_bh(&htt->rx_ring.lock); rx_ring 290 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.base_paddr); rx_ring 293 drivers/net/wireless/ath/ath10k/htt_rx.c sizeof(*htt->rx_ring.alloc_idx.vaddr), rx_ring 294 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.alloc_idx.vaddr, rx_ring 295 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.alloc_idx.paddr); rx_ring 297 drivers/net/wireless/ath/ath10k/htt_rx.c kfree(htt->rx_ring.netbufs_ring); rx_ring 306 drivers/net/wireless/ath/ath10k/htt_rx.c lockdep_assert_held(&htt->rx_ring.lock); rx_ring 308 drivers/net/wireless/ath/ath10k/htt_rx.c if (htt->rx_ring.fill_cnt == 0) { rx_ring 313 drivers/net/wireless/ath/ath10k/htt_rx.c idx = 
htt->rx_ring.sw_rd_idx.msdu_payld; rx_ring 314 drivers/net/wireless/ath/ath10k/htt_rx.c msdu = htt->rx_ring.netbufs_ring[idx]; rx_ring 315 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.netbufs_ring[idx] = NULL; rx_ring 319 drivers/net/wireless/ath/ath10k/htt_rx.c idx &= htt->rx_ring.size_mask; rx_ring 320 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.sw_rd_idx.msdu_payld = idx; rx_ring 321 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.fill_cnt--; rx_ring 342 drivers/net/wireless/ath/ath10k/htt_rx.c lockdep_assert_held(&htt->rx_ring.lock); rx_ring 442 drivers/net/wireless/ath/ath10k/htt_rx.c lockdep_assert_held(&htt->rx_ring.lock); rx_ring 450 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.fill_cnt--; rx_ring 633 drivers/net/wireless/ath/ath10k/htt_rx.c lockdep_assert_held(&htt->rx_ring.lock); rx_ring 695 drivers/net/wireless/ath/ath10k/htt_rx.c lockdep_assert_held(&htt->rx_ring.lock); rx_ring 750 drivers/net/wireless/ath/ath10k/htt_rx.c struct timer_list *timer = &htt->rx_ring.refill_retry_timer; rx_ring 760 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.size = HTT_RX_RING_SIZE; rx_ring 761 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.size_mask = htt->rx_ring.size - 1; rx_ring 762 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level; rx_ring 764 drivers/net/wireless/ath/ath10k/htt_rx.c if (!is_power_of_2(htt->rx_ring.size)) { rx_ring 769 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.netbufs_ring = rx_ring 770 drivers/net/wireless/ath/ath10k/htt_rx.c kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *), rx_ring 772 drivers/net/wireless/ath/ath10k/htt_rx.c if (!htt->rx_ring.netbufs_ring) rx_ring 782 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.base_paddr = paddr; rx_ring 785 drivers/net/wireless/ath/ath10k/htt_rx.c sizeof(*htt->rx_ring.alloc_idx.vaddr), rx_ring 790 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.alloc_idx.vaddr = vaddr; rx_ring 791 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.alloc_idx.paddr = paddr; rx_ring 792 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask; rx_ring 793 drivers/net/wireless/ath/ath10k/htt_rx.c *htt->rx_ring.alloc_idx.vaddr = 0; rx_ring 798 drivers/net/wireless/ath/ath10k/htt_rx.c spin_lock_init(&htt->rx_ring.lock); rx_ring 800 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.fill_cnt = 0; rx_ring 801 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.sw_rd_idx.msdu_payld = 0; rx_ring 802 drivers/net/wireless/ath/ath10k/htt_rx.c hash_init(htt->rx_ring.skb_table); rx_ring 810 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.size, htt->rx_ring.fill_level); rx_ring 817 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.base_paddr); rx_ring 819 drivers/net/wireless/ath/ath10k/htt_rx.c kfree(htt->rx_ring.netbufs_ring); rx_ring 2026 drivers/net/wireless/ath/ath10k/htt_rx.c spin_lock_bh(&htt->rx_ring.lock); rx_ring 2028 drivers/net/wireless/ath/ath10k/htt_rx.c spin_unlock_bh(&htt->rx_ring.lock); rx_ring 2032 drivers/net/wireless/ath/ath10k/htt_rx.c spin_unlock_bh(&htt->rx_ring.lock); rx_ring 2941 drivers/net/wireless/ath/ath10k/htt_rx.c lockdep_assert_held(&htt->rx_ring.lock); rx_ring 3973 drivers/net/wireless/ath/ath10k/htt_rx.c spin_lock_bh(&htt->rx_ring.lock); rx_ring 3975 drivers/net/wireless/ath/ath10k/htt_rx.c spin_unlock_bh(&htt->rx_ring.lock); rx_ring 751 drivers/net/wireless/ath/ath10k/htt_tx.c static void ath10k_htt_fill_rx_desc_offset_32(void *rx_ring) rx_ring 754 
drivers/net/wireless/ath/ath10k/htt_tx.c (struct htt_rx_ring_setup_ring32 *)rx_ring; rx_ring 770 drivers/net/wireless/ath/ath10k/htt_tx.c static void ath10k_htt_fill_rx_desc_offset_64(void *rx_ring) rx_ring 773 drivers/net/wireless/ath/ath10k/htt_tx.c (struct htt_rx_ring_setup_ring64 *)rx_ring; rx_ring 841 drivers/net/wireless/ath/ath10k/htt_tx.c fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); rx_ring 844 drivers/net/wireless/ath/ath10k/htt_tx.c __cpu_to_le32(htt->rx_ring.alloc_idx.paddr); rx_ring 845 drivers/net/wireless/ath/ath10k/htt_tx.c ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr); rx_ring 846 drivers/net/wireless/ath/ath10k/htt_tx.c ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size); rx_ring 911 drivers/net/wireless/ath/ath10k/htt_tx.c fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); rx_ring 913 drivers/net/wireless/ath/ath10k/htt_tx.c ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr); rx_ring 914 drivers/net/wireless/ath/ath10k/htt_tx.c ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr); rx_ring 915 drivers/net/wireless/ath/ath10k/htt_tx.c ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size); rx_ring 666 drivers/net/wireless/broadcom/b43/b43.h struct b43_dmaring *rx_ring; rx_ring 1025 drivers/net/wireless/broadcom/b43/dma.c destroy_ring(dma, rx_ring); rx_ring 1109 drivers/net/wireless/broadcom/b43/dma.c dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type); rx_ring 1110 drivers/net/wireless/broadcom/b43/dma.c if (!dma->rx_ring) rx_ring 2025 drivers/net/wireless/broadcom/b43/main.c b43_dma_handle_rx_overflow(dev->dma.rx_ring); rx_ring 2031 drivers/net/wireless/broadcom/b43/main.c b43_dma_rx(dev->dma.rx_ring); rx_ring 221 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c void *entry = priv->rx_ring + priv->rx_idx * priv->rx_ring_sz; rx_ring 1007 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c priv->rx_ring = pci_zalloc_consistent(priv->pdev, priv->rx_ring_sz * 32, rx_ring 1009 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) { rx_ring 1019 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c entry = priv->rx_ring + priv->rx_ring_sz*i; rx_ring 1022 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c priv->rx_ring, priv->rx_ring_dma); rx_ring 1034 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c priv->rx_ring, priv->rx_ring_dma); rx_ring 1064 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c priv->rx_ring, priv->rx_ring_dma); rx_ring 1065 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c priv->rx_ring = NULL; rx_ring 113 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h void *rx_ring; rx_ring 654 drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb; rx_ring 747 drivers/net/wireless/realtek/rtlwifi/pci.c struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[ rx_ring 748 drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->rx_ring[rxring_idx].idx]; rx_ring 758 drivers/net/wireless/realtek/rtlwifi/pci.c buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc[ rx_ring 759 drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->rx_ring[rxring_idx].idx]; rx_ring 762 drivers/net/wireless/realtek/rtlwifi/pci.c pdesc = &rtlpci->rx_ring[rxring_idx].desc[ rx_ring 763 drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->rx_ring[rxring_idx].idx]; rx_ring 867 drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->rx_ring[hw_queue].next_rx_rp += 1; rx_ring 868 drivers/net/wireless/realtek/rtlwifi/pci.c 
drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->rx_ring[hw_queue].next_rx_rp %= rx_ring 873 drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->rx_ring[hw_queue].next_rx_rp); rx_ring 884 drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->rx_ring[rxring_idx].idx); rx_ring 888 drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->rx_ring[rxring_idx].idx); rx_ring 889 drivers/net/wireless/realtek/rtlwifi/pci.c if (rtlpci->rx_ring[rxring_idx].idx == rx_ring 896 drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->rx_ring[rxring_idx].idx = rx_ring 897 drivers/net/wireless/realtek/rtlwifi/pci.c (rtlpci->rx_ring[rxring_idx].idx + 1) % rx_ring 1282 drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->rx_ring[rxring_idx].buffer_desc = rx_ring 1284 drivers/net/wireless/realtek/rtlwifi/pci.c sizeof(*rtlpci->rx_ring[rxring_idx]. rx_ring 1287 drivers/net/wireless/realtek/rtlwifi/pci.c &rtlpci->rx_ring[rxring_idx].dma); rx_ring 1288 drivers/net/wireless/realtek/rtlwifi/pci.c if (!rtlpci->rx_ring[rxring_idx].buffer_desc || rx_ring 1289 drivers/net/wireless/realtek/rtlwifi/pci.c (ulong)rtlpci->rx_ring[rxring_idx].buffer_desc & 0xFF) { rx_ring 1295 drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->rx_ring[rxring_idx].idx = 0; rx_ring 1297 drivers/net/wireless/realtek/rtlwifi/pci.c entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i]; rx_ring 1306 drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->rx_ring[rxring_idx].desc = rx_ring 1308 drivers/net/wireless/realtek/rtlwifi/pci.c sizeof(*rtlpci->rx_ring[rxring_idx]. rx_ring 1310 drivers/net/wireless/realtek/rtlwifi/pci.c &rtlpci->rx_ring[rxring_idx].dma); rx_ring 1311 drivers/net/wireless/realtek/rtlwifi/pci.c if (!rtlpci->rx_ring[rxring_idx].desc || rx_ring 1312 drivers/net/wireless/realtek/rtlwifi/pci.c (unsigned long)rtlpci->rx_ring[rxring_idx].desc & 0xFF) { rx_ring 1318 drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->rx_ring[rxring_idx].idx = 0; rx_ring 1321 drivers/net/wireless/realtek/rtlwifi/pci.c entry = &rtlpci->rx_ring[rxring_idx].desc[i]; rx_ring 1380 drivers/net/wireless/realtek/rtlwifi/pci.c struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[i]; rx_ring 1392 drivers/net/wireless/realtek/rtlwifi/pci.c sizeof(*rtlpci->rx_ring[rxring_idx].
rx_ring 1394 drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->rx_ring[rxring_idx].buffer_desc, rx_ring 1395 drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->rx_ring[rxring_idx].dma); rx_ring 1396 drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->rx_ring[rxring_idx].buffer_desc = NULL; rx_ring 1399 drivers/net/wireless/realtek/rtlwifi/pci.c sizeof(*rtlpci->rx_ring[rxring_idx].desc) * rx_ring 1401 drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->rx_ring[rxring_idx].desc, rx_ring 1402 drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->rx_ring[rxring_idx].dma); rx_ring 1403 drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->rx_ring[rxring_idx].desc = NULL; rx_ring 1473 drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->rx_ring[rxring_idx].desc) { rx_ring 1476 drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->rx_ring[rxring_idx].idx = 0; rx_ring 1478 drivers/net/wireless/realtek/rtlwifi/pci.c entry = &rtlpci->rx_ring[rxring_idx].desc[i]; rx_ring 1483 drivers/net/wireless/realtek/rtlwifi/pci.c sizeof(*rtlpci->rx_ring rx_ring 1508 drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->rx_ring[rxring_idx].idx = 0; rx_ring 184 drivers/net/wireless/realtek/rtlwifi/pci.h struct rtl8192_rx_ring rx_ring[RTL_PCI_MAX_RX_QUEUE]; rx_ring 908 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c (u64) rtlpci->rx_ring[RX_MPDU_QUEUE].dma & rx_ring 754 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c (u64) rtlpci->rx_ring[RX_MPDU_QUEUE].dma & rx_ring 743 drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c rtlpci->rx_ring[RX_MPDU_QUEUE].dma); rx_ring 842 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c (u64)rtlpci->rx_ring[RX_MPDU_QUEUE].dma >> 32); rx_ring 874 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c (u64)rtlpci->rx_ring[RX_MPDU_QUEUE].dma & rx_ring 470 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c if (write_point != rtlpci->rx_ring[queue_index].next_rx_rp) { rx_ring 490 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c rtlpci->rx_ring[queue_index].next_rx_rp = write_point; rx_ring 695 drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c rtl_write_dword(rtlpriv, RDQDA, rtlpci->rx_ring[RX_MPDU_QUEUE].dma); rx_ring 696 drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c rtl_write_dword(rtlpriv, RCDA, rtlpci->rx_ring[RX_CMD_QUEUE].dma); rx_ring 754 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c (u64) rtlpci->rx_ring[RX_MPDU_QUEUE].dma & rx_ring 901 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c (u64) rtlpci->rx_ring[RX_MPDU_QUEUE].dma & rx_ring 991 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c rtlpci->rx_ring[RX_MPDU_QUEUE].dma & DMA_BIT_MASK(32)); rx_ring 1441 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c rtlpci->rx_ring[RX_MPDU_QUEUE].dma & DMA_BIT_MASK(32)); rx_ring 128 drivers/net/wireless/realtek/rtw88/pci.c struct rtw_pci_rx_ring *rx_ring) rx_ring 136 drivers/net/wireless/realtek/rtw88/pci.c for (i = 0; i < rx_ring->r.len; i++) { rx_ring 137 drivers/net/wireless/realtek/rtw88/pci.c skb = rx_ring->buf[i]; rx_ring 144 drivers/net/wireless/realtek/rtw88/pci.c rx_ring->buf[i] = NULL; rx_ring 149 drivers/net/wireless/realtek/rtw88/pci.c struct rtw_pci_rx_ring *rx_ring) rx_ring 152 drivers/net/wireless/realtek/rtw88/pci.c u8 *head = rx_ring->r.head; rx_ring 153 drivers/net/wireless/realtek/rtw88/pci.c int ring_sz = rx_ring->r.desc_size * rx_ring->r.len; rx_ring 155 drivers/net/wireless/realtek/rtw88/pci.c rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring); rx_ring 157 drivers/net/wireless/realtek/rtw88/pci.c pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma); 
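Two conventions recur in the Realtek entries above: the descriptor ring lives in coherent DMA memory whose base must be 256-byte aligned (rtl8180 and rtlwifi both bail out when the low eight address bits are set), and teardown releases the per-slot skbs before the ring itself (rtw88's rtw_pci_free_rx_ring calls rtw_pci_free_rx_ring_skbs first, then frees the coherent block). A hedged userspace sketch of that lifecycle, with posix_memalign standing in for pci_zalloc_consistent/pci_free_consistent and plain pointers for skbs:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define DESC_SIZE 32
#define RING_LEN  512

struct ring_model {
    void *head;                /* descriptor ring base */
    void *buf[RING_LEN];       /* per-slot packet buffers (skbs in the drivers) */
};

static int ring_init(struct ring_model *r)
{
    size_t sz = (size_t)DESC_SIZE * RING_LEN;

    if (posix_memalign(&r->head, 256, sz))
        return -1;
    memset(r->head, 0, sz);    /* pci_zalloc_consistent() also zeroes */
    /* The drivers re-check alignment because the NIC latches only the
     * upper address bits of the ring base; here the check is redundant
     * since posix_memalign already guarantees it. */
    if ((uintptr_t)r->head & 0xFF) {
        free(r->head);
        return -1;
    }
    return 0;
}

static void ring_free(struct ring_model *r)
{
    /* Free slot buffers first, then the ring, matching rtw88's order. */
    for (int i = 0; i < RING_LEN; i++) {
        free(r->buf[i]);
        r->buf[i] = NULL;
    }
    free(r->head);
    r->head = NULL;
}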
rx_ring 164 drivers/net/wireless/realtek/rtw88/pci.c struct rtw_pci_rx_ring *rx_ring; rx_ring 173 drivers/net/wireless/realtek/rtw88/pci.c rx_ring = &rtwpci->rx_rings[i]; rx_ring 174 drivers/net/wireless/realtek/rtw88/pci.c rtw_pci_free_rx_ring(rtwdev, rx_ring); rx_ring 205 drivers/net/wireless/realtek/rtw88/pci.c struct rtw_pci_rx_ring *rx_ring, rx_ring 221 drivers/net/wireless/realtek/rtw88/pci.c buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + rx_ring 231 drivers/net/wireless/realtek/rtw88/pci.c struct rtw_pci_rx_ring *rx_ring, rx_ring 240 drivers/net/wireless/realtek/rtw88/pci.c buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + rx_ring 248 drivers/net/wireless/realtek/rtw88/pci.c struct rtw_pci_rx_ring *rx_ring, rx_ring 265 drivers/net/wireless/realtek/rtw88/pci.c rx_ring->r.head = head; rx_ring 276 drivers/net/wireless/realtek/rtw88/pci.c rx_ring->buf[i] = skb; rx_ring 277 drivers/net/wireless/realtek/rtw88/pci.c ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size); rx_ring 285 drivers/net/wireless/realtek/rtw88/pci.c rx_ring->r.dma = dma; rx_ring 286 drivers/net/wireless/realtek/rtw88/pci.c rx_ring->r.len = len; rx_ring 287 drivers/net/wireless/realtek/rtw88/pci.c rx_ring->r.desc_size = desc_size; rx_ring 288 drivers/net/wireless/realtek/rtw88/pci.c rx_ring->r.wp = 0; rx_ring 289 drivers/net/wireless/realtek/rtw88/pci.c rx_ring->r.rp = 0; rx_ring 295 drivers/net/wireless/realtek/rtw88/pci.c skb = rx_ring->buf[i]; rx_ring 301 drivers/net/wireless/realtek/rtw88/pci.c rx_ring->buf[i] = NULL; rx_ring 314 drivers/net/wireless/realtek/rtw88/pci.c struct rtw_pci_rx_ring *rx_ring; rx_ring 334 drivers/net/wireless/realtek/rtw88/pci.c rx_ring = &rtwpci->rx_rings[j]; rx_ring 335 drivers/net/wireless/realtek/rtw88/pci.c ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size, rx_ring 352 drivers/net/wireless/realtek/rtw88/pci.c rx_ring = &rtwpci->rx_rings[j]; rx_ring 353 drivers/net/wireless/realtek/rtw88/pci.c rtw_pci_free_rx_ring(rtwdev, rx_ring); rx_ring 583 drivers/net/wireless/realtek/rtw88/pci.c struct rtw_pci_rx_ring *rx_ring, rx_ring 592 drivers/net/wireless/realtek/rtw88/pci.c buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + rx_ring 30 drivers/net/xen-netback/xenbus.c struct xen_netif_rx_back_ring *rx_ring = &queue->rx; rx_ring 61 drivers/net/xen-netback/xenbus.c if (rx_ring->sring) { rx_ring 62 drivers/net/xen-netback/xenbus.c struct xen_netif_rx_sring *sring = rx_ring->sring; rx_ring 64 drivers/net/xen-netback/xenbus.c seq_printf(m, "RX: nr_ents %u\n", rx_ring->nr_ents); rx_ring 68 drivers/net/xen-netback/xenbus.c rx_ring->req_cons, rx_ring 69 drivers/net/xen-netback/xenbus.c rx_ring->req_cons - sring->rsp_prod, rx_ring 74 drivers/net/xen-netback/xenbus.c rx_ring->rsp_prod_pvt, rx_ring 75 drivers/net/xen-netback/xenbus.c rx_ring->rsp_prod_pvt - sring->rsp_prod, rx_ring 193 drivers/rapidio/rio_cm.c struct chan_rx_ring rx_ring; rx_ring 555 drivers/rapidio/rio_cm.c if (ch->rx_ring.count == RIOCM_RX_RING_SIZE) { rx_ring 564 drivers/rapidio/rio_cm.c ch->rx_ring.buf[ch->rx_ring.head] = buf; rx_ring 565 drivers/rapidio/rio_cm.c ch->rx_ring.head++; rx_ring 566 drivers/rapidio/rio_cm.c ch->rx_ring.count++; rx_ring 567 drivers/rapidio/rio_cm.c ch->rx_ring.head %= RIOCM_RX_RING_SIZE; rx_ring 838 drivers/rapidio/rio_cm.c if (ch->rx_ring.inuse[i] == buf) { rx_ring 839 drivers/rapidio/rio_cm.c ch->rx_ring.inuse[i] = NULL; rx_ring 840 drivers/rapidio/rio_cm.c ch->rx_ring.inuse_cnt--;
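rio_cm keeps its per-channel rx_ring as a counted circular queue: enqueue refuses new messages once count reaches RIOCM_RX_RING_SIZE, stores at head and advances it modulo the ring size (rio_cm.c:555-567), and dequeue mirrors the same steps at tail (rio_cm.c:901-905). A compact model of that head/tail/count discipline follows; RING_SIZE and the function names are illustrative stand-ins, not rio_cm's:

#include <stddef.h>

#define RING_SIZE 128

struct rx_queue {
    void *buf[RING_SIZE];
    unsigned int head, tail, count;
};

static int q_push(struct rx_queue *q, void *m)
{
    if (q->count == RING_SIZE)
        return -1;                        /* ring full: caller drops the message */
    q->buf[q->head] = m;
    q->head = (q->head + 1) % RING_SIZE;  /* modulo wrap: size need not be power of 2 */
    q->count++;
    return 0;
}

static void *q_pop(struct rx_queue *q)
{
    void *m;

    if (!q->count)
        return NULL;                      /* ring empty */
    m = q->buf[q->tail];
    q->buf[q->tail] = NULL;
    q->tail = (q->tail + 1) % RING_SIZE;
    q->count--;
    return m;
}

Because an explicit count distinguishes full from empty, the ring size does not need to be a power of two and no slot is sacrificed to disambiguate the two states.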
rx_ring 877 drivers/rapidio/rio_cm.c if (ch->rx_ring.inuse_cnt == RIOCM_RX_RING_SIZE) { rx_ring 901 drivers/rapidio/rio_cm.c rxmsg = ch->rx_ring.buf[ch->rx_ring.tail]; rx_ring 902 drivers/rapidio/rio_cm.c ch->rx_ring.buf[ch->rx_ring.tail] = NULL; rx_ring 903 drivers/rapidio/rio_cm.c ch->rx_ring.count--; rx_ring 904 drivers/rapidio/rio_cm.c ch->rx_ring.tail++; rx_ring 905 drivers/rapidio/rio_cm.c ch->rx_ring.tail %= RIOCM_RX_RING_SIZE; rx_ring 909 drivers/rapidio/rio_cm.c if (ch->rx_ring.inuse[i] == NULL) { rx_ring 910 drivers/rapidio/rio_cm.c ch->rx_ring.inuse[i] = rxmsg; rx_ring 911 drivers/rapidio/rio_cm.c ch->rx_ring.inuse_cnt++; rx_ring 1320 drivers/rapidio/rio_cm.c ch->rx_ring.head = 0; rx_ring 1321 drivers/rapidio/rio_cm.c ch->rx_ring.tail = 0; rx_ring 1322 drivers/rapidio/rio_cm.c ch->rx_ring.count = 0; rx_ring 1323 drivers/rapidio/rio_cm.c ch->rx_ring.inuse_cnt = 0; rx_ring 1370 drivers/rapidio/rio_cm.c if (ch->rx_ring.inuse_cnt) { rx_ring 1372 drivers/rapidio/rio_cm.c i < RIOCM_RX_RING_SIZE && ch->rx_ring.inuse_cnt; i++) { rx_ring 1373 drivers/rapidio/rio_cm.c if (ch->rx_ring.inuse[i] != NULL) { rx_ring 1374 drivers/rapidio/rio_cm.c kfree(ch->rx_ring.inuse[i]); rx_ring 1375 drivers/rapidio/rio_cm.c ch->rx_ring.inuse_cnt--; rx_ring 1380 drivers/rapidio/rio_cm.c if (ch->rx_ring.count) rx_ring 1381 drivers/rapidio/rio_cm.c for (i = 0; i < RIOCM_RX_RING_SIZE && ch->rx_ring.count; i++) { rx_ring 1382 drivers/rapidio/rio_cm.c if (ch->rx_ring.buf[i] != NULL) { rx_ring 1383 drivers/rapidio/rio_cm.c kfree(ch->rx_ring.buf[i]); rx_ring 1384 drivers/rapidio/rio_cm.c ch->rx_ring.count--; rx_ring 115 drivers/scsi/ibmvscsi_tgt/libsrp.c target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size); rx_ring 116 drivers/scsi/ibmvscsi_tgt/libsrp.c if (!target->rx_ring) rx_ring 118 drivers/scsi/ibmvscsi_tgt/libsrp.c err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring); rx_ring 126 drivers/scsi/ibmvscsi_tgt/libsrp.c srp_ring_free(target->dev, target->rx_ring, nr, iu_size); rx_ring 133 drivers/scsi/ibmvscsi_tgt/libsrp.c srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size, rx_ring 92 drivers/scsi/ibmvscsi_tgt/libsrp.h struct srp_buf **rx_ring; rx_ring 150 drivers/staging/mt7621-dma/mtk-hsdma.c struct hsdma_desc *rx_ring; rx_ring 244 drivers/staging/mt7621-dma/mtk-hsdma.c rx_desc = &chan->rx_ring[i]; rx_ring 270 drivers/staging/mt7621-dma/mtk-hsdma.c chan->rx_ring[i].addr0 = 0; rx_ring 271 drivers/staging/mt7621-dma/mtk-hsdma.c chan->rx_ring[i].flags = 0; rx_ring 363 drivers/staging/mt7621-dma/mtk-hsdma.c rx_desc = &chan->rx_ring[rx_idx]; rx_ring 556 drivers/staging/mt7621-dma/mtk-hsdma.c chan->rx_ring = &chan->tx_ring[HSDMA_DESCS_NUM]; rx_ring 575 drivers/staging/mt7621-dma/mtk-hsdma.c chan->rx_ring = NULL; rx_ring 2115 drivers/staging/qlge/qlge.h struct rx_ring rx_ring[MAX_RX_RINGS]; rx_ring 2269 drivers/staging/qlge/qlge.h int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget); rx_ring 2310 drivers/staging/qlge/qlge.h void ql_dump_rx_ring(struct rx_ring *rx_ring); rx_ring 2316 drivers/staging/qlge/qlge.h #define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring) rx_ring 2324 drivers/staging/qlge/qlge.h #define QL_DUMP_RX_RING(rx_ring) rx_ring 1628 drivers/staging/qlge/qlge_dbg.c DUMP_QDEV_FIELD(qdev, "%p", rx_ring); rx_ring 1733 drivers/staging/qlge/qlge_dbg.c void ql_dump_rx_ring(struct rx_ring *rx_ring) rx_ring 1735 drivers/staging/qlge/qlge_dbg.c if (rx_ring == NULL) rx_ring 1738 drivers/staging/qlge/qlge_dbg.c rx_ring->cq_id); rx_ring 1740 drivers/staging/qlge/qlge_dbg.c rx_ring->cq_id, rx_ring->type == DEFAULT_Q ?
"DEFAULT" : "", rx_ring 1741 drivers/staging/qlge/qlge_dbg.c rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "", rx_ring 1742 drivers/staging/qlge/qlge_dbg.c rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : ""); rx_ring 1743 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb); rx_ring 1744 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base); rx_ring 1746 drivers/staging/qlge/qlge_dbg.c (unsigned long long) rx_ring->cq_base_dma); rx_ring 1747 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size); rx_ring 1748 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len); rx_ring 1750 drivers/staging/qlge/qlge_dbg.c rx_ring->prod_idx_sh_reg, rx_ring 1751 drivers/staging/qlge/qlge_dbg.c rx_ring->prod_idx_sh_reg rx_ring 1752 drivers/staging/qlge/qlge_dbg.c ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0); rx_ring 1754 drivers/staging/qlge/qlge_dbg.c (unsigned long long) rx_ring->prod_idx_sh_reg_dma); rx_ring 1756 drivers/staging/qlge/qlge_dbg.c rx_ring->cnsmr_idx_db_reg); rx_ring 1757 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx); rx_ring 1758 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry); rx_ring 1759 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg); rx_ring 1761 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base); rx_ring 1763 drivers/staging/qlge/qlge_dbg.c (unsigned long long) rx_ring->lbq_base_dma); rx_ring 1765 drivers/staging/qlge/qlge_dbg.c rx_ring->lbq_base_indirect); rx_ring 1767 drivers/staging/qlge/qlge_dbg.c (unsigned long long) rx_ring->lbq_base_indirect_dma); rx_ring 1768 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->lbq = %p\n", rx_ring->lbq); rx_ring 1769 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len); rx_ring 1770 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size); rx_ring 1772 drivers/staging/qlge/qlge_dbg.c rx_ring->lbq_prod_idx_db_reg); rx_ring 1773 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx); rx_ring 1774 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx); rx_ring 1775 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx); rx_ring 1776 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt); rx_ring 1777 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size); rx_ring 1779 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base); rx_ring 1781 drivers/staging/qlge/qlge_dbg.c (unsigned long long) rx_ring->sbq_base_dma); rx_ring 1783 drivers/staging/qlge/qlge_dbg.c rx_ring->sbq_base_indirect); rx_ring 1785 drivers/staging/qlge/qlge_dbg.c (unsigned long long) rx_ring->sbq_base_indirect_dma); rx_ring 1786 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->sbq = %p\n", rx_ring->sbq); rx_ring 1787 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len); rx_ring 1788 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size); rx_ring 1790 drivers/staging/qlge/qlge_dbg.c rx_ring->sbq_prod_idx_db_reg); rx_ring 1791 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx); rx_ring 1792 
drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx); rx_ring 1793 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx); rx_ring 1794 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt); rx_ring 1795 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size); rx_ring 1796 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id); rx_ring 1797 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->irq = %d\n", rx_ring->irq); rx_ring 1798 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->cpu = %d\n", rx_ring->cpu); rx_ring 1799 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->qdev = %p\n", rx_ring->qdev); rx_ring 2020 drivers/staging/qlge/qlge_dbg.c QL_DUMP_RX_RING(&qdev->rx_ring[i]); rx_ring 2021 drivers/staging/qlge/qlge_dbg.c QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]); rx_ring 189 drivers/staging/qlge/qlge_ethtool.c struct rx_ring *rx_ring; rx_ring 198 drivers/staging/qlge/qlge_ethtool.c cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count]; rx_ring 203 drivers/staging/qlge/qlge_ethtool.c rx_ring = &qdev->rx_ring[i]; rx_ring 204 drivers/staging/qlge/qlge_ethtool.c cqicb = (struct cqicb *)rx_ring; rx_ring 210 drivers/staging/qlge/qlge_ethtool.c CFG_LCQ, rx_ring->cq_id); rx_ring 220 drivers/staging/qlge/qlge_ethtool.c cqicb = (struct cqicb *)&qdev->rx_ring[0]; rx_ring 224 drivers/staging/qlge/qlge_ethtool.c for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) { rx_ring 225 drivers/staging/qlge/qlge_ethtool.c rx_ring = &qdev->rx_ring[i]; rx_ring 226 drivers/staging/qlge/qlge_ethtool.c cqicb = (struct cqicb *)rx_ring; rx_ring 232 drivers/staging/qlge/qlge_ethtool.c CFG_LCQ, rx_ring->cq_id); rx_ring 553 drivers/staging/qlge/qlge_ethtool.c ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128); rx_ring 1031 drivers/staging/qlge/qlge_main.c static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring) rx_ring 1033 drivers/staging/qlge/qlge_main.c struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx]; rx_ring 1034 drivers/staging/qlge/qlge_main.c rx_ring->lbq_curr_idx++; rx_ring 1035 drivers/staging/qlge/qlge_main.c if (rx_ring->lbq_curr_idx == rx_ring->lbq_len) rx_ring 1036 drivers/staging/qlge/qlge_main.c rx_ring->lbq_curr_idx = 0; rx_ring 1037 drivers/staging/qlge/qlge_main.c rx_ring->lbq_free_cnt++; rx_ring 1042 drivers/staging/qlge/qlge_main.c struct rx_ring *rx_ring) rx_ring 1044 drivers/staging/qlge/qlge_main.c struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring); rx_ring 1048 drivers/staging/qlge/qlge_main.c rx_ring->lbq_buf_size, rx_ring 1054 drivers/staging/qlge/qlge_main.c if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size) rx_ring 1064 drivers/staging/qlge/qlge_main.c static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring) rx_ring 1066 drivers/staging/qlge/qlge_main.c struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx]; rx_ring 1067 drivers/staging/qlge/qlge_main.c rx_ring->sbq_curr_idx++; rx_ring 1068 drivers/staging/qlge/qlge_main.c if (rx_ring->sbq_curr_idx == rx_ring->sbq_len) rx_ring 1069 drivers/staging/qlge/qlge_main.c rx_ring->sbq_curr_idx = 0; rx_ring 1070 drivers/staging/qlge/qlge_main.c rx_ring->sbq_free_cnt++; rx_ring 1075 drivers/staging/qlge/qlge_main.c static void ql_update_cq(struct rx_ring *rx_ring) rx_ring 1077 drivers/staging/qlge/qlge_main.c rx_ring->cnsmr_idx++; rx_ring 1078 drivers/staging/qlge/qlge_main.c rx_ring->curr_entry++;
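qlge splits completion-queue consumption into two steps visible in the entries above: ql_update_cq() bumps cnsmr_idx and curr_entry, wrapping both back to cq_base when the end of the queue is reached (qlge_main.c:1077-1081), and ql_write_cq_idx() publishes the new consumer index through a doorbell register. A sketch of the same shape; the struct layout is a simplification and the volatile pointer stands in for the mapped doorbell that ql_write_db_reg() targets:

#include <stdint.h>
#include <stddef.h>

struct cq_model {
    uint32_t cnsmr_idx;               /* consumer index */
    uint32_t cq_len;                  /* number of entries */
    char *cq_base, *curr_entry;       /* queue start / current entry */
    size_t entry_sz;
    volatile uint32_t *cnsmr_idx_db;  /* mapped doorbell (stand-in) */
};

static void cq_advance(struct cq_model *cq)
{
    cq->cnsmr_idx++;
    cq->curr_entry += cq->entry_sz;
    if (cq->cnsmr_idx == cq->cq_len) { /* explicit wrap, no power-of-2 mask */
        cq->cnsmr_idx = 0;
        cq->curr_entry = cq->cq_base;
    }
}

static void cq_publish(struct cq_model *cq)
{
    /* Tell the NIC how far software has consumed. Doing this once per
     * batch rather than per entry keeps MMIO writes off the hot path. */
    *cq->cnsmr_idx_db = cq->cnsmr_idx;
}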
rx_ring 1079 drivers/staging/qlge/qlge_main.c if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) { rx_ring 1080 drivers/staging/qlge/qlge_main.c rx_ring->cnsmr_idx = 0; rx_ring 1081 drivers/staging/qlge/qlge_main.c rx_ring->curr_entry = rx_ring->cq_base; rx_ring 1085 drivers/staging/qlge/qlge_main.c static void ql_write_cq_idx(struct rx_ring *rx_ring) rx_ring 1087 drivers/staging/qlge/qlge_main.c ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg); rx_ring 1090 drivers/staging/qlge/qlge_main.c static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring, rx_ring 1093 drivers/staging/qlge/qlge_main.c if (!rx_ring->pg_chunk.page) { rx_ring 1095 drivers/staging/qlge/qlge_main.c rx_ring->pg_chunk.page = alloc_pages(__GFP_COMP | GFP_ATOMIC, rx_ring 1097 drivers/staging/qlge/qlge_main.c if (unlikely(!rx_ring->pg_chunk.page)) { rx_ring 1102 drivers/staging/qlge/qlge_main.c rx_ring->pg_chunk.offset = 0; rx_ring 1103 drivers/staging/qlge/qlge_main.c map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page, rx_ring 1107 drivers/staging/qlge/qlge_main.c __free_pages(rx_ring->pg_chunk.page, rx_ring 1109 drivers/staging/qlge/qlge_main.c rx_ring->pg_chunk.page = NULL; rx_ring 1114 drivers/staging/qlge/qlge_main.c rx_ring->pg_chunk.map = map; rx_ring 1115 drivers/staging/qlge/qlge_main.c rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page); rx_ring 1121 drivers/staging/qlge/qlge_main.c lbq_desc->p.pg_chunk = rx_ring->pg_chunk; rx_ring 1126 drivers/staging/qlge/qlge_main.c rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size; rx_ring 1127 drivers/staging/qlge/qlge_main.c if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) { rx_ring 1128 drivers/staging/qlge/qlge_main.c rx_ring->pg_chunk.page = NULL; rx_ring 1131 drivers/staging/qlge/qlge_main.c rx_ring->pg_chunk.va += rx_ring->lbq_buf_size; rx_ring 1132 drivers/staging/qlge/qlge_main.c get_page(rx_ring->pg_chunk.page); rx_ring 1138 drivers/staging/qlge/qlge_main.c static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) rx_ring 1140 drivers/staging/qlge/qlge_main.c u32 clean_idx = rx_ring->lbq_clean_idx; rx_ring 1146 drivers/staging/qlge/qlge_main.c while (rx_ring->lbq_free_cnt > 32) { rx_ring 1147 drivers/staging/qlge/qlge_main.c for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) { rx_ring 1151 drivers/staging/qlge/qlge_main.c lbq_desc = &rx_ring->lbq[clean_idx]; rx_ring 1152 drivers/staging/qlge/qlge_main.c if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) { rx_ring 1153 drivers/staging/qlge/qlge_main.c rx_ring->lbq_clean_idx = clean_idx; rx_ring 1164 drivers/staging/qlge/qlge_main.c rx_ring->lbq_buf_size); rx_ring 1168 drivers/staging/qlge/qlge_main.c rx_ring->lbq_buf_size, rx_ring 1171 drivers/staging/qlge/qlge_main.c if (clean_idx == rx_ring->lbq_len) rx_ring 1175 drivers/staging/qlge/qlge_main.c rx_ring->lbq_clean_idx = clean_idx; rx_ring 1176 drivers/staging/qlge/qlge_main.c rx_ring->lbq_prod_idx += 16; rx_ring 1177 drivers/staging/qlge/qlge_main.c if (rx_ring->lbq_prod_idx == rx_ring->lbq_len) rx_ring 1178 drivers/staging/qlge/qlge_main.c rx_ring->lbq_prod_idx = 0; rx_ring 1179 drivers/staging/qlge/qlge_main.c rx_ring->lbq_free_cnt -= 16; rx_ring 1185 drivers/staging/qlge/qlge_main.c rx_ring->lbq_prod_idx); rx_ring 1186 drivers/staging/qlge/qlge_main.c ql_write_db_reg(rx_ring->lbq_prod_idx, rx_ring 1187 drivers/staging/qlge/qlge_main.c rx_ring->lbq_prod_idx_db_reg); rx_ring 1192 drivers/staging/qlge/qlge_main.c static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) rx_ring 1194
drivers/staging/qlge/qlge_main.c u32 clean_idx = rx_ring->sbq_clean_idx; rx_ring 1200 drivers/staging/qlge/qlge_main.c while (rx_ring->sbq_free_cnt > 16) { rx_ring 1201 drivers/staging/qlge/qlge_main.c for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) { rx_ring 1202 drivers/staging/qlge/qlge_main.c sbq_desc = &rx_ring->sbq[clean_idx]; rx_ring 1215 drivers/staging/qlge/qlge_main.c rx_ring->sbq_clean_idx = clean_idx; rx_ring 1221 drivers/staging/qlge/qlge_main.c rx_ring->sbq_buf_size, rx_ring 1226 drivers/staging/qlge/qlge_main.c rx_ring->sbq_clean_idx = clean_idx; rx_ring 1233 drivers/staging/qlge/qlge_main.c rx_ring->sbq_buf_size); rx_ring 1238 drivers/staging/qlge/qlge_main.c if (clean_idx == rx_ring->sbq_len) rx_ring 1241 drivers/staging/qlge/qlge_main.c rx_ring->sbq_clean_idx = clean_idx; rx_ring 1242 drivers/staging/qlge/qlge_main.c rx_ring->sbq_prod_idx += 16; rx_ring 1243 drivers/staging/qlge/qlge_main.c if (rx_ring->sbq_prod_idx == rx_ring->sbq_len) rx_ring 1244 drivers/staging/qlge/qlge_main.c rx_ring->sbq_prod_idx = 0; rx_ring 1245 drivers/staging/qlge/qlge_main.c rx_ring->sbq_free_cnt -= 16; rx_ring 1251 drivers/staging/qlge/qlge_main.c rx_ring->sbq_prod_idx); rx_ring 1252 drivers/staging/qlge/qlge_main.c ql_write_db_reg(rx_ring->sbq_prod_idx, rx_ring 1253 drivers/staging/qlge/qlge_main.c rx_ring->sbq_prod_idx_db_reg); rx_ring 1258 drivers/staging/qlge/qlge_main.c struct rx_ring *rx_ring) rx_ring 1260 drivers/staging/qlge/qlge_main.c ql_update_sbq(qdev, rx_ring); rx_ring 1261 drivers/staging/qlge/qlge_main.c ql_update_lbq(qdev, rx_ring); rx_ring 1437 drivers/staging/qlge/qlge_main.c struct rx_ring *rx_ring) rx_ring 1442 drivers/staging/qlge/qlge_main.c rx_ring->rx_errors++; rx_ring 1492 drivers/staging/qlge/qlge_main.c struct rx_ring *rx_ring, rx_ring 1498 drivers/staging/qlge/qlge_main.c struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); rx_ring 1499 drivers/staging/qlge/qlge_main.c struct napi_struct *napi = &rx_ring->napi; rx_ring 1503 drivers/staging/qlge/qlge_main.c ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); rx_ring 1513 drivers/staging/qlge/qlge_main.c rx_ring->rx_dropped++; rx_ring 1528 drivers/staging/qlge/qlge_main.c rx_ring->rx_packets++; rx_ring 1529 drivers/staging/qlge/qlge_main.c rx_ring->rx_bytes += length; rx_ring 1531 drivers/staging/qlge/qlge_main.c skb_record_rx_queue(skb, rx_ring->cq_id); rx_ring 1539 drivers/staging/qlge/qlge_main.c struct rx_ring *rx_ring, rx_ring 1547 drivers/staging/qlge/qlge_main.c struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); rx_ring 1548 drivers/staging/qlge/qlge_main.c struct napi_struct *napi = &rx_ring->napi; rx_ring 1553 drivers/staging/qlge/qlge_main.c rx_ring->rx_dropped++; rx_ring 1563 drivers/staging/qlge/qlge_main.c ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); rx_ring 1576 drivers/staging/qlge/qlge_main.c rx_ring->rx_dropped++; rx_ring 1590 drivers/staging/qlge/qlge_main.c rx_ring->rx_packets++; rx_ring 1591 drivers/staging/qlge/qlge_main.c rx_ring->rx_bytes += skb->len; rx_ring 1617 drivers/staging/qlge/qlge_main.c skb_record_rx_queue(skb, rx_ring->cq_id); rx_ring 1632 drivers/staging/qlge/qlge_main.c struct rx_ring *rx_ring, rx_ring 1640 drivers/staging/qlge/qlge_main.c struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring); rx_ring 1646 drivers/staging/qlge/qlge_main.c rx_ring->rx_dropped++; rx_ring 1666 drivers/staging/qlge/qlge_main.c ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); rx_ring 1683 drivers/staging/qlge/qlge_main.c rx_ring->rx_dropped++; rx_ring 
1702 drivers/staging/qlge/qlge_main.c rx_ring->rx_packets++; rx_ring 1703 drivers/staging/qlge/qlge_main.c rx_ring->rx_bytes += skb->len; rx_ring 1731 drivers/staging/qlge/qlge_main.c skb_record_rx_queue(skb, rx_ring->cq_id); rx_ring 1735 drivers/staging/qlge/qlge_main.c napi_gro_receive(&rx_ring->napi, skb); rx_ring 1759 drivers/staging/qlge/qlge_main.c struct rx_ring *rx_ring, rx_ring 1779 drivers/staging/qlge/qlge_main.c sbq_desc = ql_get_curr_sbuf(rx_ring); rx_ring 1811 drivers/staging/qlge/qlge_main.c sbq_desc = ql_get_curr_sbuf(rx_ring); rx_ring 1831 drivers/staging/qlge/qlge_main.c sbq_desc = ql_get_curr_sbuf(rx_ring); rx_ring 1853 drivers/staging/qlge/qlge_main.c lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); rx_ring 1869 drivers/staging/qlge/qlge_main.c lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); rx_ring 1910 drivers/staging/qlge/qlge_main.c sbq_desc = ql_get_curr_sbuf(rx_ring); rx_ring 1933 drivers/staging/qlge/qlge_main.c lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); rx_ring 1934 drivers/staging/qlge/qlge_main.c size = (length < rx_ring->lbq_buf_size) ? length : rx_ring 1935 drivers/staging/qlge/qlge_main.c rx_ring->lbq_buf_size; rx_ring 1959 drivers/staging/qlge/qlge_main.c struct rx_ring *rx_ring, rx_ring 1968 drivers/staging/qlge/qlge_main.c skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp); rx_ring 1972 drivers/staging/qlge/qlge_main.c rx_ring->rx_dropped++; rx_ring 1978 drivers/staging/qlge/qlge_main.c ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); rx_ring 1988 drivers/staging/qlge/qlge_main.c rx_ring->rx_dropped++; rx_ring 2008 drivers/staging/qlge/qlge_main.c rx_ring->rx_multicast++; rx_ring 2041 drivers/staging/qlge/qlge_main.c rx_ring->rx_packets++; rx_ring 2042 drivers/staging/qlge/qlge_main.c rx_ring->rx_bytes += skb->len; rx_ring 2043 drivers/staging/qlge/qlge_main.c skb_record_rx_queue(skb, rx_ring->cq_id); rx_ring 2047 drivers/staging/qlge/qlge_main.c napi_gro_receive(&rx_ring->napi, skb); rx_ring 2054 drivers/staging/qlge/qlge_main.c struct rx_ring *rx_ring, rx_ring 2069 drivers/staging/qlge/qlge_main.c ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp, rx_ring 2076 drivers/staging/qlge/qlge_main.c ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, rx_ring 2084 drivers/staging/qlge/qlge_main.c ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, rx_ring 2090 drivers/staging/qlge/qlge_main.c ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, rx_ring 2096 drivers/staging/qlge/qlge_main.c ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp, rx_ring 2202 drivers/staging/qlge/qlge_main.c static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) rx_ring 2204 drivers/staging/qlge/qlge_main.c struct ql_adapter *qdev = rx_ring->qdev; rx_ring 2205 drivers/staging/qlge/qlge_main.c u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); rx_ring 2211 drivers/staging/qlge/qlge_main.c while (prod != rx_ring->cnsmr_idx) { rx_ring 2215 drivers/staging/qlge/qlge_main.c rx_ring->cq_id, prod, rx_ring->cnsmr_idx); rx_ring 2217 drivers/staging/qlge/qlge_main.c net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry; rx_ring 2231 drivers/staging/qlge/qlge_main.c ql_update_cq(rx_ring); rx_ring 2232 drivers/staging/qlge/qlge_main.c prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); rx_ring 2236 drivers/staging/qlge/qlge_main.c ql_write_cq_idx(rx_ring); rx_ring 2250 drivers/staging/qlge/qlge_main.c static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) rx_ring 2252 drivers/staging/qlge/qlge_main.c struct ql_adapter *qdev = rx_ring->qdev;
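ql_clean_inbound_rx_ring() above is the NAPI consumer: it re-reads a producer index that the NIC DMA-writes into a shared shadow register, drains entries until the consumer meets the producer or the budget runs out, then refills the buffer queues and rings the consumer doorbell. The loop's shape, reduced to its control flow; process_entry() is a stub standing in for the ql_process_mac_rx_intr() dispatch:

#include <stdint.h>

struct cq {
    uint32_t cnsmr_idx, cq_len;
    volatile uint32_t *prod_idx_sh_reg; /* NIC-updated shadow register */
};

static void process_entry(struct cq *q) { (void)q; } /* stub */

static int clean_rx_ring(struct cq *q, int budget)
{
    int work_done = 0;
    uint32_t prod = *q->prod_idx_sh_reg;

    while (prod != q->cnsmr_idx) {
        process_entry(q);
        if (++q->cnsmr_idx == q->cq_len)
            q->cnsmr_idx = 0;
        if (++work_done == budget)
            break;
        prod = *q->prod_idx_sh_reg;     /* re-read: the NIC may have produced more */
    }
    return work_done;                   /* NAPI re-arms the IRQ only when work_done < budget */
}

Returning exactly budget tells the NAPI core to poll again instead of re-enabling the interrupt, which is why the driver counts work_done so carefully.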
rx_ring 2253 drivers/staging/qlge/qlge_main.c u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); rx_ring 2258 drivers/staging/qlge/qlge_main.c while (prod != rx_ring->cnsmr_idx) { rx_ring 2262 drivers/staging/qlge/qlge_main.c rx_ring->cq_id, prod, rx_ring->cnsmr_idx); rx_ring 2264 drivers/staging/qlge/qlge_main.c net_rsp = rx_ring->curr_entry; rx_ring 2268 drivers/staging/qlge/qlge_main.c ql_process_mac_rx_intr(qdev, rx_ring, rx_ring 2284 drivers/staging/qlge/qlge_main.c ql_update_cq(rx_ring); rx_ring 2285 drivers/staging/qlge/qlge_main.c prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); rx_ring 2289 drivers/staging/qlge/qlge_main.c ql_update_buffer_queues(qdev, rx_ring); rx_ring 2290 drivers/staging/qlge/qlge_main.c ql_write_cq_idx(rx_ring); rx_ring 2296 drivers/staging/qlge/qlge_main.c struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi); rx_ring 2297 drivers/staging/qlge/qlge_main.c struct ql_adapter *qdev = rx_ring->qdev; rx_ring 2298 drivers/staging/qlge/qlge_main.c struct rx_ring *trx_ring; rx_ring 2300 drivers/staging/qlge/qlge_main.c struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id]; rx_ring 2303 drivers/staging/qlge/qlge_main.c "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id); rx_ring 2308 drivers/staging/qlge/qlge_main.c trx_ring = &qdev->rx_ring[i]; rx_ring 2325 drivers/staging/qlge/qlge_main.c if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring 2326 drivers/staging/qlge/qlge_main.c rx_ring->cnsmr_idx) { rx_ring 2329 drivers/staging/qlge/qlge_main.c __func__, rx_ring->cq_id); rx_ring 2330 drivers/staging/qlge/qlge_main.c work_done = ql_clean_inbound_rx_ring(rx_ring, budget); rx_ring 2335 drivers/staging/qlge/qlge_main.c ql_enable_completion_interrupt(qdev, rx_ring->irq); rx_ring 2485 drivers/staging/qlge/qlge_main.c struct rx_ring *rx_ring = dev_id; rx_ring 2486 drivers/staging/qlge/qlge_main.c napi_schedule(&rx_ring->napi); rx_ring 2497 drivers/staging/qlge/qlge_main.c struct rx_ring *rx_ring = dev_id; rx_ring 2498 drivers/staging/qlge/qlge_main.c struct ql_adapter *qdev = rx_ring->qdev; rx_ring 2554 drivers/staging/qlge/qlge_main.c napi_schedule(&rx_ring->napi); rx_ring 2821 drivers/staging/qlge/qlge_main.c static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) rx_ring 2827 drivers/staging/qlge/qlge_main.c curr_idx = rx_ring->lbq_curr_idx; rx_ring 2828 drivers/staging/qlge/qlge_main.c clean_idx = rx_ring->lbq_clean_idx; rx_ring 2830 drivers/staging/qlge/qlge_main.c lbq_desc = &rx_ring->lbq[curr_idx]; rx_ring 2843 drivers/staging/qlge/qlge_main.c if (++curr_idx == rx_ring->lbq_len) rx_ring 2847 drivers/staging/qlge/qlge_main.c if (rx_ring->pg_chunk.page) { rx_ring 2848 drivers/staging/qlge/qlge_main.c pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map, rx_ring 2850 drivers/staging/qlge/qlge_main.c put_page(rx_ring->pg_chunk.page); rx_ring 2851 drivers/staging/qlge/qlge_main.c rx_ring->pg_chunk.page = NULL; rx_ring 2855 drivers/staging/qlge/qlge_main.c static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) rx_ring 2860 drivers/staging/qlge/qlge_main.c for (i = 0; i < rx_ring->sbq_len; i++) { rx_ring 2861 drivers/staging/qlge/qlge_main.c sbq_desc = &rx_ring->sbq[i]; rx_ring 2884 drivers/staging/qlge/qlge_main.c struct rx_ring *rx_ring; rx_ring 2887 drivers/staging/qlge/qlge_main.c rx_ring = &qdev->rx_ring[i]; rx_ring 2888 drivers/staging/qlge/qlge_main.c if (rx_ring->lbq) rx_ring 2889 drivers/staging/qlge/qlge_main.c ql_free_lbq_buffers(qdev, rx_ring); rx_ring 2890 drivers/staging/qlge/qlge_main.c if
(rx_ring->sbq) rx_ring 2891 drivers/staging/qlge/qlge_main.c ql_free_sbq_buffers(qdev, rx_ring); rx_ring 2897 drivers/staging/qlge/qlge_main.c struct rx_ring *rx_ring; rx_ring 2901 drivers/staging/qlge/qlge_main.c rx_ring = &qdev->rx_ring[i]; rx_ring 2902 drivers/staging/qlge/qlge_main.c if (rx_ring->type != TX_Q) rx_ring 2903 drivers/staging/qlge/qlge_main.c ql_update_buffer_queues(qdev, rx_ring); rx_ring 2908 drivers/staging/qlge/qlge_main.c struct rx_ring *rx_ring) rx_ring 2912 drivers/staging/qlge/qlge_main.c __le64 *bq = rx_ring->lbq_base; rx_ring 2914 drivers/staging/qlge/qlge_main.c memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc)); rx_ring 2915 drivers/staging/qlge/qlge_main.c for (i = 0; i < rx_ring->lbq_len; i++) { rx_ring 2916 drivers/staging/qlge/qlge_main.c lbq_desc = &rx_ring->lbq[i]; rx_ring 2925 drivers/staging/qlge/qlge_main.c struct rx_ring *rx_ring) rx_ring 2929 drivers/staging/qlge/qlge_main.c __le64 *bq = rx_ring->sbq_base; rx_ring 2931 drivers/staging/qlge/qlge_main.c memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc)); rx_ring 2932 drivers/staging/qlge/qlge_main.c for (i = 0; i < rx_ring->sbq_len; i++) { rx_ring 2933 drivers/staging/qlge/qlge_main.c sbq_desc = &rx_ring->sbq[i]; rx_ring 2942 drivers/staging/qlge/qlge_main.c struct rx_ring *rx_ring) rx_ring 2945 drivers/staging/qlge/qlge_main.c if (rx_ring->sbq_base) { rx_ring 2947 drivers/staging/qlge/qlge_main.c rx_ring->sbq_size, rx_ring 2948 drivers/staging/qlge/qlge_main.c rx_ring->sbq_base, rx_ring->sbq_base_dma); rx_ring 2949 drivers/staging/qlge/qlge_main.c rx_ring->sbq_base = NULL; rx_ring 2953 drivers/staging/qlge/qlge_main.c kfree(rx_ring->sbq); rx_ring 2954 drivers/staging/qlge/qlge_main.c rx_ring->sbq = NULL; rx_ring 2957 drivers/staging/qlge/qlge_main.c if (rx_ring->lbq_base) { rx_ring 2959 drivers/staging/qlge/qlge_main.c rx_ring->lbq_size, rx_ring 2960 drivers/staging/qlge/qlge_main.c rx_ring->lbq_base, rx_ring->lbq_base_dma); rx_ring 2961 drivers/staging/qlge/qlge_main.c rx_ring->lbq_base = NULL; rx_ring 2965 drivers/staging/qlge/qlge_main.c kfree(rx_ring->lbq); rx_ring 2966 drivers/staging/qlge/qlge_main.c rx_ring->lbq = NULL; rx_ring 2969 drivers/staging/qlge/qlge_main.c if (rx_ring->cq_base) { rx_ring 2971 drivers/staging/qlge/qlge_main.c rx_ring->cq_size, rx_ring 2972 drivers/staging/qlge/qlge_main.c rx_ring->cq_base, rx_ring->cq_base_dma); rx_ring 2973 drivers/staging/qlge/qlge_main.c rx_ring->cq_base = NULL; rx_ring 2980 drivers/staging/qlge/qlge_main.c struct rx_ring *rx_ring) rx_ring 2986 drivers/staging/qlge/qlge_main.c rx_ring->cq_base = rx_ring 2987 drivers/staging/qlge/qlge_main.c pci_alloc_consistent(qdev->pdev, rx_ring->cq_size, rx_ring 2988 drivers/staging/qlge/qlge_main.c &rx_ring->cq_base_dma); rx_ring 2990 drivers/staging/qlge/qlge_main.c if (rx_ring->cq_base == NULL) { rx_ring 2995 drivers/staging/qlge/qlge_main.c if (rx_ring->sbq_len) { rx_ring 2999 drivers/staging/qlge/qlge_main.c rx_ring->sbq_base = rx_ring 3000 drivers/staging/qlge/qlge_main.c pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size, rx_ring 3001 drivers/staging/qlge/qlge_main.c &rx_ring->sbq_base_dma); rx_ring 3003 drivers/staging/qlge/qlge_main.c if (rx_ring->sbq_base == NULL) { rx_ring 3012 drivers/staging/qlge/qlge_main.c rx_ring->sbq = kmalloc_array(rx_ring->sbq_len, rx_ring 3015 drivers/staging/qlge/qlge_main.c if (rx_ring->sbq == NULL) rx_ring 3018 drivers/staging/qlge/qlge_main.c ql_init_sbq_ring(qdev, rx_ring); rx_ring 3021 drivers/staging/qlge/qlge_main.c if (rx_ring->lbq_len) 
{ rx_ring 3025 drivers/staging/qlge/qlge_main.c rx_ring->lbq_base = rx_ring 3026 drivers/staging/qlge/qlge_main.c pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size, rx_ring 3027 drivers/staging/qlge/qlge_main.c &rx_ring->lbq_base_dma); rx_ring 3029 drivers/staging/qlge/qlge_main.c if (rx_ring->lbq_base == NULL) { rx_ring 3037 drivers/staging/qlge/qlge_main.c rx_ring->lbq = kmalloc_array(rx_ring->lbq_len, rx_ring 3040 drivers/staging/qlge/qlge_main.c if (rx_ring->lbq == NULL) rx_ring 3043 drivers/staging/qlge/qlge_main.c ql_init_lbq_ring(qdev, rx_ring); rx_ring 3049 drivers/staging/qlge/qlge_main.c ql_free_rx_resources(qdev, rx_ring); rx_ring 3088 drivers/staging/qlge/qlge_main.c ql_free_rx_resources(qdev, &qdev->rx_ring[i]); rx_ring 3101 drivers/staging/qlge/qlge_main.c if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) { rx_ring 3126 drivers/staging/qlge/qlge_main.c static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) rx_ring 3128 drivers/staging/qlge/qlge_main.c struct cqicb *cqicb = &rx_ring->cqicb; rx_ring 3130 drivers/staging/qlge/qlge_main.c (rx_ring->cq_id * RX_RING_SHADOW_SPACE); rx_ring 3132 drivers/staging/qlge/qlge_main.c (rx_ring->cq_id * RX_RING_SHADOW_SPACE); rx_ring 3134 drivers/staging/qlge/qlge_main.c qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id)); rx_ring 3142 drivers/staging/qlge/qlge_main.c rx_ring->prod_idx_sh_reg = shadow_reg; rx_ring 3143 drivers/staging/qlge/qlge_main.c rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma; rx_ring 3144 drivers/staging/qlge/qlge_main.c *rx_ring->prod_idx_sh_reg = 0; rx_ring 3147 drivers/staging/qlge/qlge_main.c rx_ring->lbq_base_indirect = shadow_reg; rx_ring 3148 drivers/staging/qlge/qlge_main.c rx_ring->lbq_base_indirect_dma = shadow_reg_dma; rx_ring 3149 drivers/staging/qlge/qlge_main.c shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); rx_ring 3150 drivers/staging/qlge/qlge_main.c shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); rx_ring 3151 drivers/staging/qlge/qlge_main.c rx_ring->sbq_base_indirect = shadow_reg; rx_ring 3152 drivers/staging/qlge/qlge_main.c rx_ring->sbq_base_indirect_dma = shadow_reg_dma; rx_ring 3155 drivers/staging/qlge/qlge_main.c rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area; rx_ring 3156 drivers/staging/qlge/qlge_main.c rx_ring->cnsmr_idx = 0; rx_ring 3157 drivers/staging/qlge/qlge_main.c rx_ring->curr_entry = rx_ring->cq_base; rx_ring 3160 drivers/staging/qlge/qlge_main.c rx_ring->valid_db_reg = doorbell_area + 0x04; rx_ring 3163 drivers/staging/qlge/qlge_main.c rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18); rx_ring 3166 drivers/staging/qlge/qlge_main.c rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c); rx_ring 3169 drivers/staging/qlge/qlge_main.c cqicb->msix_vect = rx_ring->irq; rx_ring 3171 drivers/staging/qlge/qlge_main.c bq_len = (rx_ring->cq_len == 65536) ? 
0 : (u16) rx_ring->cq_len; rx_ring 3174 drivers/staging/qlge/qlge_main.c cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma); rx_ring 3176 drivers/staging/qlge/qlge_main.c cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma); rx_ring 3184 drivers/staging/qlge/qlge_main.c if (rx_ring->lbq_len) { rx_ring 3186 drivers/staging/qlge/qlge_main.c tmp = (u64)rx_ring->lbq_base_dma; rx_ring 3187 drivers/staging/qlge/qlge_main.c base_indirect_ptr = rx_ring->lbq_base_indirect; rx_ring 3194 drivers/staging/qlge/qlge_main.c } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); rx_ring 3196 drivers/staging/qlge/qlge_main.c cpu_to_le64(rx_ring->lbq_base_indirect_dma); rx_ring 3197 drivers/staging/qlge/qlge_main.c bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 : rx_ring 3198 drivers/staging/qlge/qlge_main.c (u16) rx_ring->lbq_buf_size; rx_ring 3200 drivers/staging/qlge/qlge_main.c bq_len = (rx_ring->lbq_len == 65536) ? 0 : rx_ring 3201 drivers/staging/qlge/qlge_main.c (u16) rx_ring->lbq_len; rx_ring 3203 drivers/staging/qlge/qlge_main.c rx_ring->lbq_prod_idx = 0; rx_ring 3204 drivers/staging/qlge/qlge_main.c rx_ring->lbq_curr_idx = 0; rx_ring 3205 drivers/staging/qlge/qlge_main.c rx_ring->lbq_clean_idx = 0; rx_ring 3206 drivers/staging/qlge/qlge_main.c rx_ring->lbq_free_cnt = rx_ring->lbq_len; rx_ring 3208 drivers/staging/qlge/qlge_main.c if (rx_ring->sbq_len) { rx_ring 3210 drivers/staging/qlge/qlge_main.c tmp = (u64)rx_ring->sbq_base_dma; rx_ring 3211 drivers/staging/qlge/qlge_main.c base_indirect_ptr = rx_ring->sbq_base_indirect; rx_ring 3218 drivers/staging/qlge/qlge_main.c } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len)); rx_ring 3220 drivers/staging/qlge/qlge_main.c cpu_to_le64(rx_ring->sbq_base_indirect_dma); rx_ring 3222 drivers/staging/qlge/qlge_main.c cpu_to_le16((u16)(rx_ring->sbq_buf_size)); rx_ring 3223 drivers/staging/qlge/qlge_main.c bq_len = (rx_ring->sbq_len == 65536) ? 
0 : rx_ring 3224 drivers/staging/qlge/qlge_main.c (u16) rx_ring->sbq_len; rx_ring 3226 drivers/staging/qlge/qlge_main.c rx_ring->sbq_prod_idx = 0; rx_ring 3227 drivers/staging/qlge/qlge_main.c rx_ring->sbq_curr_idx = 0; rx_ring 3228 drivers/staging/qlge/qlge_main.c rx_ring->sbq_clean_idx = 0; rx_ring 3229 drivers/staging/qlge/qlge_main.c rx_ring->sbq_free_cnt = rx_ring->sbq_len; rx_ring 3231 drivers/staging/qlge/qlge_main.c switch (rx_ring->type) { rx_ring 3240 drivers/staging/qlge/qlge_main.c netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix, rx_ring 3247 drivers/staging/qlge/qlge_main.c "Invalid rx_ring->type = %d.\n", rx_ring->type); rx_ring 3250 drivers/staging/qlge/qlge_main.c CFG_LCQ, rx_ring->cq_id); rx_ring 3395 drivers/staging/qlge/qlge_main.c qdev->rx_ring[i].irq = vect; rx_ring 3403 drivers/staging/qlge/qlge_main.c qdev->rx_ring[i].irq = 0; rx_ring 3421 drivers/staging/qlge/qlge_main.c ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id); rx_ring 3426 drivers/staging/qlge/qlge_main.c (1 << qdev->rx_ring[qdev->rss_ring_count + rx_ring 3434 drivers/staging/qlge/qlge_main.c ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id); rx_ring 3455 drivers/staging/qlge/qlge_main.c qdev->rx_ring[i].irq = i; rx_ring 3541 drivers/staging/qlge/qlge_main.c &qdev->rx_ring[i]); rx_ring 3543 drivers/staging/qlge/qlge_main.c free_irq(qdev->pdev->irq, &qdev->rx_ring[0]); rx_ring 3566 drivers/staging/qlge/qlge_main.c &qdev->rx_ring[i]); rx_ring 3583 drivers/staging/qlge/qlge_main.c &qdev->rx_ring[0]); rx_ring 3589 drivers/staging/qlge/qlge_main.c intr_context->name, &qdev->rx_ring[0]); rx_ring 3596 drivers/staging/qlge/qlge_main.c qdev->rx_ring[0].type == DEFAULT_Q ? rx_ring 3598 drivers/staging/qlge/qlge_main.c qdev->rx_ring[0].type == TX_Q ? "TX_Q" : rx_ring 3599 drivers/staging/qlge/qlge_main.c qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "", rx_ring 3813 drivers/staging/qlge/qlge_main.c status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]); rx_ring 3857 drivers/staging/qlge/qlge_main.c napi_enable(&qdev->rx_ring[i].napi); rx_ring 3997 drivers/staging/qlge/qlge_main.c napi_disable(&qdev->rx_ring[i].napi); rx_ring 4008 drivers/staging/qlge/qlge_main.c netif_napi_del(&qdev->rx_ring[i].napi); rx_ring 4100 drivers/staging/qlge/qlge_main.c struct rx_ring *rx_ring; rx_ring 4139 drivers/staging/qlge/qlge_main.c rx_ring = &qdev->rx_ring[i]; rx_ring 4140 drivers/staging/qlge/qlge_main.c memset((void *)rx_ring, 0, sizeof(*rx_ring)); rx_ring 4141 drivers/staging/qlge/qlge_main.c rx_ring->qdev = qdev; rx_ring 4142 drivers/staging/qlge/qlge_main.c rx_ring->cq_id = i; rx_ring 4143 drivers/staging/qlge/qlge_main.c rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. 
*/ rx_ring 4148 drivers/staging/qlge/qlge_main.c rx_ring->cq_len = qdev->rx_ring_size; rx_ring 4149 drivers/staging/qlge/qlge_main.c rx_ring->cq_size = rx_ring 4150 drivers/staging/qlge/qlge_main.c rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); rx_ring 4151 drivers/staging/qlge/qlge_main.c rx_ring->lbq_len = NUM_LARGE_BUFFERS; rx_ring 4152 drivers/staging/qlge/qlge_main.c rx_ring->lbq_size = rx_ring 4153 drivers/staging/qlge/qlge_main.c rx_ring->lbq_len * sizeof(__le64); rx_ring 4154 drivers/staging/qlge/qlge_main.c rx_ring->lbq_buf_size = (u16)lbq_buf_len; rx_ring 4155 drivers/staging/qlge/qlge_main.c rx_ring->sbq_len = NUM_SMALL_BUFFERS; rx_ring 4156 drivers/staging/qlge/qlge_main.c rx_ring->sbq_size = rx_ring 4157 drivers/staging/qlge/qlge_main.c rx_ring->sbq_len * sizeof(__le64); rx_ring 4158 drivers/staging/qlge/qlge_main.c rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE; rx_ring 4159 drivers/staging/qlge/qlge_main.c rx_ring->type = RX_Q; rx_ring 4165 drivers/staging/qlge/qlge_main.c rx_ring->cq_len = qdev->tx_ring_size; rx_ring 4166 drivers/staging/qlge/qlge_main.c rx_ring->cq_size = rx_ring 4167 drivers/staging/qlge/qlge_main.c rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); rx_ring 4168 drivers/staging/qlge/qlge_main.c rx_ring->lbq_len = 0; rx_ring 4169 drivers/staging/qlge/qlge_main.c rx_ring->lbq_size = 0; rx_ring 4170 drivers/staging/qlge/qlge_main.c rx_ring->lbq_buf_size = 0; rx_ring 4171 drivers/staging/qlge/qlge_main.c rx_ring->sbq_len = 0; rx_ring 4172 drivers/staging/qlge/qlge_main.c rx_ring->sbq_size = 0; rx_ring 4173 drivers/staging/qlge/qlge_main.c rx_ring->sbq_buf_size = 0; rx_ring 4174 drivers/staging/qlge/qlge_main.c rx_ring->type = TX_Q; rx_ring 4210 drivers/staging/qlge/qlge_main.c struct rx_ring *rx_ring; rx_ring 4241 drivers/staging/qlge/qlge_main.c rx_ring = &qdev->rx_ring[i]; rx_ring 4243 drivers/staging/qlge/qlge_main.c rx_ring->lbq_buf_size = lbq_buf_len; rx_ring 4293 drivers/staging/qlge/qlge_main.c struct rx_ring *rx_ring = &qdev->rx_ring[0]; rx_ring 4300 drivers/staging/qlge/qlge_main.c for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) { rx_ring 4301 drivers/staging/qlge/qlge_main.c pkts += rx_ring->rx_packets; rx_ring 4302 drivers/staging/qlge/qlge_main.c bytes += rx_ring->rx_bytes; rx_ring 4303 drivers/staging/qlge/qlge_main.c dropped += rx_ring->rx_dropped; rx_ring 4304 drivers/staging/qlge/qlge_main.c errors += rx_ring->rx_errors; rx_ring 4305 drivers/staging/qlge/qlge_main.c mcast += rx_ring->rx_multicast; rx_ring 4820 drivers/staging/qlge/qlge_main.c int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget) rx_ring 4822 drivers/staging/qlge/qlge_main.c return ql_clean_inbound_rx_ring(rx_ring, budget); rx_ring 4852 drivers/staging/qlge/qlge_main.c netif_napi_del(&qdev->rx_ring[i].napi); rx_ring 1569 drivers/staging/rtl8192e/rtl8192e/rtl_core.c sizeof(*priv->rx_ring[rx_queue_idx]) * rx_ring 1571 drivers/staging/rtl8192e/rtl8192e/rtl_core.c priv->rx_ring[rx_queue_idx], rx_ring 1573 drivers/staging/rtl8192e/rtl8192e/rtl_core.c priv->rx_ring[rx_queue_idx] = NULL; rx_ring 1786 drivers/staging/rtl8192e/rtl8192e/rtl_core.c priv->rx_ring[rx_queue_idx] = pci_zalloc_consistent(priv->pdev, rx_ring 1787 drivers/staging/rtl8192e/rtl8192e/rtl_core.c sizeof(*priv->rx_ring[rx_queue_idx]) * priv->rxringcount, rx_ring 1789 drivers/staging/rtl8192e/rtl8192e/rtl_core.c if (!priv->rx_ring[rx_queue_idx] || rx_ring 1790 drivers/staging/rtl8192e/rtl8192e/rtl_core.c (unsigned long)priv->rx_ring[rx_queue_idx] & 0xFF) { rx_ring 1801 
drivers/staging/rtl8192e/rtl8192e/rtl_core.c entry = &priv->rx_ring[rx_queue_idx][i]; rx_ring 1888 drivers/staging/rtl8192e/rtl8192e/rtl_core.c if (priv->rx_ring[rx_queue_idx]) { rx_ring 1892 drivers/staging/rtl8192e/rtl8192e/rtl_core.c entry = &priv->rx_ring[rx_queue_idx][i]; rx_ring 2021 drivers/staging/rtl8192e/rtl8192e/rtl_core.c struct rx_desc *pdesc = &priv->rx_ring[rx_queue_idx] rx_ring 374 drivers/staging/rtl8192e/rtl8192e/rtl_core.h struct rx_desc *rx_ring[MAX_RX_QUEUE]; rx_ring 148 drivers/tty/serial/atmel_serial.c struct circ_buf rx_ring; rx_ring 737 drivers/tty/serial/atmel_serial.c struct circ_buf *ring = &atmel_port->rx_ring; rx_ring 1106 drivers/tty/serial/atmel_serial.c struct circ_buf *ring = &atmel_port->rx_ring; rx_ring 1200 drivers/tty/serial/atmel_serial.c ring = &atmel_port->rx_ring; rx_ring 1521 drivers/tty/serial/atmel_serial.c struct circ_buf *ring = &atmel_port->rx_ring; rx_ring 2083 drivers/tty/serial/atmel_serial.c atmel_port->rx_ring.head = 0; rx_ring 2084 drivers/tty/serial/atmel_serial.c atmel_port->rx_ring.tail = 0; rx_ring 2505 drivers/tty/serial/atmel_serial.c memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring)); rx_ring 2904 drivers/tty/serial/atmel_serial.c atmel_port->rx_ring.buf = data; rx_ring 2954 drivers/tty/serial/atmel_serial.c kfree(atmel_port->rx_ring.buf); rx_ring 2955 drivers/tty/serial/atmel_serial.c atmel_port->rx_ring.buf = NULL; rx_ring 2989 drivers/tty/serial/atmel_serial.c kfree(atmel_port->rx_ring.buf); rx_ring 267 drivers/tty/serial/fsl_lpuart.c struct circ_buf rx_ring; rx_ring 1004 drivers/tty/serial/fsl_lpuart.c struct circ_buf *ring = &sport->rx_ring; rx_ring 1143 drivers/tty/serial/fsl_lpuart.c struct circ_buf *ring = &sport->rx_ring; rx_ring 1226 drivers/tty/serial/fsl_lpuart.c kfree(sport->rx_ring.buf); rx_ring 1227 drivers/tty/serial/fsl_lpuart.c sport->rx_ring.tail = 0; rx_ring 1228 drivers/tty/serial/fsl_lpuart.c sport->rx_ring.head = 0; rx_ring 222 drivers/tty/serial/imx.c struct circ_buf rx_ring; rx_ring 1079 drivers/tty/serial/imx.c struct circ_buf *rx_ring = &sport->rx_ring; rx_ring 1106 drivers/tty/serial/imx.c rx_ring->head = sg_dma_len(sgl) - state.residue; rx_ring 1110 drivers/tty/serial/imx.c rx_ring->tail = ((rx_ring->head-1) / bd_size) * bd_size; rx_ring 1112 drivers/tty/serial/imx.c if (rx_ring->head <= sg_dma_len(sgl) && rx_ring 1113 drivers/tty/serial/imx.c rx_ring->head > rx_ring->tail) { rx_ring 1116 drivers/tty/serial/imx.c r_bytes = rx_ring->head - rx_ring->tail; rx_ring 1123 drivers/tty/serial/imx.c sport->rx_buf + rx_ring->tail, r_bytes); rx_ring 1134 drivers/tty/serial/imx.c WARN_ON(rx_ring->head > sg_dma_len(sgl)); rx_ring 1135 drivers/tty/serial/imx.c WARN_ON(rx_ring->head <= rx_ring->tail); rx_ring 1156 drivers/tty/serial/imx.c sport->rx_ring.head = 0; rx_ring 1157 drivers/tty/serial/imx.c sport->rx_ring.tail = 0; rx_ring 1282 drivers/tty/serial/imx.c sport->rx_ring.buf = sport->rx_buf; rx_ring 125 drivers/vhost/net.c struct ptr_ring *rx_ring; rx_ring 181 drivers/vhost/net.c rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue, rx_ring 190 drivers/vhost/net.c if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) { rx_ring 191 drivers/vhost/net.c ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head, rx_ring 980 drivers/vhost/net.c if (rvq->rx_ring) rx_ring 1169 drivers/vhost/net.c if (nvq->rx_ring) rx_ring 1322 drivers/vhost/net.c n->vqs[i].rx_ring = NULL; rx_ring 1351 drivers/vhost/net.c nvq->rx_ring = NULL; rx_ring 1542 drivers/vhost/net.c nvq->rx_ring = get_tap_ptr_ring(fd);
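The serial drivers above (atmel_serial, fsl_lpuart, imx) reuse struct circ_buf for the DMA RX path: the DMA engine's progress sets head (imx derives it from sg_dma_len() minus the transfer residue), the driver drains from tail, and the CIRC_CNT() masking from <linux/circ_buf.h> assumes a power-of-2 buffer. A small consumer-side model; BUF_SIZE, struct circ, and circ_drain() are illustrative, and only the CIRC_CNT definition matches the kernel header:

#define BUF_SIZE 4096 /* power of two, as <linux/circ_buf.h> assumes */
#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

struct circ {
    char buf[BUF_SIZE];
    unsigned int head; /* producer: advanced from DMA residue accounting */
    unsigned int tail; /* consumer: advanced as bytes reach the tty layer */
};

/* Copy out everything currently readable, advancing tail. */
static unsigned int circ_drain(struct circ *c, char *dst)
{
    unsigned int n = 0;

    while (CIRC_CNT(c->head, c->tail, BUF_SIZE)) {
        dst[n++] = c->buf[c->tail];
        c->tail = (c->tail + 1) & (BUF_SIZE - 1);
    }
    return n;
}

Note that imx actually rounds tail to buffer-descriptor boundaries rather than draining bytewise, so treat this as the generic circ_buf shape, not that driver's exact arithmetic.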
rx_ring 139 include/linux/fs_enet_pd.h int rx_ring, tx_ring; /* number of buffers on rx */ rx_ring 500 net/packet/af_packet.c pkc = GET_PBDQC_FROM_RB(&po->rx_ring); rx_ring 513 net/packet/af_packet.c pkc = GET_PBDQC_FROM_RB(&po->rx_ring); rx_ring 638 net/packet/af_packet.c from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer); rx_ring 639 net/packet/af_packet.c struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring); rx_ring 1014 net/packet/af_packet.c pkc = GET_PBDQC_FROM_RB(&po->rx_ring); rx_ring 1074 net/packet/af_packet.c curr = packet_lookup_frame(po, &po->rx_ring, rx_ring 1075 net/packet/af_packet.c po->rx_ring.head, status); rx_ring 1183 net/packet/af_packet.c po->rx_ring.pending_refcnt = NULL; rx_ring 1206 net/packet/af_packet.c len = READ_ONCE(po->rx_ring.frame_max) + 1; rx_ring 1207 net/packet/af_packet.c idx = READ_ONCE(po->rx_ring.head); rx_ring 1212 net/packet/af_packet.c return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL); rx_ring 1219 net/packet/af_packet.c len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks); rx_ring 1220 net/packet/af_packet.c idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num); rx_ring 1225 net/packet/af_packet.c return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL); rx_ring 2240 net/packet/af_packet.c if (macoff + snaplen > po->rx_ring.frame_size) { rx_ring 2252 net/packet/af_packet.c snaplen = po->rx_ring.frame_size - macoff; rx_ring 2259 net/packet/af_packet.c GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { rx_ring 2262 net/packet/af_packet.c nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; rx_ring 2268 net/packet/af_packet.c macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; rx_ring 2279 net/packet/af_packet.c slot_id = po->rx_ring.head; rx_ring 2280 net/packet/af_packet.c if (test_bit(slot_id, po->rx_ring.rx_owner_map)) rx_ring 2282 net/packet/af_packet.c __set_bit(slot_id, po->rx_ring.rx_owner_map); rx_ring 2292 net/packet/af_packet.c packet_increment_rx_head(po, &po->rx_ring); rx_ring 2392 net/packet/af_packet.c __clear_bit(slot_id, po->rx_ring.rx_owner_map); rx_ring 2396 net/packet/af_packet.c prb_clear_blk_fill_status(&po->rx_ring); rx_ring 3046 net/packet/af_packet.c if (po->rx_ring.pg_vec) { rx_ring 3743 net/packet/af_packet.c if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { rx_ring 3763 net/packet/af_packet.c if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { rx_ring 3782 net/packet/af_packet.c if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { rx_ring 3831 net/packet/af_packet.c if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { rx_ring 3894 net/packet/af_packet.c if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { rx_ring 4171 net/packet/af_packet.c if (po->rx_ring.pg_vec) { rx_ring 4172 net/packet/af_packet.c if (!packet_previous_rx_frame(po, &po->rx_ring, rx_ring 4299 net/packet/af_packet.c rb = tx_ring ? &po->tx_ring : &po->rx_ring; rx_ring 4422 net/packet/af_packet.c po->prot_hook.func = (po->rx_ring.pg_vec) ? rx_ring 4468 net/packet/af_packet.c for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { rx_ring 4484 net/packet/af_packet.c for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { rx_ring 103 net/packet/diag.c ret = pdiag_put_ring(&po->rx_ring, po->tp_version, rx_ring 113 net/packet/internal.h struct packet_ring_buffer rx_ring;
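The af_packet rows are the kernel side of a ring that userspace maps directly: setsockopt(PACKET_RX_RING) sizes po->rx_ring, mmap() exposes it, and each frame's status word flips between TP_STATUS_KERNEL and TP_STATUS_USER as ownership changes, which is exactly what packet_lookup_frame() tests above. A minimal TPACKET_V1 reader using that real, documented API (error handling trimmed to the essentials; needs CAP_NET_RAW):

#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <poll.h>
#include <stdio.h>

int main(void)
{
    struct tpacket_req req = {
        .tp_block_size = 4096,
        .tp_block_nr   = 64,
        .tp_frame_size = 2048,
        .tp_frame_nr   = 128,   /* block_size * block_nr / frame_size */
    };
    int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

    if (fd < 0) { perror("socket (needs CAP_NET_RAW)"); return 1; }
    if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req)) < 0) {
        perror("PACKET_RX_RING"); return 1;
    }

    size_t map_sz = (size_t)req.tp_block_size * req.tp_block_nr;
    char *ring = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (ring == MAP_FAILED) { perror("mmap"); return 1; }

    for (unsigned int i = 0; ; i = (i + 1) % req.tp_frame_nr) {
        struct tpacket_hdr *hdr = (void *)(ring + (size_t)i * req.tp_frame_size);

        while (!(hdr->tp_status & TP_STATUS_USER)) { /* kernel still owns the slot */
            struct pollfd pfd = { .fd = fd, .events = POLLIN };
            poll(&pfd, 1, -1);
        }
        printf("frame %u: %u bytes\n", i, hdr->tp_len);
        hdr->tp_status = TP_STATUS_KERNEL;           /* hand the slot back */
    }
}

The same ownership handshake is visible from the kernel side in the listing: __set_bit()/__clear_bit() on rx_owner_map guard a slot while af_packet fills it, and the final status store is what makes the frame visible to this loop.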