tx_ring           391 drivers/crypto/qat/qat_common/adf_transport.c 	struct adf_etr_ring_data *tx_ring;
tx_ring           427 drivers/crypto/qat/qat_common/adf_transport.c 			tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
tx_ring           428 drivers/crypto/qat/qat_common/adf_transport.c 			ring->inflights = tx_ring->inflights;
tx_ring           284 drivers/dma/xgene-dma.c 	struct xgene_dma_ring tx_ring;
tx_ring           425 drivers/dma/xgene-dma.c 	xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);
tx_ring           594 drivers/dma/xgene-dma.c 	struct xgene_dma_ring *ring = &chan->tx_ring;
tx_ring          1178 drivers/dma/xgene-dma.c 	xgene_dma_delete_ring_one(&chan->tx_ring);
tx_ring          1216 drivers/dma/xgene-dma.c 	struct xgene_dma_ring *tx_ring = &chan->tx_ring;
tx_ring          1232 drivers/dma/xgene-dma.c 	tx_ring->owner = XGENE_DMA_RING_OWNER_DMA;
tx_ring          1233 drivers/dma/xgene-dma.c 	tx_ring->buf_num = XGENE_DMA_BUFNUM + chan->id;
tx_ring          1235 drivers/dma/xgene-dma.c 	ret = xgene_dma_create_ring_one(chan, tx_ring,
tx_ring          1242 drivers/dma/xgene-dma.c 	tx_ring->dst_ring_num = XGENE_DMA_RING_DST_ID(rx_ring->num);
tx_ring          1246 drivers/dma/xgene-dma.c 		 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
tx_ring          1249 drivers/dma/xgene-dma.c 	chan->max_outstanding = tx_ring->slots;
tx_ring           571 drivers/infiniband/hw/hfi1/sdma.c 	return sde->tx_ring[sde->tx_head & sde->sdma_mask];
tx_ring           596 drivers/infiniband/hw/hfi1/sdma.c 			sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
tx_ring          1329 drivers/infiniband/hw/hfi1/sdma.c 		kvfree(sde->tx_ring);
tx_ring          1330 drivers/infiniband/hw/hfi1/sdma.c 		sde->tx_ring = NULL;
tx_ring          1475 drivers/infiniband/hw/hfi1/sdma.c 		sde->tx_ring =
tx_ring          1479 drivers/infiniband/hw/hfi1/sdma.c 		if (!sde->tx_ring)
tx_ring          1852 drivers/infiniband/hw/hfi1/sdma.c 			sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
tx_ring          2344 drivers/infiniband/hw/hfi1/sdma.c 	WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]);
tx_ring          2346 drivers/infiniband/hw/hfi1/sdma.c 	sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx;
tx_ring           329 drivers/infiniband/hw/hfi1/sdma.h 	struct sdma_txreq **tx_ring;
tx_ring           585 drivers/infiniband/hw/mlx4/mad.c 	tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
tx_ring           586 drivers/infiniband/hw/mlx4/mad.c 	if (tun_qp->tx_ring[tun_tx_ix].ah)
tx_ring           587 drivers/infiniband/hw/mlx4/mad.c 		rdma_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah, 0);
tx_ring           588 drivers/infiniband/hw/mlx4/mad.c 	tun_qp->tx_ring[tun_tx_ix].ah = ah;
tx_ring           590 drivers/infiniband/hw/mlx4/mad.c 				   tun_qp->tx_ring[tun_tx_ix].buf.map,
tx_ring           632 drivers/infiniband/hw/mlx4/mad.c 				      tun_qp->tx_ring[tun_tx_ix].buf.map,
tx_ring           636 drivers/infiniband/hw/mlx4/mad.c 	list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
tx_ring           658 drivers/infiniband/hw/mlx4/mad.c 	tun_qp->tx_ring[tun_tx_ix].ah = NULL;
tx_ring          1420 drivers/infiniband/hw/mlx4/mad.c 	sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
tx_ring          1421 drivers/infiniband/hw/mlx4/mad.c 	kfree(sqp->tx_ring[wire_tx_ix].ah);
tx_ring          1422 drivers/infiniband/hw/mlx4/mad.c 	sqp->tx_ring[wire_tx_ix].ah = ah;
tx_ring          1424 drivers/infiniband/hw/mlx4/mad.c 				   sqp->tx_ring[wire_tx_ix].buf.map,
tx_ring          1431 drivers/infiniband/hw/mlx4/mad.c 				      sqp->tx_ring[wire_tx_ix].buf.map,
tx_ring          1435 drivers/infiniband/hw/mlx4/mad.c 	list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
tx_ring          1458 drivers/infiniband/hw/mlx4/mad.c 	sqp->tx_ring[wire_tx_ix].ah = NULL;
tx_ring          1618 drivers/infiniband/hw/mlx4/mad.c 	tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
tx_ring          1621 drivers/infiniband/hw/mlx4/mad.c 	if (!tun_qp->tx_ring) {
tx_ring          1650 drivers/infiniband/hw/mlx4/mad.c 		tun_qp->tx_ring[i].buf.addr =
tx_ring          1652 drivers/infiniband/hw/mlx4/mad.c 		if (!tun_qp->tx_ring[i].buf.addr)
tx_ring          1654 drivers/infiniband/hw/mlx4/mad.c 		tun_qp->tx_ring[i].buf.map =
tx_ring          1656 drivers/infiniband/hw/mlx4/mad.c 					  tun_qp->tx_ring[i].buf.addr,
tx_ring          1660 drivers/infiniband/hw/mlx4/mad.c 					 tun_qp->tx_ring[i].buf.map)) {
tx_ring          1661 drivers/infiniband/hw/mlx4/mad.c 			kfree(tun_qp->tx_ring[i].buf.addr);
tx_ring          1664 drivers/infiniband/hw/mlx4/mad.c 		tun_qp->tx_ring[i].ah = NULL;
tx_ring          1676 drivers/infiniband/hw/mlx4/mad.c 		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
tx_ring          1678 drivers/infiniband/hw/mlx4/mad.c 		kfree(tun_qp->tx_ring[i].buf.addr);
tx_ring          1688 drivers/infiniband/hw/mlx4/mad.c 	kfree(tun_qp->tx_ring);
tx_ring          1689 drivers/infiniband/hw/mlx4/mad.c 	tun_qp->tx_ring = NULL;
tx_ring          1722 drivers/infiniband/hw/mlx4/mad.c 		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
tx_ring          1724 drivers/infiniband/hw/mlx4/mad.c 		kfree(tun_qp->tx_ring[i].buf.addr);
tx_ring          1725 drivers/infiniband/hw/mlx4/mad.c 		if (tun_qp->tx_ring[i].ah)
tx_ring          1726 drivers/infiniband/hw/mlx4/mad.c 			rdma_destroy_ah(tun_qp->tx_ring[i].ah, 0);
tx_ring          1728 drivers/infiniband/hw/mlx4/mad.c 	kfree(tun_qp->tx_ring);
tx_ring          1758 drivers/infiniband/hw/mlx4/mad.c 				rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
tx_ring          1760 drivers/infiniband/hw/mlx4/mad.c 				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
tx_ring          1775 drivers/infiniband/hw/mlx4/mad.c 				rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
tx_ring          1777 drivers/infiniband/hw/mlx4/mad.c 				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
tx_ring          1912 drivers/infiniband/hw/mlx4/mad.c 				kfree(sqp->tx_ring[wc.wr_id &
tx_ring          1914 drivers/infiniband/hw/mlx4/mad.c 				sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
tx_ring          1941 drivers/infiniband/hw/mlx4/mad.c 				kfree(sqp->tx_ring[wc.wr_id &
tx_ring          1943 drivers/infiniband/hw/mlx4/mad.c 				sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
tx_ring           439 drivers/infiniband/hw/mlx4/mlx4_ib.h 	struct mlx4_ib_tun_tx_buf *tx_ring;
tx_ring           251 drivers/infiniband/ulp/ipoib/ipoib.h 	struct ipoib_tx_buf *tx_ring;
tx_ring           379 drivers/infiniband/ulp/ipoib/ipoib.h 	struct ipoib_tx_buf *tx_ring;
tx_ring           750 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
tx_ring           811 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	tx_req = &tx->tx_ring[wr_id];
tx_ring          1152 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	p->tx_ring = vzalloc(array_size(ipoib_sendq_size, sizeof(*p->tx_ring)));
tx_ring          1153 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	if (!p->tx_ring) {
tx_ring          1198 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	vfree(p->tx_ring);
tx_ring          1215 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	if (p->tx_ring) {
tx_ring          1232 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
tx_ring          1249 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	vfree(p->tx_ring);
tx_ring           400 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	tx_req = &priv->tx_ring[wr_id];
tx_ring           626 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
tx_ring           809 drivers/infiniband/ulp/ipoib/ipoib_ib.c 				tx_req = &priv->tx_ring[priv->tx_tail &
tx_ring          1683 drivers/infiniband/ulp/ipoib/ipoib_main.c 	vfree(priv->tx_ring);
tx_ring          1686 drivers/infiniband/ulp/ipoib/ipoib_main.c 	priv->tx_ring = NULL;
tx_ring          1702 drivers/infiniband/ulp/ipoib/ipoib_main.c 	priv->tx_ring = vzalloc(array_size(ipoib_sendq_size,
tx_ring          1703 drivers/infiniband/ulp/ipoib/ipoib_main.c 					   sizeof(*priv->tx_ring)));
tx_ring          1704 drivers/infiniband/ulp/ipoib/ipoib_main.c 	if (!priv->tx_ring) {
tx_ring          1726 drivers/infiniband/ulp/ipoib/ipoib_main.c 	vfree(priv->tx_ring);
tx_ring           728 drivers/infiniband/ulp/srp/ib_srp.c 	if (ch->tx_ring) {
tx_ring           730 drivers/infiniband/ulp/srp/ib_srp.c 			srp_free_iu(target->srp_host, ch->tx_ring[i]);
tx_ring           731 drivers/infiniband/ulp/srp/ib_srp.c 		kfree(ch->tx_ring);
tx_ring           732 drivers/infiniband/ulp/srp/ib_srp.c 		ch->tx_ring = NULL;
tx_ring          1429 drivers/infiniband/ulp/srp/ib_srp.c 			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
tx_ring          2458 drivers/infiniband/ulp/srp/ib_srp.c 	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
tx_ring          2460 drivers/infiniband/ulp/srp/ib_srp.c 	if (!ch->tx_ring)
tx_ring          2472 drivers/infiniband/ulp/srp/ib_srp.c 		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
tx_ring          2475 drivers/infiniband/ulp/srp/ib_srp.c 		if (!ch->tx_ring[i])
tx_ring          2478 drivers/infiniband/ulp/srp/ib_srp.c 		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
tx_ring          2486 drivers/infiniband/ulp/srp/ib_srp.c 		srp_free_iu(target->srp_host, ch->tx_ring[i]);
tx_ring          2491 drivers/infiniband/ulp/srp/ib_srp.c 	kfree(ch->tx_ring);
tx_ring          2492 drivers/infiniband/ulp/srp/ib_srp.c 	ch->tx_ring = NULL;
tx_ring           185 drivers/infiniband/ulp/srp/ib_srp.h 	struct srp_iu	      **tx_ring;
tx_ring           306 drivers/net/ethernet/3com/3c515.c 	struct boom_tx_desc tx_ring[TX_RING_SIZE];
tx_ring           982 drivers/net/ethernet/3com/3c515.c 	       &vp->tx_ring[0]);
tx_ring           985 drivers/net/ethernet/3com/3c515.c 		       &vp->tx_ring[i],
tx_ring           986 drivers/net/ethernet/3com/3c515.c 		       vp->tx_ring[i].length, vp->tx_ring[i].status);
tx_ring          1021 drivers/net/ethernet/3com/3c515.c 			prev_entry = &vp->tx_ring[(vp->cur_tx - 1) % TX_RING_SIZE];
tx_ring          1029 drivers/net/ethernet/3com/3c515.c 		vp->tx_ring[entry].next = 0;
tx_ring          1030 drivers/net/ethernet/3com/3c515.c 		vp->tx_ring[entry].addr = isa_virt_to_bus(skb->data);
tx_ring          1031 drivers/net/ethernet/3com/3c515.c 		vp->tx_ring[entry].length = skb->len | 0x80000000;
tx_ring          1032 drivers/net/ethernet/3com/3c515.c 		vp->tx_ring[entry].status = skb->len | 0x80000000;
tx_ring          1041 drivers/net/ethernet/3com/3c515.c 			prev_entry->next = isa_virt_to_bus(&vp->tx_ring[entry]);
tx_ring          1043 drivers/net/ethernet/3com/3c515.c 			outl(isa_virt_to_bus(&vp->tx_ring[entry]),
tx_ring          1177 drivers/net/ethernet/3com/3c515.c 				if (inl(ioaddr + DownListPtr) == isa_virt_to_bus(&lp->tx_ring[entry]))
tx_ring           598 drivers/net/ethernet/3com/3c59x.c 	struct boom_tx_desc* tx_ring;
tx_ring          1217 drivers/net/ethernet/3com/3c59x.c 	vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
tx_ring          2116 drivers/net/ethernet/3com/3c59x.c 	struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
tx_ring          2145 drivers/net/ethernet/3com/3c59x.c 	vp->tx_ring[entry].next = 0;
tx_ring          2148 drivers/net/ethernet/3com/3c59x.c 			vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
tx_ring          2150 drivers/net/ethernet/3com/3c59x.c 			vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
tx_ring          2158 drivers/net/ethernet/3com/3c59x.c 		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
tx_ring          2159 drivers/net/ethernet/3com/3c59x.c 		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
tx_ring          2168 drivers/net/ethernet/3com/3c59x.c 		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
tx_ring          2169 drivers/net/ethernet/3com/3c59x.c 		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
tx_ring          2181 drivers/net/ethernet/3com/3c59x.c 						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
tx_ring          2182 drivers/net/ethernet/3com/3c59x.c 						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
tx_ring          2186 drivers/net/ethernet/3com/3c59x.c 						 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
tx_ring          2187 drivers/net/ethernet/3com/3c59x.c 						 le32_to_cpu(vp->tx_ring[entry].frag[0].length),
tx_ring          2193 drivers/net/ethernet/3com/3c59x.c 			vp->tx_ring[entry].frag[i+1].addr =
tx_ring          2197 drivers/net/ethernet/3com/3c59x.c 					vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
tx_ring          2199 drivers/net/ethernet/3com/3c59x.c 					vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag));
tx_ring          2206 drivers/net/ethernet/3com/3c59x.c 	vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
tx_ring          2207 drivers/net/ethernet/3com/3c59x.c 	vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
tx_ring          2208 drivers/net/ethernet/3com/3c59x.c 	vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
tx_ring          2426 drivers/net/ethernet/3com/3c59x.c 				if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0)
tx_ring          2435 drivers/net/ethernet/3com/3c59x.c 							le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
tx_ring          2436 drivers/net/ethernet/3com/3c59x.c 							le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
tx_ring          2441 drivers/net/ethernet/3com/3c59x.c 											 le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
tx_ring          2442 drivers/net/ethernet/3com/3c59x.c 											 le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
tx_ring          2446 drivers/net/ethernet/3com/3c59x.c 						le32_to_cpu(vp->tx_ring[entry].addr), skb->len, DMA_TO_DEVICE);
tx_ring          2770 drivers/net/ethernet/3com/3c59x.c 										 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
tx_ring          2771 drivers/net/ethernet/3com/3c59x.c 										 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
tx_ring          2774 drivers/net/ethernet/3com/3c59x.c 				dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, DMA_TO_DEVICE);
tx_ring          2802 drivers/net/ethernet/3com/3c59x.c 				   &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
tx_ring          2808 drivers/net/ethernet/3com/3c59x.c 				length = le32_to_cpu(vp->tx_ring[i].frag[0].length);
tx_ring          2810 drivers/net/ethernet/3com/3c59x.c 				length = le32_to_cpu(vp->tx_ring[i].length);
tx_ring          2813 drivers/net/ethernet/3com/3c59x.c 					   i, &vp->tx_ring[i], length,
tx_ring          2814 drivers/net/ethernet/3com/3c59x.c 					   le32_to_cpu(vp->tx_ring[i].status));
tx_ring           534 drivers/net/ethernet/adaptec/starfire.c 	starfire_tx_desc *tx_ring;
tx_ring           914 drivers/net/ethernet/adaptec/starfire.c 		np->tx_ring       = (void *) np->rx_done_q + rx_done_q_size;
tx_ring           916 drivers/net/ethernet/adaptec/starfire.c 		np->rx_ring       = (void *) np->tx_ring + tx_ring_size;
tx_ring          1251 drivers/net/ethernet/adaptec/starfire.c 		np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
tx_ring          1252 drivers/net/ethernet/adaptec/starfire.c 		np->tx_ring[entry].status = cpu_to_le32(status);
tx_ring          1964 drivers/net/ethernet/adaptec/starfire.c 			       i, le32_to_cpu(np->tx_ring[i].status),
tx_ring          1965 drivers/net/ethernet/adaptec/starfire.c 			       (long long) dma_to_cpu(np->tx_ring[i].addr),
tx_ring           489 drivers/net/ethernet/agere/et131x.c 	struct tx_ring tx_ring;
tx_ring          1643 drivers/net/ethernet/agere/et131x.c 	struct tx_ring *tx_ring = &adapter->tx_ring;
tx_ring          1646 drivers/net/ethernet/agere/et131x.c 	writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi);
tx_ring          1647 drivers/net/ethernet/agere/et131x.c 	writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo);
tx_ring          1653 drivers/net/ethernet/agere/et131x.c 	writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi);
tx_ring          1654 drivers/net/ethernet/agere/et131x.c 	writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo);
tx_ring          1656 drivers/net/ethernet/agere/et131x.c 	*tx_ring->tx_status = 0;
tx_ring          1659 drivers/net/ethernet/agere/et131x.c 	tx_ring->send_idx = 0;
tx_ring          1756 drivers/net/ethernet/agere/et131x.c 	struct tx_ring *tx_ring = &adapter->tx_ring;
tx_ring          1757 drivers/net/ethernet/agere/et131x.c 	struct tcb *tcb = tx_ring->tcb_ring;
tx_ring          1759 drivers/net/ethernet/agere/et131x.c 	tx_ring->tcb_qhead = tcb;
tx_ring          1769 drivers/net/ethernet/agere/et131x.c 	tx_ring->tcb_qtail = tcb;
tx_ring          1772 drivers/net/ethernet/agere/et131x.c 	tx_ring->send_head = NULL;
tx_ring          1773 drivers/net/ethernet/agere/et131x.c 	tx_ring->send_tail = NULL;
tx_ring          2361 drivers/net/ethernet/agere/et131x.c 	struct tx_ring *tx_ring = &adapter->tx_ring;
tx_ring          2364 drivers/net/ethernet/agere/et131x.c 	tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb),
tx_ring          2366 drivers/net/ethernet/agere/et131x.c 	if (!tx_ring->tcb_ring)
tx_ring          2370 drivers/net/ethernet/agere/et131x.c 	tx_ring->tx_desc_ring = dma_alloc_coherent(&adapter->pdev->dev,
tx_ring          2372 drivers/net/ethernet/agere/et131x.c 						   &tx_ring->tx_desc_ring_pa,
tx_ring          2374 drivers/net/ethernet/agere/et131x.c 	if (!tx_ring->tx_desc_ring) {
tx_ring          2380 drivers/net/ethernet/agere/et131x.c 	tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
tx_ring          2382 drivers/net/ethernet/agere/et131x.c 						    &tx_ring->tx_status_pa,
tx_ring          2384 drivers/net/ethernet/agere/et131x.c 	if (!tx_ring->tx_status) {
tx_ring          2395 drivers/net/ethernet/agere/et131x.c 	struct tx_ring *tx_ring = &adapter->tx_ring;
tx_ring          2397 drivers/net/ethernet/agere/et131x.c 	if (tx_ring->tx_desc_ring) {
tx_ring          2402 drivers/net/ethernet/agere/et131x.c 				  tx_ring->tx_desc_ring,
tx_ring          2403 drivers/net/ethernet/agere/et131x.c 				  tx_ring->tx_desc_ring_pa);
tx_ring          2404 drivers/net/ethernet/agere/et131x.c 		tx_ring->tx_desc_ring = NULL;
tx_ring          2408 drivers/net/ethernet/agere/et131x.c 	if (tx_ring->tx_status) {
tx_ring          2411 drivers/net/ethernet/agere/et131x.c 				  tx_ring->tx_status,
tx_ring          2412 drivers/net/ethernet/agere/et131x.c 				  tx_ring->tx_status_pa);
tx_ring          2414 drivers/net/ethernet/agere/et131x.c 		tx_ring->tx_status = NULL;
tx_ring          2417 drivers/net/ethernet/agere/et131x.c 	kfree(tx_ring->tcb_ring);
tx_ring          2432 drivers/net/ethernet/agere/et131x.c 	struct tx_ring *tx_ring = &adapter->tx_ring;
tx_ring          2504 drivers/net/ethernet/agere/et131x.c 		if (++tx_ring->since_irq == PARM_TX_NUM_BUFS_DEF) {
tx_ring          2508 drivers/net/ethernet/agere/et131x.c 			tx_ring->since_irq = 0;
tx_ring          2519 drivers/net/ethernet/agere/et131x.c 	tcb->index_start = tx_ring->send_idx;
tx_ring          2522 drivers/net/ethernet/agere/et131x.c 	thiscopy = NUM_DESC_PER_RING_TX - INDEX10(tx_ring->send_idx);
tx_ring          2531 drivers/net/ethernet/agere/et131x.c 	memcpy(tx_ring->tx_desc_ring + INDEX10(tx_ring->send_idx),
tx_ring          2535 drivers/net/ethernet/agere/et131x.c 	add_10bit(&tx_ring->send_idx, thiscopy);
tx_ring          2537 drivers/net/ethernet/agere/et131x.c 	if (INDEX10(tx_ring->send_idx) == 0 ||
tx_ring          2538 drivers/net/ethernet/agere/et131x.c 	    INDEX10(tx_ring->send_idx) == NUM_DESC_PER_RING_TX) {
tx_ring          2539 drivers/net/ethernet/agere/et131x.c 		tx_ring->send_idx &= ~ET_DMA10_MASK;
tx_ring          2540 drivers/net/ethernet/agere/et131x.c 		tx_ring->send_idx ^= ET_DMA10_WRAP;
tx_ring          2544 drivers/net/ethernet/agere/et131x.c 		memcpy(tx_ring->tx_desc_ring,
tx_ring          2548 drivers/net/ethernet/agere/et131x.c 		add_10bit(&tx_ring->send_idx, remainder);
tx_ring          2551 drivers/net/ethernet/agere/et131x.c 	if (INDEX10(tx_ring->send_idx) == 0) {
tx_ring          2552 drivers/net/ethernet/agere/et131x.c 		if (tx_ring->send_idx)
tx_ring          2557 drivers/net/ethernet/agere/et131x.c 		tcb->index = tx_ring->send_idx - 1;
tx_ring          2562 drivers/net/ethernet/agere/et131x.c 	if (tx_ring->send_tail)
tx_ring          2563 drivers/net/ethernet/agere/et131x.c 		tx_ring->send_tail->next = tcb;
tx_ring          2565 drivers/net/ethernet/agere/et131x.c 		tx_ring->send_head = tcb;
tx_ring          2567 drivers/net/ethernet/agere/et131x.c 	tx_ring->send_tail = tcb;
tx_ring          2571 drivers/net/ethernet/agere/et131x.c 	tx_ring->used++;
tx_ring          2576 drivers/net/ethernet/agere/et131x.c 	writel(tx_ring->send_idx, &adapter->regs->txdma.service_request);
tx_ring          2593 drivers/net/ethernet/agere/et131x.c 	struct tx_ring *tx_ring = &adapter->tx_ring;
tx_ring          2601 drivers/net/ethernet/agere/et131x.c 	tcb = tx_ring->tcb_qhead;
tx_ring          2608 drivers/net/ethernet/agere/et131x.c 	tx_ring->tcb_qhead = tcb->next;
tx_ring          2610 drivers/net/ethernet/agere/et131x.c 	if (tx_ring->tcb_qhead == NULL)
tx_ring          2611 drivers/net/ethernet/agere/et131x.c 		tx_ring->tcb_qtail = NULL;
tx_ring          2623 drivers/net/ethernet/agere/et131x.c 		if (tx_ring->tcb_qtail)
tx_ring          2624 drivers/net/ethernet/agere/et131x.c 			tx_ring->tcb_qtail->next = tcb;
tx_ring          2627 drivers/net/ethernet/agere/et131x.c 			tx_ring->tcb_qhead = tcb;
tx_ring          2629 drivers/net/ethernet/agere/et131x.c 		tx_ring->tcb_qtail = tcb;
tx_ring          2633 drivers/net/ethernet/agere/et131x.c 	WARN_ON(tx_ring->used > NUM_TCB);
tx_ring          2644 drivers/net/ethernet/agere/et131x.c 	struct tx_ring *tx_ring = &adapter->tx_ring;
tx_ring          2655 drivers/net/ethernet/agere/et131x.c 			desc = tx_ring->tx_desc_ring +
tx_ring          2671 drivers/net/ethernet/agere/et131x.c 		} while (desc != tx_ring->tx_desc_ring + INDEX10(tcb->index));
tx_ring          2683 drivers/net/ethernet/agere/et131x.c 	if (tx_ring->tcb_qtail)
tx_ring          2684 drivers/net/ethernet/agere/et131x.c 		tx_ring->tcb_qtail->next = tcb;
tx_ring          2686 drivers/net/ethernet/agere/et131x.c 		tx_ring->tcb_qhead = tcb;
tx_ring          2688 drivers/net/ethernet/agere/et131x.c 	tx_ring->tcb_qtail = tcb;
tx_ring          2691 drivers/net/ethernet/agere/et131x.c 	WARN_ON(tx_ring->used < 0);
tx_ring          2700 drivers/net/ethernet/agere/et131x.c 	struct tx_ring *tx_ring = &adapter->tx_ring;
tx_ring          2705 drivers/net/ethernet/agere/et131x.c 	tcb = tx_ring->send_head;
tx_ring          2710 drivers/net/ethernet/agere/et131x.c 		tx_ring->send_head = next;
tx_ring          2713 drivers/net/ethernet/agere/et131x.c 			tx_ring->send_tail = NULL;
tx_ring          2715 drivers/net/ethernet/agere/et131x.c 		tx_ring->used--;
tx_ring          2724 drivers/net/ethernet/agere/et131x.c 		tcb = tx_ring->send_head;
tx_ring          2731 drivers/net/ethernet/agere/et131x.c 	tx_ring->used = 0;
tx_ring          2745 drivers/net/ethernet/agere/et131x.c 	struct tx_ring *tx_ring = &adapter->tx_ring;
tx_ring          2755 drivers/net/ethernet/agere/et131x.c 	tcb = tx_ring->send_head;
tx_ring          2760 drivers/net/ethernet/agere/et131x.c 		tx_ring->used--;
tx_ring          2761 drivers/net/ethernet/agere/et131x.c 		tx_ring->send_head = tcb->next;
tx_ring          2763 drivers/net/ethernet/agere/et131x.c 			tx_ring->send_tail = NULL;
tx_ring          2770 drivers/net/ethernet/agere/et131x.c 		tcb = tx_ring->send_head;
tx_ring          2775 drivers/net/ethernet/agere/et131x.c 		tx_ring->used--;
tx_ring          2776 drivers/net/ethernet/agere/et131x.c 		tx_ring->send_head = tcb->next;
tx_ring          2778 drivers/net/ethernet/agere/et131x.c 			tx_ring->send_tail = NULL;
tx_ring          2785 drivers/net/ethernet/agere/et131x.c 		tcb = tx_ring->send_head;
tx_ring          2789 drivers/net/ethernet/agere/et131x.c 	if (tx_ring->used <= NUM_TCB / 3)
tx_ring          3370 drivers/net/ethernet/agere/et131x.c 	struct tx_ring *tx_ring = &adapter->tx_ring;
tx_ring          3397 drivers/net/ethernet/agere/et131x.c 		struct tcb *tcb = tx_ring->send_head;
tx_ring          3780 drivers/net/ethernet/agere/et131x.c 	struct tx_ring *tx_ring = &adapter->tx_ring;
tx_ring          3783 drivers/net/ethernet/agere/et131x.c 	if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
tx_ring          3790 drivers/net/ethernet/agere/et131x.c 	if (tx_ring->used >= NUM_TCB)
tx_ring          3817 drivers/net/ethernet/agere/et131x.c 	struct tx_ring *tx_ring = &adapter->tx_ring;
tx_ring          3839 drivers/net/ethernet/agere/et131x.c 	tcb = tx_ring->send_head;
tx_ring           729 drivers/net/ethernet/alteon/acenic.c 	if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) {
tx_ring           731 drivers/net/ethernet/alteon/acenic.c 		pci_free_consistent(ap->pdev, size, ap->tx_ring,
tx_ring           734 drivers/net/ethernet/alteon/acenic.c 	ap->tx_ring = NULL;
tx_ring           789 drivers/net/ethernet/alteon/acenic.c 		ap->tx_ring = pci_alloc_consistent(ap->pdev, size,
tx_ring           792 drivers/net/ethernet/alteon/acenic.c 		if (ap->tx_ring == NULL)
tx_ring          1287 drivers/net/ethernet/alteon/acenic.c 		ap->tx_ring = (__force struct tx_desc *) regs->Window;
tx_ring          1290 drivers/net/ethernet/alteon/acenic.c 			writel(0, (__force void __iomem *)ap->tx_ring  + i * 4);
tx_ring          1294 drivers/net/ethernet/alteon/acenic.c 		memset(ap->tx_ring, 0,
tx_ring          2333 drivers/net/ethernet/alteon/acenic.c 				tx = (__force struct tx_desc __iomem *) &ap->tx_ring[i];
tx_ring          2338 drivers/net/ethernet/alteon/acenic.c 				memset(ap->tx_ring + i, 0,
tx_ring          2434 drivers/net/ethernet/alteon/acenic.c 		desc = ap->tx_ring + idx;
tx_ring          2456 drivers/net/ethernet/alteon/acenic.c 		ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag);
tx_ring          2466 drivers/net/ethernet/alteon/acenic.c 			desc = ap->tx_ring + idx;
tx_ring           647 drivers/net/ethernet/alteon/acenic.h 	struct tx_desc		*tx_ring;
tx_ring           434 drivers/net/ethernet/altera/altera_tse.h 	struct tse_buffer *tx_ring;
tx_ring           283 drivers/net/ethernet/altera/altera_tse_main.c 	priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer),
tx_ring           285 drivers/net/ethernet/altera/altera_tse_main.c 	if (!priv->tx_ring)
tx_ring           306 drivers/net/ethernet/altera/altera_tse_main.c 	kfree(priv->tx_ring);
tx_ring           324 drivers/net/ethernet/altera/altera_tse_main.c 		tse_free_tx_buffer(priv, &priv->tx_ring[i]);
tx_ring           327 drivers/net/ethernet/altera/altera_tse_main.c 	kfree(priv->tx_ring);
tx_ring           459 drivers/net/ethernet/altera/altera_tse_main.c 		tx_buff = &priv->tx_ring[entry];
tx_ring           584 drivers/net/ethernet/altera/altera_tse_main.c 	buffer = &priv->tx_ring[entry];
tx_ring           138 drivers/net/ethernet/amazon/ena/ena_ethtool.c 		ring = &adapter->tx_ring[i];
tx_ring           336 drivers/net/ethernet/amazon/ena/ena_ethtool.c 		adapter->tx_ring[i].smoothed_interval = val;
tx_ring           419 drivers/net/ethernet/amazon/ena/ena_ethtool.c 	ring->tx_pending = adapter->tx_ring[0].ring_size;
tx_ring           176 drivers/net/ethernet/amazon/ena/ena_netdev.c 		txr = &adapter->tx_ring[i];
tx_ring           210 drivers/net/ethernet/amazon/ena/ena_netdev.c 	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
tx_ring           214 drivers/net/ethernet/amazon/ena/ena_netdev.c 	if (tx_ring->tx_buffer_info) {
tx_ring           220 drivers/net/ethernet/amazon/ena/ena_netdev.c 	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
tx_ring           223 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->tx_buffer_info = vzalloc_node(size, node);
tx_ring           224 drivers/net/ethernet/amazon/ena/ena_netdev.c 	if (!tx_ring->tx_buffer_info) {
tx_ring           225 drivers/net/ethernet/amazon/ena/ena_netdev.c 		tx_ring->tx_buffer_info = vzalloc(size);
tx_ring           226 drivers/net/ethernet/amazon/ena/ena_netdev.c 		if (!tx_ring->tx_buffer_info)
tx_ring           230 drivers/net/ethernet/amazon/ena/ena_netdev.c 	size = sizeof(u16) * tx_ring->ring_size;
tx_ring           231 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->free_ids = vzalloc_node(size, node);
tx_ring           232 drivers/net/ethernet/amazon/ena/ena_netdev.c 	if (!tx_ring->free_ids) {
tx_ring           233 drivers/net/ethernet/amazon/ena/ena_netdev.c 		tx_ring->free_ids = vzalloc(size);
tx_ring           234 drivers/net/ethernet/amazon/ena/ena_netdev.c 		if (!tx_ring->free_ids)
tx_ring           238 drivers/net/ethernet/amazon/ena/ena_netdev.c 	size = tx_ring->tx_max_header_size;
tx_ring           239 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
tx_ring           240 drivers/net/ethernet/amazon/ena/ena_netdev.c 	if (!tx_ring->push_buf_intermediate_buf) {
tx_ring           241 drivers/net/ethernet/amazon/ena/ena_netdev.c 		tx_ring->push_buf_intermediate_buf = vzalloc(size);
tx_ring           242 drivers/net/ethernet/amazon/ena/ena_netdev.c 		if (!tx_ring->push_buf_intermediate_buf)
tx_ring           247 drivers/net/ethernet/amazon/ena/ena_netdev.c 	for (i = 0; i < tx_ring->ring_size; i++)
tx_ring           248 drivers/net/ethernet/amazon/ena/ena_netdev.c 		tx_ring->free_ids[i] = i;
tx_ring           251 drivers/net/ethernet/amazon/ena/ena_netdev.c 	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));
tx_ring           253 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->next_to_use = 0;
tx_ring           254 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->next_to_clean = 0;
tx_ring           255 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->cpu = ena_irq->cpu;
tx_ring           259 drivers/net/ethernet/amazon/ena/ena_netdev.c 	vfree(tx_ring->free_ids);
tx_ring           260 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->free_ids = NULL;
tx_ring           262 drivers/net/ethernet/amazon/ena/ena_netdev.c 	vfree(tx_ring->tx_buffer_info);
tx_ring           263 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->tx_buffer_info = NULL;
tx_ring           276 drivers/net/ethernet/amazon/ena/ena_netdev.c 	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
tx_ring           278 drivers/net/ethernet/amazon/ena/ena_netdev.c 	vfree(tx_ring->tx_buffer_info);
tx_ring           279 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->tx_buffer_info = NULL;
tx_ring           281 drivers/net/ethernet/amazon/ena/ena_netdev.c 	vfree(tx_ring->free_ids);
tx_ring           282 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->free_ids = NULL;
tx_ring           284 drivers/net/ethernet/amazon/ena/ena_netdev.c 	vfree(tx_ring->push_buf_intermediate_buf);
tx_ring           285 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->push_buf_intermediate_buf = NULL;
tx_ring           623 drivers/net/ethernet/amazon/ena/ena_netdev.c static void ena_unmap_tx_skb(struct ena_ring *tx_ring,
tx_ring           637 drivers/net/ethernet/amazon/ena/ena_netdev.c 		dma_unmap_single(tx_ring->dev,
tx_ring           647 drivers/net/ethernet/amazon/ena/ena_netdev.c 		dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
tx_ring           656 drivers/net/ethernet/amazon/ena/ena_netdev.c static void ena_free_tx_bufs(struct ena_ring *tx_ring)
tx_ring           661 drivers/net/ethernet/amazon/ena/ena_netdev.c 	for (i = 0; i < tx_ring->ring_size; i++) {
tx_ring           662 drivers/net/ethernet/amazon/ena/ena_netdev.c 		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
tx_ring           668 drivers/net/ethernet/amazon/ena/ena_netdev.c 			netdev_notice(tx_ring->netdev,
tx_ring           670 drivers/net/ethernet/amazon/ena/ena_netdev.c 				      tx_ring->qid, i);
tx_ring           673 drivers/net/ethernet/amazon/ena/ena_netdev.c 			netdev_dbg(tx_ring->netdev,
tx_ring           675 drivers/net/ethernet/amazon/ena/ena_netdev.c 				   tx_ring->qid, i);
tx_ring           678 drivers/net/ethernet/amazon/ena/ena_netdev.c 		ena_unmap_tx_skb(tx_ring, tx_info);
tx_ring           682 drivers/net/ethernet/amazon/ena/ena_netdev.c 	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring           683 drivers/net/ethernet/amazon/ena/ena_netdev.c 						  tx_ring->qid));
tx_ring           688 drivers/net/ethernet/amazon/ena/ena_netdev.c 	struct ena_ring *tx_ring;
tx_ring           692 drivers/net/ethernet/amazon/ena/ena_netdev.c 		tx_ring = &adapter->tx_ring[i];
tx_ring           693 drivers/net/ethernet/amazon/ena/ena_netdev.c 		ena_free_tx_bufs(tx_ring);
tx_ring           726 drivers/net/ethernet/amazon/ena/ena_netdev.c static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
tx_ring           730 drivers/net/ethernet/amazon/ena/ena_netdev.c 	if (likely(req_id < tx_ring->ring_size)) {
tx_ring           731 drivers/net/ethernet/amazon/ena/ena_netdev.c 		tx_info = &tx_ring->tx_buffer_info[req_id];
tx_ring           737 drivers/net/ethernet/amazon/ena/ena_netdev.c 		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
tx_ring           740 drivers/net/ethernet/amazon/ena/ena_netdev.c 		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
tx_ring           743 drivers/net/ethernet/amazon/ena/ena_netdev.c 	u64_stats_update_begin(&tx_ring->syncp);
tx_ring           744 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->tx_stats.bad_req_id++;
tx_ring           745 drivers/net/ethernet/amazon/ena/ena_netdev.c 	u64_stats_update_end(&tx_ring->syncp);
tx_ring           748 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
tx_ring           749 drivers/net/ethernet/amazon/ena/ena_netdev.c 	set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
tx_ring           753 drivers/net/ethernet/amazon/ena/ena_netdev.c static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
tx_ring           764 drivers/net/ethernet/amazon/ena/ena_netdev.c 	next_to_clean = tx_ring->next_to_clean;
tx_ring           765 drivers/net/ethernet/amazon/ena/ena_netdev.c 	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);
tx_ring           771 drivers/net/ethernet/amazon/ena/ena_netdev.c 		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
tx_ring           776 drivers/net/ethernet/amazon/ena/ena_netdev.c 		rc = validate_tx_req_id(tx_ring, req_id);
tx_ring           780 drivers/net/ethernet/amazon/ena/ena_netdev.c 		tx_info = &tx_ring->tx_buffer_info[req_id];
tx_ring           789 drivers/net/ethernet/amazon/ena/ena_netdev.c 		ena_unmap_tx_skb(tx_ring, tx_info);
tx_ring           791 drivers/net/ethernet/amazon/ena/ena_netdev.c 		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
tx_ring           792 drivers/net/ethernet/amazon/ena/ena_netdev.c 			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
tx_ring           800 drivers/net/ethernet/amazon/ena/ena_netdev.c 		tx_ring->free_ids[next_to_clean] = req_id;
tx_ring           802 drivers/net/ethernet/amazon/ena/ena_netdev.c 						     tx_ring->ring_size);
tx_ring           805 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->next_to_clean = next_to_clean;
tx_ring           806 drivers/net/ethernet/amazon/ena/ena_netdev.c 	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
tx_ring           807 drivers/net/ethernet/amazon/ena/ena_netdev.c 	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
tx_ring           811 drivers/net/ethernet/amazon/ena/ena_netdev.c 	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
tx_ring           813 drivers/net/ethernet/amazon/ena/ena_netdev.c 		  tx_ring->qid, tx_pkts);
tx_ring           820 drivers/net/ethernet/amazon/ena/ena_netdev.c 	above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
tx_ring           825 drivers/net/ethernet/amazon/ena/ena_netdev.c 			ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
tx_ring           828 drivers/net/ethernet/amazon/ena/ena_netdev.c 		    test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
tx_ring           830 drivers/net/ethernet/amazon/ena/ena_netdev.c 			u64_stats_update_begin(&tx_ring->syncp);
tx_ring           831 drivers/net/ethernet/amazon/ena/ena_netdev.c 			tx_ring->tx_stats.queue_wakeup++;
tx_ring           832 drivers/net/ethernet/amazon/ena/ena_netdev.c 			u64_stats_update_end(&tx_ring->syncp);
tx_ring          1187 drivers/net/ethernet/amazon/ena/ena_netdev.c static void ena_unmask_interrupt(struct ena_ring *tx_ring,
tx_ring          1200 drivers/net/ethernet/amazon/ena/ena_netdev.c 				tx_ring->smoothed_interval,
tx_ring          1210 drivers/net/ethernet/amazon/ena/ena_netdev.c static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
tx_ring          1217 drivers/net/ethernet/amazon/ena/ena_netdev.c 	if (likely(tx_ring->cpu == cpu))
tx_ring          1224 drivers/net/ethernet/amazon/ena/ena_netdev.c 		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
tx_ring          1228 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->cpu = cpu;
tx_ring          1239 drivers/net/ethernet/amazon/ena/ena_netdev.c 	struct ena_ring *tx_ring, *rx_ring;
tx_ring          1247 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring = ena_napi->tx_ring;
tx_ring          1250 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
tx_ring          1252 drivers/net/ethernet/amazon/ena/ena_netdev.c 	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
tx_ring          1253 drivers/net/ethernet/amazon/ena/ena_netdev.c 	    test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
tx_ring          1258 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
tx_ring          1268 drivers/net/ethernet/amazon/ena/ena_netdev.c 	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
tx_ring          1269 drivers/net/ethernet/amazon/ena/ena_netdev.c 		     test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
tx_ring          1286 drivers/net/ethernet/amazon/ena/ena_netdev.c 			ena_unmask_interrupt(tx_ring, rx_ring);
tx_ring          1289 drivers/net/ethernet/amazon/ena/ena_netdev.c 		ena_update_ring_numa_node(tx_ring, rx_ring);
tx_ring          1296 drivers/net/ethernet/amazon/ena/ena_netdev.c 	u64_stats_update_begin(&tx_ring->syncp);
tx_ring          1297 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->tx_stats.napi_comp += napi_comp_call;
tx_ring          1298 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->tx_stats.tx_poll++;
tx_ring          1299 drivers/net/ethernet/amazon/ena/ena_netdev.c 	u64_stats_update_end(&tx_ring->syncp);
tx_ring          1325 drivers/net/ethernet/amazon/ena/ena_netdev.c 	ena_napi->tx_ring->first_interrupt = true;
tx_ring          1553 drivers/net/ethernet/amazon/ena/ena_netdev.c 		napi->tx_ring = &adapter->tx_ring[i];
tx_ring          1632 drivers/net/ethernet/amazon/ena/ena_netdev.c 	struct ena_ring *tx_ring;
tx_ring          1639 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring = &adapter->tx_ring[qid];
tx_ring          1649 drivers/net/ethernet/amazon/ena/ena_netdev.c 	ctx.queue_size = tx_ring->ring_size;
tx_ring          1650 drivers/net/ethernet/amazon/ena/ena_netdev.c 	ctx.numa_node = cpu_to_node(tx_ring->cpu);
tx_ring          1661 drivers/net/ethernet/amazon/ena/ena_netdev.c 				     &tx_ring->ena_com_io_sq,
tx_ring          1662 drivers/net/ethernet/amazon/ena/ena_netdev.c 				     &tx_ring->ena_com_io_cq);
tx_ring          1671 drivers/net/ethernet/amazon/ena/ena_netdev.c 	ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
tx_ring          1772 drivers/net/ethernet/amazon/ena/ena_netdev.c 		adapter->tx_ring[i].ring_size = new_tx_size;
tx_ring          1836 drivers/net/ethernet/amazon/ena/ena_netdev.c 		cur_tx_ring_size = adapter->tx_ring[0].ring_size;
tx_ring          1910 drivers/net/ethernet/amazon/ena/ena_netdev.c 		ena_unmask_interrupt(&adapter->tx_ring[i],
tx_ring          2110 drivers/net/ethernet/amazon/ena/ena_netdev.c static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
tx_ring          2118 drivers/net/ethernet/amazon/ena/ena_netdev.c 	if (num_frags < tx_ring->sgl_size)
tx_ring          2121 drivers/net/ethernet/amazon/ena/ena_netdev.c 	if ((num_frags == tx_ring->sgl_size) &&
tx_ring          2122 drivers/net/ethernet/amazon/ena/ena_netdev.c 	    (header_len < tx_ring->tx_max_header_size))
tx_ring          2125 drivers/net/ethernet/amazon/ena/ena_netdev.c 	u64_stats_update_begin(&tx_ring->syncp);
tx_ring          2126 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->tx_stats.linearize++;
tx_ring          2127 drivers/net/ethernet/amazon/ena/ena_netdev.c 	u64_stats_update_end(&tx_ring->syncp);
tx_ring          2131 drivers/net/ethernet/amazon/ena/ena_netdev.c 		u64_stats_update_begin(&tx_ring->syncp);
tx_ring          2132 drivers/net/ethernet/amazon/ena/ena_netdev.c 		tx_ring->tx_stats.linearize_failed++;
tx_ring          2133 drivers/net/ethernet/amazon/ena/ena_netdev.c 		u64_stats_update_end(&tx_ring->syncp);
tx_ring          2139 drivers/net/ethernet/amazon/ena/ena_netdev.c static int ena_tx_map_skb(struct ena_ring *tx_ring,
tx_ring          2145 drivers/net/ethernet/amazon/ena/ena_netdev.c 	struct ena_adapter *adapter = tx_ring->adapter;
tx_ring          2157 drivers/net/ethernet/amazon/ena/ena_netdev.c 	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
tx_ring          2168 drivers/net/ethernet/amazon/ena/ena_netdev.c 		push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
tx_ring          2170 drivers/net/ethernet/amazon/ena/ena_netdev.c 					       tx_ring->push_buf_intermediate_buf);
tx_ring          2173 drivers/net/ethernet/amazon/ena/ena_netdev.c 			u64_stats_update_begin(&tx_ring->syncp);
tx_ring          2174 drivers/net/ethernet/amazon/ena/ena_netdev.c 			tx_ring->tx_stats.llq_buffer_copy++;
tx_ring          2175 drivers/net/ethernet/amazon/ena/ena_netdev.c 			u64_stats_update_end(&tx_ring->syncp);
tx_ring          2182 drivers/net/ethernet/amazon/ena/ena_netdev.c 				    tx_ring->tx_max_header_size);
tx_ring          2190 drivers/net/ethernet/amazon/ena/ena_netdev.c 		dma = dma_map_single(tx_ring->dev, skb->data + push_len,
tx_ring          2192 drivers/net/ethernet/amazon/ena/ena_netdev.c 		if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
tx_ring          2217 drivers/net/ethernet/amazon/ena/ena_netdev.c 		dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
tx_ring          2219 drivers/net/ethernet/amazon/ena/ena_netdev.c 		if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
tx_ring          2232 drivers/net/ethernet/amazon/ena/ena_netdev.c 	u64_stats_update_begin(&tx_ring->syncp);
tx_ring          2233 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->tx_stats.dma_mapping_err++;
tx_ring          2234 drivers/net/ethernet/amazon/ena/ena_netdev.c 	u64_stats_update_end(&tx_ring->syncp);
tx_ring          2240 drivers/net/ethernet/amazon/ena/ena_netdev.c 	ena_unmap_tx_skb(tx_ring, tx_info);
tx_ring          2251 drivers/net/ethernet/amazon/ena/ena_netdev.c 	struct ena_ring *tx_ring;
tx_ring          2260 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring = &adapter->tx_ring[qid];
tx_ring          2263 drivers/net/ethernet/amazon/ena/ena_netdev.c 	rc = ena_check_and_linearize_skb(tx_ring, skb);
tx_ring          2269 drivers/net/ethernet/amazon/ena/ena_netdev.c 	next_to_use = tx_ring->next_to_use;
tx_ring          2270 drivers/net/ethernet/amazon/ena/ena_netdev.c 	req_id = tx_ring->free_ids[next_to_use];
tx_ring          2271 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_info = &tx_ring->tx_buffer_info[req_id];
tx_ring          2276 drivers/net/ethernet/amazon/ena/ena_netdev.c 	rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
tx_ring          2290 drivers/net/ethernet/amazon/ena/ena_netdev.c 	if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, &ena_tx_ctx))) {
tx_ring          2294 drivers/net/ethernet/amazon/ena/ena_netdev.c 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
tx_ring          2298 drivers/net/ethernet/amazon/ena/ena_netdev.c 	rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
tx_ring          2312 drivers/net/ethernet/amazon/ena/ena_netdev.c 		u64_stats_update_begin(&tx_ring->syncp);
tx_ring          2313 drivers/net/ethernet/amazon/ena/ena_netdev.c 		tx_ring->tx_stats.prepare_ctx_err++;
tx_ring          2314 drivers/net/ethernet/amazon/ena/ena_netdev.c 		u64_stats_update_end(&tx_ring->syncp);
tx_ring          2322 drivers/net/ethernet/amazon/ena/ena_netdev.c 	u64_stats_update_begin(&tx_ring->syncp);
tx_ring          2323 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->tx_stats.cnt++;
tx_ring          2324 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->tx_stats.bytes += skb->len;
tx_ring          2325 drivers/net/ethernet/amazon/ena/ena_netdev.c 	u64_stats_update_end(&tx_ring->syncp);
tx_ring          2331 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
tx_ring          2332 drivers/net/ethernet/amazon/ena/ena_netdev.c 		tx_ring->ring_size);
tx_ring          2338 drivers/net/ethernet/amazon/ena/ena_netdev.c 	if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
tx_ring          2339 drivers/net/ethernet/amazon/ena/ena_netdev.c 						   tx_ring->sgl_size + 2))) {
tx_ring          2344 drivers/net/ethernet/amazon/ena/ena_netdev.c 		u64_stats_update_begin(&tx_ring->syncp);
tx_ring          2345 drivers/net/ethernet/amazon/ena/ena_netdev.c 		tx_ring->tx_stats.queue_stop++;
tx_ring          2346 drivers/net/ethernet/amazon/ena/ena_netdev.c 		u64_stats_update_end(&tx_ring->syncp);
tx_ring          2358 drivers/net/ethernet/amazon/ena/ena_netdev.c 		if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
tx_ring          2361 drivers/net/ethernet/amazon/ena/ena_netdev.c 			u64_stats_update_begin(&tx_ring->syncp);
tx_ring          2362 drivers/net/ethernet/amazon/ena/ena_netdev.c 			tx_ring->tx_stats.queue_wakeup++;
tx_ring          2363 drivers/net/ethernet/amazon/ena/ena_netdev.c 			u64_stats_update_end(&tx_ring->syncp);
tx_ring          2371 drivers/net/ethernet/amazon/ena/ena_netdev.c 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
tx_ring          2372 drivers/net/ethernet/amazon/ena/ena_netdev.c 		u64_stats_update_begin(&tx_ring->syncp);
tx_ring          2373 drivers/net/ethernet/amazon/ena/ena_netdev.c 		tx_ring->tx_stats.doorbells++;
tx_ring          2374 drivers/net/ethernet/amazon/ena/ena_netdev.c 		u64_stats_update_end(&tx_ring->syncp);
tx_ring          2380 drivers/net/ethernet/amazon/ena/ena_netdev.c 	ena_unmap_tx_skb(tx_ring, tx_info);
tx_ring          2494 drivers/net/ethernet/amazon/ena/ena_netdev.c 	struct ena_ring *rx_ring, *tx_ring;
tx_ring          2505 drivers/net/ethernet/amazon/ena/ena_netdev.c 		tx_ring = &adapter->tx_ring[i];
tx_ring          2508 drivers/net/ethernet/amazon/ena/ena_netdev.c 			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
tx_ring          2509 drivers/net/ethernet/amazon/ena/ena_netdev.c 			packets = tx_ring->tx_stats.cnt;
tx_ring          2510 drivers/net/ethernet/amazon/ena/ena_netdev.c 			bytes = tx_ring->tx_stats.bytes;
tx_ring          2511 drivers/net/ethernet/amazon/ena/ena_netdev.c 		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
tx_ring          2876 drivers/net/ethernet/amazon/ena/ena_netdev.c 					  struct ena_ring *tx_ring)
tx_ring          2883 drivers/net/ethernet/amazon/ena/ena_netdev.c 	for (i = 0; i < tx_ring->ring_size; i++) {
tx_ring          2884 drivers/net/ethernet/amazon/ena/ena_netdev.c 		tx_buf = &tx_ring->tx_buffer_info[i];
tx_ring          2891 drivers/net/ethernet/amazon/ena/ena_netdev.c 		if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
tx_ring          2898 drivers/net/ethernet/amazon/ena/ena_netdev.c 				  tx_ring->qid);
tx_ring          2910 drivers/net/ethernet/amazon/ena/ena_netdev.c 					     tx_ring->qid, i);
tx_ring          2928 drivers/net/ethernet/amazon/ena/ena_netdev.c 	u64_stats_update_begin(&tx_ring->syncp);
tx_ring          2929 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->tx_stats.missed_tx = missed_tx;
tx_ring          2930 drivers/net/ethernet/amazon/ena/ena_netdev.c 	u64_stats_update_end(&tx_ring->syncp);
tx_ring          2937 drivers/net/ethernet/amazon/ena/ena_netdev.c 	struct ena_ring *tx_ring;
tx_ring          2956 drivers/net/ethernet/amazon/ena/ena_netdev.c 		tx_ring = &adapter->tx_ring[i];
tx_ring          2959 drivers/net/ethernet/amazon/ena/ena_netdev.c 		rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
tx_ring           156 drivers/net/ethernet/amazon/ena/ena_netdev.h 	struct ena_ring *tx_ring;
tx_ring           355 drivers/net/ethernet/amazon/ena/ena_netdev.h 	struct ena_ring tx_ring[ENA_MAX_NUM_IO_QUEUES]
tx_ring           292 drivers/net/ethernet/amd/amd8111e.c 	     	if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
tx_ring           333 drivers/net/ethernet/amd/amd8111e.c 		lp->tx_ring[i].buff_phy_addr = 0;
tx_ring           334 drivers/net/ethernet/amd/amd8111e.c 		lp->tx_ring[i].tx_flags = 0;
tx_ring           335 drivers/net/ethernet/amd/amd8111e.c 		lp->tx_ring[i].buff_count = 0;
tx_ring           349 drivers/net/ethernet/amd/amd8111e.c 		 sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring,
tx_ring           622 drivers/net/ethernet/amd/amd8111e.c 	if(lp->tx_ring){
tx_ring           625 drivers/net/ethernet/amd/amd8111e.c 			lp->tx_ring, lp->tx_ring_dma_addr);
tx_ring           627 drivers/net/ethernet/amd/amd8111e.c 		lp->tx_ring = NULL;
tx_ring           644 drivers/net/ethernet/amd/amd8111e.c 		status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);
tx_ring           649 drivers/net/ethernet/amd/amd8111e.c 		lp->tx_ring[tx_index].buff_phy_addr = 0;
tx_ring           664 drivers/net/ethernet/amd/amd8111e.c 			le16_to_cpu(lp->tx_ring[tx_index].buff_count);
tx_ring          1259 drivers/net/ethernet/amd/amd8111e.c 	lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);
tx_ring          1262 drivers/net/ethernet/amd/amd8111e.c 	lp->tx_ring[tx_index].tx_flags = 0;
tx_ring          1266 drivers/net/ethernet/amd/amd8111e.c 		lp->tx_ring[tx_index].tag_ctrl_cmd |=
tx_ring          1268 drivers/net/ethernet/amd/amd8111e.c 		lp->tx_ring[tx_index].tag_ctrl_info =
tx_ring          1275 drivers/net/ethernet/amd/amd8111e.c 	lp->tx_ring[tx_index].buff_phy_addr =
tx_ring          1280 drivers/net/ethernet/amd/amd8111e.c 	lp->tx_ring[tx_index].tx_flags |=
tx_ring           736 drivers/net/ethernet/amd/amd8111e.h 	struct amd8111e_tx_dr*  tx_ring;
tx_ring            88 drivers/net/ethernet/amd/ariadne.c 	volatile struct TDRE *tx_ring[TX_RING_SIZE];
tx_ring           100 drivers/net/ethernet/amd/ariadne.c 	struct TDRE tx_ring[TX_RING_SIZE];
tx_ring           130 drivers/net/ethernet/amd/ariadne.c 		volatile struct TDRE *t = &lancedata->tx_ring[i];
tx_ring           138 drivers/net/ethernet/amd/ariadne.c 		priv->tx_ring[i] = &lancedata->tx_ring[i];
tx_ring           141 drivers/net/ethernet/amd/ariadne.c 			   i, &lancedata->tx_ring[i], lancedata->tx_buff[i]);
tx_ring           310 drivers/net/ethernet/amd/ariadne.c 				int status = lowb(priv->tx_ring[entry]->TMD1);
tx_ring           315 drivers/net/ethernet/amd/ariadne.c 				priv->tx_ring[entry]->TMD1 &= 0xff00;
tx_ring           319 drivers/net/ethernet/amd/ariadne.c 					int err_status = priv->tx_ring[entry]->TMD3;
tx_ring           456 drivers/net/ethernet/amd/ariadne.c 	lance->RDP = swloww(ARIADNE_RAM + offsetof(struct lancedata, tx_ring));
tx_ring           458 drivers/net/ethernet/amd/ariadne.c 	lance->RDP = swhighw(ARIADNE_RAM + offsetof(struct lancedata, tx_ring));
tx_ring           581 drivers/net/ethernet/amd/ariadne.c 	priv->tx_ring[entry]->TMD2 = swapw((u_short)-skb->len);
tx_ring           582 drivers/net/ethernet/amd/ariadne.c 	priv->tx_ring[entry]->TMD3 = 0x0000;
tx_ring           591 drivers/net/ethernet/amd/ariadne.c 	priv->tx_ring[entry]->TMD1 = (priv->tx_ring[entry]->TMD1 & 0xff00)
tx_ring           612 drivers/net/ethernet/amd/ariadne.c 	if (lowb(priv->tx_ring[(entry + 1) % TX_RING_SIZE]->TMD1) != 0) {
tx_ring           151 drivers/net/ethernet/amd/atarilance.c 	struct ringdesc	tx_ring;
tx_ring           620 drivers/net/ethernet/amd/atarilance.c 	MEM->init.tx_ring.adr_lo = offsetof( struct lance_memory, tx_head );
tx_ring           621 drivers/net/ethernet/amd/atarilance.c 	MEM->init.tx_ring.adr_hi = 0;
tx_ring           622 drivers/net/ethernet/amd/atarilance.c 	MEM->init.tx_ring.len    = TX_RING_LEN_BITS;
tx_ring           232 drivers/net/ethernet/amd/lance.c 	u32  tx_ring;
tx_ring           238 drivers/net/ethernet/amd/lance.c 	struct lance_tx_head tx_ring[TX_RING_SIZE];
tx_ring           575 drivers/net/ethernet/amd/lance.c 	lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
tx_ring           788 drivers/net/ethernet/amd/lance.c 		           (u32) isa_virt_to_bus(lp->tx_ring),
tx_ring           889 drivers/net/ethernet/amd/lance.c 		lp->tx_ring[i].base = 0;
tx_ring           898 drivers/net/ethernet/amd/lance.c 	lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
tx_ring           938 drivers/net/ethernet/amd/lance.c 			     lp->tx_ring[i].base, -lp->tx_ring[i].length,
tx_ring           939 drivers/net/ethernet/amd/lance.c 				lp->tx_ring[i].misc);
tx_ring           980 drivers/net/ethernet/amd/lance.c 			lp->tx_ring[entry].length = -ETH_ZLEN;
tx_ring           983 drivers/net/ethernet/amd/lance.c 			lp->tx_ring[entry].length = -skb->len;
tx_ring           985 drivers/net/ethernet/amd/lance.c 		lp->tx_ring[entry].length = -skb->len;
tx_ring           987 drivers/net/ethernet/amd/lance.c 	lp->tx_ring[entry].misc = 0x0000;
tx_ring           998 drivers/net/ethernet/amd/lance.c 		lp->tx_ring[entry].base =
tx_ring          1003 drivers/net/ethernet/amd/lance.c 		lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
tx_ring          1052 drivers/net/ethernet/amd/lance.c 				int status = lp->tx_ring[entry].base;
tx_ring          1057 drivers/net/ethernet/amd/lance.c 				lp->tx_ring[entry].base = 0;
tx_ring          1061 drivers/net/ethernet/amd/lance.c 					int err_status = lp->tx_ring[entry].misc;
tx_ring           241 drivers/net/ethernet/amd/pcnet32.c 	__le32	tx_ring;
tx_ring           263 drivers/net/ethernet/amd/pcnet32.c 	struct pcnet32_tx_head	*tx_ring;
tx_ring           510 drivers/net/ethernet/amd/pcnet32.c 			    lp->tx_ring, lp->tx_ring_dma_addr);
tx_ring           515 drivers/net/ethernet/amd/pcnet32.c 	lp->tx_ring = new_tx_ring;
tx_ring          1026 drivers/net/ethernet/amd/pcnet32.c 		lp->tx_ring[x].length = cpu_to_le16(-skb->len);
tx_ring          1027 drivers/net/ethernet/amd/pcnet32.c 		lp->tx_ring[x].misc = 0;
tx_ring          1052 drivers/net/ethernet/amd/pcnet32.c 		lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
tx_ring          1054 drivers/net/ethernet/amd/pcnet32.c 		lp->tx_ring[x].status = cpu_to_le16(status);
tx_ring          1319 drivers/net/ethernet/amd/pcnet32.c 		int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
tx_ring          1324 drivers/net/ethernet/amd/pcnet32.c 		lp->tx_ring[entry].base = 0;
tx_ring          1328 drivers/net/ethernet/amd/pcnet32.c 			int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
tx_ring          1913 drivers/net/ethernet/amd/pcnet32.c 	lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
tx_ring          2020 drivers/net/ethernet/amd/pcnet32.c 	lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
tx_ring          2024 drivers/net/ethernet/amd/pcnet32.c 	if (lp->tx_ring == NULL) {
tx_ring          2077 drivers/net/ethernet/amd/pcnet32.c 	if (lp->tx_ring) {
tx_ring          2080 drivers/net/ethernet/amd/pcnet32.c 				    lp->tx_ring_size, lp->tx_ring,
tx_ring          2082 drivers/net/ethernet/amd/pcnet32.c 		lp->tx_ring = NULL;
tx_ring          2347 drivers/net/ethernet/amd/pcnet32.c 		lp->tx_ring[i].status = 0;	/* CPU owns buffer */
tx_ring          2409 drivers/net/ethernet/amd/pcnet32.c 		lp->tx_ring[i].status = 0;	/* CPU owns buffer */
tx_ring          2411 drivers/net/ethernet/amd/pcnet32.c 		lp->tx_ring[i].base = 0;
tx_ring          2420 drivers/net/ethernet/amd/pcnet32.c 	lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
tx_ring          2484 drivers/net/ethernet/amd/pcnet32.c 			       le32_to_cpu(lp->tx_ring[i].base),
tx_ring          2485 drivers/net/ethernet/amd/pcnet32.c 			       (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
tx_ring          2486 drivers/net/ethernet/amd/pcnet32.c 			       le32_to_cpu(lp->tx_ring[i].misc),
tx_ring          2487 drivers/net/ethernet/amd/pcnet32.c 			       le16_to_cpu(lp->tx_ring[i].status));
tx_ring          2526 drivers/net/ethernet/amd/pcnet32.c 	lp->tx_ring[entry].length = cpu_to_le16(-skb->len);
tx_ring          2528 drivers/net/ethernet/amd/pcnet32.c 	lp->tx_ring[entry].misc = 0x00000000;
tx_ring          2538 drivers/net/ethernet/amd/pcnet32.c 	lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
tx_ring          2540 drivers/net/ethernet/amd/pcnet32.c 	lp->tx_ring[entry].status = cpu_to_le16(status);
tx_ring          2548 drivers/net/ethernet/amd/pcnet32.c 	if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
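The lance.c and pcnet32.c entries above share a descriptor convention in which the TX buffer length is stored as its two's complement (length = -skb->len, cpu_to_le16(-skb->len)) and recovered in the dump paths with (-le16_to_cpu(length)) & 0xffff. A minimal standalone sketch of that round trip, not driver code, with 1514 chosen as an arbitrary example frame length:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t len = 1514;                              /* arbitrary frame length */
	uint16_t stored = (uint16_t)-len;                 /* what the driver writes: -skb->len */
	uint16_t decoded = (uint16_t)-stored & 0xffff;    /* what the dump path prints back */

	printf("stored=0x%04x decoded=%u\n", (unsigned)stored, (unsigned)decoded);
	return decoded == len ? 0 : 1;
}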
tx_ring           181 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		xgbe_free_ring(pdata, channel->tx_ring);
tx_ring           257 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		ret = xgbe_init_ring(pdata, channel->tx_ring,
tx_ring           400 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		ring = channel->tx_ring;
tx_ring           526 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	struct xgbe_ring *ring = channel->tx_ring;
tx_ring           195 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		if (pdata->channel[i]->tx_ring)
tx_ring           212 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		if (!pdata->channel[i]->tx_ring)
tx_ring           302 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		if (!pdata->channel[i]->tx_ring)
tx_ring           686 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		if (channel->tx_ring) {
tx_ring          1410 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	struct xgbe_ring *ring = channel->tx_ring;
tx_ring          1669 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	struct xgbe_ring *ring = channel->tx_ring;
tx_ring          3288 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		if (!pdata->channel[i]->tx_ring)
tx_ring          3320 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		if (!pdata->channel[i]->tx_ring)
tx_ring          3410 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		if (!pdata->channel[i]->tx_ring)
tx_ring          3433 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		if (!pdata->channel[i]->tx_ring)
tx_ring           181 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		kfree(pdata->channel[i]->tx_ring);
tx_ring           230 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			channel->tx_ring = ring;
tx_ring           250 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			  channel->tx_ring, channel->rx_ring);
tx_ring           315 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	if (channel->tx_ring && channel->rx_ring)
tx_ring           317 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	else if (channel->tx_ring)
tx_ring           341 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	if (channel->tx_ring && channel->rx_ring)
tx_ring           343 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	else if (channel->tx_ring)
tx_ring           700 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		if (!channel->tx_ring)
tx_ring           721 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		if (!channel->tx_ring)
tx_ring          1193 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		ring = pdata->channel[i]->tx_ring;
tx_ring          1465 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		if (!channel->tx_ring)
tx_ring          2026 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	ring = channel->tx_ring;
tx_ring          2617 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	struct xgbe_ring *ring = channel->tx_ring;
tx_ring           519 drivers/net/ethernet/amd/xgbe/xgbe.h 	struct xgbe_ring *tx_ring;
tx_ring           176 drivers/net/ethernet/apm/xgene-v2/main.c 	struct xge_desc_ring *tx_ring;
tx_ring           184 drivers/net/ethernet/apm/xgene-v2/main.c 	tx_ring = pdata->tx_ring;
tx_ring           185 drivers/net/ethernet/apm/xgene-v2/main.c 	tail = tx_ring->tail;
tx_ring           187 drivers/net/ethernet/apm/xgene-v2/main.c 	raw_desc = &tx_ring->raw_desc[tail];
tx_ring           210 drivers/net/ethernet/apm/xgene-v2/main.c 	tx_ring->pkt_info[tail].skb = skb;
tx_ring           211 drivers/net/ethernet/apm/xgene-v2/main.c 	tx_ring->pkt_info[tail].dma_addr = dma_addr;
tx_ring           212 drivers/net/ethernet/apm/xgene-v2/main.c 	tx_ring->pkt_info[tail].pkt_buf = pkt_buf;
tx_ring           222 drivers/net/ethernet/apm/xgene-v2/main.c 	tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);
tx_ring           240 drivers/net/ethernet/apm/xgene-v2/main.c 	struct xge_desc_ring *tx_ring;
tx_ring           248 drivers/net/ethernet/apm/xgene-v2/main.c 	tx_ring = pdata->tx_ring;
tx_ring           249 drivers/net/ethernet/apm/xgene-v2/main.c 	head = tx_ring->head;
tx_ring           256 drivers/net/ethernet/apm/xgene-v2/main.c 		raw_desc = &tx_ring->raw_desc[head];
tx_ring           263 drivers/net/ethernet/apm/xgene-v2/main.c 		skb = tx_ring->pkt_info[head].skb;
tx_ring           264 drivers/net/ethernet/apm/xgene-v2/main.c 		dma_addr = tx_ring->pkt_info[head].dma_addr;
tx_ring           265 drivers/net/ethernet/apm/xgene-v2/main.c 		pkt_buf = tx_ring->pkt_info[head].pkt_buf;
tx_ring           282 drivers/net/ethernet/apm/xgene-v2/main.c 	tx_ring->head = head;
tx_ring           396 drivers/net/ethernet/apm/xgene-v2/main.c 	xge_delete_desc_ring(ndev, pdata->tx_ring);
tx_ring           448 drivers/net/ethernet/apm/xgene-v2/main.c 	pdata->tx_ring = ring;
tx_ring           555 drivers/net/ethernet/apm/xgene-v2/main.c 	struct xge_desc_ring *tx_ring;
tx_ring           562 drivers/net/ethernet/apm/xgene-v2/main.c 	tx_ring = pdata->tx_ring;
tx_ring           565 drivers/net/ethernet/apm/xgene-v2/main.c 		raw_desc = &tx_ring->raw_desc[i];
tx_ring           570 drivers/net/ethernet/apm/xgene-v2/main.c 		skb = tx_ring->pkt_info[i].skb;
tx_ring           571 drivers/net/ethernet/apm/xgene-v2/main.c 		dma_addr = tx_ring->pkt_info[i].dma_addr;
tx_ring           572 drivers/net/ethernet/apm/xgene-v2/main.c 		pkt_buf = tx_ring->pkt_info[i].pkt_buf;
tx_ring           596 drivers/net/ethernet/apm/xgene-v2/main.c 	xge_setup_desc(pdata->tx_ring);
tx_ring            53 drivers/net/ethernet/apm/xgene-v2/main.h 	struct xge_desc_ring *tx_ring;
tx_ring            36 drivers/net/ethernet/apm/xgene-v2/ring.c 	struct xge_desc_ring *ring = pdata->tx_ring;
tx_ring           406 drivers/net/ethernet/apm/xgene/xgene_enet_main.c static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
tx_ring           409 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	struct device *dev = ndev_to_dev(tx_ring->ndev);
tx_ring           410 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
tx_ring           415 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	u16 tail = tx_ring->tail;
tx_ring           424 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	raw_desc = &tx_ring->raw_desc[tail];
tx_ring           425 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	tail = (tail + 1) & (tx_ring->slots - 1);
tx_ring           432 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
tx_ring           440 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		netdev_err(tx_ring->ndev, "DMA mapping error\n");
tx_ring           454 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	exp_desc = (void *)&tx_ring->raw_desc[tail];
tx_ring           455 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	tail = (tail + 1) & (tx_ring->slots - 1);
tx_ring           462 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);
tx_ring           501 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
tx_ring           541 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 				   SET_VAL(USERINFO, tx_ring->tail));
tx_ring           542 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
tx_ring           543 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	pdata->tx_level[tx_ring->cp_ring->index] += count;
tx_ring           544 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	tx_ring->tail = tail;
tx_ring           553 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	struct xgene_enet_desc_ring *tx_ring;
tx_ring           558 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	tx_ring = pdata->tx_ring[index];
tx_ring           570 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	count = xgene_enet_setup_tx_desc(tx_ring, skb);
tx_ring           581 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	tx_ring->tx_packets++;
tx_ring           582 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	tx_ring->tx_bytes += skb->len;
tx_ring           584 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	pdata->ring_ops->wr_cmd(tx_ring, count);
tx_ring           895 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		ring = pdata->tx_ring[i]->cp_ring;
tx_ring           921 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		ring = pdata->tx_ring[i]->cp_ring;
tx_ring           951 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		ring = pdata->tx_ring[i]->cp_ring;
tx_ring           968 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		napi = &pdata->tx_ring[i]->cp_ring->napi;
tx_ring           984 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		napi = &pdata->tx_ring[i]->cp_ring->napi;
tx_ring          1063 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		ring = pdata->tx_ring[i];
tx_ring          1069 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			pdata->tx_ring[i] = NULL;
tx_ring          1152 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		ring = pdata->tx_ring[i];
tx_ring          1303 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
tx_ring          1399 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
tx_ring          1402 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		if (!tx_ring) {
tx_ring          1407 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
tx_ring          1414 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		tx_ring->exp_bufs = exp_bufs;
tx_ring          1416 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		pdata->tx_ring[i] = tx_ring;
tx_ring          1436 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
tx_ring          1445 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
tx_ring          1453 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		tx_ring->cp_ring = cp_ring;
tx_ring          1454 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
tx_ring          1458 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		pdata->ring_ops->coalesce(pdata->tx_ring[0]);
tx_ring          1459 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;
tx_ring          1477 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		ring = pdata->tx_ring[i];
tx_ring          1983 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		napi = &pdata->tx_ring[i]->cp_ring->napi;
tx_ring           191 drivers/net/ethernet/apm/xgene/xgene_enet_main.h 	struct xgene_enet_desc_ring *tx_ring[XGENE_NUM_TX_RING];
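Both apm/xgene drivers above advance the TX tail with a power-of-two mask rather than a modulo: (tail + 1) & (XGENE_ENET_NUM_DESC - 1) in xgene-v2/main.c and (tail + 1) & (tx_ring->slots - 1) in xgene_enet_main.c. A minimal sketch of that index arithmetic, with RING_SLOTS as a hypothetical stand-in for the ring size:

#include <assert.h>
#include <stdio.h>

#define RING_SLOTS 256u    /* hypothetical ring size; must be a power of two */

static unsigned int ring_next(unsigned int idx)
{
	return (idx + 1) & (RING_SLOTS - 1);    /* wraps RING_SLOTS - 1 back to 0 */
}

int main(void)
{
	assert((RING_SLOTS & (RING_SLOTS - 1)) == 0);    /* mask trick needs a power of two */
	printf("254 -> %u, 255 -> %u\n", ring_next(254), ring_next(255));
	return 0;
}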
tx_ring            58 drivers/net/ethernet/apple/macmace.c 	unsigned char *tx_ring;
tx_ring           384 drivers/net/ethernet/apple/macmace.c 	mp->tx_ring = dma_alloc_coherent(mp->device,
tx_ring           387 drivers/net/ethernet/apple/macmace.c 	if (mp->tx_ring == NULL)
tx_ring           416 drivers/net/ethernet/apple/macmace.c 	                  mp->tx_ring, mp->tx_ring_phys);
tx_ring           464 drivers/net/ethernet/apple/macmace.c 	skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);
tx_ring           751 drivers/net/ethernet/apple/macmace.c 	                  mp->tx_ring, mp->tx_ring_phys);
tx_ring           299 drivers/net/ethernet/atheros/ag71xx.c 	struct ag71xx_ring tx_ring ____cacheline_aligned;
tx_ring           631 drivers/net/ethernet/atheros/ag71xx.c 	struct ag71xx_ring *ring = &ag->tx_ring;
tx_ring           822 drivers/net/ethernet/atheros/ag71xx.c 	ag->tx_ring.curr = 0;
tx_ring           823 drivers/net/ethernet/atheros/ag71xx.c 	ag->tx_ring.dirty = 0;
tx_ring           831 drivers/net/ethernet/atheros/ag71xx.c 	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
tx_ring           890 drivers/net/ethernet/atheros/ag71xx.c 	if (ag->tx_ring.desc_split) {
tx_ring           892 drivers/net/ethernet/atheros/ag71xx.c 		ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
tx_ring           957 drivers/net/ethernet/atheros/ag71xx.c 	struct ag71xx_ring *ring = &ag->tx_ring;
tx_ring           989 drivers/net/ethernet/atheros/ag71xx.c 	struct ag71xx_ring *ring = &ag->tx_ring;
tx_ring          1133 drivers/net/ethernet/atheros/ag71xx.c 	struct ag71xx_ring *tx = &ag->tx_ring;
tx_ring          1163 drivers/net/ethernet/atheros/ag71xx.c 	struct ag71xx_ring *tx = &ag->tx_ring;
tx_ring          1216 drivers/net/ethernet/atheros/ag71xx.c 	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
tx_ring          1336 drivers/net/ethernet/atheros/ag71xx.c 	ring = &ag->tx_ring;
tx_ring          1722 drivers/net/ethernet/atheros/ag71xx.c 		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
tx_ring          1725 drivers/net/ethernet/atheros/ag71xx.c 	ag->tx_ring.order = ag71xx_ring_size_order(tx_size);
tx_ring           444 drivers/net/ethernet/atheros/atl1e/atl1e.h 	struct atl1e_tx_ring tx_ring;
tx_ring           620 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	hw->tpd_thresh = adapter->tx_ring.count / 2;
tx_ring           647 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
tx_ring           652 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	if (tx_ring->desc == NULL || tx_ring->tx_buffer == NULL)
tx_ring           655 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	ring_count = tx_ring->count;
tx_ring           658 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		tx_buffer = &tx_ring->tx_buffer[index];
tx_ring           671 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		tx_buffer = &tx_ring->tx_buffer[index];
tx_ring           678 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	memset(tx_ring->desc, 0, sizeof(struct atl1e_tpd_desc) *
tx_ring           680 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	memset(tx_ring->tx_buffer, 0, sizeof(struct atl1e_tx_buffer) *
tx_ring           711 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	*ring_size = ((u32)(adapter->tx_ring.count *
tx_ring           737 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	rwlock_init(&adapter->tx_ring.tx_lock);
tx_ring           745 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	struct atl1e_tx_ring *tx_ring = NULL;
tx_ring           750 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	tx_ring = &adapter->tx_ring;
tx_ring           754 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	tx_ring->next_to_use = 0;
tx_ring           755 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	atomic_set(&tx_ring->next_to_clean, 0);
tx_ring           786 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	if (adapter->tx_ring.tx_buffer) {
tx_ring           787 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		kfree(adapter->tx_ring.tx_buffer);
tx_ring           788 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		adapter->tx_ring.tx_buffer = NULL;
tx_ring           801 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	struct atl1e_tx_ring *tx_ring;
tx_ring           811 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	tx_ring = &adapter->tx_ring;
tx_ring           828 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	tx_ring->dma = roundup(adapter->ring_dma, 8);
tx_ring           829 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	offset = tx_ring->dma - adapter->ring_dma;
tx_ring           830 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	tx_ring->desc = adapter->ring_vir_addr + offset;
tx_ring           831 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count);
tx_ring           832 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL);
tx_ring           833 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	if (tx_ring->tx_buffer == NULL) {
tx_ring           839 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	offset += (sizeof(struct atl1e_tpd_desc) * tx_ring->count);
tx_ring           853 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	tx_ring->cmb_dma = adapter->ring_dma + offset;
tx_ring           854 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	tx_ring->cmb = adapter->ring_vir_addr + offset;
tx_ring           889 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
tx_ring           896 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			(u32)((tx_ring->dma) & AT_DMA_LO_ADDR_MASK));
tx_ring           897 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	AT_WRITE_REG(hw, REG_TPD_RING_SIZE, (u16)(tx_ring->count));
tx_ring           899 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			(u32)((tx_ring->cmb_dma) & AT_DMA_LO_ADDR_MASK));
tx_ring          1231 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
tx_ring          1234 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	u16 next_to_clean = atomic_read(&tx_ring->next_to_clean);
tx_ring          1237 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		tx_buffer = &tx_ring->tx_buffer[next_to_clean];
tx_ring          1253 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		if (++next_to_clean == tx_ring->count)
tx_ring          1257 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	atomic_set(&tx_ring->next_to_clean, next_to_clean);
tx_ring          1554 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
tx_ring          1558 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	next_to_clean = atomic_read(&tx_ring->next_to_clean);
tx_ring          1559 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	next_to_use   = tx_ring->next_to_use;
tx_ring          1563 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		(tx_ring->count + next_to_clean - next_to_use - 1);
tx_ring          1573 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
tx_ring          1576 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	next_to_use = tx_ring->next_to_use;
tx_ring          1577 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	if (++tx_ring->next_to_use == tx_ring->count)
tx_ring          1578 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		tx_ring->next_to_use = 0;
tx_ring          1580 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc));
tx_ring          1581 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	return &tx_ring->desc[next_to_use];
tx_ring          1587 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
tx_ring          1589 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	return &tx_ring->tx_buffer[tpd - tx_ring->desc];
tx_ring          1705 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	int ring_start = adapter->tx_ring.next_to_use;
tx_ring          1751 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			ring_end = adapter->tx_ring.next_to_use;
tx_ring          1752 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			adapter->tx_ring.next_to_use = ring_start;
tx_ring          1753 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			while (adapter->tx_ring.next_to_use != ring_end) {
tx_ring          1760 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			adapter->tx_ring.next_to_use = ring_start;
tx_ring          1801 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 				ring_end = adapter->tx_ring.next_to_use;
tx_ring          1802 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 				adapter->tx_ring.next_to_use = ring_start;
tx_ring          1803 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 				while (adapter->tx_ring.next_to_use != ring_end) {
tx_ring          1811 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 				adapter->tx_ring.next_to_use = ring_start;
tx_ring          1838 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
tx_ring          1844 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	AT_WRITE_REG(&adapter->hw, REG_MB_TPD_PROD_IDX, tx_ring->next_to_use);
tx_ring           193 drivers/net/ethernet/atheros/atl1e/atl1e_param.c 			adapter->tx_ring.count = (u16) val & 0xFFFC;
tx_ring           195 drivers/net/ethernet/atheros/atl1e/atl1e_param.c 			adapter->tx_ring.count = (u16)opt.def;
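The atl1e entries above compute the number of free TX descriptors from next_to_clean and next_to_use, keeping one slot back so that equal indices unambiguously mean an empty ring (the wrapped branch shown is tx_ring->count + next_to_clean - next_to_use - 1). A sketch of that arithmetic, assuming the unwrapped branch mirrors the wrapped one visible above:

#include <stdio.h>

static unsigned int tpd_avail(unsigned int ring_count,
			      unsigned int next_to_clean,
			      unsigned int next_to_use)
{
	return next_to_clean > next_to_use ?
	       next_to_clean - next_to_use - 1 :
	       ring_count + next_to_clean - next_to_use - 1;
}

int main(void)
{
	printf("empty: %u free\n", tpd_avail(1024, 10, 10));    /* 1023: one slot held back */
	printf("full:  %u free\n", tpd_avail(1024, 11, 10));    /* 0: producer caught up */
	return 0;
}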
tx_ring          1028 drivers/net/ethernet/broadcom/b44.c 	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
tx_ring          1029 drivers/net/ethernet/broadcom/b44.c 	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
tx_ring          1033 drivers/net/ethernet/broadcom/b44.c 			                    entry * sizeof(bp->tx_ring[0]),
tx_ring          1136 drivers/net/ethernet/broadcom/b44.c 	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
tx_ring          1173 drivers/net/ethernet/broadcom/b44.c 	if (bp->tx_ring) {
tx_ring          1177 drivers/net/ethernet/broadcom/b44.c 			kfree(bp->tx_ring);
tx_ring          1180 drivers/net/ethernet/broadcom/b44.c 					  bp->tx_ring, bp->tx_ring_dma);
tx_ring          1181 drivers/net/ethernet/broadcom/b44.c 		bp->tx_ring = NULL;
tx_ring          1233 drivers/net/ethernet/broadcom/b44.c 	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
tx_ring          1235 drivers/net/ethernet/broadcom/b44.c 	if (!bp->tx_ring) {
tx_ring          1239 drivers/net/ethernet/broadcom/b44.c 		struct dma_desc *tx_ring;
tx_ring          1242 drivers/net/ethernet/broadcom/b44.c 		tx_ring = kzalloc(size, gfp);
tx_ring          1243 drivers/net/ethernet/broadcom/b44.c 		if (!tx_ring)
tx_ring          1246 drivers/net/ethernet/broadcom/b44.c 		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
tx_ring          1252 drivers/net/ethernet/broadcom/b44.c 			kfree(tx_ring);
tx_ring          1256 drivers/net/ethernet/broadcom/b44.c 		bp->tx_ring = tx_ring;
tx_ring           363 drivers/net/ethernet/broadcom/b44.h 	struct dma_desc		*rx_ring, *tx_ring;
tx_ring          2236 drivers/net/ethernet/broadcom/bcmsysport.c 	struct bcm_sysport_tx_ring *tx_ring;
tx_ring          2245 drivers/net/ethernet/broadcom/bcmsysport.c 	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
tx_ring          2247 drivers/net/ethernet/broadcom/bcmsysport.c 	if (unlikely(!tx_ring))
tx_ring          2250 drivers/net/ethernet/broadcom/bcmsysport.c 	return tx_ring->index;
tx_ring           593 drivers/net/ethernet/broadcom/bgmac.c 		bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]);
tx_ring           604 drivers/net/ethernet/broadcom/bgmac.c 		bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i],
tx_ring           632 drivers/net/ethernet/broadcom/bgmac.c 		ring = &bgmac->tx_ring[i];
tx_ring           692 drivers/net/ethernet/broadcom/bgmac.c 		ring = &bgmac->tx_ring[i];
tx_ring           918 drivers/net/ethernet/broadcom/bgmac.c 			bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);
tx_ring          1156 drivers/net/ethernet/broadcom/bgmac.c 	bgmac_dma_tx_free(bgmac, &bgmac->tx_ring[0]);
tx_ring          1230 drivers/net/ethernet/broadcom/bgmac.c 	ring = &bgmac->tx_ring[0];
tx_ring           502 drivers/net/ethernet/broadcom/bgmac.h 	struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS];
tx_ring           703 drivers/net/ethernet/broadcom/bnx2.c 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
tx_ring           755 drivers/net/ethernet/broadcom/bnx2.c 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
tx_ring          2850 drivers/net/ethernet/broadcom/bnx2.c 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
tx_ring          3394 drivers/net/ethernet/broadcom/bnx2.c 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
tx_ring          3488 drivers/net/ethernet/broadcom/bnx2.c 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
tx_ring          5140 drivers/net/ethernet/broadcom/bnx2.c 		txr = &bnapi->tx_ring;
tx_ring          5192 drivers/net/ethernet/broadcom/bnx2.c 	txr = &bnapi->tx_ring;
tx_ring          5439 drivers/net/ethernet/broadcom/bnx2.c 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
tx_ring          5825 drivers/net/ethernet/broadcom/bnx2.c 	txr = &tx_napi->tx_ring;
tx_ring          6611 drivers/net/ethernet/broadcom/bnx2.c 	txr = &bnapi->tx_ring;
tx_ring          6782 drivers/net/ethernet/broadcom/bnx2.h 	struct bnx2_tx_ring_info	tx_ring;
tx_ring           367 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	txr = &bp->tx_ring[bp->tx_ring_map[i]];
tx_ring           621 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
tx_ring          2224 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
tx_ring          2478 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	if (!bp->tx_ring)
tx_ring          2483 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
tx_ring          2885 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	if (!bp->tx_ring)
tx_ring          2889 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
tx_ring          2925 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
tx_ring          3126 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		txr = bnapi->tx_ring;
tx_ring          3302 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
tx_ring          3868 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		txr = bnapi->tx_ring;
tx_ring          3949 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		kfree(bp->tx_ring);
tx_ring          3950 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bp->tx_ring = NULL;
tx_ring          4010 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bp->tx_ring = kcalloc(bp->tx_nr_rings,
tx_ring          4013 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		if (!bp->tx_ring)
tx_ring          4028 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
tx_ring          4034 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			bp->bnapi[j]->tx_ring = txr;
tx_ring          5435 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
tx_ring          5558 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
tx_ring          6272 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
tx_ring          6287 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		if (bnapi->rx_ring && bnapi->tx_ring) {
tx_ring          6289 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
tx_ring          8340 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	if (bp->tx_ring) {
tx_ring          8342 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			txr = &bp->tx_ring[i];
tx_ring          8357 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		txr = &bp->tx_ring[i];
tx_ring          9885 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
tx_ring           947 drivers/net/ethernet/broadcom/bnxt/bnxt.h 	struct bnxt_tx_ring_info	*tx_ring;
tx_ring          1558 drivers/net/ethernet/broadcom/bnxt/bnxt.h 	struct bnxt_tx_ring_info	*tx_ring;
tx_ring          2814 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
tx_ring            73 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c 	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
tx_ring           135 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c 	txr = rxr->bnapi->tx_ring;
tx_ring           232 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c 	txr = &bp->tx_ring[ring];
tx_ring          2634 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct bcmgenet_tx_ring *tx_ring;
tx_ring          2666 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		tx_ring = &priv->tx_rings[index];
tx_ring          2668 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		if (likely(napi_schedule_prep(&tx_ring->napi))) {
tx_ring          2669 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			tx_ring->int_disable(tx_ring);
tx_ring          2670 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			__napi_schedule_irqoff(&tx_ring->napi);
tx_ring          2682 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct bcmgenet_tx_ring *tx_ring;
tx_ring          2707 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		tx_ring = &priv->tx_rings[DESC_INDEX];
tx_ring          2709 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		if (likely(napi_schedule_prep(&tx_ring->napi))) {
tx_ring          2710 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			tx_ring->int_disable(tx_ring);
tx_ring          2711 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			__napi_schedule_irqoff(&tx_ring->napi);
tx_ring          3177 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct bcmgenet_tx_ring *tx_ring;
tx_ring          3182 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		tx_ring = &priv->tx_rings[q];
tx_ring          3183 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		tx_bytes += tx_ring->bytes;
tx_ring          3184 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		tx_packets += tx_ring->packets;
tx_ring          3186 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	tx_ring = &priv->tx_rings[DESC_INDEX];
tx_ring          3187 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	tx_bytes += tx_ring->bytes;
tx_ring          3188 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	tx_packets += tx_ring->packets;
tx_ring          6569 drivers/net/ethernet/broadcom/tg3.c 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
tx_ring          7739 drivers/net/ethernet/broadcom/tg3.c 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
tx_ring          7750 drivers/net/ethernet/broadcom/tg3.c 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
tx_ring          7760 drivers/net/ethernet/broadcom/tg3.c 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
tx_ring          8600 drivers/net/ethernet/broadcom/tg3.c 		if (tnapi->tx_ring)
tx_ring          8601 drivers/net/ethernet/broadcom/tg3.c 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
tx_ring          8624 drivers/net/ethernet/broadcom/tg3.c 		if (tnapi->tx_ring) {
tx_ring          8626 drivers/net/ethernet/broadcom/tg3.c 				tnapi->tx_ring, tnapi->tx_desc_mapping);
tx_ring          8627 drivers/net/ethernet/broadcom/tg3.c 			tnapi->tx_ring = NULL;
tx_ring          8653 drivers/net/ethernet/broadcom/tg3.c 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
tx_ring          8657 drivers/net/ethernet/broadcom/tg3.c 		if (!tnapi->tx_ring)
tx_ring          9552 drivers/net/ethernet/broadcom/tg3.c 		if (!tnapi->tx_ring)
tx_ring          3027 drivers/net/ethernet/broadcom/tg3.h 	struct tg3_tx_buffer_desc	*tx_ring;
tx_ring          1123 drivers/net/ethernet/cadence/macb.h 	struct macb_dma_desc	*tx_ring;
tx_ring           188 drivers/net/ethernet/cadence/macb_main.c 	return &queue->tx_ring[index];
tx_ring          1949 drivers/net/ethernet/cadence/macb_main.c 		if (queue->tx_ring) {
tx_ring          1952 drivers/net/ethernet/cadence/macb_main.c 					  queue->tx_ring, queue->tx_ring_dma);
tx_ring          1953 drivers/net/ethernet/cadence/macb_main.c 			queue->tx_ring = NULL;
tx_ring          2008 drivers/net/ethernet/cadence/macb_main.c 		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
tx_ring          2011 drivers/net/ethernet/cadence/macb_main.c 		if (!queue->tx_ring)
tx_ring          2016 drivers/net/ethernet/cadence/macb_main.c 			   queue->tx_ring);
tx_ring           126 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	u64 *tx_ring;
tx_ring           272 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		re.d64 = p->tx_ring[p->tx_next_clean];
tx_ring           981 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
tx_ring           983 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	if (!p->tx_ring)
tx_ring           986 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		dma_map_single(p->dev, p->tx_ring,
tx_ring          1237 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	kfree(p->tx_ring);
tx_ring          1269 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	kfree(p->tx_ring);
tx_ring          1308 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->tx_ring[p->tx_next] = re.d64;
tx_ring           307 drivers/net/ethernet/dec/tulip/de2104x.c 	struct de_desc		*tx_ring;
tx_ring           550 drivers/net/ethernet/dec/tulip/de2104x.c 		status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
tx_ring           623 drivers/net/ethernet/dec/tulip/de2104x.c 	txd = &de->tx_ring[entry];
tx_ring           757 drivers/net/ethernet/dec/tulip/de2104x.c 		dummy_txd = &de->tx_ring[entry];
tx_ring           773 drivers/net/ethernet/dec/tulip/de2104x.c 	txd = &de->tx_ring[entry];
tx_ring          1308 drivers/net/ethernet/dec/tulip/de2104x.c 	memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
tx_ring          1309 drivers/net/ethernet/dec/tulip/de2104x.c 	de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
tx_ring          1322 drivers/net/ethernet/dec/tulip/de2104x.c 	de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
tx_ring          1333 drivers/net/ethernet/dec/tulip/de2104x.c 	memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
tx_ring          1334 drivers/net/ethernet/dec/tulip/de2104x.c 	de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
tx_ring          1372 drivers/net/ethernet/dec/tulip/de2104x.c 	de->tx_ring = NULL;
tx_ring           791 drivers/net/ethernet/dec/tulip/de4x5.c     struct de4x5_desc *tx_ring;		    /* TX descriptor ring           */
tx_ring          1178 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->tx_ring = lp->rx_ring + NUM_RX_DESC;
tx_ring          1221 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER);
tx_ring          1422 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->tx_ring[i].status = cpu_to_le32(0);
tx_ring          1437 drivers/net/ethernet/dec/tulip/de4x5.c 	if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1;
tx_ring          1679 drivers/net/ethernet/dec/tulip/de4x5.c     dma_unmap_single(lp->gendev, le32_to_cpu(lp->tx_ring[entry].buf),
tx_ring          1680 drivers/net/ethernet/dec/tulip/de4x5.c 		     le32_to_cpu(lp->tx_ring[entry].des1) & TD_TBS1,
tx_ring          1699 drivers/net/ethernet/dec/tulip/de4x5.c 	status = (s32)le32_to_cpu(lp->tx_ring[entry].status);
tx_ring          1902 drivers/net/ethernet/dec/tulip/de4x5.c     lp->tx_ring[lp->tx_new].buf = cpu_to_le32(buf_dma);
tx_ring          1903 drivers/net/ethernet/dec/tulip/de4x5.c     lp->tx_ring[lp->tx_new].des1 &= cpu_to_le32(TD_TER);
tx_ring          1904 drivers/net/ethernet/dec/tulip/de4x5.c     lp->tx_ring[lp->tx_new].des1 |= cpu_to_le32(flags);
tx_ring          1906 drivers/net/ethernet/dec/tulip/de4x5.c     lp->tx_ring[entry].des1 &= cpu_to_le32(~TD_IC);
tx_ring          1909 drivers/net/ethernet/dec/tulip/de4x5.c     lp->tx_ring[lp->tx_new].status = cpu_to_le32(T_OWN);
tx_ring          3573 drivers/net/ethernet/dec/tulip/de4x5.c 	((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
tx_ring          3578 drivers/net/ethernet/dec/tulip/de4x5.c 	    !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) &&
tx_ring          3665 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->tx_ring[i].status = 0;
tx_ring          3720 drivers/net/ethernet/dec/tulip/de4x5.c 	    lp->tx_ring[i].status = cpu_to_le32(0);
tx_ring          5230 drivers/net/ethernet/dec/tulip/de4x5.c 	printk("\t0x%8.8lx  0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
tx_ring          5241 drivers/net/ethernet/dec/tulip/de4x5.c 		printk("0x%8.8lx  ", (u_long)&lp->tx_ring[i].status);
tx_ring          5244 drivers/net/ethernet/dec/tulip/de4x5.c 	printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
tx_ring          5255 drivers/net/ethernet/dec/tulip/de4x5.c 		printk("0x%8.8x  ", le32_to_cpu(lp->tx_ring[i].buf));
tx_ring          5258 drivers/net/ethernet/dec/tulip/de4x5.c 	printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
tx_ring           591 drivers/net/ethernet/dec/tulip/interrupt.c 				int status = le32_to_cpu(tp->tx_ring[entry].status);
tx_ring           405 drivers/net/ethernet/dec/tulip/tulip.h 	struct tulip_tx_desc *tx_ring;
tx_ring           371 drivers/net/ethernet/dec/tulip/tulip_core.c 		tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
tx_ring           372 drivers/net/ethernet/dec/tulip/tulip_core.c 		tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
tx_ring           373 drivers/net/ethernet/dec/tulip/tulip_core.c 		tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
tx_ring           597 drivers/net/ethernet/dec/tulip/tulip_core.c 		printk(KERN_DEBUG "  Tx ring %p: ", tp->tx_ring);
tx_ring           599 drivers/net/ethernet/dec/tulip/tulip_core.c 			pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
tx_ring           657 drivers/net/ethernet/dec/tulip/tulip_core.c 		tp->tx_ring[i].status = 0x00000000;
tx_ring           658 drivers/net/ethernet/dec/tulip/tulip_core.c 		tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
tx_ring           660 drivers/net/ethernet/dec/tulip/tulip_core.c 	tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
tx_ring           681 drivers/net/ethernet/dec/tulip/tulip_core.c 	tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
tx_ring           696 drivers/net/ethernet/dec/tulip/tulip_core.c 	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
tx_ring           699 drivers/net/ethernet/dec/tulip/tulip_core.c 	tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
tx_ring           719 drivers/net/ethernet/dec/tulip/tulip_core.c 		int status = le32_to_cpu(tp->tx_ring[entry].status);
tx_ring           723 drivers/net/ethernet/dec/tulip/tulip_core.c 			tp->tx_ring[entry].status = 0;
tx_ring          1153 drivers/net/ethernet/dec/tulip/tulip_core.c 				tp->tx_ring[entry].length =
tx_ring          1155 drivers/net/ethernet/dec/tulip/tulip_core.c 				tp->tx_ring[entry].buffer1 = 0;
tx_ring          1170 drivers/net/ethernet/dec/tulip/tulip_core.c 			tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
tx_ring          1171 drivers/net/ethernet/dec/tulip/tulip_core.c 			tp->tx_ring[entry].buffer1 =
tx_ring          1173 drivers/net/ethernet/dec/tulip/tulip_core.c 			tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
tx_ring          1175 drivers/net/ethernet/dec/tulip/tulip_core.c 				tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
tx_ring          1448 drivers/net/ethernet/dec/tulip/tulip_core.c 	tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
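The tulip_core.c init path above chains the TX descriptors through buffer2: each entry's buffer2 is loaded with the bus address of the next descriptor and the last entry points back at tx_ring_dma. A standalone sketch of that wiring, where fake_tx_desc and the base address are hypothetical stand-ins rather than the driver's types:

#include <stdint.h>
#include <stdio.h>

#define TX_RING_SIZE 16

struct fake_tx_desc {
	uint32_t status;
	uint32_t length;
	uint32_t buffer1;
	uint32_t buffer2;    /* bus address of the next descriptor in the chain */
};

int main(void)
{
	struct fake_tx_desc ring[TX_RING_SIZE] = { 0 };
	uint32_t tx_ring_dma = 0x10000000u;    /* hypothetical bus address of the array */
	int i;

	for (i = 0; i < TX_RING_SIZE; i++)
		ring[i].buffer2 = tx_ring_dma + sizeof(ring[0]) * (i + 1);
	ring[TX_RING_SIZE - 1].buffer2 = tx_ring_dma;    /* close the loop back to the base */

	printf("last -> 0x%08x (ring base)\n", ring[TX_RING_SIZE - 1].buffer2);
	return 0;
}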
tx_ring           297 drivers/net/ethernet/dec/tulip/winbond-840.c 	struct w840_tx_desc *tx_ring;
tx_ring           798 drivers/net/ethernet/dec/tulip/winbond-840.c 	np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];
tx_ring           828 drivers/net/ethernet/dec/tulip/winbond-840.c 		np->tx_ring[i].status = 0;
tx_ring           939 drivers/net/ethernet/dec/tulip/winbond-840.c 		printk(KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
tx_ring           941 drivers/net/ethernet/dec/tulip/winbond-840.c 			printk(KERN_CONT " %08x", np->tx_ring[i].status);
tx_ring          1011 drivers/net/ethernet/dec/tulip/winbond-840.c 	np->tx_ring[entry].buffer1 = np->tx_addr[entry];
tx_ring          1013 drivers/net/ethernet/dec/tulip/winbond-840.c 		np->tx_ring[entry].length = DescWholePkt | skb->len;
tx_ring          1017 drivers/net/ethernet/dec/tulip/winbond-840.c 		np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
tx_ring          1018 drivers/net/ethernet/dec/tulip/winbond-840.c 		np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
tx_ring          1021 drivers/net/ethernet/dec/tulip/winbond-840.c 		np->tx_ring[entry].length |= DescEndRing;
tx_ring          1037 drivers/net/ethernet/dec/tulip/winbond-840.c 	np->tx_ring[entry].status = DescOwned;
tx_ring          1063 drivers/net/ethernet/dec/tulip/winbond-840.c 		int tx_status = np->tx_ring[entry].status;
tx_ring          1509 drivers/net/ethernet/dec/tulip/winbond-840.c 		printk(KERN_DEBUG"  Tx ring at %p:\n", np->tx_ring);
tx_ring          1512 drivers/net/ethernet/dec/tulip/winbond-840.c 			       i, np->tx_ring[i].length,
tx_ring          1513 drivers/net/ethernet/dec/tulip/winbond-840.c 			       np->tx_ring[i].status, np->tx_ring[i].buffer1);
tx_ring           237 drivers/net/ethernet/dlink/dl2k.c 	np->tx_ring = ring_space;
tx_ring           293 drivers/net/ethernet/dlink/dl2k.c 	pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
tx_ring           458 drivers/net/ethernet/dlink/dl2k.c 			pci_unmap_single(np->pdev, desc_to_dma(&np->tx_ring[i]),
tx_ring           476 drivers/net/ethernet/dlink/dl2k.c 		np->tx_ring[i].status = cpu_to_le64(TFDDone);
tx_ring           494 drivers/net/ethernet/dlink/dl2k.c 		np->tx_ring[i].next_desc = cpu_to_le64(np->tx_ring_dma +
tx_ring           726 drivers/net/ethernet/dlink/dl2k.c 	txdesc = &np->tx_ring[entry];
tx_ring           836 drivers/net/ethernet/dlink/dl2k.c 		if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone)))
tx_ring           840 drivers/net/ethernet/dlink/dl2k.c 				  desc_to_dma(&np->tx_ring[entry]),
tx_ring          1811 drivers/net/ethernet/dlink/dl2k.c 		pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
tx_ring           367 drivers/net/ethernet/dlink/dl2k.h 	struct netdev_desc *tx_ring;
tx_ring           372 drivers/net/ethernet/dlink/sundance.c 	struct netdev_desc *tx_ring;
tx_ring           562 drivers/net/ethernet/dlink/sundance.c 	np->tx_ring = (struct netdev_desc *)ring_space;
tx_ring           708 drivers/net/ethernet/dlink/sundance.c 		np->tx_ring, np->tx_ring_dma);
tx_ring           990 drivers/net/ethernet/dlink/sundance.c 				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
tx_ring           991 drivers/net/ethernet/dlink/sundance.c 				le32_to_cpu(np->tx_ring[i].next_desc),
tx_ring           992 drivers/net/ethernet/dlink/sundance.c 				le32_to_cpu(np->tx_ring[i].status),
tx_ring           993 drivers/net/ethernet/dlink/sundance.c 				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
tx_ring           994 drivers/net/ethernet/dlink/sundance.c 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
tx_ring           995 drivers/net/ethernet/dlink/sundance.c 				le32_to_cpu(np->tx_ring[i].frag[0].length));
tx_ring          1068 drivers/net/ethernet/dlink/sundance.c 		np->tx_ring[i].status = 0;
tx_ring          1078 drivers/net/ethernet/dlink/sundance.c 		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
tx_ring          1083 drivers/net/ethernet/dlink/sundance.c 		txdesc = &np->tx_ring[entry];
tx_ring          1108 drivers/net/ethernet/dlink/sundance.c 	txdesc = &np->tx_ring[entry];
tx_ring          1161 drivers/net/ethernet/dlink/sundance.c 		np->tx_ring[i].next_desc = 0;
tx_ring          1166 drivers/net/ethernet/dlink/sundance.c 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
tx_ring          1275 drivers/net/ethernet/dlink/sundance.c 					np->tx_ring[entry].status) >> 2) & 0xff;
tx_ring          1277 drivers/net/ethernet/dlink/sundance.c 					!(le32_to_cpu(np->tx_ring[entry].status)
tx_ring          1286 drivers/net/ethernet/dlink/sundance.c 					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
tx_ring          1290 drivers/net/ethernet/dlink/sundance.c 				np->tx_ring[entry].frag[0].addr = 0;
tx_ring          1291 drivers/net/ethernet/dlink/sundance.c 				np->tx_ring[entry].frag[0].length = 0;
tx_ring          1299 drivers/net/ethernet/dlink/sundance.c 				if (!(le32_to_cpu(np->tx_ring[entry].status)
tx_ring          1305 drivers/net/ethernet/dlink/sundance.c 					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
tx_ring          1309 drivers/net/ethernet/dlink/sundance.c 				np->tx_ring[entry].frag[0].addr = 0;
tx_ring          1310 drivers/net/ethernet/dlink/sundance.c 				np->tx_ring[entry].frag[0].length = 0;
tx_ring          1886 drivers/net/ethernet/dlink/sundance.c 				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
tx_ring          1887 drivers/net/ethernet/dlink/sundance.c 				   np->tx_ring[i].frag[0].length);
tx_ring          1916 drivers/net/ethernet/dlink/sundance.c 		np->tx_ring[i].next_desc = 0;
tx_ring          1920 drivers/net/ethernet/dlink/sundance.c 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
tx_ring          1940 drivers/net/ethernet/dlink/sundance.c 		    np->tx_ring, np->tx_ring_dma);
tx_ring           378 drivers/net/ethernet/fealnx.c 	struct fealnx_desc *tx_ring;
tx_ring           573 drivers/net/ethernet/fealnx.c 	np->tx_ring = ring_space;
tx_ring           673 drivers/net/ethernet/fealnx.c 	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
tx_ring           693 drivers/net/ethernet/fealnx.c 		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
tx_ring          1152 drivers/net/ethernet/fealnx.c 	iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring),
tx_ring          1211 drivers/net/ethernet/fealnx.c 		printk(KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
tx_ring          1213 drivers/net/ethernet/fealnx.c 			printk(KERN_CONT " %4.4x", np->tx_ring[i].status);
tx_ring          1275 drivers/net/ethernet/fealnx.c 	np->cur_tx = &np->tx_ring[0];
tx_ring          1276 drivers/net/ethernet/fealnx.c 	np->cur_tx_copy = &np->tx_ring[0];
tx_ring          1281 drivers/net/ethernet/fealnx.c 		np->tx_ring[i].status = 0;
tx_ring          1283 drivers/net/ethernet/fealnx.c 		np->tx_ring[i].next_desc = np->tx_ring_dma +
tx_ring          1285 drivers/net/ethernet/fealnx.c 		np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
tx_ring          1286 drivers/net/ethernet/fealnx.c 		np->tx_ring[i].skbuff = NULL;
tx_ring          1290 drivers/net/ethernet/fealnx.c 	np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
tx_ring          1291 drivers/net/ethernet/fealnx.c 	np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
tx_ring          1380 drivers/net/ethernet/fealnx.c 	np->cur_tx = &np->tx_ring[0];
tx_ring          1381 drivers/net/ethernet/fealnx.c 	np->cur_tx_copy = &np->tx_ring[0];
tx_ring          1386 drivers/net/ethernet/fealnx.c 		cur = &np->tx_ring[i];
tx_ring          1398 drivers/net/ethernet/fealnx.c 		cur->next_desc_logical = &np->tx_ring[i + 1];
tx_ring          1401 drivers/net/ethernet/fealnx.c 	np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
tx_ring          1402 drivers/net/ethernet/fealnx.c 	np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];
tx_ring          1922 drivers/net/ethernet/fealnx.c 		struct sk_buff *skb = np->tx_ring[i].skbuff;
tx_ring          1925 drivers/net/ethernet/fealnx.c 			pci_unmap_single(np->pci_dev, np->tx_ring[i].buffer,
tx_ring          1928 drivers/net/ethernet/fealnx.c 			np->tx_ring[i].skbuff = NULL;
tx_ring            16 drivers/net/ethernet/freescale/enetc/enetc.c static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
tx_ring            22 drivers/net/ethernet/freescale/enetc/enetc.c 	struct enetc_bdr *tx_ring;
tx_ring            25 drivers/net/ethernet/freescale/enetc/enetc.c 	tx_ring = priv->tx_ring[skb->queue_mapping];
tx_ring            32 drivers/net/ethernet/freescale/enetc/enetc.c 	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
tx_ring            33 drivers/net/ethernet/freescale/enetc/enetc.c 		netif_stop_subqueue(ndev, tx_ring->index);
tx_ring            37 drivers/net/ethernet/freescale/enetc/enetc.c 	count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
tx_ring            41 drivers/net/ethernet/freescale/enetc/enetc.c 	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
tx_ring            42 drivers/net/ethernet/freescale/enetc/enetc.c 		netif_stop_subqueue(ndev, tx_ring->index);
tx_ring            85 drivers/net/ethernet/freescale/enetc/enetc.c static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
tx_ring            89 drivers/net/ethernet/freescale/enetc/enetc.c 		dma_unmap_page(tx_ring->dev, tx_swbd->dma,
tx_ring            92 drivers/net/ethernet/freescale/enetc/enetc.c 		dma_unmap_single(tx_ring->dev, tx_swbd->dma,
tx_ring            97 drivers/net/ethernet/freescale/enetc/enetc.c static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
tx_ring           101 drivers/net/ethernet/freescale/enetc/enetc.c 		enetc_unmap_tx_buff(tx_ring, tx_swbd);
tx_ring           109 drivers/net/ethernet/freescale/enetc/enetc.c static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
tx_ring           123 drivers/net/ethernet/freescale/enetc/enetc.c 	i = tx_ring->next_to_use;
tx_ring           124 drivers/net/ethernet/freescale/enetc/enetc.c 	txbd = ENETC_TXBD(*tx_ring, i);
tx_ring           127 drivers/net/ethernet/freescale/enetc/enetc.c 	dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
tx_ring           128 drivers/net/ethernet/freescale/enetc/enetc.c 	if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
tx_ring           135 drivers/net/ethernet/freescale/enetc/enetc.c 	tx_swbd = &tx_ring->tx_swbd[i];
tx_ring           167 drivers/net/ethernet/freescale/enetc/enetc.c 		if (unlikely(i == tx_ring->bd_count)) {
tx_ring           169 drivers/net/ethernet/freescale/enetc/enetc.c 			tx_swbd = tx_ring->tx_swbd;
tx_ring           170 drivers/net/ethernet/freescale/enetc/enetc.c 			txbd = ENETC_TXBD(*tx_ring, 0);
tx_ring           192 drivers/net/ethernet/freescale/enetc/enetc.c 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
tx_ring           194 drivers/net/ethernet/freescale/enetc/enetc.c 		if (dma_mapping_error(tx_ring->dev, dma))
tx_ring           204 drivers/net/ethernet/freescale/enetc/enetc.c 		if (unlikely(i == tx_ring->bd_count)) {
tx_ring           206 drivers/net/ethernet/freescale/enetc/enetc.c 			tx_swbd = tx_ring->tx_swbd;
tx_ring           207 drivers/net/ethernet/freescale/enetc/enetc.c 			txbd = ENETC_TXBD(*tx_ring, 0);
tx_ring           225 drivers/net/ethernet/freescale/enetc/enetc.c 	tx_ring->tx_swbd[i].skb = skb;
tx_ring           227 drivers/net/ethernet/freescale/enetc/enetc.c 	enetc_bdr_idx_inc(tx_ring, &i);
tx_ring           228 drivers/net/ethernet/freescale/enetc/enetc.c 	tx_ring->next_to_use = i;
tx_ring           231 drivers/net/ethernet/freescale/enetc/enetc.c 	enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */
tx_ring           236 drivers/net/ethernet/freescale/enetc/enetc.c 	dev_err(tx_ring->dev, "DMA map error");
tx_ring           239 drivers/net/ethernet/freescale/enetc/enetc.c 		tx_swbd = &tx_ring->tx_swbd[i];
tx_ring           240 drivers/net/ethernet/freescale/enetc/enetc.c 		enetc_free_tx_skb(tx_ring, tx_swbd);
tx_ring           242 drivers/net/ethernet/freescale/enetc/enetc.c 			i = tx_ring->bd_count;
tx_ring           265 drivers/net/ethernet/freescale/enetc/enetc.c static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
tx_ring           278 drivers/net/ethernet/freescale/enetc/enetc.c 		if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
tx_ring           300 drivers/net/ethernet/freescale/enetc/enetc.c static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
tx_ring           302 drivers/net/ethernet/freescale/enetc/enetc.c 	int pi = enetc_rd_reg(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
tx_ring           304 drivers/net/ethernet/freescale/enetc/enetc.c 	return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
tx_ring           331 drivers/net/ethernet/freescale/enetc/enetc.c static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
tx_ring           333 drivers/net/ethernet/freescale/enetc/enetc.c 	struct net_device *ndev = tx_ring->ndev;
tx_ring           340 drivers/net/ethernet/freescale/enetc/enetc.c 	i = tx_ring->next_to_clean;
tx_ring           341 drivers/net/ethernet/freescale/enetc/enetc.c 	tx_swbd = &tx_ring->tx_swbd[i];
tx_ring           342 drivers/net/ethernet/freescale/enetc/enetc.c 	bds_to_clean = enetc_bd_ready_count(tx_ring, i);
tx_ring           353 drivers/net/ethernet/freescale/enetc/enetc.c 			txbd = ENETC_TXBD(*tx_ring, i);
tx_ring           364 drivers/net/ethernet/freescale/enetc/enetc.c 			enetc_unmap_tx_buff(tx_ring, tx_swbd);
tx_ring           380 drivers/net/ethernet/freescale/enetc/enetc.c 		if (unlikely(i == tx_ring->bd_count)) {
tx_ring           382 drivers/net/ethernet/freescale/enetc/enetc.c 			tx_swbd = tx_ring->tx_swbd;
tx_ring           389 drivers/net/ethernet/freescale/enetc/enetc.c 			enetc_wr_reg(tx_ring->idr, BIT(tx_ring->index) |
tx_ring           390 drivers/net/ethernet/freescale/enetc/enetc.c 				     BIT(16 + tx_ring->index));
tx_ring           394 drivers/net/ethernet/freescale/enetc/enetc.c 			bds_to_clean = enetc_bd_ready_count(tx_ring, i);
tx_ring           397 drivers/net/ethernet/freescale/enetc/enetc.c 	tx_ring->next_to_clean = i;
tx_ring           398 drivers/net/ethernet/freescale/enetc/enetc.c 	tx_ring->stats.packets += tx_frm_cnt;
tx_ring           399 drivers/net/ethernet/freescale/enetc/enetc.c 	tx_ring->stats.bytes += tx_byte_cnt;
tx_ring           402 drivers/net/ethernet/freescale/enetc/enetc.c 		     __netif_subqueue_stopped(ndev, tx_ring->index) &&
tx_ring           403 drivers/net/ethernet/freescale/enetc/enetc.c 		     (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
tx_ring           404 drivers/net/ethernet/freescale/enetc/enetc.c 		netif_wake_subqueue(ndev, tx_ring->index);
tx_ring           808 drivers/net/ethernet/freescale/enetc/enetc.c 		err = enetc_alloc_txbdr(priv->tx_ring[i]);
tx_ring           818 drivers/net/ethernet/freescale/enetc/enetc.c 		enetc_free_txbdr(priv->tx_ring[i]);
tx_ring           828 drivers/net/ethernet/freescale/enetc/enetc.c 		enetc_free_txbdr(priv->tx_ring[i]);
tx_ring           893 drivers/net/ethernet/freescale/enetc/enetc.c static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
tx_ring           897 drivers/net/ethernet/freescale/enetc/enetc.c 	if (!tx_ring->tx_swbd)
tx_ring           900 drivers/net/ethernet/freescale/enetc/enetc.c 	for (i = 0; i < tx_ring->bd_count; i++) {
tx_ring           901 drivers/net/ethernet/freescale/enetc/enetc.c 		struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
tx_ring           903 drivers/net/ethernet/freescale/enetc/enetc.c 		enetc_free_tx_skb(tx_ring, tx_swbd);
tx_ring           906 drivers/net/ethernet/freescale/enetc/enetc.c 	tx_ring->next_to_clean = 0;
tx_ring           907 drivers/net/ethernet/freescale/enetc/enetc.c 	tx_ring->next_to_use = 0;
tx_ring           942 drivers/net/ethernet/freescale/enetc/enetc.c 		enetc_free_tx_ring(priv->tx_ring[i]);
tx_ring          1103 drivers/net/ethernet/freescale/enetc/enetc.c static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
tx_ring          1105 drivers/net/ethernet/freescale/enetc/enetc.c 	int idx = tx_ring->index;
tx_ring          1109 drivers/net/ethernet/freescale/enetc/enetc.c 		       lower_32_bits(tx_ring->bd_dma_base));
tx_ring          1112 drivers/net/ethernet/freescale/enetc/enetc.c 		       upper_32_bits(tx_ring->bd_dma_base));
tx_ring          1114 drivers/net/ethernet/freescale/enetc/enetc.c 	WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
tx_ring          1116 drivers/net/ethernet/freescale/enetc/enetc.c 		       ENETC_RTBLENR_LEN(tx_ring->bd_count));
tx_ring          1119 drivers/net/ethernet/freescale/enetc/enetc.c 	tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
tx_ring          1120 drivers/net/ethernet/freescale/enetc/enetc.c 	tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);
tx_ring          1126 drivers/net/ethernet/freescale/enetc/enetc.c 	if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
tx_ring          1132 drivers/net/ethernet/freescale/enetc/enetc.c 	tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
tx_ring          1133 drivers/net/ethernet/freescale/enetc/enetc.c 	tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
tx_ring          1134 drivers/net/ethernet/freescale/enetc/enetc.c 	tx_ring->idr = hw->reg + ENETC_SITXIDR;
tx_ring          1180 drivers/net/ethernet/freescale/enetc/enetc.c 		enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
tx_ring          1194 drivers/net/ethernet/freescale/enetc/enetc.c static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
tx_ring          1197 drivers/net/ethernet/freescale/enetc/enetc.c 	int idx = tx_ring->index;
tx_ring          1210 drivers/net/ethernet/freescale/enetc/enetc.c 		netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n",
tx_ring          1219 drivers/net/ethernet/freescale/enetc/enetc.c 		enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
tx_ring          1253 drivers/net/ethernet/freescale/enetc/enetc.c 			int idx = v->tx_ring[j].index;
tx_ring          1435 drivers/net/ethernet/freescale/enetc/enetc.c 	struct enetc_bdr *tx_ring;
tx_ring          1451 drivers/net/ethernet/freescale/enetc/enetc.c 			tx_ring = priv->tx_ring[i];
tx_ring          1452 drivers/net/ethernet/freescale/enetc/enetc.c 			enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0);
tx_ring          1470 drivers/net/ethernet/freescale/enetc/enetc.c 		tx_ring = priv->tx_ring[i];
tx_ring          1471 drivers/net/ethernet/freescale/enetc/enetc.c 		enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i);
tx_ring          1504 drivers/net/ethernet/freescale/enetc/enetc.c 		packets += priv->tx_ring[i]->stats.packets;
tx_ring          1505 drivers/net/ethernet/freescale/enetc/enetc.c 		bytes	+= priv->tx_ring[i]->stats.bytes;
tx_ring          1653 drivers/net/ethernet/freescale/enetc/enetc.c 			bdr = &v->tx_ring[j];
tx_ring          1658 drivers/net/ethernet/freescale/enetc/enetc.c 			priv->tx_ring[idx] = bdr;
tx_ring          1696 drivers/net/ethernet/freescale/enetc/enetc.c 		priv->tx_ring[i] = NULL;
tx_ring           162 drivers/net/ethernet/freescale/enetc/enetc.h 	struct enetc_bdr tx_ring[0];
tx_ring           191 drivers/net/ethernet/freescale/enetc/enetc.h 	struct enetc_bdr *tx_ring[16];
tx_ring           259 drivers/net/ethernet/freescale/enetc/enetc_ethtool.c 		data[o++] = priv->tx_ring[i]->stats.packets;
tx_ring           109 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 		if (fep->tx_free == fep->tx_ring)
tx_ring           371 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	fep->tx_free = fep->tx_ring;
tx_ring           403 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
tx_ring           407 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 		CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
tx_ring           421 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
tx_ring           949 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	fpi->tx_ring = TX_RING_SIZE;
tx_ring           986 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 		     (fpi->rx_ring + fpi->tx_ring) +
tx_ring           987 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 		   sizeof(char) * fpi->tx_ring;
tx_ring          1011 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 				       fpi->tx_ring);
tx_ring          1027 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	fep->tx_ring = fpi->tx_ring;
tx_ring           130 drivers/net/ethernet/freescale/fs_enet/fs_enet.h 	int rx_ring, tx_ring;
tx_ring           151 drivers/net/ethernet/freescale/fs_enet/mac-fcc.c 					    (fpi->tx_ring + fpi->rx_ring) *
tx_ring           167 drivers/net/ethernet/freescale/fs_enet/mac-fcc.c 			(fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
tx_ring           527 drivers/net/ethernet/freescale/fs_enet/mac-fcc.c 	last_tx_bd = fep->tx_bd_base + (fpi->tx_ring - 1);
tx_ring           135 drivers/net/ethernet/freescale/fs_enet/mac-fec.c 					    (fpi->tx_ring + fpi->rx_ring) *
tx_ring           150 drivers/net/ethernet/freescale/fs_enet/mac-fec.c 		dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring)
tx_ring           137 drivers/net/ethernet/freescale/fs_enet/mac-scc.c 	fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) *
tx_ring           246 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 	struct hix5hd2_sg_desc_ring tx_ring;
tx_ring           574 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 	desc = priv->tx_ring.desc + pos;
tx_ring           710 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 	desc = priv->tx_ring.desc + pos;
tx_ring           765 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		addr = priv->tx_ring.phys_addr + pos * sizeof(struct sg_desc);
tx_ring          1033 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 	priv->tx_ring.desc = desc;
tx_ring          1034 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 	priv->tx_ring.phys_addr = phys_addr;
tx_ring          1041 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 	if (priv->tx_ring.desc) {
tx_ring          1044 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 				  priv->tx_ring.desc, priv->tx_ring.phys_addr);
tx_ring          1045 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		priv->tx_ring.desc = NULL;
tx_ring           245 drivers/net/ethernet/hisilicon/hns/hnae.c 	ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
tx_ring           259 drivers/net/ethernet/hisilicon/hns/hnae.c 	hnae_fini_ring(&q->tx_ring);
tx_ring           269 drivers/net/ethernet/hisilicon/hns/hnae.c 	hnae_fini_ring(&q->tx_ring);
tx_ring           357 drivers/net/ethernet/hisilicon/hns/hnae.h 	struct hnae_ring tx_ring ____cacheline_internodealigned_in_smp;
tx_ring           586 drivers/net/ethernet/hisilicon/hns/hnae.h 	(q)->tx_ring.io_base + RCB_REG_TAIL)
tx_ring           117 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];
tx_ring           666 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		tx_bytes += queue->tx_ring.stats.tx_bytes;
tx_ring           667 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		tx_packets += queue->tx_ring.stats.tx_pkts;
tx_ring            70 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	tail = dsaf_read_dev(&qs->tx_ring, RCB_REG_TAIL);
tx_ring            73 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 		head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD);
tx_ring           250 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 		(ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
tx_ring           456 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 		ring = &q->tx_ring;
tx_ring           863 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[4] = queue->tx_ring.stats.tx_pkts;
tx_ring           864 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[5] = queue->tx_ring.stats.tx_bytes;
tx_ring           865 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[6] = queue->tx_ring.stats.tx_err_cnt;
tx_ring           866 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[7] = queue->tx_ring.stats.io_err_cnt;
tx_ring           867 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[8] = queue->tx_ring.stats.sw_err_cnt;
tx_ring           868 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[9] = queue->tx_ring.stats.seg_pkt_cnt;
tx_ring           869 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[10] = queue->tx_ring.stats.restart_queue;
tx_ring           870 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[11] = queue->tx_ring.stats.tx_busy;
tx_ring          1906 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
tx_ring          1907 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
tx_ring          2007 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			i, h->qs[i]->tx_ring.next_to_clean);
tx_ring          2009 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			i, h->qs[i]->tx_ring.next_to_use);
tx_ring          2122 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		rd->ring = &h->qs[i]->tx_ring;
tx_ring           681 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	param->tx_pending = queue->tx_ring.desc_num;
tx_ring           975 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
tx_ring           978 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct hnae3_handle *handle = tx_ring->tqp->handle;
tx_ring          1680 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct hns3_enet_ring *tx_ring = NULL;
tx_ring          1717 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	tx_ring = priv->ring_data[timeout_queue].ring;
tx_ring          1718 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	napi = &tx_ring->tqp_vector->napi;
tx_ring          1722 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		    priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use,
tx_ring          1723 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		    tx_ring->next_to_clean, napi->state);
tx_ring          1727 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		    tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
tx_ring          1728 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		    tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt);
tx_ring          1732 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		    tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt,
tx_ring          1733 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		    tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
tx_ring          1746 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	hw_head = readl_relaxed(tx_ring->tqp->io_base +
tx_ring          1748 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	hw_tail = readl_relaxed(tx_ring->tqp->io_base +
tx_ring          1750 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	fbd_num = readl_relaxed(tx_ring->tqp->io_base +
tx_ring          1752 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	fbd_oft = readl_relaxed(tx_ring->tqp->io_base +
tx_ring          1754 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ebd_num = readl_relaxed(tx_ring->tqp->io_base +
tx_ring          1756 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ebd_oft = readl_relaxed(tx_ring->tqp->io_base +
tx_ring          1758 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	bd_num = readl_relaxed(tx_ring->tqp->io_base +
tx_ring          1760 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	bd_err = readl_relaxed(tx_ring->tqp->io_base +
tx_ring          1762 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG);
tx_ring          1763 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG);
tx_ring          1768 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		    readl(tx_ring->tqp_vector->mask_addr));
tx_ring          3195 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct hns3_enet_ring *tx_ring;
tx_ring          3198 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	tx_ring = tqp_vector->tx_group.ring;
tx_ring          3199 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (tx_ring) {
tx_ring          3200 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		cur_chain->tqp_index = tx_ring->tqp->tqp_index;
tx_ring          3208 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		while (tx_ring->next) {
tx_ring          3209 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			tx_ring = tx_ring->next;
tx_ring          3217 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			chain->tqp_index = tx_ring->tqp->tqp_index;
tx_ring          3230 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (!tx_ring && rx_ring) {
tx_ring           229 drivers/net/ethernet/intel/e1000/e1000.h 	struct e1000_tx_ring *tx_ring;      /* One per active queue */
tx_ring           549 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 	struct e1000_tx_ring *txdr = adapter->tx_ring;
tx_ring           579 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 	tx_old = adapter->tx_ring;
tx_ring           593 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 	adapter->tx_ring = txdr;
tx_ring           625 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 		adapter->tx_ring = tx_old;
tx_ring           629 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 		adapter->tx_ring = txdr;
tx_ring           642 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 	adapter->tx_ring = tx_old;
tx_ring            81 drivers/net/ethernet/intel/e1000/e1000_main.c 				    struct e1000_tx_ring *tx_ring);
tx_ring           100 drivers/net/ethernet/intel/e1000/e1000_main.c 				struct e1000_tx_ring *tx_ring);
tx_ring           113 drivers/net/ethernet/intel/e1000/e1000_main.c 			       struct e1000_tx_ring *tx_ring);
tx_ring          1230 drivers/net/ethernet/intel/e1000/e1000_main.c 	kfree(adapter->tx_ring);
tx_ring          1271 drivers/net/ethernet/intel/e1000/e1000_main.c 	kfree(adapter->tx_ring);
tx_ring          1326 drivers/net/ethernet/intel/e1000/e1000_main.c 	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
tx_ring          1328 drivers/net/ethernet/intel/e1000/e1000_main.c 	if (!adapter->tx_ring)
tx_ring          1334 drivers/net/ethernet/intel/e1000/e1000_main.c 		kfree(adapter->tx_ring);
tx_ring          1568 drivers/net/ethernet/intel/e1000/e1000_main.c 		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
tx_ring          1573 drivers/net/ethernet/intel/e1000/e1000_main.c 							&adapter->tx_ring[i]);
tx_ring          1599 drivers/net/ethernet/intel/e1000/e1000_main.c 		tdba = adapter->tx_ring[0].dma;
tx_ring          1600 drivers/net/ethernet/intel/e1000/e1000_main.c 		tdlen = adapter->tx_ring[0].count *
tx_ring          1607 drivers/net/ethernet/intel/e1000/e1000_main.c 		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
tx_ring          1609 drivers/net/ethernet/intel/e1000/e1000_main.c 		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
tx_ring          1919 drivers/net/ethernet/intel/e1000/e1000_main.c 				    struct e1000_tx_ring *tx_ring)
tx_ring          1923 drivers/net/ethernet/intel/e1000/e1000_main.c 	e1000_clean_tx_ring(adapter, tx_ring);
tx_ring          1925 drivers/net/ethernet/intel/e1000/e1000_main.c 	vfree(tx_ring->buffer_info);
tx_ring          1926 drivers/net/ethernet/intel/e1000/e1000_main.c 	tx_ring->buffer_info = NULL;
tx_ring          1928 drivers/net/ethernet/intel/e1000/e1000_main.c 	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
tx_ring          1929 drivers/net/ethernet/intel/e1000/e1000_main.c 			  tx_ring->dma);
tx_ring          1931 drivers/net/ethernet/intel/e1000/e1000_main.c 	tx_ring->desc = NULL;
tx_ring          1945 drivers/net/ethernet/intel/e1000/e1000_main.c 		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
tx_ring          1976 drivers/net/ethernet/intel/e1000/e1000_main.c 				struct e1000_tx_ring *tx_ring)
tx_ring          1985 drivers/net/ethernet/intel/e1000/e1000_main.c 	for (i = 0; i < tx_ring->count; i++) {
tx_ring          1986 drivers/net/ethernet/intel/e1000/e1000_main.c 		buffer_info = &tx_ring->buffer_info[i];
tx_ring          1991 drivers/net/ethernet/intel/e1000/e1000_main.c 	size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
tx_ring          1992 drivers/net/ethernet/intel/e1000/e1000_main.c 	memset(tx_ring->buffer_info, 0, size);
tx_ring          1996 drivers/net/ethernet/intel/e1000/e1000_main.c 	memset(tx_ring->desc, 0, tx_ring->size);
tx_ring          1998 drivers/net/ethernet/intel/e1000/e1000_main.c 	tx_ring->next_to_use = 0;
tx_ring          1999 drivers/net/ethernet/intel/e1000/e1000_main.c 	tx_ring->next_to_clean = 0;
tx_ring          2000 drivers/net/ethernet/intel/e1000/e1000_main.c 	tx_ring->last_tx_tso = false;
tx_ring          2002 drivers/net/ethernet/intel/e1000/e1000_main.c 	writel(0, hw->hw_addr + tx_ring->tdh);
tx_ring          2003 drivers/net/ethernet/intel/e1000/e1000_main.c 	writel(0, hw->hw_addr + tx_ring->tdt);
tx_ring          2015 drivers/net/ethernet/intel/e1000/e1000_main.c 		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
tx_ring          2426 drivers/net/ethernet/intel/e1000/e1000_main.c 	struct e1000_tx_ring *txdr = adapter->tx_ring;
tx_ring          2688 drivers/net/ethernet/intel/e1000/e1000_main.c 		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
tx_ring          2734 drivers/net/ethernet/intel/e1000/e1000_main.c 		i = tx_ring->next_to_use;
tx_ring          2735 drivers/net/ethernet/intel/e1000/e1000_main.c 		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
tx_ring          2736 drivers/net/ethernet/intel/e1000/e1000_main.c 		buffer_info = &tx_ring->buffer_info[i];
tx_ring          2751 drivers/net/ethernet/intel/e1000/e1000_main.c 		if (++i == tx_ring->count)
tx_ring          2754 drivers/net/ethernet/intel/e1000/e1000_main.c 		tx_ring->next_to_use = i;
tx_ring          2762 drivers/net/ethernet/intel/e1000/e1000_main.c 			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
tx_ring          2793 drivers/net/ethernet/intel/e1000/e1000_main.c 	i = tx_ring->next_to_use;
tx_ring          2794 drivers/net/ethernet/intel/e1000/e1000_main.c 	buffer_info = &tx_ring->buffer_info[i];
tx_ring          2795 drivers/net/ethernet/intel/e1000/e1000_main.c 	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
tx_ring          2808 drivers/net/ethernet/intel/e1000/e1000_main.c 	if (unlikely(++i == tx_ring->count))
tx_ring          2811 drivers/net/ethernet/intel/e1000/e1000_main.c 	tx_ring->next_to_use = i;
tx_ring          2820 drivers/net/ethernet/intel/e1000/e1000_main.c 			struct e1000_tx_ring *tx_ring,
tx_ring          2832 drivers/net/ethernet/intel/e1000/e1000_main.c 	i = tx_ring->next_to_use;
tx_ring          2835 drivers/net/ethernet/intel/e1000/e1000_main.c 		buffer_info = &tx_ring->buffer_info[i];
tx_ring          2842 drivers/net/ethernet/intel/e1000/e1000_main.c 		if (!skb->data_len && tx_ring->last_tx_tso &&
tx_ring          2844 drivers/net/ethernet/intel/e1000/e1000_main.c 			tx_ring->last_tx_tso = false;
tx_ring          2886 drivers/net/ethernet/intel/e1000/e1000_main.c 			if (unlikely(i == tx_ring->count))
tx_ring          2900 drivers/net/ethernet/intel/e1000/e1000_main.c 			if (unlikely(i == tx_ring->count))
tx_ring          2903 drivers/net/ethernet/intel/e1000/e1000_main.c 			buffer_info = &tx_ring->buffer_info[i];
tx_ring          2942 drivers/net/ethernet/intel/e1000/e1000_main.c 	tx_ring->buffer_info[i].skb = skb;
tx_ring          2943 drivers/net/ethernet/intel/e1000/e1000_main.c 	tx_ring->buffer_info[i].segs = segs;
tx_ring          2944 drivers/net/ethernet/intel/e1000/e1000_main.c 	tx_ring->buffer_info[i].bytecount = bytecount;
tx_ring          2945 drivers/net/ethernet/intel/e1000/e1000_main.c 	tx_ring->buffer_info[first].next_to_watch = i;
tx_ring          2957 drivers/net/ethernet/intel/e1000/e1000_main.c 			i += tx_ring->count;
tx_ring          2959 drivers/net/ethernet/intel/e1000/e1000_main.c 		buffer_info = &tx_ring->buffer_info[i];
tx_ring          2967 drivers/net/ethernet/intel/e1000/e1000_main.c 			   struct e1000_tx_ring *tx_ring, int tx_flags,
tx_ring          2997 drivers/net/ethernet/intel/e1000/e1000_main.c 	i = tx_ring->next_to_use;
tx_ring          3000 drivers/net/ethernet/intel/e1000/e1000_main.c 		buffer_info = &tx_ring->buffer_info[i];
tx_ring          3001 drivers/net/ethernet/intel/e1000/e1000_main.c 		tx_desc = E1000_TX_DESC(*tx_ring, i);
tx_ring          3006 drivers/net/ethernet/intel/e1000/e1000_main.c 		if (unlikely(++i == tx_ring->count))
tx_ring          3023 drivers/net/ethernet/intel/e1000/e1000_main.c 	tx_ring->next_to_use = i;
tx_ring          3066 drivers/net/ethernet/intel/e1000/e1000_main.c 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
tx_ring          3078 drivers/net/ethernet/intel/e1000/e1000_main.c 	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
tx_ring          3088 drivers/net/ethernet/intel/e1000/e1000_main.c 			       struct e1000_tx_ring *tx_ring, int size)
tx_ring          3090 drivers/net/ethernet/intel/e1000/e1000_main.c 	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
tx_ring          3101 drivers/net/ethernet/intel/e1000/e1000_main.c 	struct e1000_tx_ring *tx_ring;
tx_ring          3118 drivers/net/ethernet/intel/e1000/e1000_main.c 	tx_ring = adapter->tx_ring;
tx_ring          3178 drivers/net/ethernet/intel/e1000/e1000_main.c 	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
tx_ring          3203 drivers/net/ethernet/intel/e1000/e1000_main.c 	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
tx_ring          3220 drivers/net/ethernet/intel/e1000/e1000_main.c 	first = tx_ring->next_to_use;
tx_ring          3222 drivers/net/ethernet/intel/e1000/e1000_main.c 	tso = e1000_tso(adapter, tx_ring, skb, protocol);
tx_ring          3230 drivers/net/ethernet/intel/e1000/e1000_main.c 			tx_ring->last_tx_tso = true;
tx_ring          3232 drivers/net/ethernet/intel/e1000/e1000_main.c 	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
tx_ring          3241 drivers/net/ethernet/intel/e1000/e1000_main.c 	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
tx_ring          3257 drivers/net/ethernet/intel/e1000/e1000_main.c 		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
tx_ring          3267 drivers/net/ethernet/intel/e1000/e1000_main.c 		e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
tx_ring          3271 drivers/net/ethernet/intel/e1000/e1000_main.c 			writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
tx_ring          3275 drivers/net/ethernet/intel/e1000/e1000_main.c 		tx_ring->buffer_info[first].time_stamp = 0;
tx_ring          3276 drivers/net/ethernet/intel/e1000/e1000_main.c 		tx_ring->next_to_use = first;
tx_ring          3355 drivers/net/ethernet/intel/e1000/e1000_main.c 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
tx_ring          3401 drivers/net/ethernet/intel/e1000/e1000_main.c 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
tx_ring          3402 drivers/net/ethernet/intel/e1000/e1000_main.c 		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
tx_ring          3403 drivers/net/ethernet/intel/e1000/e1000_main.c 		struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
tx_ring          3408 drivers/net/ethernet/intel/e1000/e1000_main.c 		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
tx_ring          3410 drivers/net/ethernet/intel/e1000/e1000_main.c 		else if (i == tx_ring->next_to_use)
tx_ring          3412 drivers/net/ethernet/intel/e1000/e1000_main.c 		else if (i == tx_ring->next_to_clean)
tx_ring          3796 drivers/net/ethernet/intel/e1000/e1000_main.c 	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
tx_ring          3821 drivers/net/ethernet/intel/e1000/e1000_main.c 			       struct e1000_tx_ring *tx_ring)
tx_ring          3832 drivers/net/ethernet/intel/e1000/e1000_main.c 	i = tx_ring->next_to_clean;
tx_ring          3833 drivers/net/ethernet/intel/e1000/e1000_main.c 	eop = tx_ring->buffer_info[i].next_to_watch;
tx_ring          3834 drivers/net/ethernet/intel/e1000/e1000_main.c 	eop_desc = E1000_TX_DESC(*tx_ring, eop);
tx_ring          3837 drivers/net/ethernet/intel/e1000/e1000_main.c 	       (count < tx_ring->count)) {
tx_ring          3841 drivers/net/ethernet/intel/e1000/e1000_main.c 			tx_desc = E1000_TX_DESC(*tx_ring, i);
tx_ring          3842 drivers/net/ethernet/intel/e1000/e1000_main.c 			buffer_info = &tx_ring->buffer_info[i];
tx_ring          3857 drivers/net/ethernet/intel/e1000/e1000_main.c 			if (unlikely(++i == tx_ring->count))
tx_ring          3861 drivers/net/ethernet/intel/e1000/e1000_main.c 		eop = tx_ring->buffer_info[i].next_to_watch;
tx_ring          3862 drivers/net/ethernet/intel/e1000/e1000_main.c 		eop_desc = E1000_TX_DESC(*tx_ring, eop);
tx_ring          3868 drivers/net/ethernet/intel/e1000/e1000_main.c 	smp_store_release(&tx_ring->next_to_clean, i);
tx_ring          3874 drivers/net/ethernet/intel/e1000/e1000_main.c 		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
tx_ring          3892 drivers/net/ethernet/intel/e1000/e1000_main.c 		if (tx_ring->buffer_info[eop].time_stamp &&
tx_ring          3893 drivers/net/ethernet/intel/e1000/e1000_main.c 		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
tx_ring          3909 drivers/net/ethernet/intel/e1000/e1000_main.c 				(unsigned long)(tx_ring - adapter->tx_ring),
tx_ring          3910 drivers/net/ethernet/intel/e1000/e1000_main.c 				readl(hw->hw_addr + tx_ring->tdh),
tx_ring          3911 drivers/net/ethernet/intel/e1000/e1000_main.c 				readl(hw->hw_addr + tx_ring->tdt),
tx_ring          3912 drivers/net/ethernet/intel/e1000/e1000_main.c 				tx_ring->next_to_use,
tx_ring          3913 drivers/net/ethernet/intel/e1000/e1000_main.c 				tx_ring->next_to_clean,
tx_ring          3914 drivers/net/ethernet/intel/e1000/e1000_main.c 				tx_ring->buffer_info[eop].time_stamp,
tx_ring          3926 drivers/net/ethernet/intel/e1000/e1000_main.c 	return count < tx_ring->count;
tx_ring           256 drivers/net/ethernet/intel/e1000/e1000_param.c 		struct e1000_tx_ring *tx_ring = adapter->tx_ring;
tx_ring           273 drivers/net/ethernet/intel/e1000/e1000_param.c 			tx_ring->count = TxDescriptors[bd];
tx_ring           274 drivers/net/ethernet/intel/e1000/e1000_param.c 			e1000_validate_option(&tx_ring->count, &opt, adapter);
tx_ring           275 drivers/net/ethernet/intel/e1000/e1000_param.c 			tx_ring->count = ALIGN(tx_ring->count,
tx_ring           278 drivers/net/ethernet/intel/e1000/e1000_param.c 			tx_ring->count = opt.def;
tx_ring           281 drivers/net/ethernet/intel/e1000/e1000_param.c 			tx_ring[i].count = tx_ring->count;
tx_ring           215 drivers/net/ethernet/intel/e1000e/e1000.h 	struct e1000_ring *tx_ring ____cacheline_aligned_in_smp;
tx_ring           693 drivers/net/ethernet/intel/e1000e/ethtool.c 		adapter->tx_ring->count = new_tx_count;
tx_ring           728 drivers/net/ethernet/intel/e1000e/ethtool.c 		memcpy(temp_tx, adapter->tx_ring, size);
tx_ring           744 drivers/net/ethernet/intel/e1000e/ethtool.c 		e1000e_free_tx_resources(adapter->tx_ring);
tx_ring           745 drivers/net/ethernet/intel/e1000e/ethtool.c 		memcpy(adapter->tx_ring, temp_tx, size);
tx_ring          1114 drivers/net/ethernet/intel/e1000e/ethtool.c 	struct e1000_ring *tx_ring = &adapter->test_tx_ring;
tx_ring          1120 drivers/net/ethernet/intel/e1000e/ethtool.c 	if (tx_ring->desc && tx_ring->buffer_info) {
tx_ring          1121 drivers/net/ethernet/intel/e1000e/ethtool.c 		for (i = 0; i < tx_ring->count; i++) {
tx_ring          1122 drivers/net/ethernet/intel/e1000e/ethtool.c 			buffer_info = &tx_ring->buffer_info[i];
tx_ring          1145 drivers/net/ethernet/intel/e1000e/ethtool.c 	if (tx_ring->desc) {
tx_ring          1146 drivers/net/ethernet/intel/e1000e/ethtool.c 		dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
tx_ring          1147 drivers/net/ethernet/intel/e1000e/ethtool.c 				  tx_ring->dma);
tx_ring          1148 drivers/net/ethernet/intel/e1000e/ethtool.c 		tx_ring->desc = NULL;
tx_ring          1156 drivers/net/ethernet/intel/e1000e/ethtool.c 	kfree(tx_ring->buffer_info);
tx_ring          1157 drivers/net/ethernet/intel/e1000e/ethtool.c 	tx_ring->buffer_info = NULL;
tx_ring          1164 drivers/net/ethernet/intel/e1000e/ethtool.c 	struct e1000_ring *tx_ring = &adapter->test_tx_ring;
tx_ring          1174 drivers/net/ethernet/intel/e1000e/ethtool.c 	if (!tx_ring->count)
tx_ring          1175 drivers/net/ethernet/intel/e1000e/ethtool.c 		tx_ring->count = E1000_DEFAULT_TXD;
tx_ring          1177 drivers/net/ethernet/intel/e1000e/ethtool.c 	tx_ring->buffer_info = kcalloc(tx_ring->count,
tx_ring          1179 drivers/net/ethernet/intel/e1000e/ethtool.c 	if (!tx_ring->buffer_info) {
tx_ring          1184 drivers/net/ethernet/intel/e1000e/ethtool.c 	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
tx_ring          1185 drivers/net/ethernet/intel/e1000e/ethtool.c 	tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring          1186 drivers/net/ethernet/intel/e1000e/ethtool.c 	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
tx_ring          1187 drivers/net/ethernet/intel/e1000e/ethtool.c 					   &tx_ring->dma, GFP_KERNEL);
tx_ring          1188 drivers/net/ethernet/intel/e1000e/ethtool.c 	if (!tx_ring->desc) {
tx_ring          1192 drivers/net/ethernet/intel/e1000e/ethtool.c 	tx_ring->next_to_use = 0;
tx_ring          1193 drivers/net/ethernet/intel/e1000e/ethtool.c 	tx_ring->next_to_clean = 0;
tx_ring          1195 drivers/net/ethernet/intel/e1000e/ethtool.c 	ew32(TDBAL(0), ((u64)tx_ring->dma & 0x00000000FFFFFFFF));
tx_ring          1196 drivers/net/ethernet/intel/e1000e/ethtool.c 	ew32(TDBAH(0), ((u64)tx_ring->dma >> 32));
tx_ring          1197 drivers/net/ethernet/intel/e1000e/ethtool.c 	ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc));
tx_ring          1204 drivers/net/ethernet/intel/e1000e/ethtool.c 	for (i = 0; i < tx_ring->count; i++) {
tx_ring          1205 drivers/net/ethernet/intel/e1000e/ethtool.c 		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
tx_ring          1215 drivers/net/ethernet/intel/e1000e/ethtool.c 		tx_ring->buffer_info[i].skb = skb;
tx_ring          1216 drivers/net/ethernet/intel/e1000e/ethtool.c 		tx_ring->buffer_info[i].length = skb->len;
tx_ring          1217 drivers/net/ethernet/intel/e1000e/ethtool.c 		tx_ring->buffer_info[i].dma =
tx_ring          1221 drivers/net/ethernet/intel/e1000e/ethtool.c 				      tx_ring->buffer_info[i].dma)) {
tx_ring          1225 drivers/net/ethernet/intel/e1000e/ethtool.c 		tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
tx_ring          1627 drivers/net/ethernet/intel/e1000e/ethtool.c 	struct e1000_ring *tx_ring = &adapter->test_tx_ring;
tx_ring          1645 drivers/net/ethernet/intel/e1000e/ethtool.c 	if (rx_ring->count <= tx_ring->count)
tx_ring          1646 drivers/net/ethernet/intel/e1000e/ethtool.c 		lc = ((tx_ring->count / 64) * 2) + 1;
tx_ring          1656 drivers/net/ethernet/intel/e1000e/ethtool.c 			buffer_info = &tx_ring->buffer_info[k];
tx_ring          1664 drivers/net/ethernet/intel/e1000e/ethtool.c 			if (k == tx_ring->count)
tx_ring           201 drivers/net/ethernet/intel/e1000e/netdev.c 	struct e1000_ring *tx_ring = adapter->tx_ring;
tx_ring           245 drivers/net/ethernet/intel/e1000e/netdev.c 	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
tx_ring           247 drivers/net/ethernet/intel/e1000e/netdev.c 		0, tx_ring->next_to_use, tx_ring->next_to_clean,
tx_ring           289 drivers/net/ethernet/intel/e1000e/netdev.c 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
tx_ring           291 drivers/net/ethernet/intel/e1000e/netdev.c 		tx_desc = E1000_TX_DESC(*tx_ring, i);
tx_ring           292 drivers/net/ethernet/intel/e1000e/netdev.c 		buffer_info = &tx_ring->buffer_info[i];
tx_ring           294 drivers/net/ethernet/intel/e1000e/netdev.c 		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
tx_ring           296 drivers/net/ethernet/intel/e1000e/netdev.c 		else if (i == tx_ring->next_to_use)
tx_ring           298 drivers/net/ethernet/intel/e1000e/netdev.c 		else if (i == tx_ring->next_to_clean)
tx_ring           623 drivers/net/ethernet/intel/e1000e/netdev.c static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
tx_ring           625 drivers/net/ethernet/intel/e1000e/netdev.c 	struct e1000_adapter *adapter = tx_ring->adapter;
tx_ring           629 drivers/net/ethernet/intel/e1000e/netdev.c 	writel(i, tx_ring->tail);
tx_ring           631 drivers/net/ethernet/intel/e1000e/netdev.c 	if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
tx_ring          1055 drivers/net/ethernet/intel/e1000e/netdev.c static void e1000_put_txbuf(struct e1000_ring *tx_ring,
tx_ring          1059 drivers/net/ethernet/intel/e1000e/netdev.c 	struct e1000_adapter *adapter = tx_ring->adapter;
tx_ring          1086 drivers/net/ethernet/intel/e1000e/netdev.c 	struct e1000_ring *tx_ring = adapter->tx_ring;
tx_ring          1087 drivers/net/ethernet/intel/e1000e/netdev.c 	unsigned int i = tx_ring->next_to_clean;
tx_ring          1088 drivers/net/ethernet/intel/e1000e/netdev.c 	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
tx_ring          1089 drivers/net/ethernet/intel/e1000e/netdev.c 	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
tx_ring          1145 drivers/net/ethernet/intel/e1000e/netdev.c 	      readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use,
tx_ring          1146 drivers/net/ethernet/intel/e1000e/netdev.c 	      tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp,
tx_ring          1208 drivers/net/ethernet/intel/e1000e/netdev.c static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
tx_ring          1210 drivers/net/ethernet/intel/e1000e/netdev.c 	struct e1000_adapter *adapter = tx_ring->adapter;
tx_ring          1220 drivers/net/ethernet/intel/e1000e/netdev.c 	i = tx_ring->next_to_clean;
tx_ring          1221 drivers/net/ethernet/intel/e1000e/netdev.c 	eop = tx_ring->buffer_info[i].next_to_watch;
tx_ring          1222 drivers/net/ethernet/intel/e1000e/netdev.c 	eop_desc = E1000_TX_DESC(*tx_ring, eop);
tx_ring          1225 drivers/net/ethernet/intel/e1000e/netdev.c 	       (count < tx_ring->count)) {
tx_ring          1230 drivers/net/ethernet/intel/e1000e/netdev.c 			tx_desc = E1000_TX_DESC(*tx_ring, i);
tx_ring          1231 drivers/net/ethernet/intel/e1000e/netdev.c 			buffer_info = &tx_ring->buffer_info[i];
tx_ring          1243 drivers/net/ethernet/intel/e1000e/netdev.c 			e1000_put_txbuf(tx_ring, buffer_info, false);
tx_ring          1247 drivers/net/ethernet/intel/e1000e/netdev.c 			if (i == tx_ring->count)
tx_ring          1251 drivers/net/ethernet/intel/e1000e/netdev.c 		if (i == tx_ring->next_to_use)
tx_ring          1253 drivers/net/ethernet/intel/e1000e/netdev.c 		eop = tx_ring->buffer_info[i].next_to_watch;
tx_ring          1254 drivers/net/ethernet/intel/e1000e/netdev.c 		eop_desc = E1000_TX_DESC(*tx_ring, eop);
tx_ring          1257 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_ring->next_to_clean = i;
tx_ring          1263 drivers/net/ethernet/intel/e1000e/netdev.c 	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
tx_ring          1281 drivers/net/ethernet/intel/e1000e/netdev.c 		if (tx_ring->buffer_info[i].time_stamp &&
tx_ring          1282 drivers/net/ethernet/intel/e1000e/netdev.c 		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
tx_ring          1291 drivers/net/ethernet/intel/e1000e/netdev.c 	return count < tx_ring->count;
tx_ring          1922 drivers/net/ethernet/intel/e1000e/netdev.c 	struct e1000_ring *tx_ring = adapter->tx_ring;
tx_ring          1927 drivers/net/ethernet/intel/e1000e/netdev.c 	if (!e1000_clean_tx_irq(tx_ring))
tx_ring          1929 drivers/net/ethernet/intel/e1000e/netdev.c 		ew32(ICS, tx_ring->ims_val);
tx_ring          1932 drivers/net/ethernet/intel/e1000e/netdev.c 		ew32(IMS, adapter->tx_ring->ims_val);
tx_ring          1972 drivers/net/ethernet/intel/e1000e/netdev.c 	struct e1000_ring *tx_ring = adapter->tx_ring;
tx_ring          1997 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_ring->ims_val = E1000_IMS_TXQ0;
tx_ring          1999 drivers/net/ethernet/intel/e1000e/netdev.c 	if (tx_ring->itr_val)
tx_ring          2000 drivers/net/ethernet/intel/e1000e/netdev.c 		writel(1000000000 / (tx_ring->itr_val * 256),
tx_ring          2001 drivers/net/ethernet/intel/e1000e/netdev.c 		       tx_ring->itr_register);
tx_ring          2003 drivers/net/ethernet/intel/e1000e/netdev.c 		writel(1, tx_ring->itr_register);
tx_ring          2004 drivers/net/ethernet/intel/e1000e/netdev.c 	adapter->eiac_mask |= tx_ring->ims_val;
tx_ring          2123 drivers/net/ethernet/intel/e1000e/netdev.c 		snprintf(adapter->tx_ring->name,
tx_ring          2124 drivers/net/ethernet/intel/e1000e/netdev.c 			 sizeof(adapter->tx_ring->name) - 1,
tx_ring          2127 drivers/net/ethernet/intel/e1000e/netdev.c 		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
tx_ring          2129 drivers/net/ethernet/intel/e1000e/netdev.c 			  e1000_intr_msix_tx, 0, adapter->tx_ring->name,
tx_ring          2133 drivers/net/ethernet/intel/e1000e/netdev.c 	adapter->tx_ring->itr_register = adapter->hw.hw_addr +
tx_ring          2135 drivers/net/ethernet/intel/e1000e/netdev.c 	adapter->tx_ring->itr_val = adapter->itr;
tx_ring          2322 drivers/net/ethernet/intel/e1000e/netdev.c int e1000e_setup_tx_resources(struct e1000_ring *tx_ring)
tx_ring          2324 drivers/net/ethernet/intel/e1000e/netdev.c 	struct e1000_adapter *adapter = tx_ring->adapter;
tx_ring          2327 drivers/net/ethernet/intel/e1000e/netdev.c 	size = sizeof(struct e1000_buffer) * tx_ring->count;
tx_ring          2328 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_ring->buffer_info = vzalloc(size);
tx_ring          2329 drivers/net/ethernet/intel/e1000e/netdev.c 	if (!tx_ring->buffer_info)
tx_ring          2333 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
tx_ring          2334 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring          2336 drivers/net/ethernet/intel/e1000e/netdev.c 	err = e1000_alloc_ring_dma(adapter, tx_ring);
tx_ring          2340 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_ring->next_to_use = 0;
tx_ring          2341 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_ring->next_to_clean = 0;
tx_ring          2345 drivers/net/ethernet/intel/e1000e/netdev.c 	vfree(tx_ring->buffer_info);
tx_ring          2407 drivers/net/ethernet/intel/e1000e/netdev.c static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
tx_ring          2409 drivers/net/ethernet/intel/e1000e/netdev.c 	struct e1000_adapter *adapter = tx_ring->adapter;
tx_ring          2414 drivers/net/ethernet/intel/e1000e/netdev.c 	for (i = 0; i < tx_ring->count; i++) {
tx_ring          2415 drivers/net/ethernet/intel/e1000e/netdev.c 		buffer_info = &tx_ring->buffer_info[i];
tx_ring          2416 drivers/net/ethernet/intel/e1000e/netdev.c 		e1000_put_txbuf(tx_ring, buffer_info, false);
tx_ring          2420 drivers/net/ethernet/intel/e1000e/netdev.c 	size = sizeof(struct e1000_buffer) * tx_ring->count;
tx_ring          2421 drivers/net/ethernet/intel/e1000e/netdev.c 	memset(tx_ring->buffer_info, 0, size);
tx_ring          2423 drivers/net/ethernet/intel/e1000e/netdev.c 	memset(tx_ring->desc, 0, tx_ring->size);
tx_ring          2425 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_ring->next_to_use = 0;
tx_ring          2426 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_ring->next_to_clean = 0;
tx_ring          2435 drivers/net/ethernet/intel/e1000e/netdev.c void e1000e_free_tx_resources(struct e1000_ring *tx_ring)
tx_ring          2437 drivers/net/ethernet/intel/e1000e/netdev.c 	struct e1000_adapter *adapter = tx_ring->adapter;
tx_ring          2440 drivers/net/ethernet/intel/e1000e/netdev.c 	e1000_clean_tx_ring(tx_ring);
tx_ring          2442 drivers/net/ethernet/intel/e1000e/netdev.c 	vfree(tx_ring->buffer_info);
tx_ring          2443 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_ring->buffer_info = NULL;
tx_ring          2445 drivers/net/ethernet/intel/e1000e/netdev.c 	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
tx_ring          2446 drivers/net/ethernet/intel/e1000e/netdev.c 			  tx_ring->dma);
tx_ring          2447 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_ring->desc = NULL;
tx_ring          2631 drivers/net/ethernet/intel/e1000e/netdev.c 	adapter->tx_ring = kzalloc(size, GFP_KERNEL);
tx_ring          2632 drivers/net/ethernet/intel/e1000e/netdev.c 	if (!adapter->tx_ring)
tx_ring          2634 drivers/net/ethernet/intel/e1000e/netdev.c 	adapter->tx_ring->count = adapter->tx_ring_count;
tx_ring          2635 drivers/net/ethernet/intel/e1000e/netdev.c 	adapter->tx_ring->adapter = adapter;
tx_ring          2647 drivers/net/ethernet/intel/e1000e/netdev.c 	kfree(adapter->tx_ring);
tx_ring          2667 drivers/net/ethernet/intel/e1000e/netdev.c 	    (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
tx_ring          2668 drivers/net/ethernet/intel/e1000e/netdev.c 		tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
tx_ring          2914 drivers/net/ethernet/intel/e1000e/netdev.c 	struct e1000_ring *tx_ring = adapter->tx_ring;
tx_ring          2919 drivers/net/ethernet/intel/e1000e/netdev.c 	tdba = tx_ring->dma;
tx_ring          2920 drivers/net/ethernet/intel/e1000e/netdev.c 	tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
tx_ring          2926 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
tx_ring          2927 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
tx_ring          2929 drivers/net/ethernet/intel/e1000e/netdev.c 	writel(0, tx_ring->head);
tx_ring          2931 drivers/net/ethernet/intel/e1000e/netdev.c 		e1000e_update_tdt_wa(tx_ring, 0);
tx_ring          2933 drivers/net/ethernet/intel/e1000e/netdev.c 		writel(0, tx_ring->tail);
tx_ring          3799 drivers/net/ethernet/intel/e1000e/netdev.c 	struct e1000_ring *tx_ring = adapter->tx_ring;
tx_ring          3807 drivers/net/ethernet/intel/e1000e/netdev.c 	BUG_ON(tdt != tx_ring->next_to_use);
tx_ring          3808 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_desc =  E1000_TX_DESC(*tx_ring, tx_ring->next_to_use);
tx_ring          3809 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_desc->buffer_addr = tx_ring->dma;
tx_ring          3815 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_ring->next_to_use++;
tx_ring          3816 drivers/net/ethernet/intel/e1000e/netdev.c 	if (tx_ring->next_to_use == tx_ring->count)
tx_ring          3817 drivers/net/ethernet/intel/e1000e/netdev.c 		tx_ring->next_to_use = 0;
tx_ring          3818 drivers/net/ethernet/intel/e1000e/netdev.c 	ew32(TDT(0), tx_ring->next_to_use);
tx_ring          4305 drivers/net/ethernet/intel/e1000e/netdev.c 	e1000_clean_tx_ring(adapter->tx_ring);
tx_ring          4612 drivers/net/ethernet/intel/e1000e/netdev.c 	err = e1000e_setup_tx_resources(adapter->tx_ring);
tx_ring          4684 drivers/net/ethernet/intel/e1000e/netdev.c 	e1000e_free_tx_resources(adapter->tx_ring);
tx_ring          4726 drivers/net/ethernet/intel/e1000e/netdev.c 	e1000e_free_tx_resources(adapter->tx_ring);
tx_ring          5175 drivers/net/ethernet/intel/e1000e/netdev.c 	struct e1000_ring *tx_ring = adapter->tx_ring;
tx_ring          5358 drivers/net/ethernet/intel/e1000e/netdev.c 	    (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
tx_ring          5432 drivers/net/ethernet/intel/e1000e/netdev.c static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
tx_ring          5475 drivers/net/ethernet/intel/e1000e/netdev.c 	i = tx_ring->next_to_use;
tx_ring          5476 drivers/net/ethernet/intel/e1000e/netdev.c 	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
tx_ring          5477 drivers/net/ethernet/intel/e1000e/netdev.c 	buffer_info = &tx_ring->buffer_info[i];
tx_ring          5493 drivers/net/ethernet/intel/e1000e/netdev.c 	if (i == tx_ring->count)
tx_ring          5495 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_ring->next_to_use = i;
tx_ring          5500 drivers/net/ethernet/intel/e1000e/netdev.c static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb,
tx_ring          5503 drivers/net/ethernet/intel/e1000e/netdev.c 	struct e1000_adapter *adapter = tx_ring->adapter;
tx_ring          5532 drivers/net/ethernet/intel/e1000e/netdev.c 	i = tx_ring->next_to_use;
tx_ring          5533 drivers/net/ethernet/intel/e1000e/netdev.c 	buffer_info = &tx_ring->buffer_info[i];
tx_ring          5534 drivers/net/ethernet/intel/e1000e/netdev.c 	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
tx_ring          5547 drivers/net/ethernet/intel/e1000e/netdev.c 	if (i == tx_ring->count)
tx_ring          5549 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_ring->next_to_use = i;
tx_ring          5554 drivers/net/ethernet/intel/e1000e/netdev.c static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
tx_ring          5558 drivers/net/ethernet/intel/e1000e/netdev.c 	struct e1000_adapter *adapter = tx_ring->adapter;
tx_ring          5565 drivers/net/ethernet/intel/e1000e/netdev.c 	i = tx_ring->next_to_use;
tx_ring          5568 drivers/net/ethernet/intel/e1000e/netdev.c 		buffer_info = &tx_ring->buffer_info[i];
tx_ring          5587 drivers/net/ethernet/intel/e1000e/netdev.c 			if (i == tx_ring->count)
tx_ring          5600 drivers/net/ethernet/intel/e1000e/netdev.c 			if (i == tx_ring->count)
tx_ring          5603 drivers/net/ethernet/intel/e1000e/netdev.c 			buffer_info = &tx_ring->buffer_info[i];
tx_ring          5626 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_ring->buffer_info[i].skb = skb;
tx_ring          5627 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_ring->buffer_info[i].segs = segs;
tx_ring          5628 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_ring->buffer_info[i].bytecount = bytecount;
tx_ring          5629 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_ring->buffer_info[first].next_to_watch = i;
tx_ring          5641 drivers/net/ethernet/intel/e1000e/netdev.c 			i += tx_ring->count;
tx_ring          5643 drivers/net/ethernet/intel/e1000e/netdev.c 		buffer_info = &tx_ring->buffer_info[i];
tx_ring          5644 drivers/net/ethernet/intel/e1000e/netdev.c 		e1000_put_txbuf(tx_ring, buffer_info, true);
tx_ring          5650 drivers/net/ethernet/intel/e1000e/netdev.c static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
tx_ring          5652 drivers/net/ethernet/intel/e1000e/netdev.c 	struct e1000_adapter *adapter = tx_ring->adapter;
tx_ring          5685 drivers/net/ethernet/intel/e1000e/netdev.c 	i = tx_ring->next_to_use;
tx_ring          5688 drivers/net/ethernet/intel/e1000e/netdev.c 		buffer_info = &tx_ring->buffer_info[i];
tx_ring          5689 drivers/net/ethernet/intel/e1000e/netdev.c 		tx_desc = E1000_TX_DESC(*tx_ring, i);
tx_ring          5696 drivers/net/ethernet/intel/e1000e/netdev.c 		if (i == tx_ring->count)
tx_ring          5713 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_ring->next_to_use = i;
tx_ring          5754 drivers/net/ethernet/intel/e1000e/netdev.c static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
tx_ring          5756 drivers/net/ethernet/intel/e1000e/netdev.c 	struct e1000_adapter *adapter = tx_ring->adapter;
tx_ring          5768 drivers/net/ethernet/intel/e1000e/netdev.c 	if (e1000_desc_unused(tx_ring) < size)
tx_ring          5777 drivers/net/ethernet/intel/e1000e/netdev.c static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
tx_ring          5779 drivers/net/ethernet/intel/e1000e/netdev.c 	BUG_ON(size > tx_ring->count);
tx_ring          5781 drivers/net/ethernet/intel/e1000e/netdev.c 	if (e1000_desc_unused(tx_ring) >= size)
tx_ring          5783 drivers/net/ethernet/intel/e1000e/netdev.c 	return __e1000_maybe_stop_tx(tx_ring, size);
tx_ring          5790 drivers/net/ethernet/intel/e1000e/netdev.c 	struct e1000_ring *tx_ring = adapter->tx_ring;
tx_ring          5860 drivers/net/ethernet/intel/e1000e/netdev.c 	if (e1000_maybe_stop_tx(tx_ring, count + 2))
tx_ring          5869 drivers/net/ethernet/intel/e1000e/netdev.c 	first = tx_ring->next_to_use;
tx_ring          5871 drivers/net/ethernet/intel/e1000e/netdev.c 	tso = e1000_tso(tx_ring, skb, protocol);
tx_ring          5879 drivers/net/ethernet/intel/e1000e/netdev.c 	else if (e1000_tx_csum(tx_ring, skb, protocol))
tx_ring          5893 drivers/net/ethernet/intel/e1000e/netdev.c 	count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
tx_ring          5912 drivers/net/ethernet/intel/e1000e/netdev.c 		e1000_tx_queue(tx_ring, tx_flags, count);
tx_ring          5914 drivers/net/ethernet/intel/e1000e/netdev.c 		e1000_maybe_stop_tx(tx_ring,
tx_ring          5922 drivers/net/ethernet/intel/e1000e/netdev.c 				e1000e_update_tdt_wa(tx_ring,
tx_ring          5923 drivers/net/ethernet/intel/e1000e/netdev.c 						     tx_ring->next_to_use);
tx_ring          5925 drivers/net/ethernet/intel/e1000e/netdev.c 				writel(tx_ring->next_to_use, tx_ring->tail);
tx_ring          5929 drivers/net/ethernet/intel/e1000e/netdev.c 		tx_ring->buffer_info[first].time_stamp = 0;
tx_ring          5930 drivers/net/ethernet/intel/e1000e/netdev.c 		tx_ring->next_to_use = first;
tx_ring          7379 drivers/net/ethernet/intel/e1000e/netdev.c 	kfree(adapter->tx_ring);
tx_ring          7445 drivers/net/ethernet/intel/e1000e/netdev.c 	kfree(adapter->tx_ring);
tx_ring           311 drivers/net/ethernet/intel/fm10k/fm10k.h 	struct fm10k_ring *tx_ring[MAX_QUEUES] ____cacheline_aligned_in_smp;
tx_ring           484 drivers/net/ethernet/intel/fm10k/fm10k.h 				  struct fm10k_ring *tx_ring);
tx_ring           487 drivers/net/ethernet/intel/fm10k/fm10k.h bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring);
tx_ring           285 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 		ring = interface->tx_ring[i];
tx_ring           550 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 			interface->tx_ring[i]->count = new_tx_count;
tx_ring           576 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 			memcpy(&temp_ring[i], interface->tx_ring[i],
tx_ring           591 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 			fm10k_free_tx_resources(interface->tx_ring[i]);
tx_ring           593 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 			memcpy(interface->tx_ring[i], &temp_ring[i],
tx_ring           753 drivers/net/ethernet/intel/fm10k/fm10k_main.c static int fm10k_tso(struct fm10k_ring *tx_ring,
tx_ring           786 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
tx_ring           793 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
tx_ring           795 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		netdev_err(tx_ring->netdev,
tx_ring           800 drivers/net/ethernet/intel/fm10k/fm10k_main.c static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
tx_ring           822 drivers/net/ethernet/intel/fm10k/fm10k_main.c 				dev_warn(tx_ring->dev,
tx_ring           824 drivers/net/ethernet/intel/fm10k/fm10k_main.c 				tx_ring->tx_stats.csum_err++;
tx_ring           865 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			dev_warn(tx_ring->dev,
tx_ring           870 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		tx_ring->tx_stats.csum_err++;
tx_ring           876 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	tx_ring->tx_stats.csum_good++;
tx_ring           880 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
tx_ring           902 drivers/net/ethernet/intel/fm10k/fm10k_main.c static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
tx_ring           916 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	return i == tx_ring->count;
tx_ring           919 drivers/net/ethernet/intel/fm10k/fm10k_main.c static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
tx_ring           921 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
tx_ring           927 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (likely(fm10k_desc_unused(tx_ring) < size))
tx_ring           931 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
tx_ring           932 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	++tx_ring->tx_stats.restart_queue;
tx_ring           936 drivers/net/ethernet/intel/fm10k/fm10k_main.c static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
tx_ring           938 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (likely(fm10k_desc_unused(tx_ring) >= size))
tx_ring           940 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	return __fm10k_maybe_stop_tx(tx_ring, size);
tx_ring           943 drivers/net/ethernet/intel/fm10k/fm10k_main.c static void fm10k_tx_map(struct fm10k_ring *tx_ring,
tx_ring           954 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	u16 i = tx_ring->next_to_use;
tx_ring           957 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	tx_desc = FM10K_TX_DESC(tx_ring, i);
tx_ring           968 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
tx_ring           974 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		if (dma_mapping_error(tx_ring->dev, dma))
tx_ring           982 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma,
tx_ring           984 drivers/net/ethernet/intel/fm10k/fm10k_main.c 				tx_desc = FM10K_TX_DESC(tx_ring, 0);
tx_ring           995 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++,
tx_ring           997 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			tx_desc = FM10K_TX_DESC(tx_ring, 0);
tx_ring          1004 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
tx_ring          1007 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		tx_buffer = &tx_ring->tx_buffer[i];
tx_ring          1013 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags))
tx_ring          1017 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
tx_ring          1034 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	tx_ring->next_to_use = i;
tx_ring          1037 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
tx_ring          1040 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
tx_ring          1041 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		writel(i, tx_ring->tail);
tx_ring          1046 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	dev_err(tx_ring->dev, "TX DMA map failed\n");
tx_ring          1050 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		tx_buffer = &tx_ring->tx_buffer[i];
tx_ring          1051 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
tx_ring          1055 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			i = tx_ring->count;
tx_ring          1059 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	tx_ring->next_to_use = i;
tx_ring          1063 drivers/net/ethernet/intel/fm10k/fm10k_main.c 				  struct fm10k_ring *tx_ring)
tx_ring          1082 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
tx_ring          1083 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		tx_ring->tx_stats.tx_busy++;
tx_ring          1088 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	first = &tx_ring->tx_buffer[tx_ring->next_to_use];
tx_ring          1096 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	tso = fm10k_tso(tx_ring, first);
tx_ring          1100 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		fm10k_tx_csum(tx_ring, first);
tx_ring          1102 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	fm10k_tx_map(tx_ring, first);
tx_ring          1140 drivers/net/ethernet/intel/fm10k/fm10k_main.c bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
tx_ring          1142 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	u32 tx_done = fm10k_get_tx_completed(tx_ring);
tx_ring          1143 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
tx_ring          1144 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	u32 tx_pending = fm10k_get_tx_pending(tx_ring, true);
tx_ring          1146 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	clear_check_for_tx_hang(tx_ring);
tx_ring          1158 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		tx_ring->tx_stats.tx_done_old = tx_done;
tx_ring          1160 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		clear_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state);
tx_ring          1166 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state);
tx_ring          1190 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			       struct fm10k_ring *tx_ring, int napi_budget)
tx_ring          1197 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	unsigned int i = tx_ring->next_to_clean;
tx_ring          1202 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	tx_buffer = &tx_ring->tx_buffer[i];
tx_ring          1203 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	tx_desc = FM10K_TX_DESC(tx_ring, i);
tx_ring          1204 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	i -= tx_ring->count;
tx_ring          1231 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		dma_unmap_single(tx_ring->dev,
tx_ring          1246 drivers/net/ethernet/intel/fm10k/fm10k_main.c 				i -= tx_ring->count;
tx_ring          1247 drivers/net/ethernet/intel/fm10k/fm10k_main.c 				tx_buffer = tx_ring->tx_buffer;
tx_ring          1248 drivers/net/ethernet/intel/fm10k/fm10k_main.c 				tx_desc = FM10K_TX_DESC(tx_ring, 0);
tx_ring          1253 drivers/net/ethernet/intel/fm10k/fm10k_main.c 				dma_unmap_page(tx_ring->dev,
tx_ring          1266 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			i -= tx_ring->count;
tx_ring          1267 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			tx_buffer = tx_ring->tx_buffer;
tx_ring          1268 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			tx_desc = FM10K_TX_DESC(tx_ring, 0);
tx_ring          1278 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	i += tx_ring->count;
tx_ring          1279 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	tx_ring->next_to_clean = i;
tx_ring          1280 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	u64_stats_update_begin(&tx_ring->syncp);
tx_ring          1281 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	tx_ring->stats.bytes += total_bytes;
tx_ring          1282 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	tx_ring->stats.packets += total_packets;
tx_ring          1283 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	u64_stats_update_end(&tx_ring->syncp);
tx_ring          1287 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) {
tx_ring          1291 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		netif_err(interface, drv, tx_ring->netdev,
tx_ring          1297 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			  tx_ring->queue_index,
tx_ring          1298 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			  fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)),
tx_ring          1299 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			  fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)),
tx_ring          1300 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			  tx_ring->next_to_use, i);
tx_ring          1302 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		netif_stop_subqueue(tx_ring->netdev,
tx_ring          1303 drivers/net/ethernet/intel/fm10k/fm10k_main.c 				    tx_ring->queue_index);
tx_ring          1305 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		netif_info(interface, probe, tx_ring->netdev,
tx_ring          1308 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			   tx_ring->queue_index);
tx_ring          1317 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	netdev_tx_completed_queue(txring_txq(tx_ring),
tx_ring          1321 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
tx_ring          1322 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		     (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
tx_ring          1327 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring          1328 drivers/net/ethernet/intel/fm10k/fm10k_main.c 					     tx_ring->queue_index) &&
tx_ring          1330 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			netif_wake_subqueue(tx_ring->netdev,
tx_ring          1331 drivers/net/ethernet/intel/fm10k/fm10k_main.c 					    tx_ring->queue_index);
tx_ring          1332 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			++tx_ring->tx_stats.restart_queue;
tx_ring          1647 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		interface->tx_ring[txr_idx] = ring;
tx_ring          1709 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		interface->tx_ring[ring->queue_index] = NULL;
tx_ring          1888 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			interface->tx_ring[offset + i]->reg_idx = q_idx;
tx_ring          1889 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			interface->tx_ring[offset + i]->qos_pc = pc;
tx_ring          1913 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		interface->tx_ring[i]->reg_idx = i;
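
The fm10k_main.c entries above cluster around two things: fm10k_maybe_stop_tx()/fm10k_tx_map() on the transmit side, and the stop/wake handling at the end of the Tx cleanup loop (TX_WAKE_THRESHOLD, restart_queue). The snippet below is a minimal user-space sketch of that descriptor accounting, assuming simplified stand-in names (demo_ring, DEMO_TX_WAKE_THRESHOLD) rather than the driver's real structures.

/*
 * Simplified, user-space sketch of the Tx descriptor accounting that the
 * fm10k entries above revolve around.  All names and values here are
 * illustrative, not the driver's real definitions.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_ring {
	unsigned int count;          /* number of descriptors in the ring  */
	unsigned int next_to_use;    /* producer index (xmit path)         */
	unsigned int next_to_clean;  /* consumer index (completion path)   */
	bool stopped;                /* models netif_stop/wake_subqueue()  */
};

#define DEMO_TX_WAKE_THRESHOLD 8U

/* Same idea as fm10k_desc_unused(): free slots, keeping one descriptor as
 * a gap so that next_to_use == next_to_clean means "empty". */
static unsigned int demo_desc_unused(const struct demo_ring *r)
{
	return (r->next_to_clean > r->next_to_use ? 0 : r->count) +
	       r->next_to_clean - r->next_to_use - 1;
}

/* Mirrors the maybe_stop_tx() pattern: stop the queue when a frame might
 * not fit, then re-check so a racing completion can undo the stop. */
static int demo_maybe_stop_tx(struct demo_ring *r, unsigned int needed)
{
	if (demo_desc_unused(r) >= needed)
		return 0;
	r->stopped = true;
	if (demo_desc_unused(r) < needed)
		return -1;
	r->stopped = false;	/* room appeared, restart the queue */
	return 0;
}

int main(void)
{
	struct demo_ring r = { .count = 32, .next_to_use = 30, .next_to_clean = 3 };

	printf("unused=%u stop=%d\n", demo_desc_unused(&r),
	       demo_maybe_stop_tx(&r, 8));

	/* completion path advances next_to_clean, then wakes the queue once
	 * enough descriptors are free again (the TX_WAKE_THRESHOLD check). */
	r.next_to_clean = 20;
	if (r.stopped && demo_desc_unused(&r) >= DEMO_TX_WAKE_THRESHOLD)
		r.stopped = false;
	printf("unused=%u stopped=%d\n", demo_desc_unused(&r), r.stopped);
	return 0;
}

The second test after setting stopped models the re-check the drivers perform after netif_stop_subqueue(), which closes the race with a completion that frees descriptors in between.
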
tx_ring            15 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c int fm10k_setup_tx_resources(struct fm10k_ring *tx_ring)
tx_ring            17 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	struct device *dev = tx_ring->dev;
tx_ring            20 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	size = sizeof(struct fm10k_tx_buffer) * tx_ring->count;
tx_ring            22 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	tx_ring->tx_buffer = vzalloc(size);
tx_ring            23 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	if (!tx_ring->tx_buffer)
tx_ring            26 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	u64_stats_init(&tx_ring->syncp);
tx_ring            29 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	tx_ring->size = tx_ring->count * sizeof(struct fm10k_tx_desc);
tx_ring            30 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring            32 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
tx_ring            33 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 					   &tx_ring->dma, GFP_KERNEL);
tx_ring            34 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	if (!tx_ring->desc)
tx_ring            40 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	vfree(tx_ring->tx_buffer);
tx_ring            41 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	tx_ring->tx_buffer = NULL;
tx_ring            60 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		err = fm10k_setup_tx_resources(interface->tx_ring[i]);
tx_ring            73 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		fm10k_free_tx_resources(interface->tx_ring[i]);
tx_ring           170 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring)
tx_ring           176 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	if (!tx_ring->tx_buffer)
tx_ring           180 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	for (i = 0; i < tx_ring->count; i++) {
tx_ring           181 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		struct fm10k_tx_buffer *tx_buffer = &tx_ring->tx_buffer[i];
tx_ring           183 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
tx_ring           187 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	netdev_tx_reset_queue(txring_txq(tx_ring));
tx_ring           189 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	size = sizeof(struct fm10k_tx_buffer) * tx_ring->count;
tx_ring           190 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	memset(tx_ring->tx_buffer, 0, size);
tx_ring           193 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	memset(tx_ring->desc, 0, tx_ring->size);
tx_ring           202 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c void fm10k_free_tx_resources(struct fm10k_ring *tx_ring)
tx_ring           204 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	fm10k_clean_tx_ring(tx_ring);
tx_ring           206 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	vfree(tx_ring->tx_buffer);
tx_ring           207 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	tx_ring->tx_buffer = NULL;
tx_ring           210 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	if (!tx_ring->desc)
tx_ring           213 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	dma_free_coherent(tx_ring->dev, tx_ring->size,
tx_ring           214 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 			  tx_ring->desc, tx_ring->dma);
tx_ring           215 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	tx_ring->desc = NULL;
tx_ring           227 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		fm10k_clean_tx_ring(interface->tx_ring[i]);
tx_ring           241 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		fm10k_free_tx_resources(interface->tx_ring[i]);
tx_ring           691 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	err = fm10k_xmit_frame_ring(skb, interface->tx_ring[r_idx]);
tx_ring           708 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		struct fm10k_ring *tx_ring = interface->tx_ring[i];
tx_ring           710 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring))
tx_ring          1355 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		ring = READ_ONCE(interface->tx_ring[i]);
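
fm10k_setup_tx_resources() and fm10k_free_tx_resources(), whose lines appear just above, pair a vzalloc()'d tx_buffer array with a DMA-coherent descriptor ring rounded up to a 4 KiB multiple, and unwind the first allocation if the second fails. Below is a rough user-space analogue of that shape, with calloc()/aligned_alloc() standing in for vzalloc()/dma_alloc_coherent() and all names hypothetical.

/*
 * User-space analogue of the setup/free pairing visible in the
 * fm10k_netdev.c lines above.  calloc() stands in for vzalloc() and
 * aligned_alloc() for dma_alloc_coherent(); no real DMA is involved.
 */
#include <stdlib.h>
#include <string.h>

struct demo_tx_buffer { void *skb; };			/* per-descriptor bookkeeping */
struct demo_tx_desc   { unsigned long long addr, cmd; };

struct demo_tx_ring {
	unsigned int count;
	struct demo_tx_buffer *tx_buffer;		/* vzalloc()'d array in-kernel */
	struct demo_tx_desc *desc;			/* DMA-coherent descriptor ring */
	size_t size;
};

static int demo_setup_tx_resources(struct demo_tx_ring *r)
{
	/* buffer bookkeeping first, as in the driver */
	r->tx_buffer = calloc(r->count, sizeof(*r->tx_buffer));
	if (!r->tx_buffer)
		return -1;

	/* descriptor ring rounded up to a 4 KiB multiple, like ALIGN(, 4096) */
	r->size = r->count * sizeof(struct demo_tx_desc);
	r->size = (r->size + 4095) & ~((size_t)4095);
	r->desc = aligned_alloc(4096, r->size);
	if (!r->desc) {
		free(r->tx_buffer);			/* unwind on failure */
		r->tx_buffer = NULL;
		return -1;
	}
	memset(r->desc, 0, r->size);
	return 0;
}

static void demo_free_tx_resources(struct demo_tx_ring *r)
{
	/* the driver cleans in-flight buffers first, then frees both arrays */
	free(r->tx_buffer);
	r->tx_buffer = NULL;
	free(r->desc);					/* dma_free_coherent() in-kernel */
	r->desc = NULL;
}

int main(void)
{
	struct demo_tx_ring r = { .count = 256 };

	if (demo_setup_tx_resources(&r))
		return 1;
	demo_free_tx_resources(&r);
	return 0;
}
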
tx_ring           560 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 		struct fm10k_ring *tx_ring = READ_ONCE(interface->tx_ring[i]);
tx_ring           562 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 		if (!tx_ring)
tx_ring           565 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 		restart_queue += tx_ring->tx_stats.restart_queue;
tx_ring           566 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 		tx_busy += tx_ring->tx_stats.tx_busy;
tx_ring           567 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 		tx_csum_errors += tx_ring->tx_stats.csum_err;
tx_ring           568 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 		bytes += tx_ring->stats.bytes;
tx_ring           569 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 		pkts += tx_ring->stats.packets;
tx_ring           570 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 		hw_csum_tx_good += tx_ring->tx_stats.csum_good;
tx_ring           650 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 		struct fm10k_ring *tx_ring = interface->tx_ring[i];
tx_ring           652 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 		if (tx_ring->next_to_use != tx_ring->next_to_clean) {
tx_ring           716 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 			set_check_for_tx_hang(interface->tx_ring[i]);
tx_ring           963 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 		fm10k_configure_tx_ring(interface, interface->tx_ring[i]);
tx_ring           967 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 		fm10k_enable_tx_ring(interface, interface->tx_ring[i]);
tx_ring          1932 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 			if (fm10k_get_tx_pending(interface->tx_ring[i], false))
tx_ring           285 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 		struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]);
tx_ring           287 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 		if (!tx_ring)
tx_ring           292 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 			 i, *tx_ring->state,
tx_ring           293 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 			 tx_ring->queue_index,
tx_ring           294 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 			 tx_ring->reg_idx);
tx_ring           298 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 			 tx_ring->next_to_use,
tx_ring           299 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 			 tx_ring->next_to_clean,
tx_ring           300 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 			 tx_ring->ring_active);
tx_ring           303 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 			 i, tx_ring->stats.packets,
tx_ring           304 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 			 tx_ring->stats.bytes,
tx_ring           305 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 			 tx_ring->tx_stats.restart_queue);
tx_ring           309 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 			 tx_ring->tx_stats.tx_busy,
tx_ring           310 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 			 tx_ring->tx_stats.tx_done_old);
tx_ring           313 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 			 i, tx_ring->size);
tx_ring           316 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 			 i, tx_ring->dcb_tc);
tx_ring           319 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 			 i, tx_ring->itr_setting,
tx_ring           320 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 			 ITR_IS_DYNAMIC(tx_ring->itr_setting) ? "dynamic" : "fixed");
tx_ring          2734 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	struct i40e_ring *rx_ring, *tx_ring;
tx_ring          2749 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	tx_ring = vsi->tx_rings[queue];
tx_ring          2754 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	if (ITR_IS_DYNAMIC(tx_ring->itr_setting))
tx_ring          2758 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	ec->tx_coalesce_usecs = tx_ring->itr_setting & ~I40E_ITR_DYNAMIC;
tx_ring          2814 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	struct i40e_ring *tx_ring = vsi->tx_rings[queue];
tx_ring          2823 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
tx_ring          2831 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 		tx_ring->itr_setting |= I40E_ITR_DYNAMIC;
tx_ring          2833 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 		tx_ring->itr_setting &= ~I40E_ITR_DYNAMIC;
tx_ring          2838 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	q_vector = tx_ring->q_vector;
tx_ring          2839 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
tx_ring           309 drivers/net/ethernet/intel/i40e/i40e_main.c 	struct i40e_ring *tx_ring = NULL;
tx_ring           338 drivers/net/ethernet/intel/i40e/i40e_main.c 					tx_ring = vsi->tx_rings[i];
tx_ring           355 drivers/net/ethernet/intel/i40e/i40e_main.c 	if (tx_ring) {
tx_ring           356 drivers/net/ethernet/intel/i40e/i40e_main.c 		head = i40e_get_head(tx_ring);
tx_ring           360 drivers/net/ethernet/intel/i40e/i40e_main.c 			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
tx_ring           361 drivers/net/ethernet/intel/i40e/i40e_main.c 						tx_ring->vsi->base_vector - 1));
tx_ring           366 drivers/net/ethernet/intel/i40e/i40e_main.c 			    vsi->seid, hung_queue, tx_ring->next_to_clean,
tx_ring           367 drivers/net/ethernet/intel/i40e/i40e_main.c 			    head, tx_ring->next_to_use,
tx_ring           368 drivers/net/ethernet/intel/i40e/i40e_main.c 			    readl(tx_ring->tail), val);
tx_ring          3425 drivers/net/ethernet/intel/i40e/i40e_main.c 	struct i40e_ring *tx_ring, *rx_ring;
tx_ring          3433 drivers/net/ethernet/intel/i40e/i40e_main.c 			tx_ring = vsi->tx_rings[i];
tx_ring          3435 drivers/net/ethernet/intel/i40e/i40e_main.c 			tx_ring->dcb_tc = 0;
tx_ring          3448 drivers/net/ethernet/intel/i40e/i40e_main.c 			tx_ring = vsi->tx_rings[i];
tx_ring          3450 drivers/net/ethernet/intel/i40e/i40e_main.c 			tx_ring->dcb_tc = n;
tx_ring          4058 drivers/net/ethernet/intel/i40e/i40e_main.c static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring          4060 drivers/net/ethernet/intel/i40e/i40e_main.c 	struct i40e_vsi *vsi = tx_ring->vsi;
tx_ring          4061 drivers/net/ethernet/intel/i40e/i40e_main.c 	u16 i = tx_ring->next_to_clean;
tx_ring          4065 drivers/net/ethernet/intel/i40e/i40e_main.c 	tx_buf = &tx_ring->tx_bi[i];
tx_ring          4066 drivers/net/ethernet/intel/i40e/i40e_main.c 	tx_desc = I40E_TX_DESC(tx_ring, i);
tx_ring          4067 drivers/net/ethernet/intel/i40e/i40e_main.c 	i -= tx_ring->count;
tx_ring          4094 drivers/net/ethernet/intel/i40e/i40e_main.c 			i -= tx_ring->count;
tx_ring          4095 drivers/net/ethernet/intel/i40e/i40e_main.c 			tx_buf = tx_ring->tx_bi;
tx_ring          4096 drivers/net/ethernet/intel/i40e/i40e_main.c 			tx_desc = I40E_TX_DESC(tx_ring, 0);
tx_ring          4099 drivers/net/ethernet/intel/i40e/i40e_main.c 		dma_unmap_single(tx_ring->dev,
tx_ring          4118 drivers/net/ethernet/intel/i40e/i40e_main.c 			i -= tx_ring->count;
tx_ring          4119 drivers/net/ethernet/intel/i40e/i40e_main.c 			tx_buf = tx_ring->tx_bi;
tx_ring          4120 drivers/net/ethernet/intel/i40e/i40e_main.c 			tx_desc = I40E_TX_DESC(tx_ring, 0);
tx_ring          4127 drivers/net/ethernet/intel/i40e/i40e_main.c 	i += tx_ring->count;
tx_ring          4128 drivers/net/ethernet/intel/i40e/i40e_main.c 	tx_ring->next_to_clean = i;
tx_ring          4131 drivers/net/ethernet/intel/i40e/i40e_main.c 		i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
tx_ring          4164 drivers/net/ethernet/intel/i40e/i40e_main.c 	struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
tx_ring          4167 drivers/net/ethernet/intel/i40e/i40e_main.c 	tx_ring->q_vector = q_vector;
tx_ring          4168 drivers/net/ethernet/intel/i40e/i40e_main.c 	tx_ring->next = q_vector->tx.ring;
tx_ring          4169 drivers/net/ethernet/intel/i40e/i40e_main.c 	q_vector->tx.ring = tx_ring;
tx_ring          5583 drivers/net/ethernet/intel/i40e/i40e_main.c 			struct i40e_ring *tx_ring, *rx_ring;
tx_ring          5587 drivers/net/ethernet/intel/i40e/i40e_main.c 			tx_ring = vsi->tx_rings[pf_q];
tx_ring          5588 drivers/net/ethernet/intel/i40e/i40e_main.c 			tx_ring->ch = NULL;
tx_ring          5958 drivers/net/ethernet/intel/i40e/i40e_main.c 		struct i40e_ring *tx_ring, *rx_ring;
tx_ring          5966 drivers/net/ethernet/intel/i40e/i40e_main.c 		tx_ring = vsi->tx_rings[pf_q];
tx_ring          5967 drivers/net/ethernet/intel/i40e/i40e_main.c 		tx_ring->ch = ch;
tx_ring          6953 drivers/net/ethernet/intel/i40e/i40e_main.c 	struct i40e_ring *tx_ring, *rx_ring;
tx_ring          6959 drivers/net/ethernet/intel/i40e/i40e_main.c 		tx_ring = vsi->tx_rings[pf_q];
tx_ring          6960 drivers/net/ethernet/intel/i40e/i40e_main.c 		tx_ring->ch = NULL;
tx_ring          7041 drivers/net/ethernet/intel/i40e/i40e_main.c 				struct i40e_ring *tx_ring, *rx_ring;
tx_ring          7047 drivers/net/ethernet/intel/i40e/i40e_main.c 				tx_ring = vsi->tx_rings[pf_q];
tx_ring          7048 drivers/net/ethernet/intel/i40e/i40e_main.c 				tx_ring->ch = ch;
tx_ring            21 drivers/net/ethernet/intel/i40e/i40e_txrx.c static void i40e_fdir(struct i40e_ring *tx_ring,
tx_ring            25 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	struct i40e_pf *pf = tx_ring->vsi->back;
tx_ring            30 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	i = tx_ring->next_to_use;
tx_ring            31 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
tx_ring            34 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
tx_ring            94 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	struct i40e_ring *tx_ring;
tx_ring           106 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_ring = vsi->tx_rings[0];
tx_ring           107 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	dev = tx_ring->dev;
tx_ring           110 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
tx_ring           122 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	i = tx_ring->next_to_use;
tx_ring           123 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	first = &tx_ring->tx_bi[i];
tx_ring           124 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	i40e_fdir(tx_ring, fdir_data, add);
tx_ring           127 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	i = tx_ring->next_to_use;
tx_ring           128 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_desc = I40E_TX_DESC(tx_ring, i);
tx_ring           129 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_buf = &tx_ring->tx_bi[i];
tx_ring           131 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
tx_ring           156 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	writel(tx_ring->next_to_use, tx_ring->tail);
tx_ring           633 drivers/net/ethernet/intel/i40e/i40e_txrx.c void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
tx_ring           638 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
tx_ring           639 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		i40e_xsk_clean_tx_ring(tx_ring);
tx_ring           642 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		if (!tx_ring->tx_bi)
tx_ring           646 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		for (i = 0; i < tx_ring->count; i++)
tx_ring           647 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			i40e_unmap_and_free_tx_resource(tx_ring,
tx_ring           648 drivers/net/ethernet/intel/i40e/i40e_txrx.c 							&tx_ring->tx_bi[i]);
tx_ring           651 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
tx_ring           652 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	memset(tx_ring->tx_bi, 0, bi_size);
tx_ring           655 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	memset(tx_ring->desc, 0, tx_ring->size);
tx_ring           657 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_ring->next_to_use = 0;
tx_ring           658 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_ring->next_to_clean = 0;
tx_ring           660 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (!tx_ring->netdev)
tx_ring           664 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	netdev_tx_reset_queue(txring_txq(tx_ring));
tx_ring           673 drivers/net/ethernet/intel/i40e/i40e_txrx.c void i40e_free_tx_resources(struct i40e_ring *tx_ring)
tx_ring           675 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	i40e_clean_tx_ring(tx_ring);
tx_ring           676 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	kfree(tx_ring->tx_bi);
tx_ring           677 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_ring->tx_bi = NULL;
tx_ring           679 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (tx_ring->desc) {
tx_ring           680 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		dma_free_coherent(tx_ring->dev, tx_ring->size,
tx_ring           681 drivers/net/ethernet/intel/i40e/i40e_txrx.c 				  tx_ring->desc, tx_ring->dma);
tx_ring           682 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		tx_ring->desc = NULL;
tx_ring           722 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	struct i40e_ring *tx_ring = NULL;
tx_ring           741 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		tx_ring = vsi->tx_rings[i];
tx_ring           742 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		if (tx_ring && tx_ring->desc) {
tx_ring           750 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			packets = tx_ring->stats.packets & INT_MAX;
tx_ring           751 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
tx_ring           752 drivers/net/ethernet/intel/i40e/i40e_txrx.c 				i40e_force_wb(vsi, tx_ring->q_vector);
tx_ring           760 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			tx_ring->tx_stats.prev_pkt_ctr =
tx_ring           761 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			    i40e_get_tx_pending(tx_ring, true) ? packets : -1;
tx_ring           775 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			      struct i40e_ring *tx_ring, int napi_budget)
tx_ring           777 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	int i = tx_ring->next_to_clean;
tx_ring           784 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_buf = &tx_ring->tx_bi[i];
tx_ring           785 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_desc = I40E_TX_DESC(tx_ring, i);
tx_ring           786 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	i -= tx_ring->count;
tx_ring           788 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
tx_ring           800 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
tx_ring           813 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		if (ring_is_xdp(tx_ring))
tx_ring           819 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		dma_unmap_single(tx_ring->dev,
tx_ring           831 drivers/net/ethernet/intel/i40e/i40e_txrx.c 				   tx_ring, tx_desc, tx_buf);
tx_ring           837 drivers/net/ethernet/intel/i40e/i40e_txrx.c 				i -= tx_ring->count;
tx_ring           838 drivers/net/ethernet/intel/i40e/i40e_txrx.c 				tx_buf = tx_ring->tx_bi;
tx_ring           839 drivers/net/ethernet/intel/i40e/i40e_txrx.c 				tx_desc = I40E_TX_DESC(tx_ring, 0);
tx_ring           844 drivers/net/ethernet/intel/i40e/i40e_txrx.c 				dma_unmap_page(tx_ring->dev,
tx_ring           857 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			i -= tx_ring->count;
tx_ring           858 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			tx_buf = tx_ring->tx_bi;
tx_ring           859 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			tx_desc = I40E_TX_DESC(tx_ring, 0);
tx_ring           868 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	i += tx_ring->count;
tx_ring           869 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_ring->next_to_clean = i;
tx_ring           870 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	i40e_update_tx_stats(tx_ring, total_packets, total_bytes);
tx_ring           871 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	i40e_arm_wb(tx_ring, vsi, budget);
tx_ring           873 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (ring_is_xdp(tx_ring))
tx_ring           877 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	netdev_tx_completed_queue(txring_txq(tx_ring),
tx_ring           881 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
tx_ring           882 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
tx_ring           887 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring           888 drivers/net/ethernet/intel/i40e/i40e_txrx.c 					     tx_ring->queue_index) &&
tx_ring           890 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			netif_wake_subqueue(tx_ring->netdev,
tx_ring           891 drivers/net/ethernet/intel/i40e/i40e_txrx.c 					    tx_ring->queue_index);
tx_ring           892 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			++tx_ring->tx_stats.restart_queue;
tx_ring          1296 drivers/net/ethernet/intel/i40e/i40e_txrx.c int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
tx_ring          1298 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	struct device *dev = tx_ring->dev;
tx_ring          1305 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	WARN_ON(tx_ring->tx_bi);
tx_ring          1306 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
tx_ring          1307 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
tx_ring          1308 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (!tx_ring->tx_bi)
tx_ring          1311 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	u64_stats_init(&tx_ring->syncp);
tx_ring          1314 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
tx_ring          1318 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_ring->size += sizeof(u32);
tx_ring          1319 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring          1320 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
tx_ring          1321 drivers/net/ethernet/intel/i40e/i40e_txrx.c 					   &tx_ring->dma, GFP_KERNEL);
tx_ring          1322 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (!tx_ring->desc) {
tx_ring          1324 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			 tx_ring->size);
tx_ring          1328 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_ring->next_to_use = 0;
tx_ring          1329 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_ring->next_to_clean = 0;
tx_ring          1330 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_ring->tx_stats.prev_pkt_ctr = -1;
tx_ring          1334 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	kfree(tx_ring->tx_bi);
tx_ring          1335 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_ring->tx_bi = NULL;
tx_ring          2667 drivers/net/ethernet/intel/i40e/i40e_txrx.c static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring          2671 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	struct i40e_pf *pf = tx_ring->vsi->back;
tx_ring          2691 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (!tx_ring->atr_sample_rate)
tx_ring          2737 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_ring->atr_count++;
tx_ring          2743 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
tx_ring          2746 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_ring->atr_count = 0;
tx_ring          2749 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	i = tx_ring->next_to_use;
tx_ring          2750 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
tx_ring          2753 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
tx_ring          2755 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
tx_ring          2763 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
tx_ring          2813 drivers/net/ethernet/intel/i40e/i40e_txrx.c 					     struct i40e_ring *tx_ring,
tx_ring          2820 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
tx_ring          2849 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
tx_ring          2995 drivers/net/ethernet/intel/i40e/i40e_txrx.c static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring          3010 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	pf = i40e_netdev_to_pf(tx_ring->netdev);
tx_ring          3041 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			       struct i40e_ring *tx_ring,
tx_ring          3203 drivers/net/ethernet/intel/i40e/i40e_txrx.c static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
tx_ring          3208 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	int i = tx_ring->next_to_use;
tx_ring          3215 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	context_desc = I40E_TX_CTXTDESC(tx_ring, i);
tx_ring          3218 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
tx_ring          3234 drivers/net/ethernet/intel/i40e/i40e_txrx.c int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
tx_ring          3236 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
tx_ring          3241 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
tx_ring          3245 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
tx_ring          3246 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	++tx_ring->tx_stats.restart_queue;
tx_ring          3346 drivers/net/ethernet/intel/i40e/i40e_txrx.c static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring          3355 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	u16 i = tx_ring->next_to_use;
tx_ring          3368 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
tx_ring          3370 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_desc = I40E_TX_DESC(tx_ring, i);
tx_ring          3376 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		if (dma_mapping_error(tx_ring->dev, dma))
tx_ring          3396 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			if (i == tx_ring->count) {
tx_ring          3397 drivers/net/ethernet/intel/i40e/i40e_txrx.c 				tx_desc = I40E_TX_DESC(tx_ring, 0);
tx_ring          3418 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		if (i == tx_ring->count) {
tx_ring          3419 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			tx_desc = I40E_TX_DESC(tx_ring, 0);
tx_ring          3426 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
tx_ring          3429 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		tx_bi = &tx_ring->tx_bi[i];
tx_ring          3432 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
tx_ring          3435 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (i == tx_ring->count)
tx_ring          3438 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_ring->next_to_use = i;
tx_ring          3440 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
tx_ring          3448 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	desc_count |= ++tx_ring->packet_stride;
tx_ring          3453 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		tx_ring->packet_stride = 0;
tx_ring          3473 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
tx_ring          3474 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		writel(i, tx_ring->tail);
tx_ring          3480 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	dev_info(tx_ring->dev, "TX DMA map failed\n");
tx_ring          3484 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		tx_bi = &tx_ring->tx_bi[i];
tx_ring          3485 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
tx_ring          3489 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			i = tx_ring->count;
tx_ring          3493 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_ring->next_to_use = i;
tx_ring          3559 drivers/net/ethernet/intel/i40e/i40e_txrx.c 					struct i40e_ring *tx_ring)
tx_ring          3575 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	i40e_trace(xmit_frame_ring, skb, tx_ring);
tx_ring          3584 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		tx_ring->tx_stats.tx_linearize++;
tx_ring          3593 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring          3594 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		tx_ring->tx_stats.tx_busy++;
tx_ring          3599 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	first = &tx_ring->tx_bi[tx_ring->next_to_use];
tx_ring          3605 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
tx_ring          3626 drivers/net/ethernet/intel/i40e/i40e_txrx.c 				  tx_ring, &cd_tunneling);
tx_ring          3630 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
tx_ring          3638 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
tx_ring          3645 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	i40e_atr(tx_ring, skb, tx_flags);
tx_ring          3647 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
tx_ring          3654 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
tx_ring          3659 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
tx_ring          3680 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
tx_ring          3688 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	return i40e_xmit_frame_ring(skb, tx_ring);
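
A pattern shared by the fm10k, i40e and iavf cleanup loops listed above (i -= tx_ring->count near the top, if (unlikely(!i)) i -= tx_ring->count inside the loop, i += tx_ring->count at the end) is a loop index biased by -count, so the wrap test becomes a cheap compare against zero. A small stand-alone illustration with made-up ring contents:

/*
 * Minimal sketch of the biased-index trick used by the clean_tx_irq()
 * loops listed above.  Plain user-space C, hypothetical data.
 */
#include <stdio.h>

#define RING_COUNT 8

int main(void)
{
	int ring[RING_COUNT] = { 10, 11, 12, 13, 14, 15, 16, 17 };
	unsigned int next_to_clean = 5;
	int i = (int)next_to_clean - RING_COUNT;   /* biased: negative until wrap */
	int budget = 6;                            /* descriptors to complete this pass */

	while (budget--) {
		printf("cleaning slot %d (value %d)\n",
		       i + RING_COUNT, ring[i + RING_COUNT]);
		i++;
		if (!i)			/* i hit 0: wrap back to the start of the ring */
			i -= RING_COUNT;
	}

	/* un-bias before storing the new consumer index, as the drivers do */
	i += RING_COUNT;
	printf("next_to_clean is now %d\n", i);
	return 0;
}
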
tx_ring           484 drivers/net/ethernet/intel/i40e/i40e_txrx.h void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
tx_ring           486 drivers/net/ethernet/intel/i40e/i40e_txrx.h int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
tx_ring           488 drivers/net/ethernet/intel/i40e/i40e_txrx.h void i40e_free_tx_resources(struct i40e_ring *tx_ring);
tx_ring           494 drivers/net/ethernet/intel/i40e/i40e_txrx.h int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
tx_ring           506 drivers/net/ethernet/intel/i40e/i40e_txrx.h static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
tx_ring           508 drivers/net/ethernet/intel/i40e/i40e_txrx.h 	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
tx_ring           547 drivers/net/ethernet/intel/i40e/i40e_txrx.h static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
tx_ring           549 drivers/net/ethernet/intel/i40e/i40e_txrx.h 	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
tx_ring           551 drivers/net/ethernet/intel/i40e/i40e_txrx.h 	return __i40e_maybe_stop_tx(tx_ring, size);
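
i40e_get_head() in the i40e_txrx.h lines above reads a head index that the hardware writes back just past the last descriptor, which is why i40e_setup_tx_descriptors() adds sizeof(u32) to tx_ring->size before allocating. A simplified, DMA-free sketch of that layout, with hypothetical types:

/*
 * Sketch of the "head write-back" layout implied by the i40e entries
 * above: one extra dword sits immediately after desc[count - 1], where
 * the device reports how far it has consumed the ring.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_tx_desc { uint64_t addr; uint64_t cmd; };

struct demo_ring {
	struct demo_tx_desc *desc;
	unsigned int count;
};

static uint32_t demo_get_head(const struct demo_ring *r)
{
	/* the head dword lives immediately after the last descriptor */
	const void *head = r->desc + r->count;

	return *(const volatile uint32_t *)head;
}

int main(void)
{
	struct demo_ring r = { .count = 64 };
	size_t size = r.count * sizeof(struct demo_tx_desc) + sizeof(uint32_t);

	r.desc = calloc(1, size);
	if (!r.desc)
		return 1;

	/* pretend the device wrote back a head index of 17 */
	uint32_t hw_head = 17;
	memcpy((uint8_t *)r.desc + r.count * sizeof(struct demo_tx_desc),
	       &hw_head, sizeof(hw_head));

	printf("head = %u\n", demo_get_head(&r));
	free(r.desc);
	return 0;
}
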
tx_ring            47 drivers/net/ethernet/intel/i40e/i40e_txrx_common.h static inline void i40e_update_tx_stats(struct i40e_ring *tx_ring,
tx_ring            51 drivers/net/ethernet/intel/i40e/i40e_txrx_common.h 	u64_stats_update_begin(&tx_ring->syncp);
tx_ring            52 drivers/net/ethernet/intel/i40e/i40e_txrx_common.h 	tx_ring->stats.bytes += total_bytes;
tx_ring            53 drivers/net/ethernet/intel/i40e/i40e_txrx_common.h 	tx_ring->stats.packets += total_packets;
tx_ring            54 drivers/net/ethernet/intel/i40e/i40e_txrx_common.h 	u64_stats_update_end(&tx_ring->syncp);
tx_ring            55 drivers/net/ethernet/intel/i40e/i40e_txrx_common.h 	tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring            56 drivers/net/ethernet/intel/i40e/i40e_txrx_common.h 	tx_ring->q_vector->tx.total_packets += total_packets;
tx_ring            67 drivers/net/ethernet/intel/i40e/i40e_txrx_common.h static inline void i40e_arm_wb(struct i40e_ring *tx_ring,
tx_ring            71 drivers/net/ethernet/intel/i40e/i40e_txrx_common.h 	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
tx_ring            77 drivers/net/ethernet/intel/i40e/i40e_txrx_common.h 		unsigned int j = i40e_get_tx_pending(tx_ring, false);
tx_ring            82 drivers/net/ethernet/intel/i40e/i40e_txrx_common.h 		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring            83 drivers/net/ethernet/intel/i40e/i40e_txrx_common.h 			tx_ring->arm_wb = true;
tx_ring            88 drivers/net/ethernet/intel/i40e/i40e_txrx_common.h void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring);
tx_ring           702 drivers/net/ethernet/intel/i40e/i40e_xsk.c static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
tx_ring           706 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	dma_unmap_single(tx_ring->dev,
tx_ring           720 drivers/net/ethernet/intel/i40e/i40e_xsk.c 			   struct i40e_ring *tx_ring, int napi_budget)
tx_ring           724 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	struct xdp_umem *umem = tx_ring->xsk_umem;
tx_ring           725 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	u32 head_idx = i40e_get_head(tx_ring);
tx_ring           729 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	if (head_idx < tx_ring->next_to_clean)
tx_ring           730 drivers/net/ethernet/intel/i40e/i40e_xsk.c 		head_idx += tx_ring->count;
tx_ring           731 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	frames_ready = head_idx - tx_ring->next_to_clean;
tx_ring           742 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	ntc = tx_ring->next_to_clean;
tx_ring           745 drivers/net/ethernet/intel/i40e/i40e_xsk.c 		tx_bi = &tx_ring->tx_bi[ntc];
tx_ring           748 drivers/net/ethernet/intel/i40e/i40e_xsk.c 			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
tx_ring           755 drivers/net/ethernet/intel/i40e/i40e_xsk.c 		if (++ntc >= tx_ring->count)
tx_ring           759 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	tx_ring->next_to_clean += completed_frames;
tx_ring           760 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
tx_ring           761 drivers/net/ethernet/intel/i40e/i40e_xsk.c 		tx_ring->next_to_clean -= tx_ring->count;
tx_ring           766 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	i40e_arm_wb(tx_ring, vsi, budget);
tx_ring           767 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	i40e_update_tx_stats(tx_ring, completed_frames, total_bytes);
tx_ring           770 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
tx_ring           771 drivers/net/ethernet/intel/i40e/i40e_xsk.c 		xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
tx_ring           773 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	xmit_done = i40e_xmit_zc(tx_ring, budget);
tx_ring           841 drivers/net/ethernet/intel/i40e/i40e_xsk.c void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
tx_ring           843 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
tx_ring           844 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	struct xdp_umem *umem = tx_ring->xsk_umem;
tx_ring           849 drivers/net/ethernet/intel/i40e/i40e_xsk.c 		tx_bi = &tx_ring->tx_bi[ntc];
tx_ring           852 drivers/net/ethernet/intel/i40e/i40e_xsk.c 			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
tx_ring           859 drivers/net/ethernet/intel/i40e/i40e_xsk.c 		if (ntc >= tx_ring->count)
tx_ring            20 drivers/net/ethernet/intel/i40e/i40e_xsk.h 			   struct i40e_ring *tx_ring, int napi_budget);
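
The i40e_xsk.c completion path above turns that write-back head into a frame count by unwrapping it relative to next_to_clean, and afterwards folds next_to_clean back into the range [0, count). A worked example with made-up numbers:

/*
 * Sketch of the completion arithmetic in the i40e_xsk.c lines above:
 * frames_ready is the distance from next_to_clean to the hardware head,
 * with a wrap fixup, and next_to_clean advances modulo the ring size.
 */
#include <stdio.h>

int main(void)
{
	unsigned int count = 256;
	unsigned int next_to_clean = 240;
	unsigned int head_idx = 12;	/* hardware wrapped past the ring end */
	unsigned int frames_ready, completed;

	if (head_idx < next_to_clean)	/* head wrapped: unwrap for the subtraction */
		head_idx += count;
	frames_ready = head_idx - next_to_clean;

	completed = frames_ready;	/* a napi budget could cap this */
	next_to_clean += completed;
	if (next_to_clean >= count)	/* fold back into [0, count) */
		next_to_clean -= count;

	printf("frames_ready=%u next_to_clean=%u\n", frames_ready, next_to_clean);
	return 0;
}
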
tx_ring           646 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	struct iavf_ring *rx_ring, *tx_ring;
tx_ring           660 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	tx_ring = &adapter->tx_rings[queue];
tx_ring           665 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	if (ITR_IS_DYNAMIC(tx_ring->itr_setting))
tx_ring           669 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	ec->tx_coalesce_usecs = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
tx_ring           716 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	struct iavf_ring *tx_ring = &adapter->tx_rings[queue];
tx_ring           720 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
tx_ring           726 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	tx_ring->itr_setting |= IAVF_ITR_DYNAMIC;
tx_ring           728 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 		tx_ring->itr_setting ^= IAVF_ITR_DYNAMIC;
tx_ring           733 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	q_vector = tx_ring->q_vector;
tx_ring           734 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
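
In the iavf_ethtool.c lines above, itr_setting carries both the throttling value and the IAVF_ITR_DYNAMIC flag: the set path ORs the flag in and then XORs it back out when adaptive coalescing was not requested, while the get path masks it off before reporting. A compact illustration, using an assumed flag bit and alignment macro rather than the real IAVF_ITR_DYNAMIC / ITR_REG_ALIGN values:

/*
 * Sketch of the itr_setting flag handling visible above.  The bit value
 * and the alignment are illustrative stand-ins only.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_ITR_DYNAMIC 0x1u		/* assumed flag bit, unused by the value */
#define DEMO_ITR_ALIGN(us) ((us) & ~DEMO_ITR_DYNAMIC)

int main(void)
{
	unsigned int itr_setting;
	bool use_adaptive = false;

	/* set path: store the aligned value, set the flag, then toggle it
	 * back off when adaptive moderation was not requested */
	itr_setting = DEMO_ITR_ALIGN(50);
	itr_setting |= DEMO_ITR_DYNAMIC;
	if (!use_adaptive)
		itr_setting ^= DEMO_ITR_DYNAMIC;

	/* get path: report the flag and the value with the flag masked out */
	printf("dynamic=%u usecs=%u\n",
	       itr_setting & DEMO_ITR_DYNAMIC,
	       itr_setting & ~DEMO_ITR_DYNAMIC);
	return 0;
}
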
tx_ring           331 drivers/net/ethernet/intel/iavf/iavf_main.c 	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
tx_ring           334 drivers/net/ethernet/intel/iavf/iavf_main.c 	tx_ring->q_vector = q_vector;
tx_ring           335 drivers/net/ethernet/intel/iavf/iavf_main.c 	tx_ring->next = q_vector->tx.ring;
tx_ring           336 drivers/net/ethernet/intel/iavf/iavf_main.c 	tx_ring->vsi = &adapter->vsi;
tx_ring           337 drivers/net/ethernet/intel/iavf/iavf_main.c 	q_vector->tx.ring = tx_ring;
tx_ring           340 drivers/net/ethernet/intel/iavf/iavf_main.c 	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
tx_ring          1124 drivers/net/ethernet/intel/iavf/iavf_main.c 		struct iavf_ring *tx_ring;
tx_ring          1127 drivers/net/ethernet/intel/iavf/iavf_main.c 		tx_ring = &adapter->tx_rings[i];
tx_ring          1129 drivers/net/ethernet/intel/iavf/iavf_main.c 		tx_ring->queue_index = i;
tx_ring          1130 drivers/net/ethernet/intel/iavf/iavf_main.c 		tx_ring->netdev = adapter->netdev;
tx_ring          1131 drivers/net/ethernet/intel/iavf/iavf_main.c 		tx_ring->dev = &adapter->pdev->dev;
tx_ring          1132 drivers/net/ethernet/intel/iavf/iavf_main.c 		tx_ring->count = adapter->tx_desc_count;
tx_ring          1133 drivers/net/ethernet/intel/iavf/iavf_main.c 		tx_ring->itr_setting = IAVF_ITR_TX_DEF;
tx_ring          1135 drivers/net/ethernet/intel/iavf/iavf_main.c 			tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;
tx_ring            57 drivers/net/ethernet/intel/iavf/iavf_txrx.c void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
tx_ring            63 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (!tx_ring->tx_bi)
tx_ring            67 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	for (i = 0; i < tx_ring->count; i++)
tx_ring            68 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
tx_ring            70 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
tx_ring            71 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	memset(tx_ring->tx_bi, 0, bi_size);
tx_ring            74 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	memset(tx_ring->desc, 0, tx_ring->size);
tx_ring            76 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tx_ring->next_to_use = 0;
tx_ring            77 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tx_ring->next_to_clean = 0;
tx_ring            79 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (!tx_ring->netdev)
tx_ring            83 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	netdev_tx_reset_queue(txring_txq(tx_ring));
tx_ring            92 drivers/net/ethernet/intel/iavf/iavf_txrx.c void iavf_free_tx_resources(struct iavf_ring *tx_ring)
tx_ring            94 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	iavf_clean_tx_ring(tx_ring);
tx_ring            95 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	kfree(tx_ring->tx_bi);
tx_ring            96 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tx_ring->tx_bi = NULL;
tx_ring            98 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (tx_ring->desc) {
tx_ring            99 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		dma_free_coherent(tx_ring->dev, tx_ring->size,
tx_ring           100 drivers/net/ethernet/intel/iavf/iavf_txrx.c 				  tx_ring->desc, tx_ring->dma);
tx_ring           101 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		tx_ring->desc = NULL;
tx_ring           136 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	struct iavf_ring *tx_ring = NULL;
tx_ring           155 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		tx_ring = &vsi->back->tx_rings[i];
tx_ring           156 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		if (tx_ring && tx_ring->desc) {
tx_ring           164 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			packets = tx_ring->stats.packets & INT_MAX;
tx_ring           165 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
tx_ring           166 drivers/net/ethernet/intel/iavf/iavf_txrx.c 				iavf_force_wb(vsi, tx_ring->q_vector);
tx_ring           174 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			tx_ring->tx_stats.prev_pkt_ctr =
tx_ring           175 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			  iavf_get_tx_pending(tx_ring, true) ? packets : -1;
tx_ring           191 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			      struct iavf_ring *tx_ring, int napi_budget)
tx_ring           193 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	int i = tx_ring->next_to_clean;
tx_ring           199 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tx_buf = &tx_ring->tx_bi[i];
tx_ring           200 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tx_desc = IAVF_TX_DESC(tx_ring, i);
tx_ring           201 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	i -= tx_ring->count;
tx_ring           213 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
tx_ring           230 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		dma_unmap_single(tx_ring->dev,
tx_ring           242 drivers/net/ethernet/intel/iavf/iavf_txrx.c 				   tx_ring, tx_desc, tx_buf);
tx_ring           248 drivers/net/ethernet/intel/iavf/iavf_txrx.c 				i -= tx_ring->count;
tx_ring           249 drivers/net/ethernet/intel/iavf/iavf_txrx.c 				tx_buf = tx_ring->tx_bi;
tx_ring           250 drivers/net/ethernet/intel/iavf/iavf_txrx.c 				tx_desc = IAVF_TX_DESC(tx_ring, 0);
tx_ring           255 drivers/net/ethernet/intel/iavf/iavf_txrx.c 				dma_unmap_page(tx_ring->dev,
tx_ring           268 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			i -= tx_ring->count;
tx_ring           269 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			tx_buf = tx_ring->tx_bi;
tx_ring           270 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			tx_desc = IAVF_TX_DESC(tx_ring, 0);
tx_ring           279 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	i += tx_ring->count;
tx_ring           280 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tx_ring->next_to_clean = i;
tx_ring           281 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	u64_stats_update_begin(&tx_ring->syncp);
tx_ring           282 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tx_ring->stats.bytes += total_bytes;
tx_ring           283 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tx_ring->stats.packets += total_packets;
tx_ring           284 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	u64_stats_update_end(&tx_ring->syncp);
tx_ring           285 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring           286 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tx_ring->q_vector->tx.total_packets += total_packets;
tx_ring           288 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) {
tx_ring           294 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		unsigned int j = iavf_get_tx_pending(tx_ring, false);
tx_ring           299 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		    (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring           300 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			tx_ring->arm_wb = true;
tx_ring           304 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	netdev_tx_completed_queue(txring_txq(tx_ring),
tx_ring           308 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
tx_ring           309 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		     (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
tx_ring           314 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring           315 drivers/net/ethernet/intel/iavf/iavf_txrx.c 					     tx_ring->queue_index) &&
tx_ring           317 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			netif_wake_subqueue(tx_ring->netdev,
tx_ring           318 drivers/net/ethernet/intel/iavf/iavf_txrx.c 					    tx_ring->queue_index);
tx_ring           319 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			++tx_ring->tx_stats.restart_queue;
tx_ring           614 drivers/net/ethernet/intel/iavf/iavf_txrx.c int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
tx_ring           616 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	struct device *dev = tx_ring->dev;
tx_ring           623 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	WARN_ON(tx_ring->tx_bi);
tx_ring           624 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
tx_ring           625 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
tx_ring           626 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (!tx_ring->tx_bi)
tx_ring           630 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc);
tx_ring           631 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring           632 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
tx_ring           633 drivers/net/ethernet/intel/iavf/iavf_txrx.c 					   &tx_ring->dma, GFP_KERNEL);
tx_ring           634 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (!tx_ring->desc) {
tx_ring           636 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			 tx_ring->size);
tx_ring           640 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tx_ring->next_to_use = 0;
tx_ring           641 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tx_ring->next_to_clean = 0;
tx_ring           642 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tx_ring->tx_stats.prev_pkt_ctr = -1;
tx_ring           646 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	kfree(tx_ring->tx_bi);
tx_ring           647 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tx_ring->tx_bi = NULL;
tx_ring          1803 drivers/net/ethernet/intel/iavf/iavf_txrx.c 					     struct iavf_ring *tx_ring,
tx_ring          1810 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
tx_ring          1963 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			       struct iavf_ring *tx_ring,
tx_ring          2125 drivers/net/ethernet/intel/iavf/iavf_txrx.c static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
tx_ring          2130 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	int i = tx_ring->next_to_use;
tx_ring          2137 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	context_desc = IAVF_TX_CTXTDESC(tx_ring, i);
tx_ring          2140 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
tx_ring          2240 drivers/net/ethernet/intel/iavf/iavf_txrx.c int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
tx_ring          2242 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
tx_ring          2247 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (likely(IAVF_DESC_UNUSED(tx_ring) < size))
tx_ring          2251 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
tx_ring          2252 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	++tx_ring->tx_stats.restart_queue;
tx_ring          2266 drivers/net/ethernet/intel/iavf/iavf_txrx.c static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
tx_ring          2275 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	u16 i = tx_ring->next_to_use;
tx_ring          2287 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
tx_ring          2289 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tx_desc = IAVF_TX_DESC(tx_ring, i);
tx_ring          2295 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		if (dma_mapping_error(tx_ring->dev, dma))
tx_ring          2314 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			if (i == tx_ring->count) {
tx_ring          2315 drivers/net/ethernet/intel/iavf/iavf_txrx.c 				tx_desc = IAVF_TX_DESC(tx_ring, 0);
tx_ring          2335 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		if (i == tx_ring->count) {
tx_ring          2336 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			tx_desc = IAVF_TX_DESC(tx_ring, 0);
tx_ring          2343 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
tx_ring          2346 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		tx_bi = &tx_ring->tx_bi[i];
tx_ring          2349 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
tx_ring          2352 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (i == tx_ring->count)
tx_ring          2355 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tx_ring->next_to_use = i;
tx_ring          2357 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	iavf_maybe_stop_tx(tx_ring, DESC_NEEDED);
tx_ring          2378 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
tx_ring          2379 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		writel(i, tx_ring->tail);
tx_ring          2385 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	dev_info(tx_ring->dev, "TX DMA map failed\n");
tx_ring          2389 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		tx_bi = &tx_ring->tx_bi[i];
tx_ring          2390 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		iavf_unmap_and_free_tx_resource(tx_ring, tx_bi);
tx_ring          2394 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			i = tx_ring->count;
tx_ring          2398 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tx_ring->next_to_use = i;
tx_ring          2409 drivers/net/ethernet/intel/iavf/iavf_txrx.c 					struct iavf_ring *tx_ring)
tx_ring          2424 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	iavf_trace(xmit_frame_ring, skb, tx_ring);
tx_ring          2433 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		tx_ring->tx_stats.tx_linearize++;
tx_ring          2442 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring          2443 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		tx_ring->tx_stats.tx_busy++;
tx_ring          2448 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	first = &tx_ring->tx_bi[tx_ring->next_to_use];
tx_ring          2454 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
tx_ring          2475 drivers/net/ethernet/intel/iavf/iavf_txrx.c 				  tx_ring, &cd_tunneling);
tx_ring          2482 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	iavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
tx_ring          2485 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
tx_ring          2491 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring);
tx_ring          2507 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];
tx_ring          2519 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	return iavf_xmit_frame_ring(skb, tx_ring);
tx_ring           441 drivers/net/ethernet/intel/iavf/iavf_txrx.h void iavf_clean_tx_ring(struct iavf_ring *tx_ring);
tx_ring           443 drivers/net/ethernet/intel/iavf/iavf_txrx.h int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring);
tx_ring           445 drivers/net/ethernet/intel/iavf/iavf_txrx.h void iavf_free_tx_resources(struct iavf_ring *tx_ring);
tx_ring           451 drivers/net/ethernet/intel/iavf/iavf_txrx.h int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size);
tx_ring           488 drivers/net/ethernet/intel/iavf/iavf_txrx.h static inline int iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
tx_ring           490 drivers/net/ethernet/intel/iavf/iavf_txrx.h 	if (likely(IAVF_DESC_UNUSED(tx_ring) >= size))
tx_ring           492 drivers/net/ethernet/intel/iavf/iavf_txrx.h 	return __iavf_maybe_stop_tx(tx_ring, size);
tx_ring           108 drivers/net/ethernet/intel/ice/ice_dcb_lib.c 	struct ice_ring *tx_ring, *rx_ring;
tx_ring           115 drivers/net/ethernet/intel/ice/ice_dcb_lib.c 			tx_ring = vsi->tx_rings[i];
tx_ring           116 drivers/net/ethernet/intel/ice/ice_dcb_lib.c 			tx_ring->dcb_tc = 0;
tx_ring           132 drivers/net/ethernet/intel/ice/ice_dcb_lib.c 			tx_ring = vsi->tx_rings[i];
tx_ring           134 drivers/net/ethernet/intel/ice/ice_dcb_lib.c 			tx_ring->dcb_tc = n;
tx_ring           583 drivers/net/ethernet/intel/ice/ice_dcb_lib.c ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
tx_ring           588 drivers/net/ethernet/intel/ice/ice_dcb_lib.c 	if (!test_bit(ICE_FLAG_DCB_ENA, tx_ring->vsi->back->flags))
tx_ring            20 drivers/net/ethernet/intel/ice/ice_dcb_lib.h ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
tx_ring            52 drivers/net/ethernet/intel/ice/ice_dcb_lib.h ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
tx_ring           555 drivers/net/ethernet/intel/ice/ice_ethtool.c static int ice_diag_send(struct ice_ring *tx_ring, u8 *data, u16 size)
tx_ring           562 drivers/net/ethernet/intel/ice/ice_ethtool.c 	tx_desc = ICE_TX_DESC(tx_ring, tx_ring->next_to_use);
tx_ring           563 drivers/net/ethernet/intel/ice/ice_ethtool.c 	tx_buf = &tx_ring->tx_buf[tx_ring->next_to_use];
tx_ring           565 drivers/net/ethernet/intel/ice/ice_ethtool.c 	dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
tx_ring           566 drivers/net/ethernet/intel/ice/ice_ethtool.c 	if (dma_mapping_error(tx_ring->dev, dma))
tx_ring           587 drivers/net/ethernet/intel/ice/ice_ethtool.c 	tx_ring->next_to_use++;
tx_ring           588 drivers/net/ethernet/intel/ice/ice_ethtool.c 	if (tx_ring->next_to_use >= tx_ring->count)
tx_ring           589 drivers/net/ethernet/intel/ice/ice_ethtool.c 		tx_ring->next_to_use = 0;
tx_ring           591 drivers/net/ethernet/intel/ice/ice_ethtool.c 	writel_relaxed(tx_ring->next_to_use, tx_ring->tail);
tx_ring           595 drivers/net/ethernet/intel/ice/ice_ethtool.c 	dma_unmap_single(tx_ring->dev, dma, size, DMA_TO_DEVICE);
tx_ring           647 drivers/net/ethernet/intel/ice/ice_ethtool.c 	struct ice_ring *tx_ring, *rx_ring;
tx_ring           663 drivers/net/ethernet/intel/ice/ice_ethtool.c 	tx_ring = test_vsi->tx_rings[0];
tx_ring           699 drivers/net/ethernet/intel/ice/ice_ethtool.c 	num_frames = min_t(int, tx_ring->count, 32);
tx_ring           701 drivers/net/ethernet/intel/ice/ice_ethtool.c 		if (ice_diag_send(tx_ring, tx_frame, ICE_LB_FRAME_SIZE)) {
tx_ring          1377 drivers/net/ethernet/intel/ice/ice_lib.c 			struct ice_ring *tx_ring = vsi->tx_rings[q_id];
tx_ring          1379 drivers/net/ethernet/intel/ice/ice_lib.c 			tx_ring->q_vector = q_vector;
tx_ring          1380 drivers/net/ethernet/intel/ice/ice_lib.c 			tx_ring->next = q_vector->tx.ring;
tx_ring          1381 drivers/net/ethernet/intel/ice/ice_lib.c 			q_vector->tx.ring = tx_ring;
tx_ring            94 drivers/net/ethernet/intel/ice/ice_main.c 		struct ice_ring *tx_ring = vsi->tx_rings[i];
tx_ring            96 drivers/net/ethernet/intel/ice/ice_main.c 		if (tx_ring && tx_ring->desc) {
tx_ring           104 drivers/net/ethernet/intel/ice/ice_main.c 			packets = tx_ring->stats.pkts & INT_MAX;
tx_ring           105 drivers/net/ethernet/intel/ice/ice_main.c 			if (tx_ring->tx_stats.prev_pkt == packets) {
tx_ring           107 drivers/net/ethernet/intel/ice/ice_main.c 				ice_trigger_sw_intr(hw, tx_ring->q_vector);
tx_ring           115 drivers/net/ethernet/intel/ice/ice_main.c 			tx_ring->tx_stats.prev_pkt =
tx_ring           116 drivers/net/ethernet/intel/ice/ice_main.c 			    ice_get_tx_pending(tx_ring) ? packets : -1;
tx_ring          4643 drivers/net/ethernet/intel/ice/ice_main.c 	struct ice_ring *tx_ring = NULL;
tx_ring          4673 drivers/net/ethernet/intel/ice/ice_main.c 					tx_ring = vsi->tx_rings[i];
tx_ring          4686 drivers/net/ethernet/intel/ice/ice_main.c 	if (tx_ring) {
tx_ring          4693 drivers/net/ethernet/intel/ice/ice_main.c 		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
tx_ring          4696 drivers/net/ethernet/intel/ice/ice_main.c 			    vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
tx_ring          4697 drivers/net/ethernet/intel/ice/ice_main.c 			    head, tx_ring->next_to_use, val);
tx_ring            50 drivers/net/ethernet/intel/ice/ice_txrx.c void ice_clean_tx_ring(struct ice_ring *tx_ring)
tx_ring            55 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (!tx_ring->tx_buf)
tx_ring            59 drivers/net/ethernet/intel/ice/ice_txrx.c 	for (i = 0; i < tx_ring->count; i++)
tx_ring            60 drivers/net/ethernet/intel/ice/ice_txrx.c 		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
tx_ring            62 drivers/net/ethernet/intel/ice/ice_txrx.c 	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
tx_ring            65 drivers/net/ethernet/intel/ice/ice_txrx.c 	memset(tx_ring->desc, 0, tx_ring->size);
tx_ring            67 drivers/net/ethernet/intel/ice/ice_txrx.c 	tx_ring->next_to_use = 0;
tx_ring            68 drivers/net/ethernet/intel/ice/ice_txrx.c 	tx_ring->next_to_clean = 0;
tx_ring            70 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (!tx_ring->netdev)
tx_ring            74 drivers/net/ethernet/intel/ice/ice_txrx.c 	netdev_tx_reset_queue(txring_txq(tx_ring));
tx_ring            83 drivers/net/ethernet/intel/ice/ice_txrx.c void ice_free_tx_ring(struct ice_ring *tx_ring)
tx_ring            85 drivers/net/ethernet/intel/ice/ice_txrx.c 	ice_clean_tx_ring(tx_ring);
tx_ring            86 drivers/net/ethernet/intel/ice/ice_txrx.c 	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
tx_ring            87 drivers/net/ethernet/intel/ice/ice_txrx.c 	tx_ring->tx_buf = NULL;
tx_ring            89 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (tx_ring->desc) {
tx_ring            90 drivers/net/ethernet/intel/ice/ice_txrx.c 		dmam_free_coherent(tx_ring->dev, tx_ring->size,
tx_ring            91 drivers/net/ethernet/intel/ice/ice_txrx.c 				   tx_ring->desc, tx_ring->dma);
tx_ring            92 drivers/net/ethernet/intel/ice/ice_txrx.c 		tx_ring->desc = NULL;
tx_ring           103 drivers/net/ethernet/intel/ice/ice_txrx.c static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
tx_ring           107 drivers/net/ethernet/intel/ice/ice_txrx.c 	struct ice_vsi *vsi = tx_ring->vsi;
tx_ring           108 drivers/net/ethernet/intel/ice/ice_txrx.c 	s16 i = tx_ring->next_to_clean;
tx_ring           112 drivers/net/ethernet/intel/ice/ice_txrx.c 	tx_buf = &tx_ring->tx_buf[i];
tx_ring           113 drivers/net/ethernet/intel/ice/ice_txrx.c 	tx_desc = ICE_TX_DESC(tx_ring, i);
tx_ring           114 drivers/net/ethernet/intel/ice/ice_txrx.c 	i -= tx_ring->count;
tx_ring           143 drivers/net/ethernet/intel/ice/ice_txrx.c 		dma_unmap_single(tx_ring->dev,
tx_ring           158 drivers/net/ethernet/intel/ice/ice_txrx.c 				i -= tx_ring->count;
tx_ring           159 drivers/net/ethernet/intel/ice/ice_txrx.c 				tx_buf = tx_ring->tx_buf;
tx_ring           160 drivers/net/ethernet/intel/ice/ice_txrx.c 				tx_desc = ICE_TX_DESC(tx_ring, 0);
tx_ring           165 drivers/net/ethernet/intel/ice/ice_txrx.c 				dma_unmap_page(tx_ring->dev,
tx_ring           178 drivers/net/ethernet/intel/ice/ice_txrx.c 			i -= tx_ring->count;
tx_ring           179 drivers/net/ethernet/intel/ice/ice_txrx.c 			tx_buf = tx_ring->tx_buf;
tx_ring           180 drivers/net/ethernet/intel/ice/ice_txrx.c 			tx_desc = ICE_TX_DESC(tx_ring, 0);
tx_ring           189 drivers/net/ethernet/intel/ice/ice_txrx.c 	i += tx_ring->count;
tx_ring           190 drivers/net/ethernet/intel/ice/ice_txrx.c 	tx_ring->next_to_clean = i;
tx_ring           191 drivers/net/ethernet/intel/ice/ice_txrx.c 	u64_stats_update_begin(&tx_ring->syncp);
tx_ring           192 drivers/net/ethernet/intel/ice/ice_txrx.c 	tx_ring->stats.bytes += total_bytes;
tx_ring           193 drivers/net/ethernet/intel/ice/ice_txrx.c 	tx_ring->stats.pkts += total_pkts;
tx_ring           194 drivers/net/ethernet/intel/ice/ice_txrx.c 	u64_stats_update_end(&tx_ring->syncp);
tx_ring           195 drivers/net/ethernet/intel/ice/ice_txrx.c 	tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring           196 drivers/net/ethernet/intel/ice/ice_txrx.c 	tx_ring->q_vector->tx.total_pkts += total_pkts;
tx_ring           198 drivers/net/ethernet/intel/ice/ice_txrx.c 	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
tx_ring           202 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
tx_ring           203 drivers/net/ethernet/intel/ice/ice_txrx.c 		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
tx_ring           208 drivers/net/ethernet/intel/ice/ice_txrx.c 		if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring           209 drivers/net/ethernet/intel/ice/ice_txrx.c 					     tx_ring->q_index) &&
tx_ring           211 drivers/net/ethernet/intel/ice/ice_txrx.c 			netif_wake_subqueue(tx_ring->netdev,
tx_ring           212 drivers/net/ethernet/intel/ice/ice_txrx.c 					    tx_ring->q_index);
tx_ring           213 drivers/net/ethernet/intel/ice/ice_txrx.c 			++tx_ring->tx_stats.restart_q;
tx_ring           226 drivers/net/ethernet/intel/ice/ice_txrx.c int ice_setup_tx_ring(struct ice_ring *tx_ring)
tx_ring           228 drivers/net/ethernet/intel/ice/ice_txrx.c 	struct device *dev = tx_ring->dev;
tx_ring           234 drivers/net/ethernet/intel/ice/ice_txrx.c 	WARN_ON(tx_ring->tx_buf);
tx_ring           235 drivers/net/ethernet/intel/ice/ice_txrx.c 	tx_ring->tx_buf =
tx_ring           236 drivers/net/ethernet/intel/ice/ice_txrx.c 		devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
tx_ring           238 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (!tx_ring->tx_buf)
tx_ring           242 drivers/net/ethernet/intel/ice/ice_txrx.c 	tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
tx_ring           244 drivers/net/ethernet/intel/ice/ice_txrx.c 	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
tx_ring           246 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (!tx_ring->desc) {
tx_ring           248 drivers/net/ethernet/intel/ice/ice_txrx.c 			tx_ring->size);
tx_ring           252 drivers/net/ethernet/intel/ice/ice_txrx.c 	tx_ring->next_to_use = 0;
tx_ring           253 drivers/net/ethernet/intel/ice/ice_txrx.c 	tx_ring->next_to_clean = 0;
tx_ring           254 drivers/net/ethernet/intel/ice/ice_txrx.c 	tx_ring->tx_stats.prev_pkt = -1;
tx_ring           258 drivers/net/ethernet/intel/ice/ice_txrx.c 	devm_kfree(dev, tx_ring->tx_buf);
tx_ring           259 drivers/net/ethernet/intel/ice/ice_txrx.c 	tx_ring->tx_buf = NULL;
tx_ring          1548 drivers/net/ethernet/intel/ice/ice_txrx.c static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
tx_ring          1550 drivers/net/ethernet/intel/ice/ice_txrx.c 	netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
tx_ring          1555 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
tx_ring          1559 drivers/net/ethernet/intel/ice/ice_txrx.c 	netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
tx_ring          1560 drivers/net/ethernet/intel/ice/ice_txrx.c 	++tx_ring->tx_stats.restart_q;
tx_ring          1571 drivers/net/ethernet/intel/ice/ice_txrx.c static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
tx_ring          1573 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
tx_ring          1576 drivers/net/ethernet/intel/ice/ice_txrx.c 	return __ice_maybe_stop_tx(tx_ring, size);
tx_ring          1590 drivers/net/ethernet/intel/ice/ice_txrx.c ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
tx_ring          1594 drivers/net/ethernet/intel/ice/ice_txrx.c 	u16 i = tx_ring->next_to_use;
tx_ring          1610 drivers/net/ethernet/intel/ice/ice_txrx.c 	tx_desc = ICE_TX_DESC(tx_ring, i);
tx_ring          1618 drivers/net/ethernet/intel/ice/ice_txrx.c 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
tx_ring          1625 drivers/net/ethernet/intel/ice/ice_txrx.c 		if (dma_mapping_error(tx_ring->dev, dma))
tx_ring          1646 drivers/net/ethernet/intel/ice/ice_txrx.c 			if (i == tx_ring->count) {
tx_ring          1647 drivers/net/ethernet/intel/ice/ice_txrx.c 				tx_desc = ICE_TX_DESC(tx_ring, 0);
tx_ring          1667 drivers/net/ethernet/intel/ice/ice_txrx.c 		if (i == tx_ring->count) {
tx_ring          1668 drivers/net/ethernet/intel/ice/ice_txrx.c 			tx_desc = ICE_TX_DESC(tx_ring, 0);
tx_ring          1675 drivers/net/ethernet/intel/ice/ice_txrx.c 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
tx_ring          1678 drivers/net/ethernet/intel/ice/ice_txrx.c 		tx_buf = &tx_ring->tx_buf[i];
tx_ring          1682 drivers/net/ethernet/intel/ice/ice_txrx.c 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
tx_ring          1688 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (i == tx_ring->count)
tx_ring          1707 drivers/net/ethernet/intel/ice/ice_txrx.c 	tx_ring->next_to_use = i;
tx_ring          1709 drivers/net/ethernet/intel/ice/ice_txrx.c 	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
tx_ring          1712 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
tx_ring          1713 drivers/net/ethernet/intel/ice/ice_txrx.c 		writel(i, tx_ring->tail);
tx_ring          1721 drivers/net/ethernet/intel/ice/ice_txrx.c 		tx_buf = &tx_ring->tx_buf[i];
tx_ring          1722 drivers/net/ethernet/intel/ice/ice_txrx.c 		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
tx_ring          1726 drivers/net/ethernet/intel/ice/ice_txrx.c 			i = tx_ring->count;
tx_ring          1730 drivers/net/ethernet/intel/ice/ice_txrx.c 	tx_ring->next_to_use = i;
tx_ring          1844 drivers/net/ethernet/intel/ice/ice_txrx.c ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
tx_ring          1850 drivers/net/ethernet/intel/ice/ice_txrx.c 	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
tx_ring          1881 drivers/net/ethernet/intel/ice/ice_txrx.c 	return ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
tx_ring          2107 drivers/net/ethernet/intel/ice/ice_txrx.c ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
tx_ring          2110 drivers/net/ethernet/intel/ice/ice_txrx.c 	struct ice_vsi *vsi = tx_ring->vsi;
tx_ring          2120 drivers/net/ethernet/intel/ice/ice_txrx.c 		tx_ring->tx_stats.tx_linearize++;
tx_ring          2129 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
tx_ring          2131 drivers/net/ethernet/intel/ice/ice_txrx.c 		tx_ring->tx_stats.tx_busy++;
tx_ring          2135 drivers/net/ethernet/intel/ice/ice_txrx.c 	offload.tx_ring = tx_ring;
tx_ring          2138 drivers/net/ethernet/intel/ice/ice_txrx.c 	first = &tx_ring->tx_buf[tx_ring->next_to_use];
tx_ring          2145 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (ice_tx_prepare_vlan_flags(tx_ring, first))
tx_ring          2168 drivers/net/ethernet/intel/ice/ice_txrx.c 		int i = tx_ring->next_to_use;
tx_ring          2171 drivers/net/ethernet/intel/ice/ice_txrx.c 		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
tx_ring          2173 drivers/net/ethernet/intel/ice/ice_txrx.c 		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
tx_ring          2182 drivers/net/ethernet/intel/ice/ice_txrx.c 	ice_tx_map(tx_ring, first, &offload);
tx_ring          2201 drivers/net/ethernet/intel/ice/ice_txrx.c 	struct ice_ring *tx_ring;
tx_ring          2203 drivers/net/ethernet/intel/ice/ice_txrx.c 	tx_ring = vsi->tx_rings[skb->queue_mapping];
tx_ring          2211 drivers/net/ethernet/intel/ice/ice_txrx.c 	return ice_xmit_frame_ring(skb, tx_ring);
tx_ring            67 drivers/net/ethernet/intel/ice/ice_txrx.h 	struct ice_ring *tx_ring;
tx_ring           235 drivers/net/ethernet/intel/ice/ice_txrx.h void ice_clean_tx_ring(struct ice_ring *tx_ring);
tx_ring           237 drivers/net/ethernet/intel/ice/ice_txrx.h int ice_setup_tx_ring(struct ice_ring *tx_ring);
tx_ring           239 drivers/net/ethernet/intel/ice/ice_txrx.h void ice_free_tx_ring(struct ice_ring *tx_ring);
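
The ice_setup_tx_ring() lines above (and the igb/igc setup_tx_resources() entries below) size two allocations per ring: a host-only buffer-info array and a descriptor area rounded up to a 4 KiB multiple before the coherent DMA allocation. The userspace sketch below reproduces only that sizing arithmetic; the struct layouts and names are placeholders, and calloc() stands in for the DMA allocator.

/* Userspace sketch of the ring-sizing arithmetic in the setup paths above. */
#include <stdio.h>
#include <stdlib.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

struct model_tx_desc { unsigned long long addr, cmd; };       /* 16 bytes */
struct model_tx_buf  { void *skb; unsigned int len, dma; };

struct model_ring {
	unsigned int count;
	struct model_tx_buf *tx_buf;   /* host-only bookkeeping        */
	void *desc;                    /* would be dma_alloc_coherent() */
	size_t size;                   /* bytes handed to the device    */
};

static int model_setup_tx_ring(struct model_ring *r)
{
	r->tx_buf = calloc(r->count, sizeof(*r->tx_buf));
	if (!r->tx_buf)
		return -1;

	/* Round the descriptor area up to a page multiple, as the
	 * drivers do with ALIGN(count * sizeof(desc), 4096). */
	r->size = ALIGN_UP(r->count * sizeof(struct model_tx_desc), 4096);
	r->desc = calloc(1, r->size);
	if (!r->desc) {
		free(r->tx_buf);
		r->tx_buf = NULL;
		return -1;
	}
	return 0;
}

int main(void)
{
	struct model_ring r = { .count = 256 };

	if (model_setup_tx_ring(&r) == 0)
		printf("count=%u desc_bytes=%zu\n", r.count, r.size);
	free(r.desc);
	free(r.tx_buf);
	return 0;
}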
tx_ring           487 drivers/net/ethernet/intel/igb/igb.h 	struct igb_ring *tx_ring[16];
tx_ring           720 drivers/net/ethernet/intel/igb/igb.h static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
tx_ring           722 drivers/net/ethernet/intel/igb/igb.h 	return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
tx_ring           899 drivers/net/ethernet/intel/igb/igb_ethtool.c 			adapter->tx_ring[i]->count = new_tx_count;
tx_ring           927 drivers/net/ethernet/intel/igb/igb_ethtool.c 			memcpy(&temp_ring[i], adapter->tx_ring[i],
tx_ring           942 drivers/net/ethernet/intel/igb/igb_ethtool.c 			igb_free_tx_resources(adapter->tx_ring[i]);
tx_ring           944 drivers/net/ethernet/intel/igb/igb_ethtool.c 			memcpy(adapter->tx_ring[i], &temp_ring[i],
tx_ring          1547 drivers/net/ethernet/intel/igb/igb_ethtool.c 	struct igb_ring *tx_ring = &adapter->test_tx_ring;
tx_ring          1553 drivers/net/ethernet/intel/igb/igb_ethtool.c 	tx_ring->count = IGB_DEFAULT_TXD;
tx_ring          1554 drivers/net/ethernet/intel/igb/igb_ethtool.c 	tx_ring->dev = &adapter->pdev->dev;
tx_ring          1555 drivers/net/ethernet/intel/igb/igb_ethtool.c 	tx_ring->netdev = adapter->netdev;
tx_ring          1556 drivers/net/ethernet/intel/igb/igb_ethtool.c 	tx_ring->reg_idx = adapter->vfs_allocated_count;
tx_ring          1558 drivers/net/ethernet/intel/igb/igb_ethtool.c 	if (igb_setup_tx_resources(tx_ring)) {
tx_ring          1564 drivers/net/ethernet/intel/igb/igb_ethtool.c 	igb_configure_tx_ring(adapter, tx_ring);
tx_ring          1802 drivers/net/ethernet/intel/igb/igb_ethtool.c 				struct igb_ring *tx_ring,
tx_ring          1812 drivers/net/ethernet/intel/igb/igb_ethtool.c 	tx_ntc = tx_ring->next_to_clean;
tx_ring          1836 drivers/net/ethernet/intel/igb/igb_ethtool.c 		tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
tx_ring          1842 drivers/net/ethernet/intel/igb/igb_ethtool.c 		dma_unmap_single(tx_ring->dev,
tx_ring          1853 drivers/net/ethernet/intel/igb/igb_ethtool.c 		if (tx_ntc == tx_ring->count)
tx_ring          1860 drivers/net/ethernet/intel/igb/igb_ethtool.c 	netdev_tx_reset_queue(txring_txq(tx_ring));
tx_ring          1865 drivers/net/ethernet/intel/igb/igb_ethtool.c 	tx_ring->next_to_clean = tx_ntc;
tx_ring          1872 drivers/net/ethernet/intel/igb/igb_ethtool.c 	struct igb_ring *tx_ring = &adapter->test_tx_ring;
tx_ring          1894 drivers/net/ethernet/intel/igb/igb_ethtool.c 	if (rx_ring->count <= tx_ring->count)
tx_ring          1895 drivers/net/ethernet/intel/igb/igb_ethtool.c 		lc = ((tx_ring->count / 64) * 2) + 1;
tx_ring          1906 drivers/net/ethernet/intel/igb/igb_ethtool.c 			tx_ret_val = igb_xmit_frame_ring(skb, tx_ring);
tx_ring          1919 drivers/net/ethernet/intel/igb/igb_ethtool.c 		good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
tx_ring          2317 drivers/net/ethernet/intel/igb/igb_ethtool.c 		ring = adapter->tx_ring[j];
tx_ring           363 drivers/net/ethernet/intel/igb/igb_main.c 	struct igb_ring *tx_ring;
tx_ring           398 drivers/net/ethernet/intel/igb/igb_main.c 		tx_ring = adapter->tx_ring[n];
tx_ring           399 drivers/net/ethernet/intel/igb/igb_main.c 		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
tx_ring           401 drivers/net/ethernet/intel/igb/igb_main.c 			n, tx_ring->next_to_use, tx_ring->next_to_clean,
tx_ring           426 drivers/net/ethernet/intel/igb/igb_main.c 		tx_ring = adapter->tx_ring[n];
tx_ring           428 drivers/net/ethernet/intel/igb/igb_main.c 		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
tx_ring           432 drivers/net/ethernet/intel/igb/igb_main.c 		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
tx_ring           435 drivers/net/ethernet/intel/igb/igb_main.c 			tx_desc = IGB_TX_DESC(tx_ring, i);
tx_ring           436 drivers/net/ethernet/intel/igb/igb_main.c 			buffer_info = &tx_ring->tx_buffer_info[i];
tx_ring           438 drivers/net/ethernet/intel/igb/igb_main.c 			if (i == tx_ring->next_to_use &&
tx_ring           439 drivers/net/ethernet/intel/igb/igb_main.c 			    i == tx_ring->next_to_clean)
tx_ring           441 drivers/net/ethernet/intel/igb/igb_main.c 			else if (i == tx_ring->next_to_use)
tx_ring           443 drivers/net/ethernet/intel/igb/igb_main.c 			else if (i == tx_ring->next_to_clean)
tx_ring           735 drivers/net/ethernet/intel/igb/igb_main.c 			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
tx_ring          1032 drivers/net/ethernet/intel/igb/igb_main.c 		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
tx_ring          1275 drivers/net/ethernet/intel/igb/igb_main.c 		adapter->tx_ring[txr_idx] = ring;
tx_ring          1659 drivers/net/ethernet/intel/igb/igb_main.c 		if (adapter->tx_ring[i]->cbs_enable)
tx_ring          1671 drivers/net/ethernet/intel/igb/igb_main.c 		if (adapter->tx_ring[i]->launchtime_enable)
tx_ring          1690 drivers/net/ethernet/intel/igb/igb_main.c 	struct igb_ring *ring = adapter->tx_ring[queue];
tx_ring          1871 drivers/net/ethernet/intel/igb/igb_main.c 	ring = adapter->tx_ring[queue];
tx_ring          1886 drivers/net/ethernet/intel/igb/igb_main.c 	ring = adapter->tx_ring[queue];
tx_ring          4038 drivers/net/ethernet/intel/igb/igb_main.c int igb_setup_tx_resources(struct igb_ring *tx_ring)
tx_ring          4040 drivers/net/ethernet/intel/igb/igb_main.c 	struct device *dev = tx_ring->dev;
tx_ring          4043 drivers/net/ethernet/intel/igb/igb_main.c 	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
tx_ring          4045 drivers/net/ethernet/intel/igb/igb_main.c 	tx_ring->tx_buffer_info = vmalloc(size);
tx_ring          4046 drivers/net/ethernet/intel/igb/igb_main.c 	if (!tx_ring->tx_buffer_info)
tx_ring          4050 drivers/net/ethernet/intel/igb/igb_main.c 	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring          4051 drivers/net/ethernet/intel/igb/igb_main.c 	tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring          4053 drivers/net/ethernet/intel/igb/igb_main.c 	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
tx_ring          4054 drivers/net/ethernet/intel/igb/igb_main.c 					   &tx_ring->dma, GFP_KERNEL);
tx_ring          4055 drivers/net/ethernet/intel/igb/igb_main.c 	if (!tx_ring->desc)
tx_ring          4058 drivers/net/ethernet/intel/igb/igb_main.c 	tx_ring->next_to_use = 0;
tx_ring          4059 drivers/net/ethernet/intel/igb/igb_main.c 	tx_ring->next_to_clean = 0;
tx_ring          4064 drivers/net/ethernet/intel/igb/igb_main.c 	vfree(tx_ring->tx_buffer_info);
tx_ring          4065 drivers/net/ethernet/intel/igb/igb_main.c 	tx_ring->tx_buffer_info = NULL;
tx_ring          4083 drivers/net/ethernet/intel/igb/igb_main.c 		err = igb_setup_tx_resources(adapter->tx_ring[i]);
tx_ring          4088 drivers/net/ethernet/intel/igb/igb_main.c 				igb_free_tx_resources(adapter->tx_ring[i]);
tx_ring          4172 drivers/net/ethernet/intel/igb/igb_main.c 		wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0);
tx_ring          4178 drivers/net/ethernet/intel/igb/igb_main.c 		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
tx_ring          4584 drivers/net/ethernet/intel/igb/igb_main.c void igb_free_tx_resources(struct igb_ring *tx_ring)
tx_ring          4586 drivers/net/ethernet/intel/igb/igb_main.c 	igb_clean_tx_ring(tx_ring);
tx_ring          4588 drivers/net/ethernet/intel/igb/igb_main.c 	vfree(tx_ring->tx_buffer_info);
tx_ring          4589 drivers/net/ethernet/intel/igb/igb_main.c 	tx_ring->tx_buffer_info = NULL;
tx_ring          4592 drivers/net/ethernet/intel/igb/igb_main.c 	if (!tx_ring->desc)
tx_ring          4595 drivers/net/ethernet/intel/igb/igb_main.c 	dma_free_coherent(tx_ring->dev, tx_ring->size,
tx_ring          4596 drivers/net/ethernet/intel/igb/igb_main.c 			  tx_ring->desc, tx_ring->dma);
tx_ring          4598 drivers/net/ethernet/intel/igb/igb_main.c 	tx_ring->desc = NULL;
tx_ring          4612 drivers/net/ethernet/intel/igb/igb_main.c 		if (adapter->tx_ring[i])
tx_ring          4613 drivers/net/ethernet/intel/igb/igb_main.c 			igb_free_tx_resources(adapter->tx_ring[i]);
tx_ring          4620 drivers/net/ethernet/intel/igb/igb_main.c static void igb_clean_tx_ring(struct igb_ring *tx_ring)
tx_ring          4622 drivers/net/ethernet/intel/igb/igb_main.c 	u16 i = tx_ring->next_to_clean;
tx_ring          4623 drivers/net/ethernet/intel/igb/igb_main.c 	struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
tx_ring          4625 drivers/net/ethernet/intel/igb/igb_main.c 	while (i != tx_ring->next_to_use) {
tx_ring          4632 drivers/net/ethernet/intel/igb/igb_main.c 		dma_unmap_single(tx_ring->dev,
tx_ring          4639 drivers/net/ethernet/intel/igb/igb_main.c 		tx_desc = IGB_TX_DESC(tx_ring, i);
tx_ring          4646 drivers/net/ethernet/intel/igb/igb_main.c 			if (unlikely(i == tx_ring->count)) {
tx_ring          4648 drivers/net/ethernet/intel/igb/igb_main.c 				tx_buffer = tx_ring->tx_buffer_info;
tx_ring          4649 drivers/net/ethernet/intel/igb/igb_main.c 				tx_desc = IGB_TX_DESC(tx_ring, 0);
tx_ring          4654 drivers/net/ethernet/intel/igb/igb_main.c 				dma_unmap_page(tx_ring->dev,
tx_ring          4663 drivers/net/ethernet/intel/igb/igb_main.c 		if (unlikely(i == tx_ring->count)) {
tx_ring          4665 drivers/net/ethernet/intel/igb/igb_main.c 			tx_buffer = tx_ring->tx_buffer_info;
tx_ring          4670 drivers/net/ethernet/intel/igb/igb_main.c 	netdev_tx_reset_queue(txring_txq(tx_ring));
tx_ring          4673 drivers/net/ethernet/intel/igb/igb_main.c 	tx_ring->next_to_use = 0;
tx_ring          4674 drivers/net/ethernet/intel/igb/igb_main.c 	tx_ring->next_to_clean = 0;
tx_ring          4686 drivers/net/ethernet/intel/igb/igb_main.c 		if (adapter->tx_ring[i])
tx_ring          4687 drivers/net/ethernet/intel/igb/igb_main.c 			igb_clean_tx_ring(adapter->tx_ring[i]);
tx_ring          5391 drivers/net/ethernet/intel/igb/igb_main.c 		struct igb_ring *tx_ring = adapter->tx_ring[i];
tx_ring          5398 drivers/net/ethernet/intel/igb/igb_main.c 			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
tx_ring          5407 drivers/net/ethernet/intel/igb/igb_main.c 		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
tx_ring          5649 drivers/net/ethernet/intel/igb/igb_main.c static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
tx_ring          5655 drivers/net/ethernet/intel/igb/igb_main.c 	u16 i = tx_ring->next_to_use;
tx_ring          5658 drivers/net/ethernet/intel/igb/igb_main.c 	context_desc = IGB_TX_CTXTDESC(tx_ring, i);
tx_ring          5661 drivers/net/ethernet/intel/igb/igb_main.c 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
tx_ring          5667 drivers/net/ethernet/intel/igb/igb_main.c 	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
tx_ring          5668 drivers/net/ethernet/intel/igb/igb_main.c 		mss_l4len_idx |= tx_ring->reg_idx << 4;
tx_ring          5677 drivers/net/ethernet/intel/igb/igb_main.c 	if (tx_ring->launchtime_enable) {
tx_ring          5686 drivers/net/ethernet/intel/igb/igb_main.c static int igb_tso(struct igb_ring *tx_ring,
tx_ring          5766 drivers/net/ethernet/intel/igb/igb_main.c 	igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
tx_ring          5781 drivers/net/ethernet/intel/igb/igb_main.c static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
tx_ring          5790 drivers/net/ethernet/intel/igb/igb_main.c 		    !tx_ring->launchtime_enable)
tx_ring          5824 drivers/net/ethernet/intel/igb/igb_main.c 	igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
tx_ring          5857 drivers/net/ethernet/intel/igb/igb_main.c static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
tx_ring          5864 drivers/net/ethernet/intel/igb/igb_main.c 	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
tx_ring          5865 drivers/net/ethernet/intel/igb/igb_main.c 		olinfo_status |= tx_ring->reg_idx << 4;
tx_ring          5880 drivers/net/ethernet/intel/igb/igb_main.c static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
tx_ring          5882 drivers/net/ethernet/intel/igb/igb_main.c 	struct net_device *netdev = tx_ring->netdev;
tx_ring          5884 drivers/net/ethernet/intel/igb/igb_main.c 	netif_stop_subqueue(netdev, tx_ring->queue_index);
tx_ring          5895 drivers/net/ethernet/intel/igb/igb_main.c 	if (igb_desc_unused(tx_ring) < size)
tx_ring          5899 drivers/net/ethernet/intel/igb/igb_main.c 	netif_wake_subqueue(netdev, tx_ring->queue_index);
tx_ring          5901 drivers/net/ethernet/intel/igb/igb_main.c 	u64_stats_update_begin(&tx_ring->tx_syncp2);
tx_ring          5902 drivers/net/ethernet/intel/igb/igb_main.c 	tx_ring->tx_stats.restart_queue2++;
tx_ring          5903 drivers/net/ethernet/intel/igb/igb_main.c 	u64_stats_update_end(&tx_ring->tx_syncp2);
tx_ring          5908 drivers/net/ethernet/intel/igb/igb_main.c static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
tx_ring          5910 drivers/net/ethernet/intel/igb/igb_main.c 	if (igb_desc_unused(tx_ring) >= size)
tx_ring          5912 drivers/net/ethernet/intel/igb/igb_main.c 	return __igb_maybe_stop_tx(tx_ring, size);
tx_ring          5915 drivers/net/ethernet/intel/igb/igb_main.c static int igb_tx_map(struct igb_ring *tx_ring,
tx_ring          5927 drivers/net/ethernet/intel/igb/igb_main.c 	u16 i = tx_ring->next_to_use;
tx_ring          5929 drivers/net/ethernet/intel/igb/igb_main.c 	tx_desc = IGB_TX_DESC(tx_ring, i);
tx_ring          5931 drivers/net/ethernet/intel/igb/igb_main.c 	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
tx_ring          5936 drivers/net/ethernet/intel/igb/igb_main.c 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
tx_ring          5941 drivers/net/ethernet/intel/igb/igb_main.c 		if (dma_mapping_error(tx_ring->dev, dma))
tx_ring          5956 drivers/net/ethernet/intel/igb/igb_main.c 			if (i == tx_ring->count) {
tx_ring          5957 drivers/net/ethernet/intel/igb/igb_main.c 				tx_desc = IGB_TX_DESC(tx_ring, 0);
tx_ring          5975 drivers/net/ethernet/intel/igb/igb_main.c 		if (i == tx_ring->count) {
tx_ring          5976 drivers/net/ethernet/intel/igb/igb_main.c 			tx_desc = IGB_TX_DESC(tx_ring, 0);
tx_ring          5984 drivers/net/ethernet/intel/igb/igb_main.c 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
tx_ring          5987 drivers/net/ethernet/intel/igb/igb_main.c 		tx_buffer = &tx_ring->tx_buffer_info[i];
tx_ring          5994 drivers/net/ethernet/intel/igb/igb_main.c 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
tx_ring          6014 drivers/net/ethernet/intel/igb/igb_main.c 	if (i == tx_ring->count)
tx_ring          6017 drivers/net/ethernet/intel/igb/igb_main.c 	tx_ring->next_to_use = i;
tx_ring          6020 drivers/net/ethernet/intel/igb/igb_main.c 	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
tx_ring          6022 drivers/net/ethernet/intel/igb/igb_main.c 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
tx_ring          6023 drivers/net/ethernet/intel/igb/igb_main.c 		writel(i, tx_ring->tail);
tx_ring          6028 drivers/net/ethernet/intel/igb/igb_main.c 	dev_err(tx_ring->dev, "TX DMA map failed\n");
tx_ring          6029 drivers/net/ethernet/intel/igb/igb_main.c 	tx_buffer = &tx_ring->tx_buffer_info[i];
tx_ring          6034 drivers/net/ethernet/intel/igb/igb_main.c 			dma_unmap_page(tx_ring->dev,
tx_ring          6041 drivers/net/ethernet/intel/igb/igb_main.c 			i += tx_ring->count;
tx_ring          6042 drivers/net/ethernet/intel/igb/igb_main.c 		tx_buffer = &tx_ring->tx_buffer_info[i];
tx_ring          6046 drivers/net/ethernet/intel/igb/igb_main.c 		dma_unmap_single(tx_ring->dev,
tx_ring          6055 drivers/net/ethernet/intel/igb/igb_main.c 	tx_ring->next_to_use = i;
tx_ring          6061 drivers/net/ethernet/intel/igb/igb_main.c 				struct igb_ring *tx_ring)
tx_ring          6081 drivers/net/ethernet/intel/igb/igb_main.c 	if (igb_maybe_stop_tx(tx_ring, count + 3)) {
tx_ring          6087 drivers/net/ethernet/intel/igb/igb_main.c 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
tx_ring          6093 drivers/net/ethernet/intel/igb/igb_main.c 		struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
tx_ring          6119 drivers/net/ethernet/intel/igb/igb_main.c 	tso = igb_tso(tx_ring, first, &hdr_len);
tx_ring          6123 drivers/net/ethernet/intel/igb/igb_main.c 		igb_tx_csum(tx_ring, first);
tx_ring          6125 drivers/net/ethernet/intel/igb/igb_main.c 	if (igb_tx_map(tx_ring, first, hdr_len))
tx_ring          6135 drivers/net/ethernet/intel/igb/igb_main.c 		struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
tx_ring          6155 drivers/net/ethernet/intel/igb/igb_main.c 	return adapter->tx_ring[r_idx];
tx_ring          6311 drivers/net/ethernet/intel/igb/igb_main.c 		struct igb_ring *ring = adapter->tx_ring[i];
tx_ring          6594 drivers/net/ethernet/intel/igb/igb_main.c 			      struct igb_ring *tx_ring,
tx_ring          6598 drivers/net/ethernet/intel/igb/igb_main.c 	u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
tx_ring          6611 drivers/net/ethernet/intel/igb/igb_main.c 	wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
tx_ring          7748 drivers/net/ethernet/intel/igb/igb_main.c 	struct igb_ring *tx_ring = q_vector->tx.ring;
tx_ring          7753 drivers/net/ethernet/intel/igb/igb_main.c 	unsigned int i = tx_ring->next_to_clean;
tx_ring          7758 drivers/net/ethernet/intel/igb/igb_main.c 	tx_buffer = &tx_ring->tx_buffer_info[i];
tx_ring          7759 drivers/net/ethernet/intel/igb/igb_main.c 	tx_desc = IGB_TX_DESC(tx_ring, i);
tx_ring          7760 drivers/net/ethernet/intel/igb/igb_main.c 	i -= tx_ring->count;
tx_ring          7787 drivers/net/ethernet/intel/igb/igb_main.c 		dma_unmap_single(tx_ring->dev,
tx_ring          7801 drivers/net/ethernet/intel/igb/igb_main.c 				i -= tx_ring->count;
tx_ring          7802 drivers/net/ethernet/intel/igb/igb_main.c 				tx_buffer = tx_ring->tx_buffer_info;
tx_ring          7803 drivers/net/ethernet/intel/igb/igb_main.c 				tx_desc = IGB_TX_DESC(tx_ring, 0);
tx_ring          7808 drivers/net/ethernet/intel/igb/igb_main.c 				dma_unmap_page(tx_ring->dev,
tx_ring          7821 drivers/net/ethernet/intel/igb/igb_main.c 			i -= tx_ring->count;
tx_ring          7822 drivers/net/ethernet/intel/igb/igb_main.c 			tx_buffer = tx_ring->tx_buffer_info;
tx_ring          7823 drivers/net/ethernet/intel/igb/igb_main.c 			tx_desc = IGB_TX_DESC(tx_ring, 0);
tx_ring          7833 drivers/net/ethernet/intel/igb/igb_main.c 	netdev_tx_completed_queue(txring_txq(tx_ring),
tx_ring          7835 drivers/net/ethernet/intel/igb/igb_main.c 	i += tx_ring->count;
tx_ring          7836 drivers/net/ethernet/intel/igb/igb_main.c 	tx_ring->next_to_clean = i;
tx_ring          7837 drivers/net/ethernet/intel/igb/igb_main.c 	u64_stats_update_begin(&tx_ring->tx_syncp);
tx_ring          7838 drivers/net/ethernet/intel/igb/igb_main.c 	tx_ring->tx_stats.bytes += total_bytes;
tx_ring          7839 drivers/net/ethernet/intel/igb/igb_main.c 	tx_ring->tx_stats.packets += total_packets;
tx_ring          7840 drivers/net/ethernet/intel/igb/igb_main.c 	u64_stats_update_end(&tx_ring->tx_syncp);
tx_ring          7844 drivers/net/ethernet/intel/igb/igb_main.c 	if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
tx_ring          7850 drivers/net/ethernet/intel/igb/igb_main.c 		clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
tx_ring          7857 drivers/net/ethernet/intel/igb/igb_main.c 			dev_err(tx_ring->dev,
tx_ring          7869 drivers/net/ethernet/intel/igb/igb_main.c 				tx_ring->queue_index,
tx_ring          7870 drivers/net/ethernet/intel/igb/igb_main.c 				rd32(E1000_TDH(tx_ring->reg_idx)),
tx_ring          7871 drivers/net/ethernet/intel/igb/igb_main.c 				readl(tx_ring->tail),
tx_ring          7872 drivers/net/ethernet/intel/igb/igb_main.c 				tx_ring->next_to_use,
tx_ring          7873 drivers/net/ethernet/intel/igb/igb_main.c 				tx_ring->next_to_clean,
tx_ring          7878 drivers/net/ethernet/intel/igb/igb_main.c 			netif_stop_subqueue(tx_ring->netdev,
tx_ring          7879 drivers/net/ethernet/intel/igb/igb_main.c 					    tx_ring->queue_index);
tx_ring          7888 drivers/net/ethernet/intel/igb/igb_main.c 	    netif_carrier_ok(tx_ring->netdev) &&
tx_ring          7889 drivers/net/ethernet/intel/igb/igb_main.c 	    igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
tx_ring          7894 drivers/net/ethernet/intel/igb/igb_main.c 		if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring          7895 drivers/net/ethernet/intel/igb/igb_main.c 					     tx_ring->queue_index) &&
tx_ring          7897 drivers/net/ethernet/intel/igb/igb_main.c 			netif_wake_subqueue(tx_ring->netdev,
tx_ring          7898 drivers/net/ethernet/intel/igb/igb_main.c 					    tx_ring->queue_index);
tx_ring          7900 drivers/net/ethernet/intel/igb/igb_main.c 			u64_stats_update_begin(&tx_ring->tx_syncp);
tx_ring          7901 drivers/net/ethernet/intel/igb/igb_main.c 			tx_ring->tx_stats.restart_queue++;
tx_ring          7902 drivers/net/ethernet/intel/igb/igb_main.c 			u64_stats_update_end(&tx_ring->tx_syncp);
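
The igb_clean_tx_irq() lines above (and the ice_clean_tx_irq() entries earlier) iterate the ring with an index biased by -count, so the wrap test is a cheap "did i reach zero" followed by re-biasing, and the final value is un-biased before being stored back into next_to_clean. The sketch below isolates just that indexing trick with invented names and no real buffers.

/* Userspace model of the signed-index wrap trick used by the Tx cleaners above. */
#include <stdio.h>

#define RING_COUNT 8

static unsigned short clean_n_descriptors(unsigned short next_to_clean,
					  int budget)
{
	short i = (short)next_to_clean;

	i -= RING_COUNT;                  /* bias: i is now negative      */
	while (budget--) {
		/* ...unmap/free the buffer at slot (i + RING_COUNT)... */
		i++;
		if (!i)                   /* crossed the end of the ring  */
			i -= RING_COUNT;  /* wrap back to slot 0          */
	}
	i += RING_COUNT;                  /* un-bias before publishing    */
	return (unsigned short)i;
}

int main(void)
{
	/* Start near the end of the ring and clean 5 entries: 6,7,0,1,2. */
	printf("next_to_clean=%u\n", clean_n_descriptors(6, 5));
	return 0;
}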
tx_ring           183 drivers/net/ethernet/intel/igbvf/ethtool.c 	struct igbvf_ring *tx_ring = adapter->tx_ring;
tx_ring           189 drivers/net/ethernet/intel/igbvf/ethtool.c 	ring->tx_pending = tx_ring->count;
tx_ring           211 drivers/net/ethernet/intel/igbvf/ethtool.c 	if ((new_tx_count == adapter->tx_ring->count) &&
tx_ring           221 drivers/net/ethernet/intel/igbvf/ethtool.c 		adapter->tx_ring->count = new_tx_count;
tx_ring           238 drivers/net/ethernet/intel/igbvf/ethtool.c 	if (new_tx_count != adapter->tx_ring->count) {
tx_ring           239 drivers/net/ethernet/intel/igbvf/ethtool.c 		memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring));
tx_ring           246 drivers/net/ethernet/intel/igbvf/ethtool.c 		igbvf_free_tx_resources(adapter->tx_ring);
tx_ring           248 drivers/net/ethernet/intel/igbvf/ethtool.c 		memcpy(adapter->tx_ring, temp_ring, sizeof(struct igbvf_ring));
tx_ring           182 drivers/net/ethernet/intel/igbvf/igbvf.h 	struct igbvf_ring *tx_ring /* One per active queue */
tx_ring           415 drivers/net/ethernet/intel/igbvf/netdev.c 			     struct igbvf_ring *tx_ring)
tx_ring           420 drivers/net/ethernet/intel/igbvf/netdev.c 	size = sizeof(struct igbvf_buffer) * tx_ring->count;
tx_ring           421 drivers/net/ethernet/intel/igbvf/netdev.c 	tx_ring->buffer_info = vzalloc(size);
tx_ring           422 drivers/net/ethernet/intel/igbvf/netdev.c 	if (!tx_ring->buffer_info)
tx_ring           426 drivers/net/ethernet/intel/igbvf/netdev.c 	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring           427 drivers/net/ethernet/intel/igbvf/netdev.c 	tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring           429 drivers/net/ethernet/intel/igbvf/netdev.c 	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
tx_ring           430 drivers/net/ethernet/intel/igbvf/netdev.c 					   &tx_ring->dma, GFP_KERNEL);
tx_ring           431 drivers/net/ethernet/intel/igbvf/netdev.c 	if (!tx_ring->desc)
tx_ring           434 drivers/net/ethernet/intel/igbvf/netdev.c 	tx_ring->adapter = adapter;
tx_ring           435 drivers/net/ethernet/intel/igbvf/netdev.c 	tx_ring->next_to_use = 0;
tx_ring           436 drivers/net/ethernet/intel/igbvf/netdev.c 	tx_ring->next_to_clean = 0;
tx_ring           440 drivers/net/ethernet/intel/igbvf/netdev.c 	vfree(tx_ring->buffer_info);
tx_ring           493 drivers/net/ethernet/intel/igbvf/netdev.c static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
tx_ring           495 drivers/net/ethernet/intel/igbvf/netdev.c 	struct igbvf_adapter *adapter = tx_ring->adapter;
tx_ring           500 drivers/net/ethernet/intel/igbvf/netdev.c 	if (!tx_ring->buffer_info)
tx_ring           504 drivers/net/ethernet/intel/igbvf/netdev.c 	for (i = 0; i < tx_ring->count; i++) {
tx_ring           505 drivers/net/ethernet/intel/igbvf/netdev.c 		buffer_info = &tx_ring->buffer_info[i];
tx_ring           509 drivers/net/ethernet/intel/igbvf/netdev.c 	size = sizeof(struct igbvf_buffer) * tx_ring->count;
tx_ring           510 drivers/net/ethernet/intel/igbvf/netdev.c 	memset(tx_ring->buffer_info, 0, size);
tx_ring           513 drivers/net/ethernet/intel/igbvf/netdev.c 	memset(tx_ring->desc, 0, tx_ring->size);
tx_ring           515 drivers/net/ethernet/intel/igbvf/netdev.c 	tx_ring->next_to_use = 0;
tx_ring           516 drivers/net/ethernet/intel/igbvf/netdev.c 	tx_ring->next_to_clean = 0;
tx_ring           518 drivers/net/ethernet/intel/igbvf/netdev.c 	writel(0, adapter->hw.hw_addr + tx_ring->head);
tx_ring           519 drivers/net/ethernet/intel/igbvf/netdev.c 	writel(0, adapter->hw.hw_addr + tx_ring->tail);
tx_ring           528 drivers/net/ethernet/intel/igbvf/netdev.c void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
tx_ring           530 drivers/net/ethernet/intel/igbvf/netdev.c 	struct pci_dev *pdev = tx_ring->adapter->pdev;
tx_ring           532 drivers/net/ethernet/intel/igbvf/netdev.c 	igbvf_clean_tx_ring(tx_ring);
tx_ring           534 drivers/net/ethernet/intel/igbvf/netdev.c 	vfree(tx_ring->buffer_info);
tx_ring           535 drivers/net/ethernet/intel/igbvf/netdev.c 	tx_ring->buffer_info = NULL;
tx_ring           537 drivers/net/ethernet/intel/igbvf/netdev.c 	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
tx_ring           538 drivers/net/ethernet/intel/igbvf/netdev.c 			  tx_ring->dma);
tx_ring           540 drivers/net/ethernet/intel/igbvf/netdev.c 	tx_ring->desc = NULL;
tx_ring           714 drivers/net/ethernet/intel/igbvf/netdev.c 	adapter->tx_ring->itr_range =
tx_ring           716 drivers/net/ethernet/intel/igbvf/netdev.c 					 adapter->tx_ring->itr_val,
tx_ring           722 drivers/net/ethernet/intel/igbvf/netdev.c 	    adapter->tx_ring->itr_range == lowest_latency)
tx_ring           723 drivers/net/ethernet/intel/igbvf/netdev.c 		adapter->tx_ring->itr_range = low_latency;
tx_ring           725 drivers/net/ethernet/intel/igbvf/netdev.c 	new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range);
tx_ring           727 drivers/net/ethernet/intel/igbvf/netdev.c 	if (new_itr != adapter->tx_ring->itr_val) {
tx_ring           728 drivers/net/ethernet/intel/igbvf/netdev.c 		u32 current_itr = adapter->tx_ring->itr_val;
tx_ring           736 drivers/net/ethernet/intel/igbvf/netdev.c 		adapter->tx_ring->itr_val = new_itr;
tx_ring           738 drivers/net/ethernet/intel/igbvf/netdev.c 		adapter->tx_ring->set_itr = 1;
tx_ring           769 drivers/net/ethernet/intel/igbvf/netdev.c static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
tx_ring           771 drivers/net/ethernet/intel/igbvf/netdev.c 	struct igbvf_adapter *adapter = tx_ring->adapter;
tx_ring           780 drivers/net/ethernet/intel/igbvf/netdev.c 	i = tx_ring->next_to_clean;
tx_ring           781 drivers/net/ethernet/intel/igbvf/netdev.c 	buffer_info = &tx_ring->buffer_info[i];
tx_ring           800 drivers/net/ethernet/intel/igbvf/netdev.c 			tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
tx_ring           820 drivers/net/ethernet/intel/igbvf/netdev.c 			if (i == tx_ring->count)
tx_ring           823 drivers/net/ethernet/intel/igbvf/netdev.c 			buffer_info = &tx_ring->buffer_info[i];
tx_ring           827 drivers/net/ethernet/intel/igbvf/netdev.c 	} while (count < tx_ring->count);
tx_ring           829 drivers/net/ethernet/intel/igbvf/netdev.c 	tx_ring->next_to_clean = i;
tx_ring           832 drivers/net/ethernet/intel/igbvf/netdev.c 	    igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
tx_ring           846 drivers/net/ethernet/intel/igbvf/netdev.c 	return count < tx_ring->count;
tx_ring           871 drivers/net/ethernet/intel/igbvf/netdev.c 	struct igbvf_ring *tx_ring = adapter->tx_ring;
tx_ring           873 drivers/net/ethernet/intel/igbvf/netdev.c 	if (tx_ring->set_itr) {
tx_ring           874 drivers/net/ethernet/intel/igbvf/netdev.c 		writel(tx_ring->itr_val,
tx_ring           875 drivers/net/ethernet/intel/igbvf/netdev.c 		       adapter->hw.hw_addr + tx_ring->itr_register);
tx_ring           876 drivers/net/ethernet/intel/igbvf/netdev.c 		adapter->tx_ring->set_itr = 0;
tx_ring           885 drivers/net/ethernet/intel/igbvf/netdev.c 	if (!igbvf_clean_tx_irq(tx_ring))
tx_ring           887 drivers/net/ethernet/intel/igbvf/netdev.c 		ew32(EICS, tx_ring->eims_value);
tx_ring           889 drivers/net/ethernet/intel/igbvf/netdev.c 		ew32(EIMS, tx_ring->eims_value);
tx_ring           959 drivers/net/ethernet/intel/igbvf/netdev.c 		adapter->tx_ring[tx_queue].eims_value = BIT(msix_vector);
tx_ring           975 drivers/net/ethernet/intel/igbvf/netdev.c 	struct igbvf_ring *tx_ring = adapter->tx_ring;
tx_ring           982 drivers/net/ethernet/intel/igbvf/netdev.c 	adapter->eims_enable_mask |= tx_ring->eims_value;
tx_ring           983 drivers/net/ethernet/intel/igbvf/netdev.c 	writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register);
tx_ring          1052 drivers/net/ethernet/intel/igbvf/netdev.c 		sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
tx_ring          1055 drivers/net/ethernet/intel/igbvf/netdev.c 		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
tx_ring          1060 drivers/net/ethernet/intel/igbvf/netdev.c 			  igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
tx_ring          1065 drivers/net/ethernet/intel/igbvf/netdev.c 	adapter->tx_ring->itr_register = E1000_EITR(vector);
tx_ring          1066 drivers/net/ethernet/intel/igbvf/netdev.c 	adapter->tx_ring->itr_val = adapter->current_itr;
tx_ring          1098 drivers/net/ethernet/intel/igbvf/netdev.c 	adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
tx_ring          1099 drivers/net/ethernet/intel/igbvf/netdev.c 	if (!adapter->tx_ring)
tx_ring          1104 drivers/net/ethernet/intel/igbvf/netdev.c 		kfree(adapter->tx_ring);
tx_ring          1284 drivers/net/ethernet/intel/igbvf/netdev.c 	struct igbvf_ring *tx_ring = adapter->tx_ring;
tx_ring          1295 drivers/net/ethernet/intel/igbvf/netdev.c 	ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
tx_ring          1296 drivers/net/ethernet/intel/igbvf/netdev.c 	tdba = tx_ring->dma;
tx_ring          1301 drivers/net/ethernet/intel/igbvf/netdev.c 	tx_ring->head = E1000_TDH(0);
tx_ring          1302 drivers/net/ethernet/intel/igbvf/netdev.c 	tx_ring->tail = E1000_TDT(0);
tx_ring          1598 drivers/net/ethernet/intel/igbvf/netdev.c 	igbvf_clean_tx_ring(adapter->tx_ring);
tx_ring          1713 drivers/net/ethernet/intel/igbvf/netdev.c 	err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
tx_ring          1752 drivers/net/ethernet/intel/igbvf/netdev.c 	igbvf_free_tx_resources(adapter->tx_ring);
tx_ring          1779 drivers/net/ethernet/intel/igbvf/netdev.c 	igbvf_free_tx_resources(adapter->tx_ring);
tx_ring          1913 drivers/net/ethernet/intel/igbvf/netdev.c 	struct igbvf_ring *tx_ring = adapter->tx_ring;
tx_ring          1943 drivers/net/ethernet/intel/igbvf/netdev.c 		tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
tx_ring          1944 drivers/net/ethernet/intel/igbvf/netdev.c 			      tx_ring->count);
tx_ring          1972 drivers/net/ethernet/intel/igbvf/netdev.c static void igbvf_tx_ctxtdesc(struct igbvf_ring *tx_ring, u32 vlan_macip_lens,
tx_ring          1977 drivers/net/ethernet/intel/igbvf/netdev.c 	u16 i = tx_ring->next_to_use;
tx_ring          1979 drivers/net/ethernet/intel/igbvf/netdev.c 	context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
tx_ring          1980 drivers/net/ethernet/intel/igbvf/netdev.c 	buffer_info = &tx_ring->buffer_info[i];
tx_ring          1983 drivers/net/ethernet/intel/igbvf/netdev.c 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
tx_ring          1997 drivers/net/ethernet/intel/igbvf/netdev.c static int igbvf_tso(struct igbvf_ring *tx_ring,
tx_ring          2066 drivers/net/ethernet/intel/igbvf/netdev.c 	igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
tx_ring          2080 drivers/net/ethernet/intel/igbvf/netdev.c static bool igbvf_tx_csum(struct igbvf_ring *tx_ring, struct sk_buff *skb,
tx_ring          2120 drivers/net/ethernet/intel/igbvf/netdev.c 	igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
tx_ring          2129 drivers/net/ethernet/intel/igbvf/netdev.c 	if (igbvf_desc_unused(adapter->tx_ring) >= size)
tx_ring          2141 drivers/net/ethernet/intel/igbvf/netdev.c 	if (igbvf_desc_unused(adapter->tx_ring) < size)
tx_ring          2154 drivers/net/ethernet/intel/igbvf/netdev.c 				   struct igbvf_ring *tx_ring,
tx_ring          2163 drivers/net/ethernet/intel/igbvf/netdev.c 	i = tx_ring->next_to_use;
tx_ring          2165 drivers/net/ethernet/intel/igbvf/netdev.c 	buffer_info = &tx_ring->buffer_info[i];
tx_ring          2181 drivers/net/ethernet/intel/igbvf/netdev.c 		if (i == tx_ring->count)
tx_ring          2187 drivers/net/ethernet/intel/igbvf/netdev.c 		buffer_info = &tx_ring->buffer_info[i];
tx_ring          2198 drivers/net/ethernet/intel/igbvf/netdev.c 	tx_ring->buffer_info[i].skb = skb;
tx_ring          2216 drivers/net/ethernet/intel/igbvf/netdev.c 			i += tx_ring->count;
tx_ring          2218 drivers/net/ethernet/intel/igbvf/netdev.c 		buffer_info = &tx_ring->buffer_info[i];
tx_ring          2226 drivers/net/ethernet/intel/igbvf/netdev.c 				      struct igbvf_ring *tx_ring,
tx_ring          2258 drivers/net/ethernet/intel/igbvf/netdev.c 	i = tx_ring->next_to_use;
tx_ring          2260 drivers/net/ethernet/intel/igbvf/netdev.c 		buffer_info = &tx_ring->buffer_info[i];
tx_ring          2261 drivers/net/ethernet/intel/igbvf/netdev.c 		tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
tx_ring          2267 drivers/net/ethernet/intel/igbvf/netdev.c 		if (i == tx_ring->count)
tx_ring          2279 drivers/net/ethernet/intel/igbvf/netdev.c 	tx_ring->buffer_info[first].next_to_watch = tx_desc;
tx_ring          2280 drivers/net/ethernet/intel/igbvf/netdev.c 	tx_ring->next_to_use = i;
tx_ring          2281 drivers/net/ethernet/intel/igbvf/netdev.c 	writel(i, adapter->hw.hw_addr + tx_ring->tail);
tx_ring          2286 drivers/net/ethernet/intel/igbvf/netdev.c 					     struct igbvf_ring *tx_ring)
tx_ring          2325 drivers/net/ethernet/intel/igbvf/netdev.c 	first = tx_ring->next_to_use;
tx_ring          2327 drivers/net/ethernet/intel/igbvf/netdev.c 	tso = igbvf_tso(tx_ring, skb, tx_flags, &hdr_len);
tx_ring          2335 drivers/net/ethernet/intel/igbvf/netdev.c 	else if (igbvf_tx_csum(tx_ring, skb, tx_flags, protocol) &&
tx_ring          2342 drivers/net/ethernet/intel/igbvf/netdev.c 	count = igbvf_tx_map_adv(adapter, tx_ring, skb);
tx_ring          2345 drivers/net/ethernet/intel/igbvf/netdev.c 		igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
tx_ring          2351 drivers/net/ethernet/intel/igbvf/netdev.c 		tx_ring->buffer_info[first].time_stamp = 0;
tx_ring          2352 drivers/net/ethernet/intel/igbvf/netdev.c 		tx_ring->next_to_use = first;
tx_ring          2362 drivers/net/ethernet/intel/igbvf/netdev.c 	struct igbvf_ring *tx_ring;
tx_ring          2369 drivers/net/ethernet/intel/igbvf/netdev.c 	tx_ring = &adapter->tx_ring[0];
tx_ring          2371 drivers/net/ethernet/intel/igbvf/netdev.c 	return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
tx_ring          2538 drivers/net/ethernet/intel/igbvf/netdev.c 	igbvf_clean_tx_irq(adapter->tx_ring);
tx_ring          2865 drivers/net/ethernet/intel/igbvf/netdev.c 	adapter->tx_ring->count = 1024;
tx_ring          2890 drivers/net/ethernet/intel/igbvf/netdev.c 	kfree(adapter->tx_ring);
tx_ring          2938 drivers/net/ethernet/intel/igbvf/netdev.c 	kfree(adapter->tx_ring);
tx_ring           363 drivers/net/ethernet/intel/igc/igc.h 	struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES];
tx_ring           461 drivers/net/ethernet/intel/igc/igc.h static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring)
tx_ring           463 drivers/net/ethernet/intel/igc/igc.h 	return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
tx_ring           514 drivers/net/ethernet/intel/igc/igc_ethtool.c 			adapter->tx_ring[i]->count = new_tx_count;
tx_ring           542 drivers/net/ethernet/intel/igc/igc_ethtool.c 			memcpy(&temp_ring[i], adapter->tx_ring[i],
tx_ring           557 drivers/net/ethernet/intel/igc/igc_ethtool.c 			igc_free_tx_resources(adapter->tx_ring[i]);
tx_ring           559 drivers/net/ethernet/intel/igc/igc_ethtool.c 			memcpy(adapter->tx_ring[i], &temp_ring[i],
tx_ring           749 drivers/net/ethernet/intel/igc/igc_ethtool.c 		ring = adapter->tx_ring[j];
tx_ring            54 drivers/net/ethernet/intel/igc/igc_main.c static void igc_clean_tx_ring(struct igc_ring *tx_ring);
tx_ring           184 drivers/net/ethernet/intel/igc/igc_main.c void igc_free_tx_resources(struct igc_ring *tx_ring)
tx_ring           186 drivers/net/ethernet/intel/igc/igc_main.c 	igc_clean_tx_ring(tx_ring);
tx_ring           188 drivers/net/ethernet/intel/igc/igc_main.c 	vfree(tx_ring->tx_buffer_info);
tx_ring           189 drivers/net/ethernet/intel/igc/igc_main.c 	tx_ring->tx_buffer_info = NULL;
tx_ring           192 drivers/net/ethernet/intel/igc/igc_main.c 	if (!tx_ring->desc)
tx_ring           195 drivers/net/ethernet/intel/igc/igc_main.c 	dma_free_coherent(tx_ring->dev, tx_ring->size,
tx_ring           196 drivers/net/ethernet/intel/igc/igc_main.c 			  tx_ring->desc, tx_ring->dma);
tx_ring           198 drivers/net/ethernet/intel/igc/igc_main.c 	tx_ring->desc = NULL;
tx_ring           212 drivers/net/ethernet/intel/igc/igc_main.c 		igc_free_tx_resources(adapter->tx_ring[i]);
tx_ring           219 drivers/net/ethernet/intel/igc/igc_main.c static void igc_clean_tx_ring(struct igc_ring *tx_ring)
tx_ring           221 drivers/net/ethernet/intel/igc/igc_main.c 	u16 i = tx_ring->next_to_clean;
tx_ring           222 drivers/net/ethernet/intel/igc/igc_main.c 	struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
tx_ring           224 drivers/net/ethernet/intel/igc/igc_main.c 	while (i != tx_ring->next_to_use) {
tx_ring           231 drivers/net/ethernet/intel/igc/igc_main.c 		dma_unmap_single(tx_ring->dev,
tx_ring           238 drivers/net/ethernet/intel/igc/igc_main.c 		tx_desc = IGC_TX_DESC(tx_ring, i);
tx_ring           245 drivers/net/ethernet/intel/igc/igc_main.c 			if (unlikely(i == tx_ring->count)) {
tx_ring           247 drivers/net/ethernet/intel/igc/igc_main.c 				tx_buffer = tx_ring->tx_buffer_info;
tx_ring           248 drivers/net/ethernet/intel/igc/igc_main.c 				tx_desc = IGC_TX_DESC(tx_ring, 0);
tx_ring           253 drivers/net/ethernet/intel/igc/igc_main.c 				dma_unmap_page(tx_ring->dev,
tx_ring           262 drivers/net/ethernet/intel/igc/igc_main.c 		if (unlikely(i == tx_ring->count)) {
tx_ring           264 drivers/net/ethernet/intel/igc/igc_main.c 			tx_buffer = tx_ring->tx_buffer_info;
tx_ring           269 drivers/net/ethernet/intel/igc/igc_main.c 	netdev_tx_reset_queue(txring_txq(tx_ring));
tx_ring           272 drivers/net/ethernet/intel/igc/igc_main.c 	tx_ring->next_to_use = 0;
tx_ring           273 drivers/net/ethernet/intel/igc/igc_main.c 	tx_ring->next_to_clean = 0;
tx_ring           285 drivers/net/ethernet/intel/igc/igc_main.c 		if (adapter->tx_ring[i])
tx_ring           286 drivers/net/ethernet/intel/igc/igc_main.c 			igc_clean_tx_ring(adapter->tx_ring[i]);
tx_ring           295 drivers/net/ethernet/intel/igc/igc_main.c int igc_setup_tx_resources(struct igc_ring *tx_ring)
tx_ring           297 drivers/net/ethernet/intel/igc/igc_main.c 	struct device *dev = tx_ring->dev;
tx_ring           300 drivers/net/ethernet/intel/igc/igc_main.c 	size = sizeof(struct igc_tx_buffer) * tx_ring->count;
tx_ring           301 drivers/net/ethernet/intel/igc/igc_main.c 	tx_ring->tx_buffer_info = vzalloc(size);
tx_ring           302 drivers/net/ethernet/intel/igc/igc_main.c 	if (!tx_ring->tx_buffer_info)
tx_ring           306 drivers/net/ethernet/intel/igc/igc_main.c 	tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
tx_ring           307 drivers/net/ethernet/intel/igc/igc_main.c 	tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring           309 drivers/net/ethernet/intel/igc/igc_main.c 	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
tx_ring           310 drivers/net/ethernet/intel/igc/igc_main.c 					   &tx_ring->dma, GFP_KERNEL);
tx_ring           312 drivers/net/ethernet/intel/igc/igc_main.c 	if (!tx_ring->desc)
tx_ring           315 drivers/net/ethernet/intel/igc/igc_main.c 	tx_ring->next_to_use = 0;
tx_ring           316 drivers/net/ethernet/intel/igc/igc_main.c 	tx_ring->next_to_clean = 0;
tx_ring           321 drivers/net/ethernet/intel/igc/igc_main.c 	vfree(tx_ring->tx_buffer_info);
tx_ring           339 drivers/net/ethernet/intel/igc/igc_main.c 		err = igc_setup_tx_resources(adapter->tx_ring[i]);
tx_ring           344 drivers/net/ethernet/intel/igc/igc_main.c 				igc_free_tx_resources(adapter->tx_ring[i]);
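
The igc_setup_tx_resources()/igc_free_tx_resources() entries above (igc_main.c:195-344) show the usual Intel-driver split between a vzalloc'd per-descriptor software array and a DMA-coherent descriptor area rounded up to 4 KiB. The fragment below is a minimal kernel-style sketch of that allocation pattern, not code lifted from the driver: the demo_tx_ring structure and the element-size parameters are invented for illustration, and the usual headers (linux/vmalloc.h, linux/dma-mapping.h, linux/types.h) are assumed.

struct demo_tx_ring {
        struct device *dev;
        void *desc;                     /* descriptor ring (DMA coherent) */
        dma_addr_t dma;                 /* bus address of the ring */
        unsigned int size;              /* bytes of descriptor memory */
        u16 count;                      /* number of descriptors */
        void *tx_buffer_info;           /* per-descriptor software state */
        u16 next_to_use;
        u16 next_to_clean;
};

static int demo_setup_tx_resources(struct demo_tx_ring *ring,
                                   size_t buf_elem_size, size_t desc_elem_size)
{
        /* software state: one zeroed element per descriptor */
        ring->tx_buffer_info = vzalloc(buf_elem_size * ring->count);
        if (!ring->tx_buffer_info)
                return -ENOMEM;

        /* hardware ring: rounded up to a 4 KiB boundary, DMA coherent */
        ring->size = ALIGN(desc_elem_size * ring->count, 4096);
        ring->desc = dma_alloc_coherent(ring->dev, ring->size,
                                        &ring->dma, GFP_KERNEL);
        if (!ring->desc) {
                vfree(ring->tx_buffer_info);
                ring->tx_buffer_info = NULL;
                return -ENOMEM;
        }

        ring->next_to_use = 0;
        ring->next_to_clean = 0;
        return 0;
}

Teardown (igc_free_tx_resources(), igc_main.c:195-198 above) is the mirror image: dma_free_coherent() for the descriptor ring, vfree() for the software array, and both pointers cleared.
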
tx_ring           642 drivers/net/ethernet/intel/igc/igc_main.c 		igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
tx_ring           798 drivers/net/ethernet/intel/igc/igc_main.c static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
tx_ring           804 drivers/net/ethernet/intel/igc/igc_main.c 	u16 i = tx_ring->next_to_use;
tx_ring           807 drivers/net/ethernet/intel/igc/igc_main.c 	context_desc = IGC_TX_CTXTDESC(tx_ring, i);
tx_ring           810 drivers/net/ethernet/intel/igc/igc_main.c 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
tx_ring           816 drivers/net/ethernet/intel/igc/igc_main.c 	if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
tx_ring           817 drivers/net/ethernet/intel/igc/igc_main.c 		mss_l4len_idx |= tx_ring->reg_idx << 4;
tx_ring           826 drivers/net/ethernet/intel/igc/igc_main.c 	if (tx_ring->launchtime_enable) {
tx_ring           844 drivers/net/ethernet/intel/igc/igc_main.c static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
tx_ring           853 drivers/net/ethernet/intel/igc/igc_main.c 		    !tx_ring->launchtime_enable)
tx_ring           887 drivers/net/ethernet/intel/igc/igc_main.c 	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
tx_ring           890 drivers/net/ethernet/intel/igc/igc_main.c static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
tx_ring           892 drivers/net/ethernet/intel/igc/igc_main.c 	struct net_device *netdev = tx_ring->netdev;
tx_ring           894 drivers/net/ethernet/intel/igc/igc_main.c 	netif_stop_subqueue(netdev, tx_ring->queue_index);
tx_ring           902 drivers/net/ethernet/intel/igc/igc_main.c 	if (igc_desc_unused(tx_ring) < size)
tx_ring           906 drivers/net/ethernet/intel/igc/igc_main.c 	netif_wake_subqueue(netdev, tx_ring->queue_index);
tx_ring           908 drivers/net/ethernet/intel/igc/igc_main.c 	u64_stats_update_begin(&tx_ring->tx_syncp2);
tx_ring           909 drivers/net/ethernet/intel/igc/igc_main.c 	tx_ring->tx_stats.restart_queue2++;
tx_ring           910 drivers/net/ethernet/intel/igc/igc_main.c 	u64_stats_update_end(&tx_ring->tx_syncp2);
tx_ring           915 drivers/net/ethernet/intel/igc/igc_main.c static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
tx_ring           917 drivers/net/ethernet/intel/igc/igc_main.c 	if (igc_desc_unused(tx_ring) >= size)
tx_ring           919 drivers/net/ethernet/intel/igc/igc_main.c 	return __igc_maybe_stop_tx(tx_ring, size);
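
Right above, __igc_maybe_stop_tx()/igc_maybe_stop_tx() (igc_main.c:890-919) implement the stop/re-check/wake dance that closes the race between the transmit path running out of descriptors and the completion path freeing them. A sketch of that control flow, reusing the illustrative demo_tx_ring from the earlier fragment; the unused-descriptor formula matches the one these drivers use, everything else is assumed.

static inline u16 demo_desc_unused(const struct demo_tx_ring *ring)
{
        u16 ntc = ring->next_to_clean;
        u16 ntu = ring->next_to_use;

        return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}

static int demo_maybe_stop_tx(struct net_device *netdev,
                              struct demo_tx_ring *ring,
                              u16 queue_index, u16 needed)
{
        if (likely(demo_desc_unused(ring) >= needed))
                return 0;

        /* Not enough room: stop the queue first ... */
        netif_stop_subqueue(netdev, queue_index);

        /* ... then look again after a barrier, because the completion
         * path may have freed descriptors in the meantime.
         */
        smp_mb();
        if (demo_desc_unused(ring) < needed)
                return -EBUSY;

        /* Space reappeared after all: restart the queue and carry on. */
        netif_wake_subqueue(netdev, queue_index);
        return 0;
}

The xmit handler turns the -EBUSY into NETDEV_TX_BUSY; igc_xmit_frame_ring() calls the check with count + 3 (igc_main.c:1116 above) so a frame is refused before any descriptor is written.
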
tx_ring           932 drivers/net/ethernet/intel/igc/igc_main.c static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
tx_ring           951 drivers/net/ethernet/intel/igc/igc_main.c static int igc_tx_map(struct igc_ring *tx_ring,
tx_ring           960 drivers/net/ethernet/intel/igc/igc_main.c 	u16 i = tx_ring->next_to_use;
tx_ring           965 drivers/net/ethernet/intel/igc/igc_main.c 	tx_desc = IGC_TX_DESC(tx_ring, i);
tx_ring           967 drivers/net/ethernet/intel/igc/igc_main.c 	igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
tx_ring           972 drivers/net/ethernet/intel/igc/igc_main.c 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
tx_ring           977 drivers/net/ethernet/intel/igc/igc_main.c 		if (dma_mapping_error(tx_ring->dev, dma))
tx_ring           992 drivers/net/ethernet/intel/igc/igc_main.c 			if (i == tx_ring->count) {
tx_ring           993 drivers/net/ethernet/intel/igc/igc_main.c 				tx_desc = IGC_TX_DESC(tx_ring, 0);
tx_ring          1011 drivers/net/ethernet/intel/igc/igc_main.c 		if (i == tx_ring->count) {
tx_ring          1012 drivers/net/ethernet/intel/igc/igc_main.c 			tx_desc = IGC_TX_DESC(tx_ring, 0);
tx_ring          1020 drivers/net/ethernet/intel/igc/igc_main.c 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
tx_ring          1023 drivers/net/ethernet/intel/igc/igc_main.c 		tx_buffer = &tx_ring->tx_buffer_info[i];
tx_ring          1030 drivers/net/ethernet/intel/igc/igc_main.c 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
tx_ring          1050 drivers/net/ethernet/intel/igc/igc_main.c 	if (i == tx_ring->count)
tx_ring          1053 drivers/net/ethernet/intel/igc/igc_main.c 	tx_ring->next_to_use = i;
tx_ring          1056 drivers/net/ethernet/intel/igc/igc_main.c 	igc_maybe_stop_tx(tx_ring, DESC_NEEDED);
tx_ring          1058 drivers/net/ethernet/intel/igc/igc_main.c 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
tx_ring          1059 drivers/net/ethernet/intel/igc/igc_main.c 		writel(i, tx_ring->tail);
tx_ring          1064 drivers/net/ethernet/intel/igc/igc_main.c 	dev_err(tx_ring->dev, "TX DMA map failed\n");
tx_ring          1065 drivers/net/ethernet/intel/igc/igc_main.c 	tx_buffer = &tx_ring->tx_buffer_info[i];
tx_ring          1070 drivers/net/ethernet/intel/igc/igc_main.c 			dma_unmap_page(tx_ring->dev,
tx_ring          1077 drivers/net/ethernet/intel/igc/igc_main.c 			i += tx_ring->count;
tx_ring          1078 drivers/net/ethernet/intel/igc/igc_main.c 		tx_buffer = &tx_ring->tx_buffer_info[i];
tx_ring          1082 drivers/net/ethernet/intel/igc/igc_main.c 		dma_unmap_single(tx_ring->dev,
tx_ring          1091 drivers/net/ethernet/intel/igc/igc_main.c 	tx_ring->next_to_use = i;
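
The igc_tx_map() entries ending here (igc_main.c:951-1091) include the doorbell convention shared by these drivers: next_to_use is advanced in software, but the tail register is only written when the stack has stopped the queue or reports no further frames pending (netdev_xmit_more()), so a burst of packets shares a single MMIO write. A hedged sketch of just that step, with the descriptor writes and the maybe-stop call elided; the struct netdev_queue pointer stands in for txring_txq().

static void demo_tx_ring_doorbell(struct demo_tx_ring *ring,
                                  struct netdev_queue *txq,
                                  void __iomem *tail, u16 next_to_use)
{
        /* Descriptor contents must be visible before the device is told. */
        wmb();

        ring->next_to_use = next_to_use;

        /* Only ring the doorbell when no more frames are about to follow. */
        if (netif_xmit_stopped(txq) || !netdev_xmit_more())
                writel(next_to_use, tail);
}
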
tx_ring          1097 drivers/net/ethernet/intel/igc/igc_main.c 				       struct igc_ring *tx_ring)
tx_ring          1116 drivers/net/ethernet/intel/igc/igc_main.c 	if (igc_maybe_stop_tx(tx_ring, count + 3)) {
tx_ring          1122 drivers/net/ethernet/intel/igc/igc_main.c 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
tx_ring          1131 drivers/net/ethernet/intel/igc/igc_main.c 	igc_tx_csum(tx_ring, first);
tx_ring          1133 drivers/net/ethernet/intel/igc/igc_main.c 	igc_tx_map(tx_ring, first, hdr_len);
tx_ring          1146 drivers/net/ethernet/intel/igc/igc_main.c 	return adapter->tx_ring[r_idx];
tx_ring          1706 drivers/net/ethernet/intel/igc/igc_main.c 	struct igc_ring *tx_ring = q_vector->tx.ring;
tx_ring          1707 drivers/net/ethernet/intel/igc/igc_main.c 	unsigned int i = tx_ring->next_to_clean;
tx_ring          1714 drivers/net/ethernet/intel/igc/igc_main.c 	tx_buffer = &tx_ring->tx_buffer_info[i];
tx_ring          1715 drivers/net/ethernet/intel/igc/igc_main.c 	tx_desc = IGC_TX_DESC(tx_ring, i);
tx_ring          1716 drivers/net/ethernet/intel/igc/igc_main.c 	i -= tx_ring->count;
tx_ring          1743 drivers/net/ethernet/intel/igc/igc_main.c 		dma_unmap_single(tx_ring->dev,
tx_ring          1757 drivers/net/ethernet/intel/igc/igc_main.c 				i -= tx_ring->count;
tx_ring          1758 drivers/net/ethernet/intel/igc/igc_main.c 				tx_buffer = tx_ring->tx_buffer_info;
tx_ring          1759 drivers/net/ethernet/intel/igc/igc_main.c 				tx_desc = IGC_TX_DESC(tx_ring, 0);
tx_ring          1764 drivers/net/ethernet/intel/igc/igc_main.c 				dma_unmap_page(tx_ring->dev,
tx_ring          1777 drivers/net/ethernet/intel/igc/igc_main.c 			i -= tx_ring->count;
tx_ring          1778 drivers/net/ethernet/intel/igc/igc_main.c 			tx_buffer = tx_ring->tx_buffer_info;
tx_ring          1779 drivers/net/ethernet/intel/igc/igc_main.c 			tx_desc = IGC_TX_DESC(tx_ring, 0);
tx_ring          1789 drivers/net/ethernet/intel/igc/igc_main.c 	netdev_tx_completed_queue(txring_txq(tx_ring),
tx_ring          1792 drivers/net/ethernet/intel/igc/igc_main.c 	i += tx_ring->count;
tx_ring          1793 drivers/net/ethernet/intel/igc/igc_main.c 	tx_ring->next_to_clean = i;
tx_ring          1794 drivers/net/ethernet/intel/igc/igc_main.c 	u64_stats_update_begin(&tx_ring->tx_syncp);
tx_ring          1795 drivers/net/ethernet/intel/igc/igc_main.c 	tx_ring->tx_stats.bytes += total_bytes;
tx_ring          1796 drivers/net/ethernet/intel/igc/igc_main.c 	tx_ring->tx_stats.packets += total_packets;
tx_ring          1797 drivers/net/ethernet/intel/igc/igc_main.c 	u64_stats_update_end(&tx_ring->tx_syncp);
tx_ring          1801 drivers/net/ethernet/intel/igc/igc_main.c 	if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
tx_ring          1807 drivers/net/ethernet/intel/igc/igc_main.c 		clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
tx_ring          1813 drivers/net/ethernet/intel/igc/igc_main.c 			dev_err(tx_ring->dev,
tx_ring          1825 drivers/net/ethernet/intel/igc/igc_main.c 				tx_ring->queue_index,
tx_ring          1826 drivers/net/ethernet/intel/igc/igc_main.c 				rd32(IGC_TDH(tx_ring->reg_idx)),
tx_ring          1827 drivers/net/ethernet/intel/igc/igc_main.c 				readl(tx_ring->tail),
tx_ring          1828 drivers/net/ethernet/intel/igc/igc_main.c 				tx_ring->next_to_use,
tx_ring          1829 drivers/net/ethernet/intel/igc/igc_main.c 				tx_ring->next_to_clean,
tx_ring          1834 drivers/net/ethernet/intel/igc/igc_main.c 			netif_stop_subqueue(tx_ring->netdev,
tx_ring          1835 drivers/net/ethernet/intel/igc/igc_main.c 					    tx_ring->queue_index);
tx_ring          1844 drivers/net/ethernet/intel/igc/igc_main.c 		     netif_carrier_ok(tx_ring->netdev) &&
tx_ring          1845 drivers/net/ethernet/intel/igc/igc_main.c 		     igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
tx_ring          1850 drivers/net/ethernet/intel/igc/igc_main.c 		if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring          1851 drivers/net/ethernet/intel/igc/igc_main.c 					     tx_ring->queue_index) &&
tx_ring          1853 drivers/net/ethernet/intel/igc/igc_main.c 			netif_wake_subqueue(tx_ring->netdev,
tx_ring          1854 drivers/net/ethernet/intel/igc/igc_main.c 					    tx_ring->queue_index);
tx_ring          1856 drivers/net/ethernet/intel/igc/igc_main.c 			u64_stats_update_begin(&tx_ring->tx_syncp);
tx_ring          1857 drivers/net/ethernet/intel/igc/igc_main.c 			tx_ring->tx_stats.restart_queue++;
tx_ring          1858 drivers/net/ethernet/intel/igc/igc_main.c 			u64_stats_update_end(&tx_ring->tx_syncp);
tx_ring          1952 drivers/net/ethernet/intel/igc/igc_main.c 		struct igc_ring *ring = adapter->tx_ring[i];
tx_ring          2761 drivers/net/ethernet/intel/igc/igc_main.c 		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
tx_ring          3033 drivers/net/ethernet/intel/igc/igc_main.c 		struct igc_ring *tx_ring = adapter->tx_ring[i];
tx_ring          3041 drivers/net/ethernet/intel/igc/igc_main.c 			if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) {
tx_ring          3050 drivers/net/ethernet/intel/igc/igc_main.c 		set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
tx_ring          3575 drivers/net/ethernet/intel/igc/igc_main.c 		adapter->tx_ring[txr_idx] = ring;
tx_ring          3680 drivers/net/ethernet/intel/igc/igc_main.c 			adapter->tx_ring[j]->reg_idx = j;
tx_ring           123 drivers/net/ethernet/intel/ixgb/ixgb.h 	struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp;
tx_ring           472 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c 	struct ixgb_desc_ring *txdr = &adapter->tx_ring;
tx_ring           486 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c 	struct ixgb_desc_ring *txdr = &adapter->tx_ring;
tx_ring           491 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c 	tx_old = adapter->tx_ring;
tx_ring           519 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c 		tx_new = adapter->tx_ring;
tx_ring           521 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c 		adapter->tx_ring = tx_old;
tx_ring           525 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c 		adapter->tx_ring = tx_new;
tx_ring           536 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c 	adapter->tx_ring = tx_old;
tx_ring           669 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	struct ixgb_desc_ring *txdr = &adapter->tx_ring;
tx_ring           706 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	u64 tdba = adapter->tx_ring.dma;
tx_ring           707 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
tx_ring           882 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	vfree(adapter->tx_ring.buffer_info);
tx_ring           883 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	adapter->tx_ring.buffer_info = NULL;
tx_ring           885 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
tx_ring           886 drivers/net/ethernet/intel/ixgb/ixgb_main.c 			  adapter->tx_ring.desc, adapter->tx_ring.dma);
tx_ring           888 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	adapter->tx_ring.desc = NULL;
tx_ring           923 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
tx_ring           930 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	for (i = 0; i < tx_ring->count; i++) {
tx_ring           931 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		buffer_info = &tx_ring->buffer_info[i];
tx_ring           935 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	size = sizeof(struct ixgb_buffer) * tx_ring->count;
tx_ring           936 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	memset(tx_ring->buffer_info, 0, size);
tx_ring           940 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	memset(tx_ring->desc, 0, tx_ring->size);
tx_ring           942 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	tx_ring->next_to_use = 0;
tx_ring           943 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	tx_ring->next_to_clean = 0;
tx_ring          1124 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	struct ixgb_desc_ring *txdr = &adapter->tx_ring;
tx_ring          1216 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		i = adapter->tx_ring.next_to_use;
tx_ring          1217 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
tx_ring          1218 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		buffer_info = &adapter->tx_ring.buffer_info[i];
tx_ring          1239 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		if (++i == adapter->tx_ring.count) i = 0;
tx_ring          1240 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		adapter->tx_ring.next_to_use = i;
tx_ring          1260 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		i = adapter->tx_ring.next_to_use;
tx_ring          1261 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
tx_ring          1262 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		buffer_info = &adapter->tx_ring.buffer_info[i];
tx_ring          1277 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		if (++i == adapter->tx_ring.count) i = 0;
tx_ring          1278 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		adapter->tx_ring.next_to_use = i;
tx_ring          1293 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
tx_ring          1302 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	i = tx_ring->next_to_use;
tx_ring          1305 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		buffer_info = &tx_ring->buffer_info[i];
tx_ring          1328 drivers/net/ethernet/intel/ixgb/ixgb_main.c 			if (i == tx_ring->count)
tx_ring          1340 drivers/net/ethernet/intel/ixgb/ixgb_main.c 			if (i == tx_ring->count)
tx_ring          1343 drivers/net/ethernet/intel/ixgb/ixgb_main.c 			buffer_info = &tx_ring->buffer_info[i];
tx_ring          1367 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	tx_ring->buffer_info[i].skb = skb;
tx_ring          1368 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	tx_ring->buffer_info[first].next_to_watch = i;
tx_ring          1380 drivers/net/ethernet/intel/ixgb/ixgb_main.c 			i += tx_ring->count;
tx_ring          1382 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		buffer_info = &tx_ring->buffer_info[i];
tx_ring          1392 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
tx_ring          1411 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	i = tx_ring->next_to_use;
tx_ring          1414 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		buffer_info = &tx_ring->buffer_info[i];
tx_ring          1415 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		tx_desc = IXGB_TX_DESC(*tx_ring, i);
tx_ring          1423 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		if (++i == tx_ring->count) i = 0;
tx_ring          1435 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	tx_ring->next_to_use = i;
tx_ring          1442 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
tx_ring          1452 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
tx_ring          1462 drivers/net/ethernet/intel/ixgb/ixgb_main.c                               struct ixgb_desc_ring *tx_ring, int size)
tx_ring          1464 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
tx_ring          1497 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
tx_ring          1506 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	first = adapter->tx_ring.next_to_use;
tx_ring          1524 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
tx_ring          1528 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		adapter->tx_ring.buffer_info[first].time_stamp = 0;
tx_ring          1529 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		adapter->tx_ring.next_to_use = first;
tx_ring          1783 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
tx_ring          1790 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	i = tx_ring->next_to_clean;
tx_ring          1791 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	eop = tx_ring->buffer_info[i].next_to_watch;
tx_ring          1792 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	eop_desc = IXGB_TX_DESC(*tx_ring, eop);
tx_ring          1798 drivers/net/ethernet/intel/ixgb/ixgb_main.c 			tx_desc = IXGB_TX_DESC(*tx_ring, i);
tx_ring          1799 drivers/net/ethernet/intel/ixgb/ixgb_main.c 			buffer_info = &tx_ring->buffer_info[i];
tx_ring          1811 drivers/net/ethernet/intel/ixgb/ixgb_main.c 			if (++i == tx_ring->count) i = 0;
tx_ring          1814 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		eop = tx_ring->buffer_info[i].next_to_watch;
tx_ring          1815 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		eop_desc = IXGB_TX_DESC(*tx_ring, eop);
tx_ring          1818 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	tx_ring->next_to_clean = i;
tx_ring          1821 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		     IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
tx_ring          1837 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		if (tx_ring->buffer_info[eop].time_stamp &&
tx_ring          1838 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		   time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
tx_ring          1855 drivers/net/ethernet/intel/ixgb/ixgb_main.c 				  tx_ring->next_to_use,
tx_ring          1856 drivers/net/ethernet/intel/ixgb/ixgb_main.c 				  tx_ring->next_to_clean,
tx_ring          1857 drivers/net/ethernet/intel/ixgb/ixgb_main.c 				  tx_ring->buffer_info[eop].time_stamp,
tx_ring           253 drivers/net/ethernet/intel/ixgb/ixgb_param.c 		struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
tx_ring           256 drivers/net/ethernet/intel/ixgb/ixgb_param.c 			tx_ring->count = TxDescriptors[bd];
tx_ring           257 drivers/net/ethernet/intel/ixgb/ixgb_param.c 			ixgb_validate_option(&tx_ring->count, &opt);
tx_ring           259 drivers/net/ethernet/intel/ixgb/ixgb_param.c 			tx_ring->count = opt.def;
tx_ring           261 drivers/net/ethernet/intel/ixgb/ixgb_param.c 		tx_ring->count = ALIGN(tx_ring->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
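
The ixgb_param.c entries above (lines 253-261) validate a module-parameter descriptor count and then round it up to the multiple the hardware requires. A one-line sketch of that rounding; the constant below is illustrative (the driver uses IXGB_REQ_TX_DESCRIPTOR_MULTIPLE).

#define DEMO_TX_DESC_MULTIPLE   8       /* illustrative; must be a power of two for ALIGN() */

static u16 demo_round_tx_count(u16 requested)
{
        /* ALIGN() rounds up to the next multiple of its second argument. */
        return ALIGN(requested, DEMO_TX_DESC_MULTIPLE);
}
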
tx_ring           639 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
tx_ring           924 drivers/net/ethernet/intel/ixgbe/ixgbe.h int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
tx_ring          1001 drivers/net/ethernet/intel/ixgbe/ixgbe.h 				  struct ixgbe_ring *tx_ring);
tx_ring          1014 drivers/net/ethernet/intel/ixgbe/ixgbe.h int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
tx_ring          1026 drivers/net/ethernet/intel/ixgbe/ixgbe.h static inline int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
tx_ring          1023 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
tx_ring          1029 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	ring->tx_pending = tx_ring->count;
tx_ring          1062 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			adapter->tx_ring[i]->count = new_tx_count;
tx_ring          1093 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			memcpy(&temp_ring[i], adapter->tx_ring[i],
tx_ring          1123 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			ixgbe_free_tx_resources(adapter->tx_ring[i]);
tx_ring          1125 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			memcpy(adapter->tx_ring[i], &temp_ring[i],
tx_ring          1224 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 		ring = adapter->tx_ring[j];
tx_ring          1723 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
tx_ring          1731 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	tx_ring->count = IXGBE_DEFAULT_TXD;
tx_ring          1732 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	tx_ring->queue_index = 0;
tx_ring          1733 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	tx_ring->dev = &adapter->pdev->dev;
tx_ring          1734 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	tx_ring->netdev = adapter->netdev;
tx_ring          1735 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
tx_ring          1737 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	err = ixgbe_setup_tx_resources(tx_ring);
tx_ring          1755 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	ixgbe_configure_tx_ring(adapter, tx_ring);
tx_ring          1887 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 				  struct ixgbe_ring *tx_ring,
tx_ring          1895 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	tx_ntc = tx_ring->next_to_clean;
tx_ring          1898 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	while (tx_ntc != tx_ring->next_to_use) {
tx_ring          1902 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 		tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc);
tx_ring          1909 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 		tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
tx_ring          1915 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 		dma_unmap_single(tx_ring->dev,
tx_ring          1923 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 		if (tx_ntc == tx_ring->count)
tx_ring          1960 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	netdev_tx_reset_queue(txring_txq(tx_ring));
tx_ring          1965 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	tx_ring->next_to_clean = tx_ntc;
tx_ring          1972 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
tx_ring          1998 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	if (rx_ring->count <= tx_ring->count)
tx_ring          1999 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 		lc = ((tx_ring->count / 64) * 2) + 1;
tx_ring          2012 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 							   tx_ring);
tx_ring          2025 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 		good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
tx_ring           487 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c int ixgbe_fso(struct ixgbe_ring *tx_ring,
tx_ring           500 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 		dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
tx_ring           526 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 		dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
tx_ring           552 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 		dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
tx_ring           591 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
tx_ring          1061 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
tx_ring          1065 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 	struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
tx_ring          1073 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
tx_ring          1080 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
tx_ring          1087 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
tx_ring          1094 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
tx_ring            52 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		adapter->tx_ring[i]->reg_idx = reg_idx;
tx_ring            80 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 			adapter->tx_ring[i]->reg_idx = reg_idx;
tx_ring           162 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
tx_ring           165 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 			adapter->tx_ring[offset + i]->dcb_tc = tc;
tx_ring           232 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		adapter->tx_ring[i]->reg_idx = reg_idx;
tx_ring           238 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		adapter->tx_ring[i]->reg_idx = reg_idx;
tx_ring           261 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		adapter->tx_ring[i]->reg_idx = reg_idx;
tx_ring           283 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	adapter->tx_ring[0]->reg_idx = 0;
tx_ring           926 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		adapter->tx_ring[txr_idx] = ring;
tx_ring          1027 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 			adapter->tx_ring[ring->queue_index] = NULL;
tx_ring          1106 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		if (adapter->tx_ring[i])
tx_ring          1107 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 			adapter->tx_ring[i]->ring_idx = i;
tx_ring          1281 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
tx_ring          1285 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	u16 i = tx_ring->next_to_use;
tx_ring          1287 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);
tx_ring          1290 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
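
ixgbe_tx_ctxtdesc() above (ixgbe_lib.c:1281-1290), like igc_tx_ctxtdesc() earlier in this listing, consumes exactly one slot for a context descriptor and wraps next_to_use back to zero at the end of the ring. A tiny sketch of that advance, again using the illustrative demo_tx_ring:

static u16 demo_ring_advance_one(struct demo_tx_ring *ring)
{
        u16 i = ring->next_to_use;

        /* wrap to the start of the ring after the last descriptor */
        ring->next_to_use = (i + 1 < ring->count) ? i + 1 : 0;

        return i;       /* index of the slot the caller should fill */
}
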
tx_ring           612 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ring = adapter->tx_ring[n];
tx_ring           663 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ring = adapter->tx_ring[n];
tx_ring           952 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			  &adapter->tx_ring[i]->state);
tx_ring           995 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
tx_ring           997 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		tc = tx_ring->dcb_tc;
tx_ring           999 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
tx_ring          1026 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
tx_ring          1028 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u32 tx_done = ixgbe_get_tx_completed(tx_ring);
tx_ring          1029 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
tx_ring          1030 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
tx_ring          1032 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	clear_check_for_tx_hang(tx_ring);
tx_ring          1049 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 					&tx_ring->state);
tx_ring          1051 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring->tx_stats.tx_done_old = tx_done;
tx_ring          1053 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
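
ixgbe_check_tx_hang() above (ixgbe_main.c:1026-1053), and its ixgbevf counterpart later in this listing, only declare a hang after two watchdog passes with descriptors still pending and no completion progress. The sketch below keeps that two-strike logic but replaces the driver's ring-state bit (__IXGBE_HANG_CHECK_ARMED) with a plain bool for readability; the field names are invented.

struct demo_hang_state {
        u32 tx_done_old;        /* completions counted at the previous check */
        bool armed;             /* a first no-progress pass was already seen */
};

static bool demo_check_tx_hang(struct demo_hang_state *st,
                               u32 tx_done, u32 tx_pending)
{
        if (st->tx_done_old == tx_done && tx_pending) {
                /* No forward progress; report a hang only on the second strike. */
                if (st->armed)
                        return true;
                st->armed = true;
                return false;
        }

        /* Progress was made (or the ring is idle): rearm from scratch. */
        st->tx_done_old = tx_done;
        st->armed = false;
        return false;
}
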
tx_ring          1113 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			       struct ixgbe_ring *tx_ring, int napi_budget)
tx_ring          1120 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	unsigned int i = tx_ring->next_to_clean;
tx_ring          1125 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_buffer = &tx_ring->tx_buffer_info[i];
tx_ring          1126 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_desc = IXGBE_TX_DESC(tx_ring, i);
tx_ring          1127 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	i -= tx_ring->count;
tx_ring          1153 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (ring_is_xdp(tx_ring))
tx_ring          1159 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		dma_unmap_single(tx_ring->dev,
tx_ring          1173 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				i -= tx_ring->count;
tx_ring          1174 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				tx_buffer = tx_ring->tx_buffer_info;
tx_ring          1175 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
tx_ring          1180 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				dma_unmap_page(tx_ring->dev,
tx_ring          1193 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			i -= tx_ring->count;
tx_ring          1194 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			tx_buffer = tx_ring->tx_buffer_info;
tx_ring          1195 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
tx_ring          1205 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	i += tx_ring->count;
tx_ring          1206 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring->next_to_clean = i;
tx_ring          1207 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u64_stats_update_begin(&tx_ring->syncp);
tx_ring          1208 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring->stats.bytes += total_bytes;
tx_ring          1209 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring->stats.packets += total_packets;
tx_ring          1210 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u64_stats_update_end(&tx_ring->syncp);
tx_ring          1215 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
tx_ring          1226 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			ring_is_xdp(tx_ring) ? "(XDP)" : "",
tx_ring          1227 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			tx_ring->queue_index,
tx_ring          1228 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
tx_ring          1229 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
tx_ring          1230 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			tx_ring->next_to_use, i,
tx_ring          1231 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			tx_ring->tx_buffer_info[i].time_stamp, jiffies);
tx_ring          1233 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (!ring_is_xdp(tx_ring))
tx_ring          1234 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			netif_stop_subqueue(tx_ring->netdev,
tx_ring          1235 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 					    tx_ring->queue_index);
tx_ring          1239 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			adapter->tx_timeout_count + 1, tx_ring->queue_index);
tx_ring          1248 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (ring_is_xdp(tx_ring))
tx_ring          1251 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	netdev_tx_completed_queue(txring_txq(tx_ring),
tx_ring          1255 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
tx_ring          1256 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
tx_ring          1261 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring          1262 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 					     tx_ring->queue_index)
tx_ring          1264 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			netif_wake_subqueue(tx_ring->netdev,
tx_ring          1265 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 					    tx_ring->queue_index);
tx_ring          1266 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			++tx_ring->tx_stats.restart_queue;
tx_ring          1275 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				struct ixgbe_ring *tx_ring,
tx_ring          1283 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		txctrl = dca3_get_tag(tx_ring->dev, cpu);
tx_ring          1287 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
tx_ring          1291 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
tx_ring          3106 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				struct ixgbe_ring *ring = adapter->tx_ring[i];
tx_ring          3645 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
tx_ring          5824 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		struct ixgbe_ring *ring = adapter->tx_ring[i];
tx_ring          5873 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			struct ixgbe_ring *ring = adapter->tx_ring[i];
tx_ring          5976 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
tx_ring          5978 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u16 i = tx_ring->next_to_clean;
tx_ring          5979 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
tx_ring          5981 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (tx_ring->xsk_umem) {
tx_ring          5982 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_xsk_clean_tx_ring(tx_ring);
tx_ring          5986 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	while (i != tx_ring->next_to_use) {
tx_ring          5990 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (ring_is_xdp(tx_ring))
tx_ring          5996 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		dma_unmap_single(tx_ring->dev,
tx_ring          6003 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		tx_desc = IXGBE_TX_DESC(tx_ring, i);
tx_ring          6010 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			if (unlikely(i == tx_ring->count)) {
tx_ring          6012 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				tx_buffer = tx_ring->tx_buffer_info;
tx_ring          6013 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
tx_ring          6018 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				dma_unmap_page(tx_ring->dev,
tx_ring          6027 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (unlikely(i == tx_ring->count)) {
tx_ring          6029 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			tx_buffer = tx_ring->tx_buffer_info;
tx_ring          6034 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (!ring_is_xdp(tx_ring))
tx_ring          6035 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		netdev_tx_reset_queue(txring_txq(tx_ring));
tx_ring          6039 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring->next_to_use = 0;
tx_ring          6040 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring->next_to_clean = 0;
tx_ring          6064 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_clean_tx_ring(adapter->tx_ring[i]);
tx_ring          6443 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
tx_ring          6445 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct device *dev = tx_ring->dev;
tx_ring          6450 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
tx_ring          6452 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (tx_ring->q_vector)
tx_ring          6453 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ring_node = tx_ring->q_vector->numa_node;
tx_ring          6455 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring->tx_buffer_info = vmalloc_node(size, ring_node);
tx_ring          6456 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (!tx_ring->tx_buffer_info)
tx_ring          6457 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		tx_ring->tx_buffer_info = vmalloc(size);
tx_ring          6458 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (!tx_ring->tx_buffer_info)
tx_ring          6462 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring          6463 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring          6466 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring->desc = dma_alloc_coherent(dev,
tx_ring          6467 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 					   tx_ring->size,
tx_ring          6468 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 					   &tx_ring->dma,
tx_ring          6471 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (!tx_ring->desc)
tx_ring          6472 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
tx_ring          6473 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 						   &tx_ring->dma, GFP_KERNEL);
tx_ring          6474 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (!tx_ring->desc)
tx_ring          6477 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring->next_to_use = 0;
tx_ring          6478 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring->next_to_clean = 0;
tx_ring          6482 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	vfree(tx_ring->tx_buffer_info);
tx_ring          6483 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring->tx_buffer_info = NULL;
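
ixgbe_setup_tx_resources() above (ixgbe_main.c:6443-6483) differs from the igc version sketched earlier mainly in being NUMA-aware: the buffer-info array is first requested on the queue vector's node and only then from any node, and the descriptor ring gets a second dma_alloc_coherent() attempt (in the driver the retry runs after resetting the device's node, a detail not visible in this listing). A minimal sketch of the node-preferring fallback alone:

static void *demo_alloc_node_preferred(size_t size, int preferred_node)
{
        void *p = vmalloc_node(size, preferred_node);

        /* Fall back to any node rather than failing the ring setup. */
        return p ? p : vmalloc(size);
}
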
tx_ring          6503 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
tx_ring          6525 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_free_tx_resources(adapter->tx_ring[i]);
tx_ring          6630 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
tx_ring          6632 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ixgbe_clean_tx_ring(tx_ring);
tx_ring          6634 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	vfree(tx_ring->tx_buffer_info);
tx_ring          6635 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring->tx_buffer_info = NULL;
tx_ring          6638 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (!tx_ring->desc)
tx_ring          6641 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	dma_free_coherent(tx_ring->dev, tx_ring->size,
tx_ring          6642 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			  tx_ring->desc, tx_ring->dma);
tx_ring          6644 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring->desc = NULL;
tx_ring          6658 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (adapter->tx_ring[i]->desc)
tx_ring          6659 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			ixgbe_free_tx_resources(adapter->tx_ring[i]);
tx_ring          7087 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
tx_ring          7088 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		restart_queue += tx_ring->tx_stats.restart_queue;
tx_ring          7089 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		tx_busy += tx_ring->tx_stats.tx_busy;
tx_ring          7090 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		bytes += tx_ring->stats.bytes;
tx_ring          7091 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		packets += tx_ring->stats.packets;
tx_ring          7294 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				&(adapter->tx_ring[i]->state));
tx_ring          7330 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			set_check_for_tx_hang(adapter->tx_ring[i]);
tx_ring          7541 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
tx_ring          7543 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (tx_ring->next_to_use != tx_ring->next_to_clean)
tx_ring          7951 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static int ixgbe_tso(struct ixgbe_ring *tx_ring,
tx_ring          8041 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
tx_ring          8056 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
tx_ring          8106 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0);
tx_ring          8170 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
tx_ring          8172 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
tx_ring          8183 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (likely(ixgbe_desc_unused(tx_ring) < size))
tx_ring          8187 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
tx_ring          8188 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	++tx_ring->tx_stats.restart_queue;
tx_ring          8192 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
tx_ring          8194 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (likely(ixgbe_desc_unused(tx_ring) >= size))
tx_ring          8197 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	return __ixgbe_maybe_stop_tx(tx_ring, size);
tx_ring          8200 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
tx_ring          8212 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u16 i = tx_ring->next_to_use;
tx_ring          8214 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_desc = IXGBE_TX_DESC(tx_ring, i);
tx_ring          8232 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
tx_ring          8237 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (dma_mapping_error(tx_ring->dev, dma))
tx_ring          8252 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			if (i == tx_ring->count) {
tx_ring          8253 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
tx_ring          8271 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (i == tx_ring->count) {
tx_ring          8272 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
tx_ring          8284 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
tx_ring          8287 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		tx_buffer = &tx_ring->tx_buffer_info[i];
tx_ring          8294 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
tx_ring          8315 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (i == tx_ring->count)
tx_ring          8318 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring->next_to_use = i;
tx_ring          8320 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
tx_ring          8322 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
tx_ring          8323 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		writel(i, tx_ring->tail);
tx_ring          8328 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	dev_err(tx_ring->dev, "TX DMA map failed\n");
tx_ring          8332 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		tx_buffer = &tx_ring->tx_buffer_info[i];
tx_ring          8334 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			dma_unmap_page(tx_ring->dev,
tx_ring          8342 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			i += tx_ring->count;
tx_ring          8349 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring->next_to_use = i;
tx_ring          8607 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			  struct ixgbe_ring *tx_ring)
tx_ring          8629 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
tx_ring          8630 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		tx_ring->tx_stats.tx_busy++;
tx_ring          8635 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
tx_ring          8710 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	    (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
tx_ring          8711 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		tso = ixgbe_fso(tx_ring, first, &hdr_len);
tx_ring          8722 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	    !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
tx_ring          8725 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx);
tx_ring          8729 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_tx_csum(tx_ring, first, &ipsec_tx);
tx_ring          8732 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
tx_ring          8733 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_atr(tx_ring, first);
tx_ring          8738 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (ixgbe_tx_map(tx_ring, first, hdr_len))
tx_ring          8762 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct ixgbe_ring *tx_ring;
tx_ring          8771 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring = ring ? ring : adapter->tx_ring[skb_get_queue_mapping(skb)];
tx_ring          8772 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state)))
tx_ring          8775 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
tx_ring          8957 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);
tx_ring          10418 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				 struct ixgbe_ring *tx_ring)
tx_ring          10422 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u8 reg_idx = tx_ring->reg_idx;
tx_ring          10447 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			      struct ixgbe_ring *tx_ring)
tx_ring          10449 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	set_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
tx_ring          10450 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ixgbe_disable_txr_hw(adapter, tx_ring);
tx_ring          10492 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring)
tx_ring          10494 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	memset(&tx_ring->stats, 0, sizeof(tx_ring->stats));
tx_ring          10495 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
tx_ring          10514 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
tx_ring          10517 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring = adapter->tx_ring[ring];
tx_ring          10520 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ixgbe_disable_txr(adapter, tx_ring);
tx_ring          10531 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ixgbe_clean_tx_ring(tx_ring);
tx_ring          10536 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ixgbe_reset_txr_stats(tx_ring);
tx_ring          10552 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
tx_ring          10555 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring = adapter->tx_ring[ring];
tx_ring          10561 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ixgbe_configure_tx_ring(adapter, tx_ring);
tx_ring          10566 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
tx_ring          11053 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		u64_stats_init(&adapter->tx_ring[i]->syncp);
tx_ring            44 drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h 			    struct ixgbe_ring *tx_ring, int napi_budget);
tx_ring            46 drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring);
tx_ring           630 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
tx_ring           634 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	dma_unmap_single(tx_ring->dev,
tx_ring           641 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 			    struct ixgbe_ring *tx_ring, int napi_budget)
tx_ring           643 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
tx_ring           645 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	struct xdp_umem *umem = tx_ring->xsk_umem;
tx_ring           650 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	tx_bi = &tx_ring->tx_buffer_info[ntc];
tx_ring           651 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	tx_desc = IXGBE_TX_DESC(tx_ring, ntc);
tx_ring           661 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
tx_ring           670 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 		if (unlikely(ntc == tx_ring->count)) {
tx_ring           672 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 			tx_bi = tx_ring->tx_buffer_info;
tx_ring           673 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
tx_ring           680 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	tx_ring->next_to_clean = ntc;
tx_ring           682 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	u64_stats_update_begin(&tx_ring->syncp);
tx_ring           683 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	tx_ring->stats.bytes += total_bytes;
tx_ring           684 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	tx_ring->stats.packets += total_packets;
tx_ring           685 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	u64_stats_update_end(&tx_ring->syncp);
tx_ring           692 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
tx_ring           693 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 		xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
tx_ring           695 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
tx_ring           729 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
tx_ring           731 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
tx_ring           732 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	struct xdp_umem *umem = tx_ring->xsk_umem;
tx_ring           737 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 		tx_bi = &tx_ring->tx_buffer_info[ntc];
tx_ring           740 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
tx_ring           747 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 		if (ntc == tx_ring->count)
tx_ring           244 drivers/net/ethernet/intel/ixgbevf/ethtool.c 	struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
tx_ring           269 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			adapter->tx_ring[i]->count = new_tx_count;
tx_ring           281 drivers/net/ethernet/intel/ixgbevf/ethtool.c 		tx_ring = vmalloc(array_size(sizeof(*tx_ring),
tx_ring           284 drivers/net/ethernet/intel/ixgbevf/ethtool.c 		if (!tx_ring) {
tx_ring           291 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			tx_ring[i] = *adapter->tx_ring[i];
tx_ring           292 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			tx_ring[i].count = new_tx_count;
tx_ring           293 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			err = ixgbevf_setup_tx_resources(&tx_ring[i]);
tx_ring           297 drivers/net/ethernet/intel/ixgbevf/ethtool.c 					ixgbevf_free_tx_resources(&tx_ring[i]);
tx_ring           300 drivers/net/ethernet/intel/ixgbevf/ethtool.c 				vfree(tx_ring);
tx_ring           301 drivers/net/ethernet/intel/ixgbevf/ethtool.c 				tx_ring = NULL;
tx_ring           309 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			tx_ring[i] = *adapter->xdp_ring[j];
tx_ring           310 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			tx_ring[i].count = new_tx_count;
tx_ring           311 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			err = ixgbevf_setup_tx_resources(&tx_ring[i]);
tx_ring           315 drivers/net/ethernet/intel/ixgbevf/ethtool.c 					ixgbevf_free_tx_resources(&tx_ring[i]);
tx_ring           318 drivers/net/ethernet/intel/ixgbevf/ethtool.c 				vfree(tx_ring);
tx_ring           319 drivers/net/ethernet/intel/ixgbevf/ethtool.c 				tx_ring = NULL;
tx_ring           362 drivers/net/ethernet/intel/ixgbevf/ethtool.c 	if (tx_ring) {
tx_ring           364 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
tx_ring           365 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			*adapter->tx_ring[i] = tx_ring[i];
tx_ring           371 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			*adapter->xdp_ring[j] = tx_ring[i];
tx_ring           375 drivers/net/ethernet/intel/ixgbevf/ethtool.c 		vfree(tx_ring);
tx_ring           376 drivers/net/ethernet/intel/ixgbevf/ethtool.c 		tx_ring = NULL;
tx_ring           396 drivers/net/ethernet/intel/ixgbevf/ethtool.c 	if (tx_ring) {
tx_ring           399 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			ixgbevf_free_tx_resources(&tx_ring[i]);
tx_ring           400 drivers/net/ethernet/intel/ixgbevf/ethtool.c 		vfree(tx_ring);
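
The ixgbevf_set_ringparam() entries above (ethtool.c:244-400) resize the Tx and XDP rings by building every new ring in a temporary vmalloc'd array first and only swapping it in once all allocations have succeeded, so a mid-way failure leaves the live rings untouched. A kernel-style sketch of that build-then-swap strategy; the setup/release callbacks and demo_tx_ring are stand-ins for ixgbevf_setup_tx_resources()/ixgbevf_free_tx_resources() and the real ring structure.

static int demo_resize_tx_rings(struct demo_tx_ring **live, unsigned int nrings,
                                u16 new_count,
                                int (*setup)(struct demo_tx_ring *),
                                void (*release)(struct demo_tx_ring *))
{
        struct demo_tx_ring *tmp;
        unsigned int i;
        int err = 0;

        tmp = vmalloc(array_size(sizeof(*tmp), nrings));
        if (!tmp)
                return -ENOMEM;

        /* Build the resized rings off to the side. */
        for (i = 0; i < nrings; i++) {
                tmp[i] = *live[i];
                tmp[i].count = new_count;
                err = setup(&tmp[i]);
                if (err) {
                        while (i--)
                                release(&tmp[i]);
                        goto out;
                }
        }

        /* All allocations succeeded: free the old resources, adopt the new. */
        for (i = 0; i < nrings; i++) {
                release(live[i]);
                *live[i] = tmp[i];
        }
out:
        vfree(tmp);
        return err;
}
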
tx_ring           455 drivers/net/ethernet/intel/ixgbevf/ethtool.c 		ring = adapter->tx_ring[j];
tx_ring           446 drivers/net/ethernet/intel/ixgbevf/ipsec.c int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
tx_ring           450 drivers/net/ethernet/intel/ixgbevf/ipsec.c 	struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
tx_ring           459 drivers/net/ethernet/intel/ixgbevf/ipsec.c 		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
tx_ring           466 drivers/net/ethernet/intel/ixgbevf/ipsec.c 		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
tx_ring           473 drivers/net/ethernet/intel/ixgbevf/ipsec.c 		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
tx_ring           480 drivers/net/ethernet/intel/ixgbevf/ipsec.c 		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
tx_ring           340 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 	struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
tx_ring           469 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
tx_ring           481 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h static inline int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
tx_ring           213 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
tx_ring           215 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
tx_ring           216 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
tx_ring           217 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);
tx_ring           219 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	clear_check_for_tx_hang(tx_ring);
tx_ring           229 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 					&tx_ring->state);
tx_ring           232 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);
tx_ring           235 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_ring->tx_stats.tx_done_old = tx_done;
tx_ring           267 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 				 struct ixgbevf_ring *tx_ring, int napi_budget)
tx_ring           273 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	unsigned int budget = tx_ring->count / 2;
tx_ring           274 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	unsigned int i = tx_ring->next_to_clean;
tx_ring           279 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_buffer = &tx_ring->tx_buffer_info[i];
tx_ring           280 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
tx_ring           281 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	i -= tx_ring->count;
tx_ring           307 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		if (ring_is_xdp(tx_ring))
tx_ring           313 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		dma_unmap_single(tx_ring->dev,
tx_ring           327 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 				i -= tx_ring->count;
tx_ring           328 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 				tx_buffer = tx_ring->tx_buffer_info;
tx_ring           329 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
tx_ring           334 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 				dma_unmap_page(tx_ring->dev,
tx_ring           347 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			i -= tx_ring->count;
tx_ring           348 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			tx_buffer = tx_ring->tx_buffer_info;
tx_ring           349 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
tx_ring           359 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	i += tx_ring->count;
tx_ring           360 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_ring->next_to_clean = i;
tx_ring           361 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	u64_stats_update_begin(&tx_ring->syncp);
tx_ring           362 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_ring->stats.bytes += total_bytes;
tx_ring           363 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_ring->stats.packets += total_packets;
tx_ring           364 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	u64_stats_update_end(&tx_ring->syncp);
tx_ring           369 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
tx_ring           373 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;
tx_ring           385 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		       ring_is_xdp(tx_ring) ? " XDP" : "",
tx_ring           386 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		       tx_ring->queue_index,
tx_ring           387 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		       IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
tx_ring           388 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		       IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
tx_ring           389 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		       tx_ring->next_to_use, i,
tx_ring           391 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		       tx_ring->tx_buffer_info[i].time_stamp, jiffies);
tx_ring           393 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		if (!ring_is_xdp(tx_ring))
tx_ring           394 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			netif_stop_subqueue(tx_ring->netdev,
tx_ring           395 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 					    tx_ring->queue_index);
tx_ring           403 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (ring_is_xdp(tx_ring))
tx_ring           407 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
tx_ring           408 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
tx_ring           414 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring           415 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 					     tx_ring->queue_index) &&
tx_ring           417 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			netif_wake_subqueue(tx_ring->netdev,
tx_ring           418 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 					    tx_ring->queue_index);
tx_ring           419 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			++tx_ring->tx_stats.restart_queue;
tx_ring          1752 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
tx_ring          2187 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		adapter->tx_ring[0]->reg_idx = def_q;
tx_ring          2376 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
tx_ring          2378 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	u16 i = tx_ring->next_to_clean;
tx_ring          2379 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
tx_ring          2381 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	while (i != tx_ring->next_to_use) {
tx_ring          2385 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		if (ring_is_xdp(tx_ring))
tx_ring          2391 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		dma_unmap_single(tx_ring->dev,
tx_ring          2398 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
tx_ring          2405 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			if (unlikely(i == tx_ring->count)) {
tx_ring          2407 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 				tx_buffer = tx_ring->tx_buffer_info;
tx_ring          2408 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
tx_ring          2413 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 				dma_unmap_page(tx_ring->dev,
tx_ring          2422 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		if (unlikely(i == tx_ring->count)) {
tx_ring          2424 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			tx_buffer = tx_ring->tx_buffer_info;
tx_ring          2429 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_ring->next_to_use = 0;
tx_ring          2430 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_ring->next_to_clean = 0;
tx_ring          2455 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
tx_ring          2490 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		u8 reg_idx = adapter->tx_ring[i]->reg_idx;
tx_ring          2740 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		 adapter->tx_ring[txr_idx] = ring;
tx_ring          2828 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			adapter->tx_ring[ring->queue_index] = NULL;
tx_ring          3198 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			set_check_for_tx_hang(adapter->tx_ring[i]);
tx_ring          3342 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
tx_ring          3344 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ixgbevf_clean_tx_ring(tx_ring);
tx_ring          3346 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	vfree(tx_ring->tx_buffer_info);
tx_ring          3347 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_ring->tx_buffer_info = NULL;
tx_ring          3350 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (!tx_ring->desc)
tx_ring          3353 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
tx_ring          3354 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			  tx_ring->dma);
tx_ring          3356 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_ring->desc = NULL;
tx_ring          3370 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		if (adapter->tx_ring[i]->desc)
tx_ring          3371 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
tx_ring          3383 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
tx_ring          3385 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
tx_ring          3388 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
tx_ring          3389 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_ring->tx_buffer_info = vmalloc(size);
tx_ring          3390 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (!tx_ring->tx_buffer_info)
tx_ring          3393 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	u64_stats_init(&tx_ring->syncp);
tx_ring          3396 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring          3397 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring          3399 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
tx_ring          3400 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 					   &tx_ring->dma, GFP_KERNEL);
tx_ring          3401 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (!tx_ring->desc)
tx_ring          3407 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	vfree(tx_ring->tx_buffer_info);
tx_ring          3408 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_ring->tx_buffer_info = NULL;
tx_ring          3428 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
tx_ring          3449 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ixgbevf_free_tx_resources(adapter->tx_ring[i]);
tx_ring          3718 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
tx_ring          3723 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	u16 i = tx_ring->next_to_use;
tx_ring          3725 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
tx_ring          3728 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
tx_ring          3739 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
tx_ring          3830 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
tx_ring          3845 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
tx_ring          3893 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
tx_ring          3944 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
tx_ring          3956 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	u16 i = tx_ring->next_to_use;
tx_ring          3958 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
tx_ring          3965 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
tx_ring          3970 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		if (dma_mapping_error(tx_ring->dev, dma))
tx_ring          3985 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			if (i == tx_ring->count) {
tx_ring          3986 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
tx_ring          4004 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		if (i == tx_ring->count) {
tx_ring          4005 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
tx_ring          4013 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
tx_ring          4016 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		tx_buffer = &tx_ring->tx_buffer_info[i];
tx_ring          4041 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (i == tx_ring->count)
tx_ring          4044 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_ring->next_to_use = i;
tx_ring          4047 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ixgbevf_write_tail(tx_ring, i);
tx_ring          4051 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	dev_err(tx_ring->dev, "TX DMA map failed\n");
tx_ring          4052 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_buffer = &tx_ring->tx_buffer_info[i];
tx_ring          4057 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			dma_unmap_page(tx_ring->dev,
tx_ring          4064 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			i += tx_ring->count;
tx_ring          4065 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		tx_buffer = &tx_ring->tx_buffer_info[i];
tx_ring          4069 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		dma_unmap_single(tx_ring->dev,
tx_ring          4078 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_ring->next_to_use = i;
tx_ring          4081 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
tx_ring          4083 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
tx_ring          4093 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (likely(ixgbevf_desc_unused(tx_ring) < size))
tx_ring          4097 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
tx_ring          4098 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	++tx_ring->tx_stats.restart_queue;
tx_ring          4103 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
tx_ring          4105 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (likely(ixgbevf_desc_unused(tx_ring) >= size))
tx_ring          4107 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	return __ixgbevf_maybe_stop_tx(tx_ring, size);
tx_ring          4111 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 				   struct ixgbevf_ring *tx_ring)
tx_ring          4144 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
tx_ring          4145 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		tx_ring->tx_stats.tx_busy++;
tx_ring          4150 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
tx_ring          4166 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
tx_ring          4169 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
tx_ring          4173 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ixgbevf_tx_csum(tx_ring, first, &ipsec_tx);
tx_ring          4175 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ixgbevf_tx_map(tx_ring, first, hdr_len);
tx_ring          4177 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
tx_ring          4191 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	struct ixgbevf_ring *tx_ring;
tx_ring          4207 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_ring = adapter->tx_ring[skb->queue_mapping];
tx_ring          4208 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	return ixgbevf_xmit_frame_ring(skb, tx_ring);
tx_ring          4400 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring = adapter->tx_ring[i];
tx_ring           503 drivers/net/ethernet/marvell/skge.c 	p->tx_pending = skge->tx_ring.count;
tx_ring           517 drivers/net/ethernet/marvell/skge.c 	skge->tx_ring.count = p->tx_pending;
tx_ring          2547 drivers/net/ethernet/marvell/skge.c 	tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
tx_ring          2569 drivers/net/ethernet/marvell/skge.c 	err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
tx_ring          2600 drivers/net/ethernet/marvell/skge.c 	BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean);
tx_ring          2602 drivers/net/ethernet/marvell/skge.c 	skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);
tx_ring          2622 drivers/net/ethernet/marvell/skge.c 	kfree(skge->tx_ring.start);
tx_ring          2716 drivers/net/ethernet/marvell/skge.c 	kfree(skge->tx_ring.start);
tx_ring          2743 drivers/net/ethernet/marvell/skge.c 	if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1))
tx_ring          2746 drivers/net/ethernet/marvell/skge.c 	e = skge->tx_ring.to_use;
tx_ring          2818 drivers/net/ethernet/marvell/skge.c 		     e - skge->tx_ring.start, skb->len);
tx_ring          2820 drivers/net/ethernet/marvell/skge.c 	skge->tx_ring.to_use = e->next;
tx_ring          2823 drivers/net/ethernet/marvell/skge.c 	if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
tx_ring          2831 drivers/net/ethernet/marvell/skge.c 	e = skge->tx_ring.to_use;
tx_ring          2873 drivers/net/ethernet/marvell/skge.c 	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
tx_ring          2884 drivers/net/ethernet/marvell/skge.c 	skge->tx_ring.to_clean = e;
tx_ring          3149 drivers/net/ethernet/marvell/skge.c 	struct skge_ring *ring = &skge->tx_ring;
tx_ring          3166 drivers/net/ethernet/marvell/skge.c 				     e - skge->tx_ring.start);
tx_ring          3175 drivers/net/ethernet/marvell/skge.c 	skge->tx_ring.to_clean = e;
tx_ring          3181 drivers/net/ethernet/marvell/skge.c 		     skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
tx_ring          3184 drivers/net/ethernet/marvell/skge.c 			     skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
tx_ring          3701 drivers/net/ethernet/marvell/skge.c 	seq_printf(seq, "Tx Ring: (%d)\n", skge_avail(&skge->tx_ring));
tx_ring          3702 drivers/net/ethernet/marvell/skge.c 	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
tx_ring          3834 drivers/net/ethernet/marvell/skge.c 	skge->tx_ring.count = DEFAULT_TX_RING_SIZE;
tx_ring          2457 drivers/net/ethernet/marvell/skge.h 	struct skge_ring     tx_ring;
tx_ring          1602 drivers/net/ethernet/marvell/sky2.c 	sky2->tx_ring = kcalloc(sky2->tx_ring_size, sizeof(struct tx_ring_info),
tx_ring          1604 drivers/net/ethernet/marvell/sky2.c 	if (!sky2->tx_ring)
tx_ring          1639 drivers/net/ethernet/marvell/sky2.c 	kfree(sky2->tx_ring);
tx_ring          1642 drivers/net/ethernet/marvell/sky2.c 	sky2->tx_ring = NULL;
tx_ring          1922 drivers/net/ethernet/marvell/sky2.c 	re = sky2->tx_ring + slot;
tx_ring          1951 drivers/net/ethernet/marvell/sky2.c 		re = sky2->tx_ring + slot;
tx_ring          1978 drivers/net/ethernet/marvell/sky2.c 		re = sky2->tx_ring + i;
tx_ring          2010 drivers/net/ethernet/marvell/sky2.c 		struct tx_ring_info *re = sky2->tx_ring + idx;
tx_ring          2227 drivers/net/ethernet/marvell/sky2.h 	struct tx_ring_info  *tx_ring;
tx_ring          1130 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	struct mtk_tx_ring *ring = &eth->tx_ring;
tx_ring          1344 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	struct mtk_tx_ring *ring = &eth->tx_ring;
tx_ring          1392 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	struct mtk_tx_ring *ring = &eth->tx_ring;
tx_ring          1429 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	struct mtk_tx_ring *ring = &eth->tx_ring;
tx_ring          1535 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	struct mtk_tx_ring *ring = &eth->tx_ring;
tx_ring          1610 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	struct mtk_tx_ring *ring = &eth->tx_ring;
tx_ring           876 drivers/net/ethernet/mediatek/mtk_eth_soc.h 	struct mtk_tx_ring		tx_ring;
tx_ring           423 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		data[index++] = priv->tx_ring[TX][i]->packets;
tx_ring           424 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		data[index++] = priv->tx_ring[TX][i]->bytes;
tx_ring          1150 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	    tx_size == priv->tx_ring[TX][0]->size)
tx_ring          1195 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	param->tx_pending = priv->tx_ring[TX][0]->size;
tx_ring          1930 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 						priv->tx_ring[t][i]->bf_alloced;
tx_ring          1944 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 				priv->tx_ring[t][i]->bf_enabled =
tx_ring          1376 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][i];
tx_ring          1381 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			i, tx_ring->qpn, tx_ring->sp_cqn,
tx_ring          1382 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			tx_ring->cons, tx_ring->prod);
tx_ring          1607 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX_XDP][tx_ring_idx];
tx_ring          1610 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
tx_ring          1611 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	tx_ring->recycle_ring = priv->rx_ring[rr_index];
tx_ring          1621 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_tx_ring *tx_ring;
tx_ring          1732 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			tx_ring = priv->tx_ring[t][i];
tx_ring          1733 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			err = mlx4_en_activate_tx_ring(priv, tx_ring,
tx_ring          1742 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
tx_ring          1743 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				tx_ring->recycle_ring = NULL;
tx_ring          1749 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				mlx4_en_init_tx_xdp_ring_descs(priv, tx_ring);
tx_ring          1755 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
tx_ring          1756 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				*((u32 *)(tx_ring->buf + j)) = 0xffffffff;
tx_ring          1849 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
tx_ring          1971 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
tx_ring          1979 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			mlx4_en_free_tx_buf(dev, priv->tx_ring[t][i]);
tx_ring          2028 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_tx_ring **tx_ring;
tx_ring          2046 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	tx_ring = priv->tx_ring[TX];
tx_ring          2048 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		tx_ring[i]->bytes = 0;
tx_ring          2049 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		tx_ring[i]->packets = 0;
tx_ring          2050 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		tx_ring[i]->tx_csum = 0;
tx_ring          2051 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		tx_ring[i]->tx_dropped = 0;
tx_ring          2052 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		tx_ring[i]->queue_stopped = 0;
tx_ring          2053 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		tx_ring[i]->wake_queue = 0;
tx_ring          2054 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		tx_ring[i]->tso_packets = 0;
tx_ring          2055 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		tx_ring[i]->xmit_more = 0;
tx_ring          2119 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			if (priv->tx_ring[t] && priv->tx_ring[t][i])
tx_ring          2121 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 							&priv->tx_ring[t][i]);
tx_ring          2125 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		kfree(priv->tx_ring[t]);
tx_ring          2153 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[t][i],
tx_ring          2192 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			if (priv->tx_ring[t][i])
tx_ring          2194 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 							&priv->tx_ring[t][i]);
tx_ring          2226 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		dst->tx_ring[t] = kcalloc(MAX_TX_RINGS,
tx_ring          2229 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		if (!dst->tx_ring[t])
tx_ring          2236 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			kfree(dst->tx_ring[t]);
tx_ring          2245 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		kfree(dst->tx_ring[t]);
tx_ring          2263 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		dst->tx_ring[t] = src->tx_ring[t];
tx_ring          2286 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			kfree(tmp->tx_ring[t]);
tx_ring          2751 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][queue_index];
tx_ring          2770 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT,
tx_ring          3296 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		priv->tx_ring[t] = kcalloc(MAX_TX_RINGS,
tx_ring          3299 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		if (!priv->tx_ring[t]) {
tx_ring           174 drivers/net/ethernet/mellanox/mlx4/en_port.c 		const struct mlx4_en_tx_ring *ring = priv->tx_ring[TX][i];
tx_ring           268 drivers/net/ethernet/mellanox/mlx4/en_port.c 		const struct mlx4_en_tx_ring *ring = priv->tx_ring[TX][i];
tx_ring           909 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			mlx4_en_xmit_doorbell(priv->tx_ring[TX_XDP][cq_ring]);
tx_ring           400 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->type][cq->ring];
tx_ring           862 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring = priv->tx_ring[TX][tx_ind];
tx_ring          1137 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring = priv->tx_ring[TX_XDP][tx_ind];
tx_ring           592 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h 	struct mlx4_en_tx_ring **tx_ring[MLX4_EN_NUM_TX_TYPES];
tx_ring           542 drivers/net/ethernet/natsemi/natsemi.c 	struct netdev_desc *tx_ring;
tx_ring          1867 drivers/net/ethernet/natsemi/natsemi.c 		printk(KERN_DEBUG "  Tx ring at %p:\n", np->tx_ring);
tx_ring          1870 drivers/net/ethernet/natsemi/natsemi.c 				i, np->tx_ring[i].next_desc,
tx_ring          1871 drivers/net/ethernet/natsemi/natsemi.c 				np->tx_ring[i].cmd_status,
tx_ring          1872 drivers/net/ethernet/natsemi/natsemi.c 				np->tx_ring[i].addr);
tx_ring          1924 drivers/net/ethernet/natsemi/natsemi.c 	np->tx_ring = &np->rx_ring[RX_RING_SIZE];
tx_ring          1980 drivers/net/ethernet/natsemi/natsemi.c 		np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
tx_ring          1983 drivers/net/ethernet/natsemi/natsemi.c 		np->tx_ring[i].cmd_status = 0;
tx_ring          2085 drivers/net/ethernet/natsemi/natsemi.c 		np->tx_ring[i].cmd_status = 0;
tx_ring          2113 drivers/net/ethernet/natsemi/natsemi.c 	np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);
tx_ring          2118 drivers/net/ethernet/natsemi/natsemi.c 		np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
tx_ring          2149 drivers/net/ethernet/natsemi/natsemi.c 		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
tx_ring          2155 drivers/net/ethernet/natsemi/natsemi.c 					le32_to_cpu(np->tx_ring[entry].cmd_status));
tx_ring          2156 drivers/net/ethernet/natsemi/natsemi.c 		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
tx_ring          2161 drivers/net/ethernet/natsemi/natsemi.c 				le32_to_cpu(np->tx_ring[entry].cmd_status);
tx_ring           408 drivers/net/ethernet/netronome/nfp/nfp_net.h 	struct nfp_net_tx_ring *tx_ring;
tx_ring           572 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
tx_ring           578 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->idx = idx;
tx_ring           579 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->r_vec = r_vec;
tx_ring           580 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->is_xdp = is_xdp;
tx_ring           581 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	u64_stats_init(&tx_ring->r_vec->tx_sync);
tx_ring           583 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
tx_ring           584 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
tx_ring           678 drivers/net/ethernet/netronome/nfp/nfp_net_common.c static int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
tx_ring           680 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
tx_ring           684 drivers/net/ethernet/netronome/nfp/nfp_net_common.c static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
tx_ring           686 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
tx_ring           689 drivers/net/ethernet/netronome/nfp/nfp_net_common.c static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
tx_ring           691 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
tx_ring           704 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 				 struct nfp_net_tx_ring *tx_ring)
tx_ring           710 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (unlikely(nfp_net_tx_ring_should_wake(tx_ring)))
tx_ring           918 drivers/net/ethernet/netronome/nfp/nfp_net_common.c static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
tx_ring           921 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
tx_ring           922 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->wr_ptr_add = 0;
tx_ring           980 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	struct nfp_net_tx_ring *tx_ring;
tx_ring           993 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring = &dp->tx_rings[qidx];
tx_ring           994 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	r_vec = tx_ring->r_vec;
tx_ring           998 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
tx_ring          1000 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			   qidx, tx_ring->wr_p, tx_ring->rd_p);
tx_ring          1003 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		nfp_net_tx_xmit_more_flush(tx_ring);
tx_ring          1012 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		nfp_net_tx_xmit_more_flush(tx_ring);
tx_ring          1026 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
tx_ring          1029 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	txbuf = &tx_ring->txbufs[wr_idx];
tx_ring          1037 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	txd = &tx_ring->txds[wr_idx];
tx_ring          1071 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			wr_idx = D_IDX(tx_ring, wr_idx + 1);
tx_ring          1072 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			tx_ring->txbufs[wr_idx].skb = skb;
tx_ring          1073 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
tx_ring          1074 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			tx_ring->txbufs[wr_idx].fidx = f;
tx_ring          1076 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			txd = &tx_ring->txds[wr_idx];
tx_ring          1091 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
tx_ring          1093 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->wr_p += nr_frags + 1;
tx_ring          1094 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (nfp_net_tx_ring_should_stop(tx_ring))
tx_ring          1095 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		nfp_net_tx_ring_stop(nd_q, tx_ring);
tx_ring          1097 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->wr_ptr_add += nr_frags + 1;
tx_ring          1099 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		nfp_net_tx_xmit_more_flush(tx_ring);
tx_ring          1106 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
tx_ring          1108 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		tx_ring->txbufs[wr_idx].skb = NULL;
tx_ring          1109 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		tx_ring->txbufs[wr_idx].dma_addr = 0;
tx_ring          1110 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		tx_ring->txbufs[wr_idx].fidx = -2;
tx_ring          1113 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			wr_idx += tx_ring->cnt;
tx_ring          1115 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
tx_ring          1117 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->txbufs[wr_idx].skb = NULL;
tx_ring          1118 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->txbufs[wr_idx].dma_addr = 0;
tx_ring          1119 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->txbufs[wr_idx].fidx = -2;
tx_ring          1123 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	nfp_net_tx_xmit_more_flush(tx_ring);
tx_ring          1137 drivers/net/ethernet/netronome/nfp/nfp_net_common.c static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
tx_ring          1139 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
tx_ring          1146 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (tx_ring->wr_p == tx_ring->rd_p)
tx_ring          1150 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
tx_ring          1152 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (qcp_rd_p == tx_ring->qcp_rd_p)
tx_ring          1155 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
tx_ring          1164 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		idx = D_IDX(tx_ring, tx_ring->rd_p++);
tx_ring          1165 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		tx_buf = &tx_ring->txbufs[idx];
tx_ring          1197 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->qcp_rd_p = qcp_rd_p;
tx_ring          1207 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
tx_ring          1209 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (nfp_net_tx_ring_should_wake(tx_ring)) {
tx_ring          1217 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
tx_ring          1219 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
tx_ring          1222 drivers/net/ethernet/netronome/nfp/nfp_net_common.c static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
tx_ring          1224 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
tx_ring          1231 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
tx_ring          1233 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (qcp_rd_p == tx_ring->qcp_rd_p)
tx_ring          1236 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
tx_ring          1241 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
tx_ring          1245 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		idx = D_IDX(tx_ring, tx_ring->rd_p);
tx_ring          1246 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		tx_ring->rd_p++;
tx_ring          1248 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		done_bytes += tx_ring->txbufs[idx].real_len;
tx_ring          1256 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
tx_ring          1258 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
tx_ring          1271 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
tx_ring          1276 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
tx_ring          1281 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		idx = D_IDX(tx_ring, tx_ring->rd_p);
tx_ring          1282 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		tx_buf = &tx_ring->txbufs[idx];
tx_ring          1284 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		skb = tx_ring->txbufs[idx].skb;
tx_ring          1306 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		tx_ring->qcp_rd_p++;
tx_ring          1307 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		tx_ring->rd_p++;
tx_ring          1310 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	memset(tx_ring->txds, 0, tx_ring->size);
tx_ring          1311 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->wr_p = 0;
tx_ring          1312 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->rd_p = 0;
tx_ring          1313 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->qcp_rd_p = 0;
tx_ring          1314 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->wr_ptr_add = 0;
tx_ring          1316 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (tx_ring->is_xdp || !dp->netdev)
tx_ring          1319 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
tx_ring          1738 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		   struct nfp_net_tx_ring *tx_ring,
tx_ring          1746 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
tx_ring          1748 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			nfp_net_xdp_complete(tx_ring);
tx_ring          1752 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
tx_ring          1759 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
tx_ring          1762 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	txbuf = &tx_ring->txbufs[wr_idx];
tx_ring          1776 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	txd = &tx_ring->txds[wr_idx];
tx_ring          1786 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->wr_p++;
tx_ring          1787 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->wr_ptr_add++;
tx_ring          1806 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	struct nfp_net_tx_ring *tx_ring;
tx_ring          1819 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring = r_vec->xdp_ring;
tx_ring          1928 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 								 tx_ring, rxbuf,
tx_ring          2026 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		if (tx_ring->wr_ptr_add)
tx_ring          2027 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			nfp_net_tx_xmit_more_flush(tx_ring);
tx_ring          2028 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
tx_ring          2030 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			if (!nfp_net_xdp_complete(tx_ring))
tx_ring          2051 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (r_vec->tx_ring)
tx_ring          2052 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		nfp_net_tx_complete(r_vec->tx_ring, budget);
tx_ring          2071 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	struct nfp_net_tx_ring *tx_ring;
tx_ring          2079 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring = r_vec->tx_ring;
tx_ring          2086 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
tx_ring          2113 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
tx_ring          2116 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	txbuf = &tx_ring->txbufs[wr_idx];
tx_ring          2124 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	txd = &tx_ring->txds[wr_idx];
tx_ring          2134 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->wr_p++;
tx_ring          2135 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->wr_ptr_add++;
tx_ring          2136 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	nfp_net_tx_xmit_more_flush(tx_ring);
tx_ring          2289 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	nfp_net_tx_complete(r_vec->tx_ring, 0);
tx_ring          2347 drivers/net/ethernet/netronome/nfp/nfp_net_common.c static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
tx_ring          2349 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
tx_ring          2352 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	kvfree(tx_ring->txbufs);
tx_ring          2354 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (tx_ring->txds)
tx_ring          2355 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		dma_free_coherent(dp->dev, tx_ring->size,
tx_ring          2356 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 				  tx_ring->txds, tx_ring->dma);
tx_ring          2358 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->cnt = 0;
tx_ring          2359 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->txbufs = NULL;
tx_ring          2360 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->txds = NULL;
tx_ring          2361 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->dma = 0;
tx_ring          2362 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->size = 0;
tx_ring          2373 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
tx_ring          2375 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
tx_ring          2377 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->cnt = dp->txd_cnt;
tx_ring          2379 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
tx_ring          2380 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size,
tx_ring          2381 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 					   &tx_ring->dma,
tx_ring          2383 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (!tx_ring->txds) {
tx_ring          2385 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			    tx_ring->cnt);
tx_ring          2389 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs),
tx_ring          2391 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (!tx_ring->txbufs)
tx_ring          2394 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (!tx_ring->is_xdp && dp->netdev)
tx_ring          2396 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 				    tx_ring->idx);
tx_ring          2401 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	nfp_net_tx_ring_free(tx_ring);
tx_ring          2407 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			  struct nfp_net_tx_ring *tx_ring)
tx_ring          2411 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (!tx_ring->is_xdp)
tx_ring          2414 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	for (i = 0; i < tx_ring->cnt; i++) {
tx_ring          2415 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		if (!tx_ring->txbufs[i].frag)
tx_ring          2418 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr);
tx_ring          2419 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		__free_page(virt_to_page(tx_ring->txbufs[i].frag));
tx_ring          2425 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			   struct nfp_net_tx_ring *tx_ring)
tx_ring          2427 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	struct nfp_net_tx_buf *txbufs = tx_ring->txbufs;
tx_ring          2430 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (!tx_ring->is_xdp)
tx_ring          2433 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	for (i = 0; i < tx_ring->cnt; i++) {
tx_ring          2436 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			nfp_net_tx_ring_bufs_free(dp, tx_ring);
tx_ring          2607 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	r_vec->tx_ring =
tx_ring          2794 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			     struct nfp_net_tx_ring *tx_ring, unsigned int idx)
tx_ring          2796 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
tx_ring          2797 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
tx_ring          2798 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
tx_ring            76 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c 	struct nfp_net_tx_ring *tx_ring;
tx_ring            85 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c 		tx_ring = r_vec->tx_ring;
tx_ring            87 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c 		tx_ring = r_vec->xdp_ring;
tx_ring            88 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c 	if (!r_vec->nfp_net || !tx_ring)
tx_ring            94 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c 	txd_cnt = tx_ring->cnt;
tx_ring            96 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c 	d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
tx_ring            97 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c 	d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q);
tx_ring           100 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c 		   tx_ring->idx, tx_ring->qcidx,
tx_ring           101 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c 		   tx_ring == r_vec->tx_ring ? "" : "xdp",
tx_ring           102 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c 		   tx_ring->cnt, &tx_ring->dma, tx_ring->txds,
tx_ring           103 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c 		   tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p);
tx_ring           106 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c 		txd = &tx_ring->txds[i];
tx_ring           111 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c 		if (tx_ring == r_vec->tx_ring) {
tx_ring           112 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c 			struct sk_buff *skb = READ_ONCE(tx_ring->txbufs[i].skb);
tx_ring           119 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c 				   READ_ONCE(tx_ring->txbufs[i].frag));
tx_ring           122 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c 		if (tx_ring->txbufs[i].dma_addr)
tx_ring           124 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c 				   &tx_ring->txbufs[i].dma_addr);
tx_ring           126 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c 		if (i == tx_ring->rd_p % txd_cnt)
tx_ring           128 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c 		if (i == tx_ring->wr_p % txd_cnt)
tx_ring           830 drivers/net/ethernet/nvidia/forcedeth.c 	union ring_type tx_ring;
tx_ring          1955 drivers/net/ethernet/nvidia/forcedeth.c 	np->get_tx = np->tx_ring;
tx_ring          1956 drivers/net/ethernet/nvidia/forcedeth.c 	np->put_tx = np->tx_ring;
tx_ring          1959 drivers/net/ethernet/nvidia/forcedeth.c 		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
tx_ring          1961 drivers/net/ethernet/nvidia/forcedeth.c 		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
tx_ring          1973 drivers/net/ethernet/nvidia/forcedeth.c 			np->tx_ring.orig[i].flaglen = 0;
tx_ring          1974 drivers/net/ethernet/nvidia/forcedeth.c 			np->tx_ring.orig[i].buf = 0;
tx_ring          1976 drivers/net/ethernet/nvidia/forcedeth.c 			np->tx_ring.ex[i].flaglen = 0;
tx_ring          1977 drivers/net/ethernet/nvidia/forcedeth.c 			np->tx_ring.ex[i].txvlan = 0;
tx_ring          1978 drivers/net/ethernet/nvidia/forcedeth.c 			np->tx_ring.ex[i].bufhigh = 0;
tx_ring          1979 drivers/net/ethernet/nvidia/forcedeth.c 			np->tx_ring.ex[i].buflow = 0;
tx_ring          2036 drivers/net/ethernet/nvidia/forcedeth.c 			np->tx_ring.orig[i].flaglen = 0;
tx_ring          2037 drivers/net/ethernet/nvidia/forcedeth.c 			np->tx_ring.orig[i].buf = 0;
tx_ring          2039 drivers/net/ethernet/nvidia/forcedeth.c 			np->tx_ring.ex[i].flaglen = 0;
tx_ring          2040 drivers/net/ethernet/nvidia/forcedeth.c 			np->tx_ring.ex[i].txvlan = 0;
tx_ring          2041 drivers/net/ethernet/nvidia/forcedeth.c 			np->tx_ring.ex[i].bufhigh = 0;
tx_ring          2042 drivers/net/ethernet/nvidia/forcedeth.c 			np->tx_ring.ex[i].buflow = 0;
tx_ring          2273 drivers/net/ethernet/nvidia/forcedeth.c 			put_tx = np->tx_ring.orig;
tx_ring          2319 drivers/net/ethernet/nvidia/forcedeth.c 				put_tx = np->tx_ring.orig;
tx_ring          2325 drivers/net/ethernet/nvidia/forcedeth.c 	if (unlikely(put_tx == np->tx_ring.orig))
tx_ring          2431 drivers/net/ethernet/nvidia/forcedeth.c 			put_tx = np->tx_ring.ex;
tx_ring          2477 drivers/net/ethernet/nvidia/forcedeth.c 				put_tx = np->tx_ring.ex;
tx_ring          2483 drivers/net/ethernet/nvidia/forcedeth.c 	if (unlikely(put_tx == np->tx_ring.ex))
tx_ring          2628 drivers/net/ethernet/nvidia/forcedeth.c 			np->get_tx.orig = np->tx_ring.orig;
tx_ring          2685 drivers/net/ethernet/nvidia/forcedeth.c 			np->get_tx.ex = np->tx_ring.ex;
tx_ring          2740 drivers/net/ethernet/nvidia/forcedeth.c 					    le32_to_cpu(np->tx_ring.orig[i].buf),
tx_ring          2741 drivers/net/ethernet/nvidia/forcedeth.c 					    le32_to_cpu(np->tx_ring.orig[i].flaglen),
tx_ring          2742 drivers/net/ethernet/nvidia/forcedeth.c 					    le32_to_cpu(np->tx_ring.orig[i+1].buf),
tx_ring          2743 drivers/net/ethernet/nvidia/forcedeth.c 					    le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
tx_ring          2744 drivers/net/ethernet/nvidia/forcedeth.c 					    le32_to_cpu(np->tx_ring.orig[i+2].buf),
tx_ring          2745 drivers/net/ethernet/nvidia/forcedeth.c 					    le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
tx_ring          2746 drivers/net/ethernet/nvidia/forcedeth.c 					    le32_to_cpu(np->tx_ring.orig[i+3].buf),
tx_ring          2747 drivers/net/ethernet/nvidia/forcedeth.c 					    le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
tx_ring          2755 drivers/net/ethernet/nvidia/forcedeth.c 					    le32_to_cpu(np->tx_ring.ex[i].bufhigh),
tx_ring          2756 drivers/net/ethernet/nvidia/forcedeth.c 					    le32_to_cpu(np->tx_ring.ex[i].buflow),
tx_ring          2757 drivers/net/ethernet/nvidia/forcedeth.c 					    le32_to_cpu(np->tx_ring.ex[i].flaglen),
tx_ring          2758 drivers/net/ethernet/nvidia/forcedeth.c 					    le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
tx_ring          2759 drivers/net/ethernet/nvidia/forcedeth.c 					    le32_to_cpu(np->tx_ring.ex[i+1].buflow),
tx_ring          2760 drivers/net/ethernet/nvidia/forcedeth.c 					    le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
tx_ring          2761 drivers/net/ethernet/nvidia/forcedeth.c 					    le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
tx_ring          2762 drivers/net/ethernet/nvidia/forcedeth.c 					    le32_to_cpu(np->tx_ring.ex[i+2].buflow),
tx_ring          2763 drivers/net/ethernet/nvidia/forcedeth.c 					    le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
tx_ring          2764 drivers/net/ethernet/nvidia/forcedeth.c 					    le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
tx_ring          2765 drivers/net/ethernet/nvidia/forcedeth.c 					    le32_to_cpu(np->tx_ring.ex[i+3].buflow),
tx_ring          2766 drivers/net/ethernet/nvidia/forcedeth.c 					    le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
tx_ring          4713 drivers/net/ethernet/nvidia/forcedeth.c 		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
tx_ring          4716 drivers/net/ethernet/nvidia/forcedeth.c 		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
tx_ring          5157 drivers/net/ethernet/nvidia/forcedeth.c 		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
tx_ring          5158 drivers/net/ethernet/nvidia/forcedeth.c 		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
tx_ring          5160 drivers/net/ethernet/nvidia/forcedeth.c 		np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
tx_ring          5161 drivers/net/ethernet/nvidia/forcedeth.c 		np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
tx_ring          5162 drivers/net/ethernet/nvidia/forcedeth.c 		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
tx_ring          5821 drivers/net/ethernet/nvidia/forcedeth.c 		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
tx_ring          5830 drivers/net/ethernet/nvidia/forcedeth.c 		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
tx_ring           586 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h 	struct pch_gbe_tx_ring *tx_ring;
tx_ring           611 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h 			       struct pch_gbe_tx_ring *tx_ring);
tx_ring           276 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c 	struct pch_gbe_tx_ring *txdr = adapter->tx_ring;
tx_ring           309 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c 	tx_old = adapter->tx_ring;
tx_ring           322 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c 	adapter->tx_ring = txdr;
tx_ring           338 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c 		err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
tx_ring           346 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c 		adapter->tx_ring = txdr;
tx_ring           355 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c 	adapter->tx_ring = tx_old;
tx_ring           574 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	adapter->tx_ring = devm_kzalloc(&adapter->pdev->dev,
tx_ring           575 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 					sizeof(*adapter->tx_ring), GFP_KERNEL);
tx_ring           576 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	if (!adapter->tx_ring)
tx_ring           807 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		   (unsigned long long)adapter->tx_ring->dma,
tx_ring           808 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		   adapter->tx_ring->size);
tx_ring           811 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	tdba = adapter->tx_ring->dma;
tx_ring           812 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	tdlen = adapter->tx_ring->size - 0x10;
tx_ring           926 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 				   struct pch_gbe_tx_ring *tx_ring)
tx_ring           934 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	for (i = 0; i < tx_ring->count; i++) {
tx_ring           935 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		buffer_info = &tx_ring->buffer_info[i];
tx_ring           941 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
tx_ring           942 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	memset(tx_ring->buffer_info, 0, size);
tx_ring           945 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	memset(tx_ring->desc, 0, tx_ring->size);
tx_ring           946 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	tx_ring->next_to_use = 0;
tx_ring           947 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	tx_ring->next_to_clean = 0;
tx_ring           948 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
tx_ring           949 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
tx_ring          1093 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 			      struct pch_gbe_tx_ring *tx_ring,
tx_ring          1149 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	ring_num = tx_ring->next_to_use;
tx_ring          1150 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	if (unlikely((ring_num + 1) == tx_ring->count))
tx_ring          1151 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		tx_ring->next_to_use = 0;
tx_ring          1153 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		tx_ring->next_to_use = ring_num + 1;
tx_ring          1156 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	buffer_info = &tx_ring->buffer_info[ring_num];
tx_ring          1175 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		tx_ring->next_to_use = ring_num;
tx_ring          1182 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
tx_ring          1189 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	if (unlikely(++ring_num == tx_ring->count))
tx_ring          1193 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	iowrite32(tx_ring->dma +
tx_ring          1449 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 					struct pch_gbe_tx_ring *tx_ring)
tx_ring          1460 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	for (i = 0; i < tx_ring->count; i++) {
tx_ring          1461 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		buffer_info = &tx_ring->buffer_info[i];
tx_ring          1465 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
tx_ring          1481 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		 struct pch_gbe_tx_ring *tx_ring)
tx_ring          1492 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		   tx_ring->next_to_clean);
tx_ring          1494 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	i = tx_ring->next_to_clean;
tx_ring          1495 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
tx_ring          1499 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	unused = PCH_GBE_DESC_UNUSED(tx_ring);
tx_ring          1500 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	thresh = tx_ring->count - PCH_GBE_TX_WEIGHT;
tx_ring          1507 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 				   tx_ring->next_to_clean, tx_ring->next_to_use,
tx_ring          1515 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 			tx_desc = PCH_GBE_TX_DESC(*tx_ring, k);
tx_ring          1517 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 			if (++k >= tx_ring->count) k = 0;  /*increment, wrap*/
tx_ring          1522 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 				   unused, j, i, k, tx_ring->next_to_use,
tx_ring          1531 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		buffer_info = &tx_ring->buffer_info[i];
tx_ring          1573 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		if (unlikely(++i == tx_ring->count))
tx_ring          1575 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
tx_ring          1596 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		tx_ring->next_to_clean = i;
tx_ring          1599 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 			   tx_ring->next_to_clean);
tx_ring          1730 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 				struct pch_gbe_tx_ring *tx_ring)
tx_ring          1737 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
tx_ring          1738 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	tx_ring->buffer_info = vzalloc(size);
tx_ring          1739 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	if (!tx_ring->buffer_info)
tx_ring          1742 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
tx_ring          1744 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
tx_ring          1745 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 					   &tx_ring->dma, GFP_KERNEL);
tx_ring          1746 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	if (!tx_ring->desc) {
tx_ring          1747 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		vfree(tx_ring->buffer_info);
tx_ring          1751 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	tx_ring->next_to_use = 0;
tx_ring          1752 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	tx_ring->next_to_clean = 0;
tx_ring          1754 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	for (desNo = 0; desNo < tx_ring->count; desNo++) {
tx_ring          1755 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
tx_ring          1760 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		   tx_ring->desc, (unsigned long long)tx_ring->dma,
tx_ring          1761 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		   tx_ring->next_to_clean, tx_ring->next_to_use);
tx_ring          1812 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 				struct pch_gbe_tx_ring *tx_ring)
tx_ring          1816 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	pch_gbe_clean_tx_ring(adapter, tx_ring);
tx_ring          1817 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	vfree(tx_ring->buffer_info);
tx_ring          1818 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	tx_ring->buffer_info = NULL;
tx_ring          1819 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
tx_ring          1820 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	tx_ring->desc = NULL;
tx_ring          1878 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
tx_ring          1908 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	pch_gbe_alloc_tx_buffers(adapter, tx_ring);
tx_ring          1954 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
tx_ring          2014 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
tx_ring          2033 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
tx_ring          2054 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
tx_ring          2070 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
tx_ring          2072 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
tx_ring          2076 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 			   tx_ring->next_to_use, tx_ring->next_to_clean);
tx_ring          2081 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	pch_gbe_tx_queue(adapter, tx_ring, skb);
tx_ring          2302 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
tx_ring           448 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c 		struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
tx_ring           449 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c 		tx_ring->count = TxDescriptors;
tx_ring           450 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c 		pch_gbe_validate_option(&tx_ring->count, &opt, adapter);
tx_ring           451 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c 		tx_ring->count = roundup(tx_ring->count,
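
The pch_gbe entries above (the pch_gbe_setup_tx_resources() block in pch_gbe_main.c) show the usual two-level TX ring allocation: a vzalloc()'d buffer_info array for software bookkeeping plus a dma_alloc_coherent() descriptor block whose bus address is later programmed into TX_DSC_HW_P. Below is a minimal sketch of that step; the demo_* structures and field names are placeholders, not the driver's real definitions.

#include <linux/dma-mapping.h>
#include <linux/overflow.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

/* Placeholder types; the real driver uses pch_gbe_tx_desc / pch_gbe_buffer. */
struct demo_tx_desc { __le64 buffer_addr; __le32 length; __le32 flags; };
struct demo_tx_buffer { struct sk_buff *skb; dma_addr_t dma; };

struct demo_tx_ring {
	unsigned int count;		/* number of descriptors */
	unsigned int size;		/* bytes of descriptor memory */
	struct demo_tx_desc *desc;	/* coherent DMA block */
	dma_addr_t dma;			/* bus address the NIC is told about */
	struct demo_tx_buffer *buffer_info;
	unsigned int next_to_use, next_to_clean;
};

static int demo_setup_tx_resources(struct device *dev, struct demo_tx_ring *ring)
{
	/* software bookkeeping array: virtually contiguous is enough */
	ring->buffer_info = vzalloc(array_size(ring->count,
					       sizeof(*ring->buffer_info)));
	if (!ring->buffer_info)
		return -ENOMEM;

	/* descriptor memory must be DMA-coherent and physically contiguous */
	ring->size = ring->count * sizeof(struct demo_tx_desc);
	ring->desc = dma_alloc_coherent(dev, ring->size, &ring->dma, GFP_KERNEL);
	if (!ring->desc) {
		vfree(ring->buffer_info);
		return -ENOMEM;
	}

	ring->next_to_use = 0;
	ring->next_to_clean = 0;
	return 0;
}
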
tx_ring           485 drivers/net/ethernet/packetengines/hamachi.c 	struct hamachi_desc *tx_ring;
tx_ring           650 drivers/net/ethernet/packetengines/hamachi.c 	hmp->tx_ring = ring_space;
tx_ring           779 drivers/net/ethernet/packetengines/hamachi.c 	pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring,
tx_ring           999 drivers/net/ethernet/packetengines/hamachi.c 		if (hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn))
tx_ring          1005 drivers/net/ethernet/packetengines/hamachi.c 				leXX_to_cpu(hmp->tx_ring[entry].addr),
tx_ring          1010 drivers/net/ethernet/packetengines/hamachi.c 		hmp->tx_ring[entry].status_n_length = 0;
tx_ring          1012 drivers/net/ethernet/packetengines/hamachi.c 			hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
tx_ring          1060 drivers/net/ethernet/packetengines/hamachi.c 		printk(KERN_DEBUG"  Tx ring %p: ", hmp->tx_ring);
tx_ring          1063 drivers/net/ethernet/packetengines/hamachi.c 			       le32_to_cpu(hmp->tx_ring[i].status_n_length));
tx_ring          1088 drivers/net/ethernet/packetengines/hamachi.c 			hmp->tx_ring[i].status_n_length =
tx_ring          1090 drivers/net/ethernet/packetengines/hamachi.c 				(hmp->tx_ring[i].status_n_length &
tx_ring          1093 drivers/net/ethernet/packetengines/hamachi.c 			hmp->tx_ring[i].status_n_length &= cpu_to_le32(0x0000ffff);
tx_ring          1096 drivers/net/ethernet/packetengines/hamachi.c 			pci_unmap_single(hmp->pci_dev, leXX_to_cpu(hmp->tx_ring[i].addr),
tx_ring          1197 drivers/net/ethernet/packetengines/hamachi.c 		hmp->tx_ring[i].status_n_length = 0;
tx_ring          1200 drivers/net/ethernet/packetengines/hamachi.c 	hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
tx_ring          1236 drivers/net/ethernet/packetengines/hamachi.c         hmp->tx_ring[entry].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
tx_ring          1249 drivers/net/ethernet/packetengines/hamachi.c 		hmp->tx_ring[entry].status_n_length = cpu_to_le32(DescOwn |
tx_ring          1252 drivers/net/ethernet/packetengines/hamachi.c 		hmp->tx_ring[entry].status_n_length = cpu_to_le32(DescOwn |
tx_ring          1331 drivers/net/ethernet/packetengines/hamachi.c 					if (hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn))
tx_ring          1337 drivers/net/ethernet/packetengines/hamachi.c 							leXX_to_cpu(hmp->tx_ring[entry].addr),
tx_ring          1343 drivers/net/ethernet/packetengines/hamachi.c 					hmp->tx_ring[entry].status_n_length = 0;
tx_ring          1345 drivers/net/ethernet/packetengines/hamachi.c 						hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
tx_ring          1675 drivers/net/ethernet/packetengines/hamachi.c 				   readl(ioaddr + TxCurPtr) == (long)&hmp->tx_ring[i] ? '>' : ' ',
tx_ring          1676 drivers/net/ethernet/packetengines/hamachi.c 				   i, hmp->tx_ring[i].status_n_length, hmp->tx_ring[i].addr);
tx_ring          1719 drivers/net/ethernet/packetengines/hamachi.c 				leXX_to_cpu(hmp->tx_ring[i].addr),
tx_ring          1904 drivers/net/ethernet/packetengines/hamachi.c 		pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring,
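
The hamachi entries show completion handling keyed off the DescOwn bit in status_n_length: once the NIC clears ownership, the driver unmaps the buffer, frees the skb, zeroes the descriptor, and keeps DescEndRing set on the final slot. A schematic of that reclaim loop follows, using invented demo_* names, an illustrative ownership bit, and the modern dma_unmap_single() in place of the pci_unmap_single() calls quoted above.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

#define DEMO_TX_RING_SIZE	64
#define DEMO_DESC_OWN		cpu_to_le32(0x80000000)	/* illustrative bit */

struct demo_desc { __le32 status_n_length; __le64 addr; };

struct demo_priv {
	struct device *dev;
	struct demo_desc *tx_ring;
	struct sk_buff *tx_skbuff[DEMO_TX_RING_SIZE];
	unsigned int dirty_tx, cur_tx;
};

static void demo_tx_reclaim(struct demo_priv *p)
{
	while (p->dirty_tx != p->cur_tx) {
		unsigned int entry = p->dirty_tx % DEMO_TX_RING_SIZE;
		struct demo_desc *d = &p->tx_ring[entry];
		struct sk_buff *skb = p->tx_skbuff[entry];

		if (d->status_n_length & DEMO_DESC_OWN)
			break;			/* still owned by the NIC */

		dma_unmap_single(p->dev, le64_to_cpu(d->addr),
				 skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
		p->tx_skbuff[entry] = NULL;
		d->status_n_length = 0;		/* hand the slot back to software */
		p->dirty_tx++;
	}
}
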
tx_ring           310 drivers/net/ethernet/packetengines/yellowfin.c 	struct yellowfin_desc *tx_ring;
tx_ring           440 drivers/net/ethernet/packetengines/yellowfin.c 	np->tx_ring = ring_space;
tx_ring           513 drivers/net/ethernet/packetengines/yellowfin.c         pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
tx_ring           697 drivers/net/ethernet/packetengines/yellowfin.c 		pr_warn("  Tx ring %p: ", yp->tx_ring);
tx_ring           701 drivers/net/ethernet/packetengines/yellowfin.c 			       yp->tx_ring[i].result_status);
tx_ring           759 drivers/net/ethernet/packetengines/yellowfin.c 		yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
tx_ring           760 drivers/net/ethernet/packetengines/yellowfin.c 		yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
tx_ring           764 drivers/net/ethernet/packetengines/yellowfin.c 	yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
tx_ring           772 drivers/net/ethernet/packetengines/yellowfin.c 		yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
tx_ring           773 drivers/net/ethernet/packetengines/yellowfin.c 		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
tx_ring           777 drivers/net/ethernet/packetengines/yellowfin.c 			yp->tx_ring[j].dbdma_cmd =
tx_ring           779 drivers/net/ethernet/packetengines/yellowfin.c 			yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
tx_ring           780 drivers/net/ethernet/packetengines/yellowfin.c 			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
tx_ring           784 drivers/net/ethernet/packetengines/yellowfin.c 			yp->tx_ring[j].dbdma_cmd =
tx_ring           786 drivers/net/ethernet/packetengines/yellowfin.c 			yp->tx_ring[j].request_cnt = 2;
tx_ring           788 drivers/net/ethernet/packetengines/yellowfin.c 			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
tx_ring           793 drivers/net/ethernet/packetengines/yellowfin.c 		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
tx_ring           797 drivers/net/ethernet/packetengines/yellowfin.c 	yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
tx_ring           834 drivers/net/ethernet/packetengines/yellowfin.c 	yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
tx_ring           836 drivers/net/ethernet/packetengines/yellowfin.c 	yp->tx_ring[entry].result_status = 0;
tx_ring           839 drivers/net/ethernet/packetengines/yellowfin.c 		yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
tx_ring           840 drivers/net/ethernet/packetengines/yellowfin.c 		yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
tx_ring           843 drivers/net/ethernet/packetengines/yellowfin.c 		yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
tx_ring           844 drivers/net/ethernet/packetengines/yellowfin.c 		yp->tx_ring[entry].dbdma_cmd =
tx_ring           849 drivers/net/ethernet/packetengines/yellowfin.c 	yp->tx_ring[entry<<1].request_cnt = len;
tx_ring           850 drivers/net/ethernet/packetengines/yellowfin.c 	yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
tx_ring           858 drivers/net/ethernet/packetengines/yellowfin.c 		yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
tx_ring           862 drivers/net/ethernet/packetengines/yellowfin.c 	yp->tx_ring[entry<<1].dbdma_cmd =
tx_ring           920 drivers/net/ethernet/packetengines/yellowfin.c 			if (yp->tx_ring[entry].result_status == 0)
tx_ring           926 drivers/net/ethernet/packetengines/yellowfin.c 			pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr),
tx_ring           984 drivers/net/ethernet/packetengines/yellowfin.c 					yp->tx_ring[entry<<1].addr, skb->len,
tx_ring          1218 drivers/net/ethernet/packetengines/yellowfin.c 				   ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
tx_ring          1219 drivers/net/ethernet/packetengines/yellowfin.c 				   i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
tx_ring          1220 drivers/net/ethernet/packetengines/yellowfin.c 				   yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
tx_ring          1385 drivers/net/ethernet/packetengines/yellowfin.c 	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
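
yellowfin builds its TX ring as a chain of DBDMA-style commands: every slot starts out as CMD_STOP, branch_addr points at the bus address of the next descriptor, and the last slot gets BRANCH_ALWAYS so the engine wraps back to the head. The sketch below compresses that initialisation; the command-bit values and the demo_dbdma_desc layout are invented for illustration and do not match the real yellowfin_desc.

#include <linux/types.h>
#include <asm/byteorder.h>

#define DEMO_TX_RING_SIZE	16
#define DEMO_CMD_STOP		0x10000000	/* placeholder command bits */
#define DEMO_BRANCH_ALWAYS	0x00c00000

struct demo_dbdma_desc {
	__le32 dbdma_cmd;
	__le32 addr;
	__le32 branch_addr;	/* bus address of the next command */
	__le32 result_status;
};

static void demo_init_tx_chain(struct demo_dbdma_desc *ring, dma_addr_t ring_dma)
{
	int i;

	for (i = 0; i < DEMO_TX_RING_SIZE; i++) {
		ring[i].dbdma_cmd = cpu_to_le32(DEMO_CMD_STOP);
		ring[i].result_status = 0;
		/* chain each slot to the one after it ... */
		ring[i].branch_addr = cpu_to_le32(ring_dma +
				(i + 1) * sizeof(struct demo_dbdma_desc));
	}
	/* ... and make the last slot branch back to the head of the ring */
	ring[DEMO_TX_RING_SIZE - 1].dbdma_cmd =
		cpu_to_le32(DEMO_CMD_STOP | DEMO_BRANCH_ALWAYS);
	ring[DEMO_TX_RING_SIZE - 1].branch_addr = cpu_to_le32(ring_dma);
}
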
tx_ring           509 drivers/net/ethernet/pasemi/pasemi_mac.c 	struct pasemi_mac_txring *txring = tx_ring(mac);
tx_ring           649 drivers/net/ethernet/pasemi/pasemi_mac.c 	pcnt = *tx_ring(mac)->chan.status & PAS_STATUS_PCNT_M;
tx_ring           653 drivers/net/ethernet/pasemi/pasemi_mac.c 	write_iob_reg(PAS_IOB_DMA_TXCH_RESET(tx_ring(mac)->chan.chno), reg);
tx_ring           680 drivers/net/ethernet/pasemi/pasemi_mac.c 	struct pasemi_dmachan *chan = &tx_ring(mac)->chan;
tx_ring          1126 drivers/net/ethernet/pasemi/pasemi_mac.c 	pasemi_dma_start_chan(&tx_ring(mac)->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ |
tx_ring          1215 drivers/net/ethernet/pasemi/pasemi_mac.c 	int txch = tx_ring(mac)->chan.chno;
tx_ring          1280 drivers/net/ethernet/pasemi/pasemi_mac.c 	txch = tx_ring(mac)->chan.chno;
tx_ring          1311 drivers/net/ethernet/pasemi/pasemi_mac.c 	pasemi_mac_clean_tx(tx_ring(mac));
tx_ring          1425 drivers/net/ethernet/pasemi/pasemi_mac.c 	struct pasemi_mac_txring * const txring = tx_ring(mac);
tx_ring          1561 drivers/net/ethernet/pasemi/pasemi_mac.c 	pasemi_mac_clean_tx(tx_ring(mac));
tx_ring            65 drivers/net/ethernet/qlogic/netxen/netxen_nic.h #define TX_BUFF_RINGSIZE(tx_ring)	\
tx_ring            66 drivers/net/ethernet/qlogic/netxen/netxen_nic.h 	(sizeof(struct netxen_cmd_buffer) * tx_ring->num_desc)
tx_ring            67 drivers/net/ethernet/qlogic/netxen/netxen_nic.h #define TX_DESC_RINGSIZE(tx_ring)	\
tx_ring            68 drivers/net/ethernet/qlogic/netxen/netxen_nic.h 	(sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
tx_ring          1641 drivers/net/ethernet/qlogic/netxen/netxen_nic.h 	struct nx_host_tx_ring *tx_ring;
tx_ring          1790 drivers/net/ethernet/qlogic/netxen/netxen_nic.h 		struct nx_host_tx_ring *tx_ring);
tx_ring          1858 drivers/net/ethernet/qlogic/netxen/netxen_nic.h static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
tx_ring          1861 drivers/net/ethernet/qlogic/netxen/netxen_nic.h 	return find_diff_among(tx_ring->producer,
tx_ring          1862 drivers/net/ethernet/qlogic/netxen/netxen_nic.h 			tx_ring->sw_consumer, tx_ring->num_desc);
tx_ring           427 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
tx_ring           467 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
tx_ring           468 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
tx_ring           480 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 		tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
tx_ring           683 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	struct nx_host_tx_ring *tx_ring;
tx_ring           689 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	tx_ring = adapter->tx_ring;
tx_ring           693 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr);
tx_ring           694 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc);
tx_ring           739 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	struct nx_host_tx_ring *tx_ring;
tx_ring           746 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	tx_ring = adapter->tx_ring;
tx_ring           761 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	tx_ring->hw_consumer =
tx_ring           765 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
tx_ring           766 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 			&tx_ring->phys_addr);
tx_ring           775 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	tx_ring->desc_head = addr;
tx_ring           852 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	struct nx_host_tx_ring *tx_ring;
tx_ring           885 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	tx_ring = adapter->tx_ring;
tx_ring           886 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	if (tx_ring->desc_head != NULL) {
tx_ring           888 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 				TX_DESC_RINGSIZE(tx_ring),
tx_ring           889 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 				tx_ring->desc_head, tx_ring->phys_addr);
tx_ring           890 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 		tx_ring->desc_head = NULL;
tx_ring           322 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 		regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
tx_ring           334 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 				 adapter->tx_ring->crb_cmd_consumer);
tx_ring           337 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 	regs_buff[i++] = NXRDIO(adapter, adapter->tx_ring->crb_cmd_producer);
tx_ring           555 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	struct nx_host_tx_ring *tx_ring;
tx_ring           562 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	tx_ring = adapter->tx_ring;
tx_ring           563 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	__netif_tx_lock_bh(tx_ring->txq);
tx_ring           565 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	producer = tx_ring->producer;
tx_ring           567 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	if (nr_desc >= netxen_tx_avail(tx_ring)) {
tx_ring           568 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 		netif_tx_stop_queue(tx_ring->txq);
tx_ring           570 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 		if (netxen_tx_avail(tx_ring) > nr_desc) {
tx_ring           571 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 			if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
tx_ring           572 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 				netif_tx_wake_queue(tx_ring->txq);
tx_ring           574 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 			__netif_tx_unlock_bh(tx_ring->txq);
tx_ring           580 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 		pbuf = &tx_ring->cmd_buf_arr[producer];
tx_ring           584 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 		memcpy(&tx_ring->desc_head[producer],
tx_ring           587 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 		producer = get_next_index(producer, tx_ring->num_desc);
tx_ring           592 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	tx_ring->producer = producer;
tx_ring           594 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	netxen_nic_update_cmd_producer(adapter, tx_ring);
tx_ring           596 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 	__netif_tx_unlock_bh(tx_ring->txq);
tx_ring           120 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
tx_ring           123 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	cmd_buf = tx_ring->cmd_buf_arr;
tx_ring           124 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	for (i = 0; i < tx_ring->num_desc; i++) {
tx_ring           153 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	struct nx_host_tx_ring *tx_ring;
tx_ring           169 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	if (adapter->tx_ring == NULL)
tx_ring           172 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	tx_ring = adapter->tx_ring;
tx_ring           173 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	vfree(tx_ring->cmd_buf_arr);
tx_ring           174 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	kfree(tx_ring);
tx_ring           175 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	adapter->tx_ring = NULL;
tx_ring           183 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	struct nx_host_tx_ring *tx_ring;
tx_ring           190 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	tx_ring = kzalloc(sizeof(struct nx_host_tx_ring), GFP_KERNEL);
tx_ring           191 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	if (tx_ring == NULL)
tx_ring           194 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	adapter->tx_ring = tx_ring;
tx_ring           196 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	tx_ring->num_desc = adapter->num_txd;
tx_ring           197 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	tx_ring->txq = netdev_get_tx_queue(netdev, 0);
tx_ring           199 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
tx_ring           203 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	tx_ring->cmd_buf_arr = cmd_buf_arr;
tx_ring          1745 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
tx_ring          1750 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	sw_consumer = tx_ring->sw_consumer;
tx_ring          1751 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
tx_ring          1754 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
tx_ring          1772 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
tx_ring          1777 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	tx_ring->sw_consumer = sw_consumer;
tx_ring          1783 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 			if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
tx_ring          1800 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
tx_ring           104 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		struct nx_host_tx_ring *tx_ring)
tx_ring           106 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	NXWRIO(adapter, tx_ring->crb_cmd_producer, tx_ring->producer);
tx_ring           116 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		struct nx_host_tx_ring *tx_ring)
tx_ring           118 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	NXWRIO(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer);
tx_ring          1204 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	struct nx_host_tx_ring *tx_ring;
tx_ring          1240 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		tx_ring = adapter->tx_ring;
tx_ring          1241 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
tx_ring          1243 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		tx_ring->crb_cmd_consumer = netxen_get_ioaddr(adapter,
tx_ring          1246 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		tx_ring->producer = 0;
tx_ring          1247 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		tx_ring->sw_consumer = 0;
tx_ring          1249 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		netxen_nic_update_cmd_producer(adapter, tx_ring);
tx_ring          1250 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		netxen_nic_update_cmd_consumer(adapter, tx_ring);
tx_ring          1857 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		struct nx_host_tx_ring *tx_ring,
tx_ring          1931 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	producer = tx_ring->producer;
tx_ring          1938 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		hwdesc = &tx_ring->desc_head[producer];
tx_ring          1939 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		tx_ring->cmd_buf_arr[producer].skb = NULL;
tx_ring          1954 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		producer = get_next_index(producer, tx_ring->num_desc);
tx_ring          1962 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		hwdesc = &tx_ring->desc_head[producer];
tx_ring          1963 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		tx_ring->cmd_buf_arr[producer].skb = NULL;
tx_ring          1971 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		producer = get_next_index(producer, tx_ring->num_desc);
tx_ring          1974 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	tx_ring->producer = producer;
tx_ring          2039 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
tx_ring          2050 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	u32 num_txd = tx_ring->num_desc;
tx_ring          2070 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
tx_ring          2073 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
tx_ring          2079 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	producer = tx_ring->producer;
tx_ring          2080 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	pbuf = &tx_ring->cmd_buf_arr[producer];
tx_ring          2090 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	first_desc = hwdesc = &tx_ring->desc_head[producer];
tx_ring          2103 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 			hwdesc = &tx_ring->desc_head[producer];
tx_ring          2105 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 			tx_ring->cmd_buf_arr[producer].skb = NULL;
tx_ring          2127 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	tx_ring->producer = get_next_index(producer, num_txd);
tx_ring          2129 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	netxen_tso_check(netdev, tx_ring, first_desc, skb);
tx_ring          2134 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	netxen_nic_update_cmd_producer(adapter, tx_ring);
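
The netxen_nic_main.c entries trace the transmit fast path: compare netxen_tx_avail() against TX_STOP_THRESH, stop and possibly immediately re-wake the queue, fill descriptors at tx_ring->producer, then write the new producer value to crb_cmd_producer as a doorbell. A hedged sketch of that flow, with demo_* names and an arbitrary threshold standing in for the driver's own:

#include <linux/io.h>
#include <linux/netdevice.h>

#define DEMO_TX_STOP_THRESH	64	/* arbitrary; the driver has its own */

struct demo_tx_ring {
	u32 producer, sw_consumer, num_desc;
	void __iomem *crb_cmd_producer;		/* producer doorbell register */
	struct netdev_queue *txq;
};

static u32 demo_tx_avail(const struct demo_tx_ring *r)
{
	if (r->producer >= r->sw_consumer)
		return r->num_desc - (r->producer - r->sw_consumer);
	return r->sw_consumer - r->producer;
}

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct demo_tx_ring *r)
{
	if (demo_tx_avail(r) <= DEMO_TX_STOP_THRESH) {
		netif_tx_stop_queue(r->txq);
		/* the completion path may have freed slots in the meantime */
		if (demo_tx_avail(r) > DEMO_TX_STOP_THRESH)
			netif_tx_wake_queue(r->txq);
		else
			return NETDEV_TX_BUSY;
	}

	/* ... map skb, fill descriptors at r->producer, advance with wrap ... */
	r->producer = (r->producer + 1) % r->num_desc;

	/* doorbell: tell the NIC how far the host-side producer has moved */
	writel(r->producer, r->crb_cmd_producer);
	return NETDEV_TX_OK;
}
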
tx_ring            71 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h #define TX_BUFF_RINGSIZE(tx_ring)	\
tx_ring            72 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 	(sizeof(struct qlcnic_cmd_buffer) * tx_ring->num_desc)
tx_ring            73 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h #define TX_DESC_RINGSIZE(tx_ring)	\
tx_ring            74 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 	(sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
tx_ring           595 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring          1108 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring          1726 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
tx_ring          1728 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 	if (likely(tx_ring->producer < tx_ring->sw_consumer))
tx_ring          1729 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 		return tx_ring->sw_consumer - tx_ring->producer;
tx_ring          1731 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 		return tx_ring->sw_consumer + tx_ring->num_desc -
tx_ring          1732 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 				tx_ring->producer;
tx_ring          1804 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 				 u16 vlan, struct qlcnic_host_tx_ring *tx_ring);
tx_ring          2069 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 					struct qlcnic_host_tx_ring *tx_ring)
tx_ring          2071 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 	adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring);
tx_ring          2175 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 			   struct qlcnic_host_tx_ring *tx_ring)
tx_ring          2179 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 		writel(0x0, tx_ring->crb_intr_mask);
tx_ring          2184 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 			    struct qlcnic_host_tx_ring *tx_ring)
tx_ring          2188 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 		writel(1, tx_ring->crb_intr_mask);
tx_ring          2193 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 			   struct qlcnic_host_tx_ring *tx_ring)
tx_ring          2195 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 	writel(0, tx_ring->crb_intr_mask);
tx_ring          2200 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 			    struct qlcnic_host_tx_ring *tx_ring)
tx_ring          2202 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 	writel(1, tx_ring->crb_intr_mask);
tx_ring          2258 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 					 struct qlcnic_host_tx_ring *tx_ring)
tx_ring          2261 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 		adapter->ahw->hw_ops->enable_tx_intr(adapter, tx_ring);
tx_ring          2265 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 					  struct qlcnic_host_tx_ring *tx_ring)
tx_ring          2268 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 		adapter->ahw->hw_ops->disable_tx_intr(adapter, tx_ring);
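
qlcnic_tx_avail(), quoted from qlcnic.h above, is plain circular-buffer arithmetic on producer and sw_consumer. The short stand-alone program below (ordinary user-space C, not driver code) models the same computation and checks a few boundary cases:

#include <assert.h>

/* free slots between producer and consumer, modulo the ring size */
static unsigned int demo_tx_avail(unsigned int producer, unsigned int consumer,
				  unsigned int num_desc)
{
	if (producer < consumer)
		return consumer - producer;
	return consumer + num_desc - producer;
}

static unsigned int demo_next(unsigned int index, unsigned int num_desc)
{
	return (index + 1) % num_desc;
}

int main(void)
{
	unsigned int ring = 256;

	assert(demo_tx_avail(0, 0, ring) == 256);	/* empty ring */
	assert(demo_tx_avail(demo_next(0, ring), 0, ring) == 255);
	assert(demo_tx_avail(255, 10, ring) == 11);	/* producer near the end */
	assert(demo_tx_avail(3, 250, ring) == 247);	/* producer wrapped past the end */
	return 0;
}
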
tx_ring          1293 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 			    struct qlcnic_host_tx_ring *tx_ring)
tx_ring          1307 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[1] = tx_ring->ctx_id | temp;
tx_ring          2139 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 				  struct qlcnic_host_tx_ring *tx_ring)
tx_ring          2228 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	temp = adapter->tx_ring->ctx_id;
tx_ring          3533 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	cmd.req.arg[1] = BIT_1 | (adapter->tx_ring->ctx_id << 16);
tx_ring           414 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 				     struct qlcnic_host_tx_ring *tx_ring,
tx_ring           432 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	tx_ring->producer = 0;
tx_ring           433 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	tx_ring->sw_consumer = 0;
tx_ring           434 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	*(tx_ring->hw_consumer) = 0;
tx_ring           475 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
tx_ring           479 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
tx_ring           480 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
tx_ring           494 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 		tx_ring->state = le32_to_cpu(prsp->host_ctx_state);
tx_ring           496 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 		tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
tx_ring           497 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 		tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
tx_ring           503 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 			tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask;
tx_ring           507 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 			    tx_ring->ctx_id, tx_ring->state);
tx_ring           525 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 				   struct qlcnic_host_tx_ring *tx_ring)
tx_ring           534 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	cmd.req.arg[1] = tx_ring->ctx_id;
tx_ring           564 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring           572 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 		tx_ring = &adapter->tx_ring[ring];
tx_ring           574 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 						   &tx_ring->hw_cons_phys_addr,
tx_ring           581 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 		tx_ring->hw_consumer = ptr;
tx_ring           583 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 		addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
tx_ring           584 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 					  &tx_ring->phys_addr,
tx_ring           591 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 		tx_ring->desc_head = addr;
tx_ring           657 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 						  &dev->tx_ring[ring],
tx_ring           665 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 				qlcnic_fw_cmd_del_tx_ctx(dev, &dev->tx_ring[i]);
tx_ring           696 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 						 &adapter->tx_ring[ring]);
tx_ring           719 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring           725 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 		tx_ring = &adapter->tx_ring[ring];
tx_ring           726 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 		if (tx_ring->hw_consumer != NULL) {
tx_ring           728 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 					  tx_ring->hw_consumer,
tx_ring           729 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 					  tx_ring->hw_cons_phys_addr);
tx_ring           731 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 			tx_ring->hw_consumer = NULL;
tx_ring           734 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 		if (tx_ring->desc_head != NULL) {
tx_ring           736 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 					  TX_DESC_RINGSIZE(tx_ring),
tx_ring           737 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 					  tx_ring->desc_head,
tx_ring           738 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 					  tx_ring->phys_addr);
tx_ring           739 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 			tx_ring->desc_head = NULL;
tx_ring           536 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring           564 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		tx_ring = &adapter->tx_ring[ring];
tx_ring           565 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		regs_buff[i++] = le32_to_cpu(*(tx_ring->hw_consumer));
tx_ring           566 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		regs_buff[i++] = tx_ring->sw_consumer;
tx_ring           567 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		regs_buff[i++] = readl(tx_ring->crb_cmd_producer);
tx_ring           568 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		regs_buff[i++] = tx_ring->producer;
tx_ring           569 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		if (tx_ring->crb_intr_mask)
tx_ring           570 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 			regs_buff[i++] = readl(tx_ring->crb_intr_mask);
tx_ring           697 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 				      u8 rx_ring, u8 tx_ring)
tx_ring           699 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	if (rx_ring == 0 || tx_ring == 0)
tx_ring           711 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	 if (tx_ring != 0) {
tx_ring           712 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		if (tx_ring > adapter->max_tx_rings) {
tx_ring           715 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 				   tx_ring, adapter->max_tx_rings);
tx_ring          1307 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring          1312 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		tx_ring = &adapter->tx_ring[ring];
tx_ring          1313 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		tx_stats.xmit_on += tx_ring->tx_stats.xmit_on;
tx_ring          1314 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		tx_stats.xmit_off += tx_ring->tx_stats.xmit_off;
tx_ring          1315 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		tx_stats.xmit_called += tx_ring->tx_stats.xmit_called;
tx_ring          1316 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		tx_stats.xmit_finished += tx_ring->tx_stats.xmit_finished;
tx_ring          1317 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		tx_stats.tx_bytes += tx_ring->tx_stats.tx_bytes;
tx_ring          1329 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring          1331 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	tx_ring = (struct qlcnic_host_tx_ring *)stats;
tx_ring          1333 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	*data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_on);
tx_ring          1334 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	*data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_off);
tx_ring          1335 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	*data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_called);
tx_ring          1336 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	*data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_finished);
tx_ring          1337 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	*data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.tx_bytes);
tx_ring          1346 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring          1356 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 			tx_ring = &adapter->tx_ring[ring];
tx_ring          1357 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 			data = qlcnic_fill_tx_queue_stats(data, tx_ring);
tx_ring           388 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring           395 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	tx_ring = &adapter->tx_ring[0];
tx_ring           396 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	__netif_tx_lock_bh(tx_ring->txq);
tx_ring           398 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	producer = tx_ring->producer;
tx_ring           400 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
tx_ring           401 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 		netif_tx_stop_queue(tx_ring->txq);
tx_ring           403 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 		if (qlcnic_tx_avail(tx_ring) > nr_desc) {
tx_ring           404 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
tx_ring           405 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 				netif_tx_wake_queue(tx_ring->txq);
tx_ring           408 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 			__netif_tx_unlock_bh(tx_ring->txq);
tx_ring           416 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 		pbuf = &tx_ring->cmd_buf_arr[producer];
tx_ring           420 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 		memcpy(&tx_ring->desc_head[producer],
tx_ring           423 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 		producer = get_next_index(producer, tx_ring->num_desc);
tx_ring           428 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	tx_ring->producer = producer;
tx_ring           430 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	qlcnic_update_cmd_producer(tx_ring);
tx_ring           432 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 	__netif_tx_unlock_bh(tx_ring->txq);
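
The qlcnic_hw.c entries show a management/firmware send path that shares TX ring 0 with the networking stack, so it wraps its descriptor copies in __netif_tx_lock_bh()/__netif_tx_unlock_bh(). Below is a simplified sketch of that locked copy loop; the 64-byte demo_cmd_desc and the omitted free-space check are assumptions of the sketch, not the driver's exact behaviour.

#include <linux/netdevice.h>
#include <linux/string.h>

struct demo_cmd_desc { u8 raw[64]; };	/* assumed 64-byte command descriptor */

struct demo_tx_ring {
	u32 producer, num_desc;
	struct demo_cmd_desc *desc_head;
	struct netdev_queue *txq;
};

static void demo_send_cmd_descs(struct demo_tx_ring *r,
				const struct demo_cmd_desc *cmds, int nr)
{
	int i;

	/* exclude the regular xmit path while we touch its producer index */
	__netif_tx_lock_bh(r->txq);
	for (i = 0; i < nr; i++) {
		memcpy(&r->desc_head[r->producer], &cmds[i], sizeof(cmds[i]));
		r->producer = (r->producer + 1) % r->num_desc;
	}
	/* the real driver checks for free space first and rings the doorbell here */
	__netif_tx_unlock_bh(r->txq);
}
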
tx_ring           177 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h 			       struct qlcnic_host_tx_ring *tx_ring);
tx_ring           196 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h 				     struct qlcnic_host_tx_ring *tx_ring, int);
tx_ring           131 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 			       struct qlcnic_host_tx_ring *tx_ring)
tx_ring           137 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 	spin_lock(&tx_ring->tx_clean_lock);
tx_ring           139 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 	cmd_buf = tx_ring->cmd_buf_arr;
tx_ring           140 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 	for (i = 0; i < tx_ring->num_desc; i++) {
tx_ring           163 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 	spin_unlock(&tx_ring->tx_clean_lock);
tx_ring           251 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 				sds_ring->tx_ring = &adapter->tx_ring[ring];
tx_ring           253 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 				sds_ring->tx_ring = &adapter->tx_ring[0];
tx_ring           271 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			       u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
tx_ring           280 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	producer = tx_ring->producer;
tx_ring           281 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	hwdesc = &tx_ring->desc_head[tx_ring->producer];
tx_ring           297 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
tx_ring           304 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			       struct qlcnic_host_tx_ring *tx_ring)
tx_ring           338 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 						     vlan_id, tx_ring);
tx_ring           353 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
tx_ring           373 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			       struct qlcnic_host_tx_ring *tx_ring)
tx_ring           377 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	u32 producer = tx_ring->producer;
tx_ring           407 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			hwdesc = &tx_ring->desc_head[producer];
tx_ring           408 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring->cmd_buf_arr[producer].skb = NULL;
tx_ring           413 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			producer = get_next_index(producer, tx_ring->num_desc);
tx_ring           416 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		tx_ring->producer = producer;
tx_ring           460 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			 struct qlcnic_host_tx_ring *tx_ring)
tx_ring           468 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	u32 producer = tx_ring->producer;
tx_ring           520 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			hwdesc = &tx_ring->desc_head[producer];
tx_ring           521 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring->cmd_buf_arr[producer].skb = NULL;
tx_ring           536 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			producer = get_next_index(producer, tx_ring->num_desc);
tx_ring           542 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			hwdesc = &tx_ring->desc_head[producer];
tx_ring           543 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring->cmd_buf_arr[producer].skb = NULL;
tx_ring           549 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			producer = get_next_index(producer, tx_ring->num_desc);
tx_ring           552 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		tx_ring->producer = producer;
tx_ring           652 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring           674 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
tx_ring           675 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	num_txd = tx_ring->num_desc;
tx_ring           692 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
tx_ring           693 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		netif_tx_stop_queue(tx_ring->txq);
tx_ring           694 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
tx_ring           695 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			netif_tx_start_queue(tx_ring->txq);
tx_ring           697 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring->tx_stats.xmit_off++;
tx_ring           702 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	producer = tx_ring->producer;
tx_ring           703 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	pbuf = &tx_ring->cmd_buf_arr[producer];
tx_ring           705 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	first_desc = &tx_ring->desc_head[producer];
tx_ring           706 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	hwdesc = &tx_ring->desc_head[producer];
tx_ring           726 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			hwdesc = &tx_ring->desc_head[producer];
tx_ring           728 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring->cmd_buf_arr[producer].skb = NULL;
tx_ring           749 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	tx_ring->producer = get_next_index(producer, num_txd);
tx_ring           762 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 					   tx_ring)))
tx_ring           766 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 						 skb, tx_ring)))
tx_ring           771 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		qlcnic_send_filter(adapter, first_desc, skb, tx_ring);
tx_ring           773 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	tx_ring->tx_stats.tx_bytes += skb->len;
tx_ring           774 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	tx_ring->tx_stats.xmit_called++;
tx_ring           778 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	qlcnic_update_cmd_producer(tx_ring);
tx_ring           887 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 				   struct qlcnic_host_tx_ring *tx_ring,
tx_ring           897 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (!spin_trylock(&tx_ring->tx_clean_lock))
tx_ring           900 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	sw_consumer = tx_ring->sw_consumer;
tx_ring           901 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
tx_ring           904 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
tx_ring           916 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring->tx_stats.xmit_finished++;
tx_ring           921 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
tx_ring           926 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	tx_ring->sw_consumer = sw_consumer;
tx_ring           930 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		if (netif_tx_queue_stopped(tx_ring->txq) &&
tx_ring           932 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
tx_ring           933 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 				netif_tx_wake_queue(tx_ring->txq);
tx_ring           934 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 				tx_ring->tx_stats.xmit_on++;
tx_ring           952 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
tx_ring           955 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	spin_unlock(&tx_ring->tx_clean_lock);
tx_ring           965 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring           969 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	tx_ring = sds_ring->tx_ring;
tx_ring           971 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
tx_ring           983 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			qlcnic_enable_tx_intr(adapter, tx_ring);
tx_ring           992 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring           996 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
tx_ring           997 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	adapter = tx_ring->adapter;
tx_ring           999 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
tx_ring          1001 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		napi_complete(&tx_ring->napi);
tx_ring          1003 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			qlcnic_enable_tx_intr(adapter, tx_ring);
tx_ring          1579 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring          1609 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring = &adapter->tx_ring[ring];
tx_ring          1610 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			netif_tx_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
tx_ring          1623 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring          1634 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring = &adapter->tx_ring[ring];
tx_ring          1635 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			netif_napi_del(&tx_ring->napi);
tx_ring          1646 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring          1662 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring = &adapter->tx_ring[ring];
tx_ring          1663 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			napi_enable(&tx_ring->napi);
tx_ring          1664 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			qlcnic_enable_tx_intr(adapter, tx_ring);
tx_ring          1673 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring          1690 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring = &adapter->tx_ring[ring];
tx_ring          1691 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			qlcnic_disable_tx_intr(adapter, tx_ring);
tx_ring          1692 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			napi_synchronize(&tx_ring->napi);
tx_ring          1693 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			napi_disable(&tx_ring->napi);
tx_ring          1956 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring          1961 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	tx_ring = adapter->tx_ring;
tx_ring          1963 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
tx_ring          1984 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring          1989 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	tx_ring = adapter->tx_ring;
tx_ring          1991 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
tx_ring          2009 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring          2012 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
tx_ring          2013 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	adapter = tx_ring->adapter;
tx_ring          2014 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
tx_ring          2016 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		napi_complete(&tx_ring->napi);
tx_ring          2018 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			qlcnic_enable_tx_intr(adapter, tx_ring);
tx_ring          2049 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring          2065 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring = &adapter->tx_ring[ring];
tx_ring          2066 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			napi_enable(&tx_ring->napi);
tx_ring          2067 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			qlcnic_enable_tx_intr(adapter, tx_ring);
tx_ring          2077 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring          2093 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring = &adapter->tx_ring[ring];
tx_ring          2094 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			qlcnic_disable_tx_intr(adapter, tx_ring);
tx_ring          2095 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			napi_synchronize(&tx_ring->napi);
tx_ring          2096 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			napi_disable(&tx_ring->napi);
tx_ring          2106 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring          2139 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring = &adapter->tx_ring[ring];
tx_ring          2140 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			netif_tx_napi_add(netdev, &tx_ring->napi,
tx_ring          2154 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring          2166 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring = &adapter->tx_ring[ring];
tx_ring          2167 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			netif_napi_del(&tx_ring->napi);
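
qlcnic_io.c also registers TX-only NAPI contexts: the poll routine recovers its ring with container_of(), reaps completed commands, and, once under budget, completes NAPI and unmasks the TX interrupt through crb_intr_mask. A minimal sketch of that shape, with a stubbed-out reap helper standing in for qlcnic_process_cmd_ring():

#include <linux/io.h>
#include <linux/netdevice.h>

struct demo_tx_ring {
	struct napi_struct napi;
	void __iomem *crb_intr_mask;	/* per-ring interrupt mask register */
};

/* stand-in for the driver's completion reaping; pretend nothing was pending */
static int demo_reap_tx(struct demo_tx_ring *r, int budget)
{
	return 0;
}

static int demo_tx_poll(struct napi_struct *napi, int budget)
{
	struct demo_tx_ring *r = container_of(napi, struct demo_tx_ring, napi);
	int work_done = demo_reap_tx(r, budget);

	if (work_done < budget) {
		napi_complete(&r->napi);
		writel(0, r->crb_intr_mask);	/* unmask the TX interrupt */
	}
	return work_done;
}
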
tx_ring           119 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c inline void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *tx_ring)
tx_ring           121 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	writel(tx_ring->producer, tx_ring->crb_cmd_producer);
tx_ring          1723 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring          1793 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 				tx_ring = &adapter->tx_ring[ring];
tx_ring          1794 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 				snprintf(tx_ring->name, sizeof(tx_ring->name),
tx_ring          1796 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 				err = request_irq(tx_ring->irq, handler, flags,
tx_ring          1797 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 						  tx_ring->name, tx_ring);
tx_ring          1811 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring          1830 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 				tx_ring = &adapter->tx_ring[ring];
tx_ring          1831 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 				if (tx_ring->irq)
tx_ring          1832 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 					free_irq(tx_ring->irq, tx_ring);
tx_ring          1971 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
tx_ring          2384 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring          2387 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		tx_ring = &adapter->tx_ring[ring];
tx_ring          2388 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		if (tx_ring) {
tx_ring          2389 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			vfree(tx_ring->cmd_buf_arr);
tx_ring          2390 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			tx_ring->cmd_buf_arr = NULL;
tx_ring          2393 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	kfree(adapter->tx_ring);
tx_ring          2400 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring          2403 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	tx_ring = kcalloc(adapter->drv_tx_rings,
tx_ring          2405 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	if (tx_ring == NULL)
tx_ring          2408 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	adapter->tx_ring = tx_ring;
tx_ring          2411 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		tx_ring = &adapter->tx_ring[ring];
tx_ring          2412 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		tx_ring->num_desc = adapter->num_txd;
tx_ring          2413 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		tx_ring->txq = netdev_get_tx_queue(netdev, ring);
tx_ring          2414 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
tx_ring          2419 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		tx_ring->cmd_buf_arr = cmd_buf_arr;
tx_ring          2420 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		spin_lock_init(&tx_ring->tx_clean_lock);
tx_ring          2426 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			tx_ring = &adapter->tx_ring[ring];
tx_ring          2427 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			tx_ring->adapter = adapter;
tx_ring          2431 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 				tx_ring->irq = vector;
tx_ring          2994 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c static inline void dump_tx_ring_desc(struct qlcnic_host_tx_ring *tx_ring)
tx_ring          2998 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	for (i = 0; i < tx_ring->num_desc; i++) {
tx_ring          3001 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			       &tx_ring->desc_head[i],
tx_ring          3012 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	struct qlcnic_host_tx_ring *tx_ring;
tx_ring          3040 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		tx_ring = &adapter->tx_ring[ring];
tx_ring          3041 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		if (!tx_ring)
tx_ring          3044 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			    ring, tx_ring->ctx_id);
tx_ring          3047 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			    tx_ring->tx_stats.xmit_finished,
tx_ring          3048 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			    tx_ring->tx_stats.xmit_called,
tx_ring          3049 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			    tx_ring->tx_stats.xmit_on,
tx_ring          3050 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			    tx_ring->tx_stats.xmit_off);
tx_ring          3052 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		if (tx_ring->crb_intr_mask)
tx_ring          3054 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 				    readl(tx_ring->crb_intr_mask));
tx_ring          3058 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			    readl(tx_ring->crb_cmd_producer),
tx_ring          3059 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			    tx_ring->producer, tx_ring->sw_consumer,
tx_ring          3060 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			    le32_to_cpu(*(tx_ring->hw_consumer)));
tx_ring          3063 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			    tx_ring->num_desc, qlcnic_tx_avail(tx_ring));
tx_ring          3066 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			dump_tx_ring_desc(tx_ring);
tx_ring          3190 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	struct qlcnic_host_tx_ring *tx_ring = data;
tx_ring          3192 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	napi_schedule(&tx_ring->napi);
tx_ring           459 drivers/net/ethernet/qualcomm/qca_spi.c qcaspi_tx_ring_has_space(struct tx_ring *txr)
tx_ring            85 drivers/net/ethernet/qualcomm/qca_spi.h 	struct tx_ring txr;
tx_ring           180 drivers/net/ethernet/rdc/r6040.c 	struct r6040_descriptor *tx_ring;
tx_ring           315 drivers/net/ethernet/rdc/r6040.c 	lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring;
tx_ring           316 drivers/net/ethernet/rdc/r6040.c 	r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT);
tx_ring           492 drivers/net/ethernet/rdc/r6040.c 	if (lp->tx_ring) {
tx_ring           494 drivers/net/ethernet/rdc/r6040.c 				TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma);
tx_ring           495 drivers/net/ethernet/rdc/r6040.c 		lp->tx_ring = NULL;
tx_ring           767 drivers/net/ethernet/rdc/r6040.c 	lp->tx_ring =
tx_ring           769 drivers/net/ethernet/rdc/r6040.c 	if (!lp->tx_ring) {
tx_ring           784 drivers/net/ethernet/rdc/r6040.c 	pci_free_consistent(lp->pdev, TX_DESC_SIZE, lp->tx_ring,
tx_ring           343 drivers/net/ethernet/realtek/8139cp.c 	struct cp_desc		*tx_ring;
tx_ring           655 drivers/net/ethernet/realtek/8139cp.c 		struct cp_desc *txd = cp->tx_ring + tx_tail;
tx_ring           724 drivers/net/ethernet/realtek/8139cp.c 		txd = &cp->tx_ring[index];
tx_ring           779 drivers/net/ethernet/realtek/8139cp.c 		struct cp_desc *txd = &cp->tx_ring[entry];
tx_ring           842 drivers/net/ethernet/realtek/8139cp.c 			txd = &cp->tx_ring[entry];
tx_ring           854 drivers/net/ethernet/realtek/8139cp.c 		txd = &cp->tx_ring[first_entry];
tx_ring          1104 drivers/net/ethernet/realtek/8139cp.c 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
tx_ring          1105 drivers/net/ethernet/realtek/8139cp.c 	cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
tx_ring          1124 drivers/net/ethernet/realtek/8139cp.c 	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
tx_ring          1151 drivers/net/ethernet/realtek/8139cp.c 			desc = cp->tx_ring + i;
tx_ring          1163 drivers/net/ethernet/realtek/8139cp.c 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
tx_ring          1176 drivers/net/ethernet/realtek/8139cp.c 	cp->tx_ring = NULL;
tx_ring          1255 drivers/net/ethernet/realtek/8139cp.c 			  i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1),
tx_ring          1256 drivers/net/ethernet/realtek/8139cp.c 			  cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2),
tx_ring          1257 drivers/net/ethernet/realtek/8139cp.c 			  le64_to_cpu(cp->tx_ring[i].addr),
tx_ring          1006 drivers/net/ethernet/renesas/ravb.h 	struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
tx_ring           189 drivers/net/ethernet/renesas/ravb_main.c 		desc = &priv->tx_ring[q][entry];
tx_ring           243 drivers/net/ethernet/renesas/ravb_main.c 	if (priv->tx_ring[q]) {
tx_ring           248 drivers/net/ethernet/renesas/ravb_main.c 		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
tx_ring           250 drivers/net/ethernet/renesas/ravb_main.c 		priv->tx_ring[q] = NULL;
tx_ring           312 drivers/net/ethernet/renesas/ravb_main.c 	memset(priv->tx_ring[q], 0, tx_ring_size);
tx_ring           314 drivers/net/ethernet/renesas/ravb_main.c 	for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
tx_ring           382 drivers/net/ethernet/renesas/ravb_main.c 	priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
tx_ring           385 drivers/net/ethernet/renesas/ravb_main.c 	if (!priv->tx_ring[q])
tx_ring          1527 drivers/net/ethernet/renesas/ravb_main.c 		desc = &priv->tx_ring[q][entry];
tx_ring          1540 drivers/net/ethernet/renesas/ravb_main.c 		desc = &priv->tx_ring[q][entry];
tx_ring          1276 drivers/net/ethernet/renesas/sh_eth.c 		txdesc = &mdp->tx_ring[entry];
tx_ring          1338 drivers/net/ethernet/renesas/sh_eth.c 	if (mdp->tx_ring) {
tx_ring          1342 drivers/net/ethernet/renesas/sh_eth.c 		dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring,
tx_ring          1344 drivers/net/ethernet/renesas/sh_eth.c 		mdp->tx_ring = NULL;
tx_ring          1412 drivers/net/ethernet/renesas/sh_eth.c 	memset(mdp->tx_ring, 0, tx_ringsize);
tx_ring          1417 drivers/net/ethernet/renesas/sh_eth.c 		txdesc = &mdp->tx_ring[i];
tx_ring          1469 drivers/net/ethernet/renesas/sh_eth.c 	mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize,
tx_ring          1471 drivers/net/ethernet/renesas/sh_eth.c 	if (!mdp->tx_ring)
tx_ring          1580 drivers/net/ethernet/renesas/sh_eth.c 		mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT);
tx_ring          2544 drivers/net/ethernet/renesas/sh_eth.c 	txdesc = &mdp->tx_ring[entry];
tx_ring           525 drivers/net/ethernet/renesas/sh_eth.h 	struct sh_eth_txdesc *tx_ring;
tx_ring            48 drivers/net/ethernet/rocker/rocker.h 	struct rocker_dma_ring_info tx_ring;
tx_ring           792 drivers/net/ethernet/rocker/rocker_main.c 				     &rocker_port->tx_ring);
tx_ring           798 drivers/net/ethernet/rocker/rocker_main.c 	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
tx_ring           838 drivers/net/ethernet/rocker/rocker_main.c 	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
tx_ring           841 drivers/net/ethernet/rocker/rocker_main.c 	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
tx_ring           853 drivers/net/ethernet/rocker/rocker_main.c 	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
tx_ring           855 drivers/net/ethernet/rocker/rocker_main.c 	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
tx_ring          1908 drivers/net/ethernet/rocker/rocker_main.c 	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
tx_ring          1942 drivers/net/ethernet/rocker/rocker_main.c 	rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
tx_ring          1944 drivers/net/ethernet/rocker/rocker_main.c 	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
tx_ring          2426 drivers/net/ethernet/rocker/rocker_main.c 	while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
tx_ring          2451 drivers/net/ethernet/rocker/rocker_main.c 	rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
tx_ring           391 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 			struct sxgbe_tx_queue *tx_ring,	int tx_rsize)
tx_ring           394 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	if (!tx_ring) {
tx_ring           400 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	tx_ring->dma_tx = dma_alloc_coherent(dev,
tx_ring           402 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 					     &tx_ring->dma_tx_phy, GFP_KERNEL);
tx_ring           403 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	if (!tx_ring->dma_tx)
tx_ring           407 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	tx_ring->tx_skbuff_dma = devm_kcalloc(dev, tx_rsize,
tx_ring           409 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	if (!tx_ring->tx_skbuff_dma)
tx_ring           412 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	tx_ring->tx_skbuff = devm_kcalloc(dev, tx_rsize,
tx_ring           415 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	if (!tx_ring->tx_skbuff)
tx_ring           419 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	tx_ring->queue_no = queue_no;
tx_ring           422 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	tx_ring->dirty_tx = 0;
tx_ring           423 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	tx_ring->cur_tx = 0;
tx_ring           429 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 			  tx_ring->dma_tx, tx_ring->dma_tx_phy);
tx_ring           542 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c static void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
tx_ring           546 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 			  tx_ring->dma_tx, tx_ring->dma_tx_phy);
tx_ring           815 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	struct sxgbe_tx_queue *tx_ring = priv->txq[queue_num];
tx_ring           826 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	tx_free_ring_skbufs(tx_ring);
tx_ring           829 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	tx_ring->cur_tx = 0;
tx_ring           830 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	tx_ring->dirty_tx = 0;
tx_ring            76 drivers/net/ethernet/sgi/meth.c 	tx_packet *tx_ring;
tx_ring           212 drivers/net/ethernet/sgi/meth.c 	priv->tx_ring = dma_alloc_coherent(&priv->pdev->dev,
tx_ring           214 drivers/net/ethernet/sgi/meth.c 	if (!priv->tx_ring)
tx_ring           253 drivers/net/ethernet/sgi/meth.c 	dma_free_coherent(&priv->pdev->dev, TX_RING_BUFFER_SIZE, priv->tx_ring,
tx_ring           491 drivers/net/ethernet/sgi/meth.c 		status = priv->tx_ring[priv->tx_read].header.raw;
tx_ring           525 drivers/net/ethernet/sgi/meth.c 		priv->tx_ring[priv->tx_read].header.raw = 0;
tx_ring           610 drivers/net/ethernet/sgi/meth.c 	tx_packet *desc = &priv->tx_ring[priv->tx_write];
tx_ring           623 drivers/net/ethernet/sgi/meth.c 	tx_packet *desc = &priv->tx_ring[priv->tx_write];
tx_ring           648 drivers/net/ethernet/sgi/meth.c 	tx_packet *desc = &priv->tx_ring[priv->tx_write];
tx_ring           185 drivers/net/ethernet/sis/sis900.c 	BufferDesc *tx_ring;
tx_ring           489 drivers/net/ethernet/sis/sis900.c 	sis_priv->tx_ring = ring_space;
tx_ring           578 drivers/net/ethernet/sis/sis900.c 	pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
tx_ring          1137 drivers/net/ethernet/sis/sis900.c 		sis_priv->tx_ring[i].link = sis_priv->tx_ring_dma +
tx_ring          1139 drivers/net/ethernet/sis/sis900.c 		sis_priv->tx_ring[i].cmdsts = 0;
tx_ring          1140 drivers/net/ethernet/sis/sis900.c 		sis_priv->tx_ring[i].bufptr = 0;
tx_ring          1565 drivers/net/ethernet/sis/sis900.c 				sis_priv->tx_ring[i].bufptr, skb->len,
tx_ring          1569 drivers/net/ethernet/sis/sis900.c 			sis_priv->tx_ring[i].cmdsts = 0;
tx_ring          1570 drivers/net/ethernet/sis/sis900.c 			sis_priv->tx_ring[i].bufptr = 0;
tx_ring          1615 drivers/net/ethernet/sis/sis900.c 	sis_priv->tx_ring[entry].bufptr = pci_map_single(sis_priv->pci_dev,
tx_ring          1618 drivers/net/ethernet/sis/sis900.c 		sis_priv->tx_ring[entry].bufptr))) {
tx_ring          1625 drivers/net/ethernet/sis/sis900.c 	sis_priv->tx_ring[entry].cmdsts = (OWN | INTR | skb->len);
tx_ring          1899 drivers/net/ethernet/sis/sis900.c 		tx_status = sis_priv->tx_ring[entry].cmdsts;
tx_ring          1932 drivers/net/ethernet/sis/sis900.c 			sis_priv->tx_ring[entry].bufptr, skb->len,
tx_ring          1936 drivers/net/ethernet/sis/sis900.c 		sis_priv->tx_ring[entry].bufptr = 0;
tx_ring          1937 drivers/net/ethernet/sis/sis900.c 		sis_priv->tx_ring[entry].cmdsts = 0;
tx_ring          1991 drivers/net/ethernet/sis/sis900.c 			pci_unmap_single(pdev, sis_priv->tx_ring[i].bufptr,
tx_ring          2489 drivers/net/ethernet/sis/sis900.c 	pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
tx_ring           254 drivers/net/ethernet/smsc/epic100.c 	struct epic_tx_desc *tx_ring;
tx_ring           379 drivers/net/ethernet/smsc/epic100.c 	ep->tx_ring = ring_space;
tx_ring           497 drivers/net/ethernet/smsc/epic100.c 	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
tx_ring           930 drivers/net/ethernet/smsc/epic100.c 		ep->tx_ring[i].txstatus = 0x0000;
tx_ring           931 drivers/net/ethernet/smsc/epic100.c 		ep->tx_ring[i].next = ep->tx_ring_dma +
tx_ring           934 drivers/net/ethernet/smsc/epic100.c 	ep->tx_ring[i-1].next = ep->tx_ring_dma;
tx_ring           957 drivers/net/ethernet/smsc/epic100.c 	ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
tx_ring           970 drivers/net/ethernet/smsc/epic100.c 	ep->tx_ring[entry].buflength = ctrl_word | skb->len;
tx_ring           971 drivers/net/ethernet/smsc/epic100.c 	ep->tx_ring[entry].txstatus =
tx_ring          1024 drivers/net/ethernet/smsc/epic100.c 		int txstatus = ep->tx_ring[entry].txstatus;
tx_ring          1038 drivers/net/ethernet/smsc/epic100.c 		pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
tx_ring          1307 drivers/net/ethernet/smsc/epic100.c 		pci_unmap_single(pdev, ep->tx_ring[i].bufaddr, skb->len,
tx_ring          1502 drivers/net/ethernet/smsc/epic100.c 	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
tx_ring            50 drivers/net/ethernet/smsc/smsc9420.c 	struct smsc9420_dma_desc *tx_ring;
tx_ring           499 drivers/net/ethernet/smsc/smsc9420.c 	BUG_ON(!pd->tx_ring);
tx_ring           514 drivers/net/ethernet/smsc/smsc9420.c 		pd->tx_ring[i].status = 0;
tx_ring           515 drivers/net/ethernet/smsc/smsc9420.c 		pd->tx_ring[i].length = 0;
tx_ring           516 drivers/net/ethernet/smsc/smsc9420.c 		pd->tx_ring[i].buffer1 = 0;
tx_ring           517 drivers/net/ethernet/smsc/smsc9420.c 		pd->tx_ring[i].buffer2 = 0;
tx_ring           901 drivers/net/ethernet/smsc/smsc9420.c 		status = pd->tx_ring[index].status;
tx_ring           902 drivers/net/ethernet/smsc/smsc9420.c 		length = pd->tx_ring[index].length;
tx_ring           920 drivers/net/ethernet/smsc/smsc9420.c 		pd->tx_ring[index].buffer1 = 0;
tx_ring           940 drivers/net/ethernet/smsc/smsc9420.c 	BUG_ON(pd->tx_ring[index].status & TDES0_OWN_);
tx_ring           965 drivers/net/ethernet/smsc/smsc9420.c 	pd->tx_ring[index].buffer1 = mapping;
tx_ring           966 drivers/net/ethernet/smsc/smsc9420.c 	pd->tx_ring[index].length = tmp_desc1;
tx_ring           973 drivers/net/ethernet/smsc/smsc9420.c 	pd->tx_ring[index].status = TDES0_OWN_;
tx_ring          1183 drivers/net/ethernet/smsc/smsc9420.c 	BUG_ON(!pd->tx_ring);
tx_ring          1195 drivers/net/ethernet/smsc/smsc9420.c 		pd->tx_ring[i].status = 0;
tx_ring          1196 drivers/net/ethernet/smsc/smsc9420.c 		pd->tx_ring[i].length = 0;
tx_ring          1197 drivers/net/ethernet/smsc/smsc9420.c 		pd->tx_ring[i].buffer1 = 0;
tx_ring          1198 drivers/net/ethernet/smsc/smsc9420.c 		pd->tx_ring[i].buffer2 = 0;
tx_ring          1200 drivers/net/ethernet/smsc/smsc9420.c 	pd->tx_ring[TX_RING_SIZE - 1].length = TDES1_TER_;
tx_ring          1578 drivers/net/ethernet/smsc/smsc9420.c 	pd->tx_ring = (pd->rx_ring + RX_RING_SIZE);
tx_ring          1658 drivers/net/ethernet/smsc/smsc9420.c 	BUG_ON(!pd->tx_ring);
tx_ring           817 drivers/net/ethernet/socionext/netsec.c 	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
tx_ring           824 drivers/net/ethernet/socionext/netsec.c 	if (tx_ring->head >= tx_ring->tail)
tx_ring           825 drivers/net/ethernet/socionext/netsec.c 		filled = tx_ring->head - tx_ring->tail;
tx_ring           827 drivers/net/ethernet/socionext/netsec.c 		filled = tx_ring->head + DESC_NUM - tx_ring->tail;
tx_ring           861 drivers/net/ethernet/socionext/netsec.c 	netsec_set_tx_de(priv, tx_ring, &tx_ctrl, &tx_desc, xdpf);
tx_ring           868 drivers/net/ethernet/socionext/netsec.c 	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
tx_ring           875 drivers/net/ethernet/socionext/netsec.c 	spin_lock(&tx_ring->lock);
tx_ring           877 drivers/net/ethernet/socionext/netsec.c 	spin_unlock(&tx_ring->lock);
tx_ring          1743 drivers/net/ethernet/socionext/netsec.c 	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
tx_ring          1750 drivers/net/ethernet/socionext/netsec.c 	spin_lock(&tx_ring->lock);
tx_ring          1760 drivers/net/ethernet/socionext/netsec.c 			tx_ring->xdp_xmit++;
tx_ring          1763 drivers/net/ethernet/socionext/netsec.c 	spin_unlock(&tx_ring->lock);
tx_ring          1766 drivers/net/ethernet/socionext/netsec.c 		netsec_xdp_ring_tx_db(priv, tx_ring->xdp_xmit);
tx_ring          1767 drivers/net/ethernet/socionext/netsec.c 		tx_ring->xdp_xmit = 0;
tx_ring           168 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		xlgmac_free_ring(pdata, channel->tx_ring);
tx_ring           184 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		ret = xlgmac_init_ring(pdata, channel->tx_ring,
tx_ring           218 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	kfree(pdata->channel_head->tx_ring);
tx_ring           219 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	pdata->channel_head->tx_ring = NULL;
tx_ring           233 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	struct xlgmac_ring *tx_ring, *rx_ring;
tx_ring           245 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xlgmac_ring),
tx_ring           247 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	if (!tx_ring)
tx_ring           276 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 			channel->tx_ring = tx_ring++;
tx_ring           284 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 			  channel->tx_ring, channel->rx_ring);
tx_ring           295 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	kfree(tx_ring);
tx_ring           433 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		ring = channel->tx_ring;
tx_ring           501 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	struct xlgmac_ring *ring = channel->tx_ring;
tx_ring           506 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 		if (!channel->tx_ring)
tx_ring           540 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 		if (!channel->tx_ring)
tx_ring           563 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 		if (!channel->tx_ring)
tx_ring           705 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	struct xlgmac_ring *ring = channel->tx_ring;
tx_ring          1058 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	struct xlgmac_ring *ring = channel->tx_ring;
tx_ring          1385 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 		if (!channel->tx_ring)
tx_ring          1747 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 		if (!channel->tx_ring)
tx_ring          1796 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 		if (!channel->tx_ring)
tx_ring          2476 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 		if (channel->tx_ring) {
tx_ring           216 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		if (channel->tx_ring && channel->rx_ring)
tx_ring           218 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		else if (channel->tx_ring)
tx_ring           238 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		if (channel->tx_ring && channel->rx_ring)
tx_ring           240 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		else if (channel->tx_ring)
tx_ring           391 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		if (!channel->tx_ring)
tx_ring           405 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		if (!channel->tx_ring)
tx_ring           529 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		ring = channel->tx_ring;
tx_ring           605 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		if (!channel->tx_ring)
tx_ring           718 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	ring = channel->tx_ring;
tx_ring          1037 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	struct xlgmac_ring *ring = channel->tx_ring;
tx_ring           377 drivers/net/ethernet/synopsys/dwc-xlgmac.h 	struct xlgmac_ring *tx_ring;
tx_ring           443 drivers/net/ethernet/via/via-rhine.c 	struct tx_desc *tx_ring;
tx_ring          1183 drivers/net/ethernet/via/via-rhine.c 	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
tx_ring          1199 drivers/net/ethernet/via/via-rhine.c 	rp->tx_ring = NULL;
tx_ring          1325 drivers/net/ethernet/via/via-rhine.c 		rp->tx_ring[i].tx_status = 0;
tx_ring          1326 drivers/net/ethernet/via/via-rhine.c 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
tx_ring          1328 drivers/net/ethernet/via/via-rhine.c 		rp->tx_ring[i].next_desc = cpu_to_le32(next);
tx_ring          1332 drivers/net/ethernet/via/via-rhine.c 	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
tx_ring          1344 drivers/net/ethernet/via/via-rhine.c 		rp->tx_ring[i].tx_status = 0;
tx_ring          1345 drivers/net/ethernet/via/via-rhine.c 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
tx_ring          1346 drivers/net/ethernet/via/via-rhine.c 		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
tx_ring          1817 drivers/net/ethernet/via/via-rhine.c 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
tx_ring          1830 drivers/net/ethernet/via/via-rhine.c 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
tx_ring          1833 drivers/net/ethernet/via/via-rhine.c 	rp->tx_ring[entry].desc_length =
tx_ring          1842 drivers/net/ethernet/via/via-rhine.c 		rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
tx_ring          1844 drivers/net/ethernet/via/via-rhine.c 		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
tx_ring          1847 drivers/net/ethernet/via/via-rhine.c 		rp->tx_ring[entry].tx_status = 0;
tx_ring          1852 drivers/net/ethernet/via/via-rhine.c 	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
tx_ring          1944 drivers/net/ethernet/via/via-rhine.c 		u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
tx_ring          1966 drivers/net/ethernet/via/via-rhine.c 				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
tx_ring          2176 drivers/net/ethernet/via/via-rhine.c 		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
tx_ring           155 drivers/net/hippi/rrunner.c 	rrpriv->tx_ring = tmpptr;
tx_ring           206 drivers/net/hippi/rrunner.c 	if (rrpriv->tx_ring)
tx_ring           207 drivers/net/hippi/rrunner.c 		pci_free_consistent(pdev, TX_TOTAL_SIZE, rrpriv->tx_ring,
tx_ring           235 drivers/net/hippi/rrunner.c 	pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
tx_ring           597 drivers/net/hippi/rrunner.c 		rrpriv->tx_ring[i].size = 0;
tx_ring           598 drivers/net/hippi/rrunner.c 		set_rraddr(&rrpriv->tx_ring[i].addr, 0);
tx_ring          1065 drivers/net/hippi/rrunner.c 				desc = &(rrpriv->tx_ring[txcon]);
tx_ring          1078 drivers/net/hippi/rrunner.c 				set_rraddr(&rrpriv->tx_ring[txcon].addr, 0);
tx_ring          1111 drivers/net/hippi/rrunner.c 			struct tx_desc *desc = &(rrpriv->tx_ring[i]);
tx_ring          1283 drivers/net/hippi/rrunner.c 		printk("skbuff for index %i is valid - dumping data (0x%x bytes - DMA len 0x%x)\n", index, len, rrpriv->tx_ring[index].size);
tx_ring          1296 drivers/net/hippi/rrunner.c 		       rrpriv->tx_ring[cons].mode,
tx_ring          1297 drivers/net/hippi/rrunner.c 		       rrpriv->tx_ring[cons].size,
tx_ring          1298 drivers/net/hippi/rrunner.c 		       (unsigned long long) rrpriv->tx_ring[cons].addr.addrlo,
tx_ring          1304 drivers/net/hippi/rrunner.c 			printk("%02x ", (unsigned char)rrpriv->tx_ring[cons].size);
tx_ring          1312 drivers/net/hippi/rrunner.c 		       rrpriv->tx_ring[i].mode,
tx_ring          1313 drivers/net/hippi/rrunner.c 		       rrpriv->tx_ring[i].size,
tx_ring          1314 drivers/net/hippi/rrunner.c 		       (unsigned long long) rrpriv->tx_ring[i].addr.addrlo);
tx_ring          1433 drivers/net/hippi/rrunner.c 	set_rraddr(&rrpriv->tx_ring[index].addr, pci_map_single(
tx_ring          1435 drivers/net/hippi/rrunner.c 	rrpriv->tx_ring[index].size = len + 8; /* include IFIELD */
tx_ring          1436 drivers/net/hippi/rrunner.c 	rrpriv->tx_ring[index].mode = PACKET_START | PACKET_END;
tx_ring           803 drivers/net/hippi/rrunner.h 	struct tx_desc		*tx_ring;
tx_ring           196 drivers/net/thunderbolt.c 	struct tbnet_ring tx_ring;
tx_ring           376 drivers/net/thunderbolt.c 		tb_ring_stop(net->tx_ring.ring);
tx_ring           378 drivers/net/thunderbolt.c 		tbnet_free_buffers(&net->tx_ring);
tx_ring           514 drivers/net/thunderbolt.c 	struct tbnet_ring *ring = &net->tx_ring;
tx_ring           540 drivers/net/thunderbolt.c 	net->tx_ring.prod++;
tx_ring           542 drivers/net/thunderbolt.c 	if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2)
tx_ring           548 drivers/net/thunderbolt.c 	struct tbnet_ring *ring = &net->tx_ring;
tx_ring           606 drivers/net/thunderbolt.c 				      net->tx_ring.ring->hop);
tx_ring           612 drivers/net/thunderbolt.c 	tb_ring_start(net->tx_ring.ring);
tx_ring           631 drivers/net/thunderbolt.c 	tb_ring_stop(net->tx_ring.ring);
tx_ring           863 drivers/net/thunderbolt.c 	net->tx_ring.ring = ring;
tx_ring           873 drivers/net/thunderbolt.c 		tb_ring_free(net->tx_ring.ring);
tx_ring           874 drivers/net/thunderbolt.c 		net->tx_ring.ring = NULL;
tx_ring           896 drivers/net/thunderbolt.c 	tb_ring_free(net->tx_ring.ring);
tx_ring           897 drivers/net/thunderbolt.c 	net->tx_ring.ring = NULL;
tx_ring           906 drivers/net/thunderbolt.c 	struct device *dma_dev = tb_ring_dma_device(net->tx_ring.ring);
tx_ring          1028 drivers/net/thunderbolt.c 	if (tbnet_available_buffers(&net->tx_ring) < nframes) {
tx_ring          1125 drivers/net/thunderbolt.c 		tb_ring_tx(net->tx_ring.ring, &frames[i]->frame);
tx_ring          1139 drivers/net/thunderbolt.c 	net->tx_ring.cons -= frame_index;
tx_ring           177 drivers/net/tun.c 	struct ptr_ring tx_ring;
tx_ring           670 drivers/net/tun.c 	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
tx_ring           727 drivers/net/tun.c 		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
tx_ring           826 drivers/net/tun.c 	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
tx_ring          1109 drivers/net/tun.c 	if (ptr_ring_produce(&tfile->tx_ring, skb))
tx_ring          1308 drivers/net/tun.c 	spin_lock(&tfile->tx_ring.producer_lock);
tx_ring          1316 drivers/net/tun.c 		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
tx_ring          1322 drivers/net/tun.c 	spin_unlock(&tfile->tx_ring.producer_lock);
tx_ring          1442 drivers/net/tun.c 	if (!ptr_ring_empty(&tfile->tx_ring))
tx_ring          2177 drivers/net/tun.c 	ptr = ptr_ring_consume(&tfile->tx_ring);
tx_ring          2189 drivers/net/tun.c 		ptr = ptr_ring_consume(&tfile->tx_ring);
tx_ring          2640 drivers/net/tun.c 	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
tx_ring          3429 drivers/net/tun.c 	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
tx_ring          3629 drivers/net/tun.c 		rings[i] = &tfile->tx_ring;
tx_ring          3632 drivers/net/tun.c 		rings[i++] = &tfile->tx_ring;
tx_ring          3742 drivers/net/tun.c 	return &tfile->tx_ring;
tx_ring           336 drivers/net/vmxnet3/vmxnet3_drv.c 	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
tx_ring           337 drivers/net/vmxnet3/vmxnet3_drv.c 	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
tx_ring           343 drivers/net/vmxnet3/vmxnet3_drv.c 	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
tx_ring           345 drivers/net/vmxnet3/vmxnet3_drv.c 	while (tq->tx_ring.next2comp != eop_idx) {
tx_ring           346 drivers/net/vmxnet3/vmxnet3_drv.c 		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
tx_ring           354 drivers/net/vmxnet3/vmxnet3_drv.c 		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
tx_ring           388 drivers/net/vmxnet3/vmxnet3_drv.c 			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
tx_ring           405 drivers/net/vmxnet3/vmxnet3_drv.c 	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
tx_ring           408 drivers/net/vmxnet3/vmxnet3_drv.c 		tbi = tq->buf_info + tq->tx_ring.next2comp;
tx_ring           415 drivers/net/vmxnet3/vmxnet3_drv.c 		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
tx_ring           419 drivers/net/vmxnet3/vmxnet3_drv.c 	for (i = 0; i < tq->tx_ring.size; i++) {
tx_ring           424 drivers/net/vmxnet3/vmxnet3_drv.c 	tq->tx_ring.gen = VMXNET3_INIT_GEN;
tx_ring           425 drivers/net/vmxnet3/vmxnet3_drv.c 	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
tx_ring           436 drivers/net/vmxnet3/vmxnet3_drv.c 	if (tq->tx_ring.base) {
tx_ring           437 drivers/net/vmxnet3/vmxnet3_drv.c 		dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
tx_ring           439 drivers/net/vmxnet3/vmxnet3_drv.c 				  tq->tx_ring.base, tq->tx_ring.basePA);
tx_ring           440 drivers/net/vmxnet3/vmxnet3_drv.c 		tq->tx_ring.base = NULL;
tx_ring           456 drivers/net/vmxnet3/vmxnet3_drv.c 				  tq->tx_ring.size * sizeof(tq->buf_info[0]),
tx_ring           481 drivers/net/vmxnet3/vmxnet3_drv.c 	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
tx_ring           483 drivers/net/vmxnet3/vmxnet3_drv.c 	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
tx_ring           484 drivers/net/vmxnet3/vmxnet3_drv.c 	tq->tx_ring.gen = VMXNET3_INIT_GEN;
tx_ring           496 drivers/net/vmxnet3/vmxnet3_drv.c 	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
tx_ring           497 drivers/net/vmxnet3/vmxnet3_drv.c 	for (i = 0; i < tq->tx_ring.size; i++)
tx_ring           510 drivers/net/vmxnet3/vmxnet3_drv.c 	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
tx_ring           513 drivers/net/vmxnet3/vmxnet3_drv.c 	tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
tx_ring           514 drivers/net/vmxnet3/vmxnet3_drv.c 			tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
tx_ring           515 drivers/net/vmxnet3/vmxnet3_drv.c 			&tq->tx_ring.basePA, GFP_KERNEL);
tx_ring           516 drivers/net/vmxnet3/vmxnet3_drv.c 	if (!tq->tx_ring.base) {
tx_ring           537 drivers/net/vmxnet3/vmxnet3_drv.c 	sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
tx_ring           687 drivers/net/vmxnet3/vmxnet3_drv.c 	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
tx_ring           689 drivers/net/vmxnet3/vmxnet3_drv.c 	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
tx_ring           695 drivers/net/vmxnet3/vmxnet3_drv.c 					tq->tx_ring.next2fill *
tx_ring           700 drivers/net/vmxnet3/vmxnet3_drv.c 		tbi = tq->buf_info + tq->tx_ring.next2fill;
tx_ring           705 drivers/net/vmxnet3/vmxnet3_drv.c 			tq->tx_ring.next2fill,
tx_ring           708 drivers/net/vmxnet3/vmxnet3_drv.c 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
tx_ring           711 drivers/net/vmxnet3/vmxnet3_drv.c 		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
tx_ring           728 drivers/net/vmxnet3/vmxnet3_drv.c 		tbi = tq->buf_info + tq->tx_ring.next2fill;
tx_ring           738 drivers/net/vmxnet3/vmxnet3_drv.c 		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
tx_ring           739 drivers/net/vmxnet3/vmxnet3_drv.c 		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
tx_ring           747 drivers/net/vmxnet3/vmxnet3_drv.c 			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
tx_ring           749 drivers/net/vmxnet3/vmxnet3_drv.c 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
tx_ring           750 drivers/net/vmxnet3/vmxnet3_drv.c 		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
tx_ring           763 drivers/net/vmxnet3/vmxnet3_drv.c 			tbi = tq->buf_info + tq->tx_ring.next2fill;
tx_ring           780 drivers/net/vmxnet3/vmxnet3_drv.c 			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
tx_ring           781 drivers/net/vmxnet3/vmxnet3_drv.c 			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
tx_ring           789 drivers/net/vmxnet3/vmxnet3_drv.c 				tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
tx_ring           791 drivers/net/vmxnet3/vmxnet3_drv.c 			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
tx_ring           792 drivers/net/vmxnet3/vmxnet3_drv.c 			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
tx_ring           803 drivers/net/vmxnet3/vmxnet3_drv.c 	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
tx_ring           922 drivers/net/vmxnet3/vmxnet3_drv.c 					    tq->tx_ring.next2fill *
tx_ring           928 drivers/net/vmxnet3/vmxnet3_drv.c 		ctx->copy_size, tq->tx_ring.next2fill);
tx_ring          1054 drivers/net/vmxnet3/vmxnet3_drv.c 	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
tx_ring          1059 drivers/net/vmxnet3/vmxnet3_drv.c 			tq->tx_ring.next2comp, tq->tx_ring.next2fill);
tx_ring          1129 drivers/net/vmxnet3/vmxnet3_drv.c 		tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
tx_ring          1138 drivers/net/vmxnet3/vmxnet3_drv.c 				       tq->tx_ring.next2fill);
tx_ring          2445 drivers/net/vmxnet3/vmxnet3_drv.c 		BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
tx_ring          2447 drivers/net/vmxnet3/vmxnet3_drv.c 		tqc->txRingBasePA   = cpu_to_le64(tq->tx_ring.basePA);
tx_ring          2451 drivers/net/vmxnet3/vmxnet3_drv.c 		tqc->txRingSize     = cpu_to_le32(tq->tx_ring.size);
tx_ring          2570 drivers/net/vmxnet3/vmxnet3_drv.c 		adapter->tx_queue[0].tx_ring.size,
tx_ring          2828 drivers/net/vmxnet3/vmxnet3_drv.c 		tq->tx_ring.size   = tx_ring_size;
tx_ring           397 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = VMXNET3_GET_ADDR_LO(tq->tx_ring.basePA);
tx_ring           398 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = VMXNET3_GET_ADDR_HI(tq->tx_ring.basePA);
tx_ring           399 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = tq->tx_ring.size;
tx_ring           400 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = tq->tx_ring.next2fill;
tx_ring           401 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = tq->tx_ring.next2comp;
tx_ring           402 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = tq->tx_ring.gen;
tx_ring           234 drivers/net/vmxnet3/vmxnet3_int.h 	struct vmxnet3_cmd_ring         tx_ring;
tx_ring           312 drivers/net/wireless/admtek/adm8211.c 		u32 status = le32_to_cpu(priv->tx_ring[entry].status);
tx_ring          1468 drivers/net/wireless/admtek/adm8211.c 		desc = &priv->tx_ring[i];
tx_ring          1655 drivers/net/wireless/admtek/adm8211.c 	priv->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
tx_ring          1659 drivers/net/wireless/admtek/adm8211.c 	priv->tx_ring[entry].length = cpu_to_le32(flag | skb->len);
tx_ring          1663 drivers/net/wireless/admtek/adm8211.c 	priv->tx_ring[entry].status = cpu_to_le32(flag);
tx_ring          1758 drivers/net/wireless/admtek/adm8211.c 	priv->tx_ring = priv->rx_ring + priv->rx_ring_size;
tx_ring           542 drivers/net/wireless/admtek/adm8211.h 	struct adm8211_desc *tx_ring;
tx_ring           345 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	struct rtl8180_tx_ring *ring = &priv->tx_ring[prio];
tx_ring           474 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	ring = &priv->tx_ring[prio];
tx_ring           862 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 				  priv->tx_ring[1].dma);
tx_ring           864 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 				  priv->tx_ring[0].dma);
tx_ring           867 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 				  priv->tx_ring[4].dma);
tx_ring           869 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 				  priv->tx_ring[0].dma);
tx_ring           871 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 				  priv->tx_ring[1].dma);
tx_ring           873 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 				  priv->tx_ring[2].dma);
tx_ring           875 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 				  priv->tx_ring[3].dma);
tx_ring          1084 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	priv->tx_ring[prio].desc = ring;
tx_ring          1085 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	priv->tx_ring[prio].dma = dma;
tx_ring          1086 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	priv->tx_ring[prio].idx = 0;
tx_ring          1087 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	priv->tx_ring[prio].entries = entries;
tx_ring          1088 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	skb_queue_head_init(&priv->tx_ring[prio].queue);
tx_ring          1100 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	struct rtl8180_tx_ring *ring = &priv->tx_ring[prio];
tx_ring          1238 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		if (priv->tx_ring[i].desc)
tx_ring           118 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h 	struct rtl8180_tx_ring tx_ring[RTL818X_NR_TX_QUEUES];
tx_ring          1855 drivers/net/wireless/realtek/rtlwifi/core.c 	ring = &rtlpci->tx_ring[BEACON_QUEUE];
tx_ring           494 drivers/net/wireless/realtek/rtlwifi/pci.c 		struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
tx_ring           531 drivers/net/wireless/realtek/rtlwifi/pci.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
tx_ring          1088 drivers/net/wireless/realtek/rtlwifi/pci.c 	ring = &rtlpci->tx_ring[BEACON_QUEUE];
tx_ring          1231 drivers/net/wireless/realtek/rtlwifi/pci.c 		rtlpci->tx_ring[prio].buffer_desc = buffer_desc;
tx_ring          1232 drivers/net/wireless/realtek/rtlwifi/pci.c 		rtlpci->tx_ring[prio].buffer_desc_dma = buffer_desc_dma;
tx_ring          1234 drivers/net/wireless/realtek/rtlwifi/pci.c 		rtlpci->tx_ring[prio].cur_tx_rp = 0;
tx_ring          1235 drivers/net/wireless/realtek/rtlwifi/pci.c 		rtlpci->tx_ring[prio].cur_tx_wp = 0;
tx_ring          1247 drivers/net/wireless/realtek/rtlwifi/pci.c 	rtlpci->tx_ring[prio].desc = desc;
tx_ring          1248 drivers/net/wireless/realtek/rtlwifi/pci.c 	rtlpci->tx_ring[prio].dma = desc_dma;
tx_ring          1250 drivers/net/wireless/realtek/rtlwifi/pci.c 	rtlpci->tx_ring[prio].idx = 0;
tx_ring          1251 drivers/net/wireless/realtek/rtlwifi/pci.c 	rtlpci->tx_ring[prio].entries = entries;
tx_ring          1252 drivers/net/wireless/realtek/rtlwifi/pci.c 	skb_queue_head_init(&rtlpci->tx_ring[prio].queue);
tx_ring          1338 drivers/net/wireless/realtek/rtlwifi/pci.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
tx_ring          1435 drivers/net/wireless/realtek/rtlwifi/pci.c 		if (rtlpci->tx_ring[i].desc ||
tx_ring          1436 drivers/net/wireless/realtek/rtlwifi/pci.c 		    rtlpci->tx_ring[i].buffer_desc)
tx_ring          1516 drivers/net/wireless/realtek/rtlwifi/pci.c 		if (rtlpci->tx_ring[i].desc ||
tx_ring          1517 drivers/net/wireless/realtek/rtlwifi/pci.c 		    rtlpci->tx_ring[i].buffer_desc) {
tx_ring          1518 drivers/net/wireless/realtek/rtlwifi/pci.c 			struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];
tx_ring          1542 drivers/net/wireless/realtek/rtlwifi/pci.c 				rtlpci->tx_ring[i].cur_tx_rp = 0;
tx_ring          1543 drivers/net/wireless/realtek/rtlwifi/pci.c 				rtlpci->tx_ring[i].cur_tx_wp = 0;
tx_ring          1633 drivers/net/wireless/realtek/rtlwifi/pci.c 	ring = &rtlpci->tx_ring[hw_queue];
tx_ring          1724 drivers/net/wireless/realtek/rtlwifi/pci.c 		ring = &pcipriv->dev.tx_ring[queue_id];
tx_ring           179 drivers/net/wireless/realtek/rtlwifi/pci.h 	struct rtl8192_tx_ring tx_ring[RTL_PCI_MAX_TX_QUEUE_COUNT];
tx_ring            70 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
tx_ring           186 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 		ring = &rtlpci->tx_ring[queue];
tx_ring           891 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 			((u64) rtlpci->tx_ring[BEACON_QUEUE].dma) &
tx_ring           894 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 			(u64) rtlpci->tx_ring[MGNT_QUEUE].dma &
tx_ring           897 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 			(u64) rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring           899 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 			(u64) rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring           901 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 			(u64) rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring           903 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 			(u64) rtlpci->tx_ring[BK_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring           905 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 			(u64) rtlpci->tx_ring[HIGH_QUEUE].dma &
tx_ring          2225 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 			ring = &pcipriv->dev.tx_ring[queue_id];
tx_ring          2269 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 				ring = &pcipriv->dev.tx_ring[queue_id];
tx_ring           813 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
tx_ring           737 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c 			((u64) rtlpci->tx_ring[BEACON_QUEUE].dma) &
tx_ring           740 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c 			(u64) rtlpci->tx_ring[MGNT_QUEUE].dma &
tx_ring           743 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c 			(u64) rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring           745 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c 			(u64) rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring           747 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c 			(u64) rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring           749 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c 			(u64) rtlpci->tx_ring[BK_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring           751 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c 			(u64) rtlpci->tx_ring[HIGH_QUEUE].dma &
tx_ring           495 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c 				ring = &pcipriv->dev.tx_ring[queue_id];
tx_ring           711 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
tx_ring           418 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c 			ring = &pcipriv->dev.tx_ring[queue_id];
tx_ring           461 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c 			ring = &pcipriv->dev.tx_ring[queue_id];
tx_ring           465 drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c 	ring = &rtlpci->tx_ring[BEACON_QUEUE];
tx_ring           734 drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c 			rtlpci->tx_ring[BEACON_QUEUE].dma);
tx_ring           735 drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c 	rtl_write_dword(rtlpriv, REG_MGQ_DESA, rtlpci->tx_ring[MGNT_QUEUE].dma);
tx_ring           736 drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c 	rtl_write_dword(rtlpriv, REG_VOQ_DESA, rtlpci->tx_ring[VO_QUEUE].dma);
tx_ring           737 drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c 	rtl_write_dword(rtlpriv, REG_VIQ_DESA, rtlpci->tx_ring[VI_QUEUE].dma);
tx_ring           738 drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c 	rtl_write_dword(rtlpriv, REG_BEQ_DESA, rtlpci->tx_ring[BE_QUEUE].dma);
tx_ring           739 drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c 	rtl_write_dword(rtlpriv, REG_BKQ_DESA, rtlpci->tx_ring[BK_QUEUE].dma);
tx_ring           740 drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c 	rtl_write_dword(rtlpriv, REG_HQ_DESA, rtlpci->tx_ring[HIGH_QUEUE].dma);
tx_ring          3115 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 			ring = &pcipriv->dev.tx_ring[queue_id];
tx_ring           830 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
tx_ring           164 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c 		ring = &rtlpci->tx_ring[queue];
tx_ring           826 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c 			((u64)rtlpci->tx_ring[BEACON_QUEUE].buffer_desc_dma) >>
tx_ring           829 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c 			(u64)rtlpci->tx_ring[MGNT_QUEUE].buffer_desc_dma >> 32);
tx_ring           831 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c 			(u64)rtlpci->tx_ring[VO_QUEUE].buffer_desc_dma >> 32);
tx_ring           833 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c 			(u64)rtlpci->tx_ring[VI_QUEUE].buffer_desc_dma >> 32);
tx_ring           835 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c 			(u64)rtlpci->tx_ring[BE_QUEUE].buffer_desc_dma >> 32);
tx_ring           837 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c 			(u64)rtlpci->tx_ring[BK_QUEUE].buffer_desc_dma >> 32);
tx_ring           839 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c 			(u64)rtlpci->tx_ring[HIGH_QUEUE].buffer_desc_dma >> 32);
tx_ring           848 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c 			((u64)rtlpci->tx_ring[BEACON_QUEUE].buffer_desc_dma) &
tx_ring           851 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c 			(u64)rtlpci->tx_ring[MGNT_QUEUE].buffer_desc_dma &
tx_ring           854 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c 			(u64)rtlpci->tx_ring[VO_QUEUE].buffer_desc_dma &
tx_ring           857 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c 			(u64)rtlpci->tx_ring[VI_QUEUE].buffer_desc_dma &
tx_ring           861 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c 			(u64)rtlpci->tx_ring[BE_QUEUE].buffer_desc_dma &
tx_ring           867 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c 			(u64)rtlpci->tx_ring[BK_QUEUE].buffer_desc_dma &
tx_ring           870 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c 			(u64)rtlpci->tx_ring[HIGH_QUEUE].buffer_desc_dma &
tx_ring          3095 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 			ring = &pcipriv->dev.tx_ring[queue_id];
tx_ring          3139 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 			ring = &pcipriv->dev.tx_ring[queue_id];
tx_ring           575 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 	current_bd_desc = rtlpci->tx_ring[queue_index].cur_tx_wp;
tx_ring           595 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 	desc_dma_addr = rtlpci->tx_ring[queue_index].dma +
tx_ring           909 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 			struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[q_idx];
tx_ring          1010 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
tx_ring           119 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 	ring = &rtlpci->tx_ring[TXCMD_QUEUE];
tx_ring           699 drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c 	rtl_write_dword(rtlpriv, TBKDA, rtlpci->tx_ring[BK_QUEUE].dma);
tx_ring           700 drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c 	rtl_write_dword(rtlpriv, TBEDA, rtlpci->tx_ring[BE_QUEUE].dma);
tx_ring           701 drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c 	rtl_write_dword(rtlpriv, TVIDA, rtlpci->tx_ring[VI_QUEUE].dma);
tx_ring           702 drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c 	rtl_write_dword(rtlpriv, TVODA, rtlpci->tx_ring[VO_QUEUE].dma);
tx_ring           703 drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c 	rtl_write_dword(rtlpriv, TBDA, rtlpci->tx_ring[BEACON_QUEUE].dma);
tx_ring           704 drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c 	rtl_write_dword(rtlpriv, TCDA, rtlpci->tx_ring[TXCMD_QUEUE].dma);
tx_ring           705 drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c 	rtl_write_dword(rtlpriv, TMDA, rtlpci->tx_ring[MGNT_QUEUE].dma);
tx_ring           706 drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c 	rtl_write_dword(rtlpriv, THPDA, rtlpci->tx_ring[HIGH_QUEUE].dma);
tx_ring           707 drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c 	rtl_write_dword(rtlpriv, HDA, rtlpci->tx_ring[HCCA_QUEUE].dma);
tx_ring           583 drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c 				ring = &pcipriv->dev.tx_ring[queue_id];
tx_ring           218 drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
tx_ring           737 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c 			((u64) rtlpci->tx_ring[BEACON_QUEUE].dma) &
tx_ring           740 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c 			(u64) rtlpci->tx_ring[MGNT_QUEUE].dma &
tx_ring           743 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c 			(u64) rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring           745 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c 			(u64) rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring           747 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c 			(u64) rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring           749 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c 			(u64) rtlpci->tx_ring[BK_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring           751 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c 			(u64) rtlpci->tx_ring[HIGH_QUEUE].dma &
tx_ring          1629 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 			ring = &pcipriv->dev.tx_ring[queue_id];
tx_ring           672 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
tx_ring            32 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
tx_ring           189 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 		ring = &rtlpci->tx_ring[queue];
tx_ring           884 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 			((u64) rtlpci->tx_ring[BEACON_QUEUE].dma) &
tx_ring           887 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 			(u64) rtlpci->tx_ring[MGNT_QUEUE].dma &
tx_ring           890 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 			(u64) rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring           892 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 			(u64) rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring           894 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 			(u64) rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring           896 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 			(u64) rtlpci->tx_ring[BK_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring           898 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 			(u64) rtlpci->tx_ring[HIGH_QUEUE].dma &
tx_ring          2554 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 			ring = &pcipriv->dev.tx_ring[queue_id];
tx_ring          2603 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 			ring = &pcipriv->dev.tx_ring[queue_id];
tx_ring           725 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
tx_ring           229 drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c 	ring = &rtlpci->tx_ring[BEACON_QUEUE];
tx_ring            28 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
tx_ring           187 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 		ring = &rtlpci->tx_ring[queue];
tx_ring           977 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 			rtlpci->tx_ring[BEACON_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring           979 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 			rtlpci->tx_ring[MGNT_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring           981 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 			rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring           983 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 			rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring           985 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 			rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring           987 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 			rtlpci->tx_ring[BK_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring           989 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 			rtlpci->tx_ring[HIGH_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring          1427 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 			rtlpci->tx_ring[BEACON_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring          1429 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 			rtlpci->tx_ring[MGNT_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring          1431 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 			rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring          1433 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 			rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring          1435 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 			rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring          1437 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 			rtlpci->tx_ring[BK_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring          1439 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 			rtlpci->tx_ring[HIGH_QUEUE].dma & DMA_BIT_MASK(32));
tx_ring          4769 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c 			ring = &pcipriv->dev.tx_ring[queue_id];
tx_ring           965 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
tx_ring            86 drivers/net/wireless/realtek/rtw88/pci.c static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
tx_ring            88 drivers/net/wireless/realtek/rtw88/pci.c 	int offset = tx_ring->r.desc_size * idx;
tx_ring            90 drivers/net/wireless/realtek/rtw88/pci.c 	return tx_ring->r.head + offset;
tx_ring            94 drivers/net/wireless/realtek/rtw88/pci.c 				      struct rtw_pci_tx_ring *tx_ring)
tx_ring           102 drivers/net/wireless/realtek/rtw88/pci.c 	skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
tx_ring           103 drivers/net/wireless/realtek/rtw88/pci.c 		__skb_unlink(skb, &tx_ring->queue);
tx_ring           113 drivers/net/wireless/realtek/rtw88/pci.c 				 struct rtw_pci_tx_ring *tx_ring)
tx_ring           116 drivers/net/wireless/realtek/rtw88/pci.c 	u8 *head = tx_ring->r.head;
tx_ring           117 drivers/net/wireless/realtek/rtw88/pci.c 	u32 len = tx_ring->r.len;
tx_ring           118 drivers/net/wireless/realtek/rtw88/pci.c 	int ring_sz = len * tx_ring->r.desc_size;
tx_ring           120 drivers/net/wireless/realtek/rtw88/pci.c 	rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
tx_ring           123 drivers/net/wireless/realtek/rtw88/pci.c 	pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma);
tx_ring           124 drivers/net/wireless/realtek/rtw88/pci.c 	tx_ring->r.head = NULL;
tx_ring           163 drivers/net/wireless/realtek/rtw88/pci.c 	struct rtw_pci_tx_ring *tx_ring;
tx_ring           168 drivers/net/wireless/realtek/rtw88/pci.c 		tx_ring = &rtwpci->tx_rings[i];
tx_ring           169 drivers/net/wireless/realtek/rtw88/pci.c 		rtw_pci_free_tx_ring(rtwdev, tx_ring);
tx_ring           179 drivers/net/wireless/realtek/rtw88/pci.c 				struct rtw_pci_tx_ring *tx_ring,
tx_ring           193 drivers/net/wireless/realtek/rtw88/pci.c 	skb_queue_head_init(&tx_ring->queue);
tx_ring           194 drivers/net/wireless/realtek/rtw88/pci.c 	tx_ring->r.head = head;
tx_ring           195 drivers/net/wireless/realtek/rtw88/pci.c 	tx_ring->r.dma = dma;
tx_ring           196 drivers/net/wireless/realtek/rtw88/pci.c 	tx_ring->r.len = len;
tx_ring           197 drivers/net/wireless/realtek/rtw88/pci.c 	tx_ring->r.desc_size = desc_size;
tx_ring           198 drivers/net/wireless/realtek/rtw88/pci.c 	tx_ring->r.wp = 0;
tx_ring           199 drivers/net/wireless/realtek/rtw88/pci.c 	tx_ring->r.rp = 0;
tx_ring           313 drivers/net/wireless/realtek/rtw88/pci.c 	struct rtw_pci_tx_ring *tx_ring;
tx_ring           324 drivers/net/wireless/realtek/rtw88/pci.c 		tx_ring = &rtwpci->tx_rings[i];
tx_ring           326 drivers/net/wireless/realtek/rtw88/pci.c 		ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
tx_ring           346 drivers/net/wireless/realtek/rtw88/pci.c 		tx_ring = &rtwpci->tx_rings[i];
tx_ring           347 drivers/net/wireless/realtek/rtw88/pci.c 		rtw_pci_free_tx_ring(rtwdev, tx_ring);
tx_ring           505 drivers/net/wireless/realtek/rtw88/pci.c 	struct rtw_pci_tx_ring *tx_ring;
tx_ring           509 drivers/net/wireless/realtek/rtw88/pci.c 		tx_ring = &rtwpci->tx_rings[queue];
tx_ring           510 drivers/net/wireless/realtek/rtw88/pci.c 		rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
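
The rtw88 pci.c entries above keep each TX ring as one contiguous descriptor buffer: rtw_pci_get_tx_desc resolves slot idx to r.head + idx * r.desc_size, and rtw_pci_init_tx_ring records head, dma, len and desc_size and zeroes the read/write pointers. Below is a minimal user-space sketch of that addressing scheme, assuming hypothetical names (demo_ring, demo_get_desc) and with calloc standing in for the driver's DMA-coherent allocation.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the ring bookkeeping embedded in rtw_pci_tx_ring. */
struct demo_ring {
	uint8_t *head;      /* CPU address of the first descriptor            */
	uint64_t dma;       /* bus address (faked here; the driver keeps the
	                       address returned by the coherent allocation)   */
	size_t desc_size;   /* size of one hardware descriptor in bytes       */
	unsigned int len;   /* number of descriptors in the ring              */
	unsigned int wp;    /* write (producer) index                         */
	unsigned int rp;    /* read (consumer) index                          */
};

/* Mirrors the rtw_pci_get_tx_desc() idea: descriptor i lives at
 * head + i * desc_size inside one contiguous buffer. */
static void *demo_get_desc(struct demo_ring *r, unsigned int idx)
{
	return r->head + (size_t)idx * r->desc_size;
}

static int demo_ring_init(struct demo_ring *r, size_t desc_size, unsigned int len)
{
	r->head = calloc(len, desc_size);  /* driver: DMA-coherent allocation */
	if (!r->head)
		return -1;
	r->dma = 0;        /* placeholder; set from the DMA mapping in a driver */
	r->desc_size = desc_size;
	r->len = len;
	r->wp = 0;
	r->rp = 0;
	return 0;
}

int main(void)
{
	struct demo_ring ring;

	if (demo_ring_init(&ring, 16, 256))
		return 1;
	/* Claim the next free slot, fill it, advance the write pointer. */
	memset(demo_get_desc(&ring, ring.wp), 0, ring.desc_size);
	ring.wp = (ring.wp + 1) % ring.len;
	printf("desc 0 at %p, desc 1 at %p\n",
	       demo_get_desc(&ring, 0), demo_get_desc(&ring, 1));
	free(ring.head);
	return 0;
}
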
tx_ring            29 drivers/net/xen-netback/xenbus.c 	struct xen_netif_tx_back_ring *tx_ring = &queue->tx;
tx_ring            33 drivers/net/xen-netback/xenbus.c 	if (tx_ring->sring) {
tx_ring            34 drivers/net/xen-netback/xenbus.c 		struct xen_netif_tx_sring *sring = tx_ring->sring;
tx_ring            37 drivers/net/xen-netback/xenbus.c 			   tx_ring->nr_ents);
tx_ring            41 drivers/net/xen-netback/xenbus.c 			   tx_ring->req_cons,
tx_ring            42 drivers/net/xen-netback/xenbus.c 			   tx_ring->req_cons - sring->rsp_prod,
tx_ring            47 drivers/net/xen-netback/xenbus.c 			   tx_ring->rsp_prod_pvt,
tx_ring            48 drivers/net/xen-netback/xenbus.c 			   tx_ring->rsp_prod_pvt - sring->rsp_prod,
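
The xen-netback entries dump the back end's view of the shared TX ring: req_cons against the shared ring's published rsp_prod, and the private rsp_prod_pvt against rsp_prod. Below is a small sketch of that free-running-index arithmetic using hypothetical struct names (demo_sring, demo_back_ring); the real driver uses the Xen ring macros rather than open-coded subtraction.

#include <stdio.h>

/* Hypothetical shared-ring header: the response producer index the back
 * end has published to the front end. */
struct demo_sring {
	unsigned int rsp_prod;
};

/* Hypothetical back-end private view, mirroring the fields printed by the
 * xenbus debugfs code indexed above. */
struct demo_back_ring {
	unsigned int req_cons;       /* requests consumed so far             */
	unsigned int rsp_prod_pvt;   /* responses produced but not pushed    */
	unsigned int nr_ents;        /* ring size in entries                 */
	struct demo_sring *sring;
};

/* Requests consumed by the back end that have no published response yet. */
static unsigned int consumed_unanswered(const struct demo_back_ring *r)
{
	return r->req_cons - r->sring->rsp_prod;
}

/* Responses produced privately but not yet made visible to the front end. */
static unsigned int unpushed_responses(const struct demo_back_ring *r)
{
	return r->rsp_prod_pvt - r->sring->rsp_prod;
}

int main(void)
{
	struct demo_sring s = { .rsp_prod = 30 };
	struct demo_back_ring r = {
		.req_cons = 35, .rsp_prod_pvt = 33, .nr_ents = 256, .sring = &s,
	};

	printf("consumed but unanswered: %u\n", consumed_unanswered(&r));
	printf("unpushed responses:      %u\n", unpushed_responses(&r));
	return 0;
}

The indices are free-running counters, so plain unsigned subtraction stays correct across wraparound.
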
tx_ring           149 drivers/staging/mt7621-dma/mtk-hsdma.c 	struct hsdma_desc *tx_ring;
tx_ring           243 drivers/staging/mt7621-dma/mtk-hsdma.c 		tx_desc = &chan->tx_ring[i];
tx_ring           266 drivers/staging/mt7621-dma/mtk-hsdma.c 		chan->tx_ring[i].addr0 = 0;
tx_ring           267 drivers/staging/mt7621-dma/mtk-hsdma.c 		chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
tx_ring           332 drivers/staging/mt7621-dma/mtk-hsdma.c 		tx_desc = &chan->tx_ring[chan->tx_idx];
tx_ring           550 drivers/staging/mt7621-dma/mtk-hsdma.c 	chan->tx_ring = dma_alloc_coherent(hsdma->ddev.dev,
tx_ring           551 drivers/staging/mt7621-dma/mtk-hsdma.c 			2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
tx_ring           553 drivers/staging/mt7621-dma/mtk-hsdma.c 	if (!chan->tx_ring)
tx_ring           556 drivers/staging/mt7621-dma/mtk-hsdma.c 	chan->rx_ring = &chan->tx_ring[HSDMA_DESCS_NUM];
tx_ring           560 drivers/staging/mt7621-dma/mtk-hsdma.c 		chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
tx_ring           570 drivers/staging/mt7621-dma/mtk-hsdma.c 	if (chan->tx_ring) {
tx_ring           572 drivers/staging/mt7621-dma/mtk-hsdma.c 				2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
tx_ring           573 drivers/staging/mt7621-dma/mtk-hsdma.c 				chan->tx_ring, chan->desc_addr);
tx_ring           574 drivers/staging/mt7621-dma/mtk-hsdma.c 		chan->tx_ring = NULL;
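
The mtk-hsdma entries allocate one coherent buffer sized for 2 * HSDMA_DESCS_NUM descriptors and carve it in two: tx_ring points at the start, rx_ring is simply &tx_ring[HSDMA_DESCS_NUM], and freeing through tx_ring releases both. A user-space sketch of that split follows, with calloc standing in for dma_alloc_coherent and a hypothetical descriptor layout (demo_desc).

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_DESCS_NUM 64   /* stands in for HSDMA_DESCS_NUM */

/* Hypothetical descriptor, loosely modelled on a two-address DMA descriptor. */
struct demo_desc {
	uint32_t addr0;
	uint32_t flags;
	uint32_t addr1;
	uint32_t unused;
};

struct demo_chan {
	struct demo_desc *tx_ring;
	struct demo_desc *rx_ring;
};

static int demo_alloc_rings(struct demo_chan *chan)
{
	/* One allocation covers both rings; a driver would use
	 * dma_alloc_coherent() and keep the returned bus address too. */
	chan->tx_ring = calloc(2 * DEMO_DESCS_NUM, sizeof(*chan->tx_ring));
	if (!chan->tx_ring)
		return -1;
	/* The RX ring is the second half of the same buffer. */
	chan->rx_ring = &chan->tx_ring[DEMO_DESCS_NUM];
	return 0;
}

static void demo_free_rings(struct demo_chan *chan)
{
	free(chan->tx_ring);       /* frees both halves at once */
	chan->tx_ring = NULL;
	chan->rx_ring = NULL;
}

int main(void)
{
	struct demo_chan chan;

	if (demo_alloc_rings(&chan))
		return 1;
	printf("tx_ring=%p rx_ring=%p (offset %zu descs)\n",
	       (void *)chan.tx_ring, (void *)chan.rx_ring,
	       (size_t)(chan.rx_ring - chan.tx_ring));
	demo_free_rings(&chan);
	return 0;
}

Keeping both rings in one allocation means a single base address describes the whole descriptor area and there is only one buffer to release on teardown.
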
tx_ring          2116 drivers/staging/qlge/qlge.h 	struct tx_ring tx_ring[MAX_TX_RINGS];
tx_ring          2307 drivers/staging/qlge/qlge.h void ql_dump_tx_ring(struct tx_ring *tx_ring);
tx_ring          2314 drivers/staging/qlge/qlge.h #define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
tx_ring          2322 drivers/staging/qlge/qlge.h #define QL_DUMP_TX_RING(tx_ring)
tx_ring          1626 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "%p", tx_ring);
tx_ring          1651 drivers/staging/qlge/qlge_dbg.c void ql_dump_tx_ring(struct tx_ring *tx_ring)
tx_ring          1653 drivers/staging/qlge/qlge_dbg.c 	if (tx_ring == NULL)
tx_ring          1656 drivers/staging/qlge/qlge_dbg.c 	       tx_ring->wq_id);
tx_ring          1657 drivers/staging/qlge/qlge_dbg.c 	pr_err("tx_ring->base = %p\n", tx_ring->wq_base);
tx_ring          1659 drivers/staging/qlge/qlge_dbg.c 	       (unsigned long long) tx_ring->wq_base_dma);
tx_ring          1661 drivers/staging/qlge/qlge_dbg.c 	       tx_ring->cnsmr_idx_sh_reg,
tx_ring          1662 drivers/staging/qlge/qlge_dbg.c 	       tx_ring->cnsmr_idx_sh_reg
tx_ring          1663 drivers/staging/qlge/qlge_dbg.c 			? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
tx_ring          1664 drivers/staging/qlge/qlge_dbg.c 	pr_err("tx_ring->size = %d\n", tx_ring->wq_size);
tx_ring          1665 drivers/staging/qlge/qlge_dbg.c 	pr_err("tx_ring->len = %d\n", tx_ring->wq_len);
tx_ring          1666 drivers/staging/qlge/qlge_dbg.c 	pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg);
tx_ring          1667 drivers/staging/qlge/qlge_dbg.c 	pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg);
tx_ring          1668 drivers/staging/qlge/qlge_dbg.c 	pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx);
tx_ring          1669 drivers/staging/qlge/qlge_dbg.c 	pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id);
tx_ring          1670 drivers/staging/qlge/qlge_dbg.c 	pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id);
tx_ring          1671 drivers/staging/qlge/qlge_dbg.c 	pr_err("tx_ring->q = %p\n", tx_ring->q);
tx_ring          1672 drivers/staging/qlge/qlge_dbg.c 	pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count));
tx_ring          2016 drivers/staging/qlge/qlge_dbg.c 		QL_DUMP_TX_RING(&qdev->tx_ring[i]);
tx_ring          2017 drivers/staging/qlge/qlge_dbg.c 		QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
tx_ring          2107 drivers/staging/qlge/qlge_main.c 	struct tx_ring *tx_ring;
tx_ring          2111 drivers/staging/qlge/qlge_main.c 	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
tx_ring          2112 drivers/staging/qlge/qlge_main.c 	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
tx_ring          2114 drivers/staging/qlge/qlge_main.c 	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
tx_ring          2115 drivers/staging/qlge/qlge_main.c 	tx_ring->tx_packets++;
tx_ring          2140 drivers/staging/qlge/qlge_main.c 	atomic_inc(&tx_ring->tx_count);
tx_ring          2209 drivers/staging/qlge/qlge_main.c 	struct tx_ring *tx_ring;
tx_ring          2237 drivers/staging/qlge/qlge_main.c 	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
tx_ring          2238 drivers/staging/qlge/qlge_main.c 	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
tx_ring          2239 drivers/staging/qlge/qlge_main.c 		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
tx_ring          2244 drivers/staging/qlge/qlge_main.c 			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
tx_ring          2640 drivers/staging/qlge/qlge_main.c 	struct tx_ring *tx_ring;
tx_ring          2643 drivers/staging/qlge/qlge_main.c 	tx_ring = &qdev->tx_ring[tx_ring_idx];
tx_ring          2648 drivers/staging/qlge/qlge_main.c 	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
tx_ring          2652 drivers/staging/qlge/qlge_main.c 		netif_stop_subqueue(ndev, tx_ring->wq_id);
tx_ring          2653 drivers/staging/qlge/qlge_main.c 		tx_ring->tx_errors++;
tx_ring          2656 drivers/staging/qlge/qlge_main.c 	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
tx_ring          2688 drivers/staging/qlge/qlge_main.c 		tx_ring->tx_errors++;
tx_ring          2692 drivers/staging/qlge/qlge_main.c 	tx_ring->prod_idx++;
tx_ring          2693 drivers/staging/qlge/qlge_main.c 	if (tx_ring->prod_idx == tx_ring->wq_len)
tx_ring          2694 drivers/staging/qlge/qlge_main.c 		tx_ring->prod_idx = 0;
tx_ring          2697 drivers/staging/qlge/qlge_main.c 	ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
tx_ring          2700 drivers/staging/qlge/qlge_main.c 		     tx_ring->prod_idx, skb->len);
tx_ring          2702 drivers/staging/qlge/qlge_main.c 	atomic_dec(&tx_ring->tx_count);
tx_ring          2704 drivers/staging/qlge/qlge_main.c 	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
tx_ring          2705 drivers/staging/qlge/qlge_main.c 		netif_stop_subqueue(ndev, tx_ring->wq_id);
tx_ring          2706 drivers/staging/qlge/qlge_main.c 		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
tx_ring          2711 drivers/staging/qlge/qlge_main.c 			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
tx_ring          2764 drivers/staging/qlge/qlge_main.c static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
tx_ring          2770 drivers/staging/qlge/qlge_main.c 	mac_iocb_ptr = tx_ring->wq_base;
tx_ring          2771 drivers/staging/qlge/qlge_main.c 	tx_ring_desc = tx_ring->q;
tx_ring          2772 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < tx_ring->wq_len; i++) {
tx_ring          2779 drivers/staging/qlge/qlge_main.c 	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
tx_ring          2783 drivers/staging/qlge/qlge_main.c 				 struct tx_ring *tx_ring)
tx_ring          2785 drivers/staging/qlge/qlge_main.c 	if (tx_ring->wq_base) {
tx_ring          2786 drivers/staging/qlge/qlge_main.c 		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
tx_ring          2787 drivers/staging/qlge/qlge_main.c 				    tx_ring->wq_base, tx_ring->wq_base_dma);
tx_ring          2788 drivers/staging/qlge/qlge_main.c 		tx_ring->wq_base = NULL;
tx_ring          2790 drivers/staging/qlge/qlge_main.c 	kfree(tx_ring->q);
tx_ring          2791 drivers/staging/qlge/qlge_main.c 	tx_ring->q = NULL;
tx_ring          2795 drivers/staging/qlge/qlge_main.c 				 struct tx_ring *tx_ring)
tx_ring          2797 drivers/staging/qlge/qlge_main.c 	tx_ring->wq_base =
tx_ring          2798 drivers/staging/qlge/qlge_main.c 	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
tx_ring          2799 drivers/staging/qlge/qlge_main.c 				 &tx_ring->wq_base_dma);
tx_ring          2801 drivers/staging/qlge/qlge_main.c 	if ((tx_ring->wq_base == NULL) ||
tx_ring          2802 drivers/staging/qlge/qlge_main.c 	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
tx_ring          2805 drivers/staging/qlge/qlge_main.c 	tx_ring->q =
tx_ring          2806 drivers/staging/qlge/qlge_main.c 	    kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
tx_ring          2808 drivers/staging/qlge/qlge_main.c 	if (tx_ring->q == NULL)
tx_ring          2813 drivers/staging/qlge/qlge_main.c 	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
tx_ring          2814 drivers/staging/qlge/qlge_main.c 			    tx_ring->wq_base, tx_ring->wq_base_dma);
tx_ring          2815 drivers/staging/qlge/qlge_main.c 	tx_ring->wq_base = NULL;
tx_ring          3055 drivers/staging/qlge/qlge_main.c 	struct tx_ring *tx_ring;
tx_ring          3064 drivers/staging/qlge/qlge_main.c 		tx_ring = &qdev->tx_ring[j];
tx_ring          3065 drivers/staging/qlge/qlge_main.c 		for (i = 0; i < tx_ring->wq_len; i++) {
tx_ring          3066 drivers/staging/qlge/qlge_main.c 			tx_ring_desc = &tx_ring->q[i];
tx_ring          3086 drivers/staging/qlge/qlge_main.c 		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
tx_ring          3109 drivers/staging/qlge/qlge_main.c 		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
tx_ring          3258 drivers/staging/qlge/qlge_main.c static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
tx_ring          3260 drivers/staging/qlge/qlge_main.c 	struct wqicb *wqicb = (struct wqicb *)tx_ring;
tx_ring          3262 drivers/staging/qlge/qlge_main.c 	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
tx_ring          3264 drivers/staging/qlge/qlge_main.c 	    (tx_ring->wq_id * sizeof(u64));
tx_ring          3266 drivers/staging/qlge/qlge_main.c 	    (tx_ring->wq_id * sizeof(u64));
tx_ring          3273 drivers/staging/qlge/qlge_main.c 	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
tx_ring          3274 drivers/staging/qlge/qlge_main.c 	tx_ring->prod_idx = 0;
tx_ring          3276 drivers/staging/qlge/qlge_main.c 	tx_ring->valid_db_reg = doorbell_area + 0x04;
tx_ring          3281 drivers/staging/qlge/qlge_main.c 	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
tx_ring          3282 drivers/staging/qlge/qlge_main.c 	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
tx_ring          3284 drivers/staging/qlge/qlge_main.c 	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
tx_ring          3287 drivers/staging/qlge/qlge_main.c 	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
tx_ring          3289 drivers/staging/qlge/qlge_main.c 	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
tx_ring          3291 drivers/staging/qlge/qlge_main.c 	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
tx_ring          3293 drivers/staging/qlge/qlge_main.c 	ql_init_tx_ring(qdev, tx_ring);
tx_ring          3296 drivers/staging/qlge/qlge_main.c 			   (u16) tx_ring->wq_id);
tx_ring          3834 drivers/staging/qlge/qlge_main.c 		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
tx_ring          4101 drivers/staging/qlge/qlge_main.c 	struct tx_ring *tx_ring;
tx_ring          4123 drivers/staging/qlge/qlge_main.c 		tx_ring = &qdev->tx_ring[i];
tx_ring          4124 drivers/staging/qlge/qlge_main.c 		memset((void *)tx_ring, 0, sizeof(*tx_ring));
tx_ring          4125 drivers/staging/qlge/qlge_main.c 		tx_ring->qdev = qdev;
tx_ring          4126 drivers/staging/qlge/qlge_main.c 		tx_ring->wq_id = i;
tx_ring          4127 drivers/staging/qlge/qlge_main.c 		tx_ring->wq_len = qdev->tx_ring_size;
tx_ring          4128 drivers/staging/qlge/qlge_main.c 		tx_ring->wq_size =
tx_ring          4129 drivers/staging/qlge/qlge_main.c 		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
tx_ring          4135 drivers/staging/qlge/qlge_main.c 		tx_ring->cq_id = qdev->rss_ring_count + i;
tx_ring          4294 drivers/staging/qlge/qlge_main.c 	struct tx_ring *tx_ring = &qdev->tx_ring[0];
tx_ring          4315 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
tx_ring          4316 drivers/staging/qlge/qlge_main.c 			pkts += tx_ring->tx_packets;
tx_ring          4317 drivers/staging/qlge/qlge_main.c 			bytes += tx_ring->tx_bytes;
tx_ring          4318 drivers/staging/qlge/qlge_main.c 			errors += tx_ring->tx_errors;
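
The qlge entries combine two pieces of TX bookkeeping: the producer index wraps to zero when it reaches wq_len, and an atomic count of free slots (tx_count) drives flow control, stopping the subqueue when fewer than two slots remain and waking it once more than a quarter of the ring is free again. Below is a minimal user-space sketch of both, under hypothetical names (demo_txq, demo_xmit, demo_complete) and with a plain int and bool standing in for the driver's atomic_t and netif_stop/wake_subqueue calls.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the pieces of struct tx_ring used here. */
struct demo_txq {
	unsigned int prod_idx;   /* next slot to fill                     */
	unsigned int wq_len;     /* number of slots in the work queue     */
	int tx_count;            /* free slots (atomic_t in the driver)   */
	bool stopped;            /* models netif_stop/wake_subqueue state */
};

/* Post one frame: consume a slot, advance and wrap the producer index. */
static int demo_xmit(struct demo_txq *q)
{
	if (q->tx_count < 2) {           /* keep headroom, as the driver does */
		q->stopped = true;
		return -1;               /* caller would return NETDEV_TX_BUSY */
	}
	/* ... fill the descriptor at prod_idx and ring the doorbell here ... */
	if (++q->prod_idx == q->wq_len)
		q->prod_idx = 0;
	q->tx_count--;
	if (q->tx_count < 2)
		q->stopped = true;       /* stop before the ring overflows */
	return 0;
}

/* Completion path: return a slot and wake the queue past the threshold. */
static void demo_complete(struct demo_txq *q)
{
	q->tx_count++;
	if (q->stopped && q->tx_count > (int)(q->wq_len / 4))
		q->stopped = false;      /* driver: netif_wake_subqueue() */
}

int main(void)
{
	struct demo_txq q = { .prod_idx = 0, .wq_len = 8, .tx_count = 8 };

	for (int i = 0; i < 10; i++)
		if (demo_xmit(&q))
			break;
	printf("prod_idx=%u free=%d stopped=%d\n", q.prod_idx, q.tx_count, q.stopped);
	while (q.stopped)
		demo_complete(&q);
	printf("after completions: free=%d stopped=%d\n", q.tx_count, q.stopped);
	return 0;
}

Checking against two rather than zero leaves one slot of headroom before the queue is declared full.
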
tx_ring          2248 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c 		rtl92e_writel(dev, TX_DESC_BASE[i], priv->tx_ring[i].dma);
tx_ring          1479 drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c 				ring = &priv->tx_ring[QueueID];
tx_ring          1508 drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c 				ring = &priv->tx_ring[QueueID];
tx_ring           263 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];
tx_ring           527 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	ring = &priv->tx_ring[BEACON_QUEUE];
tx_ring          1101 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		if (skb_queue_len(&(&priv->tx_ring[i])->queue) > 0) {
tx_ring          1103 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			       i, skb_queue_len(&(&priv->tx_ring[i])->queue));
tx_ring          1136 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		ring = &priv->tx_ring[QueueID];
tx_ring          1580 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];
tx_ring          1667 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];
tx_ring          1699 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	ring = &priv->tx_ring[TXCMD_QUEUE];
tx_ring          1749 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	ring = &priv->tx_ring[tcb_desc->queue_index];
tx_ring          1841 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	priv->tx_ring[prio].desc = ring;
tx_ring          1842 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	priv->tx_ring[prio].dma = dma;
tx_ring          1843 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	priv->tx_ring[prio].idx = 0;
tx_ring          1844 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	priv->tx_ring[prio].entries = entries;
tx_ring          1845 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	skb_queue_head_init(&priv->tx_ring[prio].queue);
tx_ring          1876 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		if (priv->tx_ring[i].desc)
tx_ring          1901 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		if (priv->tx_ring[i].desc) {
tx_ring          1902 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			struct rtl8192_tx_ring *ring = &priv->tx_ring[i];
tx_ring           258 drivers/staging/rtl8192e/rtl8192e/rtl_core.h 	struct tx_ring *next;
tx_ring           388 drivers/staging/rtl8192e/rtl8192e/rtl_core.h 	struct rtl8192_tx_ring tx_ring[MAX_TX_QUEUE_COUNT];
tx_ring           139 include/linux/fs_enet_pd.h 	int rx_ring, tx_ring;	/* number of buffers on rx     */
tx_ring           165 net/packet/af_packet.c 		int closing, int tx_ring);
tx_ring          1185 net/packet/af_packet.c 	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
tx_ring          1186 net/packet/af_packet.c 	if (unlikely(po->tx_ring.pending_refcnt == NULL))
tx_ring          1194 net/packet/af_packet.c 	free_percpu(po->tx_ring.pending_refcnt);
tx_ring          2425 net/packet/af_packet.c 	if (likely(po->tx_ring.pg_vec)) {
tx_ring          2430 net/packet/af_packet.c 		packet_dec_pending(&po->tx_ring);
tx_ring          2435 net/packet/af_packet.c 		if (!packet_read_pending(&po->tx_ring))
tx_ring          2583 net/packet/af_packet.c 		off_max = po->tx_ring.frame_size - tp_len;
tx_ring          2643 net/packet/af_packet.c 	if (unlikely(!po->tx_ring.pg_vec)) {
tx_ring          2684 net/packet/af_packet.c 	size_max = po->tx_ring.frame_size
tx_ring          2693 net/packet/af_packet.c 		ph = packet_current_frame(po, &po->tx_ring,
tx_ring          2753 net/packet/af_packet.c 				packet_increment_head(&po->tx_ring);
tx_ring          2773 net/packet/af_packet.c 		packet_inc_pending(&po->tx_ring);
tx_ring          2791 net/packet/af_packet.c 		packet_increment_head(&po->tx_ring);
tx_ring          2800 net/packet/af_packet.c 		 (need_wait && packet_read_pending(&po->tx_ring))));
tx_ring          3000 net/packet/af_packet.c 	if (po->tx_ring.pg_vec)
tx_ring          3051 net/packet/af_packet.c 	if (po->tx_ring.pg_vec) {
tx_ring          3743 net/packet/af_packet.c 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
tx_ring          3763 net/packet/af_packet.c 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
tx_ring          3782 net/packet/af_packet.c 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
tx_ring          3831 net/packet/af_packet.c 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
tx_ring          3894 net/packet/af_packet.c 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
tx_ring          4179 net/packet/af_packet.c 	if (po->tx_ring.pg_vec) {
tx_ring          4180 net/packet/af_packet.c 		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
tx_ring          4286 net/packet/af_packet.c 		int closing, int tx_ring)
tx_ring          4299 net/packet/af_packet.c 	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
tx_ring          4300 net/packet/af_packet.c 	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
tx_ring          4362 net/packet/af_packet.c 			if (!tx_ring) {
tx_ring          4376 net/packet/af_packet.c 			if (!tx_ring) {
tx_ring          4439 net/packet/af_packet.c 		if (!tx_ring)
tx_ring          4468 net/packet/af_packet.c 	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
tx_ring          4484 net/packet/af_packet.c 	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
tx_ring           106 net/packet/diag.c 		ret = pdiag_put_ring(&po->tx_ring, po->tp_version,
tx_ring           114 net/packet/internal.h 	struct packet_ring_buffer	tx_ring;
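
The net/packet entries are the kernel side of the memory-mapped PACKET_TX_RING: pg_vec holds the frames, each frame carries a status word (TP_STATUS_AVAILABLE and friends), and a per-cpu pending counter tracks frames handed to the driver. For context, here is a compact user-space sketch of setting up such a ring with the TPACKET_V1 layout; it needs CAP_NET_RAW, omits error recovery, interface binding and frame construction, and the block/frame sizes are arbitrary.

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Describe the ring: tp_frame_nr frames of tp_frame_size bytes,
	 * grouped into tp_block_nr blocks of tp_block_size bytes each. */
	struct tpacket_req req = {
		.tp_block_size = 4096,
		.tp_block_nr   = 4,
		.tp_frame_size = 2048,
		.tp_frame_nr   = 8,   /* (block_size / frame_size) * block_nr */
	};
	if (setsockopt(fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req)) < 0) {
		perror("PACKET_TX_RING");
		return 1;
	}

	/* Map the ring; this is the user-space view of po->tx_ring.pg_vec. */
	size_t map_len = (size_t)req.tp_block_size * req.tp_block_nr;
	void *ring = mmap(NULL, map_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Walk the frames: a frame whose tp_status is TP_STATUS_AVAILABLE may
	 * be filled with packet data and flipped to TP_STATUS_SEND_REQUEST,
	 * after which send(fd, NULL, 0, 0) asks the kernel to drain the ring. */
	for (unsigned int i = 0; i < req.tp_frame_nr; i++) {
		struct tpacket_hdr *hdr = (struct tpacket_hdr *)
			((char *)ring + (size_t)i * req.tp_frame_size);
		printf("frame %u status %lu\n", i, hdr->tp_status);
	}

	munmap(ring, map_len);
	close(fd);
	return 0;
}
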