Lines matching refs: tx_ring
80 void i40evf_clean_tx_ring(struct i40e_ring *tx_ring) in i40evf_clean_tx_ring() argument
86 if (!tx_ring->tx_bi) in i40evf_clean_tx_ring()
90 for (i = 0; i < tx_ring->count; i++) in i40evf_clean_tx_ring()
91 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); in i40evf_clean_tx_ring()
93 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40evf_clean_tx_ring()
94 memset(tx_ring->tx_bi, 0, bi_size); in i40evf_clean_tx_ring()
97 memset(tx_ring->desc, 0, tx_ring->size); in i40evf_clean_tx_ring()
99 tx_ring->next_to_use = 0; in i40evf_clean_tx_ring()
100 tx_ring->next_to_clean = 0; in i40evf_clean_tx_ring()
102 if (!tx_ring->netdev) in i40evf_clean_tx_ring()
106 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, in i40evf_clean_tx_ring()
107 tx_ring->queue_index)); in i40evf_clean_tx_ring()
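The i40evf_clean_tx_ring() fragments above release every queued buffer, zero both the buffer-info array and the descriptor memory, and reset the ring indices before clearing the netdev queue's byte-queue-limit state. A minimal userspace sketch of that "clean but keep the allocations" pattern, using hypothetical simplified types rather than the driver's structs:

    #include <stdlib.h>
    #include <string.h>

    struct buf_info { void *skb; };            /* stand-in for struct i40e_tx_buffer */

    struct ring {
        struct buf_info *tx_bi;                /* per-descriptor bookkeeping */
        void *desc;                            /* descriptor memory          */
        unsigned int count;                    /* descriptors in the ring    */
        size_t size;                           /* bytes of descriptor memory */
        unsigned int next_to_use, next_to_clean;
    };

    /* Drop all queued buffers but keep the ring's memory for reuse. */
    static void ring_clean(struct ring *r)
    {
        unsigned int i;

        if (!r->tx_bi)
            return;
        for (i = 0; i < r->count; i++) {       /* unmap/free each buffer */
            free(r->tx_bi[i].skb);
            r->tx_bi[i].skb = NULL;
        }
        memset(r->tx_bi, 0, sizeof(*r->tx_bi) * r->count);
        memset(r->desc, 0, r->size);
        r->next_to_use = 0;
        r->next_to_clean = 0;
    }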
116 void i40evf_free_tx_resources(struct i40e_ring *tx_ring) in i40evf_free_tx_resources() argument
118 i40evf_clean_tx_ring(tx_ring); in i40evf_free_tx_resources()
119 kfree(tx_ring->tx_bi); in i40evf_free_tx_resources()
120 tx_ring->tx_bi = NULL; in i40evf_free_tx_resources()
122 if (tx_ring->desc) { in i40evf_free_tx_resources()
123 dma_free_coherent(tx_ring->dev, tx_ring->size, in i40evf_free_tx_resources()
124 tx_ring->desc, tx_ring->dma); in i40evf_free_tx_resources()
125 tx_ring->desc = NULL; in i40evf_free_tx_resources()
136 static inline u32 i40e_get_head(struct i40e_ring *tx_ring) in i40e_get_head() argument
138 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; in i40e_get_head()
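i40e_get_head() depends on the head-writeback word sitting immediately after the last real descriptor: the ring is sized with one extra u32 (see i40evf_setup_tx_descriptors() further down), and the hardware writes its completion index there. A hedged sketch of the pointer arithmetic, with a placeholder 16-byte descriptor type; the driver additionally byte-swaps the value with le32_to_cpu():

    #include <stdint.h>

    struct tx_desc { uint64_t buffer_addr; uint64_t cmd_type_offset_bsz; };  /* 16-byte stand-in */

    struct ring { void *desc; unsigned int count; };

    /* The writeback word lives one descriptor past the end of the ring. */
    static uint32_t ring_get_head(const struct ring *r)
    {
        const void *head = (const struct tx_desc *)r->desc + r->count;

        return *(const volatile uint32_t *)head;
    }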
152 static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) in i40e_clean_tx_irq() argument
154 u16 i = tx_ring->next_to_clean; in i40e_clean_tx_irq()
161 tx_buf = &tx_ring->tx_bi[i]; in i40e_clean_tx_irq()
162 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_clean_tx_irq()
163 i -= tx_ring->count; in i40e_clean_tx_irq()
165 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); in i40e_clean_tx_irq()
192 dma_unmap_single(tx_ring->dev, in i40e_clean_tx_irq()
208 i -= tx_ring->count; in i40e_clean_tx_irq()
209 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
210 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_clean_tx_irq()
215 dma_unmap_page(tx_ring->dev, in i40e_clean_tx_irq()
228 i -= tx_ring->count; in i40e_clean_tx_irq()
229 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
230 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_clean_tx_irq()
239 i += tx_ring->count; in i40e_clean_tx_irq()
240 tx_ring->next_to_clean = i; in i40e_clean_tx_irq()
241 u64_stats_update_begin(&tx_ring->syncp); in i40e_clean_tx_irq()
242 tx_ring->stats.bytes += total_bytes; in i40e_clean_tx_irq()
243 tx_ring->stats.packets += total_packets; in i40e_clean_tx_irq()
244 u64_stats_update_end(&tx_ring->syncp); in i40e_clean_tx_irq()
245 tx_ring->q_vector->tx.total_bytes += total_bytes; in i40e_clean_tx_irq()
246 tx_ring->q_vector->tx.total_packets += total_packets; in i40e_clean_tx_irq()
254 !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && in i40e_clean_tx_irq()
255 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) in i40e_clean_tx_irq()
256 tx_ring->arm_wb = true; in i40e_clean_tx_irq()
258 netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, in i40e_clean_tx_irq()
259 tx_ring->queue_index), in i40e_clean_tx_irq()
263 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in i40e_clean_tx_irq()
264 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { in i40e_clean_tx_irq()
269 if (__netif_subqueue_stopped(tx_ring->netdev, in i40e_clean_tx_irq()
270 tx_ring->queue_index) && in i40e_clean_tx_irq()
271 !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) { in i40e_clean_tx_irq()
272 netif_wake_subqueue(tx_ring->netdev, in i40e_clean_tx_irq()
273 tx_ring->queue_index); in i40e_clean_tx_irq()
274 ++tx_ring->tx_stats.restart_queue; in i40e_clean_tx_irq()
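i40e_clean_tx_irq() walks completed descriptors from next_to_clean, unmaps each buffer, accumulates byte/packet totals under the u64_stats seqcount, reports completions to the byte-queue-limit layer, and restarts the subqueue once at least TX_WAKE_THRESHOLD descriptors are free again. A simplified, hypothetical model of that completion walk (one buffer per descriptor for brevity; the real ring tracks one skb per frame plus per-fragment DMA mappings, no BQL here):

    #include <stdlib.h>

    struct buf_info { void *skb; unsigned int bytecount; };

    struct ring {
        struct buf_info *tx_bi;
        unsigned int count;
        unsigned int next_to_clean;
        unsigned long long bytes, packets;     /* stats totals */
    };

    /* Reclaim finished buffers up to 'head', the first index the hardware
     * has not completed yet (read from the writeback word in the driver). */
    static unsigned int ring_clean_irq(struct ring *r, unsigned int head)
    {
        unsigned int i = r->next_to_clean;
        unsigned int cleaned = 0;

        while (i != head) {
            struct buf_info *bi = &r->tx_bi[i];

            r->bytes += bi->bytecount;         /* totals published under syncp */
            r->packets++;
            free(bi->skb);                     /* dma_unmap + free skb */
            bi->skb = NULL;
            cleaned++;
            if (++i == r->count)               /* wrap at the end of the ring */
                i = 0;
        }
        r->next_to_clean = i;
        return cleaned;   /* caller wakes the queue once enough slots are free */
    }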
423 int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring) in i40evf_setup_tx_descriptors() argument
425 struct device *dev = tx_ring->dev; in i40evf_setup_tx_descriptors()
432 WARN_ON(tx_ring->tx_bi); in i40evf_setup_tx_descriptors()
433 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40evf_setup_tx_descriptors()
434 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); in i40evf_setup_tx_descriptors()
435 if (!tx_ring->tx_bi) in i40evf_setup_tx_descriptors()
439 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); in i40evf_setup_tx_descriptors()
443 tx_ring->size += sizeof(u32); in i40evf_setup_tx_descriptors()
444 tx_ring->size = ALIGN(tx_ring->size, 4096); in i40evf_setup_tx_descriptors()
445 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in i40evf_setup_tx_descriptors()
446 &tx_ring->dma, GFP_KERNEL); in i40evf_setup_tx_descriptors()
447 if (!tx_ring->desc) { in i40evf_setup_tx_descriptors()
449 tx_ring->size); in i40evf_setup_tx_descriptors()
453 tx_ring->next_to_use = 0; in i40evf_setup_tx_descriptors()
454 tx_ring->next_to_clean = 0; in i40evf_setup_tx_descriptors()
458 kfree(tx_ring->tx_bi); in i40evf_setup_tx_descriptors()
459 tx_ring->tx_bi = NULL; in i40evf_setup_tx_descriptors()
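The allocation math in i40evf_setup_tx_descriptors() is: count descriptors, plus one extra u32 for the head writeback, rounded up to a 4 KiB boundary before the coherent DMA allocation. A small sketch of that sizing, assuming the 16-byte i40e_tx_desc layout and an example ring size of 512:

    #include <stdio.h>
    #include <stdint.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        size_t desc_size = 16;                 /* sizeof(struct i40e_tx_desc) */
        unsigned int count = 512;              /* example ring size */
        size_t size = count * desc_size;

        size += sizeof(uint32_t);              /* room for the head writeback word */
        size = ALIGN_UP(size, 4096);           /* page-align the DMA buffer */
        printf("ring bytes: %zu\n", size);     /* prints 12288 for 512 descriptors */
        return 0;
    }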
1394 struct i40e_ring *tx_ring, in i40evf_tx_prepare_vlan_flags() argument
1401 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { in i40evf_tx_prepare_vlan_flags()
1444 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_tso() argument
1504 struct i40e_ring *tx_ring, in i40e_tx_enable_csum() argument
1559 if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) && in i40e_tx_enable_csum()
1633 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, in i40e_create_tx_ctx() argument
1638 int i = tx_ring->next_to_use; in i40e_create_tx_ctx()
1645 context_desc = I40E_TX_CTXTDESC(tx_ring, i); in i40e_create_tx_ctx()
1648 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_create_tx_ctx()
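i40e_create_tx_ctx() takes the descriptor at next_to_use, fills it as a context descriptor (TSO/VLAN metadata), and advances next_to_use, wrapping back to zero at the end of the ring. The wrap idiom, sketched with hypothetical fields:

    struct ring { unsigned int next_to_use; unsigned int count; };

    /* Claim one descriptor slot and advance next_to_use, wrapping at count. */
    static unsigned int ring_claim_slot(struct ring *r)
    {
        unsigned int i = r->next_to_use;

        r->next_to_use = (i + 1 < r->count) ? i + 1 : 0;
        return i;                              /* caller writes descriptor i */
    }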
1720 static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) in __i40evf_maybe_stop_tx() argument
1722 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40evf_maybe_stop_tx()
1727 if (likely(I40E_DESC_UNUSED(tx_ring) < size)) in __i40evf_maybe_stop_tx()
1731 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40evf_maybe_stop_tx()
1732 ++tx_ring->tx_stats.restart_queue; in __i40evf_maybe_stop_tx()
1743 static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) in i40evf_maybe_stop_tx() argument
1745 if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) in i40evf_maybe_stop_tx()
1747 return __i40evf_maybe_stop_tx(tx_ring, size); in i40evf_maybe_stop_tx()
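The i40evf_maybe_stop_tx()/__i40evf_maybe_stop_tx() pair implements the usual stop/recheck/restart dance: take the fast path if enough descriptors are free, otherwise stop the subqueue, make the stop visible before rereading the free count (smp_mb() in the driver), and restart the queue if the cleanup path freed room in the meantime. A hedged userspace sketch of the same pattern with hypothetical fields; ring_unused() mirrors the I40E_DESC_UNUSED() arithmetic:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct ring {
        unsigned int count;
        _Atomic unsigned int next_to_use, next_to_clean;
        bool stopped;
    };

    /* Free slots between consumer and producer, as in I40E_DESC_UNUSED(). */
    static unsigned int ring_unused(struct ring *r)
    {
        unsigned int ntc = atomic_load(&r->next_to_clean);
        unsigned int ntu = atomic_load(&r->next_to_use);

        return ((ntc > ntu) ? 0 : r->count) + ntc - ntu - 1;
    }

    static int ring_maybe_stop(struct ring *r, unsigned int needed)
    {
        if (ring_unused(r) >= needed)
            return 0;                              /* fast path: enough room   */

        r->stopped = true;                         /* netif_stop_subqueue()    */
        atomic_thread_fence(memory_order_seq_cst); /* smp_mb() in the driver   */

        if (ring_unused(r) < needed)
            return -1;                             /* still full: stay stopped */

        r->stopped = false;                        /* netif_start_subqueue()   */
        return 0;
    }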
1760 static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40evf_tx_map() argument
1769 u16 i = tx_ring->next_to_use; in i40evf_tx_map()
1791 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in i40evf_tx_map()
1793 tx_desc = I40E_TX_DESC(tx_ring, i); in i40evf_tx_map()
1797 if (dma_mapping_error(tx_ring->dev, dma)) in i40evf_tx_map()
1813 if (i == tx_ring->count) { in i40evf_tx_map()
1814 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40evf_tx_map()
1832 if (i == tx_ring->count) { in i40evf_tx_map()
1833 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40evf_tx_map()
1840 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in i40evf_tx_map()
1843 tx_bi = &tx_ring->tx_bi[i]; in i40evf_tx_map()
1851 (first <= &tx_ring->tx_bi[i]) && in i40evf_tx_map()
1852 (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { in i40evf_tx_map()
1864 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, in i40evf_tx_map()
1865 tx_ring->queue_index), in i40evf_tx_map()
1879 if (i == tx_ring->count) in i40evf_tx_map()
1882 tx_ring->next_to_use = i; in i40evf_tx_map()
1884 i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED); in i40evf_tx_map()
1887 netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, in i40evf_tx_map()
1888 tx_ring->queue_index))) in i40evf_tx_map()
1889 writel(i, tx_ring->tail); in i40evf_tx_map()
1896 dev_info(tx_ring->dev, "TX DMA map failed\n"); in i40evf_tx_map()
1900 tx_bi = &tx_ring->tx_bi[i]; in i40evf_tx_map()
1901 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); in i40evf_tx_map()
1905 i = tx_ring->count; in i40evf_tx_map()
1909 tx_ring->next_to_use = i; in i40evf_tx_map()
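i40evf_tx_map() maps the skb head with dma_map_single() and each page fragment with skb_frag_dma_map(), fills data descriptors while wrapping the index at tx_ring->count, updates next_to_use, and only writes the tail doorbell when the queue has not been stopped; on a mapping error it unwinds every descriptor it already filled. A much-reduced sketch of the fill-and-wrap loop over a hypothetical fragment array (no DMA, no multi-descriptor splits, no error unwind):

    #include <stddef.h>

    struct frag { void *data; size_t len; };

    struct tx_desc { void *addr; size_t len; };

    struct ring {
        struct tx_desc *desc;
        unsigned int count;
        unsigned int next_to_use;
        volatile unsigned int *tail;           /* doorbell register in the driver */
    };

    /* Fill one descriptor per fragment, wrapping at the end of the ring,
     * then publish the new producer index through the tail "register".  */
    static void ring_map_frags(struct ring *r, const struct frag *frags, unsigned int nr)
    {
        unsigned int i = r->next_to_use;
        unsigned int f;

        for (f = 0; f < nr; f++) {
            r->desc[i].addr = frags[f].data;   /* dma_map_*() result in the driver */
            r->desc[i].len  = frags[f].len;
            if (++i == r->count)               /* wrap like "if (i == tx_ring->count)" */
                i = 0;
        }
        r->next_to_use = i;
        *r->tail = i;                          /* writel(i, tx_ring->tail) */
    }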
1922 struct i40e_ring *tx_ring) in i40evf_xmit_descriptor_count() argument
1937 if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) { in i40evf_xmit_descriptor_count()
1938 tx_ring->tx_stats.tx_busy++; in i40evf_xmit_descriptor_count()
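i40evf_xmit_descriptor_count() adds up the descriptors needed for the skb head plus every fragment, then i40evf_maybe_stop_tx() requires count + 4 + 1 free slots: a 4-descriptor gap plus one context descriptor. A worked sketch of that arithmetic, with a hypothetical per-descriptor data limit standing in for I40E_MAX_DATA_PER_TXD:

    #include <stdio.h>

    /* Hypothetical per-descriptor data limit; the driver derives its
     * I40E_MAX_DATA_PER_TXD value from hardware constraints.          */
    #define MAX_DATA_PER_TXD  8192u

    /* Descriptors needed to carry 'size' bytes of one buffer. */
    static unsigned int txd_use_count(unsigned int size)
    {
        return (size + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD;
    }

    int main(void)
    {
        unsigned int head_len = 1400, frag_lens[] = { 32768, 4096 };
        unsigned int count = txd_use_count(head_len), f;

        for (f = 0; f < sizeof(frag_lens) / sizeof(frag_lens[0]); f++)
            count += txd_use_count(frag_lens[f]);

        /* The driver then asks for count + 4 + 1 free descriptors:
         * a 4-descriptor gap plus one context descriptor.           */
        printf("data descriptors: %u, required free slots: %u\n",
               count, count + 4 + 1);
        return 0;
    }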
1952 struct i40e_ring *tx_ring) in i40e_xmit_frame_ring() argument
1964 if (0 == i40evf_xmit_descriptor_count(skb, tx_ring)) in i40e_xmit_frame_ring()
1968 if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) in i40e_xmit_frame_ring()
1975 first = &tx_ring->tx_bi[tx_ring->next_to_use]; in i40e_xmit_frame_ring()
1983 tso = i40e_tso(tx_ring, skb, &hdr_len, in i40e_xmit_frame_ring()
1994 tx_ring->tx_stats.tx_linearize++; in i40e_xmit_frame_ring()
2006 tx_ring, &cd_tunneling); in i40e_xmit_frame_ring()
2009 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, in i40e_xmit_frame_ring()
2012 i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len, in i40e_xmit_frame_ring()
2032 struct i40e_ring *tx_ring = adapter->tx_rings[skb->queue_mapping]; in i40evf_xmit_frame() local
2044 return i40e_xmit_frame_ring(skb, tx_ring); in i40evf_xmit_frame()