Lines Matching refs: tx_ring

80 void i40evf_clean_tx_ring(struct i40e_ring *tx_ring) in i40evf_clean_tx_ring() argument
86 if (!tx_ring->tx_bi) in i40evf_clean_tx_ring()
90 for (i = 0; i < tx_ring->count; i++) in i40evf_clean_tx_ring()
91 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); in i40evf_clean_tx_ring()
93 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40evf_clean_tx_ring()
94 memset(tx_ring->tx_bi, 0, bi_size); in i40evf_clean_tx_ring()
97 memset(tx_ring->desc, 0, tx_ring->size); in i40evf_clean_tx_ring()
99 tx_ring->next_to_use = 0; in i40evf_clean_tx_ring()
100 tx_ring->next_to_clean = 0; in i40evf_clean_tx_ring()
102 if (!tx_ring->netdev) in i40evf_clean_tx_ring()
106 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, in i40evf_clean_tx_ring()
107 tx_ring->queue_index)); in i40evf_clean_tx_ring()
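
Lines 80-107 together are the Tx ring cleanup path: every tx_bi entry is unmapped and freed, the software buffer-info array and the descriptor memory are both zeroed, the ring indices are reset, and the queue's byte-queue-limit accounting is cleared when a netdev is attached. A minimal reconstruction of that flow is sketched below; the lines the ref filter dropped (locals, early returns, braces) are filled in from the obvious control flow and should be read as assumptions, not verbatim source.

    void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
    {
            unsigned long bi_size;
            u16 i;

            /* nothing to do if the buffer-info array was never allocated */
            if (!tx_ring->tx_bi)
                    return;

            /* unmap DMA and free any skb still attached to each entry */
            for (i = 0; i < tx_ring->count; i++)
                    i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

            bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
            memset(tx_ring->tx_bi, 0, bi_size);

            /* zero the descriptor ring itself */
            memset(tx_ring->desc, 0, tx_ring->size);

            tx_ring->next_to_use = 0;
            tx_ring->next_to_clean = 0;

            if (!tx_ring->netdev)
                    return;

            /* reset byte-queue-limit accounting for this Tx queue */
            netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
                                                      tx_ring->queue_index));
    }
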
116 void i40evf_free_tx_resources(struct i40e_ring *tx_ring) in i40evf_free_tx_resources() argument
118 i40evf_clean_tx_ring(tx_ring); in i40evf_free_tx_resources()
119 kfree(tx_ring->tx_bi); in i40evf_free_tx_resources()
120 tx_ring->tx_bi = NULL; in i40evf_free_tx_resources()
122 if (tx_ring->desc) { in i40evf_free_tx_resources()
123 dma_free_coherent(tx_ring->dev, tx_ring->size, in i40evf_free_tx_resources()
124 tx_ring->desc, tx_ring->dma); in i40evf_free_tx_resources()
125 tx_ring->desc = NULL; in i40evf_free_tx_resources()
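
Lines 116-125 are nearly the whole teardown path: it cleans the ring first, frees the software buffer-info array, and then returns the coherent descriptor area to the DMA API. Only braces and blank lines were dropped by the filter, so the sketch below is close to complete, with just the closing structure assumed.

    void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
    {
            i40evf_clean_tx_ring(tx_ring);
            kfree(tx_ring->tx_bi);
            tx_ring->tx_bi = NULL;

            if (tx_ring->desc) {
                    /* hand the coherent descriptor area back to the DMA API */
                    dma_free_coherent(tx_ring->dev, tx_ring->size,
                                      tx_ring->desc, tx_ring->dma);
                    tx_ring->desc = NULL;
            }
    }
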
136 static inline u32 i40e_get_head(struct i40e_ring *tx_ring) in i40e_get_head() argument
138 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; in i40e_get_head()
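
Line 138 is the key detail of head write-back: the hardware reports its completion position in a u32 that sits immediately after the last descriptor, so it is located by pointer arithmetic on the descriptor base rather than by a register read. A sketch under that assumption follows; the little-endian load on the return line is the usual pattern and is filled in here, not taken from the listing.

    static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
    {
            /* the HEAD write-back word lives just past the last descriptor;
             * the ring was sized with an extra u32 for exactly this purpose
             */
            void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

            return le32_to_cpu(*(volatile __le32 *)head);
    }
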
168 static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) in i40e_check_tx_hang() argument
170 u32 tx_done = tx_ring->stats.packets; in i40e_check_tx_hang()
171 u32 tx_done_old = tx_ring->tx_stats.tx_done_old; in i40e_check_tx_hang()
172 u32 tx_pending = i40e_get_tx_pending(tx_ring); in i40e_check_tx_hang()
175 clear_check_for_tx_hang(tx_ring); in i40e_check_tx_hang()
191 &tx_ring->state); in i40e_check_tx_hang()
195 tx_ring->tx_stats.tx_done_old = tx_done; in i40e_check_tx_hang()
196 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); in i40e_check_tx_hang()
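
Lines 168-196 implement a two-pass hang check: the completed-packet count is compared against the value recorded on the previous pass, and a hang is only reported when the count has not moved, work is still pending, and the ARMED bit was already set. The branch structure below is a hedged reconstruction; the dropped conditional lines are assumptions based on that description.

    static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
    {
            u32 tx_done = tx_ring->stats.packets;
            u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
            u32 tx_pending = i40e_get_tx_pending(tx_ring);
            bool ret = false;

            clear_check_for_tx_hang(tx_ring);

            if ((tx_done_old == tx_done) && tx_pending) {
                    /* no completions since the last check and work is still
                     * queued: only report a hang on the second such pass,
                     * i.e. when the ARMED bit was already set
                     */
                    ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
                                           &tx_ring->state);
            } else {
                    /* progress was made (or nothing is pending): record the
                     * new completion count and disarm the check
                     */
                    tx_ring->tx_stats.tx_done_old = tx_done;
                    clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
            }

            return ret;
    }
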
211 static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) in i40e_clean_tx_irq() argument
213 u16 i = tx_ring->next_to_clean; in i40e_clean_tx_irq()
220 tx_buf = &tx_ring->tx_bi[i]; in i40e_clean_tx_irq()
221 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_clean_tx_irq()
222 i -= tx_ring->count; in i40e_clean_tx_irq()
224 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); in i40e_clean_tx_irq()
251 dma_unmap_single(tx_ring->dev, in i40e_clean_tx_irq()
267 i -= tx_ring->count; in i40e_clean_tx_irq()
268 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
269 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_clean_tx_irq()
274 dma_unmap_page(tx_ring->dev, in i40e_clean_tx_irq()
287 i -= tx_ring->count; in i40e_clean_tx_irq()
288 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
289 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_clean_tx_irq()
298 i += tx_ring->count; in i40e_clean_tx_irq()
299 tx_ring->next_to_clean = i; in i40e_clean_tx_irq()
300 u64_stats_update_begin(&tx_ring->syncp); in i40e_clean_tx_irq()
301 tx_ring->stats.bytes += total_bytes; in i40e_clean_tx_irq()
302 tx_ring->stats.packets += total_packets; in i40e_clean_tx_irq()
303 u64_stats_update_end(&tx_ring->syncp); in i40e_clean_tx_irq()
304 tx_ring->q_vector->tx.total_bytes += total_bytes; in i40e_clean_tx_irq()
305 tx_ring->q_vector->tx.total_packets += total_packets; in i40e_clean_tx_irq()
309 !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && in i40e_clean_tx_irq()
310 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) in i40e_clean_tx_irq()
311 tx_ring->arm_wb = true; in i40e_clean_tx_irq()
313 tx_ring->arm_wb = false; in i40e_clean_tx_irq()
315 if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { in i40e_clean_tx_irq()
317 dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" in i40e_clean_tx_irq()
322 tx_ring->vsi->seid, in i40e_clean_tx_irq()
323 tx_ring->queue_index, in i40e_clean_tx_irq()
324 tx_ring->next_to_use, i); in i40e_clean_tx_irq()
325 dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n" in i40e_clean_tx_irq()
328 tx_ring->tx_bi[i].time_stamp, jiffies); in i40e_clean_tx_irq()
330 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in i40e_clean_tx_irq()
332 dev_info(tx_ring->dev, in i40e_clean_tx_irq()
334 tx_ring->queue_index); in i40e_clean_tx_irq()
336 tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev); in i40e_clean_tx_irq()
342 netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, in i40e_clean_tx_irq()
343 tx_ring->queue_index), in i40e_clean_tx_irq()
347 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in i40e_clean_tx_irq()
348 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { in i40e_clean_tx_irq()
353 if (__netif_subqueue_stopped(tx_ring->netdev, in i40e_clean_tx_irq()
354 tx_ring->queue_index) && in i40e_clean_tx_irq()
355 !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) { in i40e_clean_tx_irq()
356 netif_wake_subqueue(tx_ring->netdev, in i40e_clean_tx_irq()
357 tx_ring->queue_index); in i40e_clean_tx_irq()
358 ++tx_ring->tx_stats.restart_queue; in i40e_clean_tx_irq()
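
One idiom in lines 213-299 is worth spelling out: `i` is biased downward by `tx_ring->count` before the cleaning loop, so the wrap test inside the loop is simply `if (!i)` rather than a compare against `count`, and the true ring index is recovered at the end by adding `count` back (line 298). The standalone sketch below demonstrates the same trick with made-up names and a tiny ring; it is an illustration of the indexing, not driver code.

    #include <stdio.h>

    #define RING_COUNT 8u

    int main(void)
    {
            unsigned short next_to_clean = 6;   /* where cleaning starts */
            unsigned short i = next_to_clean;
            unsigned int step;

            i -= RING_COUNT;    /* bias: i now holds "index - count" (mod 2^16) */

            /* clean five entries, wrapping past the end of the ring */
            for (step = 0; step < 5; step++) {
                    unsigned short ring_index = i + RING_COUNT;

                    printf("cleaning entry %u\n", (unsigned int)ring_index);

                    i++;
                    if (!i)             /* i hit 0: we wrapped past the end */
                            i -= RING_COUNT;
            }

            i += RING_COUNT;    /* un-bias to get the new next_to_clean */
            printf("next_to_clean = %u\n", (unsigned int)i);
            return 0;
    }
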
483 int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring) in i40evf_setup_tx_descriptors() argument
485 struct device *dev = tx_ring->dev; in i40evf_setup_tx_descriptors()
491 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40evf_setup_tx_descriptors()
492 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); in i40evf_setup_tx_descriptors()
493 if (!tx_ring->tx_bi) in i40evf_setup_tx_descriptors()
497 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); in i40evf_setup_tx_descriptors()
501 tx_ring->size += sizeof(u32); in i40evf_setup_tx_descriptors()
502 tx_ring->size = ALIGN(tx_ring->size, 4096); in i40evf_setup_tx_descriptors()
503 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in i40evf_setup_tx_descriptors()
504 &tx_ring->dma, GFP_KERNEL); in i40evf_setup_tx_descriptors()
505 if (!tx_ring->desc) { in i40evf_setup_tx_descriptors()
507 tx_ring->size); in i40evf_setup_tx_descriptors()
511 tx_ring->next_to_use = 0; in i40evf_setup_tx_descriptors()
512 tx_ring->next_to_clean = 0; in i40evf_setup_tx_descriptors()
516 kfree(tx_ring->tx_bi); in i40evf_setup_tx_descriptors()
517 tx_ring->tx_bi = NULL; in i40evf_setup_tx_descriptors()
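
Lines 483-512 contain the allocation math: the buffer-info array is `count * sizeof(struct i40e_tx_buffer)` from kzalloc(), while the descriptor area is `count * sizeof(struct i40e_tx_desc)` plus one extra u32 for the head write-back word (which is what lets i40e_get_head() read just past the last descriptor), rounded up to a 4 KiB boundary before dma_alloc_coherent(). The reconstruction below fills in the error labels, the message text, and the return values from the usual pattern; treat those parts as assumptions.

    int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
    {
            struct device *dev = tx_ring->dev;
            int bi_size;

            if (!dev)
                    return -ENOMEM;

            /* software-side buffer-info array, one entry per descriptor */
            bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
            tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
            if (!tx_ring->tx_bi)
                    goto err;

            /* descriptor ring plus one u32 for head write-back,
             * rounded up to the nearest 4 KiB
             */
            tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
            tx_ring->size += sizeof(u32);
            tx_ring->size = ALIGN(tx_ring->size, 4096);
            tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
                                               &tx_ring->dma, GFP_KERNEL);
            if (!tx_ring->desc) {
                    dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
                             tx_ring->size);
                    goto err;
            }

            tx_ring->next_to_use = 0;
            tx_ring->next_to_clean = 0;
            return 0;

    err:
            kfree(tx_ring->tx_bi);
            tx_ring->tx_bi = NULL;
            return -ENOMEM;
    }
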
1367 struct i40e_ring *tx_ring, in i40e_tx_prepare_vlan_flags() argument
1374 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { in i40e_tx_prepare_vlan_flags()
1418 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_tso() argument
1478 struct i40e_ring *tx_ring, in i40e_tx_enable_csum() argument
1594 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, in i40e_create_tx_ctx() argument
1599 int i = tx_ring->next_to_use; in i40e_create_tx_ctx()
1606 context_desc = I40E_TX_CTXTDESC(tx_ring, i); in i40e_create_tx_ctx()
1609 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_create_tx_ctx()
1684 static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_tx_map() argument
1693 u16 i = tx_ring->next_to_use; in i40e_tx_map()
1715 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in i40e_tx_map()
1717 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_tx_map()
1721 if (dma_mapping_error(tx_ring->dev, dma)) in i40e_tx_map()
1737 if (i == tx_ring->count) { in i40e_tx_map()
1738 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_tx_map()
1756 if (i == tx_ring->count) { in i40e_tx_map()
1757 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_tx_map()
1764 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in i40e_tx_map()
1767 tx_bi = &tx_ring->tx_bi[i]; in i40e_tx_map()
1775 (first <= &tx_ring->tx_bi[i]) && in i40e_tx_map()
1776 (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { in i40e_tx_map()
1788 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, in i40e_tx_map()
1789 tx_ring->queue_index), in i40e_tx_map()
1806 if (i == tx_ring->count) in i40e_tx_map()
1809 tx_ring->next_to_use = i; in i40e_tx_map()
1812 writel(i, tx_ring->tail); in i40e_tx_map()
1817 dev_info(tx_ring->dev, "TX DMA map failed\n"); in i40e_tx_map()
1821 tx_bi = &tx_ring->tx_bi[i]; in i40e_tx_map()
1822 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); in i40e_tx_map()
1826 i = tx_ring->count; in i40e_tx_map()
1830 tx_ring->next_to_use = i; in i40e_tx_map()
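
Lines 1684-1830 are the descriptor fill path: the skb head is mapped with dma_map_single(), each page fragment with skb_frag_dma_map(), descriptors are written one after another with the index wrapping back to slot 0 at `tx_ring->count`, the queued byte count is reported via netdev_tx_sent_queue(), and only then is the tail register bumped with writel() so hardware sees fully written descriptors; on a mapping error the code walks back and unmaps what it set up. The standalone sketch below shows just the fill-wrap-publish shape with invented names and a fake descriptor; it is not the driver's mapping code.

    #include <stdint.h>
    #include <stdio.h>

    #define RING_COUNT 8u

    /* stand-in for a hardware Tx descriptor */
    struct demo_desc {
            uint64_t addr;
            uint32_t len;
    };

    static struct demo_desc ring[RING_COUNT];
    static unsigned int next_to_use;

    /* queue one packet made of several buffers: write a descriptor per
     * buffer, wrap the index at the end of the ring, and "bump the tail"
     * only after every descriptor for the packet has been written
     */
    static void demo_tx_map(const uint64_t *addrs, const uint32_t *lens,
                            unsigned int nbufs)
    {
            unsigned int i = next_to_use;
            unsigned int f;

            for (f = 0; f < nbufs; f++) {
                    ring[i].addr = addrs[f];
                    ring[i].len = lens[f];

                    i++;
                    if (i == RING_COUNT)    /* wrap back to the first slot */
                            i = 0;
            }

            next_to_use = i;
            /* in the driver this is where a write barrier makes the
             * descriptors visible and writel(i, tx_ring->tail) notifies
             * the hardware
             */
            printf("tail <- %u\n", i);
    }

    int main(void)
    {
            uint64_t addrs[3] = { 0x1000, 0x2000, 0x3000 };
            uint32_t lens[3] = { 256, 4096, 60 };

            demo_tx_map(addrs, lens, 3);
            return 0;
    }
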
1840 static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) in __i40e_maybe_stop_tx() argument
1842 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40e_maybe_stop_tx()
1847 if (likely(I40E_DESC_UNUSED(tx_ring) < size)) in __i40e_maybe_stop_tx()
1851 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40e_maybe_stop_tx()
1852 ++tx_ring->tx_stats.restart_queue; in __i40e_maybe_stop_tx()
1863 static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) in i40e_maybe_stop_tx() argument
1865 if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) in i40e_maybe_stop_tx()
1867 return __i40e_maybe_stop_tx(tx_ring, size); in i40e_maybe_stop_tx()
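
Lines 1840-1867 are the standard stop/re-check/restart dance that avoids a stalled queue: the fast path only reads the free-descriptor count, while the slow path stops the subqueue first, re-checks after a barrier so a completion racing with the stop cannot leave the queue asleep, and restarts it if room appeared in that window. The barrier and the return values on the dropped lines are filled in below as assumptions.

    static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
    {
            netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

            /* the stop must be visible before the free count is re-read,
             * otherwise a concurrent completion could miss the wakeup
             */
            smp_mb();

            /* still not enough room: stay stopped */
            if (likely(I40E_DESC_UNUSED(tx_ring) < size))
                    return -EBUSY;

            /* space freed up while stopping: restart the queue */
            netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
            ++tx_ring->tx_stats.restart_queue;
            return 0;
    }

    static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
    {
            if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
                    return 0;
            return __i40e_maybe_stop_tx(tx_ring, size);
    }
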
1880 struct i40e_ring *tx_ring) in i40e_xmit_descriptor_count() argument
1895 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { in i40e_xmit_descriptor_count()
1896 tx_ring->tx_stats.tx_busy++; in i40e_xmit_descriptor_count()
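
Line 1895 reserves `count + 4 + 1` free slots before committing to transmit: `count` data descriptors for the skb head and its fragments (each capped at the per-descriptor maximum), one slot for a possible context descriptor, and a few slots of slack; if that much room is not free, the packet is counted as tx_busy and retried later. The standalone sketch below shows the shape of that estimate; the per-descriptor maximum used here is illustrative, not the driver's exact constant.

    #include <stdio.h>

    /* illustrative stand-in for the driver's per-descriptor payload limit */
    #define MAX_DATA_PER_TXD (16 * 1024 - 1)

    /* how many data descriptors a buffer of a given size needs */
    #define TXD_USE_COUNT(s) (((s) + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD)

    int main(void)
    {
            unsigned int head_len = 1514;               /* linear skb data */
            unsigned int frag_len[2] = { 32768, 60 };   /* two paged frags */
            unsigned int count, f;

            count = TXD_USE_COUNT(head_len);
            for (f = 0; f < 2; f++)
                    count += TXD_USE_COUNT(frag_len[f]);

            /* the driver asks for count + 4 + 1 free descriptors: one for a
             * context descriptor plus slack, before giving up with tx_busy
             */
            printf("data descriptors: %u, slots required: %u\n",
                   count, count + 4 + 1);
            return 0;
    }
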
1910 struct i40e_ring *tx_ring) in i40e_xmit_frame_ring() argument
1921 if (0 == i40e_xmit_descriptor_count(skb, tx_ring)) in i40e_xmit_frame_ring()
1925 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) in i40e_xmit_frame_ring()
1932 first = &tx_ring->tx_bi[tx_ring->next_to_use]; in i40e_xmit_frame_ring()
1940 tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len, in i40e_xmit_frame_ring()
1962 tx_ring, &cd_tunneling); in i40e_xmit_frame_ring()
1965 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, in i40e_xmit_frame_ring()
1968 i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, in i40e_xmit_frame_ring()
1971 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); in i40e_xmit_frame_ring()
1990 struct i40e_ring *tx_ring = adapter->tx_rings[skb->queue_mapping]; in i40evf_xmit_frame() local
2002 return i40e_xmit_frame_ring(skb, tx_ring); in i40evf_xmit_frame()
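
Lines 1990-2002 show the ndo_start_xmit hook itself: it picks the per-queue Tx ring out of adapter->tx_rings[] using skb->queue_mapping and hands the skb to i40e_xmit_frame_ring(). A short sketch of that dispatch follows; the adapter lookup via netdev_priv() and the handling of undersized frames are assumptions filled in from the usual pattern, not taken from the listing.

    static netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb,
                                         struct net_device *netdev)
    {
            struct i40evf_adapter *adapter = netdev_priv(netdev);
            struct i40e_ring *tx_ring = adapter->tx_rings[skb->queue_mapping];

            /* (the real function also pads frames shorter than the hardware
             *  minimum before handing off; that detail is elided here)
             */
            return i40e_xmit_frame_ring(skb, tx_ring);
    }
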