Lines matching refs:tx_ring (ixgbevf driver Tx path)

196 static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,  in ixgbevf_unmap_and_free_tx_resource()  argument
202 dma_unmap_single(tx_ring->dev, in ixgbevf_unmap_and_free_tx_resource()
207 dma_unmap_page(tx_ring->dev, in ixgbevf_unmap_and_free_tx_resource()
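
The three matches above sit in the buffer teardown helper. As a rough illustration of the pattern (not the driver's exact code; the struct layout and helper name below are assumptions), a transmit buffer's DMA mapping is released with dma_unmap_single() for an skb head or dma_unmap_page() for a page fragment, and the skb itself is freed:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Hypothetical per-descriptor bookkeeping; the real struct ixgbevf_tx_buffer differs. */
struct example_tx_buffer {
        struct sk_buff *skb;
        dma_addr_t dma;
        unsigned int len;
        bool mapped_as_page;
};

static void example_unmap_and_free(struct device *dev, struct example_tx_buffer *buf)
{
        if (buf->skb) {
                dev_kfree_skb_any(buf->skb);
                buf->skb = NULL;
        }
        if (buf->len) {
                if (buf->mapped_as_page)
                        dma_unmap_page(dev, buf->dma, buf->len, DMA_TO_DEVICE);
                else
                        dma_unmap_single(dev, buf->dma, buf->len, DMA_TO_DEVICE);
                buf->len = 0;
        }
}
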
238 static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring) in ixgbevf_check_tx_hang() argument
240 u32 tx_done = ixgbevf_get_tx_completed(tx_ring); in ixgbevf_check_tx_hang()
241 u32 tx_done_old = tx_ring->tx_stats.tx_done_old; in ixgbevf_check_tx_hang()
242 u32 tx_pending = ixgbevf_get_tx_pending(tx_ring); in ixgbevf_check_tx_hang()
244 clear_check_for_tx_hang(tx_ring); in ixgbevf_check_tx_hang()
254 &tx_ring->state); in ixgbevf_check_tx_hang()
257 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state); in ixgbevf_check_tx_hang()
260 tx_ring->tx_stats.tx_done_old = tx_done; in ixgbevf_check_tx_hang()
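
The matches in ixgbevf_check_tx_hang() come from a no-progress heuristic: if the completed-packet count has not moved since the previous check while descriptors are still pending, the check arms a bit, and a second consecutive hit reports a hang. A minimal sketch, with the struct, bit index, and counter arguments all assumed rather than taken from the driver:

#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_HANG_CHECK_ARMED 0      /* bit index in ring state; an assumption */

struct example_hang_state {
        unsigned long state;
        u32 tx_done_old;
};

/* Report a hang only on the second consecutive check with no completion progress. */
static bool example_check_tx_hang(struct example_hang_state *ring,
                                  u32 tx_done, u32 tx_pending)
{
        if (ring->tx_done_old == tx_done && tx_pending)
                return test_and_set_bit(EXAMPLE_HANG_CHECK_ARMED, &ring->state);

        /* Progress was made (or the ring is idle): disarm and remember the count. */
        clear_bit(EXAMPLE_HANG_CHECK_ARMED, &ring->state);
        ring->tx_done_old = tx_done;
        return false;
}
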
291 struct ixgbevf_ring *tx_ring) in ixgbevf_clean_tx_irq() argument
297 unsigned int budget = tx_ring->count / 2; in ixgbevf_clean_tx_irq()
298 unsigned int i = tx_ring->next_to_clean; in ixgbevf_clean_tx_irq()
303 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbevf_clean_tx_irq()
304 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); in ixgbevf_clean_tx_irq()
305 i -= tx_ring->count; in ixgbevf_clean_tx_irq()
332 dma_unmap_single(tx_ring->dev, in ixgbevf_clean_tx_irq()
347 i -= tx_ring->count; in ixgbevf_clean_tx_irq()
348 tx_buffer = tx_ring->tx_buffer_info; in ixgbevf_clean_tx_irq()
349 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); in ixgbevf_clean_tx_irq()
354 dma_unmap_page(tx_ring->dev, in ixgbevf_clean_tx_irq()
367 i -= tx_ring->count; in ixgbevf_clean_tx_irq()
368 tx_buffer = tx_ring->tx_buffer_info; in ixgbevf_clean_tx_irq()
369 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); in ixgbevf_clean_tx_irq()
379 i += tx_ring->count; in ixgbevf_clean_tx_irq()
380 tx_ring->next_to_clean = i; in ixgbevf_clean_tx_irq()
381 u64_stats_update_begin(&tx_ring->syncp); in ixgbevf_clean_tx_irq()
382 tx_ring->stats.bytes += total_bytes; in ixgbevf_clean_tx_irq()
383 tx_ring->stats.packets += total_packets; in ixgbevf_clean_tx_irq()
384 u64_stats_update_end(&tx_ring->syncp); in ixgbevf_clean_tx_irq()
388 if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) { in ixgbevf_clean_tx_irq()
392 eop_desc = tx_ring->tx_buffer_info[i].next_to_watch; in ixgbevf_clean_tx_irq()
404 tx_ring->queue_index, in ixgbevf_clean_tx_irq()
405 IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)), in ixgbevf_clean_tx_irq()
406 IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)), in ixgbevf_clean_tx_irq()
407 tx_ring->next_to_use, i, in ixgbevf_clean_tx_irq()
409 tx_ring->tx_buffer_info[i].time_stamp, jiffies); in ixgbevf_clean_tx_irq()
411 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in ixgbevf_clean_tx_irq()
420 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in ixgbevf_clean_tx_irq()
421 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { in ixgbevf_clean_tx_irq()
427 if (__netif_subqueue_stopped(tx_ring->netdev, in ixgbevf_clean_tx_irq()
428 tx_ring->queue_index) && in ixgbevf_clean_tx_irq()
430 netif_wake_subqueue(tx_ring->netdev, in ixgbevf_clean_tx_irq()
431 tx_ring->queue_index); in ixgbevf_clean_tx_irq()
432 ++tx_ring->tx_stats.restart_queue; in ixgbevf_clean_tx_irq()
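
The ixgbevf_clean_tx_irq() matches all belong to the Tx completion path: reclaim sent descriptors starting at next_to_clean, fold byte and packet totals into the ring statistics under the u64_stats seqcount, then wake the subqueue once enough descriptors are free again and the link is up. Below is a condensed sketch of that tail end; the ring structure, threshold, and helper are assumptions built on the example_tx_buffer sketch above, not the driver's definitions:

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

#define EXAMPLE_TX_WAKE_THRESHOLD 32            /* an assumed wake watermark */

struct example_tx_ring {
        struct device *dev;
        struct net_device *netdev;
        u16 queue_index;
        u16 count;                              /* descriptors in the ring */
        u16 next_to_use;                        /* producer index */
        u16 next_to_clean;                      /* consumer index */
        struct example_tx_buffer *buffer_info;  /* see the first sketch */
        void *desc;                             /* hardware descriptor ring */
        dma_addr_t dma;
        unsigned int size;
        struct u64_stats_sync syncp;
        u64 bytes, packets, restart_queue;
};

/* Called at the end of a completion pass with the totals it accumulated. */
static void example_tx_irq_tail(struct example_tx_ring *ring,
                                unsigned int total_bytes,
                                unsigned int total_packets,
                                u16 unused_descs)
{
        u64_stats_update_begin(&ring->syncp);
        ring->bytes += total_bytes;
        ring->packets += total_packets;
        u64_stats_update_end(&ring->syncp);

        /* Re-enable the queue only if the link is up and space was reclaimed. */
        if (total_packets && netif_carrier_ok(ring->netdev) &&
            unused_descs >= EXAMPLE_TX_WAKE_THRESHOLD &&
            __netif_subqueue_stopped(ring->netdev, ring->queue_index)) {
                netif_wake_subqueue(ring->netdev, ring->queue_index);
                ring->restart_queue++;
        }
}
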
1310 a->tx_ring[t_idx]->next = q_vector->tx.ring; in map_vector_to_txq()
1311 q_vector->tx.ring = a->tx_ring[t_idx]; in map_vector_to_txq()
1614 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]); in ixgbevf_configure_tx()
1961 adapter->tx_ring[0]->reg_idx = def_q; in ixgbevf_configure_dcb()
2137 static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring) in ixgbevf_clean_tx_ring() argument
2143 if (!tx_ring->tx_buffer_info) in ixgbevf_clean_tx_ring()
2147 for (i = 0; i < tx_ring->count; i++) { in ixgbevf_clean_tx_ring()
2148 tx_buffer_info = &tx_ring->tx_buffer_info[i]; in ixgbevf_clean_tx_ring()
2149 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); in ixgbevf_clean_tx_ring()
2152 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; in ixgbevf_clean_tx_ring()
2153 memset(tx_ring->tx_buffer_info, 0, size); in ixgbevf_clean_tx_ring()
2155 memset(tx_ring->desc, 0, tx_ring->size); in ixgbevf_clean_tx_ring()
2179 ixgbevf_clean_tx_ring(adapter->tx_ring[i]); in ixgbevf_clean_all_tx_rings()
2212 u8 reg_idx = adapter->tx_ring[i]->reg_idx; in ixgbevf_down()
2369 adapter->tx_ring[tx] = ring; in ixgbevf_alloc_queues()
2391 kfree(adapter->tx_ring[--tx]); in ixgbevf_alloc_queues()
2392 adapter->tx_ring[tx] = NULL; in ixgbevf_alloc_queues()
2590 kfree(adapter->tx_ring[i]); in ixgbevf_clear_interrupt_scheme()
2591 adapter->tx_ring[i] = NULL; in ixgbevf_clear_interrupt_scheme()
2786 set_check_for_tx_hang(adapter->tx_ring[i]); in ixgbevf_check_hang_subtask()
2928 void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring) in ixgbevf_free_tx_resources() argument
2930 ixgbevf_clean_tx_ring(tx_ring); in ixgbevf_free_tx_resources()
2932 vfree(tx_ring->tx_buffer_info); in ixgbevf_free_tx_resources()
2933 tx_ring->tx_buffer_info = NULL; in ixgbevf_free_tx_resources()
2936 if (!tx_ring->desc) in ixgbevf_free_tx_resources()
2939 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, in ixgbevf_free_tx_resources()
2940 tx_ring->dma); in ixgbevf_free_tx_resources()
2942 tx_ring->desc = NULL; in ixgbevf_free_tx_resources()
2956 if (adapter->tx_ring[i]->desc) in ixgbevf_free_all_tx_resources()
2957 ixgbevf_free_tx_resources(adapter->tx_ring[i]); in ixgbevf_free_all_tx_resources()
2966 int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring) in ixgbevf_setup_tx_resources() argument
2970 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; in ixgbevf_setup_tx_resources()
2971 tx_ring->tx_buffer_info = vzalloc(size); in ixgbevf_setup_tx_resources()
2972 if (!tx_ring->tx_buffer_info) in ixgbevf_setup_tx_resources()
2976 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); in ixgbevf_setup_tx_resources()
2977 tx_ring->size = ALIGN(tx_ring->size, 4096); in ixgbevf_setup_tx_resources()
2979 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size, in ixgbevf_setup_tx_resources()
2980 &tx_ring->dma, GFP_KERNEL); in ixgbevf_setup_tx_resources()
2981 if (!tx_ring->desc) in ixgbevf_setup_tx_resources()
2987 vfree(tx_ring->tx_buffer_info); in ixgbevf_setup_tx_resources()
2988 tx_ring->tx_buffer_info = NULL; in ixgbevf_setup_tx_resources()
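
The setup path pairs a vzalloc()'d per-descriptor bookkeeping array with a page-aligned coherent DMA buffer for the hardware descriptor ring, unwinding the vzalloc() if the coherent allocation fails; the free path above mirrors this with vfree() and dma_free_coherent(). A sketch of that pattern, reusing struct example_tx_ring from the earlier sketch (the descriptor size below is a stand-in, not the real union ixgbe_adv_tx_desc):

#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

#define EXAMPLE_DESC_SIZE 16    /* stand-in for sizeof(union ixgbe_adv_tx_desc) */

static int example_setup_tx_resources(struct example_tx_ring *ring)
{
        ring->buffer_info = vzalloc(ring->count * sizeof(*ring->buffer_info));
        if (!ring->buffer_info)
                return -ENOMEM;

        /* Round the descriptor area up to a full 4 KiB page. */
        ring->size = ALIGN(ring->count * EXAMPLE_DESC_SIZE, 4096);
        ring->desc = dma_alloc_coherent(ring->dev, ring->size,
                                        &ring->dma, GFP_KERNEL);
        if (!ring->desc) {
                vfree(ring->buffer_info);
                ring->buffer_info = NULL;
                return -ENOMEM;
        }
        return 0;
}
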
3008 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]); in ixgbevf_setup_all_tx_resources()
3245 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, in ixgbevf_tx_ctxtdesc() argument
3250 u16 i = tx_ring->next_to_use; in ixgbevf_tx_ctxtdesc()
3252 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); in ixgbevf_tx_ctxtdesc()
3255 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in ixgbevf_tx_ctxtdesc()
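
The context-descriptor helper also shows the ring's wrap-around idiom: bump next_to_use and fold it back to zero once it reaches the end of the ring. A tiny sketch (the helper name is an assumption):

#include <linux/types.h>

/* Advance a ring index by one, wrapping at 'count' entries. */
static inline u16 example_advance_ring_index(u16 i, u16 count)
{
        return (++i < count) ? i : 0;
}
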
3266 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, in ixgbevf_tso() argument
3330 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, in ixgbevf_tso()
3336 static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, in ixgbevf_tx_csum() argument
3359 dev_warn(tx_ring->dev, in ixgbevf_tx_csum()
3383 dev_warn(tx_ring->dev, in ixgbevf_tx_csum()
3398 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, in ixgbevf_tx_csum()
3445 static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, in ixgbevf_tx_map() argument
3459 u16 i = tx_ring->next_to_use; in ixgbevf_tx_map()
3461 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); in ixgbevf_tx_map()
3466 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in ixgbevf_tx_map()
3467 if (dma_mapping_error(tx_ring->dev, dma)) in ixgbevf_tx_map()
3483 if (i == tx_ring->count) { in ixgbevf_tx_map()
3484 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); in ixgbevf_tx_map()
3502 if (i == tx_ring->count) { in ixgbevf_tx_map()
3503 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); in ixgbevf_tx_map()
3510 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in ixgbevf_tx_map()
3512 if (dma_mapping_error(tx_ring->dev, dma)) in ixgbevf_tx_map()
3515 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbevf_tx_map()
3545 if (i == tx_ring->count) in ixgbevf_tx_map()
3548 tx_ring->next_to_use = i; in ixgbevf_tx_map()
3551 ixgbevf_write_tail(tx_ring, i); in ixgbevf_tx_map()
3555 dev_err(tx_ring->dev, "TX DMA map failed\n"); in ixgbevf_tx_map()
3559 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbevf_tx_map()
3560 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer); in ixgbevf_tx_map()
3564 i = tx_ring->count; in ixgbevf_tx_map()
3568 tx_ring->next_to_use = i; in ixgbevf_tx_map()
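
The tx_map matches cover DMA mapping of the skb: the linear head is mapped with dma_map_single(), each page fragment with skb_frag_dma_map(), every mapping is checked with dma_mapping_error(), and on failure everything mapped so far is unwound. A standalone sketch of that shape; the helper and its array-based bookkeeping are assumptions, since the real driver writes descriptors as it maps:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Map head + fragments of an skb for transmit; store addresses in addrs[]. */
static int example_tx_map(struct device *dev, struct sk_buff *skb,
                          dma_addr_t *addrs, unsigned int max_addrs)
{
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int f, n = 0;
        dma_addr_t dma;

        if (nr_frags + 1 > max_addrs)
                return -ENOSPC;

        dma = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma))
                return -ENOMEM;
        addrs[n++] = dma;

        for (f = 0; f < nr_frags; f++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

                dma = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
                                       DMA_TO_DEVICE);
                if (dma_mapping_error(dev, dma))
                        goto err_unmap;
                addrs[n++] = dma;
        }
        return 0;

err_unmap:
        /* Unwind in reverse: fragments first, then the linear head. */
        while (n > 1) {
                n--;
                dma_unmap_page(dev, addrs[n],
                               skb_frag_size(&skb_shinfo(skb)->frags[n - 1]),
                               DMA_TO_DEVICE);
        }
        dma_unmap_single(dev, addrs[0], skb_headlen(skb), DMA_TO_DEVICE);
        return -ENOMEM;
}
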
3571 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) in __ixgbevf_maybe_stop_tx() argument
3573 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __ixgbevf_maybe_stop_tx()
3583 if (likely(ixgbevf_desc_unused(tx_ring) < size)) in __ixgbevf_maybe_stop_tx()
3587 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __ixgbevf_maybe_stop_tx()
3588 ++tx_ring->tx_stats.restart_queue; in __ixgbevf_maybe_stop_tx()
3593 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) in ixgbevf_maybe_stop_tx() argument
3595 if (likely(ixgbevf_desc_unused(tx_ring) >= size)) in ixgbevf_maybe_stop_tx()
3597 return __ixgbevf_maybe_stop_tx(tx_ring, size); in ixgbevf_maybe_stop_tx()
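
The stop/restart helpers implement the standard race-avoidance pattern between the transmit and completion paths: stop the subqueue first, issue a full memory barrier, then re-check the free-descriptor count and undo the stop if completions freed space in between. A sketch reusing struct example_tx_ring from above; example_desc_unused() and its field use are likewise assumptions:

/* Free descriptors between the consumer and producer indexes (assumed fields). */
static u16 example_desc_unused(const struct example_tx_ring *ring)
{
        s16 unused = ring->next_to_clean - ring->next_to_use - 1;

        return unused < 0 ? unused + ring->count : unused;
}

static int example_maybe_stop_tx(struct example_tx_ring *ring, u16 needed)
{
        if (likely(example_desc_unused(ring) >= needed))
                return 0;

        netif_stop_subqueue(ring->netdev, ring->queue_index);

        /* Pair with the completion path so a just-finished cleanup is visible. */
        smp_mb();

        if (example_desc_unused(ring) < needed)
                return -EBUSY;

        /* Cleanup freed space between our check and the stop: undo the stop. */
        netif_start_subqueue(ring->netdev, ring->queue_index);
        ring->restart_queue++;
        return 0;
}
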
3604 struct ixgbevf_ring *tx_ring; in ixgbevf_xmit_frame() local
3619 tx_ring = adapter->tx_ring[skb->queue_mapping]; in ixgbevf_xmit_frame()
3633 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) { in ixgbevf_xmit_frame()
3634 tx_ring->tx_stats.tx_busy++; in ixgbevf_xmit_frame()
3639 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in ixgbevf_xmit_frame()
3654 tso = ixgbevf_tso(tx_ring, first, &hdr_len); in ixgbevf_xmit_frame()
3658 ixgbevf_tx_csum(tx_ring, first); in ixgbevf_xmit_frame()
3660 ixgbevf_tx_map(tx_ring, first, hdr_len); in ixgbevf_xmit_frame()
3662 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED); in ixgbevf_xmit_frame()
3866 ring = adapter->tx_ring[i]; in ixgbevf_get_stats()