Lines Matching refs:tx_ring

554 struct ixgbe_ring *tx_ring; in ixgbe_dump() local
596 tx_ring = adapter->tx_ring[n]; in ixgbe_dump()
597 tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; in ixgbe_dump()
599 n, tx_ring->next_to_use, tx_ring->next_to_clean, in ixgbe_dump()
648 tx_ring = adapter->tx_ring[n]; in ixgbe_dump()
650 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); in ixgbe_dump()
657 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { in ixgbe_dump()
658 tx_desc = IXGBE_TX_DESC(tx_ring, i); in ixgbe_dump()
659 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbe_dump()
671 if (i == tx_ring->next_to_use && in ixgbe_dump()
672 i == tx_ring->next_to_clean) in ixgbe_dump()
674 else if (i == tx_ring->next_to_use) in ixgbe_dump()
676 else if (i == tx_ring->next_to_clean) in ixgbe_dump()
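The markers printed by the descriptor dump loop above (lines 671-676) tag each slot relative to next_to_use (NTU) and next_to_clean (NTC). A minimal standalone sketch of that tagging logic, with made-up values for the two indices:

    /* Hedged sketch, not the driver's code: how a dump loop can mark each
     * descriptor slot against NTU/NTC, mirroring lines 671-676 above. */
    #include <stdio.h>

    static const char *slot_marker(unsigned i, unsigned ntu, unsigned ntc)
    {
            if (i == ntu && i == ntc)
                    return " NTC/U";   /* clean and use meet here */
            else if (i == ntu)
                    return " NTU";
            else if (i == ntc)
                    return " NTC";
            return "";
    }

    int main(void)
    {
            for (unsigned i = 0; i < 8; i++)           /* 8-slot toy ring */
                    printf("desc[%u]%s\n", i, slot_marker(i, 5, 2));
            return 0;
    }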
952 &adapter->tx_ring[i]->state); in ixgbe_update_xoff_rx_lfc()
991 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; in ixgbe_update_xoff_received() local
993 tc = tx_ring->dcb_tc; in ixgbe_update_xoff_received()
995 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); in ixgbe_update_xoff_received()
1026 static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) in ixgbe_check_tx_hang() argument
1028 u32 tx_done = ixgbe_get_tx_completed(tx_ring); in ixgbe_check_tx_hang()
1029 u32 tx_done_old = tx_ring->tx_stats.tx_done_old; in ixgbe_check_tx_hang()
1030 u32 tx_pending = ixgbe_get_tx_pending(tx_ring); in ixgbe_check_tx_hang()
1032 clear_check_for_tx_hang(tx_ring); in ixgbe_check_tx_hang()
1049 &tx_ring->state); in ixgbe_check_tx_hang()
1051 tx_ring->tx_stats.tx_done_old = tx_done; in ixgbe_check_tx_hang()
1053 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); in ixgbe_check_tx_hang()
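The hang check above snapshots the completed-descriptor count into tx_done_old (line 1029) and only reports a hang when two consecutive passes see no progress while descriptors are still pending, using the __IXGBE_HANG_CHECK_ARMED bit as the "two in a row" latch. A hedged standalone model of that state machine, with plain C types standing in for the ring and its state bits:

    /* Hedged sketch of the logic visible at lines 1028-1053; the struct and
     * helper are illustrative stand-ins, not the driver's types. */
    #include <stdbool.h>

    struct ring_snapshot {
            unsigned tx_done_old;   /* models tx_ring->tx_stats.tx_done_old */
            bool armed;             /* models __IXGBE_HANG_CHECK_ARMED */
    };

    static bool check_tx_hang(struct ring_snapshot *s,
                              unsigned tx_done, unsigned tx_pending)
    {
            if (s->tx_done_old == tx_done && tx_pending) {
                    /* no progress since last pass: arm once, report a hang
                     * only if we were already armed (second pass in a row) */
                    bool was_armed = s->armed;
                    s->armed = true;
                    return was_armed;
            }
            /* progress was made: take a fresh snapshot and disarm */
            s->tx_done_old = tx_done;
            s->armed = false;
            return false;
    }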
1079 struct ixgbe_ring *tx_ring) in ixgbe_clean_tx_irq() argument
1086 unsigned int i = tx_ring->next_to_clean; in ixgbe_clean_tx_irq()
1091 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbe_clean_tx_irq()
1092 tx_desc = IXGBE_TX_DESC(tx_ring, i); in ixgbe_clean_tx_irq()
1093 i -= tx_ring->count; in ixgbe_clean_tx_irq()
1120 dma_unmap_single(tx_ring->dev, in ixgbe_clean_tx_irq()
1135 i -= tx_ring->count; in ixgbe_clean_tx_irq()
1136 tx_buffer = tx_ring->tx_buffer_info; in ixgbe_clean_tx_irq()
1137 tx_desc = IXGBE_TX_DESC(tx_ring, 0); in ixgbe_clean_tx_irq()
1142 dma_unmap_page(tx_ring->dev, in ixgbe_clean_tx_irq()
1155 i -= tx_ring->count; in ixgbe_clean_tx_irq()
1156 tx_buffer = tx_ring->tx_buffer_info; in ixgbe_clean_tx_irq()
1157 tx_desc = IXGBE_TX_DESC(tx_ring, 0); in ixgbe_clean_tx_irq()
1167 i += tx_ring->count; in ixgbe_clean_tx_irq()
1168 tx_ring->next_to_clean = i; in ixgbe_clean_tx_irq()
1169 u64_stats_update_begin(&tx_ring->syncp); in ixgbe_clean_tx_irq()
1170 tx_ring->stats.bytes += total_bytes; in ixgbe_clean_tx_irq()
1171 tx_ring->stats.packets += total_packets; in ixgbe_clean_tx_irq()
1172 u64_stats_update_end(&tx_ring->syncp); in ixgbe_clean_tx_irq()
1176 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { in ixgbe_clean_tx_irq()
1187 tx_ring->queue_index, in ixgbe_clean_tx_irq()
1188 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), in ixgbe_clean_tx_irq()
1189 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), in ixgbe_clean_tx_irq()
1190 tx_ring->next_to_use, i, in ixgbe_clean_tx_irq()
1191 tx_ring->tx_buffer_info[i].time_stamp, jiffies); in ixgbe_clean_tx_irq()
1193 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in ixgbe_clean_tx_irq()
1197 adapter->tx_timeout_count + 1, tx_ring->queue_index); in ixgbe_clean_tx_irq()
1206 netdev_tx_completed_queue(txring_txq(tx_ring), in ixgbe_clean_tx_irq()
1210 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in ixgbe_clean_tx_irq()
1211 (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { in ixgbe_clean_tx_irq()
1216 if (__netif_subqueue_stopped(tx_ring->netdev, in ixgbe_clean_tx_irq()
1217 tx_ring->queue_index) in ixgbe_clean_tx_irq()
1219 netif_wake_subqueue(tx_ring->netdev, in ixgbe_clean_tx_irq()
1220 tx_ring->queue_index); in ixgbe_clean_tx_irq()
1221 ++tx_ring->tx_stats.restart_queue; in ixgbe_clean_tx_irq()
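ixgbe_clean_tx_irq() biases its running index by -count (line 1093) so that wrap-around is detected with a cheap compare against zero (the resets at lines 1135 and 1155) instead of a modulo, and un-biases it again (line 1167) before storing next_to_clean. A compilable illustration of the same trick:

    /* Hedged standalone sketch of the negative-bias index trick from
     * lines 1093/1135/1167 above. */
    #include <stdio.h>

    int main(void)
    {
            const int count = 4;
            int i = 1;              /* pretend next_to_clean == 1 */

            i -= count;             /* bias: i now runs in [-count, 0) */
            for (int step = 0; step < 6; step++) {
                    printf("slot %d\n", i + count);  /* real ring index */
                    i++;
                    if (!i)          /* hit the end of the ring: wrap */
                            i -= count;
            }
            i += count;             /* un-bias before storing */
            printf("next_to_clean = %d\n", i);
            return 0;
    }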
1230 struct ixgbe_ring *tx_ring, in ixgbe_update_tx_dca() argument
1234 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); in ixgbe_update_tx_dca()
1239 reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx); in ixgbe_update_tx_dca()
1243 reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx); in ixgbe_update_tx_dca()
2640 struct ixgbe_ring *ring = adapter->tx_ring[i]; in ixgbe_msix_other()
3142 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); in ixgbe_configure_tx()
4569 adapter->tx_ring[txbase + i]->l2_accel_priv = NULL; in ixgbe_fwd_ring_down()
4570 adapter->tx_ring[txbase + i]->netdev = adapter->netdev; in ixgbe_fwd_ring_down()
4607 adapter->tx_ring[txbase + i]->netdev = vdev; in ixgbe_fwd_ring_up()
4608 adapter->tx_ring[txbase + i]->l2_accel_priv = accel; in ixgbe_fwd_ring_up()
5001 static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) in ixgbe_clean_tx_ring() argument
5008 if (!tx_ring->tx_buffer_info) in ixgbe_clean_tx_ring()
5012 for (i = 0; i < tx_ring->count; i++) { in ixgbe_clean_tx_ring()
5013 tx_buffer_info = &tx_ring->tx_buffer_info[i]; in ixgbe_clean_tx_ring()
5014 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); in ixgbe_clean_tx_ring()
5017 netdev_tx_reset_queue(txring_txq(tx_ring)); in ixgbe_clean_tx_ring()
5019 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; in ixgbe_clean_tx_ring()
5020 memset(tx_ring->tx_buffer_info, 0, size); in ixgbe_clean_tx_ring()
5023 memset(tx_ring->desc, 0, tx_ring->size); in ixgbe_clean_tx_ring()
5025 tx_ring->next_to_use = 0; in ixgbe_clean_tx_ring()
5026 tx_ring->next_to_clean = 0; in ixgbe_clean_tx_ring()
5050 ixgbe_clean_tx_ring(adapter->tx_ring[i]); in ixgbe_clean_all_tx_rings()
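ixgbe_clean_tx_ring() follows the usual teardown order: release each slot's resources first (lines 5012-5014), then wipe the bookkeeping array and reset both ring indices so the ring is reusable. A hedged sketch of that order with stand-in types; free() models ixgbe_unmap_and_free_tx_resource():

    /* Illustrative model only; the real slots hold DMA mappings and skbs. */
    #include <stdlib.h>
    #include <string.h>

    struct slot { void *buf; };

    struct ring {
            struct slot *slots;
            unsigned count, next_to_use, next_to_clean;
    };

    static void clean_ring(struct ring *r)
    {
            if (!r->slots)
                    return;
            for (unsigned i = 0; i < r->count; i++) {  /* per-slot release */
                    free(r->slots[i].buf);
                    r->slots[i].buf = NULL;
            }
            memset(r->slots, 0, sizeof(*r->slots) * r->count);
            r->next_to_use = 0;                        /* ring empty again */
            r->next_to_clean = 0;
    }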
5138 u8 reg_idx = adapter->tx_ring[i]->reg_idx; in ixgbe_down()
5377 int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) in ixgbe_setup_tx_resources() argument
5379 struct device *dev = tx_ring->dev; in ixgbe_setup_tx_resources()
5384 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; in ixgbe_setup_tx_resources()
5386 if (tx_ring->q_vector) in ixgbe_setup_tx_resources()
5387 ring_node = tx_ring->q_vector->numa_node; in ixgbe_setup_tx_resources()
5389 tx_ring->tx_buffer_info = vzalloc_node(size, ring_node); in ixgbe_setup_tx_resources()
5390 if (!tx_ring->tx_buffer_info) in ixgbe_setup_tx_resources()
5391 tx_ring->tx_buffer_info = vzalloc(size); in ixgbe_setup_tx_resources()
5392 if (!tx_ring->tx_buffer_info) in ixgbe_setup_tx_resources()
5395 u64_stats_init(&tx_ring->syncp); in ixgbe_setup_tx_resources()
5398 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); in ixgbe_setup_tx_resources()
5399 tx_ring->size = ALIGN(tx_ring->size, 4096); in ixgbe_setup_tx_resources()
5402 tx_ring->desc = dma_alloc_coherent(dev, in ixgbe_setup_tx_resources()
5403 tx_ring->size, in ixgbe_setup_tx_resources()
5404 &tx_ring->dma, in ixgbe_setup_tx_resources()
5407 if (!tx_ring->desc) in ixgbe_setup_tx_resources()
5408 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in ixgbe_setup_tx_resources()
5409 &tx_ring->dma, GFP_KERNEL); in ixgbe_setup_tx_resources()
5410 if (!tx_ring->desc) in ixgbe_setup_tx_resources()
5413 tx_ring->next_to_use = 0; in ixgbe_setup_tx_resources()
5414 tx_ring->next_to_clean = 0; in ixgbe_setup_tx_resources()
5418 vfree(tx_ring->tx_buffer_info); in ixgbe_setup_tx_resources()
5419 tx_ring->tx_buffer_info = NULL; in ixgbe_setup_tx_resources()
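ixgbe_setup_tx_resources() allocates in two stages with a fallback: a node-local vzalloc_node() that falls back to plain vzalloc() (lines 5389-5393), then the coherent descriptor memory, unwinding the first allocation if anything later fails (lines 5418-5419). A userspace sketch of the pattern; alloc_node() is a hypothetical stand-in for the node-aware allocator:

    /* Hedged sketch of the allocate-with-fallback-and-unwind pattern. */
    #include <stdlib.h>

    static void *alloc_node(size_t size, int node)
    {
            (void)node;            /* stand-in: real code pins to a NUMA node */
            return calloc(1, size);
    }

    static int setup_ring(void **buf_info, void **desc,
                          size_t buf_size, size_t desc_size, int node)
    {
            *buf_info = alloc_node(buf_size, node);
            if (!*buf_info)
                    *buf_info = calloc(1, buf_size);   /* any-node fallback */
            if (!*buf_info)
                    return -1;

            *desc = calloc(1, desc_size);  /* models dma_alloc_coherent() */
            if (!*desc) {
                    free(*buf_info);       /* unwind the first allocation */
                    *buf_info = NULL;
                    return -1;
            }
            return 0;
    }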
5439 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); in ixgbe_setup_all_tx_resources()
5451 ixgbe_free_tx_resources(adapter->tx_ring[i]); in ixgbe_setup_all_tx_resources()
5549 void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring) in ixgbe_free_tx_resources() argument
5551 ixgbe_clean_tx_ring(tx_ring); in ixgbe_free_tx_resources()
5553 vfree(tx_ring->tx_buffer_info); in ixgbe_free_tx_resources()
5554 tx_ring->tx_buffer_info = NULL; in ixgbe_free_tx_resources()
5557 if (!tx_ring->desc) in ixgbe_free_tx_resources()
5560 dma_free_coherent(tx_ring->dev, tx_ring->size, in ixgbe_free_tx_resources()
5561 tx_ring->desc, tx_ring->dma); in ixgbe_free_tx_resources()
5563 tx_ring->desc = NULL; in ixgbe_free_tx_resources()
5577 if (adapter->tx_ring[i]->desc) in ixgbe_free_all_tx_resources()
5578 ixgbe_free_tx_resources(adapter->tx_ring[i]); in ixgbe_free_all_tx_resources()
5984 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; in ixgbe_update_stats() local
5985 restart_queue += tx_ring->tx_stats.restart_queue; in ixgbe_update_stats()
5986 tx_busy += tx_ring->tx_stats.tx_busy; in ixgbe_update_stats()
5987 bytes += tx_ring->stats.bytes; in ixgbe_update_stats()
5988 packets += tx_ring->stats.packets; in ixgbe_update_stats()
6179 &(adapter->tx_ring[i]->state)); in ixgbe_fdir_reinit_subtask()
6212 set_check_for_tx_hang(adapter->tx_ring[i]); in ixgbe_check_hang_subtask()
6418 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; in ixgbe_ring_tx_pending() local
6420 if (tx_ring->next_to_use != tx_ring->next_to_clean) in ixgbe_ring_tx_pending()
6777 static int ixgbe_tso(struct ixgbe_ring *tx_ring, in ixgbe_tso() argument
6838 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, in ixgbe_tso()
6844 static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, in ixgbe_tx_csum() argument
6870 dev_warn(tx_ring->dev, in ixgbe_tx_csum()
6894 dev_warn(tx_ring->dev, in ixgbe_tx_csum()
6909 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, in ixgbe_tx_csum()
6969 static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) in __ixgbe_maybe_stop_tx() argument
6971 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __ixgbe_maybe_stop_tx()
6982 if (likely(ixgbe_desc_unused(tx_ring) < size)) in __ixgbe_maybe_stop_tx()
6986 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __ixgbe_maybe_stop_tx()
6987 ++tx_ring->tx_stats.restart_queue; in __ixgbe_maybe_stop_tx()
6991 static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) in ixgbe_maybe_stop_tx() argument
6993 if (likely(ixgbe_desc_unused(tx_ring) >= size)) in ixgbe_maybe_stop_tx()
6996 return __ixgbe_maybe_stop_tx(tx_ring, size); in ixgbe_maybe_stop_tx()
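__ixgbe_maybe_stop_tx() stops the queue first and only then re-checks the free-descriptor count, so a completion that races in between cannot leave the queue stopped forever (the restart at line 6986). A hedged C11 model of that stop/re-check/restart ordering; the atomics stand in for the netif_* helpers and the implied smp_mb():

    /* Illustrative only: models the ordering, not the kernel primitives. */
    #include <stdatomic.h>

    static atomic_uint unused_descs;     /* models ixgbe_desc_unused() */
    static atomic_bool queue_stopped;

    static int maybe_stop_tx(unsigned need)
    {
            if (atomic_load(&unused_descs) >= need)      /* fast path */
                    return 0;

            atomic_store(&queue_stopped, true);          /* stop first */
            atomic_thread_fence(memory_order_seq_cst);   /* models smp_mb() */

            if (atomic_load(&unused_descs) < need)       /* still full */
                    return -1;                           /* stay stopped */

            /* a concurrent cleanup freed space: restart the queue */
            atomic_store(&queue_stopped, false);
            return 0;
    }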
7002 static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, in ixgbe_tx_map() argument
7014 u16 i = tx_ring->next_to_use; in ixgbe_tx_map()
7016 tx_desc = IXGBE_TX_DESC(tx_ring, i); in ixgbe_tx_map()
7034 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in ixgbe_tx_map()
7039 if (dma_mapping_error(tx_ring->dev, dma)) in ixgbe_tx_map()
7054 if (i == tx_ring->count) { in ixgbe_tx_map()
7055 tx_desc = IXGBE_TX_DESC(tx_ring, 0); in ixgbe_tx_map()
7073 if (i == tx_ring->count) { in ixgbe_tx_map()
7074 tx_desc = IXGBE_TX_DESC(tx_ring, 0); in ixgbe_tx_map()
7086 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in ixgbe_tx_map()
7089 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbe_tx_map()
7096 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in ixgbe_tx_map()
7115 if (i == tx_ring->count) in ixgbe_tx_map()
7118 tx_ring->next_to_use = i; in ixgbe_tx_map()
7120 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); in ixgbe_tx_map()
7122 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { in ixgbe_tx_map()
7123 writel(i, tx_ring->tail); in ixgbe_tx_map()
7133 dev_err(tx_ring->dev, "TX DMA map failed\n"); in ixgbe_tx_map()
7137 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbe_tx_map()
7138 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); in ixgbe_tx_map()
7142 i = tx_ring->count; in ixgbe_tx_map()
7146 tx_ring->next_to_use = i; in ixgbe_tx_map()
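On a DMA mapping failure, ixgbe_tx_map() walks backwards from the failing slot (lines 7137-7146), releasing everything mapped for this frame, and restores next_to_use so the aborted frame never becomes visible to hardware. A standalone sketch of that unwind with illustrative types; release_slot() stands in for ixgbe_unmap_and_free_tx_resource():

    /* Hedged model of the backwards unwind on mapping failure. */
    struct slot { void *dma; };
    struct ring { struct slot *slots; unsigned count, next_to_use; };

    static void release_slot(struct slot *s) { s->dma = 0; }  /* stand-in */

    static void unwind_map_failure(struct ring *r, unsigned first, unsigned i)
    {
            for (;;) {                       /* newest slot back to the first */
                    release_slot(&r->slots[i]);
                    if (i == first)
                            break;
                    if (i == 0)
                            i = r->count;    /* wrap backwards over slot 0 */
                    i--;
            }
            r->next_to_use = i;              /* frame never became visible */
    }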
7283 struct ixgbe_ring *tx_ring) in ixgbe_xmit_frame_ring() argument
7303 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) { in ixgbe_xmit_frame_ring()
7304 tx_ring->tx_stats.tx_busy++; in ixgbe_xmit_frame_ring()
7309 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in ixgbe_xmit_frame_ring()
7382 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) { in ixgbe_xmit_frame_ring()
7383 tso = ixgbe_fso(tx_ring, first, &hdr_len); in ixgbe_xmit_frame_ring()
7391 tso = ixgbe_tso(tx_ring, first, &hdr_len); in ixgbe_xmit_frame_ring()
7395 ixgbe_tx_csum(tx_ring, first); in ixgbe_xmit_frame_ring()
7398 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) in ixgbe_xmit_frame_ring()
7399 ixgbe_atr(tx_ring, first); in ixgbe_xmit_frame_ring()
7404 ixgbe_tx_map(tx_ring, first, hdr_len); in ixgbe_xmit_frame_ring()
7420 struct ixgbe_ring *tx_ring; in __ixgbe_xmit_frame() local
7429 tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping]; in __ixgbe_xmit_frame()
7431 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); in __ixgbe_xmit_frame()
7596 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); in ixgbe_get_stats64()
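The per-ring byte/packet counters are written under u64_stats_update_begin()/end() (lines 1169-1172) and read lock-free in ixgbe_get_stats64() via the u64_stats fetch/retry helpers. A hedged seqlock-style model of that writer/reader pairing, using C11 atomics in place of the kernel primitives:

    /* Illustrative model of the u64_stats seqcount protocol, not the
     * kernel implementation. */
    #include <stdatomic.h>
    #include <stdint.h>

    struct stats { atomic_uint seq; uint64_t bytes, packets; };

    static void writer_update(struct stats *s, uint64_t b, uint64_t p)
    {
            atomic_fetch_add(&s->seq, 1);   /* odd: update in progress */
            s->bytes += b;
            s->packets += p;
            atomic_fetch_add(&s->seq, 1);   /* even: update complete */
    }

    static void reader_snapshot(struct stats *s, uint64_t *b, uint64_t *p)
    {
            unsigned start;
            do {
                    while ((start = atomic_load(&s->seq)) & 1)
                            ;               /* writer active: wait */
                    *b = s->bytes;
                    *p = s->packets;
            } while (atomic_load(&s->seq) != start);  /* retry if torn */
    }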