Lines matching refs:tx_ring in drivers/net/ethernet/intel/ixgbe/ixgbe_main.c. Each entry gives the source line number, the matching code, and the enclosing function; "local" and "argument" mark how tx_ring is declared at that reference.

568 struct ixgbe_ring *tx_ring; in ixgbe_dump() local
610 tx_ring = adapter->tx_ring[n]; in ixgbe_dump()
611 tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; in ixgbe_dump()
613 n, tx_ring->next_to_use, tx_ring->next_to_clean, in ixgbe_dump()
662 tx_ring = adapter->tx_ring[n]; in ixgbe_dump()
664 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); in ixgbe_dump()
671 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { in ixgbe_dump()
672 tx_desc = IXGBE_TX_DESC(tx_ring, i); in ixgbe_dump()
673 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbe_dump()
685 if (i == tx_ring->next_to_use && in ixgbe_dump()
686 i == tx_ring->next_to_clean) in ixgbe_dump()
688 else if (i == tx_ring->next_to_use) in ixgbe_dump()
690 else if (i == tx_ring->next_to_clean) in ixgbe_dump()
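
Note on the ixgbe_dump() fragments above: the dump loop walks every slot in the ring and tags the entries at next_to_use (NTU) and next_to_clean (NTC), which together partition the ring into in-flight and free regions. A minimal userspace sketch of the same tagging logic, with a hypothetical ring size and index values:

    /* Sketch only: userspace model of the NTU/NTC tagging done by
     * ixgbe_dump(); ring size and index values are made up. */
    #include <stdio.h>

    #define RING_COUNT 8

    int main(void)
    {
        unsigned int next_to_use = 5, next_to_clean = 2; /* hypothetical */

        for (unsigned int i = 0; i < RING_COUNT; i++) {
            const char *mark = "";

            if (i == next_to_use && i == next_to_clean)
                mark = " NTC/U";   /* ring completely empty (or full) */
            else if (i == next_to_use)
                mark = " NTU";     /* next slot software will fill */
            else if (i == next_to_clean)
                mark = " NTC";     /* next slot software will reclaim */
            printf("desc[%u]%s\n", i, mark);
        }
        return 0;
    }
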
966 &adapter->tx_ring[i]->state); in ixgbe_update_xoff_rx_lfc()
1005 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; in ixgbe_update_xoff_received() local
1007 tc = tx_ring->dcb_tc; in ixgbe_update_xoff_received()
1009 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); in ixgbe_update_xoff_received()
1040 static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) in ixgbe_check_tx_hang() argument
1042 u32 tx_done = ixgbe_get_tx_completed(tx_ring); in ixgbe_check_tx_hang()
1043 u32 tx_done_old = tx_ring->tx_stats.tx_done_old; in ixgbe_check_tx_hang()
1044 u32 tx_pending = ixgbe_get_tx_pending(tx_ring); in ixgbe_check_tx_hang()
1046 clear_check_for_tx_hang(tx_ring); in ixgbe_check_tx_hang()
1063 &tx_ring->state); in ixgbe_check_tx_hang()
1065 tx_ring->tx_stats.tx_done_old = tx_done; in ixgbe_check_tx_hang()
1067 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); in ixgbe_check_tx_hang()
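
The ixgbe_check_tx_hang() fragments show a two-pass detector: a hang is reported only when no descriptor has completed since the previous check (tx_done == tx_done_old) while work is still pending, and only if an earlier pass already armed the __IXGBE_HANG_CHECK_ARMED bit. A userspace sketch of that state machine, with a plain bool standing in for the atomic ring-state bit:

    #include <stdbool.h>
    #include <stdio.h>

    struct ring_state {
        unsigned int tx_done_old;  /* completion count at last check */
        bool armed;                /* stands in for __IXGBE_HANG_CHECK_ARMED */
    };

    /* True only on the second consecutive check that sees pending work
     * and zero progress, mirroring ixgbe_check_tx_hang(). */
    static bool check_tx_hang(struct ring_state *r,
                              unsigned int tx_done, unsigned int tx_pending)
    {
        if (r->tx_done_old == tx_done && tx_pending) {
            /* No progress: report only if a previous pass armed us. */
            bool was_armed = r->armed;
            r->armed = true;
            return was_armed;
        }
        /* Progress was made (or ring is idle): record and disarm, as
         * the clear_bit() paths above do. */
        r->tx_done_old = tx_done;
        r->armed = false;
        return false;
    }

    int main(void)
    {
        struct ring_state r = { 0, false };

        check_tx_hang(&r, 10, 4);                        /* progress: disarm */
        check_tx_hang(&r, 10, 4);                        /* stall #1: arm */
        printf("hang = %d\n", check_tx_hang(&r, 10, 4)); /* stall #2: hang */
        return 0;
    }
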
1093 struct ixgbe_ring *tx_ring) in ixgbe_clean_tx_irq() argument
1100 unsigned int i = tx_ring->next_to_clean; in ixgbe_clean_tx_irq()
1105 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbe_clean_tx_irq()
1106 tx_desc = IXGBE_TX_DESC(tx_ring, i); in ixgbe_clean_tx_irq()
1107 i -= tx_ring->count; in ixgbe_clean_tx_irq()
1134 dma_unmap_single(tx_ring->dev, in ixgbe_clean_tx_irq()
1149 i -= tx_ring->count; in ixgbe_clean_tx_irq()
1150 tx_buffer = tx_ring->tx_buffer_info; in ixgbe_clean_tx_irq()
1151 tx_desc = IXGBE_TX_DESC(tx_ring, 0); in ixgbe_clean_tx_irq()
1156 dma_unmap_page(tx_ring->dev, in ixgbe_clean_tx_irq()
1169 i -= tx_ring->count; in ixgbe_clean_tx_irq()
1170 tx_buffer = tx_ring->tx_buffer_info; in ixgbe_clean_tx_irq()
1171 tx_desc = IXGBE_TX_DESC(tx_ring, 0); in ixgbe_clean_tx_irq()
1181 i += tx_ring->count; in ixgbe_clean_tx_irq()
1182 tx_ring->next_to_clean = i; in ixgbe_clean_tx_irq()
1183 u64_stats_update_begin(&tx_ring->syncp); in ixgbe_clean_tx_irq()
1184 tx_ring->stats.bytes += total_bytes; in ixgbe_clean_tx_irq()
1185 tx_ring->stats.packets += total_packets; in ixgbe_clean_tx_irq()
1186 u64_stats_update_end(&tx_ring->syncp); in ixgbe_clean_tx_irq()
1190 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { in ixgbe_clean_tx_irq()
1201 tx_ring->queue_index, in ixgbe_clean_tx_irq()
1202 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), in ixgbe_clean_tx_irq()
1203 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), in ixgbe_clean_tx_irq()
1204 tx_ring->next_to_use, i, in ixgbe_clean_tx_irq()
1205 tx_ring->tx_buffer_info[i].time_stamp, jiffies); in ixgbe_clean_tx_irq()
1207 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in ixgbe_clean_tx_irq()
1211 adapter->tx_timeout_count + 1, tx_ring->queue_index); in ixgbe_clean_tx_irq()
1220 netdev_tx_completed_queue(txring_txq(tx_ring), in ixgbe_clean_tx_irq()
1224 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in ixgbe_clean_tx_irq()
1225 (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { in ixgbe_clean_tx_irq()
1230 if (__netif_subqueue_stopped(tx_ring->netdev, in ixgbe_clean_tx_irq()
1231 tx_ring->queue_index) in ixgbe_clean_tx_irq()
1233 netif_wake_subqueue(tx_ring->netdev, in ixgbe_clean_tx_irq()
1234 tx_ring->queue_index); in ixgbe_clean_tx_irq()
1235 ++tx_ring->tx_stats.restart_queue; in ixgbe_clean_tx_irq()
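
A detail worth calling out in the ixgbe_clean_tx_irq() lines: the loop index is biased by subtracting tx_ring->count up front (line 1107), so it runs through "negative" unsigned values and reaches zero exactly at the wrap point (hence the i -= tx_ring->count resets at 1149 and 1169), and the bias is removed again before storing next_to_clean (1181-1182). A compilable sketch of just that index arithmetic, with the per-descriptor reclaim elided:

    /* Sketch of the biased-index wraparound; reclaim work elided. */
    #include <stdio.h>

    #define RING_COUNT 8u

    int main(void)
    {
        unsigned int next_to_clean = 5;  /* hypothetical starting slot */
        unsigned int budget = 6;         /* descriptors to reclaim */
        unsigned int i = next_to_clean;

        i -= RING_COUNT;                 /* bias: now "i == 0" means wrap */

        while (budget--) {
            printf("reclaim slot %u\n", i + RING_COUNT);
            i++;
            if (!i)                      /* wrapped past the last slot */
                i -= RING_COUNT;         /* pointers reset to ring start */
        }

        i += RING_COUNT;                 /* remove the bias again */
        next_to_clean = i;
        printf("next_to_clean = %u\n", next_to_clean);
        return 0;
    }
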
1244 struct ixgbe_ring *tx_ring, in ixgbe_update_tx_dca() argument
1252 txctrl = dca3_get_tag(tx_ring->dev, cpu); in ixgbe_update_tx_dca()
1256 reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx); in ixgbe_update_tx_dca()
1260 reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx); in ixgbe_update_tx_dca()
2722 struct ixgbe_ring *ring = adapter->tx_ring[i]; in ixgbe_msix_other()
3228 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); in ixgbe_configure_tx()
4679 adapter->tx_ring[txbase + i]->l2_accel_priv = NULL; in ixgbe_fwd_ring_down()
4680 adapter->tx_ring[txbase + i]->netdev = adapter->netdev; in ixgbe_fwd_ring_down()
4717 adapter->tx_ring[txbase + i]->netdev = vdev; in ixgbe_fwd_ring_up()
4718 adapter->tx_ring[txbase + i]->l2_accel_priv = accel; in ixgbe_fwd_ring_up()
5105 static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) in ixgbe_clean_tx_ring() argument
5112 if (!tx_ring->tx_buffer_info) in ixgbe_clean_tx_ring()
5116 for (i = 0; i < tx_ring->count; i++) { in ixgbe_clean_tx_ring()
5117 tx_buffer_info = &tx_ring->tx_buffer_info[i]; in ixgbe_clean_tx_ring()
5118 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); in ixgbe_clean_tx_ring()
5121 netdev_tx_reset_queue(txring_txq(tx_ring)); in ixgbe_clean_tx_ring()
5123 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; in ixgbe_clean_tx_ring()
5124 memset(tx_ring->tx_buffer_info, 0, size); in ixgbe_clean_tx_ring()
5127 memset(tx_ring->desc, 0, tx_ring->size); in ixgbe_clean_tx_ring()
5129 tx_ring->next_to_use = 0; in ixgbe_clean_tx_ring()
5130 tx_ring->next_to_clean = 0; in ixgbe_clean_tx_ring()
5154 ixgbe_clean_tx_ring(adapter->tx_ring[i]); in ixgbe_clean_all_tx_rings()
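
ixgbe_clean_tx_ring() follows a fixed order: bail out if the buffer array was never allocated, unmap and free every pending buffer, reset the byte-queue-limit accounting via netdev_tx_reset_queue(), zero both arrays, and rewind the indices. A userspace analogue of that ordering, with free() standing in for the driver's unmap-and-free helper:

    /* Userspace analogue of the ixgbe_clean_tx_ring() ordering; names
     * mirror the driver, free() stands in for unmap-and-free. */
    #include <stdlib.h>
    #include <string.h>

    struct tx_buffer { void *skb; };

    struct tx_ring {
        struct tx_buffer *tx_buffer_info;
        void *desc;             /* descriptor memory */
        size_t size;            /* bytes of descriptor memory */
        unsigned int count;
        unsigned int next_to_use, next_to_clean;
    };

    void clean_tx_ring(struct tx_ring *r)
    {
        if (!r->tx_buffer_info)          /* never allocated: nothing to do */
            return;

        for (unsigned int i = 0; i < r->count; i++) {
            free(r->tx_buffer_info[i].skb);   /* unmap+free per buffer */
            r->tx_buffer_info[i].skb = NULL;
        }

        /* The driver resets BQL accounting here: netdev_tx_reset_queue(). */

        memset(r->tx_buffer_info, 0, sizeof(*r->tx_buffer_info) * r->count);
        memset(r->desc, 0, r->size);     /* zero the descriptor ring too */

        r->next_to_use = 0;              /* both indices back to slot 0 */
        r->next_to_clean = 0;
    }
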
5242 u8 reg_idx = adapter->tx_ring[i]->reg_idx; in ixgbe_down()
5477 int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) in ixgbe_setup_tx_resources() argument
5479 struct device *dev = tx_ring->dev; in ixgbe_setup_tx_resources()
5484 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; in ixgbe_setup_tx_resources()
5486 if (tx_ring->q_vector) in ixgbe_setup_tx_resources()
5487 ring_node = tx_ring->q_vector->numa_node; in ixgbe_setup_tx_resources()
5489 tx_ring->tx_buffer_info = vzalloc_node(size, ring_node); in ixgbe_setup_tx_resources()
5490 if (!tx_ring->tx_buffer_info) in ixgbe_setup_tx_resources()
5491 tx_ring->tx_buffer_info = vzalloc(size); in ixgbe_setup_tx_resources()
5492 if (!tx_ring->tx_buffer_info) in ixgbe_setup_tx_resources()
5495 u64_stats_init(&tx_ring->syncp); in ixgbe_setup_tx_resources()
5498 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); in ixgbe_setup_tx_resources()
5499 tx_ring->size = ALIGN(tx_ring->size, 4096); in ixgbe_setup_tx_resources()
5502 tx_ring->desc = dma_alloc_coherent(dev, in ixgbe_setup_tx_resources()
5503 tx_ring->size, in ixgbe_setup_tx_resources()
5504 &tx_ring->dma, in ixgbe_setup_tx_resources()
5507 if (!tx_ring->desc) in ixgbe_setup_tx_resources()
5508 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in ixgbe_setup_tx_resources()
5509 &tx_ring->dma, GFP_KERNEL); in ixgbe_setup_tx_resources()
5510 if (!tx_ring->desc) in ixgbe_setup_tx_resources()
5513 tx_ring->next_to_use = 0; in ixgbe_setup_tx_resources()
5514 tx_ring->next_to_clean = 0; in ixgbe_setup_tx_resources()
5518 vfree(tx_ring->tx_buffer_info); in ixgbe_setup_tx_resources()
5519 tx_ring->tx_buffer_info = NULL; in ixgbe_setup_tx_resources()
5539 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); in ixgbe_setup_all_tx_resources()
5551 ixgbe_free_tx_resources(adapter->tx_ring[i]); in ixgbe_setup_all_tx_resources()
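
The setup fragments show a two-attempt NUMA pattern: vzalloc_node() on the queue vector's node with a plain vzalloc() fallback for the buffer array, and two dma_alloc_coherent() calls (5502 and 5508) for the descriptor memory; in the full source the first DMA attempt is bracketed by set_dev_node() calls so it prefers the ring's node. A kernel-style sketch of the pattern (compiles only in a kernel tree; my_ring/my_tx_buffer are stand-in types, not the driver's):

    #include <linux/vmalloc.h>
    #include <linux/dma-mapping.h>
    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/kernel.h>

    struct my_tx_buffer { void *skb; };          /* stand-in type */

    struct my_ring {
        struct my_tx_buffer *tx_buffer_info;
        void *desc;
        dma_addr_t dma;
        unsigned int count;
        unsigned int size;
    };

    int setup_tx_resources(struct device *dev, struct my_ring *ring, int node)
    {
        int orig_node = dev_to_node(dev);
        int size = sizeof(struct my_tx_buffer) * ring->count;

        ring->tx_buffer_info = vzalloc_node(size, node);
        if (!ring->tx_buffer_info)
            ring->tx_buffer_info = vzalloc(size);    /* any NUMA node */
        if (!ring->tx_buffer_info)
            return -ENOMEM;

        /* Descriptor area: 16 bytes per advanced TX descriptor, rounded
         * up to 4 KiB as the driver does with ALIGN(size, 4096). */
        ring->size = ALIGN(ring->count * 16, 4096);

        set_dev_node(dev, node);                     /* prefer ring's node */
        ring->desc = dma_alloc_coherent(dev, ring->size, &ring->dma,
                                        GFP_KERNEL);
        set_dev_node(dev, orig_node);
        if (!ring->desc)                             /* retry on any node */
            ring->desc = dma_alloc_coherent(dev, ring->size, &ring->dma,
                                            GFP_KERNEL);
        if (!ring->desc) {
            vfree(ring->tx_buffer_info);
            ring->tx_buffer_info = NULL;
            return -ENOMEM;
        }
        return 0;
    }
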
5649 void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring) in ixgbe_free_tx_resources() argument
5651 ixgbe_clean_tx_ring(tx_ring); in ixgbe_free_tx_resources()
5653 vfree(tx_ring->tx_buffer_info); in ixgbe_free_tx_resources()
5654 tx_ring->tx_buffer_info = NULL; in ixgbe_free_tx_resources()
5657 if (!tx_ring->desc) in ixgbe_free_tx_resources()
5660 dma_free_coherent(tx_ring->dev, tx_ring->size, in ixgbe_free_tx_resources()
5661 tx_ring->desc, tx_ring->dma); in ixgbe_free_tx_resources()
5663 tx_ring->desc = NULL; in ixgbe_free_tx_resources()
5677 if (adapter->tx_ring[i]->desc) in ixgbe_free_all_tx_resources()
5678 ixgbe_free_tx_resources(adapter->tx_ring[i]); in ixgbe_free_all_tx_resources()
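
Teardown mirrors setup in reverse: reclaim in-flight buffers first (line 5651), free the vmalloc'd array, and only call dma_free_coherent() when desc is non-NULL, since a ring that failed setup may have no descriptor memory (the check at 5657). A companion sketch using the same stand-in types as above:

    void free_tx_resources(struct device *dev, struct my_ring *ring)
    {
        /* ixgbe_clean_tx_ring() runs first to unmap in-flight buffers. */

        vfree(ring->tx_buffer_info);     /* vfree(NULL) is a safe no-op */
        ring->tx_buffer_info = NULL;

        if (!ring->desc)                 /* ring was never fully set up */
            return;

        dma_free_coherent(dev, ring->size, ring->desc, ring->dma);
        ring->desc = NULL;
    }
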
6098 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; in ixgbe_update_stats() local
6099 restart_queue += tx_ring->tx_stats.restart_queue; in ixgbe_update_stats()
6100 tx_busy += tx_ring->tx_stats.tx_busy; in ixgbe_update_stats()
6101 bytes += tx_ring->stats.bytes; in ixgbe_update_stats()
6102 packets += tx_ring->stats.packets; in ixgbe_update_stats()
6293 &(adapter->tx_ring[i]->state)); in ixgbe_fdir_reinit_subtask()
6326 set_check_for_tx_hang(adapter->tx_ring[i]); in ixgbe_check_hang_subtask()
6543 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; in ixgbe_ring_tx_pending() local
6545 if (tx_ring->next_to_use != tx_ring->next_to_clean) in ixgbe_ring_tx_pending()
6935 static int ixgbe_tso(struct ixgbe_ring *tx_ring, in ixgbe_tso() argument
6996 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, in ixgbe_tso()
7002 static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, in ixgbe_tx_csum() argument
7053 dev_warn(tx_ring->dev, in ixgbe_tx_csum()
7076 dev_warn(tx_ring->dev, in ixgbe_tx_csum()
7090 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, in ixgbe_tx_csum()
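
Both ixgbe_tso() and ixgbe_tx_csum() end by handing a packed vlan_macip_lens word to ixgbe_tx_ctxtdesc(): IP header length in the low bits, MAC header length above it, VLAN tag in the high half. An illustrative userspace sketch of the packing; the shift values follow ixgbe's IXGBE_ADVTXD_MACLEN_SHIFT/IXGBE_TX_FLAGS_VLAN_SHIFT layout but should be treated as assumptions here:

    #include <stdint.h>
    #include <stdio.h>

    #define MACLEN_SHIFT 9    /* assumed: IXGBE_ADVTXD_MACLEN_SHIFT */
    #define VLAN_SHIFT   16   /* assumed: IXGBE_TX_FLAGS_VLAN_SHIFT */

    int main(void)
    {
        uint32_t ip_hlen = 20, mac_hlen = 14, vlan_tag = 0x0123;

        /* Pack three fields into one 32-bit context-descriptor word. */
        uint32_t vlan_macip_lens = ip_hlen |
                                   (mac_hlen << MACLEN_SHIFT) |
                                   (vlan_tag << VLAN_SHIFT);

        printf("vlan_macip_lens = 0x%08x\n", vlan_macip_lens);
        return 0;
    }
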
7150 static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) in __ixgbe_maybe_stop_tx() argument
7152 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __ixgbe_maybe_stop_tx()
7163 if (likely(ixgbe_desc_unused(tx_ring) < size)) in __ixgbe_maybe_stop_tx()
7167 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __ixgbe_maybe_stop_tx()
7168 ++tx_ring->tx_stats.restart_queue; in __ixgbe_maybe_stop_tx()
7172 static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) in ixgbe_maybe_stop_tx() argument
7174 if (likely(ixgbe_desc_unused(tx_ring) >= size)) in ixgbe_maybe_stop_tx()
7177 return __ixgbe_maybe_stop_tx(tx_ring, size); in ixgbe_maybe_stop_tx()
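
The __ixgbe_maybe_stop_tx()/ixgbe_maybe_stop_tx() pair is the standard lockless stop/recheck/wake pattern: stop the subqueue, then re-read the free-descriptor count, and restart if the cleanup path freed space in the meantime. Below is the pair with the lines elided from the listing filled in; the smp_mb() and the return paths follow the stock driver, but treat it as a sketch rather than a verbatim quote:

    static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
    {
        netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

        /* Make the stopped state visible before re-reading the ring
         * indices that the cleanup path updates. */
        smp_mb();

        /* Re-check: another CPU may have freed descriptors between the
         * first check and the stop. */
        if (likely(ixgbe_desc_unused(tx_ring) < size))
            return -EBUSY;               /* still full: stay stopped */

        /* A reprieve: restart without rescheduling the queue. */
        netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
        ++tx_ring->tx_stats.restart_queue;
        return 0;
    }

    static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
    {
        if (likely(ixgbe_desc_unused(tx_ring) >= size))
            return 0;                    /* fast path: enough free slots */
        return __ixgbe_maybe_stop_tx(tx_ring, size);
    }
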
7183 static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, in ixgbe_tx_map() argument
7195 u16 i = tx_ring->next_to_use; in ixgbe_tx_map()
7197 tx_desc = IXGBE_TX_DESC(tx_ring, i); in ixgbe_tx_map()
7215 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in ixgbe_tx_map()
7220 if (dma_mapping_error(tx_ring->dev, dma)) in ixgbe_tx_map()
7235 if (i == tx_ring->count) { in ixgbe_tx_map()
7236 tx_desc = IXGBE_TX_DESC(tx_ring, 0); in ixgbe_tx_map()
7254 if (i == tx_ring->count) { in ixgbe_tx_map()
7255 tx_desc = IXGBE_TX_DESC(tx_ring, 0); in ixgbe_tx_map()
7267 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in ixgbe_tx_map()
7270 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbe_tx_map()
7277 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in ixgbe_tx_map()
7296 if (i == tx_ring->count) in ixgbe_tx_map()
7299 tx_ring->next_to_use = i; in ixgbe_tx_map()
7301 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); in ixgbe_tx_map()
7303 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { in ixgbe_tx_map()
7304 writel(i, tx_ring->tail); in ixgbe_tx_map()
7314 dev_err(tx_ring->dev, "TX DMA map failed\n"); in ixgbe_tx_map()
7318 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbe_tx_map()
7319 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); in ixgbe_tx_map()
7323 i = tx_ring->count; in ixgbe_tx_map()
7327 tx_ring->next_to_use = i; in ixgbe_tx_map()
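
The tail of ixgbe_tx_map() (7299-7304) defers the doorbell: next_to_use is advanced and room is reserved for the next frame, but the tail register is written only when the queue has stopped or the stack signals no more packets are coming (!skb->xmit_more), batching MMIO writes across a transmit burst. The same lines as a commented fragment; ordering of descriptor writes before the MMIO write is handled in the full source:

    tx_ring->next_to_use = i;

    /* Reserve worst-case room for the next frame before returning. */
    ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);

    if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
        /* No more frames coming (or the queue just stopped): ring the
         * doorbell so the NIC starts fetching the new descriptors. */
        writel(i, tx_ring->tail);
    }
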
7493 struct ixgbe_ring *tx_ring) in ixgbe_xmit_frame_ring() argument
7513 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) { in ixgbe_xmit_frame_ring()
7514 tx_ring->tx_stats.tx_busy++; in ixgbe_xmit_frame_ring()
7519 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in ixgbe_xmit_frame_ring()
7592 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) { in ixgbe_xmit_frame_ring()
7593 tso = ixgbe_fso(tx_ring, first, &hdr_len); in ixgbe_xmit_frame_ring()
7601 tso = ixgbe_tso(tx_ring, first, &hdr_len); in ixgbe_xmit_frame_ring()
7605 ixgbe_tx_csum(tx_ring, first); in ixgbe_xmit_frame_ring()
7608 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) in ixgbe_xmit_frame_ring()
7609 ixgbe_atr(tx_ring, first); in ixgbe_xmit_frame_ring()
7614 ixgbe_tx_map(tx_ring, first, hdr_len); in ixgbe_xmit_frame_ring()
7630 struct ixgbe_ring *tx_ring; in __ixgbe_xmit_frame() local
7639 tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping]; in __ixgbe_xmit_frame()
7641 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); in __ixgbe_xmit_frame()
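
Two small points from the transmit entry path: __ixgbe_xmit_frame() uses an explicitly supplied ring when there is one (7639), otherwise indexing by skb->queue_mapping, and ixgbe_xmit_frame_ring() reserves count + 3 descriptors up front (7513). The +3 follows the driver's own accounting: one slot for a possible context descriptor plus a two-descriptor gap that keeps tail from touching head. As a commented fragment:

    /* Explicit ring (e.g. an offload ring) wins over the skb's queue. */
    tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];

    /* Admission check: count covers head + fragments; +3 = context
     * descriptor slot + two-descriptor tail/head gap. */
    if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
        tx_ring->tx_stats.tx_busy++;     /* counted; frame is not dropped */
        return NETDEV_TX_BUSY;           /* stack requeues and retries */
    }
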
7806 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); in ixgbe_get_stats64()
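
Line 7806 closes the loop on the stats handling: ixgbe_clean_tx_irq() updates bytes/packets under u64_stats_update_begin/end (1183-1186), and ixgbe_get_stats64() reads the ring pointer once (ACCESS_ONCE here, READ_ONCE in later kernels, since the ring array can be repopulated at runtime) and snapshots the counters under the fetch/retry loop so 32-bit readers see a consistent pair. A kernel-style sketch of the read side; some kernel versions use the _irq fetch variants:

    /* Reader pairing for the u64_stats writer in ixgbe_clean_tx_irq(). */
    unsigned int start;
    u64 bytes, packets;

    do {
        start = u64_stats_fetch_begin(&ring->syncp);
        bytes   = ring->stats.bytes;
        packets = ring->stats.packets;
    } while (u64_stats_fetch_retry(&ring->syncp, start));
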