Lines matching refs:tx_ring
793 static int fm10k_tso(struct fm10k_ring *tx_ring, in fm10k_tso() argument
826 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); in fm10k_tso()
832 tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL; in fm10k_tso()
834 netdev_err(tx_ring->netdev, in fm10k_tso()
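The fm10k_tso() references above are the TSO setup and its tunnel fallback: header length and mss are written into the descriptor at the current producer slot, and when the hardware cannot parse the requested tunnel encapsulation the driver clears NETIF_F_GSO_UDP_TUNNEL from the netdev and logs a rate-limited error. A hedged sketch of that tail of the function, assuming the descriptor's hdrlen/mss fields and a header length hdrlen computed earlier in the function:

        /* populate descriptor header size and mss at the producer slot */
        tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
        tx_desc->hdrlen = hdrlen;
        tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);

        return 1;

err_vxlan:
        /* hardware cannot segment this tunnel: stop advertising the feature */
        tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
        if (net_ratelimit())
                netdev_err(tx_ring->netdev,
                           "TSO requested for unsupported tunnel, disabling offload\n");
        return -1;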
839 static void fm10k_tx_csum(struct fm10k_ring *tx_ring, in fm10k_tx_csum() argument
859 dev_warn(tx_ring->dev, in fm10k_tx_csum()
861 tx_ring->tx_stats.csum_err++; in fm10k_tx_csum()
880 dev_warn(tx_ring->dev, in fm10k_tx_csum()
884 tx_ring->tx_stats.csum_err++; in fm10k_tx_csum()
897 dev_warn(tx_ring->dev, in fm10k_tx_csum()
901 tx_ring->tx_stats.csum_err++; in fm10k_tx_csum()
910 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); in fm10k_tx_csum()
937 static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring, in fm10k_tx_desc_push() argument
951 return i == tx_ring->count; in fm10k_tx_desc_push()
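fm10k_tx_desc_push() writes one descriptor and, per line 951, returns whether that write just filled the last slot so the caller knows to wrap its descriptor pointer back to slot 0. A sketch of the helper, assuming the driver's FM10K_TX_DESC_FLUSH constant, the FM10K_TXD_FLAG_* bits, and the buffer_addr/flags/buflen descriptor fields:

static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
                               struct fm10k_tx_desc *tx_desc, u16 i,
                               dma_addr_t dma, unsigned int size, u8 desc_flags)
{
        /* request writeback and an interrupt once per group of descriptors */
        if (!(++i & (FM10K_TX_DESC_FLUSH - 1)))
                desc_flags |= FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_INT;

        /* record buffer address, flags and length in the descriptor */
        tx_desc->buffer_addr = cpu_to_le64(dma);
        tx_desc->flags = desc_flags;
        tx_desc->buflen = cpu_to_le16(size);

        /* true when this descriptor filled the last slot in the ring */
        return i == tx_ring->count;
}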
954 static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) in __fm10k_maybe_stop_tx() argument
956 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __fm10k_maybe_stop_tx()
962 if (likely(fm10k_desc_unused(tx_ring) < size)) in __fm10k_maybe_stop_tx()
966 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __fm10k_maybe_stop_tx()
967 ++tx_ring->tx_stats.restart_queue; in __fm10k_maybe_stop_tx()
971 static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) in fm10k_maybe_stop_tx() argument
973 if (likely(fm10k_desc_unused(tx_ring) >= size)) in fm10k_maybe_stop_tx()
975 return __fm10k_maybe_stop_tx(tx_ring, size); in fm10k_maybe_stop_tx()
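Together, lines 954–975 are the standard stop-then-recheck transmit flow control: the inline fast path does nothing while descriptors are plentiful, and the slow path stops the subqueue, rechecks free descriptors after a barrier, and restarts the queue if the cleanup path freed space in the meantime. Reconstructed from the lines above; the comments and barrier placement follow the usual pattern rather than the driver's exact text:

static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
        netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

        /* make the stopped state visible before re-reading the ring state */
        smp_mb();

        /* still short on descriptors: stay stopped and report busy */
        if (likely(fm10k_desc_unused(tx_ring) < size))
                return -EBUSY;

        /* a reprieve: cleanup freed descriptors, resume the queue */
        netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
        ++tx_ring->tx_stats.restart_queue;
        return 0;
}

static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
        /* fast path: plenty of room, avoid the stop/restart dance */
        if (likely(fm10k_desc_unused(tx_ring) >= size))
                return 0;
        return __fm10k_maybe_stop_tx(tx_ring, size);
}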
978 static void fm10k_tx_map(struct fm10k_ring *tx_ring, in fm10k_tx_map() argument
989 u16 i = tx_ring->next_to_use; in fm10k_tx_map()
992 tx_desc = FM10K_TX_DESC(tx_ring, i); in fm10k_tx_map()
1003 dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE); in fm10k_tx_map()
1009 if (dma_mapping_error(tx_ring->dev, dma)) in fm10k_tx_map()
1017 if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma, in fm10k_tx_map()
1019 tx_desc = FM10K_TX_DESC(tx_ring, 0); in fm10k_tx_map()
1030 if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, in fm10k_tx_map()
1032 tx_desc = FM10K_TX_DESC(tx_ring, 0); in fm10k_tx_map()
1039 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in fm10k_tx_map()
1042 tx_buffer = &tx_ring->tx_buffer[i]; in fm10k_tx_map()
1048 if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags)) in fm10k_tx_map()
1052 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in fm10k_tx_map()
1069 tx_ring->next_to_use = i; in fm10k_tx_map()
1072 fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED); in fm10k_tx_map()
1075 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { in fm10k_tx_map()
1076 writel(i, tx_ring->tail); in fm10k_tx_map()
1086 dev_err(tx_ring->dev, "TX DMA map failed\n"); in fm10k_tx_map()
1090 tx_buffer = &tx_ring->tx_buffer[i]; in fm10k_tx_map()
1091 fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer); in fm10k_tx_map()
1095 i = tx_ring->count; in fm10k_tx_map()
1099 tx_ring->next_to_use = i; in fm10k_tx_map()
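Lines 1069–1076 and the dma_error path at 1086–1099 are the two ways fm10k_tx_map() ends. On success the new producer index is stored in next_to_use, the queue is stopped early if the next frame might not fit, and the tail register is written only when the stack is not batching further frames (xmit_more) or the queue is already stopped. On a mapping failure the loop walks backwards and releases every buffer mapped so far. A sketch of both endings, with the write barrier and next_to_watch bookkeeping that precedes them left out:

        /* publish the new producer index */
        tx_ring->next_to_use = i;

        /* stop the queue now if the next frame might not fit */
        fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);

        /* ring the doorbell only when the stack is done batching frames */
        if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more)
                writel(i, tx_ring->tail);

        return;

dma_error:
        dev_err(tx_ring->dev, "TX DMA map failed\n");

        /* walk back over the buffers already mapped and release them */
        for (;;) {
                tx_buffer = &tx_ring->tx_buffer[i];
                fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
                if (tx_buffer == first)
                        break;
                if (i == 0)
                        i = tx_ring->count;
                i--;
        }

        tx_ring->next_to_use = i;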
1103 struct fm10k_ring *tx_ring) in fm10k_xmit_frame_ring() argument
1124 if (fm10k_maybe_stop_tx(tx_ring, count + 3)) { in fm10k_xmit_frame_ring()
1125 tx_ring->tx_stats.tx_busy++; in fm10k_xmit_frame_ring()
1130 first = &tx_ring->tx_buffer[tx_ring->next_to_use]; in fm10k_xmit_frame_ring()
1138 tso = fm10k_tso(tx_ring, first); in fm10k_xmit_frame_ring()
1142 fm10k_tx_csum(tx_ring, first); in fm10k_xmit_frame_ring()
1144 fm10k_tx_map(tx_ring, first); in fm10k_xmit_frame_ring()
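fm10k_xmit_frame_ring() shows the per-frame ordering on the transmit path: reserve descriptors (or report busy), record the skb in the first buffer slot, attempt segmentation offload, fall back to checksum offload when no TSO is needed, then map and post the frame. A hedged sketch of that core, assuming count was derived from the skb's fragment layout earlier in the function and omitting the VLAN/flag bookkeeping:

        /* not enough descriptors for this frame plus slack: try again later */
        if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
                tx_ring->tx_stats.tx_busy++;
                return NETDEV_TX_BUSY;
        }

        /* record the skb in the first buffer slot for this frame */
        first = &tx_ring->tx_buffer[tx_ring->next_to_use];
        first->skb = skb;
        first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
        first->gso_segs = 1;

        /* segmentation offload first, checksum offload as the fallback */
        tso = fm10k_tso(tx_ring, first);
        if (tso < 0)
                goto out_drop;
        else if (!tso)
                fm10k_tx_csum(tx_ring, first);

        fm10k_tx_map(tx_ring, first);

        return NETDEV_TX_OK;

out_drop:
        dev_kfree_skb_any(first->skb);
        first->skb = NULL;
        return NETDEV_TX_OK;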
1169 bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring) in fm10k_check_tx_hang() argument
1171 u32 tx_done = fm10k_get_tx_completed(tx_ring); in fm10k_check_tx_hang()
1172 u32 tx_done_old = tx_ring->tx_stats.tx_done_old; in fm10k_check_tx_hang()
1173 u32 tx_pending = fm10k_get_tx_pending(tx_ring); in fm10k_check_tx_hang()
1175 clear_check_for_tx_hang(tx_ring); in fm10k_check_tx_hang()
1187 tx_ring->tx_stats.tx_done_old = tx_done; in fm10k_check_tx_hang()
1189 clear_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state); in fm10k_check_tx_hang()
1195 return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state); in fm10k_check_tx_hang()
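fm10k_check_tx_hang() only reports a hang when nothing has completed since the previous check, work is still pending, and the same condition was already seen once before (the ARMED bit), which filters out races with completions that are merely in flight. Reconstructed from lines 1171–1195; the exact predicate is an assumption on my part:

bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
{
        u32 tx_done = fm10k_get_tx_completed(tx_ring);
        u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
        u32 tx_pending = fm10k_get_tx_pending(tx_ring);

        clear_check_for_tx_hang(tx_ring);

        /* progress was made, or nothing is pending: not a hang */
        if (!tx_pending || (tx_done_old != tx_done)) {
                tx_ring->tx_stats.tx_done_old = tx_done;
                clear_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);
                return false;
        }

        /* only report a hang on the second consecutive stalled check */
        return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);
}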
1218 struct fm10k_ring *tx_ring) in fm10k_clean_tx_irq() argument
1225 unsigned int i = tx_ring->next_to_clean; in fm10k_clean_tx_irq()
1230 tx_buffer = &tx_ring->tx_buffer[i]; in fm10k_clean_tx_irq()
1231 tx_desc = FM10K_TX_DESC(tx_ring, i); in fm10k_clean_tx_irq()
1232 i -= tx_ring->count; in fm10k_clean_tx_irq()
1259 dma_unmap_single(tx_ring->dev, in fm10k_clean_tx_irq()
1274 i -= tx_ring->count; in fm10k_clean_tx_irq()
1275 tx_buffer = tx_ring->tx_buffer; in fm10k_clean_tx_irq()
1276 tx_desc = FM10K_TX_DESC(tx_ring, 0); in fm10k_clean_tx_irq()
1281 dma_unmap_page(tx_ring->dev, in fm10k_clean_tx_irq()
1294 i -= tx_ring->count; in fm10k_clean_tx_irq()
1295 tx_buffer = tx_ring->tx_buffer; in fm10k_clean_tx_irq()
1296 tx_desc = FM10K_TX_DESC(tx_ring, 0); in fm10k_clean_tx_irq()
1306 i += tx_ring->count; in fm10k_clean_tx_irq()
1307 tx_ring->next_to_clean = i; in fm10k_clean_tx_irq()
1308 u64_stats_update_begin(&tx_ring->syncp); in fm10k_clean_tx_irq()
1309 tx_ring->stats.bytes += total_bytes; in fm10k_clean_tx_irq()
1310 tx_ring->stats.packets += total_packets; in fm10k_clean_tx_irq()
1311 u64_stats_update_end(&tx_ring->syncp); in fm10k_clean_tx_irq()
1315 if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) { in fm10k_clean_tx_irq()
1319 netif_err(interface, drv, tx_ring->netdev, in fm10k_clean_tx_irq()
1325 tx_ring->queue_index, in fm10k_clean_tx_irq()
1326 fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)), in fm10k_clean_tx_irq()
1327 fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)), in fm10k_clean_tx_irq()
1328 tx_ring->next_to_use, i); in fm10k_clean_tx_irq()
1330 netif_stop_subqueue(tx_ring->netdev, in fm10k_clean_tx_irq()
1331 tx_ring->queue_index); in fm10k_clean_tx_irq()
1333 netif_info(interface, probe, tx_ring->netdev, in fm10k_clean_tx_irq()
1336 tx_ring->queue_index); in fm10k_clean_tx_irq()
1345 netdev_tx_completed_queue(txring_txq(tx_ring), in fm10k_clean_tx_irq()
1349 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in fm10k_clean_tx_irq()
1350 (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { in fm10k_clean_tx_irq()
1355 if (__netif_subqueue_stopped(tx_ring->netdev, in fm10k_clean_tx_irq()
1356 tx_ring->queue_index) && in fm10k_clean_tx_irq()
1358 netif_wake_subqueue(tx_ring->netdev, in fm10k_clean_tx_irq()
1359 tx_ring->queue_index); in fm10k_clean_tx_irq()
1360 ++tx_ring->tx_stats.restart_queue; in fm10k_clean_tx_irq()
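The tail of fm10k_clean_tx_irq() publishes the totals under the ring's u64_stats sequence counter, reports the completed work to byte queue limits, and wakes a stopped subqueue once enough descriptors are free again. A sketch of that sequence based on lines 1307–1360, with the hang-reporting block at 1315–1336 omitted; the __FM10K_DOWN check and the barrier placement are assumptions:

        tx_ring->next_to_clean = i;

        /* publish byte/packet totals under the u64_stats sequence counter */
        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->stats.bytes += total_bytes;
        tx_ring->stats.packets += total_packets;
        u64_stats_update_end(&tx_ring->syncp);

        /* let byte queue limits size the queue from the completed work */
        netdev_tx_completed_queue(txring_txq(tx_ring),
                                  total_packets, total_bytes);

        /* wake a stopped subqueue once enough descriptors are free again */
        if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
                     (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* make the new next_to_clean visible before the stopped check */
                smp_mb();
                if (__netif_subqueue_stopped(tx_ring->netdev,
                                             tx_ring->queue_index) &&
                    !test_bit(__FM10K_DOWN, &interface->state)) {
                        netif_wake_subqueue(tx_ring->netdev,
                                            tx_ring->queue_index);
                        ++tx_ring->tx_stats.restart_queue;
                }
        }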
1628 interface->tx_ring[txr_idx] = ring; in fm10k_alloc_q_vector()
1689 interface->tx_ring[ring->queue_index] = NULL; in fm10k_free_q_vector()
1871 interface->tx_ring[offset + i]->reg_idx = q_idx; in fm10k_cache_ring_qos()
1872 interface->tx_ring[offset + i]->qos_pc = pc; in fm10k_cache_ring_qos()
1896 interface->tx_ring[i]->reg_idx = i; in fm10k_cache_ring_rss()
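The last four references show how each fm10k_ring is wired to hardware: fm10k_alloc_q_vector() publishes the ring in interface->tx_ring[], fm10k_free_q_vector() clears that slot, and the cache_ring helpers record which hardware queue (reg_idx) and, for QoS, which traffic class (qos_pc) a ring maps to. In the plain RSS case the mapping is 1:1; a hedged sketch, assuming the interface also carries rx_ring[] and num_tx/rx_queues counters:

static void fm10k_cache_ring_rss(struct fm10k_intfc *interface)
{
        int i;

        /* with plain RSS, ring i drives hardware queue i */
        for (i = 0; i < interface->num_rx_queues; i++)
                interface->rx_ring[i]->reg_idx = i;

        for (i = 0; i < interface->num_tx_queues; i++)
                interface->tx_ring[i]->reg_idx = i;
}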