Lines Matching refs:tx_ring (fm10k driver TX path, fm10k_main.c)

784 static int fm10k_tso(struct fm10k_ring *tx_ring, in fm10k_tso() argument
817 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); in fm10k_tso()
823 tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL; in fm10k_tso()
825 netdev_err(tx_ring->netdev, in fm10k_tso()
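fm10k_tso() picks up the next free descriptor with FM10K_TX_DESC(tx_ring, tx_ring->next_to_use) (line 817, and again at line 902 in fm10k_tx_csum()): the descriptor ring is one flat DMA-coherent array, and the macro is plain pointer arithmetic into it. A minimal userspace sketch of that access pattern follows; the descriptor layout and the TX_DESC name are simplified stand-ins, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for a TX descriptor; the real layout differs. */
struct tx_desc_model {
	uint64_t buffer_addr;
	uint16_t buflen;
	uint8_t  flags;
};

struct ring_model {
	void    *desc;          /* descriptor array (DMA-coherent in the driver) */
	uint16_t count;
	uint16_t next_to_use;
};

/* Same idea as FM10K_TX_DESC(R, i): index into the flat array. */
#define TX_DESC(R, i) (&((struct tx_desc_model *)(R)->desc)[i])

int main(void)
{
	struct ring_model ring = { .count = 256, .next_to_use = 42 };
	struct tx_desc_model *d;

	ring.desc = calloc(ring.count, sizeof(struct tx_desc_model));
	if (!ring.desc)
		return 1;

	/* Fetch the descriptor the next transmit will fill, as fm10k_tso()
	 * does before writing its offload fields. */
	d = TX_DESC(&ring, ring.next_to_use);
	d->flags = 0x01;        /* placeholder offload flag */

	printf("descriptor %u sits %td bytes into the ring\n",
	       (unsigned int)ring.next_to_use, (char *)d - (char *)ring.desc);

	free(ring.desc);
	return 0;
}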
830 static void fm10k_tx_csum(struct fm10k_ring *tx_ring, in fm10k_tx_csum() argument
850 dev_warn(tx_ring->dev, in fm10k_tx_csum()
852 tx_ring->tx_stats.csum_err++; in fm10k_tx_csum()
871 dev_warn(tx_ring->dev, in fm10k_tx_csum()
875 tx_ring->tx_stats.csum_err++; in fm10k_tx_csum()
888 dev_warn(tx_ring->dev, in fm10k_tx_csum()
892 tx_ring->tx_stats.csum_err++; in fm10k_tx_csum()
898 tx_ring->tx_stats.csum_good++; in fm10k_tx_csum()
902 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); in fm10k_tx_csum()
929 static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring, in fm10k_tx_desc_push() argument
943 return i == tx_ring->count; in fm10k_tx_desc_push()
946 static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) in __fm10k_maybe_stop_tx() argument
948 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __fm10k_maybe_stop_tx()
954 if (likely(fm10k_desc_unused(tx_ring) < size)) in __fm10k_maybe_stop_tx()
958 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __fm10k_maybe_stop_tx()
959 ++tx_ring->tx_stats.restart_queue; in __fm10k_maybe_stop_tx()
963 static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) in fm10k_maybe_stop_tx() argument
965 if (likely(fm10k_desc_unused(tx_ring) >= size)) in fm10k_maybe_stop_tx()
967 return __fm10k_maybe_stop_tx(tx_ring, size); in fm10k_maybe_stop_tx()
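Lines 946-967 are the standard two-level flow-control pair: fm10k_maybe_stop_tx() only drops into the slow path when free descriptors fall below the requested size, and __fm10k_maybe_stop_tx() stops the subqueue, re-checks after a memory barrier, and restarts the queue if another CPU freed room in the meantime (which is why line 954 tests with '<' again). Below is a userspace sketch of the ring-space arithmetic and that stop/restart decision, assuming the usual next_to_clean/next_to_use accounting; the netif_* calls and smp_mb() are reduced to comments.

#include <stdint.h>
#include <stdio.h>

/* Minimal model of the indices the real fm10k_ring tracks. */
struct ring_model {
	uint16_t count;         /* descriptors in the ring */
	uint16_t next_to_use;   /* producer index          */
	uint16_t next_to_clean; /* consumer index          */
};

/* Free descriptors, always leaving one slot so head never meets tail. */
static uint16_t desc_unused(const struct ring_model *r)
{
	int16_t unused = (int16_t)(r->next_to_clean - r->next_to_use - 1);

	return unused < 0 ? (uint16_t)(unused + r->count) : (uint16_t)unused;
}

/*
 * Slow path: the driver stops the subqueue, issues smp_mb(), re-checks,
 * and restarts the queue if space appeared meanwhile.  Only the decision
 * is modelled: -1 means "stay stopped", 0 means "restarted".
 */
static int slow_maybe_stop(const struct ring_model *r, uint16_t size)
{
	/* netif_stop_subqueue() + smp_mb() would go here */
	if (desc_unused(r) < size)
		return -1;
	/* netif_start_subqueue() + tx_stats.restart_queue++ would go here */
	return 0;
}

/* Fast path: skip the stop/restart dance whenever room already exists. */
static int maybe_stop(const struct ring_model *r, uint16_t size)
{
	if (desc_unused(r) >= size)
		return 0;
	return slow_maybe_stop(r, size);
}

int main(void)
{
	struct ring_model r = { .count = 256, .next_to_use = 250,
				.next_to_clean = 10 };

	printf("unused=%u stop=%d\n", (unsigned int)desc_unused(&r),
	       maybe_stop(&r, 32));
	return 0;
}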
970 static void fm10k_tx_map(struct fm10k_ring *tx_ring, in fm10k_tx_map() argument
981 u16 i = tx_ring->next_to_use; in fm10k_tx_map()
984 tx_desc = FM10K_TX_DESC(tx_ring, i); in fm10k_tx_map()
995 dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE); in fm10k_tx_map()
1001 if (dma_mapping_error(tx_ring->dev, dma)) in fm10k_tx_map()
1009 if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma, in fm10k_tx_map()
1011 tx_desc = FM10K_TX_DESC(tx_ring, 0); in fm10k_tx_map()
1022 if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, in fm10k_tx_map()
1024 tx_desc = FM10K_TX_DESC(tx_ring, 0); in fm10k_tx_map()
1031 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in fm10k_tx_map()
1034 tx_buffer = &tx_ring->tx_buffer[i]; in fm10k_tx_map()
1040 if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags)) in fm10k_tx_map()
1044 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in fm10k_tx_map()
1061 tx_ring->next_to_use = i; in fm10k_tx_map()
1064 fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED); in fm10k_tx_map()
1067 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { in fm10k_tx_map()
1068 writel(i, tx_ring->tail); in fm10k_tx_map()
1078 dev_err(tx_ring->dev, "TX DMA map failed\n"); in fm10k_tx_map()
1082 tx_buffer = &tx_ring->tx_buffer[i]; in fm10k_tx_map()
1083 fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer); in fm10k_tx_map()
1087 i = tx_ring->count; in fm10k_tx_map()
1091 tx_ring->next_to_use = i; in fm10k_tx_map()
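fm10k_tx_desc_push() (lines 929-943) writes one descriptor and reports whether the advanced index reached tx_ring->count, and fm10k_tx_map() reacts by rewinding both the index and the descriptor pointer to slot 0 (lines 1009-1024, 1040). A compact sketch of that wrap convention, with the DMA mapping stripped out; desc_model and push_desc are illustrative names, not the driver's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_COUNT 8

/* Stand-in for struct fm10k_tx_desc; only the shape matters here. */
struct desc_model {
	uint64_t addr;
	uint32_t len;
};

/* Write one descriptor; report whether the caller must wrap to slot 0. */
static bool push_desc(struct desc_model *ring, uint16_t i,
		      uint64_t addr, uint32_t len)
{
	ring[i].addr = addr;
	ring[i].len = len;
	return (uint16_t)(i + 1) == RING_COUNT;  /* mirrors i == tx_ring->count */
}

int main(void)
{
	struct desc_model ring[RING_COUNT] = {{ 0 }};
	uint16_t i = 6;                          /* pretend next_to_use == 6 */

	for (int n = 0; n < 4; n++) {
		if (push_desc(ring, i++, 0x1000u * (unsigned int)n, 1500))
			i = 0;                   /* rewind, as fm10k_tx_map() does */
	}
	printf("next_to_use would now be %u\n",  /* slots 6, 7, wrap, 0, 1 -> 2 */
	       (unsigned int)i);
	return 0;
}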
1095 struct fm10k_ring *tx_ring) in fm10k_xmit_frame_ring() argument
1111 if (fm10k_maybe_stop_tx(tx_ring, count + 3)) { in fm10k_xmit_frame_ring()
1112 tx_ring->tx_stats.tx_busy++; in fm10k_xmit_frame_ring()
1117 first = &tx_ring->tx_buffer[tx_ring->next_to_use]; in fm10k_xmit_frame_ring()
1125 tso = fm10k_tso(tx_ring, first); in fm10k_xmit_frame_ring()
1129 fm10k_tx_csum(tx_ring, first); in fm10k_xmit_frame_ring()
1131 fm10k_tx_map(tx_ring, first); in fm10k_xmit_frame_ring()
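fm10k_xmit_frame_ring() reserves descriptors up front via fm10k_maybe_stop_tx(tx_ring, count + 3) (line 1111), where count is the number of data descriptors the skb will need and the extra three leave headroom beyond them (in this family of Intel drivers, a context descriptor plus a small gap so the tail never touches the head). A sketch of that descriptor-count estimate, using 16 KiB as an assumed stand-in for the per-descriptor payload limit FM10K_MAX_DATA_PER_TXD.

#include <stdio.h>

/* Assumed per-descriptor payload limit (the driver's FM10K_MAX_DATA_PER_TXD);
 * 16 KiB is used here purely as a stand-in value. */
#define MAX_DATA_PER_TXD 16384u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Descriptors needed to carry one contiguous buffer of 'len' bytes. */
static unsigned int txd_use_count(unsigned int len)
{
	return DIV_ROUND_UP(len, MAX_DATA_PER_TXD);
}

int main(void)
{
	/* Hypothetical skb: a 256-byte linear area plus two page fragments. */
	unsigned int frag_len[] = { 256, 4096, 32768 };
	unsigned int count = 0;

	for (unsigned int i = 0; i < sizeof(frag_len) / sizeof(frag_len[0]); i++)
		count += txd_use_count(frag_len[i]);

	/* fm10k_xmit_frame_ring() asks fm10k_maybe_stop_tx() for count + 3:
	 * the data descriptors plus headroom for a context descriptor and a
	 * gap that keeps the tail from touching the head. */
	printf("data descriptors: %u, reserved: %u\n", count, count + 3);
	return 0;
}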
1156 bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring) in fm10k_check_tx_hang() argument
1158 u32 tx_done = fm10k_get_tx_completed(tx_ring); in fm10k_check_tx_hang()
1159 u32 tx_done_old = tx_ring->tx_stats.tx_done_old; in fm10k_check_tx_hang()
1160 u32 tx_pending = fm10k_get_tx_pending(tx_ring); in fm10k_check_tx_hang()
1162 clear_check_for_tx_hang(tx_ring); in fm10k_check_tx_hang()
1174 tx_ring->tx_stats.tx_done_old = tx_done; in fm10k_check_tx_hang()
1176 clear_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state); in fm10k_check_tx_hang()
1182 return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state); in fm10k_check_tx_hang()
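fm10k_check_tx_hang() (lines 1156-1182) only reports a hang when no completion progress is seen for two checks in a row while work is still pending: the first stalled check merely arms __FM10K_HANG_CHECK_ARMED, and any progress refreshes tx_done_old and disarms it. A userspace model of that two-strike logic, with the atomic test_and_set_bit() reduced to a plain flag.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal model of the per-ring state fm10k_check_tx_hang() consults. */
struct hang_model {
	uint32_t tx_done_old;  /* completions seen at the previous check */
	bool     armed;        /* models the __FM10K_HANG_CHECK_ARMED bit */
};

/* Report a hang only on the second consecutive stalled check. */
static bool check_tx_hang(struct hang_model *m, uint32_t tx_done,
			  uint32_t tx_pending)
{
	if (m->tx_done_old == tx_done && tx_pending) {
		bool was_armed = m->armed;   /* test_and_set_bit() analogue */

		m->armed = true;
		return was_armed;
	}

	m->tx_done_old = tx_done;            /* progress: remember and disarm */
	m->armed = false;
	return false;
}

int main(void)
{
	struct hang_model m = { 0 };

	printf("%d\n", check_tx_hang(&m, 10, 4));  /* progress seen -> 0    */
	printf("%d\n", check_tx_hang(&m, 10, 4));  /* stalled, arms -> 0    */
	printf("%d\n", check_tx_hang(&m, 10, 4));  /* still stalled -> 1    */
	return 0;
}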
1205 struct fm10k_ring *tx_ring) in fm10k_clean_tx_irq() argument
1212 unsigned int i = tx_ring->next_to_clean; in fm10k_clean_tx_irq()
1217 tx_buffer = &tx_ring->tx_buffer[i]; in fm10k_clean_tx_irq()
1218 tx_desc = FM10K_TX_DESC(tx_ring, i); in fm10k_clean_tx_irq()
1219 i -= tx_ring->count; in fm10k_clean_tx_irq()
1246 dma_unmap_single(tx_ring->dev, in fm10k_clean_tx_irq()
1261 i -= tx_ring->count; in fm10k_clean_tx_irq()
1262 tx_buffer = tx_ring->tx_buffer; in fm10k_clean_tx_irq()
1263 tx_desc = FM10K_TX_DESC(tx_ring, 0); in fm10k_clean_tx_irq()
1268 dma_unmap_page(tx_ring->dev, in fm10k_clean_tx_irq()
1281 i -= tx_ring->count; in fm10k_clean_tx_irq()
1282 tx_buffer = tx_ring->tx_buffer; in fm10k_clean_tx_irq()
1283 tx_desc = FM10K_TX_DESC(tx_ring, 0); in fm10k_clean_tx_irq()
1293 i += tx_ring->count; in fm10k_clean_tx_irq()
1294 tx_ring->next_to_clean = i; in fm10k_clean_tx_irq()
1295 u64_stats_update_begin(&tx_ring->syncp); in fm10k_clean_tx_irq()
1296 tx_ring->stats.bytes += total_bytes; in fm10k_clean_tx_irq()
1297 tx_ring->stats.packets += total_packets; in fm10k_clean_tx_irq()
1298 u64_stats_update_end(&tx_ring->syncp); in fm10k_clean_tx_irq()
1302 if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) { in fm10k_clean_tx_irq()
1306 netif_err(interface, drv, tx_ring->netdev, in fm10k_clean_tx_irq()
1312 tx_ring->queue_index, in fm10k_clean_tx_irq()
1313 fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)), in fm10k_clean_tx_irq()
1314 fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)), in fm10k_clean_tx_irq()
1315 tx_ring->next_to_use, i); in fm10k_clean_tx_irq()
1317 netif_stop_subqueue(tx_ring->netdev, in fm10k_clean_tx_irq()
1318 tx_ring->queue_index); in fm10k_clean_tx_irq()
1320 netif_info(interface, probe, tx_ring->netdev, in fm10k_clean_tx_irq()
1323 tx_ring->queue_index); in fm10k_clean_tx_irq()
1332 netdev_tx_completed_queue(txring_txq(tx_ring), in fm10k_clean_tx_irq()
1336 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in fm10k_clean_tx_irq()
1337 (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { in fm10k_clean_tx_irq()
1342 if (__netif_subqueue_stopped(tx_ring->netdev, in fm10k_clean_tx_irq()
1343 tx_ring->queue_index) && in fm10k_clean_tx_irq()
1345 netif_wake_subqueue(tx_ring->netdev, in fm10k_clean_tx_irq()
1346 tx_ring->queue_index); in fm10k_clean_tx_irq()
1347 ++tx_ring->tx_stats.restart_queue; in fm10k_clean_tx_irq()
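The cleanup loop in fm10k_clean_tx_irq() biases its index by -count right after loading next_to_clean (line 1219), so the in-loop wrap test is just a zero check, rewinding is another 'i -= tx_ring->count' (lines 1261, 1281), and the bias is removed before next_to_clean is written back (lines 1293-1294). A small sketch of only that indexing trick; unmapping, stats and budget handling are omitted.

#include <stdio.h>

#define RING_COUNT 8u

int main(void)
{
	unsigned int next_to_clean = 6;
	unsigned int i = next_to_clean;

	/* Bias the index by -count so the wrap test is just "!i". */
	i -= RING_COUNT;

	for (unsigned int step = 0; step < 5; step++) {
		printf("step %u cleans slot %u\n", step, i + RING_COUNT);
		i++;
		if (!i) {
			/* Walked past the last slot: rewind to slot 0
			 * (the driver also resets tx_buffer/tx_desc here). */
			i -= RING_COUNT;
		}
	}

	/* Remove the bias before storing it back, as line 1293 does. */
	next_to_clean = i + RING_COUNT;
	printf("next_to_clean = %u\n", next_to_clean);
	return 0;
}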
1618 interface->tx_ring[txr_idx] = ring; in fm10k_alloc_q_vector()
1679 interface->tx_ring[ring->queue_index] = NULL; in fm10k_free_q_vector()
1861 interface->tx_ring[offset + i]->reg_idx = q_idx; in fm10k_cache_ring_qos()
1862 interface->tx_ring[offset + i]->qos_pc = pc; in fm10k_cache_ring_qos()
1886 interface->tx_ring[i]->reg_idx = i; in fm10k_cache_ring_rss()
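The final lines leave the datapath: fm10k_alloc_q_vector()/fm10k_free_q_vector() register and clear each ring in interface->tx_ring[], and the cache_ring helpers then bind a software ring to a hardware queue through reg_idx (plus a traffic class in qos_pc under QoS); with plain RSS the mapping is the identity (line 1886). The sketch below shows the identity case next to one possible interleaved QoS layout; the QoS stride is purely illustrative and not necessarily the driver's exact scheme.

#include <stdio.h>

#define NUM_RINGS 8

struct ring_model {
	int queue_index;  /* software queue number                    */
	int reg_idx;      /* hardware descriptor queue it is bound to */
	int qos_pc;       /* traffic class ("priority class")         */
};

int main(void)
{
	struct ring_model tx_ring[NUM_RINGS];

	/* RSS only: software queue i drives hardware queue i (line 1886). */
	for (int i = 0; i < NUM_RINGS; i++)
		tx_ring[i] = (struct ring_model){ i, i, 0 };

	/* QoS example: 2 traffic classes with 4 RSS queues each.  Hardware
	 * indices are interleaved by class here purely for illustration. */
	int num_pcs = 2, rss_i = NUM_RINGS / num_pcs;

	for (int pc = 0; pc < num_pcs; pc++)
		for (int i = 0; i < rss_i; i++) {
			struct ring_model *r = &tx_ring[pc * rss_i + i];

			r->reg_idx = pc + i * num_pcs;  /* illustrative stride */
			r->qos_pc = pc;
		}

	for (int i = 0; i < NUM_RINGS; i++)
		printf("sw queue %d -> hw queue %d (pc %d)\n",
		       i, tx_ring[i].reg_idx, tx_ring[i].qos_pc);
	return 0;
}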