Lines Matching refs:tx_ring

57 struct i40e_ring *tx_ring; in i40e_program_fdir_filter() local
74 tx_ring = vsi->tx_rings[0]; in i40e_program_fdir_filter()
75 dev = tx_ring->dev; in i40e_program_fdir_filter()
79 if (I40E_DESC_UNUSED(tx_ring) > 1) in i40e_program_fdir_filter()
85 if (!(I40E_DESC_UNUSED(tx_ring) > 1)) in i40e_program_fdir_filter()
94 i = tx_ring->next_to_use; in i40e_program_fdir_filter()
95 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); in i40e_program_fdir_filter()
96 first = &tx_ring->tx_bi[i]; in i40e_program_fdir_filter()
99 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0; in i40e_program_fdir_filter()
147 i = tx_ring->next_to_use; in i40e_program_fdir_filter()
148 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_program_fdir_filter()
149 tx_buf = &tx_ring->tx_bi[i]; in i40e_program_fdir_filter()
151 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0; in i40e_program_fdir_filter()
179 writel(tx_ring->next_to_use, tx_ring->tail); in i40e_program_fdir_filter()
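
The references above show the slot-advance idiom used when programming a flow-director filter: grab next_to_use, advance it with an explicit wrap to 0, and finally write the new value to the tail register. Below is a minimal user-space sketch of that idiom; struct ring_model and ring_take_slot() are illustrative names, not part of the driver.

	#include <stdio.h>

	struct ring_model {
		unsigned int count;		/* descriptors in the ring */
		unsigned int next_to_use;	/* slot software fills next */
	};

	/* Take the current slot and advance next_to_use with wrap-around,
	 * mirroring: tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
	 */
	static unsigned int ring_take_slot(struct ring_model *r)
	{
		unsigned int i = r->next_to_use;

		r->next_to_use = ((i + 1) < r->count) ? i + 1 : 0;
		return i;
	}

	int main(void)
	{
		struct ring_model r = { .count = 4, .next_to_use = 2 };

		/* Two slots are consumed per filter (FDIR descriptor + data
		 * descriptor), which is why the function above first checks
		 * I40E_DESC_UNUSED(tx_ring) > 1.
		 */
		printf("fdir desc at slot %u\n", ring_take_slot(&r));
		printf("data desc at slot %u\n", ring_take_slot(&r));
		printf("next_to_use is now %u\n", r.next_to_use);	/* wrapped to 0 */
		/* The driver then bumps the hardware tail:
		 * writel(tx_ring->next_to_use, tx_ring->tail);
		 */
		return 0;
	}
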
555 void i40e_clean_tx_ring(struct i40e_ring *tx_ring) in i40e_clean_tx_ring() argument
561 if (!tx_ring->tx_bi) in i40e_clean_tx_ring()
565 for (i = 0; i < tx_ring->count; i++) in i40e_clean_tx_ring()
566 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); in i40e_clean_tx_ring()
568 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40e_clean_tx_ring()
569 memset(tx_ring->tx_bi, 0, bi_size); in i40e_clean_tx_ring()
572 memset(tx_ring->desc, 0, tx_ring->size); in i40e_clean_tx_ring()
574 tx_ring->next_to_use = 0; in i40e_clean_tx_ring()
575 tx_ring->next_to_clean = 0; in i40e_clean_tx_ring()
577 if (!tx_ring->netdev) in i40e_clean_tx_ring()
581 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, in i40e_clean_tx_ring()
582 tx_ring->queue_index)); in i40e_clean_tx_ring()
591 void i40e_free_tx_resources(struct i40e_ring *tx_ring) in i40e_free_tx_resources() argument
593 i40e_clean_tx_ring(tx_ring); in i40e_free_tx_resources()
594 kfree(tx_ring->tx_bi); in i40e_free_tx_resources()
595 tx_ring->tx_bi = NULL; in i40e_free_tx_resources()
597 if (tx_ring->desc) { in i40e_free_tx_resources()
598 dma_free_coherent(tx_ring->dev, tx_ring->size, in i40e_free_tx_resources()
599 tx_ring->desc, tx_ring->dma); in i40e_free_tx_resources()
600 tx_ring->desc = NULL; in i40e_free_tx_resources()
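
i40e_clean_tx_ring() releases every outstanding buffer and resets the ring's bookkeeping, and i40e_free_tx_resources() calls it before giving back the tx_bi array and the DMA-coherent descriptor memory. Below is a simplified user-space model of that clean-then-free ordering; malloc/free stand in for kzalloc/dma_alloc_coherent and all names are invented for illustration.

	#include <stdlib.h>
	#include <string.h>

	struct buf_model  { void *data; };	/* stands in for i40e_tx_buffer */
	struct ring_model {
		struct buf_model *bufs;		/* per-descriptor state (tx_bi) */
		void *desc;			/* descriptor memory            */
		size_t size;			/* size of the descriptor area  */
		unsigned int count;
		unsigned int next_to_use, next_to_clean;
	};

	static void ring_clean(struct ring_model *r)
	{
		unsigned int i;

		if (!r->bufs)			/* ring already torn down */
			return;
		for (i = 0; i < r->count; i++) {	/* drop every pending buffer */
			free(r->bufs[i].data);
			r->bufs[i].data = NULL;
		}
		memset(r->bufs, 0, sizeof(*r->bufs) * r->count);
		memset(r->desc, 0, r->size);	/* zero the descriptors too */
		r->next_to_use = 0;
		r->next_to_clean = 0;
	}

	static void ring_free(struct ring_model *r)
	{
		ring_clean(r);			/* always clean before freeing */
		free(r->bufs);
		r->bufs = NULL;
		if (r->desc) {
			free(r->desc);		/* dma_free_coherent() in the driver */
			r->desc = NULL;
		}
	}

	int main(void)
	{
		struct ring_model r = { .count = 8, .size = 4096 };

		r.bufs = calloc(r.count, sizeof(*r.bufs));
		r.desc = calloc(1, r.size);
		ring_free(&r);
		return 0;
	}
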
611 static inline u32 i40e_get_head(struct i40e_ring *tx_ring) in i40e_get_head() argument
613 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; in i40e_get_head()
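
i40e_get_head() relies on the head write-back scheme: the hardware reports the last completed descriptor index in a u32 placed immediately after the final descriptor, which is also why i40e_setup_tx_descriptors() adds sizeof(u32) to the ring size (line 1018 below). A small stand-alone sketch of that lookup, with a simplified 16-byte descriptor stand-in:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct txd_model { uint64_t qw0, qw1; };	/* 16-byte descriptor stand-in */

	static uint32_t ring_get_head(const struct txd_model *desc, unsigned int count)
	{
		/* The head index lives one element past the end of the
		 * descriptor array; no register read is needed.
		 */
		return *(const uint32_t *)(const void *)(desc + count);
	}

	int main(void)
	{
		unsigned int count = 8;
		struct txd_model *ring;

		/* allocate the descriptors plus the trailing write-back word */
		ring = calloc(1, count * sizeof(*ring) + sizeof(uint32_t));
		*(uint32_t *)(void *)(ring + count) = 5;	/* pretend HW wrote head = 5 */
		printf("head = %u\n", ring_get_head(ring, count));
		free(ring);
		return 0;
	}
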
643 static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) in i40e_check_tx_hang() argument
645 u32 tx_done = tx_ring->stats.packets; in i40e_check_tx_hang()
646 u32 tx_done_old = tx_ring->tx_stats.tx_done_old; in i40e_check_tx_hang()
647 u32 tx_pending = i40e_get_tx_pending(tx_ring); in i40e_check_tx_hang()
648 struct i40e_pf *pf = tx_ring->vsi->back; in i40e_check_tx_hang()
651 clear_check_for_tx_hang(tx_ring); in i40e_check_tx_hang()
667 &tx_ring->state); in i40e_check_tx_hang()
671 …dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d", in i40e_check_tx_hang()
672 tx_pending, tx_ring->queue_index); in i40e_check_tx_hang()
676 tx_ring->tx_stats.tx_done_old = tx_done; in i40e_check_tx_hang()
677 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); in i40e_check_tx_hang()
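
The references in i40e_check_tx_hang() compare the packet counter against its value from the previous service interval while work is still pending; in the driver a state bit additionally requires the condition to hold on two consecutive checks before a hang is reported. A minimal model of just the core comparison, with illustrative names:

	#include <stdbool.h>
	#include <stdint.h>

	static bool tx_hang_suspected(uint32_t tx_done, uint32_t tx_done_old,
				      uint32_t tx_pending)
	{
		/* No completions since the last check while descriptors are
		 * still outstanding looks like a stalled transmit unit.
		 */
		return (tx_done == tx_done_old) && tx_pending;
	}

	int main(void)
	{
		return tx_hang_suspected(100, 100, 3) ? 0 : 1;	/* suspected: exit 0 */
	}
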
692 static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) in i40e_clean_tx_irq() argument
694 u16 i = tx_ring->next_to_clean; in i40e_clean_tx_irq()
701 tx_buf = &tx_ring->tx_bi[i]; in i40e_clean_tx_irq()
702 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_clean_tx_irq()
703 i -= tx_ring->count; in i40e_clean_tx_irq()
705 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); in i40e_clean_tx_irq()
732 dma_unmap_single(tx_ring->dev, in i40e_clean_tx_irq()
748 i -= tx_ring->count; in i40e_clean_tx_irq()
749 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
750 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_clean_tx_irq()
755 dma_unmap_page(tx_ring->dev, in i40e_clean_tx_irq()
768 i -= tx_ring->count; in i40e_clean_tx_irq()
769 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
770 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_clean_tx_irq()
779 i += tx_ring->count; in i40e_clean_tx_irq()
780 tx_ring->next_to_clean = i; in i40e_clean_tx_irq()
781 u64_stats_update_begin(&tx_ring->syncp); in i40e_clean_tx_irq()
782 tx_ring->stats.bytes += total_bytes; in i40e_clean_tx_irq()
783 tx_ring->stats.packets += total_packets; in i40e_clean_tx_irq()
784 u64_stats_update_end(&tx_ring->syncp); in i40e_clean_tx_irq()
785 tx_ring->q_vector->tx.total_bytes += total_bytes; in i40e_clean_tx_irq()
786 tx_ring->q_vector->tx.total_packets += total_packets; in i40e_clean_tx_irq()
794 !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && in i40e_clean_tx_irq()
795 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) in i40e_clean_tx_irq()
796 tx_ring->arm_wb = true; in i40e_clean_tx_irq()
798 tx_ring->arm_wb = false; in i40e_clean_tx_irq()
800 if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { in i40e_clean_tx_irq()
802 dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" in i40e_clean_tx_irq()
807 tx_ring->vsi->seid, in i40e_clean_tx_irq()
808 tx_ring->queue_index, in i40e_clean_tx_irq()
809 tx_ring->next_to_use, i); in i40e_clean_tx_irq()
810 dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n" in i40e_clean_tx_irq()
813 tx_ring->tx_bi[i].time_stamp, jiffies); in i40e_clean_tx_irq()
815 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in i40e_clean_tx_irq()
817 dev_info(tx_ring->dev, in i40e_clean_tx_irq()
819 tx_ring->queue_index); in i40e_clean_tx_irq()
830 netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, in i40e_clean_tx_irq()
831 tx_ring->queue_index), in i40e_clean_tx_irq()
835 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in i40e_clean_tx_irq()
836 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { in i40e_clean_tx_irq()
841 if (__netif_subqueue_stopped(tx_ring->netdev, in i40e_clean_tx_irq()
842 tx_ring->queue_index) && in i40e_clean_tx_irq()
843 !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) { in i40e_clean_tx_irq()
844 netif_wake_subqueue(tx_ring->netdev, in i40e_clean_tx_irq()
845 tx_ring->queue_index); in i40e_clean_tx_irq()
846 ++tx_ring->tx_stats.restart_queue; in i40e_clean_tx_irq()
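
The cleanup loop in i40e_clean_tx_irq() keeps its running index biased by -count (lines 703, 748, 768) so the wrap test is a cheap "reached zero" check, then adds count back before storing next_to_clean (lines 779-780). A user-space sketch of that indexing trick; a signed int stands in for the driver's u16 arithmetic and the descriptor/buffer updates are reduced to comments:

	#include <stdio.h>

	#define RING_COUNT 8

	int main(void)
	{
		unsigned int next_to_clean = 6;
		int i = (int)next_to_clean - RING_COUNT;	/* biased into [-count, 0) */
		int cleaned = 0;

		while (cleaned < 5) {		/* pretend 5 descriptors completed */
			cleaned++;
			i++;			/* advance the biased index */
			if (!i) {		/* walked past the last slot */
				i -= RING_COUNT;
				/* tx_buf  = tx_ring->tx_bi;
				 * tx_desc = I40E_TX_DESC(tx_ring, 0);
				 */
			}
		}
		i += RING_COUNT;		/* convert back to a real ring index */
		next_to_clean = (unsigned int)i;
		printf("next_to_clean = %u\n", next_to_clean);	/* 6,7,0,1,2 cleaned -> 3 */
		return 0;
	}
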
1000 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) in i40e_setup_tx_descriptors() argument
1002 struct device *dev = tx_ring->dev; in i40e_setup_tx_descriptors()
1008 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40e_setup_tx_descriptors()
1009 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); in i40e_setup_tx_descriptors()
1010 if (!tx_ring->tx_bi) in i40e_setup_tx_descriptors()
1014 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); in i40e_setup_tx_descriptors()
1018 tx_ring->size += sizeof(u32); in i40e_setup_tx_descriptors()
1019 tx_ring->size = ALIGN(tx_ring->size, 4096); in i40e_setup_tx_descriptors()
1020 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in i40e_setup_tx_descriptors()
1021 &tx_ring->dma, GFP_KERNEL); in i40e_setup_tx_descriptors()
1022 if (!tx_ring->desc) { in i40e_setup_tx_descriptors()
1024 tx_ring->size); in i40e_setup_tx_descriptors()
1028 tx_ring->next_to_use = 0; in i40e_setup_tx_descriptors()
1029 tx_ring->next_to_clean = 0; in i40e_setup_tx_descriptors()
1033 kfree(tx_ring->tx_bi); in i40e_setup_tx_descriptors()
1034 tx_ring->tx_bi = NULL; in i40e_setup_tx_descriptors()
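
i40e_setup_tx_descriptors() sizes the DMA area as count descriptors plus one u32 for head write-back, rounded up to a 4 KiB boundary (lines 1014-1019). The same arithmetic in a stand-alone sketch; ALIGN() is reimplemented here and the 16-byte descriptor is a stand-in:

	#include <stdint.h>
	#include <stdio.h>

	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

	struct txd_model { uint64_t qw0, qw1; };	/* 16-byte descriptor stand-in */

	int main(void)
	{
		unsigned int count = 512;
		size_t size = count * sizeof(struct txd_model);	/* 8192 */

		size += sizeof(uint32_t);	/* room for the head write-back word */
		size = ALIGN_UP(size, 4096);	/* 8196 -> 12288 */
		printf("descriptor area = %zu bytes\n", size);
		return 0;
	}
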
1931 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_atr() argument
1935 struct i40e_pf *pf = tx_ring->vsi->back; in i40e_atr()
1954 if (!tx_ring->atr_sample_rate) in i40e_atr()
1982 tx_ring->atr_count++; in i40e_atr()
1988 (tx_ring->atr_count < tx_ring->atr_sample_rate)) in i40e_atr()
1991 tx_ring->atr_count = 0; in i40e_atr()
1994 i = tx_ring->next_to_use; in i40e_atr()
1995 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); in i40e_atr()
1998 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_atr()
2000 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & in i40e_atr()
2008 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; in i40e_atr()
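
i40e_atr() only programs a flow-director filter for every atr_sample_rate-th eligible packet (lines 1982-1991); the driver also bypasses the throttle for SYN/FIN/RST packets, which is omitted here. A minimal model of the sampling throttle with invented names:

	#include <stdbool.h>

	struct atr_model {
		unsigned int atr_count;
		unsigned int atr_sample_rate;
	};

	static bool atr_should_sample(struct atr_model *m)
	{
		if (!m->atr_sample_rate)	/* ATR disabled on this ring */
			return false;
		m->atr_count++;
		if (m->atr_count < m->atr_sample_rate)
			return false;		/* not this packet */
		m->atr_count = 0;		/* reset and program a filter */
		return true;
	}

	int main(void)
	{
		struct atr_model m = { .atr_count = 0, .atr_sample_rate = 20 };
		unsigned int pkts, programmed = 0;

		for (pkts = 0; pkts < 100; pkts++)
			programmed += atr_should_sample(&m);
		return programmed == 5 ? 0 : 1;	/* one filter per 20 packets */
	}
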
2049 struct i40e_ring *tx_ring, in i40e_tx_prepare_vlan_flags() argument
2053 struct i40e_ring *tx_ring, in i40e_tx_prepare_vlan_flags()
2061 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { in i40e_tx_prepare_vlan_flags()
2089 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED)) in i40e_tx_prepare_vlan_flags()
2129 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_tso() argument
2187 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_tsyn() argument
2202 pf = i40e_netdev_to_pf(tx_ring->netdev); in i40e_tsyn()
2230 struct i40e_ring *tx_ring, in i40e_tx_enable_csum() argument
2344 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, in i40e_create_tx_ctx() argument
2349 int i = tx_ring->next_to_use; in i40e_create_tx_ctx()
2356 context_desc = I40E_TX_CTXTDESC(tx_ring, i); in i40e_create_tx_ctx()
2359 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_create_tx_ctx()
2375 static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) in __i40e_maybe_stop_tx() argument
2377 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40e_maybe_stop_tx()
2382 if (likely(I40E_DESC_UNUSED(tx_ring) < size)) in __i40e_maybe_stop_tx()
2386 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40e_maybe_stop_tx()
2387 ++tx_ring->tx_stats.restart_queue; in __i40e_maybe_stop_tx()
2399 int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) in i40e_maybe_stop_tx() argument
2401 static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) in i40e_maybe_stop_tx()
2404 if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) in i40e_maybe_stop_tx()
2406 return __i40e_maybe_stop_tx(tx_ring, size); in i40e_maybe_stop_tx()
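
__i40e_maybe_stop_tx() stops the queue first and only re-checks the free-descriptor count after a barrier, so a completion that frees space concurrently cannot be missed; i40e_maybe_stop_tx() is the fast-path wrapper around it. A simplified user-space model of that stop/recheck/restart sequence: the atomic fence stands in for smp_mb(), the flag for netif_{stop,start}_subqueue(), the free-slot count is modelled after the I40E_DESC_UNUSED() macro, and all names are illustrative.

	#include <errno.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct ring_model {
		unsigned int count;
		_Atomic unsigned int next_to_use;
		_Atomic unsigned int next_to_clean;
		bool queue_stopped;		/* netdev subqueue state stand-in */
	};

	/* Free slots, keeping one gap between producer and consumer. */
	static unsigned int ring_unused(struct ring_model *r)
	{
		unsigned int ntc = atomic_load(&r->next_to_clean);
		unsigned int ntu = atomic_load(&r->next_to_use);

		return ((ntc > ntu) ? 0 : r->count) + ntc - ntu - 1;
	}

	static int ring_maybe_stop(struct ring_model *r, unsigned int needed)
	{
		if (ring_unused(r) >= needed)	/* fast path: plenty of room */
			return 0;

		r->queue_stopped = true;	/* stop before the final check */
		atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() analogue */

		if (ring_unused(r) < needed)	/* still full: stay stopped */
			return -EBUSY;

		r->queue_stopped = false;	/* cleanup freed space: restart */
		return 0;
	}

	int main(void)
	{
		struct ring_model r = { .count = 512 };

		atomic_store(&r.next_to_use, 510);
		atomic_store(&r.next_to_clean, 0);
		printf("rc = %d, stopped = %d\n", ring_maybe_stop(&r, 8), r.queue_stopped);
		return 0;
	}
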
2476 void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_tx_map() argument
2480 static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_tx_map()
2490 u16 i = tx_ring->next_to_use; in i40e_tx_map()
2512 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in i40e_tx_map()
2514 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_tx_map()
2518 if (dma_mapping_error(tx_ring->dev, dma)) in i40e_tx_map()
2534 if (i == tx_ring->count) { in i40e_tx_map()
2535 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_tx_map()
2553 if (i == tx_ring->count) { in i40e_tx_map()
2554 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_tx_map()
2561 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in i40e_tx_map()
2564 tx_bi = &tx_ring->tx_bi[i]; in i40e_tx_map()
2571 (first <= &tx_ring->tx_bi[i]) && in i40e_tx_map()
2572 (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { in i40e_tx_map()
2584 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, in i40e_tx_map()
2585 tx_ring->queue_index), in i40e_tx_map()
2602 if (i == tx_ring->count) in i40e_tx_map()
2605 tx_ring->next_to_use = i; in i40e_tx_map()
2607 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); in i40e_tx_map()
2610 netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, in i40e_tx_map()
2611 tx_ring->queue_index))) in i40e_tx_map()
2612 writel(i, tx_ring->tail); in i40e_tx_map()
2617 dev_info(tx_ring->dev, "TX DMA map failed\n"); in i40e_tx_map()
2621 tx_bi = &tx_ring->tx_bi[i]; in i40e_tx_map()
2622 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); in i40e_tx_map()
2626 i = tx_ring->count; in i40e_tx_map()
2630 tx_ring->next_to_use = i; in i40e_tx_map()
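
When a DMA mapping fails in i40e_tx_map(), the error path (lines 2617-2630) walks backwards from the failing slot, releasing each buffer until the packet's first buffer is reached, and rewinds next_to_use to that slot. A user-space model of that unwind loop; the buffer release is reduced to clearing a flag and all names are invented:

	#include <stdio.h>

	#define RING_COUNT 8

	struct buf_model { int mapped; };

	static void ring_unwind(struct buf_model *bufs, unsigned int count,
				unsigned int i, unsigned int first,
				unsigned int *next_to_use)
	{
		for (;;) {
			bufs[i].mapped = 0;	/* i40e_unmap_and_free_tx_resource() */
			if (i == first)
				break;
			if (i == 0)
				i = count;	/* step backwards across the wrap */
			i--;
		}
		*next_to_use = i;		/* reuse the packet's first slot */
	}

	int main(void)
	{
		struct buf_model bufs[RING_COUNT] = { { 0 } };
		unsigned int next_to_use = 6;	/* not advanced until the map succeeds */
		unsigned int slot;

		/* Packet started at slot 6 and mapping failed while filling slot 1,
		 * so slots 6, 7, 0 and 1 hold partial mappings to undo.
		 */
		bufs[6].mapped = bufs[7].mapped = bufs[0].mapped = bufs[1].mapped = 1;
		ring_unwind(bufs, RING_COUNT, 1, 6, &next_to_use);
		for (slot = 0; slot < RING_COUNT; slot++)
			printf("slot %u mapped=%d\n", slot, bufs[slot].mapped);
		printf("next_to_use = %u\n", next_to_use);	/* back to 6 */
		return 0;
	}
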
2644 struct i40e_ring *tx_ring) in i40e_xmit_descriptor_count() argument
2647 struct i40e_ring *tx_ring) in i40e_xmit_descriptor_count()
2663 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { in i40e_xmit_descriptor_count()
2664 tx_ring->tx_stats.tx_busy++; in i40e_xmit_descriptor_count()
2678 struct i40e_ring *tx_ring) in i40e_xmit_frame_ring() argument
2690 if (0 == i40e_xmit_descriptor_count(skb, tx_ring)) in i40e_xmit_frame_ring()
2694 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) in i40e_xmit_frame_ring()
2701 first = &tx_ring->tx_bi[tx_ring->next_to_use]; in i40e_xmit_frame_ring()
2709 tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len, in i40e_xmit_frame_ring()
2717 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss); in i40e_xmit_frame_ring()
2736 tx_ring, &cd_tunneling); in i40e_xmit_frame_ring()
2739 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, in i40e_xmit_frame_ring()
2746 i40e_atr(tx_ring, skb, tx_flags, protocol); in i40e_xmit_frame_ring()
2748 i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, in i40e_xmit_frame_ring()
2769 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; in i40e_lan_xmit_frame() local
2777 return i40e_xmit_frame_ring(skb, tx_ring); in i40e_lan_xmit_frame()