Lines matching refs: tx_ring (drivers/net/ethernet/intel/e1000/e1000_main.c)
106 struct e1000_tx_ring *tx_ring);
125 struct e1000_tx_ring *tx_ring);
139 struct e1000_tx_ring *tx_ring);
1245 kfree(adapter->tx_ring); in e1000_probe()
1283 kfree(adapter->tx_ring); in e1000_remove()
1336 adapter->tx_ring = kcalloc(adapter->num_tx_queues, in e1000_alloc_queues()
1338 if (!adapter->tx_ring) in e1000_alloc_queues()
1344 kfree(adapter->tx_ring); in e1000_alloc_queues()
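
The e1000_alloc_queues() hits above (together with the matching kfree() calls in e1000_probe()'s error path and in e1000_remove()) show the queue-array lifecycle: one zeroed array of ring structs, sized by num_tx_queues, allocated with kcalloc() and released with kfree(). A minimal userspace sketch of the same pattern, with calloc() standing in for kcalloc() and deliberately simplified types:

    #include <stdlib.h>

    struct tx_ring { unsigned count; };     /* stand-in for struct e1000_tx_ring */

    struct adapter {                        /* stand-in for struct e1000_adapter */
        int num_tx_queues;
        struct tx_ring *tx_ring;
    };

    /* Mirrors e1000_alloc_queues(): one zeroed element per TX queue. */
    static int alloc_queues(struct adapter *adapter)
    {
        adapter->tx_ring = calloc(adapter->num_tx_queues,
                                  sizeof(*adapter->tx_ring));
        if (!adapter->tx_ring)
            return -1;                      /* the driver returns -ENOMEM */
        return 0;
    }

    /* Mirrors the kfree(adapter->tx_ring) calls in the probe/remove paths. */
    static void free_queues(struct adapter *adapter)
    {
        free(adapter->tx_ring);
        adapter->tx_ring = NULL;
    }
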
1578 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); in e1000_setup_all_tx_resources()
1583 &adapter->tx_ring[i]); in e1000_setup_all_tx_resources()
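
e1000_setup_all_tx_resources() (lines 1578-1583) wraps the per-ring setup in a loop that unwinds on failure: if ring i cannot be set up, rings 0..i-1 are freed before the error is returned. A hedged sketch of that rollback idiom; setup_ring() and free_ring() are hypothetical stand-ins for e1000_setup_tx_resources()/e1000_free_tx_resources():

    struct tx_ring { unsigned count; };     /* as in the previous sketch */
    struct adapter { int num_tx_queues; struct tx_ring *tx_ring; };

    static int  setup_ring(struct tx_ring *r) { (void)r; return 0; } /* stub */
    static void free_ring(struct tx_ring *r)  { (void)r; }           /* stub */

    /* On a mid-loop failure, free everything that was already set up. */
    static int setup_all_rings(struct adapter *adapter)
    {
        int i, err = 0;

        for (i = 0; i < adapter->num_tx_queues; i++) {
            err = setup_ring(&adapter->tx_ring[i]);
            if (err) {
                for (i--; i >= 0; i--)      /* unwind rings 0..i-1 */
                    free_ring(&adapter->tx_ring[i]);
                break;
            }
        }
        return err;
    }
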
1609 tdba = adapter->tx_ring[0].dma; in e1000_configure_tx()
1610 tdlen = adapter->tx_ring[0].count * in e1000_configure_tx()
1617 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? in e1000_configure_tx()
1619 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? in e1000_configure_tx()
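
e1000_configure_tx() points the hardware at ring 0: TDLEN is the ring length in bytes (count descriptors of 16 bytes each), the 64-bit bus address is split across TDBAH/TDBAL, and the driver records which TDH/TDT (head/tail) register offsets this MAC uses, since pre-82543 parts map them differently. A simplified sketch; write_reg() is a hypothetical stand-in for the driver's ew32() MMIO helper, and the offset parameters are placeholders:

    #include <stdint.h>
    #include <stdio.h>

    struct tx_ring {
        uint64_t dma;        /* bus address of the descriptor array */
        uint32_t count;      /* number of descriptors */
        uint32_t tdh, tdt;   /* chosen head/tail register offsets */
    };

    /* stand-in for ew32(); the driver writes memory-mapped registers */
    static void write_reg(const char *name, uint32_t val)
    {
        printf("%-5s <- 0x%08x\n", name, val);
    }

    static void configure_tx(struct tx_ring *ring, int mac_is_82543_plus,
                             uint32_t tdh_new, uint32_t tdh_82542,
                             uint32_t tdt_new, uint32_t tdt_82542)
    {
        uint64_t tdba  = ring->dma;
        uint32_t tdlen = ring->count * 16;  /* 16 == sizeof(e1000_tx_desc) */

        write_reg("TDLEN", tdlen);
        write_reg("TDBAH", (uint32_t)(tdba >> 32));
        write_reg("TDBAL", (uint32_t)tdba);

        /* remember which offsets later writel() calls on tdh/tdt must use */
        ring->tdh = mac_is_82543_plus ? tdh_new : tdh_82542;
        ring->tdt = mac_is_82543_plus ? tdt_new : tdt_82542;
    }
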
1929 struct e1000_tx_ring *tx_ring) in e1000_free_tx_resources() argument
1933 e1000_clean_tx_ring(adapter, tx_ring); in e1000_free_tx_resources()
1935 vfree(tx_ring->buffer_info); in e1000_free_tx_resources()
1936 tx_ring->buffer_info = NULL; in e1000_free_tx_resources()
1938 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, in e1000_free_tx_resources()
1939 tx_ring->dma); in e1000_free_tx_resources()
1941 tx_ring->desc = NULL; in e1000_free_tx_resources()
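
e1000_free_tx_resources() tears down in reverse order of setup: drain the ring first (e1000_clean_tx_ring() unmaps and frees any in-flight buffers), then free the buffer_info bookkeeping array (vfree() in the driver), then the coherent DMA descriptor memory, NULLing each pointer behind it. A userspace sketch with free() standing in for both kernel free routines:

    #include <stdlib.h>

    struct tx_ring {
        void *desc;          /* descriptor memory; dma_alloc_coherent() in-kernel */
        void *buffer_info;   /* per-slot bookkeeping; vmalloc() in-kernel */
    };

    static void clean_ring(struct tx_ring *r) { (void)r; } /* sketched below */

    static void free_tx_resources(struct tx_ring *ring)
    {
        clean_ring(ring);            /* release in-flight buffers first */

        free(ring->buffer_info);     /* vfree() in the driver */
        ring->buffer_info = NULL;

        free(ring->desc);            /* dma_free_coherent() in the driver */
        ring->desc = NULL;           /* NULL makes any stale reuse obvious */
    }
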
1955 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); in e1000_free_all_tx_resources()
1986 struct e1000_tx_ring *tx_ring) in e1000_clean_tx_ring() argument
1995 for (i = 0; i < tx_ring->count; i++) { in e1000_clean_tx_ring()
1996 buffer_info = &tx_ring->buffer_info[i]; in e1000_clean_tx_ring()
2001 size = sizeof(struct e1000_tx_buffer) * tx_ring->count; in e1000_clean_tx_ring()
2002 memset(tx_ring->buffer_info, 0, size); in e1000_clean_tx_ring()
2006 memset(tx_ring->desc, 0, tx_ring->size); in e1000_clean_tx_ring()
2008 tx_ring->next_to_use = 0; in e1000_clean_tx_ring()
2009 tx_ring->next_to_clean = 0; in e1000_clean_tx_ring()
2010 tx_ring->last_tx_tso = false; in e1000_clean_tx_ring()
2012 writel(0, hw->hw_addr + tx_ring->tdh); in e1000_clean_tx_ring()
2013 writel(0, hw->hw_addr + tx_ring->tdt); in e1000_clean_tx_ring()
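
e1000_clean_tx_ring() returns a ring to its post-alloc state: release every buffer, zero both the bookkeeping array and the descriptor memory, reset the software indices, and write 0 to the hardware head/tail registers. A sketch with the writel() MMIO writes reduced to plain stores:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    struct tx_buffer { void *skb; };     /* stand-in for struct e1000_tx_buffer */

    struct tx_ring {
        struct tx_buffer *buffer_info;
        void     *desc;
        size_t    size;                  /* bytes of descriptor memory */
        unsigned  count;                 /* number of descriptors */
        unsigned  next_to_use, next_to_clean;
        bool      last_tx_tso;
        volatile uint32_t *tdh, *tdt;    /* mapped head/tail registers */
    };

    static void unmap_and_free(struct tx_buffer *b) { b->skb = NULL; }

    static void clean_tx_ring(struct tx_ring *ring)
    {
        unsigned i;

        for (i = 0; i < ring->count; i++)       /* drop in-flight buffers */
            unmap_and_free(&ring->buffer_info[i]);

        memset(ring->buffer_info, 0, sizeof(struct tx_buffer) * ring->count);
        memset(ring->desc, 0, ring->size);

        ring->next_to_use   = 0;                /* producer index */
        ring->next_to_clean = 0;                /* consumer index */
        ring->last_tx_tso   = false;

        *ring->tdh = 0;                         /* writel(0, ...) in the driver */
        *ring->tdt = 0;
    }
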
2025 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); in e1000_clean_all_tx_rings()
2436 struct e1000_tx_ring *txdr = adapter->tx_ring; in e1000_watchdog()
2701 struct e1000_tx_ring *tx_ring, struct sk_buff *skb, in e1000_tso() argument
2747 i = tx_ring->next_to_use; in e1000_tso()
2748 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); in e1000_tso()
2749 buffer_info = &tx_ring->buffer_info[i]; in e1000_tso()
2764 if (++i == tx_ring->count) i = 0; in e1000_tso()
2765 tx_ring->next_to_use = i; in e1000_tso()
2773 struct e1000_tx_ring *tx_ring, struct sk_buff *skb, in e1000_tx_csum() argument
2804 i = tx_ring->next_to_use; in e1000_tx_csum()
2805 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_csum()
2806 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); in e1000_tx_csum()
2819 if (unlikely(++i == tx_ring->count)) i = 0; in e1000_tx_csum()
2820 tx_ring->next_to_use = i; in e1000_tx_csum()
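
Both e1000_tso() and e1000_tx_csum() claim the slot at next_to_use for a single context descriptor and then advance the index with the driver's standard wrap idiom, `if (unlikely(++i == tx_ring->count)) i = 0;`. A minimal sketch of just that claim-and-wrap step:

    struct tx_ring { unsigned count, next_to_use; };

    /* Claim buffer_info[next_to_use] and the matching context descriptor,
     * then advance the producer index with wraparound.
     */
    static void claim_and_advance(struct tx_ring *ring)
    {
        unsigned i = ring->next_to_use;

        /* ... fill E1000_CONTEXT_DESC(*ring, i) and buffer_info[i] ... */

        if (++i == ring->count)    /* unlikely() in the driver */
            i = 0;                 /* wrap back to slot 0 */
        ring->next_to_use = i;
    }
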
2829 struct e1000_tx_ring *tx_ring, in e1000_tx_map() argument
2841 i = tx_ring->next_to_use; in e1000_tx_map()
2844 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_map()
2851 if (!skb->data_len && tx_ring->last_tx_tso && in e1000_tx_map()
2853 tx_ring->last_tx_tso = false; in e1000_tx_map()
2895 if (unlikely(i == tx_ring->count)) in e1000_tx_map()
2910 if (unlikely(i == tx_ring->count)) in e1000_tx_map()
2913 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_map()
2952 tx_ring->buffer_info[i].skb = skb; in e1000_tx_map()
2953 tx_ring->buffer_info[i].segs = segs; in e1000_tx_map()
2954 tx_ring->buffer_info[i].bytecount = bytecount; in e1000_tx_map()
2955 tx_ring->buffer_info[first].next_to_watch = i; in e1000_tx_map()
2967 i += tx_ring->count; in e1000_tx_map()
2969 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_map()
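
e1000_tx_map() DMA-maps skb->data and each page fragment into consecutive descriptors, wrapping at count; the skb pointer plus the segment/byte accounting land on the last slot used, and buffer_info[first].next_to_watch is pointed at that slot so the cleanup path knows where the packet ends. (The `i += tx_ring->count` on line 2967 is the error path walking backwards to unmap what was mapped: adding count keeps the decrement from going negative at the wrap.) A simplified sketch of the forward walk and the next_to_watch hand-off; the DMA mapping itself is elided:

    struct tx_buffer {
        void    *skb;
        unsigned next_to_watch;    /* index of the packet's last descriptor */
    };

    struct tx_ring {
        struct tx_buffer *buffer_info;
        unsigned count, next_to_use;
    };

    /* Map nr_pieces fragments starting at next_to_use; returns the index
     * of the last descriptor used. `first` is where the caller started
     * (it may already hold a context descriptor for TSO/checksum).
     */
    static unsigned map_pieces(struct tx_ring *ring, void *skb,
                               unsigned first, unsigned nr_pieces)
    {
        unsigned i = ring->next_to_use;
        unsigned n;

        for (n = 0; n < nr_pieces; n++) {
            if (n && ++i == ring->count)   /* advance before all but piece 0 */
                i = 0;
            /* ... dma_map piece n into descriptor i ... */
        }

        ring->buffer_info[i].skb = skb;    /* skb freed from the last slot */
        ring->buffer_info[first].next_to_watch = i;
        return i;
    }
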
2977 struct e1000_tx_ring *tx_ring, int tx_flags, in e1000_tx_queue() argument
3007 i = tx_ring->next_to_use; in e1000_tx_queue()
3010 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_queue()
3011 tx_desc = E1000_TX_DESC(*tx_ring, i); in e1000_tx_queue()
3016 if (unlikely(++i == tx_ring->count)) i = 0; in e1000_tx_queue()
3032 tx_ring->next_to_use = i; in e1000_tx_queue()
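
e1000_tx_queue() then turns the accumulated tx_flags into descriptor command/status bits, fills one legacy TX descriptor per mapped piece (same wrap idiom), and only afterwards publishes the new producer index; the TDT tail write that actually starts the hardware happens later, in e1000_xmit_frame(). Sketch:

    struct tx_ring { unsigned count, next_to_use; };

    /* Fill `count` descriptors starting at next_to_use, then publish the
     * new producer index. In the driver a dma_wmb() sits between filling
     * the descriptors and this store, so the hardware can never fetch a
     * half-written descriptor.
     */
    static void tx_queue(struct tx_ring *ring, unsigned count)
    {
        unsigned i = ring->next_to_use, n;

        for (n = 0; n < count; n++) {
            /* ... fill E1000_TX_DESC(*ring, i) from buffer_info[i] ... */
            if (++i == ring->count)
                i = 0;
        }

        /* dma_wmb() here in the driver */
        ring->next_to_use = i;
    }
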
3075 struct e1000_tx_ring *tx_ring = adapter->tx_ring; in __e1000_maybe_stop_tx() local
3087 if (likely(E1000_DESC_UNUSED(tx_ring) < size)) in __e1000_maybe_stop_tx()
3097 struct e1000_tx_ring *tx_ring, int size) in e1000_maybe_stop_tx() argument
3099 if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) in e1000_maybe_stop_tx()
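
The stop/wake logic hinges on E1000_DESC_UNUSED(): if fewer than `size` descriptors are free, e1000_maybe_stop_tx() falls through to __e1000_maybe_stop_tx(), which stops the queue, issues a memory barrier, and re-checks in case another CPU's cleanup path just freed slots, returning -EBUSY only if the ring is still too full. A sketch of the unused-slot arithmetic and the two-step check (the barrier and queue calls are reduced to comments):

    struct tx_ring { unsigned count, next_to_use, next_to_clean; };

    /* Free descriptors in the ring. One slot always stays empty so that
     * next_to_use == next_to_clean unambiguously means "empty", hence the
     * trailing -1. Mirrors the E1000_DESC_UNUSED() macro.
     */
    static unsigned desc_unused(const struct tx_ring *r)
    {
        return ((r->next_to_clean > r->next_to_use) ? 0 : r->count)
               + r->next_to_clean - r->next_to_use - 1;
    }

    static int maybe_stop_tx(struct tx_ring *ring, unsigned size)
    {
        if (desc_unused(ring) >= size)
            return 0;              /* fast path: plenty of room */

        /* netif_stop_queue(); smp_mb(); then re-check, since another
         * CPU's e1000_clean_tx_irq() may have just freed descriptors.
         */
        if (desc_unused(ring) < size)
            return -1;             /* -EBUSY in the driver: still full */

        /* room appeared after all: restart the queue and carry on */
        return 0;
    }
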
3110 struct e1000_tx_ring *tx_ring; in e1000_xmit_frame() local
3127 tx_ring = adapter->tx_ring; in e1000_xmit_frame()
3187 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) in e1000_xmit_frame()
3212 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) in e1000_xmit_frame()
3229 first = tx_ring->next_to_use; in e1000_xmit_frame()
3231 tso = e1000_tso(adapter, tx_ring, skb, protocol); in e1000_xmit_frame()
3239 tx_ring->last_tx_tso = true; in e1000_xmit_frame()
3241 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol))) in e1000_xmit_frame()
3250 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, in e1000_xmit_frame()
3257 e1000_tx_queue(adapter, tx_ring, tx_flags, count); in e1000_xmit_frame()
3259 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); in e1000_xmit_frame()
3263 writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt); in e1000_xmit_frame()
3272 tx_ring->buffer_info[first].time_stamp = 0; in e1000_xmit_frame()
3273 tx_ring->next_to_use = first; in e1000_xmit_frame()
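
e1000_xmit_frame() strings the pieces together: check for room (count + 2, keeping a gap between tail and head), remember first = next_to_use, run TSO or checksum offload (each may consume a context descriptor), map the payload, queue the descriptors, stop the queue early if fewer than MAX_SKB_FRAGS + 2 slots remain, and finally writel() the producer index to TDT to kick off DMA. The failure path on lines 3272-3273 is the interesting part: because nothing has been written to TDT yet, restoring next_to_use = first is enough to unpublish a partially built packet. A sketch of that rewind:

    struct tx_buffer { unsigned long time_stamp; };
    struct tx_ring {
        struct tx_buffer *buffer_info;
        unsigned count, next_to_use;
    };

    /* Rollback used when e1000_tx_map() fails partway through a packet.
     * Hardware only fetches descriptors up to TDT, and TDT was never
     * advanced, so rewinding the producer index discards the packet.
     */
    static void unwind_tx(struct tx_ring *ring, unsigned first)
    {
        ring->buffer_info[first].time_stamp = 0;  /* avoid a false hang report */
        ring->next_to_use = first;
    }
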
3352 struct e1000_tx_ring *tx_ring = adapter->tx_ring; in e1000_dump() local
3398 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { in e1000_dump()
3399 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); in e1000_dump()
3400 struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i]; in e1000_dump()
3405 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) in e1000_dump()
3407 else if (i == tx_ring->next_to_use) in e1000_dump()
3409 else if (i == tx_ring->next_to_clean) in e1000_dump()
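
e1000_dump() walks the whole ring and tags the slots the software indices point at: "NTC/U" when next_to_use and next_to_clean coincide on a slot, otherwise "NTU" or "NTC". A runnable sketch of the legend loop (the descriptor contents printed by the real function are elided):

    #include <stdio.h>

    struct tx_ring { unsigned count, next_to_use, next_to_clean; };

    static void dump_legend(const struct tx_ring *ring)
    {
        unsigned i;

        for (i = 0; i < ring->count; i++) {
            const char *tag = "";

            if (i == ring->next_to_use && i == ring->next_to_clean)
                tag = " NTC/U";           /* producer and consumer meet here */
            else if (i == ring->next_to_use)
                tag = " NTU";             /* next slot software will fill */
            else if (i == ring->next_to_clean)
                tag = " NTC";             /* next slot software will reclaim */

            printf("T%03u%s\n", i, tag);
        }
    }
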
3812 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); in e1000_clean()
3836 struct e1000_tx_ring *tx_ring) in e1000_clean_tx_irq() argument
3847 i = tx_ring->next_to_clean; in e1000_clean_tx_irq()
3848 eop = tx_ring->buffer_info[i].next_to_watch; in e1000_clean_tx_irq()
3849 eop_desc = E1000_TX_DESC(*tx_ring, eop); in e1000_clean_tx_irq()
3852 (count < tx_ring->count)) { in e1000_clean_tx_irq()
3856 tx_desc = E1000_TX_DESC(*tx_ring, i); in e1000_clean_tx_irq()
3857 buffer_info = &tx_ring->buffer_info[i]; in e1000_clean_tx_irq()
3872 if (unlikely(++i == tx_ring->count)) i = 0; in e1000_clean_tx_irq()
3875 eop = tx_ring->buffer_info[i].next_to_watch; in e1000_clean_tx_irq()
3876 eop_desc = E1000_TX_DESC(*tx_ring, eop); in e1000_clean_tx_irq()
3879 tx_ring->next_to_clean = i; in e1000_clean_tx_irq()
3885 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { in e1000_clean_tx_irq()
3903 if (tx_ring->buffer_info[eop].time_stamp && in e1000_clean_tx_irq()
3904 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + in e1000_clean_tx_irq()
3920 (unsigned long)(tx_ring - adapter->tx_ring), in e1000_clean_tx_irq()
3921 readl(hw->hw_addr + tx_ring->tdh), in e1000_clean_tx_irq()
3922 readl(hw->hw_addr + tx_ring->tdt), in e1000_clean_tx_irq()
3923 tx_ring->next_to_use, in e1000_clean_tx_irq()
3924 tx_ring->next_to_clean, in e1000_clean_tx_irq()
3925 tx_ring->buffer_info[eop].time_stamp, in e1000_clean_tx_irq()
3937 return count < tx_ring->count; in e1000_clean_tx_irq()
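
e1000_clean_tx_irq() is the consumer side: starting from next_to_clean it loads the next_to_watch index recorded by e1000_tx_map(), and only when that end-of-packet descriptor reports E1000_TXD_STAT_DD (descriptor done) does it free every slot up to and including it, repeating until it meets an unfinished packet or has processed a full ring's worth of slots. Afterwards it wakes the queue once E1000_DESC_UNUSED() reaches TX_WAKE_THRESHOLD, and flags a hung queue when the eop buffer's time_stamp has gone stale. A sketch of the completion walk, assuming hypothetical desc_done()/free_buffer() helpers:

    #include <stdbool.h>

    struct tx_buffer { void *skb; unsigned next_to_watch; };
    struct tx_ring {
        struct tx_buffer *buffer_info;
        unsigned count, next_to_clean;
    };

    /* Stubs: the driver tests the DD bit in the eop descriptor's status
     * field, and unmaps/frees the slot's DMA mapping and skb.
     */
    static bool desc_done(const struct tx_ring *r, unsigned eop)
    { (void)r; (void)eop; return false; }
    static void free_buffer(struct tx_buffer *b) { b->skb = NULL; }

    /* Returns true when the budget wasn't exhausted (ring fully cleaned),
     * matching the `count < tx_ring->count` result in the driver.
     */
    static bool clean_tx(struct tx_ring *ring)
    {
        unsigned i = ring->next_to_clean;
        unsigned eop = ring->buffer_info[i].next_to_watch;
        unsigned cleaned = 0;

        while (desc_done(ring, eop) && cleaned < ring->count) {
            bool at_eop = false;

            while (!at_eop) {              /* free this whole packet */
                at_eop = (i == eop);
                free_buffer(&ring->buffer_info[i]);
                if (++i == ring->count)
                    i = 0;
                cleaned++;
            }
            eop = ring->buffer_info[i].next_to_watch;  /* next packet */
        }

        ring->next_to_clean = i;
        return cleaned < ring->count;
    }
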