Lines matching refs: tx_ring (igb driver)
367 struct igb_ring *tx_ring; in igb_dump() local
402 tx_ring = adapter->tx_ring[n]; in igb_dump()
403 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; in igb_dump()
405 n, tx_ring->next_to_use, tx_ring->next_to_clean, in igb_dump()
430 tx_ring = adapter->tx_ring[n]; in igb_dump()
432 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); in igb_dump()
436 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { in igb_dump()
439 tx_desc = IGB_TX_DESC(tx_ring, i); in igb_dump()
440 buffer_info = &tx_ring->tx_buffer_info[i]; in igb_dump()
442 if (i == tx_ring->next_to_use && in igb_dump()
443 i == tx_ring->next_to_clean) in igb_dump()
445 else if (i == tx_ring->next_to_use) in igb_dump()
447 else if (i == tx_ring->next_to_clean) in igb_dump()
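Taken together, the igb_dump() hits above walk every descriptor in the ring and tag the slots at next_to_use (NTU) and next_to_clean (NTC). A minimal sketch of that marker logic, with the actual field printing omitted:

    for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
            const char *next_desc;

            tx_desc = IGB_TX_DESC(tx_ring, i);
            buffer_info = &tx_ring->tx_buffer_info[i];

            if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
                    next_desc = " NTC/U";
            else if (i == tx_ring->next_to_use)
                    next_desc = " NTU";
            else if (i == tx_ring->next_to_clean)
                    next_desc = " NTC";
            else
                    next_desc = "";

            /* pr_info() of tx_desc and buffer_info fields plus next_desc */
    }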
739 adapter->tx_ring[j]->reg_idx = rbase_offset + j; in igb_cache_ring_register()
1036 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; in igb_reset_q_vector()
1271 adapter->tx_ring[txr_idx] = ring; in igb_alloc_q_vector()
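These three hits cover the adapter->tx_ring[] bookkeeping: igb_alloc_q_vector() publishes each ring under its queue index, igb_reset_q_vector() clears that slot again, and igb_cache_ring_register() maps the software queue index onto a hardware register index. A hedged sketch of the register-index caching loop (rbase_offset stands for whatever base offset the driver computes for the current ring layout):

    for (j = 0; j < adapter->num_tx_queues; j++)
            adapter->tx_ring[j]->reg_idx = rbase_offset + j;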
3179 int igb_setup_tx_resources(struct igb_ring *tx_ring) in igb_setup_tx_resources() argument
3181 struct device *dev = tx_ring->dev; in igb_setup_tx_resources()
3184 size = sizeof(struct igb_tx_buffer) * tx_ring->count; in igb_setup_tx_resources()
3186 tx_ring->tx_buffer_info = vzalloc(size); in igb_setup_tx_resources()
3187 if (!tx_ring->tx_buffer_info) in igb_setup_tx_resources()
3191 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); in igb_setup_tx_resources()
3192 tx_ring->size = ALIGN(tx_ring->size, 4096); in igb_setup_tx_resources()
3194 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in igb_setup_tx_resources()
3195 &tx_ring->dma, GFP_KERNEL); in igb_setup_tx_resources()
3196 if (!tx_ring->desc) in igb_setup_tx_resources()
3199 tx_ring->next_to_use = 0; in igb_setup_tx_resources()
3200 tx_ring->next_to_clean = 0; in igb_setup_tx_resources()
3205 vfree(tx_ring->tx_buffer_info); in igb_setup_tx_resources()
3206 tx_ring->tx_buffer_info = NULL; in igb_setup_tx_resources()
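The igb_setup_tx_resources() hits describe the per-ring allocation pattern: a vzalloc'd tx_buffer_info array sized by tx_ring->count, and a coherent DMA block for the hardware descriptors rounded up to 4 KiB. A condensed sketch with the error labels folded into direct returns:

    int igb_setup_tx_resources(struct igb_ring *tx_ring)
    {
            struct device *dev = tx_ring->dev;
            int size;

            size = sizeof(struct igb_tx_buffer) * tx_ring->count;
            tx_ring->tx_buffer_info = vzalloc(size);
            if (!tx_ring->tx_buffer_info)
                    return -ENOMEM;

            /* round up so the descriptor ring stays page aligned */
            tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
            tx_ring->size = ALIGN(tx_ring->size, 4096);

            tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
                                               &tx_ring->dma, GFP_KERNEL);
            if (!tx_ring->desc) {
                    vfree(tx_ring->tx_buffer_info);
                    tx_ring->tx_buffer_info = NULL;
                    return -ENOMEM;
            }

            tx_ring->next_to_use = 0;
            tx_ring->next_to_clean = 0;
            return 0;
    }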
3224 err = igb_setup_tx_resources(adapter->tx_ring[i]); in igb_setup_all_tx_resources()
3229 igb_free_tx_resources(adapter->tx_ring[i]); in igb_setup_all_tx_resources()
3312 igb_configure_tx_ring(adapter, adapter->tx_ring[i]); in igb_configure_tx()
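igb_setup_all_tx_resources() runs that allocator over every queue and unwinds on failure, and igb_configure_tx() then programs each ring into the hardware. A hedged sketch of the allocate-or-unwind loop:

    for (i = 0; i < adapter->num_tx_queues; i++) {
            err = igb_setup_tx_resources(adapter->tx_ring[i]);
            if (err) {
                    /* release the rings that were set up before the failure */
                    for (i--; i >= 0; i--)
                            igb_free_tx_resources(adapter->tx_ring[i]);
                    break;
            }
    }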
3703 void igb_free_tx_resources(struct igb_ring *tx_ring) in igb_free_tx_resources() argument
3705 igb_clean_tx_ring(tx_ring); in igb_free_tx_resources()
3707 vfree(tx_ring->tx_buffer_info); in igb_free_tx_resources()
3708 tx_ring->tx_buffer_info = NULL; in igb_free_tx_resources()
3711 if (!tx_ring->desc) in igb_free_tx_resources()
3714 dma_free_coherent(tx_ring->dev, tx_ring->size, in igb_free_tx_resources()
3715 tx_ring->desc, tx_ring->dma); in igb_free_tx_resources()
3717 tx_ring->desc = NULL; in igb_free_tx_resources()
3731 if (adapter->tx_ring[i]) in igb_free_all_tx_resources()
3732 igb_free_tx_resources(adapter->tx_ring[i]); in igb_free_all_tx_resources()
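The free path mirrors setup: clean the ring, drop the tx_buffer_info array, and release the coherent descriptor memory only if it was actually allocated. A sketch along the lines of the hits above:

    void igb_free_tx_resources(struct igb_ring *tx_ring)
    {
            igb_clean_tx_ring(tx_ring);

            vfree(tx_ring->tx_buffer_info);
            tx_ring->tx_buffer_info = NULL;

            /* nothing to unmap if the descriptor ring was never allocated */
            if (!tx_ring->desc)
                    return;

            dma_free_coherent(tx_ring->dev, tx_ring->size,
                              tx_ring->desc, tx_ring->dma);
            tx_ring->desc = NULL;
    }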
3761 static void igb_clean_tx_ring(struct igb_ring *tx_ring) in igb_clean_tx_ring() argument
3767 if (!tx_ring->tx_buffer_info) in igb_clean_tx_ring()
3771 for (i = 0; i < tx_ring->count; i++) { in igb_clean_tx_ring()
3772 buffer_info = &tx_ring->tx_buffer_info[i]; in igb_clean_tx_ring()
3773 igb_unmap_and_free_tx_resource(tx_ring, buffer_info); in igb_clean_tx_ring()
3776 netdev_tx_reset_queue(txring_txq(tx_ring)); in igb_clean_tx_ring()
3778 size = sizeof(struct igb_tx_buffer) * tx_ring->count; in igb_clean_tx_ring()
3779 memset(tx_ring->tx_buffer_info, 0, size); in igb_clean_tx_ring()
3782 memset(tx_ring->desc, 0, tx_ring->size); in igb_clean_tx_ring()
3784 tx_ring->next_to_use = 0; in igb_clean_tx_ring()
3785 tx_ring->next_to_clean = 0; in igb_clean_tx_ring()
3797 if (adapter->tx_ring[i]) in igb_clean_all_tx_rings()
3798 igb_clean_tx_ring(adapter->tx_ring[i]); in igb_clean_all_tx_rings()
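igb_clean_tx_ring() discards pending work without releasing the ring memory: every buffer is unmapped and freed, the BQL queue is reset, both the software and hardware rings are zeroed, and NTU/NTC rewind to 0. The helper these hits call, igb_unmap_and_free_tx_resource(), does not itself appear in the listing; a hedged sketch of what such a helper does follows: free the skb on a head buffer, and unmap single versus page mappings.

    static void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
                                               struct igb_tx_buffer *tx_buffer)
    {
            if (tx_buffer->skb) {
                    dev_kfree_skb_any(tx_buffer->skb);
                    if (dma_unmap_len(tx_buffer, len))
                            dma_unmap_single(ring->dev,
                                             dma_unmap_addr(tx_buffer, dma),
                                             dma_unmap_len(tx_buffer, len),
                                             DMA_TO_DEVICE);
            } else if (dma_unmap_len(tx_buffer, len)) {
                    /* fragment buffers are page mappings, not single mappings */
                    dma_unmap_page(ring->dev,
                                   dma_unmap_addr(tx_buffer, dma),
                                   dma_unmap_len(tx_buffer, len),
                                   DMA_TO_DEVICE);
            }
            tx_buffer->next_to_watch = NULL;
            tx_buffer->skb = NULL;
            dma_unmap_len_set(tx_buffer, len, 0);
    }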
4366 struct igb_ring *tx_ring = adapter->tx_ring[i]; in igb_watchdog_task() local
4373 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { in igb_watchdog_task()
4382 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); in igb_watchdog_task()
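In igb_watchdog_task() the per-queue check reads: if the link is down but descriptors are still outstanding, the queued Tx work can never complete, so a reset is scheduled; hang detection is then re-armed for the next watchdog period. Roughly, per queue:

    struct igb_ring *tx_ring = adapter->tx_ring[i];

    if (!netif_carrier_ok(adapter->netdev) &&
        igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
            /* link is gone but Tx work is queued: flush it with a reset */
            adapter->tx_timeout_count++;
            schedule_work(&adapter->reset_task);
            return;         /* reset is imminent, nothing more to do here */
    }

    /* force hung-controller detection every watchdog period */
    set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);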
4623 static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, in igb_tx_ctxtdesc() argument
4627 u16 i = tx_ring->next_to_use; in igb_tx_ctxtdesc()
4629 context_desc = IGB_TX_CTXTDESC(tx_ring, i); in igb_tx_ctxtdesc()
4632 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in igb_tx_ctxtdesc()
4638 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) in igb_tx_ctxtdesc()
4639 mss_l4len_idx |= tx_ring->reg_idx << 4; in igb_tx_ctxtdesc()
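The ref filter drops the i++ that sits between lines 4629 and 4632 (it contains no tx_ring reference), which is why the wrap test on 4632 looks odd in isolation. In context the advance-and-wrap reads roughly:

    u16 i = tx_ring->next_to_use;

    context_desc = IGB_TX_CTXTDESC(tx_ring, i);

    i++;                                    /* not listed above: no tx_ring ref */
    tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

    /* 82575 needs a unique per-ring context index, hence reg_idx << 4 */
    if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
            mss_l4len_idx |= tx_ring->reg_idx << 4;

Both igb_tso() and igb_tx_csum() below end by handing their computed fields to this helper.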
4647 static int igb_tso(struct igb_ring *tx_ring, in igb_tso() argument
4707 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); in igb_tso()
4712 static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) in igb_tx_csum() argument
4737 dev_warn(tx_ring->dev, in igb_tx_csum()
4761 dev_warn(tx_ring->dev, in igb_tx_csum()
4775 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); in igb_tx_csum()
4808 static void igb_tx_olinfo_status(struct igb_ring *tx_ring, in igb_tx_olinfo_status() argument
4815 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) in igb_tx_olinfo_status()
4816 olinfo_status |= tx_ring->reg_idx << 4; in igb_tx_olinfo_status()
4831 static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) in __igb_maybe_stop_tx() argument
4833 struct net_device *netdev = tx_ring->netdev; in __igb_maybe_stop_tx()
4835 netif_stop_subqueue(netdev, tx_ring->queue_index); in __igb_maybe_stop_tx()
4846 if (igb_desc_unused(tx_ring) < size) in __igb_maybe_stop_tx()
4850 netif_wake_subqueue(netdev, tx_ring->queue_index); in __igb_maybe_stop_tx()
4852 u64_stats_update_begin(&tx_ring->tx_syncp2); in __igb_maybe_stop_tx()
4853 tx_ring->tx_stats.restart_queue2++; in __igb_maybe_stop_tx()
4854 u64_stats_update_end(&tx_ring->tx_syncp2); in __igb_maybe_stop_tx()
4859 static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) in igb_maybe_stop_tx() argument
4861 if (igb_desc_unused(tx_ring) >= size) in igb_maybe_stop_tx()
4863 return __igb_maybe_stop_tx(tx_ring, size); in igb_maybe_stop_tx()
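__igb_maybe_stop_tx() implements the usual stop-then-recheck pattern: stop the subqueue, issue a memory barrier (the smp_mb() between lines 4835 and 4846 carries no tx_ring reference, so it is not listed), re-check the free-descriptor count in case the completion path just made room, and wake the queue again if it did. Roughly:

    netif_stop_subqueue(netdev, tx_ring->queue_index);

    smp_mb();       /* order the stop against the cleanup path's free-count update */

    /* another CPU may have completed descriptors since the first check */
    if (igb_desc_unused(tx_ring) < size)
            return -EBUSY;

    /* a reprieve: room appeared, restart the queue and count the restart */
    netif_wake_subqueue(netdev, tx_ring->queue_index);

    u64_stats_update_begin(&tx_ring->tx_syncp2);
    tx_ring->tx_stats.restart_queue2++;
    u64_stats_update_end(&tx_ring->tx_syncp2);
    return 0;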
4866 static void igb_tx_map(struct igb_ring *tx_ring, in igb_tx_map() argument
4878 u16 i = tx_ring->next_to_use; in igb_tx_map()
4880 tx_desc = IGB_TX_DESC(tx_ring, i); in igb_tx_map()
4882 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); in igb_tx_map()
4887 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in igb_tx_map()
4892 if (dma_mapping_error(tx_ring->dev, dma)) in igb_tx_map()
4907 if (i == tx_ring->count) { in igb_tx_map()
4908 tx_desc = IGB_TX_DESC(tx_ring, 0); in igb_tx_map()
4926 if (i == tx_ring->count) { in igb_tx_map()
4927 tx_desc = IGB_TX_DESC(tx_ring, 0); in igb_tx_map()
4935 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, in igb_tx_map()
4938 tx_buffer = &tx_ring->tx_buffer_info[i]; in igb_tx_map()
4945 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in igb_tx_map()
4963 if (i == tx_ring->count) in igb_tx_map()
4966 tx_ring->next_to_use = i; in igb_tx_map()
4969 igb_maybe_stop_tx(tx_ring, DESC_NEEDED); in igb_tx_map()
4971 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { in igb_tx_map()
4972 writel(i, tx_ring->tail); in igb_tx_map()
4982 dev_err(tx_ring->dev, "TX DMA map failed\n"); in igb_tx_map()
4986 tx_buffer = &tx_ring->tx_buffer_info[i]; in igb_tx_map()
4987 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer); in igb_tx_map()
4991 i = tx_ring->count; in igb_tx_map()
4995 tx_ring->next_to_use = i; in igb_tx_map()
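The igb_tx_map() hits outline the mapping loop: the skb head and each page fragment are DMA mapped, descriptors are chained with a wrap back to slot 0 whenever i reaches tx_ring->count, the BQL counter is charged, next_to_use advances past the last descriptor, and the tail register is written unless further frames are pending (xmit_more). A condensed sketch of the wrap-and-advance skeleton, with the cmd_type/olinfo plumbing and oversize-buffer splitting left out:

    u16 i = tx_ring->next_to_use;

    tx_desc = IGB_TX_DESC(tx_ring, i);
    dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

    for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
            if (dma_mapping_error(tx_ring->dev, dma))
                    goto dma_error;

            tx_desc->read.buffer_addr = cpu_to_le64(dma);

            if (likely(!data_len))          /* head and all frags consumed */
                    break;

            i++;
            tx_desc++;
            if (i == tx_ring->count) {      /* wrap back to the ring start */
                    tx_desc = IGB_TX_DESC(tx_ring, 0);
                    i = 0;
            }

            size = skb_frag_size(frag);
            data_len -= size;
            dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
                                   DMA_TO_DEVICE);
            tx_buffer = &tx_ring->tx_buffer_info[i];
    }

    netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

    /* first->next_to_watch records the EOP descriptor for the cleanup path;
     * the last descriptor still occupies slot i, so advance past it
     */
    i++;
    if (i == tx_ring->count)
            i = 0;
    tx_ring->next_to_use = i;

    igb_maybe_stop_tx(tx_ring, DESC_NEEDED);

    if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more)
            writel(i, tx_ring->tail);       /* kick the hardware */

On a mapping failure the dma_error path (lines 4982 onward) walks backwards from i, unmapping and freeing each buffer until it reaches first, wrapping via i = tx_ring->count when i hits 0, and finally rewinds next_to_use.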
4999 struct igb_ring *tx_ring) in igb_xmit_frame_ring() argument
5023 if (igb_maybe_stop_tx(tx_ring, count + 3)) { in igb_xmit_frame_ring()
5029 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in igb_xmit_frame_ring()
5035 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); in igb_xmit_frame_ring()
5060 tso = igb_tso(tx_ring, first, &hdr_len); in igb_xmit_frame_ring()
5064 igb_tx_csum(tx_ring, first); in igb_xmit_frame_ring()
5066 igb_tx_map(tx_ring, first, hdr_len); in igb_xmit_frame_ring()
5071 igb_unmap_and_free_tx_resource(tx_ring, first); in igb_xmit_frame_ring()
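igb_xmit_frame_ring() ties the pieces together: reserve enough descriptors up front (the +3 covers a context descriptor plus a gap so tail never touches head), record the first buffer, run TSO or checksum offload, then map. A condensed sketch with timestamping and VLAN handling left out:

    if (igb_maybe_stop_tx(tx_ring, count + 3))
            return NETDEV_TX_BUSY;

    /* record the location of the first descriptor for this packet */
    first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
    first->skb = skb;
    first->bytecount = skb->len;
    first->gso_segs = 1;

    tso = igb_tso(tx_ring, first, &hdr_len);
    if (tso < 0)
            goto out_drop;
    else if (!tso)
            igb_tx_csum(tx_ring, first);

    igb_tx_map(tx_ring, first, hdr_len);
    return NETDEV_TX_OK;

out_drop:
    igb_unmap_and_free_tx_resource(tx_ring, first);
    return NETDEV_TX_OK;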
5084 return adapter->tx_ring[r_idx]; in igb_tx_queue_mapping()
5263 struct igb_ring *ring = adapter->tx_ring[i]; in igb_update_stats()
5547 struct igb_ring *tx_ring, in igb_update_tx_dca() argument
5551 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); in igb_update_tx_dca()
5564 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl); in igb_update_tx_dca()
6406 struct igb_ring *tx_ring = q_vector->tx.ring; in igb_clean_tx_irq() local
6411 unsigned int i = tx_ring->next_to_clean; in igb_clean_tx_irq()
6416 tx_buffer = &tx_ring->tx_buffer_info[i]; in igb_clean_tx_irq()
6417 tx_desc = IGB_TX_DESC(tx_ring, i); in igb_clean_tx_irq()
6418 i -= tx_ring->count; in igb_clean_tx_irq()
6445 dma_unmap_single(tx_ring->dev, in igb_clean_tx_irq()
6460 i -= tx_ring->count; in igb_clean_tx_irq()
6461 tx_buffer = tx_ring->tx_buffer_info; in igb_clean_tx_irq()
6462 tx_desc = IGB_TX_DESC(tx_ring, 0); in igb_clean_tx_irq()
6467 dma_unmap_page(tx_ring->dev, in igb_clean_tx_irq()
6480 i -= tx_ring->count; in igb_clean_tx_irq()
6481 tx_buffer = tx_ring->tx_buffer_info; in igb_clean_tx_irq()
6482 tx_desc = IGB_TX_DESC(tx_ring, 0); in igb_clean_tx_irq()
6492 netdev_tx_completed_queue(txring_txq(tx_ring), in igb_clean_tx_irq()
6494 i += tx_ring->count; in igb_clean_tx_irq()
6495 tx_ring->next_to_clean = i; in igb_clean_tx_irq()
6496 u64_stats_update_begin(&tx_ring->tx_syncp); in igb_clean_tx_irq()
6497 tx_ring->tx_stats.bytes += total_bytes; in igb_clean_tx_irq()
6498 tx_ring->tx_stats.packets += total_packets; in igb_clean_tx_irq()
6499 u64_stats_update_end(&tx_ring->tx_syncp); in igb_clean_tx_irq()
6503 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { in igb_clean_tx_irq()
6509 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); in igb_clean_tx_irq()
6516 dev_err(tx_ring->dev, in igb_clean_tx_irq()
6528 tx_ring->queue_index, in igb_clean_tx_irq()
6529 rd32(E1000_TDH(tx_ring->reg_idx)), in igb_clean_tx_irq()
6530 readl(tx_ring->tail), in igb_clean_tx_irq()
6531 tx_ring->next_to_use, in igb_clean_tx_irq()
6532 tx_ring->next_to_clean, in igb_clean_tx_irq()
6537 netif_stop_subqueue(tx_ring->netdev, in igb_clean_tx_irq()
6538 tx_ring->queue_index); in igb_clean_tx_irq()
6547 netif_carrier_ok(tx_ring->netdev) && in igb_clean_tx_irq()
6548 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { in igb_clean_tx_irq()
6553 if (__netif_subqueue_stopped(tx_ring->netdev, in igb_clean_tx_irq()
6554 tx_ring->queue_index) && in igb_clean_tx_irq()
6556 netif_wake_subqueue(tx_ring->netdev, in igb_clean_tx_irq()
6557 tx_ring->queue_index); in igb_clean_tx_irq()
6559 u64_stats_update_begin(&tx_ring->tx_syncp); in igb_clean_tx_irq()
6560 tx_ring->tx_stats.restart_queue++; in igb_clean_tx_irq()
6561 u64_stats_update_end(&tx_ring->tx_syncp); in igb_clean_tx_irq()
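igb_clean_tx_irq() is the completion side. The index arithmetic at lines 6418, 6460, 6480 and 6494 is the driver's biased-index trick: i is offset by -tx_ring->count so the wrap condition is simply !i, and the bias is removed again before storing next_to_clean. A condensed sketch of the cleanup loop, with the per-fragment unmapping and byte/packet accounting collapsed into comments:

    unsigned int i = tx_ring->next_to_clean;

    tx_buffer = &tx_ring->tx_buffer_info[i];
    tx_desc = IGB_TX_DESC(tx_ring, i);
    i -= tx_ring->count;                    /* bias i so "!i" means end of ring */

    do {
            union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

            if (!eop_desc)                  /* nothing pending on this slot */
                    break;

            /* hardware has not written this packet back yet */
            if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
                    break;

            /* clear next_to_watch, free the skb, dma_unmap_single() the head,
             * then walk the packet's remaining descriptors with
             * dma_unmap_page(), advancing and wrapping exactly as below
             */

            tx_buffer++;
            tx_desc++;
            i++;
            if (unlikely(!i)) {             /* wrapped: rewind to ring start */
                    i -= tx_ring->count;
                    tx_buffer = tx_ring->tx_buffer_info;
                    tx_desc = IGB_TX_DESC(tx_ring, 0);
            }

            budget--;                       /* one completed packet per pass */
    } while (likely(budget));

    netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes);
    i += tx_ring->count;                    /* remove the bias */
    tx_ring->next_to_clean = i;

After the loop the remaining hits cover hang detection (dump TDH, tail, NTU and NTC, then stop the subqueue if a stuck descriptor is found) and the wake-up path: if packets were completed, the carrier is up and at least TX_WAKE_THRESHOLD descriptors are free, a stopped subqueue is restarted and restart_queue is counted under the tx_syncp stats sync.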