Lines matching refs:tx_ring in the igb driver's igb_main.c; each entry shows the source line number, the matching code, and the enclosing function.
369 struct igb_ring *tx_ring; in igb_dump() local
404 tx_ring = adapter->tx_ring[n]; in igb_dump()
405 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; in igb_dump()
407 n, tx_ring->next_to_use, tx_ring->next_to_clean, in igb_dump()
432 tx_ring = adapter->tx_ring[n]; in igb_dump()
434 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); in igb_dump()
438 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { in igb_dump()
441 tx_desc = IGB_TX_DESC(tx_ring, i); in igb_dump()
442 buffer_info = &tx_ring->tx_buffer_info[i]; in igb_dump()
444 if (i == tx_ring->next_to_use && in igb_dump()
445 i == tx_ring->next_to_clean) in igb_dump()
447 else if (i == tx_ring->next_to_use) in igb_dump()
449 else if (i == tx_ring->next_to_clean) in igb_dump()
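
The igb_dump() lines above (438-449) walk every descriptor slot and tag the next_to_use (NTU) and next_to_clean (NTC) positions while printing the ring. A minimal userspace sketch of that walk, using a hypothetical simplified ring struct in place of struct igb_ring:

#include <stdio.h>

/* Hypothetical, simplified stand-in for struct igb_ring. */
struct demo_ring {
	unsigned int count;          /* number of descriptor slots */
	unsigned int next_to_use;    /* producer index (NTU)       */
	unsigned int next_to_clean;  /* consumer index (NTC)       */
};

/* Walk the ring and mark where the producer and consumer sit,
 * mirroring the NTU/NTC annotations printed by igb_dump(). */
static void dump_ring(const struct demo_ring *ring)
{
	for (unsigned int i = 0; i < ring->count; i++) {
		const char *mark = "";

		if (i == ring->next_to_use && i == ring->next_to_clean)
			mark = " NTC/U";        /* producer met consumer */
		else if (i == ring->next_to_use)
			mark = " NTU";          /* next slot to fill     */
		else if (i == ring->next_to_clean)
			mark = " NTC";          /* next slot to reap     */

		printf("T [%03u]%s\n", i, mark);
	}
}

int main(void)
{
	struct demo_ring ring = { .count = 8, .next_to_use = 5, .next_to_clean = 2 };

	dump_ring(&ring);
	return 0;
}
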
741 adapter->tx_ring[j]->reg_idx = rbase_offset + j; in igb_cache_ring_register()
1038 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; in igb_reset_q_vector()
1273 adapter->tx_ring[txr_idx] = ring; in igb_alloc_q_vector()
3175 int igb_setup_tx_resources(struct igb_ring *tx_ring) in igb_setup_tx_resources() argument
3177 struct device *dev = tx_ring->dev; in igb_setup_tx_resources()
3180 size = sizeof(struct igb_tx_buffer) * tx_ring->count; in igb_setup_tx_resources()
3182 tx_ring->tx_buffer_info = vzalloc(size); in igb_setup_tx_resources()
3183 if (!tx_ring->tx_buffer_info) in igb_setup_tx_resources()
3187 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); in igb_setup_tx_resources()
3188 tx_ring->size = ALIGN(tx_ring->size, 4096); in igb_setup_tx_resources()
3190 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in igb_setup_tx_resources()
3191 &tx_ring->dma, GFP_KERNEL); in igb_setup_tx_resources()
3192 if (!tx_ring->desc) in igb_setup_tx_resources()
3195 tx_ring->next_to_use = 0; in igb_setup_tx_resources()
3196 tx_ring->next_to_clean = 0; in igb_setup_tx_resources()
3201 vfree(tx_ring->tx_buffer_info); in igb_setup_tx_resources()
3202 tx_ring->tx_buffer_info = NULL; in igb_setup_tx_resources()
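
igb_setup_tx_resources() (3175-3202) allocates two parallel arrays per queue: a vzalloc()'d tx_buffer_info bookkeeping table and a DMA-coherent descriptor ring rounded up to a 4096-byte multiple, then zeroes next_to_use/next_to_clean; if the descriptor allocation fails, the bookkeeping table is released again. A rough userspace analogue of that allocate-or-unwind shape, with calloc() standing in for vzalloc() and aligned_alloc() for dma_alloc_coherent() (all names below are illustrative, not the driver's):

#include <stdlib.h>
#include <string.h>

struct demo_tx_buffer { void *skb; };                    /* stand-in for struct igb_tx_buffer   */
struct demo_tx_desc   { unsigned long long addr, cmd; }; /* stand-in for the advanced descriptor */

struct demo_ring {
	struct demo_tx_buffer *tx_buffer_info;
	struct demo_tx_desc   *desc;
	size_t size;
	unsigned int count, next_to_use, next_to_clean;
};

/* Mirrors the allocate-then-unwind shape of igb_setup_tx_resources(). */
int demo_setup_tx_resources(struct demo_ring *ring)
{
	/* software bookkeeping array (vzalloc() in the driver) */
	ring->tx_buffer_info = calloc(ring->count, sizeof(*ring->tx_buffer_info));
	if (!ring->tx_buffer_info)
		return -1;

	/* descriptor ring, rounded up to and aligned on 4096 bytes
	 * (dma_alloc_coherent() in the driver) */
	ring->size = ring->count * sizeof(struct demo_tx_desc);
	ring->size = (ring->size + 4095) & ~(size_t)4095;
	ring->desc = aligned_alloc(4096, ring->size);
	if (!ring->desc)
		goto err;
	memset(ring->desc, 0, ring->size);

	ring->next_to_use = 0;
	ring->next_to_clean = 0;
	return 0;

err:
	free(ring->tx_buffer_info);
	ring->tx_buffer_info = NULL;
	return -1;
}
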
3220 err = igb_setup_tx_resources(adapter->tx_ring[i]); in igb_setup_all_tx_resources()
3225 igb_free_tx_resources(adapter->tx_ring[i]); in igb_setup_all_tx_resources()
3308 igb_configure_tx_ring(adapter, adapter->tx_ring[i]); in igb_configure_tx()
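
igb_setup_all_tx_resources() (3220/3225) runs that per-ring setup for every transmit queue and, when one allocation fails, frees only the rings that were already set up before bailing out. A self-contained sketch of that rollback loop (the per-queue helpers below are hypothetical stubs; the forced failure at queue 2 exists only so the demo exercises the unwind path):

#include <stdio.h>

#define NUM_QUEUES 4

/* Hypothetical stand-ins for igb_setup_tx_resources()/igb_free_tx_resources(). */
static int  demo_setup_one(int q) { printf("setup q%d\n", q); return q == 2 ? -1 : 0; }
static void demo_free_one(int q)  { printf("free  q%d\n", q); }

/* Mirrors igb_setup_all_tx_resources(): allocate every queue, and on
 * the first failure unwind only the queues that were already set up. */
static int demo_setup_all(void)
{
	int i, err = 0;

	for (i = 0; i < NUM_QUEUES; i++) {
		err = demo_setup_one(i);
		if (err) {
			while (i--)             /* roll back earlier queues */
				demo_free_one(i);
			break;
		}
	}
	return err;
}

int main(void) { return demo_setup_all() ? 1 : 0; }
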
3699 void igb_free_tx_resources(struct igb_ring *tx_ring) in igb_free_tx_resources() argument
3701 igb_clean_tx_ring(tx_ring); in igb_free_tx_resources()
3703 vfree(tx_ring->tx_buffer_info); in igb_free_tx_resources()
3704 tx_ring->tx_buffer_info = NULL; in igb_free_tx_resources()
3707 if (!tx_ring->desc) in igb_free_tx_resources()
3710 dma_free_coherent(tx_ring->dev, tx_ring->size, in igb_free_tx_resources()
3711 tx_ring->desc, tx_ring->dma); in igb_free_tx_resources()
3713 tx_ring->desc = NULL; in igb_free_tx_resources()
3727 if (adapter->tx_ring[i]) in igb_free_all_tx_resources()
3728 igb_free_tx_resources(adapter->tx_ring[i]); in igb_free_all_tx_resources()
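
igb_free_tx_resources() (3699-3713) tears a ring down in the reverse order of setup: drop any in-flight buffers via igb_clean_tx_ring(), free the software array and NULL the pointer, and only if the descriptor memory was ever allocated hand it back and clear tx_ring->desc, so a second call is harmless. A minimal userspace echo of that ordering (hypothetical types; plain free() standing in for vfree() and dma_free_coherent()):

#include <stdlib.h>

struct demo_ring {
	void *tx_buffer_info;   /* software bookkeeping array   */
	void *desc;             /* descriptor ring (DMA memory) */
};

/* Stub: the driver unmaps and frees every pending buffer here. */
static void demo_clean_tx_ring(struct demo_ring *ring) { (void)ring; }

/* Mirrors the teardown order of igb_free_tx_resources(). */
void demo_free_tx_resources(struct demo_ring *ring)
{
	demo_clean_tx_ring(ring);

	free(ring->tx_buffer_info);
	ring->tx_buffer_info = NULL;

	/* if this was never set, there is nothing left to free */
	if (!ring->desc)
		return;

	free(ring->desc);
	ring->desc = NULL;
}
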
3757 static void igb_clean_tx_ring(struct igb_ring *tx_ring) in igb_clean_tx_ring() argument
3763 if (!tx_ring->tx_buffer_info) in igb_clean_tx_ring()
3767 for (i = 0; i < tx_ring->count; i++) { in igb_clean_tx_ring()
3768 buffer_info = &tx_ring->tx_buffer_info[i]; in igb_clean_tx_ring()
3769 igb_unmap_and_free_tx_resource(tx_ring, buffer_info); in igb_clean_tx_ring()
3772 netdev_tx_reset_queue(txring_txq(tx_ring)); in igb_clean_tx_ring()
3774 size = sizeof(struct igb_tx_buffer) * tx_ring->count; in igb_clean_tx_ring()
3775 memset(tx_ring->tx_buffer_info, 0, size); in igb_clean_tx_ring()
3778 memset(tx_ring->desc, 0, tx_ring->size); in igb_clean_tx_ring()
3780 tx_ring->next_to_use = 0; in igb_clean_tx_ring()
3781 tx_ring->next_to_clean = 0; in igb_clean_tx_ring()
3793 if (adapter->tx_ring[i]) in igb_clean_all_tx_rings()
3794 igb_clean_tx_ring(adapter->tx_ring[i]); in igb_clean_all_tx_rings()
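
igb_clean_tx_ring() (3757-3781) resets a ring for reuse without freeing it: each tx_buffer_info slot is unmapped and dropped, both arrays are zeroed, and the producer/consumer indices rewind to 0. A compact sketch of that reset-in-place step, again with hypothetical simplified types:

#include <string.h>

struct demo_tx_buffer { void *skb; };
struct demo_tx_desc   { unsigned long long addr, cmd; };

struct demo_ring {
	struct demo_tx_buffer *tx_buffer_info;
	struct demo_tx_desc   *desc;
	size_t size;
	unsigned int count, next_to_use, next_to_clean;
};

/* Stub: the driver unmaps the DMA address and frees the skb here. */
static void demo_drop_buffer(struct demo_tx_buffer *b) { b->skb = NULL; }

/* Mirrors igb_clean_tx_ring(): drop pending buffers, zero both arrays,
 * and rewind the indices so the ring can be refilled from slot 0. */
void demo_clean_tx_ring(struct demo_ring *ring)
{
	if (!ring->tx_buffer_info)
		return;

	for (unsigned int i = 0; i < ring->count; i++)
		demo_drop_buffer(&ring->tx_buffer_info[i]);

	memset(ring->tx_buffer_info, 0, ring->count * sizeof(*ring->tx_buffer_info));
	memset(ring->desc, 0, ring->size);

	ring->next_to_use = 0;
	ring->next_to_clean = 0;
}
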
4362 struct igb_ring *tx_ring = adapter->tx_ring[i]; in igb_watchdog_task() local
4369 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { in igb_watchdog_task()
4378 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); in igb_watchdog_task()
4619 static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, in igb_tx_ctxtdesc() argument
4623 u16 i = tx_ring->next_to_use; in igb_tx_ctxtdesc()
4625 context_desc = IGB_TX_CTXTDESC(tx_ring, i); in igb_tx_ctxtdesc()
4628 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in igb_tx_ctxtdesc()
4634 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) in igb_tx_ctxtdesc()
4635 mss_l4len_idx |= tx_ring->reg_idx << 4; in igb_tx_ctxtdesc()
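
igb_tx_ctxtdesc() (4619-4635) claims the slot at next_to_use and advances the index with a wrap back to 0 at the end of the ring (4628). The same single-slot advance as a tiny standalone helper, assuming a stripped-down ring struct:

struct demo_ring { unsigned int count, next_to_use; };

/* Advance the producer index by one slot, wrapping at ring->count,
 * as igb_tx_ctxtdesc() does after writing a context descriptor. */
unsigned int demo_claim_slot(struct demo_ring *ring)
{
	unsigned int i = ring->next_to_use;

	ring->next_to_use = (i + 1 < ring->count) ? i + 1 : 0;
	return i;            /* caller fills descriptor slot i */
}
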
4643 static int igb_tso(struct igb_ring *tx_ring, in igb_tso() argument
4703 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); in igb_tso()
4708 static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) in igb_tx_csum() argument
4733 dev_warn(tx_ring->dev, in igb_tx_csum()
4757 dev_warn(tx_ring->dev, in igb_tx_csum()
4771 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); in igb_tx_csum()
4804 static void igb_tx_olinfo_status(struct igb_ring *tx_ring, in igb_tx_olinfo_status() argument
4811 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) in igb_tx_olinfo_status()
4812 olinfo_status |= tx_ring->reg_idx << 4; in igb_tx_olinfo_status()
4827 static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) in __igb_maybe_stop_tx() argument
4829 struct net_device *netdev = tx_ring->netdev; in __igb_maybe_stop_tx()
4831 netif_stop_subqueue(netdev, tx_ring->queue_index); in __igb_maybe_stop_tx()
4842 if (igb_desc_unused(tx_ring) < size) in __igb_maybe_stop_tx()
4846 netif_wake_subqueue(netdev, tx_ring->queue_index); in __igb_maybe_stop_tx()
4848 u64_stats_update_begin(&tx_ring->tx_syncp2); in __igb_maybe_stop_tx()
4849 tx_ring->tx_stats.restart_queue2++; in __igb_maybe_stop_tx()
4850 u64_stats_update_end(&tx_ring->tx_syncp2); in __igb_maybe_stop_tx()
4855 static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) in igb_maybe_stop_tx() argument
4857 if (igb_desc_unused(tx_ring) >= size) in igb_maybe_stop_tx()
4859 return __igb_maybe_stop_tx(tx_ring, size); in igb_maybe_stop_tx()
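
__igb_maybe_stop_tx() (4827-4850) is the classic stop/re-check/wake pattern: stop the subqueue, issue a full memory barrier (the driver's smp_mb() call does not contain tx_ring, so it is not shown above), re-read the free-descriptor count, and wake the queue again if a completion freed space in the meantime; igb_maybe_stop_tx() (4855-4859) is the cheap fast path. A sketch of that shape with C11 atomics, on a hypothetical queue struct rather than the real netdev subqueue API:

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical, simplified queue state; the driver works on
 * netif_stop_subqueue()/netif_wake_subqueue() and igb_desc_unused(). */
struct demo_queue {
	atomic_uint free_desc;    /* descriptors currently unused        */
	atomic_bool stopped;      /* transmit path paused?               */
	unsigned int restart_cnt; /* rough analogue of restart_queue2    */
};

/* Slow path: stop first, then re-check under a full barrier so a
 * concurrent completion that just freed space cannot be missed. */
static int demo_maybe_stop_slow(struct demo_queue *q, unsigned int need)
{
	atomic_store(&q->stopped, true);

	/* pairs with the barrier on the completion side (smp_mb() in igb) */
	atomic_thread_fence(memory_order_seq_cst);

	if (atomic_load(&q->free_desc) < need)
		return -1;                      /* still full: stay stopped */

	/* space appeared between the check and the stop: undo the stop */
	atomic_store(&q->stopped, false);
	q->restart_cnt++;
	return 0;
}

/* Fast path: the common case has plenty of room and needs no barrier. */
int demo_maybe_stop(struct demo_queue *q, unsigned int need)
{
	if (atomic_load(&q->free_desc) >= need)
		return 0;
	return demo_maybe_stop_slow(q, need);
}
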
4862 static void igb_tx_map(struct igb_ring *tx_ring, in igb_tx_map() argument
4874 u16 i = tx_ring->next_to_use; in igb_tx_map()
4876 tx_desc = IGB_TX_DESC(tx_ring, i); in igb_tx_map()
4878 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); in igb_tx_map()
4883 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in igb_tx_map()
4888 if (dma_mapping_error(tx_ring->dev, dma)) in igb_tx_map()
4903 if (i == tx_ring->count) { in igb_tx_map()
4904 tx_desc = IGB_TX_DESC(tx_ring, 0); in igb_tx_map()
4922 if (i == tx_ring->count) { in igb_tx_map()
4923 tx_desc = IGB_TX_DESC(tx_ring, 0); in igb_tx_map()
4931 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, in igb_tx_map()
4934 tx_buffer = &tx_ring->tx_buffer_info[i]; in igb_tx_map()
4941 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in igb_tx_map()
4959 if (i == tx_ring->count) in igb_tx_map()
4962 tx_ring->next_to_use = i; in igb_tx_map()
4965 igb_maybe_stop_tx(tx_ring, DESC_NEEDED); in igb_tx_map()
4967 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { in igb_tx_map()
4968 writel(i, tx_ring->tail); in igb_tx_map()
4978 dev_err(tx_ring->dev, "TX DMA map failed\n"); in igb_tx_map()
4982 tx_buffer = &tx_ring->tx_buffer_info[i]; in igb_tx_map()
4983 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer); in igb_tx_map()
4987 i = tx_ring->count; in igb_tx_map()
4991 tx_ring->next_to_use = i; in igb_tx_map()
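
igb_tx_map() (4862-4991) maps the skb head and then each page fragment into consecutive descriptors, wrapping the index back to 0 when it reaches tx_ring->count (4903, 4922), and on a DMA mapping failure walks backwards over the slots already filled (4982-4991). A condensed sketch of that wrap-and-unwind skeleton, with a stub in place of the DMA mapping calls (the stub never fails here, but the real mapping can):

#include <stdbool.h>

struct demo_buf  { bool mapped; };
struct demo_ring {
	struct demo_buf *bufs;
	unsigned int count, next_to_use;
};

/* Stand-ins for dma_map_single()/skb_frag_dma_map() and the unmap. */
static bool demo_map(struct demo_buf *b)   { b->mapped = true; return true; }
static void demo_unmap(struct demo_buf *b) { b->mapped = false; }

/* Mirrors the shape of igb_tx_map(): fill nr_frags + 1 consecutive
 * slots starting at next_to_use, wrapping at count, and on error
 * walk backwards over the slots that were already mapped. */
int demo_tx_map(struct demo_ring *ring, unsigned int nr_frags)
{
	unsigned int first = ring->next_to_use;
	unsigned int i = first;

	for (unsigned int f = 0; f <= nr_frags; f++) {
		if (!demo_map(&ring->bufs[i]))
			goto dma_error;

		if (++i == ring->count)     /* wrap to the start of the ring */
			i = 0;
	}

	ring->next_to_use = i;              /* publish the new producer index */
	return 0;

dma_error:
	/* unwind from the failing slot back to the first one we used */
	while (i != first) {
		if (i == 0)
			i = ring->count;
		i--;
		demo_unmap(&ring->bufs[i]);
	}
	ring->next_to_use = first;
	return -1;
}
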
4995 struct igb_ring *tx_ring) in igb_xmit_frame_ring() argument
5014 if (igb_maybe_stop_tx(tx_ring, count + 3)) { in igb_xmit_frame_ring()
5020 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in igb_xmit_frame_ring()
5026 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); in igb_xmit_frame_ring()
5051 tso = igb_tso(tx_ring, first, &hdr_len); in igb_xmit_frame_ring()
5055 igb_tx_csum(tx_ring, first); in igb_xmit_frame_ring()
5057 igb_tx_map(tx_ring, first, hdr_len); in igb_xmit_frame_ring()
5062 igb_unmap_and_free_tx_resource(tx_ring, first); in igb_xmit_frame_ring()
5075 return adapter->tx_ring[r_idx]; in igb_tx_queue_mapping()
5254 struct igb_ring *ring = adapter->tx_ring[i]; in igb_update_stats()
5539 struct igb_ring *tx_ring, in igb_update_tx_dca() argument
5543 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); in igb_update_tx_dca()
5556 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl); in igb_update_tx_dca()
6403 struct igb_ring *tx_ring = q_vector->tx.ring; in igb_clean_tx_irq() local
6408 unsigned int i = tx_ring->next_to_clean; in igb_clean_tx_irq()
6413 tx_buffer = &tx_ring->tx_buffer_info[i]; in igb_clean_tx_irq()
6414 tx_desc = IGB_TX_DESC(tx_ring, i); in igb_clean_tx_irq()
6415 i -= tx_ring->count; in igb_clean_tx_irq()
6442 dma_unmap_single(tx_ring->dev, in igb_clean_tx_irq()
6457 i -= tx_ring->count; in igb_clean_tx_irq()
6458 tx_buffer = tx_ring->tx_buffer_info; in igb_clean_tx_irq()
6459 tx_desc = IGB_TX_DESC(tx_ring, 0); in igb_clean_tx_irq()
6464 dma_unmap_page(tx_ring->dev, in igb_clean_tx_irq()
6477 i -= tx_ring->count; in igb_clean_tx_irq()
6478 tx_buffer = tx_ring->tx_buffer_info; in igb_clean_tx_irq()
6479 tx_desc = IGB_TX_DESC(tx_ring, 0); in igb_clean_tx_irq()
6489 netdev_tx_completed_queue(txring_txq(tx_ring), in igb_clean_tx_irq()
6491 i += tx_ring->count; in igb_clean_tx_irq()
6492 tx_ring->next_to_clean = i; in igb_clean_tx_irq()
6493 u64_stats_update_begin(&tx_ring->tx_syncp); in igb_clean_tx_irq()
6494 tx_ring->tx_stats.bytes += total_bytes; in igb_clean_tx_irq()
6495 tx_ring->tx_stats.packets += total_packets; in igb_clean_tx_irq()
6496 u64_stats_update_end(&tx_ring->tx_syncp); in igb_clean_tx_irq()
6500 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { in igb_clean_tx_irq()
6506 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); in igb_clean_tx_irq()
6513 dev_err(tx_ring->dev, in igb_clean_tx_irq()
6525 tx_ring->queue_index, in igb_clean_tx_irq()
6526 rd32(E1000_TDH(tx_ring->reg_idx)), in igb_clean_tx_irq()
6527 readl(tx_ring->tail), in igb_clean_tx_irq()
6528 tx_ring->next_to_use, in igb_clean_tx_irq()
6529 tx_ring->next_to_clean, in igb_clean_tx_irq()
6534 netif_stop_subqueue(tx_ring->netdev, in igb_clean_tx_irq()
6535 tx_ring->queue_index); in igb_clean_tx_irq()
6544 netif_carrier_ok(tx_ring->netdev) && in igb_clean_tx_irq()
6545 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { in igb_clean_tx_irq()
6550 if (__netif_subqueue_stopped(tx_ring->netdev, in igb_clean_tx_irq()
6551 tx_ring->queue_index) && in igb_clean_tx_irq()
6553 netif_wake_subqueue(tx_ring->netdev, in igb_clean_tx_irq()
6554 tx_ring->queue_index); in igb_clean_tx_irq()
6556 u64_stats_update_begin(&tx_ring->tx_syncp); in igb_clean_tx_irq()
6557 tx_ring->tx_stats.restart_queue++; in igb_clean_tx_irq()
6558 u64_stats_update_end(&tx_ring->tx_syncp); in igb_clean_tx_irq()
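
igb_clean_tx_irq() (6403-6558) keeps its loop index biased by -tx_ring->count (6415, 6457, 6477) so the end-of-ring wrap becomes a cheap "!i" test and a single "i += tx_ring->count" at the end (6491) recovers the real next_to_clean without any modulo in the hot path. A small standalone demo of just that index trick, with the descriptor and DMA handling omitted:

#include <stdio.h>

/* Demonstrates the index bias used in igb_clean_tx_irq(): the loop
 * variable is kept at (real_index - count), so the end-of-ring wrap
 * is a "!i" test and one final "i += count" restores next_to_clean.
 * Unsigned wraparound is well defined in C, so this is safe. */
int main(void)
{
	const unsigned int count = 8;        /* ring size                */
	unsigned int ntc = 5;                /* current next_to_clean    */
	unsigned int budget = 6;             /* descriptors to reclaim   */

	unsigned int i = ntc;
	i -= count;                          /* bias the index by -count */

	while (budget--) {
		/* ... reclaim the descriptor at slot (i + count) here ... */
		printf("cleaning slot %u\n", i + count);

		i++;
		if (!i)                      /* walked off the end: wrap */
			i -= count;
	}

	i += count;                          /* undo the bias            */
	ntc = i;
	printf("new next_to_clean = %u\n", ntc);
	return 0;
}
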