Lines Matching refs:efx
67 struct device *dma_dev = &tx_queue->efx->pci_dev->dev; in efx_dequeue_buffer()
82 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, in efx_dequeue_buffer()
97 efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) in efx_max_tx_len() argument
108 if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf)) in efx_max_tx_len()
114 unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) in efx_tx_max_skb_descs() argument
124 if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0) in efx_tx_max_skb_descs()
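
The efx_max_tx_len() and efx_tx_max_skb_descs() entries above bound how much data a single TX descriptor may carry starting from a given DMA address, with an extra cap when EFX_WORKAROUND_5391 applies to a 16-byte-misaligned address. A minimal userspace sketch of that length calculation, assuming a 4 KiB per-descriptor boundary and a 512-byte workaround cap (the constant and function names here are made up for the sketch):

    #include <stdint.h>
    #include <stdio.h>

    #define SKETCH_PAGE_SIZE 4096u   /* assumed per-descriptor boundary */

    /* Bytes that fit in one descriptor starting at dma_addr without crossing
     * the next SKETCH_PAGE_SIZE boundary; misaligned addresses are capped
     * further, mirroring the EFX_WORKAROUND_5391 branch in the listing. */
    static unsigned int sketch_max_tx_len(uint64_t dma_addr, int workaround_5391)
    {
        unsigned int len = (unsigned int)((~dma_addr & (SKETCH_PAGE_SIZE - 1)) + 1);

        if (workaround_5391 && (dma_addr & 0xf)) {
            unsigned int cap = 512u - (unsigned int)(dma_addr & 0xf);

            if (cap < len)
                len = cap;
        }
        return len;
    }

    int main(void)
    {
        printf("%u\n", sketch_max_tx_len(0x1000, 0));  /* 4096: starts on a boundary */
        printf("%u\n", sketch_max_tx_len(0x1ffc, 0));  /* 4: only 4 bytes to the boundary */
        printf("%u\n", sketch_max_tx_len(0x1004, 1));  /* 508: capped by the workaround */
        return 0;
    }
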
139 struct efx_nic *efx = txq1->efx; in efx_tx_maybe_stop_queue() local
144 if (likely(fill_level < efx->txq_stop_thresh)) in efx_tx_maybe_stop_queue()
168 EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries); in efx_tx_maybe_stop_queue()
169 if (likely(fill_level < efx->txq_stop_thresh)) { in efx_tx_maybe_stop_queue()
171 if (likely(!efx->loopback_selftest)) in efx_tx_maybe_stop_queue()
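
efx_tx_maybe_stop_queue() compares the ring fill level against efx->txq_stop_thresh, stops the core queue when it is too full, and then re-checks so a completion that raced with the stop can restart it. A hedged sketch of that check/stop/re-check pattern on plain counters; the struct, field names, and single-queue view are simplifications (the real function also folds in the paired TX queue and the loopback-selftest case):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct sketch_txq {
        atomic_uint insert_count;   /* producer: descriptors queued */
        atomic_uint read_count;     /* consumer: descriptors completed */
        unsigned int stop_thresh;   /* pause once fill level reaches this */
        bool stopped;               /* models the netif_tx_stop_queue() state */
    };

    /* Stop the queue when it is nearly full, then re-check the fill level so
     * a completion racing with the stop cannot leave the queue paused forever. */
    static void sketch_maybe_stop_queue(struct sketch_txq *q)
    {
        unsigned int fill = atomic_load(&q->insert_count) -
                            atomic_load(&q->read_count);

        if (fill < q->stop_thresh)
            return;

        q->stopped = true;                        /* netif_tx_stop_queue() */
        atomic_thread_fence(memory_order_seq_cst);

        fill = atomic_load(&q->insert_count) - atomic_load(&q->read_count);
        if (fill < q->stop_thresh)
            q->stopped = false;                   /* netif_tx_start_queue() */
    }

    int main(void)
    {
        struct sketch_txq q = { 0 };

        q.stop_thresh = 100;
        atomic_store(&q.insert_count, 120);
        atomic_store(&q.read_count, 10);
        sketch_maybe_stop_queue(&q);   /* fill level 110: queue ends up stopped */
        return q.stopped ? 0 : 1;
    }
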
186 static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf, in efx_memcpy_toio_aligned() argument
208 static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf, in efx_memcpy_toio_aligned_cb() argument
232 efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf); in efx_memcpy_toio_aligned_cb()
235 static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf, in efx_flush_copy_buffer() argument
247 static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb, in efx_skb_copy_bits_to_pio() argument
253 efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb), in efx_skb_copy_bits_to_pio()
262 efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset, in efx_skb_copy_bits_to_pio()
290 efx_skb_copy_bits_to_pio(tx_queue->efx, skb, in efx_enqueue_skb_pio()
292 efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf); in efx_enqueue_skb_pio()
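
The efx_memcpy_toio_aligned*() and efx_flush_copy_buffer() entries copy the skb head and each fragment into the PIO aperture in whole aligned words, parking any sub-word tail in a small copy buffer that is flushed at the end. A simplified, purely in-memory sketch of that idea (names are invented; the real _cb variant also tops up a partially filled buffer left over from the previous fragment):

    #include <stdint.h>
    #include <string.h>

    #define SKETCH_COPY_BUF_SIZE 8   /* assumed write granularity to the aperture */

    struct sketch_copy_buf {
        int used;
        uint8_t buf[SKETCH_COPY_BUF_SIZE];
    };

    /* Copy 'len' bytes toward the (simulated) PIO aperture in whole
     * SKETCH_COPY_BUF_SIZE words; a sub-word tail is parked in 'cb' so a
     * later flush (or the next fragment) can finish it. */
    static void sketch_copy_aligned(uint8_t **pio, const uint8_t *data, size_t len,
                                    struct sketch_copy_buf *cb)
    {
        size_t whole = len & ~(size_t)(SKETCH_COPY_BUF_SIZE - 1);

        memcpy(*pio, data, whole);            /* stands in for __iowrite64_copy() */
        *pio += whole;
        data += whole;
        len -= whole;

        memcpy(cb->buf, data, len);
        cb->used = (int)len;
    }

    /* Pad and write out whatever is left in the copy buffer. */
    static void sketch_flush_copy_buf(uint8_t **pio, struct sketch_copy_buf *cb)
    {
        if (!cb->used)
            return;
        memset(cb->buf + cb->used, 0, SKETCH_COPY_BUF_SIZE - cb->used);
        memcpy(*pio, cb->buf, SKETCH_COPY_BUF_SIZE);
        *pio += SKETCH_COPY_BUF_SIZE;
        cb->used = 0;
    }

    int main(void)
    {
        uint8_t aperture[64], head[13], *pio = aperture;
        struct sketch_copy_buf cb = { 0 };

        memset(head, 0xab, sizeof(head));     /* 13-byte stand-in for skb head data */
        sketch_copy_aligned(&pio, head, sizeof(head), &cb);
        sketch_flush_copy_buf(&pio, &cb);
        return (int)(pio - aperture);         /* 16 bytes reached the aperture */
    }
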
335 struct efx_nic *efx = tx_queue->efx; in efx_enqueue_skb() local
336 struct device *dma_dev = &efx->pci_dev->dev; in efx_enqueue_skb()
353 if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) { in efx_enqueue_skb()
391 dma_len = efx_max_tx_len(efx, dma_addr); in efx_enqueue_skb()
454 netif_err(efx, tx_err, efx->net_dev, in efx_enqueue_skb()
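
Inside efx_enqueue_skb(), each DMA-mapped piece of the skb is chopped into descriptors no longer than efx_max_tx_len() allows, so no descriptor crosses the hardware boundary. A self-contained sketch of that splitting loop (boundary size and struct layout are assumptions for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define SKETCH_PAGE_SIZE 4096u   /* assumed per-descriptor boundary */

    struct sketch_desc {
        uint64_t dma_addr;
        unsigned int len;
    };

    /* Split one DMA-mapped region into descriptors that never cross a
     * SKETCH_PAGE_SIZE boundary, mirroring the dma_len = efx_max_tx_len(...)
     * step in the listing.  Returns the number of descriptors filled. */
    static unsigned int sketch_split_region(uint64_t dma_addr, unsigned int len,
                                            struct sketch_desc *desc,
                                            unsigned int max_desc)
    {
        unsigned int n = 0;

        while (len && n < max_desc) {
            unsigned int dma_len =
                (unsigned int)((~dma_addr & (SKETCH_PAGE_SIZE - 1)) + 1);

            if (dma_len > len)
                dma_len = len;

            desc[n].dma_addr = dma_addr;
            desc[n].len = dma_len;
            n++;

            dma_addr += dma_len;
            len -= dma_len;
        }
        return n;
    }

    int main(void)
    {
        struct sketch_desc d[8];
        unsigned int n = sketch_split_region(0x10ff0, 6000, d, 8);

        for (unsigned int i = 0; i < n; i++)
            printf("desc %u: addr=%#llx len=%u\n",
                   i, (unsigned long long)d[i].dma_addr, d[i].len);
        return 0;
    }
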
493 struct efx_nic *efx = tx_queue->efx; in efx_dequeue_buffers() local
504 netif_err(efx, tx_err, efx->net_dev, in efx_dequeue_buffers()
507 efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); in efx_dequeue_buffers()
530 struct efx_nic *efx = netdev_priv(net_dev); in efx_hard_start_xmit() local
538 unlikely(efx_ptp_is_ptp_tx(efx, skb))) { in efx_hard_start_xmit()
539 return efx_ptp_tx(efx, skb); in efx_hard_start_xmit()
544 if (index >= efx->n_tx_channels) { in efx_hard_start_xmit()
545 index -= efx->n_tx_channels; in efx_hard_start_xmit()
548 tx_queue = efx_get_tx_queue(efx, index, type); in efx_hard_start_xmit()
555 struct efx_nic *efx = tx_queue->efx; in efx_init_tx_queue_core_txq() local
559 netdev_get_tx_queue(efx->net_dev, in efx_init_tx_queue_core_txq()
562 efx->n_tx_channels : 0)); in efx_init_tx_queue_core_txq()
567 struct efx_nic *efx = netdev_priv(net_dev); in efx_setup_tc() local
573 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC) in efx_setup_tc()
580 net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels; in efx_setup_tc()
581 net_dev->tc_to_txq[tc].count = efx->n_tx_channels; in efx_setup_tc()
586 efx_for_each_channel(channel, efx) { in efx_setup_tc()
608 efx->n_tx_channels); in efx_setup_tc()
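
The efx_hard_start_xmit() and efx_setup_tc() entries share one queue layout: core queues 0..n_tx_channels-1 are the normal-priority queues and the next n_tx_channels are the high-priority ones, which is why the index is reduced by efx->n_tx_channels above and tc_to_txq[tc].offset is tc * efx->n_tx_channels. A small sketch of that index arithmetic (type and function names are invented):

    #include <stdio.h>

    enum sketch_txq_type { SKETCH_TXQ_NORMAL, SKETCH_TXQ_HIGHPRI };

    struct sketch_txq_pick {
        unsigned int channel;          /* TX channel index */
        enum sketch_txq_type type;     /* which hardware queue on that channel */
    };

    /* Map a core netdev queue index onto (channel, priority), assuming the
     * layout implied by the listing: queues 0..n-1 are normal priority and
     * queues n..2n-1 are high priority on the same channels. */
    static struct sketch_txq_pick sketch_pick_txq(unsigned int core_index,
                                                  unsigned int n_tx_channels)
    {
        struct sketch_txq_pick p = { core_index, SKETCH_TXQ_NORMAL };

        if (core_index >= n_tx_channels) {
            p.channel = core_index - n_tx_channels;
            p.type = SKETCH_TXQ_HIGHPRI;
        }
        return p;
    }

    int main(void)
    {
        struct sketch_txq_pick p = sketch_pick_txq(5, 4);

        printf("channel=%u highpri=%d\n", p.channel, p.type == SKETCH_TXQ_HIGHPRI);
        return 0;
    }
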
625 struct efx_nic *efx = tx_queue->efx; in efx_xmit_done() local
643 likely(efx->port_enabled) && in efx_xmit_done()
644 likely(netif_device_present(efx->net_dev))) { in efx_xmit_done()
648 if (fill_level <= efx->txq_wake_thresh) in efx_xmit_done()
680 struct efx_nic *efx = tx_queue->efx; in efx_probe_tx_queue() local
685 entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE); in efx_probe_tx_queue()
689 netif_dbg(efx, probe, efx->net_dev, in efx_probe_tx_queue()
691 tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask); in efx_probe_tx_queue()
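
efx_probe_tx_queue() sizes the descriptor ring as max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE) and derives ptr_mask from it, so ever-growing insert/read counters can be masked straight into ring offsets. A sketch of that sizing and masking (the minimum size used here is an assumption):

    #include <stdio.h>

    #define SKETCH_MIN_DMAQ_SIZE 512u   /* assumed minimum; see EFX_MIN_DMAQ_SIZE */

    /* Round v up to the next power of two (v > 0). */
    static unsigned int sketch_roundup_pow_of_two(unsigned int v)
    {
        unsigned int p = 1;

        while (p < v)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        unsigned int requested = 700;
        unsigned int entries = sketch_roundup_pow_of_two(requested);

        if (entries < SKETCH_MIN_DMAQ_SIZE)
            entries = SKETCH_MIN_DMAQ_SIZE;

        /* With a power-of-two ring, insert/read counters can grow freely
         * and 'ptr_mask' turns them into ring offsets. */
        unsigned int ptr_mask = entries - 1;
        unsigned int insert_count = 5000;

        printf("entries=%u ptr_mask=%#x slot=%u\n",
               entries, ptr_mask, insert_count & ptr_mask);
        return 0;
    }
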
727 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, in efx_init_tx_queue()
748 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, in efx_fini_tx_queue()
773 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, in efx_remove_tx_queue()
779 efx_nic_free_buffer(tx_queue->efx, in efx_remove_tx_queue()
893 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE, in efx_tsoh_get_buffer()
929 struct efx_nic *efx = tx_queue->efx; in efx_tx_queue_insert() local
940 efx->txq_entries); in efx_tx_queue_insert()
944 dma_len = efx_max_tx_len(efx, dma_addr); in efx_tx_queue_insert()
973 buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev, in efx_tso_put_header()
976 if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev, in efx_tso_put_header()
1011 static int tso_start(struct tso_state *st, struct efx_nic *efx, in tso_start() argument
1014 bool use_opt_desc = efx_nic_rev(efx) >= EFX_REV_HUNT_A0; in tso_start()
1015 struct device *dma_dev = &efx->pci_dev->dev; in tso_start()
1068 static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, in tso_get_fragment() argument
1071 st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0, in tso_get_fragment()
1073 if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { in tso_get_fragment()
1265 struct efx_nic *efx = tx_queue->efx; in efx_enqueue_skb_tso() local
1273 rc = tso_start(&state, efx, skb); in efx_enqueue_skb_tso()
1281 rc = tso_get_fragment(&state, efx, in efx_enqueue_skb_tso()
1301 rc = tso_get_fragment(&state, efx, in efx_enqueue_skb_tso()
1337 netif_err(efx, tx_err, efx->net_dev, in efx_enqueue_skb_tso()
1344 dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr, in efx_enqueue_skb_tso()
1347 dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr, in efx_enqueue_skb_tso()
1353 dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr, in efx_enqueue_skb_tso()
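
The tso_start()/tso_get_fragment()/efx_enqueue_skb_tso() entries implement software TSO: the headers are emitted once per output packet and the payload is walked at most mss bytes at a time. A minimal sketch of just that slicing on flat buffers; it ignores DMA mapping, descriptor limits, and the per-packet header fixups (sequence numbers, IP IDs, checksums) the real code performs:

    #include <stdio.h>
    #include <string.h>

    /* Walk 'payload_len' bytes of payload and emit one packet per 'mss'
     * bytes, prepending the same header each time.  Returns the number of
     * packets produced.  Models only the slicing done by the TSO loop in
     * the listing, not descriptor or checksum handling. */
    static unsigned int sketch_tso_segment(const unsigned char *header,
                                           size_t header_len,
                                           const unsigned char *payload,
                                           size_t payload_len,
                                           size_t mss,
                                           unsigned char *out, size_t out_size)
    {
        unsigned int packets = 0;
        size_t used = 0;

        while (payload_len) {
            size_t seg = payload_len < mss ? payload_len : mss;

            if (used + header_len + seg > out_size)
                break;                      /* out of output space */

            memcpy(out + used, header, header_len);
            memcpy(out + used + header_len, payload, seg);
            used += header_len + seg;

            payload += seg;
            payload_len -= seg;
            packets++;
        }
        return packets;
    }

    int main(void)
    {
        unsigned char hdr[54] = { 0 };      /* stand-in Ethernet+IP+TCP header */
        unsigned char payload[4000] = { 0 };
        unsigned char out[8192];

        printf("packets=%u\n",
               sketch_tso_segment(hdr, sizeof(hdr), payload, sizeof(payload),
                                  1448, out, sizeof(out)));
        return 0;
    }
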