Lines Matching refs:tx_queue
37 efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue) in efx_tx_queue_get_insert_index() argument
39 return tx_queue->insert_count & tx_queue->ptr_mask; in efx_tx_queue_get_insert_index()
43 __efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue) in __efx_tx_queue_get_insert_buffer() argument
45 return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)]; in __efx_tx_queue_get_insert_buffer()
49 efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue) in efx_tx_queue_get_insert_buffer() argument
52 __efx_tx_queue_get_insert_buffer(tx_queue); in efx_tx_queue_get_insert_buffer()
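The insert-buffer helpers listed above come down to a single mask operation: the ring holds a power-of-two number of entries, ptr_mask is entries - 1, and insert_count is a free-running counter that is never wrapped explicitly. A minimal userspace sketch of that arithmetic (toy names, not driver code):

    #include <assert.h>

    struct toy_ring {
            unsigned int insert_count;      /* free-running, never wrapped explicitly */
            unsigned int ptr_mask;          /* entries - 1, entries a power of two */
    };

    static unsigned int toy_insert_index(const struct toy_ring *r)
    {
            return r->insert_count & r->ptr_mask;
    }

    int main(void)
    {
            struct toy_ring r = { .insert_count = 1027, .ptr_mask = 1024 - 1 };

            assert(toy_insert_index(&r) == 1027 % 1024);    /* slot 3 */
            return 0;
    }

The same masking is applied to read_count and write_count throughout the file, which is why plain counter differences such as insert_count - read_count give the fill level directly.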
61 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, in efx_dequeue_buffer() argument
67 struct device *dma_dev = &tx_queue->efx->pci_dev->dev; in efx_dequeue_buffer()
82 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, in efx_dequeue_buffer()
84 tx_queue->queue, tx_queue->read_count); in efx_dequeue_buffer()
93 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
271 efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb) in efx_enqueue_skb_pio() argument
274 efx_tx_queue_get_insert_buffer(tx_queue); in efx_enqueue_skb_pio()
275 u8 __iomem *piobuf = tx_queue->piobuf; in efx_enqueue_skb_pio()
290 efx_skb_copy_bits_to_pio(tx_queue->efx, skb, in efx_enqueue_skb_pio()
292 efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf); in efx_enqueue_skb_pio()
300 __iowrite64_copy(tx_queue->piobuf, skb->data, in efx_enqueue_skb_pio()
310 tx_queue->piobuf_offset); in efx_enqueue_skb_pio()
311 ++tx_queue->pio_packets; in efx_enqueue_skb_pio()
312 ++tx_queue->insert_count; in efx_enqueue_skb_pio()
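efx_enqueue_skb_pio() copies the frame into the adapter's PIO buffer: fragmented skbs go through a copy buffer (efx_skb_copy_bits_to_pio()/efx_flush_copy_buffer()), while linear skbs are written directly with __iowrite64_copy(), after which pio_packets and insert_count are bumped. __iowrite64_copy() moves whole 64-bit words, so the copy length is effectively rounded up to a multiple of 8. A userspace sketch of that rounding, with memcpy() standing in for the MMIO word copy and illustrative sizes:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Sketch only: demonstrates the word-granularity length rounding,
     * not the driver's write-combined MMIO access. */
    static size_t toy_pio_words(size_t len)
    {
            return (len + sizeof(uint64_t) - 1) / sizeof(uint64_t);
    }

    int main(void)
    {
            uint8_t frame[60] = { 0 };              /* minimum Ethernet frame */
            uint64_t aperture[16] = { 0 };          /* stand-in for tx_queue->piobuf */
            size_t words = toy_pio_words(sizeof(frame));

            memcpy(aperture, frame, sizeof(frame)); /* stand-in for __iowrite64_copy() */
            printf("copied %zu words for a %zu-byte frame\n", words, sizeof(frame));
            return 0;
    }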
333 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) in efx_enqueue_skb() argument
335 struct efx_nic *efx = tx_queue->efx; in efx_enqueue_skb()
338 unsigned int old_insert_count = tx_queue->insert_count; in efx_enqueue_skb()
347 return efx_enqueue_skb_tso(tx_queue, skb); in efx_enqueue_skb()
363 efx_nic_may_tx_pio(tx_queue)) { in efx_enqueue_skb()
364 buffer = efx_enqueue_skb_pio(tx_queue, skb); in efx_enqueue_skb()
389 buffer = efx_tx_queue_get_insert_buffer(tx_queue); in efx_enqueue_skb()
401 ++tx_queue->insert_count; in efx_enqueue_skb()
429 netdev_tx_sent_queue(tx_queue->core_txq, skb->len); in efx_enqueue_skb()
431 efx_tx_maybe_stop_queue(tx_queue); in efx_enqueue_skb()
434 if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) { in efx_enqueue_skb()
435 struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue); in efx_enqueue_skb()
444 efx_nic_push_buffers(tx_queue); in efx_enqueue_skb()
446 tx_queue->xmit_more_available = skb->xmit_more; in efx_enqueue_skb()
449 tx_queue->tx_packets++; in efx_enqueue_skb()
456 "fragments for DMA\n", tx_queue->queue, skb->len, in efx_enqueue_skb()
463 while (tx_queue->insert_count != old_insert_count) { in efx_enqueue_skb()
465 --tx_queue->insert_count; in efx_enqueue_skb()
466 buffer = __efx_tx_queue_get_insert_buffer(tx_queue); in efx_enqueue_skb()
467 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); in efx_enqueue_skb()
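The loop listed just above is the error unwind in efx_enqueue_skb(): if DMA mapping fails partway through, insert_count is walked back down to the old_insert_count captured on entry and each claimed slot is handed to efx_dequeue_buffer(). A simplified standalone sketch of that back-out pattern (toy ring and slot type assumed):

    #include <assert.h>

    #define TOY_ENTRIES 8

    struct toy_slot { int in_use; };

    static void toy_unwind(struct toy_slot *ring, unsigned int *insert_count,
                           unsigned int old_insert_count)
    {
            while (*insert_count != old_insert_count) {
                    --*insert_count;
                    ring[*insert_count & (TOY_ENTRIES - 1)].in_use = 0;
            }
    }

    int main(void)
    {
            struct toy_slot ring[TOY_ENTRIES] = { 0 };
            unsigned int insert = 5, old = 2;

            for (unsigned int i = old; i < insert; i++)
                    ring[i & (TOY_ENTRIES - 1)].in_use = 1;
            toy_unwind(ring, &insert, old);
            assert(insert == 2 && !ring[2].in_use && !ring[4].in_use);
            return 0;
    }

efx_enqueue_unwind(), listed further down, applies the same pattern for the TSO path.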
488 static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, in efx_dequeue_buffers() argument
493 struct efx_nic *efx = tx_queue->efx; in efx_dequeue_buffers()
496 stop_index = (index + 1) & tx_queue->ptr_mask; in efx_dequeue_buffers()
497 read_ptr = tx_queue->read_count & tx_queue->ptr_mask; in efx_dequeue_buffers()
500 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; in efx_dequeue_buffers()
506 tx_queue->queue, read_ptr); in efx_dequeue_buffers()
511 efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl); in efx_dequeue_buffers()
513 ++tx_queue->read_count; in efx_dequeue_buffers()
514 read_ptr = tx_queue->read_count & tx_queue->ptr_mask; in efx_dequeue_buffers()
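efx_dequeue_buffers() retires descriptors from read_count up to and including the completion index reported by the hardware: stop_index is (index + 1) masked into the ring, and the free-running read_count is incremented as each slot is released. A simplified sketch of that walk on an 8-entry toy ring:

    #include <stdio.h>

    #define TOY_MASK 7      /* 8-entry ring */

    static void toy_complete_to(unsigned int *read_count, unsigned int index)
    {
            unsigned int stop = (index + 1) & TOY_MASK;
            unsigned int ptr = *read_count & TOY_MASK;

            while (ptr != stop) {
                    printf("retire slot %u\n", ptr);
                    ++*read_count;
                    ptr = *read_count & TOY_MASK;
            }
    }

    int main(void)
    {
            unsigned int read_count = 6;

            toy_complete_to(&read_count, 1);        /* retires slots 6, 7, 0, 1 */
            return 0;
    }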
531 struct efx_tx_queue *tx_queue; in efx_hard_start_xmit() local
548 tx_queue = efx_get_tx_queue(efx, index, type); in efx_hard_start_xmit()
550 return efx_enqueue_skb(tx_queue, skb); in efx_hard_start_xmit()
553 void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) in efx_init_tx_queue_core_txq() argument
555 struct efx_nic *efx = tx_queue->efx; in efx_init_tx_queue_core_txq()
558 tx_queue->core_txq = in efx_init_tx_queue_core_txq()
560 tx_queue->queue / EFX_TXQ_TYPES + in efx_init_tx_queue_core_txq()
561 ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? in efx_init_tx_queue_core_txq()
569 struct efx_tx_queue *tx_queue; in efx_setup_tc() local
587 efx_for_each_possible_channel_tx_queue(tx_queue, in efx_setup_tc()
589 if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI)) in efx_setup_tc()
591 if (!tx_queue->buffer) { in efx_setup_tc()
592 rc = efx_probe_tx_queue(tx_queue); in efx_setup_tc()
596 if (!tx_queue->initialised) in efx_setup_tc()
597 efx_init_tx_queue(tx_queue); in efx_setup_tc()
598 efx_init_tx_queue_core_txq(tx_queue); in efx_setup_tc()
622 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) in efx_xmit_done() argument
625 struct efx_nic *efx = tx_queue->efx; in efx_xmit_done()
629 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); in efx_xmit_done()
631 efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); in efx_xmit_done()
632 netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl); in efx_xmit_done()
635 ++tx_queue->merge_events; in efx_xmit_done()
642 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && in efx_xmit_done()
645 txq2 = efx_tx_queue_partner(tx_queue); in efx_xmit_done()
646 fill_level = max(tx_queue->insert_count - tx_queue->read_count, in efx_xmit_done()
649 netif_tx_wake_queue(tx_queue->core_txq); in efx_xmit_done()
653 if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { in efx_xmit_done()
654 tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count); in efx_xmit_done()
655 if (tx_queue->read_count == tx_queue->old_write_count) { in efx_xmit_done()
657 tx_queue->empty_read_count = in efx_xmit_done()
658 tx_queue->read_count | EFX_EMPTY_COUNT_VALID; in efx_xmit_done()
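efx_xmit_done() only wakes a stopped core queue once both partner queues have drained enough: the fill level is the larger of insert_count - read_count across the pair, compared against the wake threshold. Because the counters are free-running and unsigned, the subtraction stays correct across wrap. A toy version of that check (threshold value and names are illustrative, not the driver's):

    #include <stdio.h>

    #define TOY_WAKE_THRESH 16      /* illustrative threshold */

    static unsigned int toy_fill_level(unsigned int ins1, unsigned int rd1,
                                       unsigned int ins2, unsigned int rd2)
    {
            unsigned int a = ins1 - rd1;    /* unsigned, wrap-safe difference */
            unsigned int b = ins2 - rd2;

            return a > b ? a : b;
    }

    int main(void)
    {
            unsigned int fill = toy_fill_level(1050, 1040, 40, 38);

            if (fill <= TOY_WAKE_THRESH)
                    printf("fill %u: wake the queue\n", fill);
            return 0;
    }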
673 static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue) in efx_tsoh_page_count() argument
675 return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE); in efx_tsoh_page_count()
678 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) in efx_probe_tx_queue() argument
680 struct efx_nic *efx = tx_queue->efx; in efx_probe_tx_queue()
687 tx_queue->ptr_mask = entries - 1; in efx_probe_tx_queue()
691 tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask); in efx_probe_tx_queue()
694 tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer), in efx_probe_tx_queue()
696 if (!tx_queue->buffer) in efx_probe_tx_queue()
699 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) { in efx_probe_tx_queue()
700 tx_queue->tsoh_page = in efx_probe_tx_queue()
701 kcalloc(efx_tsoh_page_count(tx_queue), in efx_probe_tx_queue()
702 sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL); in efx_probe_tx_queue()
703 if (!tx_queue->tsoh_page) { in efx_probe_tx_queue()
710 rc = efx_nic_probe_tx(tx_queue); in efx_probe_tx_queue()
717 kfree(tx_queue->tsoh_page); in efx_probe_tx_queue()
718 tx_queue->tsoh_page = NULL; in efx_probe_tx_queue()
720 kfree(tx_queue->buffer); in efx_probe_tx_queue()
721 tx_queue->buffer = NULL; in efx_probe_tx_queue()
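efx_probe_tx_queue() sizes the ring so that ptr_mask can simply be entries - 1: the requested txq_entries value is rounded up to a power of two (the driver also clamps it to a hardware minimum, which does not appear in the matches above), and the number of TSO header pages is derived from the same entry count via efx_tsoh_page_count(). A userspace sketch of that sizing math, with TSOH_PER_PAGE assumed to be 32 (4 KiB pages, 128-byte header slots) purely for illustration:

    #include <stdio.h>

    #define TOY_TSOH_PER_PAGE 32    /* assumed value for illustration */

    static unsigned int toy_roundup_pow_of_two(unsigned int n)
    {
            unsigned int p = 1;

            while (p < n)
                    p <<= 1;
            return p;
    }

    int main(void)
    {
            unsigned int requested = 1000;                  /* e.g. efx->txq_entries */
            unsigned int entries = toy_roundup_pow_of_two(requested);
            unsigned int ptr_mask = entries - 1;
            unsigned int tsoh_pages =
                    (ptr_mask + 1 + 2 * TOY_TSOH_PER_PAGE - 1) /
                    (2 * TOY_TSOH_PER_PAGE);                /* DIV_ROUND_UP */

            printf("entries=%u ptr_mask=%#x tsoh_pages=%u\n",
                   entries, ptr_mask, tsoh_pages);
            return 0;
    }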
725 void efx_init_tx_queue(struct efx_tx_queue *tx_queue) in efx_init_tx_queue() argument
727 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, in efx_init_tx_queue()
728 "initialising TX queue %d\n", tx_queue->queue); in efx_init_tx_queue()
730 tx_queue->insert_count = 0; in efx_init_tx_queue()
731 tx_queue->write_count = 0; in efx_init_tx_queue()
732 tx_queue->old_write_count = 0; in efx_init_tx_queue()
733 tx_queue->read_count = 0; in efx_init_tx_queue()
734 tx_queue->old_read_count = 0; in efx_init_tx_queue()
735 tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID; in efx_init_tx_queue()
736 tx_queue->xmit_more_available = false; in efx_init_tx_queue()
739 efx_nic_init_tx(tx_queue); in efx_init_tx_queue()
741 tx_queue->initialised = true; in efx_init_tx_queue()
744 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) in efx_fini_tx_queue() argument
748 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, in efx_fini_tx_queue()
749 "shutting down TX queue %d\n", tx_queue->queue); in efx_fini_tx_queue()
751 if (!tx_queue->buffer) in efx_fini_tx_queue()
755 while (tx_queue->read_count != tx_queue->write_count) { in efx_fini_tx_queue()
757 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; in efx_fini_tx_queue()
758 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); in efx_fini_tx_queue()
760 ++tx_queue->read_count; in efx_fini_tx_queue()
762 tx_queue->xmit_more_available = false; in efx_fini_tx_queue()
763 netdev_tx_reset_queue(tx_queue->core_txq); in efx_fini_tx_queue()
766 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) in efx_remove_tx_queue() argument
770 if (!tx_queue->buffer) in efx_remove_tx_queue()
773 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, in efx_remove_tx_queue()
774 "destroying TX queue %d\n", tx_queue->queue); in efx_remove_tx_queue()
775 efx_nic_remove_tx(tx_queue); in efx_remove_tx_queue()
777 if (tx_queue->tsoh_page) { in efx_remove_tx_queue()
778 for (i = 0; i < efx_tsoh_page_count(tx_queue); i++) in efx_remove_tx_queue()
779 efx_nic_free_buffer(tx_queue->efx, in efx_remove_tx_queue()
780 &tx_queue->tsoh_page[i]); in efx_remove_tx_queue()
781 kfree(tx_queue->tsoh_page); in efx_remove_tx_queue()
782 tx_queue->tsoh_page = NULL; in efx_remove_tx_queue()
785 kfree(tx_queue->buffer); in efx_remove_tx_queue()
786 tx_queue->buffer = NULL; in efx_remove_tx_queue()
875 static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue, in efx_tsoh_get_buffer() argument
886 (tx_queue->insert_count & tx_queue->ptr_mask) / 2; in efx_tsoh_get_buffer()
888 &tx_queue->tsoh_page[index / TSOH_PER_PAGE]; in efx_tsoh_get_buffer()
893 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE, in efx_tsoh_get_buffer()
901 tx_queue->tso_long_headers++; in efx_tsoh_get_buffer()
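efx_tsoh_get_buffer() carves short TSO headers out of shared DMA pages: the masked insert index is halved (at most one header is needed per two descriptors), then split into a page number and an offset within that page; headers too long for a standard slot are allocated separately and counted in tso_long_headers. A sketch of the placement arithmetic, with slot size and slots-per-page assumed for illustration:

    #include <stdio.h>

    #define TOY_TSOH_SIZE     128   /* assumed header slot size */
    #define TOY_TSOH_PER_PAGE 32    /* assumed: 4096 / 128 */

    int main(void)
    {
            unsigned int insert_index = 70;         /* insert_count & ptr_mask */
            unsigned int slot = insert_index / 2;   /* one header per two descriptors */
            unsigned int page = slot / TOY_TSOH_PER_PAGE;
            unsigned int offset = TOY_TSOH_SIZE * (slot % TOY_TSOH_PER_PAGE);

            printf("slot %u -> page %u, offset %u\n", slot, page, offset);
            return 0;
    }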
924 static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue, in efx_tx_queue_insert() argument
929 struct efx_nic *efx = tx_queue->efx; in efx_tx_queue_insert()
935 buffer = efx_tx_queue_get_insert_buffer(tx_queue); in efx_tx_queue_insert()
936 ++tx_queue->insert_count; in efx_tx_queue_insert()
938 EFX_BUG_ON_PARANOID(tx_queue->insert_count - in efx_tx_queue_insert()
939 tx_queue->read_count >= in efx_tx_queue_insert()
969 static int efx_tso_put_header(struct efx_tx_queue *tx_queue, in efx_tso_put_header() argument
973 buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev, in efx_tso_put_header()
976 if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev, in efx_tso_put_header()
988 ++tx_queue->insert_count; in efx_tso_put_header()
996 static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, in efx_enqueue_unwind() argument
1002 while (tx_queue->insert_count != insert_count) { in efx_enqueue_unwind()
1003 --tx_queue->insert_count; in efx_enqueue_unwind()
1004 buffer = __efx_tx_queue_get_insert_buffer(tx_queue); in efx_enqueue_unwind()
1005 efx_dequeue_buffer(tx_queue, buffer, NULL, NULL); in efx_enqueue_unwind()
1093 static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, in tso_fill_packet_with_fragment() argument
1114 efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer); in tso_fill_packet_with_fragment()
1145 static int tso_start_new_packet(struct efx_tx_queue *tx_queue, in tso_start_new_packet() argument
1150 efx_tx_queue_get_insert_buffer(tx_queue); in tso_start_new_packet()
1169 header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len); in tso_start_new_packet()
1196 rc = efx_tso_put_header(tx_queue, buffer, header); in tso_start_new_packet()
1215 ++tx_queue->insert_count; in tso_start_new_packet()
1220 buffer = efx_tx_queue_get_insert_buffer(tx_queue); in tso_start_new_packet()
1235 ++tx_queue->insert_count; in tso_start_new_packet()
1243 ++tx_queue->tso_packets; in tso_start_new_packet()
1245 ++tx_queue->tx_packets; in tso_start_new_packet()
1262 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, in efx_enqueue_skb_tso() argument
1265 struct efx_nic *efx = tx_queue->efx; in efx_enqueue_skb_tso()
1266 unsigned int old_insert_count = tx_queue->insert_count; in efx_enqueue_skb_tso()
1290 if (tso_start_new_packet(tx_queue, skb, &state) < 0) in efx_enqueue_skb_tso()
1294 tso_fill_packet_with_fragment(tx_queue, skb, &state); in efx_enqueue_skb_tso()
1309 tso_start_new_packet(tx_queue, skb, &state) < 0) in efx_enqueue_skb_tso()
1313 netdev_tx_sent_queue(tx_queue->core_txq, skb->len); in efx_enqueue_skb_tso()
1315 efx_tx_maybe_stop_queue(tx_queue); in efx_enqueue_skb_tso()
1318 if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) { in efx_enqueue_skb_tso()
1319 struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue); in efx_enqueue_skb_tso()
1328 efx_nic_push_buffers(tx_queue); in efx_enqueue_skb_tso()
1330 tx_queue->xmit_more_available = skb->xmit_more; in efx_enqueue_skb_tso()
1333 tx_queue->tso_bursts++; in efx_enqueue_skb_tso()
1356 efx_enqueue_unwind(tx_queue, old_insert_count); in efx_enqueue_skb_tso()