Lines matching refs:txq (mvneta driver, drivers/net/ethernet/marvell/mvneta.c)

This cross-referencer output prints only the source lines that mention txq, together with the enclosing function and whether txq is an argument or a local there; neighbouring lines (else branches, register masks, loop bounds) are omitted. The short C models interleaved below restore the elided control flow where it matters; all model names, struct layouts, and stand-in types in them are illustrative, not part of the driver.
279 #define IS_TSO_HEADER(txq, addr) \ argument
280 ((addr >= txq->tso_hdrs_phys) && \
281 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
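The macro classifies a descriptor's buffer address: header buffers carved out of the queue's coherent TSO pool must not be passed to dma_unmap_single() on completion, while ordinary mapped skb data must. A minimal userspace model of the bounds check (struct and names are ours; dma_addr_t stood in by uint64_t):

#include <stdbool.h>
#include <stdint.h>

#define TSO_HEADER_SIZE 128     /* the kernel's include/net/tso.h value */

/* Only the two txq fields the macro reads. */
struct tx_queue_model {
        uint64_t tso_hdrs_phys; /* DMA base of the per-queue TSO header pool */
        int size;               /* descriptor count == header-slot count */
};

/* True when addr falls inside [tso_hdrs_phys, tso_hdrs_phys + size * TSO_HEADER_SIZE).
 * Completion code uses this to skip dma_unmap for pool-backed header buffers. */
static bool is_tso_header(const struct tx_queue_model *txq, uint64_t addr)
{
        return addr >= txq->tso_hdrs_phys &&
               addr < txq->tso_hdrs_phys + (uint64_t)txq->size * TSO_HEADER_SIZE;
}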
558 static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq) in mvneta_txq_inc_get() argument
560 txq->txq_get_index++; in mvneta_txq_inc_get()
561 if (txq->txq_get_index == txq->size) in mvneta_txq_inc_get()
562 txq->txq_get_index = 0; in mvneta_txq_inc_get()
566 static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq) in mvneta_txq_inc_put() argument
568 txq->txq_put_index++; in mvneta_txq_inc_put()
569 if (txq->txq_put_index == txq->size) in mvneta_txq_inc_put()
570 txq->txq_put_index = 0; in mvneta_txq_inc_put()
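Both helpers advance a ring cursor and wrap to zero at txq->size: the get index trails completions, the put index leads submissions. The shared wrap reduces to (a sketch; the function name is ours):

/* Wrap-around increment shared by the get (completion) and put (submission)
 * cursors; assumes 0 <= idx < size on entry, as the driver maintains. */
static inline int ring_index_inc(int idx, int size)
{
        return (idx + 1 == size) ? 0 : idx + 1;
}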
747 struct mvneta_tx_queue *txq, in mvneta_txq_pend_desc_add() argument
756 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_pend_desc_add()
761 mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq) in mvneta_txq_next_desc_get() argument
763 int tx_desc = txq->next_desc_to_proc; in mvneta_txq_next_desc_get()
765 txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc); in mvneta_txq_next_desc_get()
766 return txq->descs + tx_desc; in mvneta_txq_next_desc_get()
772 static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq) in mvneta_txq_desc_put() argument
774 if (txq->next_desc_to_proc == 0) in mvneta_txq_desc_put()
775 txq->next_desc_to_proc = txq->last_desc - 1; in mvneta_txq_desc_put()
777 txq->next_desc_to_proc--; in mvneta_txq_desc_put()
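mvneta_txq_next_desc_get() hands out the slot at next_desc_to_proc and steps the cursor forward; mvneta_txq_desc_put() is its inverse, rolling the cursor back after a DMA-mapping failure. Note that the listing drops the bare else between lines 775 and 777 (that line does not contain txq), so the fragment reads as if the function both wraps and decrements. A model with the control flow restored (names are ours; the forward wrap assumes the MVNETA_QUEUE_NEXT_DESC() pattern of wrapping after last_desc):

struct desc_ring_model {
        int next_desc_to_proc;  /* allocation cursor into the descriptor array */
        int last_desc;          /* size - 1 */
};

/* mvneta_txq_next_desc_get(): hand out the current slot, then advance,
 * wrapping to 0 after last_desc. */
static int ring_take_desc(struct desc_ring_model *q)
{
        int taken = q->next_desc_to_proc;

        q->next_desc_to_proc = (taken < q->last_desc) ? taken + 1 : 0;
        return taken;
}

/* mvneta_txq_desc_put(): back out the most recent allocation; the else
 * branch below is the line elided from the listing above. */
static void ring_put_desc(struct desc_ring_model *q)
{
        if (q->next_desc_to_proc == 0)
                q->next_desc_to_proc = q->last_desc - 1;
        else
                q->next_desc_to_proc--;
}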
815 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_port_up() local
816 if (txq->descs != NULL) in mvneta_port_up()
1212 struct mvneta_tx_queue *txq, u32 value) in mvneta_tx_done_pkts_coal_set() argument
1216 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id)); in mvneta_tx_done_pkts_coal_set()
1221 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val); in mvneta_tx_done_pkts_coal_set()
1223 txq->done_pkts_coal = value; in mvneta_tx_done_pkts_coal_set()
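The read at line 1216 followed by the write at line 1221 is a read-modify-write: the TXQ size register also carries the tx-done coalescing threshold, so only that field may change, and the new value is cached in done_pkts_coal for later re-programming. The pattern, with an illustrative field placement (the driver's own mask/shift macros define the real layout, not these):

#include <stdint.h>

/* Illustrative only; not the real MVNETA_TXQ_SIZE_REG field layout. */
#define THRESH_SHIFT    16
#define THRESH_MASK     (0x3fffu << THRESH_SHIFT)

/* Read-modify-write of one packed field: clear it, OR in the new value. */
static uint32_t set_sent_thresh(uint32_t reg, uint32_t frames)
{
        reg &= ~THRESH_MASK;
        reg |= (frames << THRESH_SHIFT) & THRESH_MASK;
        return reg;
}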
1236 struct mvneta_tx_queue *txq, in mvneta_txq_sent_desc_dec() argument
1244 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_sent_desc_dec()
1249 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_sent_desc_dec()
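The two writes to MVNETA_TXQ_UPDATE_REG exist because the register's sent-descriptor field can only acknowledge a bounded count per write (255 in the driver), so large counts are drained in a loop whose bound sits on unlisted lines, with one final write for the remainder. A model with the register I/O stubbed (names and shift value illustrative):

#include <stdint.h>

#define DEC_SENT_SHIFT  16      /* stand-in for the driver's sent-count shift */

/* Stub for mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(id), val). */
static void reg_write_model(uint32_t val)
{
        (void)val;
}

/* Acknowledge sent descriptors at most 255 at a time, mirroring the loop
 * shape of mvneta_txq_sent_desc_dec(). */
static void ack_sent_descs(int sent_desc)
{
        while (sent_desc > 0xff) {
                reg_write_model(0xffu << DEC_SENT_SHIFT);
                sent_desc -= 0xff;
        }
        reg_write_model((uint32_t)sent_desc << DEC_SENT_SHIFT);
}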
1254 struct mvneta_tx_queue *txq) in mvneta_txq_sent_desc_num_get() argument
1259 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id)); in mvneta_txq_sent_desc_num_get()
1270 struct mvneta_tx_queue *txq) in mvneta_txq_sent_desc_proc() argument
1275 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); in mvneta_txq_sent_desc_proc()
1279 mvneta_txq_sent_desc_dec(pp, txq, sent_desc); in mvneta_txq_sent_desc_proc()
1374 struct mvneta_tx_queue *txq, int num) in mvneta_txq_bufs_free() argument
1379 struct mvneta_tx_desc *tx_desc = txq->descs + in mvneta_txq_bufs_free()
1380 txq->txq_get_index; in mvneta_txq_bufs_free()
1381 struct sk_buff *skb = txq->tx_skb[txq->txq_get_index]; in mvneta_txq_bufs_free()
1383 mvneta_txq_inc_get(txq); in mvneta_txq_bufs_free()
1385 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr)) in mvneta_txq_bufs_free()
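The free loop advances the get cursor across num completed slots; a buffer is unmapped only if its DMA address lies outside the TSO header pool (the IS_TSO_HEADER() test above), and an skb is freed only at the slot that owns it, i.e. where tx_skb[] is non-NULL. A standalone model with the kernel calls stubbed (all names ours):

#include <stdint.h>

struct txq_free_model {
        uint64_t *buf_phys;     /* per-slot descriptor DMA address */
        void **tx_skb;          /* owner pointer; NULL on continuation slots */
        int txq_get_index;      /* completion cursor */
        int size;
};

static void unmap_model(uint64_t addr)      { (void)addr; }     /* dma_unmap_single() */
static void free_skb_model(void *skb)       { (void)skb; }      /* dev_kfree_skb_any() */
static int  is_tso_hdr_model(uint64_t addr) { (void)addr; return 0; }

/* Release num completed slots, mirroring mvneta_txq_bufs_free(). */
static void txq_bufs_free_model(struct txq_free_model *q, int num)
{
        for (int i = 0; i < num; i++) {
                int slot = q->txq_get_index;
                void *skb = q->tx_skb[slot];

                q->txq_get_index = (slot + 1 == q->size) ? 0 : slot + 1;

                if (!is_tso_hdr_model(q->buf_phys[slot]))
                        unmap_model(q->buf_phys[slot]); /* pool headers skip this */
                if (skb)
                        free_skb_model(skb);    /* freed once, at the owning slot */
        }
}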
1397 struct mvneta_tx_queue *txq) in mvneta_txq_done() argument
1399 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_txq_done()
1402 tx_done = mvneta_txq_sent_desc_proc(pp, txq); in mvneta_txq_done()
1406 mvneta_txq_bufs_free(pp, txq, tx_done); in mvneta_txq_done()
1408 txq->count -= tx_done; in mvneta_txq_done()
1411 if (txq->count <= txq->tx_wake_threshold) in mvneta_txq_done()
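mvneta_txq_done() subtracts the hardware's sent count from the in-flight total and, once occupancy falls to tx_wake_threshold, restarts the stack queue that mvneta_tx() stops at tx_stop_threshold (line 1867 below). With wake set to half of stop at init (lines 2312-2313), this is a stop/wake hysteresis. A model with the netif calls replaced by a flag:

#include <stdbool.h>

struct txq_flow_model {
        int count;              /* descriptors currently in flight */
        int tx_stop_threshold;  /* ring size minus worst-case skb descriptors */
        int tx_wake_threshold;  /* tx_stop_threshold / 2 */
        bool stopped;           /* stands in for netif_tx_queue_stopped() */
};

/* Transmit side (end of mvneta_tx()): stop before the ring can overflow. */
static void tx_submit_model(struct txq_flow_model *q, int frags)
{
        q->count += frags;
        if (q->count >= q->tx_stop_threshold)
                q->stopped = true;      /* netif_tx_stop_queue() */
}

/* Completion side (mvneta_txq_done()): wake once enough room exists. */
static void tx_complete_model(struct txq_flow_model *q, int tx_done)
{
        q->count -= tx_done;
        if (q->stopped && q->count <= q->tx_wake_threshold)
                q->stopped = false;     /* netif_tx_wake_queue() */
}

Keeping the wake mark well below the stop mark means a freshly woken queue has room for several worst-case packets before it can stop again, which avoids flapping near the full mark.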
1625 struct mvneta_port *pp, struct mvneta_tx_queue *txq) in mvneta_tso_put_hdr() argument
1630 txq->tx_skb[txq->txq_put_index] = NULL; in mvneta_tso_put_hdr()
1631 tx_desc = mvneta_txq_next_desc_get(txq); in mvneta_tso_put_hdr()
1635 tx_desc->buf_phys_addr = txq->tso_hdrs_phys + in mvneta_tso_put_hdr()
1636 txq->txq_put_index * TSO_HEADER_SIZE; in mvneta_tso_put_hdr()
1637 mvneta_txq_inc_put(txq); in mvneta_tso_put_hdr()
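Header descriptors carry no skb (tx_skb[] is set to NULL at line 1630) because the header bytes live in the queue's coherent pool: slot txq_put_index of tso_hdrs (the CPU view, used at line 1703 below) and of tso_hdrs_phys (the DMA view, used here) name the same TSO_HEADER_SIZE buffer, so no per-packet mapping is needed. The address arithmetic, modeled (names ours):

#include <stdint.h>

#define TSO_HEADER_SIZE 128     /* the kernel's include/net/tso.h value */

/* CPU and DMA views of the per-queue header pool. */
struct tso_pool_model {
        uint8_t *tso_hdrs;      /* coherent CPU mapping */
        uint64_t tso_hdrs_phys; /* matching DMA base */
};

static uint8_t *tso_hdr_cpu(const struct tso_pool_model *p, int put_index)
{
        return p->tso_hdrs + put_index * TSO_HEADER_SIZE;
}

static uint64_t tso_hdr_dma(const struct tso_pool_model *p, int put_index)
{
        return p->tso_hdrs_phys + (uint64_t)put_index * TSO_HEADER_SIZE;
}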
1641 mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, in mvneta_tso_put_data() argument
1647 tx_desc = mvneta_txq_next_desc_get(txq); in mvneta_tso_put_data()
1653 mvneta_txq_desc_put(txq); in mvneta_tso_put_data()
1658 txq->tx_skb[txq->txq_put_index] = NULL; in mvneta_tso_put_data()
1666 txq->tx_skb[txq->txq_put_index] = skb; in mvneta_tso_put_data()
1668 mvneta_txq_inc_put(txq); in mvneta_tso_put_data()
1673 struct mvneta_tx_queue *txq) in mvneta_tx_tso() argument
1683 if ((txq->count + tso_count_descs(skb)) >= txq->size) in mvneta_tx_tso()
1703 hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE; in mvneta_tx_tso()
1706 mvneta_tso_put_hdr(skb, pp, txq); in mvneta_tx_tso()
1714 if (mvneta_tso_put_data(dev, txq, skb, in mvneta_tx_tso()
1732 struct mvneta_tx_desc *tx_desc = txq->descs + i; in mvneta_tx_tso()
1733 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr)) in mvneta_tx_tso()
1738 mvneta_txq_desc_put(txq); in mvneta_tx_tso()
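mvneta_tx_tso() first checks worst-case room (count plus tso_count_descs(skb), the generic kernel TSO helper's upper bound, must stay below the ring size), then alternates pool-backed header descriptors with mapped data descriptors. If anything fails mid-packet, the cleanup at lines 1732-1738 walks back over every descriptor already queued, unmapping only the non-header buffers and returning each slot with mvneta_txq_desc_put(). The unwind shape, standalone (all names ours; stubs mark the kernel calls):

#include <stdint.h>

static void unmap_model(uint64_t addr)      { (void)addr; }     /* dma_unmap_single() */
static int  is_tso_hdr_model(uint64_t addr) { (void)addr; return 0; }
static void desc_put_model(void)            { }                 /* mvneta_txq_desc_put() */

/* Roll back 'taken' descriptors queued for a failed TSO skb, starting at
 * the packet's first slot; header-pool buffers are never unmapped. */
static void tso_unwind_model(const uint64_t *buf_phys, int size,
                             int first_slot, int taken)
{
        int slot = first_slot;

        for (int i = 0; i < taken; i++) {
                if (!is_tso_hdr_model(buf_phys[slot]))
                        unmap_model(buf_phys[slot]);
                slot = (slot + 1 == size) ? 0 : slot + 1;
                desc_put_model();
        }
}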
1745 struct mvneta_tx_queue *txq) in mvneta_tx_frag_process() argument
1754 tx_desc = mvneta_txq_next_desc_get(txq); in mvneta_tx_frag_process()
1763 mvneta_txq_desc_put(txq); in mvneta_tx_frag_process()
1770 txq->tx_skb[txq->txq_put_index] = skb; in mvneta_tx_frag_process()
1774 txq->tx_skb[txq->txq_put_index] = NULL; in mvneta_tx_frag_process()
1776 mvneta_txq_inc_put(txq); in mvneta_tx_frag_process()
1786 tx_desc = txq->descs + i; in mvneta_tx_frag_process()
1791 mvneta_txq_desc_put(txq); in mvneta_tx_frag_process()
1802 struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; in mvneta_tx() local
1812 frags = mvneta_tx_tso(skb, dev, txq); in mvneta_tx()
1819 tx_desc = mvneta_txq_next_desc_get(txq); in mvneta_tx()
1830 mvneta_txq_desc_put(txq); in mvneta_tx()
1839 txq->tx_skb[txq->txq_put_index] = skb; in mvneta_tx()
1840 mvneta_txq_inc_put(txq); in mvneta_tx()
1844 txq->tx_skb[txq->txq_put_index] = NULL; in mvneta_tx()
1845 mvneta_txq_inc_put(txq); in mvneta_tx()
1848 if (mvneta_tx_frag_process(pp, skb, txq)) { in mvneta_tx()
1853 mvneta_txq_desc_put(txq); in mvneta_tx()
1864 txq->count += frags; in mvneta_tx()
1865 mvneta_txq_pend_desc_add(pp, txq, frags); in mvneta_tx()
1867 if (txq->count >= txq->tx_stop_threshold) in mvneta_tx()
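Lines 1839-1844 here and the fragment loop above (lines 1770-1774) implement one ownership rule: every descriptor of a packet advances the put cursor, but only the packet's final descriptor records the skb pointer (a single-descriptor packet is its own final descriptor). The completion path therefore frees the skb exactly once, and only after the whole packet has retired. Sketch (names ours):

/* Queue one packet occupying ndescs slots; only the last slot owns the
 * skb, matching mvneta_tx()/mvneta_tx_frag_process(). */
static void enqueue_packet_model(void **tx_skb, int *put_index, int size,
                                 void *skb, int ndescs)
{
        for (int i = 0; i < ndescs; i++) {
                tx_skb[*put_index] = (i == ndescs - 1) ? skb : NULL;
                *put_index = (*put_index + 1 == size) ? 0 : *put_index + 1;
        }
}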
1885 struct mvneta_tx_queue *txq) in mvneta_txq_done_force() argument
1888 int tx_done = txq->count; in mvneta_txq_done_force()
1890 mvneta_txq_bufs_free(pp, txq, tx_done); in mvneta_txq_done_force()
1893 txq->count = 0; in mvneta_txq_done_force()
1894 txq->txq_put_index = 0; in mvneta_txq_done_force()
1895 txq->txq_get_index = 0; in mvneta_txq_done_force()
1903 struct mvneta_tx_queue *txq; in mvneta_tx_done_gbe() local
1907 txq = mvneta_tx_done_policy(pp, cause_tx_done); in mvneta_tx_done_gbe()
1909 nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_tx_done_gbe()
1912 if (txq->count) in mvneta_tx_done_gbe()
1913 mvneta_txq_done(pp, txq); in mvneta_tx_done_gbe()
1916 cause_tx_done &= ~((1 << txq->id)); in mvneta_tx_done_gbe()
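The interrupt cause word carries one bit per TX queue. Each pass picks a queue from the mask via mvneta_tx_done_policy() (this sketch assumes highest-set-bit-first, an fls()-style choice), runs completion under that queue's netdev lock if descriptors are outstanding, and clears the bit until no causes remain:

#include <stdint.h>

/* Drain a per-queue cause bitmask one queue per pass, as in
 * mvneta_tx_done_gbe(); bit order assumes fls()-style selection. */
static void tx_done_gbe_model(uint32_t cause_tx_done)
{
        while (cause_tx_done) {
                int queue = 31 - __builtin_clz(cause_tx_done);  /* fls(x) - 1 */

                /* ... __netif_tx_lock(), then the completion handler ... */
                cause_tx_done &= ~(1u << queue);
        }
}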
2304 struct mvneta_tx_queue *txq) in mvneta_txq_init() argument
2306 txq->size = pp->tx_ring_size; in mvneta_txq_init()
2312 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS; in mvneta_txq_init()
2313 txq->tx_wake_threshold = txq->tx_stop_threshold / 2; in mvneta_txq_init()
2317 txq->descs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_txq_init()
2318 txq->size * MVNETA_DESC_ALIGNED_SIZE, in mvneta_txq_init()
2319 &txq->descs_phys, GFP_KERNEL); in mvneta_txq_init()
2320 if (txq->descs == NULL) in mvneta_txq_init()
2324 BUG_ON(txq->descs != in mvneta_txq_init()
2325 PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE)); in mvneta_txq_init()
2327 txq->last_desc = txq->size - 1; in mvneta_txq_init()
2330 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); in mvneta_txq_init()
2331 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); in mvneta_txq_init()
2334 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); in mvneta_txq_init()
2335 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); in mvneta_txq_init()
2337 txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL); in mvneta_txq_init()
2338 if (txq->tx_skb == NULL) { in mvneta_txq_init()
2340 txq->size * MVNETA_DESC_ALIGNED_SIZE, in mvneta_txq_init()
2341 txq->descs, txq->descs_phys); in mvneta_txq_init()
2346 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_txq_init()
2347 txq->size * TSO_HEADER_SIZE, in mvneta_txq_init()
2348 &txq->tso_hdrs_phys, GFP_KERNEL); in mvneta_txq_init()
2349 if (txq->tso_hdrs == NULL) { in mvneta_txq_init()
2350 kfree(txq->tx_skb); in mvneta_txq_init()
2352 txq->size * MVNETA_DESC_ALIGNED_SIZE, in mvneta_txq_init()
2353 txq->descs, txq->descs_phys); in mvneta_txq_init()
2356 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); in mvneta_txq_init()
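mvneta_txq_init() acquires three per-queue resources in order: the coherent descriptor array (checked for cache-line alignment), the tx_skb pointer array, and the coherent TSO header pool; the failure paths at lines 2338-2341 and 2349-2353 release exactly what was already allocated. A model of the same staged unwind, written goto-style as kernel code commonly is (the real function unwinds inline), with malloc()/free() standing in for the DMA and kmalloc calls:

#include <stdlib.h>

/* Three-stage init with unwind, mirroring mvneta_txq_init() (names ours). */
static int txq_init_model(void **descs, void **tx_skb, void **tso_hdrs,
                          size_t ndesc, size_t desc_sz, size_t hdr_sz)
{
        *descs = malloc(ndesc * desc_sz);       /* dma_alloc_coherent() */
        if (!*descs)
                return -1;

        *tx_skb = malloc(ndesc * sizeof(void *));       /* kmalloc() */
        if (!*tx_skb)
                goto err_free_descs;

        *tso_hdrs = malloc(ndesc * hdr_sz);     /* dma_alloc_coherent() */
        if (!*tso_hdrs)
                goto err_free_skb;

        return 0;

err_free_skb:
        free(*tx_skb);
err_free_descs:
        free(*descs);
        return -1;
}

mvneta_txq_deinit() below is the mirror image: it frees the three resources, resets the bookkeeping fields so the queue can be re-initialised, and zeroes the hardware base and size registers.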
2363 struct mvneta_tx_queue *txq) in mvneta_txq_deinit() argument
2365 kfree(txq->tx_skb); in mvneta_txq_deinit()
2367 if (txq->tso_hdrs) in mvneta_txq_deinit()
2369 txq->size * TSO_HEADER_SIZE, in mvneta_txq_deinit()
2370 txq->tso_hdrs, txq->tso_hdrs_phys); in mvneta_txq_deinit()
2371 if (txq->descs) in mvneta_txq_deinit()
2373 txq->size * MVNETA_DESC_ALIGNED_SIZE, in mvneta_txq_deinit()
2374 txq->descs, txq->descs_phys); in mvneta_txq_deinit()
2376 txq->descs = NULL; in mvneta_txq_deinit()
2377 txq->last_desc = 0; in mvneta_txq_deinit()
2378 txq->next_desc_to_proc = 0; in mvneta_txq_deinit()
2379 txq->descs_phys = 0; in mvneta_txq_deinit()
2382 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); in mvneta_txq_deinit()
2383 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); in mvneta_txq_deinit()
2386 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0); in mvneta_txq_deinit()
2387 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); in mvneta_txq_deinit()
2969 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_ethtool_set_coalesce() local
2970 txq->done_pkts_coal = c->tx_max_coalesced_frames; in mvneta_ethtool_set_coalesce()
2971 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); in mvneta_ethtool_set_coalesce()
3145 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_init() local
3146 txq->id = queue; in mvneta_init()
3147 txq->size = pp->tx_ring_size; in mvneta_init()
3148 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS; in mvneta_init()