Lines matching refs: txq (Linux mainline, drivers/net/ethernet/marvell/mvneta.c). Each entry shows the source line number, the matching code, and the enclosing function; "argument" and "local" mark where txq appears as a parameter or local variable.

274 #define IS_TSO_HEADER(txq, addr) \  argument
275 ((addr >= txq->tso_hdrs_phys) && \
276 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
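
The macro above brackets a DMA address against the queue's preallocated TSO header region; buffers inside it come from one coherent allocation and must not be individually unmapped (see its use in mvneta_txq_bufs_free() and mvneta_tx_tso() further down). A minimal userspace model of the bounds check; only tso_hdrs_phys, size, and TSO_HEADER_SIZE correspond to names in the listing, and the values are illustrative:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TSO_HEADER_SIZE 128     /* per-header slot; 128 in mainline <net/tso.h> */

    struct txq_model {
        uint64_t tso_hdrs_phys;     /* DMA base of the coherent TSO header block */
        int size;                   /* one header slot per ring entry */
    };

    static bool is_tso_header(const struct txq_model *txq, uint64_t addr)
    {
        return addr >= txq->tso_hdrs_phys &&
               addr < txq->tso_hdrs_phys + (uint64_t)txq->size * TSO_HEADER_SIZE;
    }

    int main(void)
    {
        struct txq_model txq = { .tso_hdrs_phys = 0x10000000, .size = 532 };
        printf("%d\n", is_tso_header(&txq, txq.tso_hdrs_phys + 3 * TSO_HEADER_SIZE));
        return 0;
    }
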
496 static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq) in mvneta_txq_inc_get() argument
498 txq->txq_get_index++; in mvneta_txq_inc_get()
499 if (txq->txq_get_index == txq->size) in mvneta_txq_inc_get()
500 txq->txq_get_index = 0; in mvneta_txq_inc_get()
504 static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq) in mvneta_txq_inc_put() argument
506 txq->txq_put_index++; in mvneta_txq_inc_put()
507 if (txq->txq_put_index == txq->size) in mvneta_txq_inc_put()
508 txq->txq_put_index = 0; in mvneta_txq_inc_put()
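
Both helpers above advance a ring cursor with wraparound, written as an increment plus compare-and-reset rather than a modulo (avoiding a division on every packet). A standalone model; the struct and field names are illustrative:

    struct ring { int get, put, size; };

    /* mvneta_txq_inc_get(): consumer-side cursor */
    void ring_inc_get(struct ring *r)
    {
        r->get++;
        if (r->get == r->size)
            r->get = 0;
    }

    /* mvneta_txq_inc_put(): producer-side cursor */
    void ring_inc_put(struct ring *r)
    {
        r->put++;
        if (r->put == r->size)
            r->put = 0;
    }
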
683 struct mvneta_tx_queue *txq, in mvneta_txq_pend_desc_add() argument
692 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_pend_desc_add()
697 mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq) in mvneta_txq_next_desc_get() argument
699 int tx_desc = txq->next_desc_to_proc; in mvneta_txq_next_desc_get()
701 txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc); in mvneta_txq_next_desc_get()
702 return txq->descs + tx_desc; in mvneta_txq_next_desc_get()
708 static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq) in mvneta_txq_desc_put() argument
710 if (txq->next_desc_to_proc == 0) in mvneta_txq_desc_put()
711 txq->next_desc_to_proc = txq->last_desc - 1; in mvneta_txq_desc_put()
713 txq->next_desc_to_proc--; in mvneta_txq_desc_put()
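
mvneta_txq_next_desc_get() hands out the slot at next_desc_to_proc and advances the cursor; mvneta_txq_desc_put() steps the cursor back one slot on error paths. A model of the pair, assuming MVNETA_QUEUE_NEXT_DESC wraps forward from last_desc to 0 (consistent with the mainline macro); the backward wrap to last_desc - 1 mirrors the listed source:

    struct desc_ring { int next, last; };      /* last == size - 1 */

    int ring_next_desc_get(struct desc_ring *r)
    {
        int idx = r->next;
        r->next = (idx < r->last) ? idx + 1 : 0;  /* MVNETA_QUEUE_NEXT_DESC */
        return idx;
    }

    void ring_desc_put(struct desc_ring *r)
    {
        if (r->next == 0)
            r->next = r->last - 1;  /* as listed; asymmetric with the forward wrap */
        else
            r->next--;
    }
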
752 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_port_up() local
753 if (txq->descs != NULL) in mvneta_port_up()
1154 struct mvneta_tx_queue *txq, u32 value) in mvneta_tx_done_pkts_coal_set() argument
1158 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id)); in mvneta_tx_done_pkts_coal_set()
1163 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val); in mvneta_tx_done_pkts_coal_set()
1165 txq->done_pkts_coal = value; in mvneta_tx_done_pkts_coal_set()
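
The coalescing setter above is a read-modify-write: the tx-done packet threshold evidently shares MVNETA_TXQ_SIZE_REG with the queue size, so only its bitfield is replaced between the read at 1158 and the write at 1163. A sketch of the pattern; the mask and shift here are hypothetical, not the hardware's:

    #include <stdint.h>

    #define COAL_SHIFT 16u                      /* hypothetical field position */
    #define COAL_MASK  (0xffu << COAL_SHIFT)    /* hypothetical field width */

    uint32_t set_coal_field(uint32_t reg, uint32_t pkts)
    {
        reg &= ~COAL_MASK;                      /* clear only this field */
        reg |= (pkts << COAL_SHIFT) & COAL_MASK;
        return reg;
    }
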
1178 struct mvneta_tx_queue *txq, in mvneta_txq_sent_desc_dec() argument
1186 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_sent_desc_dec()
1191 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_sent_desc_dec()
1196 struct mvneta_tx_queue *txq) in mvneta_txq_sent_desc_num_get() argument
1201 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id)); in mvneta_txq_sent_desc_num_get()
1212 struct mvneta_tx_queue *txq) in mvneta_txq_sent_desc_proc() argument
1217 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); in mvneta_txq_sent_desc_proc()
1221 mvneta_txq_sent_desc_dec(pp, txq, sent_desc); in mvneta_txq_sent_desc_proc()
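
mvneta_txq_sent_desc_dec() writes MVNETA_TXQ_UPDATE_REG twice (1186 and 1191); a reading consistent with the mainline driver is that the register's decrement field holds at most 255 descriptors per write, so larger totals are chopped into chunks. mvneta_txq_sent_desc_proc() then pairs the read of the sent count with this acknowledgment. A model of the chunking, with hw_write standing in for mvreg_write():

    void sent_desc_dec_model(int sent_desc, void (*hw_write)(unsigned int dec))
    {
        while (sent_desc > 0xff) {      /* the write inside the loop (1186) */
            hw_write(0xff);
            sent_desc -= 0xff;
        }
        hw_write(sent_desc);            /* final partial chunk (1191) */
    }
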
1316 struct mvneta_tx_queue *txq, int num) in mvneta_txq_bufs_free() argument
1321 struct mvneta_tx_desc *tx_desc = txq->descs + in mvneta_txq_bufs_free()
1322 txq->txq_get_index; in mvneta_txq_bufs_free()
1323 struct sk_buff *skb = txq->tx_skb[txq->txq_get_index]; in mvneta_txq_bufs_free()
1325 mvneta_txq_inc_get(txq); in mvneta_txq_bufs_free()
1327 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr)) in mvneta_txq_bufs_free()
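
The reclaim walk above frees num completed descriptors: it reads the skb recorded at the get cursor, advances the cursor, unmaps the buffer unless it lies in the TSO header block, and releases the skb. A userspace model; the types and callbacks are illustrative:

    #include <stdint.h>

    struct reclaim_q {
        int get, size;
        void **tx_skb;          /* skb pointer per slot, may be NULL */
        uint64_t *buf_phys;     /* DMA address per descriptor */
    };

    void bufs_free_model(struct reclaim_q *q, int num,
                         int (*is_tso_hdr)(uint64_t),
                         void (*unmap)(uint64_t),
                         void (*free_skb)(void *))
    {
        for (int i = 0; i < num; i++) {
            uint64_t addr = q->buf_phys[q->get];
            void *skb = q->tx_skb[q->get];

            q->get = (q->get + 1 == q->size) ? 0 : q->get + 1;  /* inc_get */
            if (!is_tso_hdr(addr))          /* TSO headers are never unmapped */
                unmap(addr);
            if (skb)
                free_skb(skb);
        }
    }
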
1339 struct mvneta_tx_queue *txq) in mvneta_txq_done() argument
1341 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_txq_done()
1344 tx_done = mvneta_txq_sent_desc_proc(pp, txq); in mvneta_txq_done()
1348 mvneta_txq_bufs_free(pp, txq, tx_done); in mvneta_txq_done()
1350 txq->count -= tx_done; in mvneta_txq_done()
1353 if (txq->count <= txq->tx_wake_threshold) in mvneta_txq_done()
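
mvneta_txq_done() ties the pieces together: ask the hardware how many descriptors were sent (acknowledging them via mvneta_txq_sent_desc_proc()), free those buffers, drop count, and wake the stack's subqueue once occupancy reaches tx_wake_threshold (half of tx_stop_threshold, per mvneta_txq_init() below). A compressed model; the real code also checks that the subqueue was actually stopped before waking it:

    struct txq_state { int count, wake_thresh; };

    void txq_done_model(struct txq_state *q, int tx_done,
                        void (*free_bufs)(int n), void (*wake_queue)(void))
    {
        if (!tx_done)
            return;
        free_bufs(tx_done);     /* mvneta_txq_bufs_free() equivalent */
        q->count -= tx_done;
        if (q->count <= q->wake_thresh)
            wake_queue();       /* netif_tx_wake_queue() equivalent */
    }
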
1573 struct mvneta_port *pp, struct mvneta_tx_queue *txq) in mvneta_tso_put_hdr() argument
1578 txq->tx_skb[txq->txq_put_index] = NULL; in mvneta_tso_put_hdr()
1579 tx_desc = mvneta_txq_next_desc_get(txq); in mvneta_tso_put_hdr()
1583 tx_desc->buf_phys_addr = txq->tso_hdrs_phys + in mvneta_tso_put_hdr()
1584 txq->txq_put_index * TSO_HEADER_SIZE; in mvneta_tso_put_hdr()
1585 mvneta_txq_inc_put(txq); in mvneta_tso_put_hdr()
1589 mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, in mvneta_tso_put_data() argument
1595 tx_desc = mvneta_txq_next_desc_get(txq); in mvneta_tso_put_data()
1601 mvneta_txq_desc_put(txq); in mvneta_tso_put_data()
1606 txq->tx_skb[txq->txq_put_index] = NULL; in mvneta_tso_put_data()
1614 txq->tx_skb[txq->txq_put_index] = skb; in mvneta_tso_put_data()
1616 mvneta_txq_inc_put(txq); in mvneta_tso_put_data()
1621 struct mvneta_tx_queue *txq) in mvneta_tx_tso() argument
1631 if ((txq->count + tso_count_descs(skb)) >= txq->size) in mvneta_tx_tso()
1651 hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE; in mvneta_tx_tso()
1654 mvneta_tso_put_hdr(skb, pp, txq); in mvneta_tx_tso()
1662 if (mvneta_tso_put_data(dev, txq, skb, in mvneta_tx_tso()
1680 struct mvneta_tx_desc *tx_desc = txq->descs + i; in mvneta_tx_tso()
1681 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr)) in mvneta_tx_tso()
1686 mvneta_txq_desc_put(txq); in mvneta_tx_tso()
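
mvneta_tx_tso() first rejects the skb if the worst case (tso_count_descs()) would overflow the ring (1631), then per segment emits one header descriptor carved from the coherent tso_hdrs block (via mvneta_tso_put_hdr()) followed by data descriptors (via mvneta_tso_put_data()). The loop at 1680-1686 is the error unwind: every consumed descriptor is released and its cursor slot rewound, but TSO headers are skipped when unmapping; mvneta_tx_frag_process() below unwinds the same way. A model of the unwind, with illustrative types:

    #include <stdbool.h>
    #include <stdint.h>

    struct desc_model { uint64_t buf_phys_addr; uint32_t data_size; };

    void tso_unwind_model(struct desc_model *descs, int used,
                          bool (*is_tso_hdr)(uint64_t),
                          void (*unmap)(uint64_t addr, uint32_t len))
    {
        for (int i = used - 1; i >= 0; i--) {
            struct desc_model *d = &descs[i];

            if (!is_tso_hdr(d->buf_phys_addr))
                unmap(d->buf_phys_addr, d->data_size);
            /* the driver also rewinds the cursor once per released slot
             * via mvneta_txq_desc_put() */
        }
    }
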
1693 struct mvneta_tx_queue *txq) in mvneta_tx_frag_process() argument
1702 tx_desc = mvneta_txq_next_desc_get(txq); in mvneta_tx_frag_process()
1711 mvneta_txq_desc_put(txq); in mvneta_tx_frag_process()
1718 txq->tx_skb[txq->txq_put_index] = skb; in mvneta_tx_frag_process()
1722 txq->tx_skb[txq->txq_put_index] = NULL; in mvneta_tx_frag_process()
1724 mvneta_txq_inc_put(txq); in mvneta_tx_frag_process()
1734 tx_desc = txq->descs + i; in mvneta_tx_frag_process()
1739 mvneta_txq_desc_put(txq); in mvneta_tx_frag_process()
1750 struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; in mvneta_tx() local
1760 frags = mvneta_tx_tso(skb, dev, txq); in mvneta_tx()
1767 tx_desc = mvneta_txq_next_desc_get(txq); in mvneta_tx()
1778 mvneta_txq_desc_put(txq); in mvneta_tx()
1787 txq->tx_skb[txq->txq_put_index] = skb; in mvneta_tx()
1788 mvneta_txq_inc_put(txq); in mvneta_tx()
1792 txq->tx_skb[txq->txq_put_index] = NULL; in mvneta_tx()
1793 mvneta_txq_inc_put(txq); in mvneta_tx()
1796 if (mvneta_tx_frag_process(pp, skb, txq)) { in mvneta_tx()
1801 mvneta_txq_desc_put(txq); in mvneta_tx()
1812 txq->count += frags; in mvneta_tx()
1813 mvneta_txq_pend_desc_add(pp, txq, frags); in mvneta_tx()
1815 if (txq->count >= txq->tx_stop_threshold) in mvneta_tx()
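
At the end of mvneta_tx(), successfully queued descriptors are accounted (count += frags), handed to the hardware through mvneta_txq_pend_desc_add(), and the subqueue is stopped once count reaches tx_stop_threshold, which mvneta_txq_init() sets to size - MVNETA_MAX_SKB_DESCS so a worst-case skb always still fits. A model of the submit tail:

    struct tx_model { int count, stop_thresh; };

    void tx_submit_model(struct tx_model *q, int frags,
                         void (*kick_hw)(int pending), void (*stop_queue)(void))
    {
        q->count += frags;      /* descriptors now owned by the NIC */
        kick_hw(frags);         /* mvneta_txq_pend_desc_add() equivalent */
        if (q->count >= q->stop_thresh)
            stop_queue();       /* keep headroom for a worst-case skb */
    }
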
1833 struct mvneta_tx_queue *txq) in mvneta_txq_done_force() argument
1836 int tx_done = txq->count; in mvneta_txq_done_force()
1838 mvneta_txq_bufs_free(pp, txq, tx_done); in mvneta_txq_done_force()
1841 txq->count = 0; in mvneta_txq_done_force()
1842 txq->txq_put_index = 0; in mvneta_txq_done_force()
1843 txq->txq_get_index = 0; in mvneta_txq_done_force()
1851 struct mvneta_tx_queue *txq; in mvneta_tx_done_gbe() local
1855 txq = mvneta_tx_done_policy(pp, cause_tx_done); in mvneta_tx_done_gbe()
1857 nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_tx_done_gbe()
1860 if (txq->count) in mvneta_tx_done_gbe()
1861 mvneta_txq_done(pp, txq); in mvneta_tx_done_gbe()
1864 cause_tx_done &= ~((1 << txq->id)); in mvneta_tx_done_gbe()
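
mvneta_tx_done_gbe() walks a cause bitmask with one bit per queue: mvneta_tx_done_policy() picks a pending queue, the queue is serviced under its netdev xmit lock, and its bit is cleared (1864) until the mask is empty. A model using a find-first-set pick as a stand-in for the policy helper:

    void tx_done_gbe_model(unsigned int cause, void (*service)(int qid))
    {
        while (cause) {
            int qid = __builtin_ctz(cause);  /* lowest pending queue id */
            service(qid);                    /* mvneta_txq_done() under lock */
            cause &= ~(1u << qid);           /* retire this queue's bit */
        }
    }
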
2284 struct mvneta_tx_queue *txq) in mvneta_txq_init() argument
2286 txq->size = pp->tx_ring_size; in mvneta_txq_init()
2292 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS; in mvneta_txq_init()
2293 txq->tx_wake_threshold = txq->tx_stop_threshold / 2; in mvneta_txq_init()
2297 txq->descs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_txq_init()
2298 txq->size * MVNETA_DESC_ALIGNED_SIZE, in mvneta_txq_init()
2299 &txq->descs_phys, GFP_KERNEL); in mvneta_txq_init()
2300 if (txq->descs == NULL) in mvneta_txq_init()
2304 BUG_ON(txq->descs != in mvneta_txq_init()
2305 PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE)); in mvneta_txq_init()
2307 txq->last_desc = txq->size - 1; in mvneta_txq_init()
2310 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); in mvneta_txq_init()
2311 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); in mvneta_txq_init()
2314 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); in mvneta_txq_init()
2315 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); in mvneta_txq_init()
2317 txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL); in mvneta_txq_init()
2318 if (txq->tx_skb == NULL) { in mvneta_txq_init()
2320 txq->size * MVNETA_DESC_ALIGNED_SIZE, in mvneta_txq_init()
2321 txq->descs, txq->descs_phys); in mvneta_txq_init()
2326 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_txq_init()
2327 txq->size * TSO_HEADER_SIZE, in mvneta_txq_init()
2328 &txq->tso_hdrs_phys, GFP_KERNEL); in mvneta_txq_init()
2329 if (txq->tso_hdrs == NULL) { in mvneta_txq_init()
2330 kfree(txq->tx_skb); in mvneta_txq_init()
2332 txq->size * MVNETA_DESC_ALIGNED_SIZE, in mvneta_txq_init()
2333 txq->descs, txq->descs_phys); in mvneta_txq_init()
2336 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); in mvneta_txq_init()
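
mvneta_txq_init() acquires resources in order: the coherent descriptor ring (checked for cache-line alignment), the tx_skb pointer array, and the coherent TSO header block; each later failure path frees everything acquired earlier, as the kfree()/dma_free_coherent() calls at 2320-2333 show. A generic sketch of that acquire-in-order, release-in-reverse pattern (the driver itself uses inline frees rather than gotos):

    #include <stdlib.h>

    int init_model(void **ring, void **skbs, void **hdrs)
    {
        *ring = malloc(64);     /* stands in for the descriptor ring */
        if (!*ring)
            goto err;
        *skbs = malloc(64);     /* stands in for the tx_skb array */
        if (!*skbs)
            goto err_free_ring;
        *hdrs = malloc(64);     /* stands in for the TSO header block */
        if (!*hdrs)
            goto err_free_skbs;
        return 0;

    err_free_skbs:
        free(*skbs);
    err_free_ring:
        free(*ring);
    err:
        return -1;
    }
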
2343 struct mvneta_tx_queue *txq) in mvneta_txq_deinit() argument
2345 kfree(txq->tx_skb); in mvneta_txq_deinit()
2347 if (txq->tso_hdrs) in mvneta_txq_deinit()
2349 txq->size * TSO_HEADER_SIZE, in mvneta_txq_deinit()
2350 txq->tso_hdrs, txq->tso_hdrs_phys); in mvneta_txq_deinit()
2351 if (txq->descs) in mvneta_txq_deinit()
2353 txq->size * MVNETA_DESC_ALIGNED_SIZE, in mvneta_txq_deinit()
2354 txq->descs, txq->descs_phys); in mvneta_txq_deinit()
2356 txq->descs = NULL; in mvneta_txq_deinit()
2357 txq->last_desc = 0; in mvneta_txq_deinit()
2358 txq->next_desc_to_proc = 0; in mvneta_txq_deinit()
2359 txq->descs_phys = 0; in mvneta_txq_deinit()
2362 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); in mvneta_txq_deinit()
2363 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); in mvneta_txq_deinit()
2366 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0); in mvneta_txq_deinit()
2367 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); in mvneta_txq_deinit()
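
Teardown mirrors init: free the tx_skb array, conditionally free the TSO header block and the descriptor ring, reset the software cursors, and zero the hardware base/size and token registers so the queue reads as empty. A compressed model:

    #include <stdlib.h>

    void deinit_model(void **ring, void **skbs, void **hdrs,
                      void (*hw_clear)(void))
    {
        free(*skbs);  *skbs = NULL;  /* kfree(txq->tx_skb) */
        free(*hdrs);  *hdrs = NULL;  /* dma_free_coherent() of tso_hdrs */
        free(*ring);  *ring = NULL;  /* dma_free_coherent() of descs */
        hw_clear();                  /* zero TXQ base/size/token registers */
    }
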
2805 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_ethtool_set_coalesce() local
2806 txq->done_pkts_coal = c->tx_max_coalesced_frames; in mvneta_ethtool_set_coalesce()
2807 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); in mvneta_ethtool_set_coalesce()
2919 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_init() local
2920 txq->id = queue; in mvneta_init()
2921 txq->size = pp->tx_ring_size; in mvneta_init()
2922 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS; in mvneta_init()
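
mvneta_init() seeds per-queue defaults in a loop over pp->txqs: the queue id, the configured ring size, and MVNETA_TXDONE_COAL_PKTS as the tx-done coalescing count, which mvneta_ethtool_set_coalesce() (2805-2807 above) lets userspace adjust later. A model; the queue count of 8 matches mvneta's MVNETA_MAX_TXQ but is illustrative here:

    #define NUM_TXQS 8      /* illustrative; mirrors mvneta's MVNETA_MAX_TXQ */

    struct txq_cfg { int id, size, done_pkts_coal; };

    void init_txq_defaults(struct txq_cfg txqs[NUM_TXQS],
                           int ring_size, int coal_pkts)
    {
        for (int q = 0; q < NUM_TXQS; q++) {
            txqs[q].id = q;
            txqs[q].size = ring_size;
            txqs[q].done_pkts_coal = coal_pkts;
        }
    }
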