Lines matching refs: txq
217 #define IS_TSO_HEADER(txq, addr) \ argument
218 ((addr >= txq->tso_hdrs_dma) && \
219 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
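
The IS_TSO_HEADER() macro lets TX-completion code tell whether a descriptor's DMA address points into the queue's preallocated TSO header block (which must not be unmapped) or into mapped skb data. A minimal userspace sketch of the range check, assuming TSO_HEADER_SIZE is 128 as in include/net/tso.h, with a uint32_t stand-in for dma_addr_t and made-up sample addresses:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t dma_addr_t;	/* stand-in for the kernel type */
#define TSO_HEADER_SIZE 128	/* matches include/net/tso.h */

struct txq_sketch {
	dma_addr_t tso_hdrs_dma;	/* DMA base of the coherent header block */
	unsigned int tx_ring_size;
};

/* True when addr falls inside the per-queue TSO header region, i.e. it
 * came from tso_hdrs_dma and must not be dma_unmap'ed on completion. */
static int is_tso_header(const struct txq_sketch *txq, dma_addr_t addr)
{
	return addr >= txq->tso_hdrs_dma &&
	       addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE;
}

int main(void)
{
	struct txq_sketch txq = { .tso_hdrs_dma = 0x80000000u, .tx_ring_size = 512 };

	printf("%d\n", is_tso_header(&txq, txq.tso_hdrs_dma + 5 * TSO_HEADER_SIZE)); /* 1 */
	printf("%d\n", is_tso_header(&txq, 0x70000000u));                            /* 0 */
	return 0;
}
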
230 struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; in fec_enet_get_nextdesc() local
236 if (bdp >= txq->tx_bd_base) { in fec_enet_get_nextdesc()
237 base = txq->tx_bd_base; in fec_enet_get_nextdesc()
238 ring_size = txq->tx_ring_size; in fec_enet_get_nextdesc()
239 ex_base = (struct bufdesc_ex *)txq->tx_bd_base; in fec_enet_get_nextdesc()
261 struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; in fec_enet_get_prevdesc() local
267 if (bdp >= txq->tx_bd_base) { in fec_enet_get_prevdesc()
268 base = txq->tx_bd_base; in fec_enet_get_prevdesc()
269 ring_size = txq->tx_ring_size; in fec_enet_get_prevdesc()
270 ex_base = (struct bufdesc_ex *)txq->tx_bd_base; in fec_enet_get_prevdesc()
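
fec_enet_get_nextdesc() and fec_enet_get_prevdesc() pick the TX base and ring size when bdp lies at or above tx_bd_base, then step one descriptor with wrap-around (the ex_base lines handle the larger struct bufdesc_ex stride). A simplified, runnable sketch of the wrap logic for the non-extended case; the bufdesc layout here is illustrative only:

#include <stdio.h>
#include <stdint.h>

struct bufdesc {	/* illustrative layout only */
	uint16_t cbd_sc, cbd_datlen;
	uint32_t cbd_bufaddr;
};

/* Step forward, wrapping to base past the last of ring_size entries. */
static struct bufdesc *next_desc(struct bufdesc *bdp, struct bufdesc *base,
				 unsigned int ring_size)
{
	return bdp >= base + ring_size - 1 ? base : bdp + 1;
}

/* Step backward, wrapping to the last entry when at base. */
static struct bufdesc *prev_desc(struct bufdesc *bdp, struct bufdesc *base,
				 unsigned int ring_size)
{
	return bdp <= base ? base + ring_size - 1 : bdp - 1;
}

int main(void)
{
	struct bufdesc ring[4] = { 0 };
	struct bufdesc *p = &ring[3];

	p = next_desc(p, ring, 4);	/* wraps to ring[0] */
	printf("%td\n", p - ring);	/* prints 0 */
	p = prev_desc(p, ring, 4);	/* wraps back to ring[3] */
	printf("%td\n", p - ring);	/* prints 3 */
	return 0;
}
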
291 struct fec_enet_priv_tx_q *txq) in fec_enet_get_free_txdesc_num() argument
295 entries = ((const char *)txq->dirty_tx - in fec_enet_get_free_txdesc_num()
296 (const char *)txq->cur_tx) / fep->bufdesc_size - 1; in fec_enet_get_free_txdesc_num()
298 return entries > 0 ? entries : entries + txq->tx_ring_size; in fec_enet_get_free_txdesc_num()
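
fec_enet_get_free_txdesc_num() derives the free-descriptor count from the byte distance between dirty_tx (the consumer) and cur_tx (the producer): divide by the descriptor size, subtract the one slot deliberately kept empty so a full ring is distinguishable from an empty one, and fold negative results back by tx_ring_size. A standalone model of that arithmetic:

#include <stdio.h>

/* entries = (dirty_tx - cur_tx) / descriptor size, minus the one slot
 * kept empty so "full" and "empty" rings look different; negative
 * results wrap back into range by adding ring_size. */
static int free_txdesc_num(const char *dirty_tx, const char *cur_tx,
			   int bufdesc_size, int ring_size)
{
	int entries = (int)(dirty_tx - cur_tx) / bufdesc_size - 1;

	return entries > 0 ? entries : entries + ring_size;
}

int main(void)
{
	char ring[8 * 32];	/* 8 descriptors of 32 bytes each */

	/* cur_tx two slots ahead of dirty_tx: 8 - 2 - 1 = 5 free */
	printf("%d\n", free_txdesc_num(ring, ring + 2 * 32, 32, 8));
	return 0;
}
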
324 struct fec_enet_priv_tx_q *txq; in fec_dump() local
330 txq = fep->tx_queue[0]; in fec_dump()
331 bdp = txq->tx_bd_base; in fec_dump()
336 bdp == txq->cur_tx ? 'S' : ' ', in fec_dump()
337 bdp == txq->dirty_tx ? 'H' : ' ', in fec_dump()
339 txq->tx_skbuff[index]); in fec_dump()
342 } while (bdp != txq->tx_bd_base); in fec_dump()
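
fec_dump() walks the ring exactly once, tagging the producer pointer cur_tx with 'S' and the reclaim pointer dirty_tx with 'H', and stops when the walk wraps back to tx_bd_base. A toy version of the traversal using indices instead of descriptor pointers:

#include <stdio.h>

#define RING 4

int main(void)
{
	unsigned int cur_tx = 2, dirty_tx = 0, i = 0;

	do {
		printf("%3u %c%c\n", i,
		       i == cur_tx ? 'S' : ' ',		/* producer: txq->cur_tx */
		       i == dirty_tx ? 'H' : ' ');	/* reclaim: txq->dirty_tx */
		i = (i + 1) % RING;
	} while (i != 0);	/* i.e. bdp != txq->tx_bd_base */
	return 0;
}
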
368 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, in fec_enet_txq_submit_frag_skb() argument
373 struct bufdesc *bdp = txq->cur_tx; in fec_enet_txq_submit_frag_skb()
418 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); in fec_enet_txq_submit_frag_skb()
421 memcpy(txq->tx_bounce[index], bufaddr, frag_len); in fec_enet_txq_submit_frag_skb()
422 bufaddr = txq->tx_bounce[index]; in fec_enet_txq_submit_frag_skb()
444 bdp = txq->cur_tx; in fec_enet_txq_submit_frag_skb()
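
fec_enet_txq_submit_frag_skb() falls back to a preallocated bounce buffer when a fragment's address is not suitable for the FEC's DMA engine; the copy into tx_bounce[index] replaces the original pointer before mapping. A sketch of that fallback, assuming a 4-byte alignment requirement (the driver's real condition also honours SoC-specific quirk flags) and the driver's FEC_ENET_TX_FRSIZE bounce size:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define FEC_ENET_TX_FRSIZE 2048	/* per-descriptor bounce buffer size */

/* Return the pointer to hand to the DMA mapping: the original buffer
 * when it is 4-byte aligned (an assumption here), otherwise the filled
 * bounce buffer. */
static const void *maybe_bounce(const void *buf, size_t len, void *bounce)
{
	if (((uintptr_t)buf & 0x3) == 0)
		return buf;

	memcpy(bounce, buf, len);	/* mirrors memcpy(txq->tx_bounce[index], ...) */
	return bounce;
}

int main(void)
{
	static char bounce[FEC_ENET_TX_FRSIZE];
	int storage[4] = { 0 };	/* 4-byte aligned backing */
	const char *unaligned = (const char *)storage + 1;

	const void *p = maybe_bounce(unaligned, 5, bounce);
	printf("%s\n", p == bounce ? "bounced" : "mapped directly");
	return 0;
}
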
453 static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, in fec_enet_txq_submit_skb() argument
468 entries_free = fec_enet_get_free_txdesc_num(fep, txq); in fec_enet_txq_submit_skb()
483 bdp = txq->cur_tx; in fec_enet_txq_submit_skb()
493 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); in fec_enet_txq_submit_skb()
496 memcpy(txq->tx_bounce[index], skb->data, buflen); in fec_enet_txq_submit_skb()
497 bufaddr = txq->tx_bounce[index]; in fec_enet_txq_submit_skb()
513 last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev); in fec_enet_txq_submit_skb()
544 index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep); in fec_enet_txq_submit_skb()
546 txq->tx_skbuff[index] = skb; in fec_enet_txq_submit_skb()
566 txq->cur_tx = bdp; in fec_enet_txq_submit_skb()
575 fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, in fec_enet_txq_put_data_tso() argument
594 memcpy(txq->tx_bounce[index], data, size); in fec_enet_txq_put_data_tso()
595 data = txq->tx_bounce[index]; in fec_enet_txq_put_data_tso()
636 fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, in fec_enet_txq_put_hdr_tso() argument
653 bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE; in fec_enet_txq_put_hdr_tso()
654 dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE; in fec_enet_txq_put_hdr_tso()
657 memcpy(txq->tx_bounce[index], skb->data, hdr_len); in fec_enet_txq_put_hdr_tso()
658 bufaddr = txq->tx_bounce[index]; in fec_enet_txq_put_hdr_tso()
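
fec_enet_txq_put_hdr_tso() never maps headers per packet: descriptor index i owns a fixed TSO_HEADER_SIZE slot inside the coherent tso_hdrs block, so the CPU pointer and the DMA address are derived with the same offset. A sketch with an assumed DMA base:

#include <stdio.h>
#include <stdint.h>

#define TSO_HEADER_SIZE 128

int main(void)
{
	static char tso_hdrs[512 * TSO_HEADER_SIZE];	/* CPU side of the block */
	uint32_t tso_hdrs_dma = 0x80000000u;	/* assumed DMA base */
	unsigned int index = 3;	/* descriptor index in the ring */

	char *bufaddr = tso_hdrs + index * TSO_HEADER_SIZE;
	uint32_t dmabuf = tso_hdrs_dma + index * TSO_HEADER_SIZE;

	/* same offset on both sides: no per-packet dma_map needed */
	printf("cpu offset %td, dma 0x%x\n", bufaddr - tso_hdrs, (unsigned)dmabuf);
	return 0;
}
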
690 static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, in fec_enet_txq_submit_tso() argument
697 struct bufdesc *bdp = txq->cur_tx; in fec_enet_txq_submit_tso()
703 if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) { in fec_enet_txq_submit_tso()
723 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); in fec_enet_txq_submit_tso()
728 hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE; in fec_enet_txq_submit_tso()
730 ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index); in fec_enet_txq_submit_tso()
739 index = fec_enet_get_bd_index(txq->tx_bd_base, in fec_enet_txq_submit_tso()
741 ret = fec_enet_txq_put_data_tso(txq, skb, ndev, in fec_enet_txq_submit_tso()
757 txq->tx_skbuff[index] = skb; in fec_enet_txq_submit_tso()
760 txq->cur_tx = bdp; in fec_enet_txq_submit_tso()
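
fec_enet_txq_submit_tso() first checks that the whole segmented skb fits, comparing tso_count_descs() against fec_enet_get_free_txdesc_num() and returning busy otherwise, then emits one header descriptor plus data descriptors per segment. A rough model of the budget check; tso_descs_needed() is a hypothetical simplification of tso_count_descs(), which also accounts for page fragments:

#include <stdio.h>

/* Hypothetical simplification of tso_count_descs(): one header
 * descriptor plus one data descriptor per MSS-sized segment; real
 * skbs with page fragments need more. */
static int tso_descs_needed(unsigned int payload_len, unsigned int mss)
{
	unsigned int segs = (payload_len + mss - 1) / mss;

	return (int)(2 * segs);
}

int main(void)
{
	int entries_free = 16;	/* fec_enet_get_free_txdesc_num() result */

	if (tso_descs_needed(20000, 1448) >= entries_free)
		printf("ring too full: return NETDEV_TX_BUSY\n");
	return 0;
}
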
783 struct fec_enet_priv_tx_q *txq; in fec_enet_start_xmit() local
788 txq = fep->tx_queue[queue]; in fec_enet_start_xmit()
792 ret = fec_enet_txq_submit_tso(txq, skb, ndev); in fec_enet_start_xmit()
794 ret = fec_enet_txq_submit_skb(txq, skb, ndev); in fec_enet_start_xmit()
798 entries_free = fec_enet_get_free_txdesc_num(fep, txq); in fec_enet_start_xmit()
799 if (entries_free <= txq->tx_stop_threshold) in fec_enet_start_xmit()
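
fec_enet_start_xmit() stops the netdev queue as soon as entries_free drops to tx_stop_threshold, reserving headroom for one worst-case (maximally fragmented) skb before the ring can overflow. A sketch of that decision, with an assumed threshold value:

#include <stdio.h>
#include <stdbool.h>

/* mirrors: if (entries_free <= txq->tx_stop_threshold)
 *                  netif_tx_stop_queue(nq);			*/
static bool should_stop(int entries_free, int tx_stop_threshold)
{
	return entries_free <= tx_stop_threshold;
}

int main(void)
{
	int tx_stop_threshold = 21;	/* assumed FEC_MAX_SKB_DESCS value */

	printf("%s\n", should_stop(20, tx_stop_threshold)
		       ? "queue stopped" : "queue running");
	return 0;
}
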
810 struct fec_enet_priv_tx_q *txq; in fec_enet_bd_init() local
840 txq = fep->tx_queue[q]; in fec_enet_bd_init()
841 bdp = txq->tx_bd_base; in fec_enet_bd_init()
842 txq->cur_tx = bdp; in fec_enet_bd_init()
844 for (i = 0; i < txq->tx_ring_size; i++) { in fec_enet_bd_init()
847 if (txq->tx_skbuff[i]) { in fec_enet_bd_init()
848 dev_kfree_skb_any(txq->tx_skbuff[i]); in fec_enet_bd_init()
849 txq->tx_skbuff[i] = NULL; in fec_enet_bd_init()
858 txq->dirty_tx = bdp; in fec_enet_bd_init()
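
fec_enet_bd_init() drops any skb still parked in the ring across a restart, clears the descriptors, and resets cur_tx to the base and dirty_tx to the last descriptor so the one-empty-slot invariant holds again. A toy model using indices, with free() standing in for dev_kfree_skb_any():

#include <stdio.h>
#include <stdlib.h>

#define RING 4

int main(void)
{
	void *tx_skbuff[RING] = { NULL, malloc(32), NULL, NULL };
	unsigned int cur_tx, dirty_tx, i;

	for (i = 0; i < RING; i++) {
		free(tx_skbuff[i]);	/* dev_kfree_skb_any() stand-in */
		tx_skbuff[i] = NULL;
	}
	cur_tx = 0;		/* back to tx_bd_base */
	dirty_tx = RING - 1;	/* last descriptor: one-empty-slot invariant */
	printf("cur=%u dirty=%u\n", cur_tx, dirty_tx);
	return 0;
}
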
874 struct fec_enet_priv_tx_q *txq; in fec_enet_enable_ring() local
890 txq = fep->tx_queue[i]; in fec_enet_enable_ring()
891 writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i)); in fec_enet_enable_ring()
903 struct fec_enet_priv_tx_q *txq; in fec_enet_reset_skb() local
907 txq = fep->tx_queue[i]; in fec_enet_reset_skb()
909 for (j = 0; j < txq->tx_ring_size; j++) { in fec_enet_reset_skb()
910 if (txq->tx_skbuff[j]) { in fec_enet_reset_skb()
911 dev_kfree_skb_any(txq->tx_skbuff[j]); in fec_enet_reset_skb()
912 txq->tx_skbuff[j] = NULL; in fec_enet_reset_skb()
1204 struct fec_enet_priv_tx_q *txq; in fec_enet_tx_queue() local
1213 txq = fep->tx_queue[queue_id]; in fec_enet_tx_queue()
1216 bdp = txq->dirty_tx; in fec_enet_tx_queue()
1221 while (bdp != READ_ONCE(txq->cur_tx)) { in fec_enet_tx_queue()
1228 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); in fec_enet_tx_queue()
1230 skb = txq->tx_skbuff[index]; in fec_enet_tx_queue()
1231 txq->tx_skbuff[index] = NULL; in fec_enet_tx_queue()
1232 if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) in fec_enet_tx_queue()
1283 txq->dirty_tx = bdp; in fec_enet_tx_queue()
1291 entries_free = fec_enet_get_free_txdesc_num(fep, txq); in fec_enet_tx_queue()
1292 if (entries_free >= txq->tx_wake_threshold) in fec_enet_tx_queue()
1298 if (bdp != txq->cur_tx && in fec_enet_tx_queue()
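
fec_enet_tx_queue() is the reclaim side: it walks from dirty_tx toward a snapshot of cur_tx, skips unmapping for TSO-header addresses (the IS_TSO_HEADER() check above), releases each skb, and wakes the queue once enough entries are free again. A userspace model of the loop shape; the wake test here uses the freed count as a stand-in for the real free-entry recount:

#include <stdio.h>
#include <stdlib.h>

#define RING 8

int main(void)
{
	void *tx_skbuff[RING] = { 0 };
	unsigned int dirty_tx = 2, cur_tx = 5, freed = 0;
	unsigned int tx_wake_threshold = 3;	/* assumed value */

	tx_skbuff[2] = malloc(1);
	tx_skbuff[3] = malloc(1);
	tx_skbuff[4] = malloc(1);

	while (dirty_tx != cur_tx) {	/* bdp != READ_ONCE(txq->cur_tx) */
		free(tx_skbuff[dirty_tx]);	/* dev_kfree_skb_any() stand-in */
		tx_skbuff[dirty_tx] = NULL;
		dirty_tx = (dirty_tx + 1) % RING;
		freed++;
	}
	if (freed >= tx_wake_threshold)	/* entries_free >= tx_wake_threshold */
		printf("netif_tx_wake_queue()\n");
	return 0;
}
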
2676 struct fec_enet_priv_tx_q *txq; in fec_enet_free_buffers() local
2698 txq = fep->tx_queue[q]; in fec_enet_free_buffers()
2699 bdp = txq->tx_bd_base; in fec_enet_free_buffers()
2700 for (i = 0; i < txq->tx_ring_size; i++) { in fec_enet_free_buffers()
2701 kfree(txq->tx_bounce[i]); in fec_enet_free_buffers()
2702 txq->tx_bounce[i] = NULL; in fec_enet_free_buffers()
2703 skb = txq->tx_skbuff[i]; in fec_enet_free_buffers()
2704 txq->tx_skbuff[i] = NULL; in fec_enet_free_buffers()
2714 struct fec_enet_priv_tx_q *txq; in fec_enet_free_queue() local
2718 txq = fep->tx_queue[i]; in fec_enet_free_queue()
2720 txq->tx_ring_size * TSO_HEADER_SIZE, in fec_enet_free_queue()
2721 txq->tso_hdrs, in fec_enet_free_queue()
2722 txq->tso_hdrs_dma); in fec_enet_free_queue()
2736 struct fec_enet_priv_tx_q *txq; in fec_enet_alloc_queue() local
2739 txq = kzalloc(sizeof(*txq), GFP_KERNEL); in fec_enet_alloc_queue()
2740 if (!txq) { in fec_enet_alloc_queue()
2745 fep->tx_queue[i] = txq; in fec_enet_alloc_queue()
2746 txq->tx_ring_size = TX_RING_SIZE; in fec_enet_alloc_queue()
2749 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; in fec_enet_alloc_queue()
2750 txq->tx_wake_threshold = in fec_enet_alloc_queue()
2751 (txq->tx_ring_size - txq->tx_stop_threshold) / 2; in fec_enet_alloc_queue()
2753 txq->tso_hdrs = dma_alloc_coherent(NULL, in fec_enet_alloc_queue()
2754 txq->tx_ring_size * TSO_HEADER_SIZE, in fec_enet_alloc_queue()
2755 &txq->tso_hdrs_dma, in fec_enet_alloc_queue()
2757 if (!txq->tso_hdrs) { in fec_enet_alloc_queue()
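
fec_enet_alloc_queue() zero-allocates each queue, sets tx_stop_threshold to FEC_MAX_SKB_DESCS so a maximally fragmented skb always fits after a stop, puts tx_wake_threshold halfway between that and a full ring, and grabs one coherent block for all TSO headers (note the listing's older-kernel dma_alloc_coherent(NULL, ...) call with no device). A sketch of the threshold arithmetic, with assumed values for TX_RING_SIZE and FEC_MAX_SKB_DESCS:

#include <stdio.h>

int main(void)
{
	unsigned int tx_ring_size = 512;	/* TX_RING_SIZE (assumed) */
	unsigned int fec_max_skb_descs = 21;	/* FEC_MAX_SKB_DESCS (assumed) */

	unsigned int tx_stop_threshold = fec_max_skb_descs;
	unsigned int tx_wake_threshold =
		(tx_ring_size - tx_stop_threshold) / 2;

	printf("stop when free <= %u, wake when free >= %u\n",
	       tx_stop_threshold, tx_wake_threshold);
	return 0;
}
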
2829 struct fec_enet_priv_tx_q *txq; in fec_enet_alloc_txq_buffers() local
2831 txq = fep->tx_queue[queue]; in fec_enet_alloc_txq_buffers()
2832 bdp = txq->tx_bd_base; in fec_enet_alloc_txq_buffers()
2833 for (i = 0; i < txq->tx_ring_size; i++) { in fec_enet_alloc_txq_buffers()
2834 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); in fec_enet_alloc_txq_buffers()
2835 if (!txq->tx_bounce[i]) in fec_enet_alloc_txq_buffers()
3141 struct fec_enet_priv_tx_q *txq; in fec_enet_init() local
3196 txq = fep->tx_queue[i]; in fec_enet_init()
3197 txq->index = i; in fec_enet_init()
3198 txq->tx_bd_base = (struct bufdesc *)cbd_base; in fec_enet_init()
3199 txq->bd_dma = bd_dma; in fec_enet_init()
3201 bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size; in fec_enet_init()
3203 (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size); in fec_enet_init()
3205 bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size; in fec_enet_init()
3206 cbd_base += txq->tx_ring_size; in fec_enet_init()
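
fec_enet_init() carves every queue's descriptor ring out of a single coherent allocation, advancing the CPU cursor (cbd_base) and the DMA cursor (bd_dma) in lockstep by ring_size descriptors, using the struct bufdesc_ex stride when extended descriptors are enabled. A sketch of that carving with illustrative addresses and a simplified bufdesc_ex layout:

#include <stdio.h>
#include <stdint.h>

struct bufdesc    { uint16_t cbd_sc, cbd_datlen; uint32_t cbd_bufaddr; };
struct bufdesc_ex {	/* simplified: models the extra enhanced/timestamp fields */
	struct bufdesc desc;
	uint32_t cbd_esc, cbd_prot, cbd_bdu, ts;
	uint16_t res0[4];
};

int main(void)
{
	int bufdesc_ex = 1;	/* fep->bufdesc_ex: extended descriptors on */
	unsigned int ring_size = 512, queues = 2;	/* assumed sizes */
	uintptr_t cbd_base = 0x1000;	/* CPU cursor (illustrative) */
	uint32_t bd_dma = 0x80000000u;	/* DMA cursor (illustrative) */
	unsigned int q;

	for (q = 0; q < queues; q++) {
		size_t step = (bufdesc_ex ? sizeof(struct bufdesc_ex)
					  : sizeof(struct bufdesc)) * ring_size;

		printf("txq%u: cpu=%#lx dma=%#x\n",
		       q, (unsigned long)cbd_base, (unsigned)bd_dma);
		cbd_base += step;	/* both cursors advance in lockstep */
		bd_dma += step;
	}
	return 0;
}
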