Lines matching refs:bdp (uses of the buffer-descriptor pointer bdp in the Freescale FEC Ethernet driver, fec_main.c)

224 struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,  in fec_enet_get_nextdesc()  argument
228 struct bufdesc *new_bd = bdp + 1; in fec_enet_get_nextdesc()
229 struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1; in fec_enet_get_nextdesc()
236 if (bdp >= txq->tx_bd_base) { in fec_enet_get_nextdesc()
255 struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, in fec_enet_get_prevdesc() argument
259 struct bufdesc *new_bd = bdp - 1; in fec_enet_get_prevdesc()
260 struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1; in fec_enet_get_prevdesc()
267 if (bdp >= txq->tx_bd_base) { in fec_enet_get_prevdesc()
284 static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp, in fec_enet_get_bd_index() argument
287 return ((const char *)bdp - (const char *)base) / fep->bufdesc_size; in fec_enet_get_bd_index()
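The three helpers above are the driver's descriptor-ring primitives: fec_enet_get_nextdesc()/fec_enet_get_prevdesc() step a bufdesc pointer forward or backward with wrap-around (the stride differs when the larger bufdesc_ex descriptors are in use), and fec_enet_get_bd_index() turns a descriptor pointer back into a ring index by dividing the byte offset by fep->bufdesc_size. The sketch below is a minimal user-space model of that arithmetic only; the struct layouts, helper names and ring size are simplified stand-ins, not the kernel definitions.

/*
 * Minimal user-space model of the ring arithmetic above; the struct layouts
 * and helper names are simplified stand-ins, not the kernel definitions.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct bufdesc {                    /* legacy descriptor */
	uint16_t cbd_datlen;
	uint16_t cbd_sc;
	uint32_t cbd_bufaddr;
};

struct bufdesc_ex {                 /* enhanced descriptor: larger stride */
	struct bufdesc desc;
	uint32_t cbd_esc, cbd_prot, cbd_bdu, ts, res[2];
};

/* Step forward one descriptor, wrapping at the end of the ring. */
static struct bufdesc *get_nextdesc(struct bufdesc *bdp, void *base,
				    int ring_size, size_t bufdesc_size)
{
	char *next = (char *)bdp + bufdesc_size;
	char *end  = (char *)base + (size_t)ring_size * bufdesc_size;

	return (struct bufdesc *)(next >= end ? (char *)base : next);
}

/* Recover the ring index from the byte offset, as fec_enet_get_bd_index()
 * does with fep->bufdesc_size. */
static int get_bd_index(void *base, struct bufdesc *bdp, size_t bufdesc_size)
{
	return (int)(((const char *)bdp - (const char *)base) / bufdesc_size);
}

int main(void)
{
	struct bufdesc_ex ring[4] = { 0 };
	struct bufdesc *bdp = &ring[0].desc;
	int i;

	for (i = 0; i < 6; i++) {          /* walks 0,1,2,3 then wraps to 0,1 */
		printf("index %d\n", get_bd_index(ring, bdp, sizeof(ring[0])));
		bdp = get_nextdesc(bdp, ring, 4, sizeof(ring[0]));
	}
	return 0;
}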
323 struct bufdesc *bdp; in fec_dump() local
331 bdp = txq->tx_bd_base; in fec_dump()
336 bdp == txq->cur_tx ? 'S' : ' ', in fec_dump()
337 bdp == txq->dirty_tx ? 'H' : ' ', in fec_dump()
338 bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen, in fec_dump()
340 bdp = fec_enet_get_nextdesc(bdp, fep, 0); in fec_dump()
342 } while (bdp != txq->tx_bd_base); in fec_dump()
373 struct bufdesc *bdp = txq->cur_tx; in fec_enet_txq_submit_frag_skb() local
388 bdp = fec_enet_get_nextdesc(bdp, fep, queue); in fec_enet_txq_submit_frag_skb()
389 ebdp = (struct bufdesc_ex *)bdp; in fec_enet_txq_submit_frag_skb()
391 status = bdp->cbd_sc; in fec_enet_txq_submit_frag_skb()
418 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); in fec_enet_txq_submit_frag_skb()
437 bdp->cbd_bufaddr = addr; in fec_enet_txq_submit_frag_skb()
438 bdp->cbd_datlen = frag_len; in fec_enet_txq_submit_frag_skb()
439 bdp->cbd_sc = status; in fec_enet_txq_submit_frag_skb()
442 return bdp; in fec_enet_txq_submit_frag_skb()
444 bdp = txq->cur_tx; in fec_enet_txq_submit_frag_skb()
446 bdp = fec_enet_get_nextdesc(bdp, fep, queue); in fec_enet_txq_submit_frag_skb()
447 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, in fec_enet_txq_submit_frag_skb()
448 bdp->cbd_datlen, DMA_TO_DEVICE); in fec_enet_txq_submit_frag_skb()
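The matches at 437-448 show fec_enet_txq_submit_frag_skb() filling one descriptor per fragment and, on a DMA-mapping failure, walking the ring forward again from cur_tx to unmap whatever was already mapped. Below is a user-space model of that map-then-unwind pattern; map_frag()/unmap_frag() are invented stand-ins for the DMA mapping calls, and the ring and fragment sizes are arbitrary.

/*
 * Map-then-unwind model; map_frag()/unmap_frag() are invented stand-ins for
 * the DMA mapping calls, and the ring/fragment sizes are arbitrary.
 */
#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 8

struct bufdesc {
	uint16_t cbd_datlen;
	uint16_t cbd_sc;
	uint32_t cbd_bufaddr;
};

static uint32_t map_frag(int i)
{
	return i == 3 ? 0 : 0x1000u + (uint32_t)i;   /* pretend fragment 3 fails */
}

static void unmap_frag(uint32_t addr, uint16_t len)
{
	printf("unmap %#x len %u\n", (unsigned)addr, (unsigned)len);
}

int main(void)
{
	struct bufdesc ring[RING_SIZE] = { 0 };
	int cur_tx = 0, nr_frags = 5, frag;

	for (frag = 0; frag < nr_frags; frag++) {
		int i = (cur_tx + 1 + frag) % RING_SIZE;   /* descriptor after cur_tx */
		uint32_t addr = map_frag(frag);

		if (!addr)
			goto dma_mapping_error;
		ring[i].cbd_bufaddr = addr;
		ring[i].cbd_datlen = 64;
	}
	return 0;

dma_mapping_error:
	/* Walk forward from cur_tx again, undoing the mappings made so far. */
	for (int f = 0; f < frag; f++) {
		int i = (cur_tx + 1 + f) % RING_SIZE;
		unmap_frag(ring[i].cbd_bufaddr, ring[i].cbd_datlen);
	}
	return 1;
}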
458 struct bufdesc *bdp, *last_bdp; in fec_enet_txq_submit_skb() local
483 bdp = txq->cur_tx; in fec_enet_txq_submit_skb()
484 last_bdp = bdp; in fec_enet_txq_submit_skb()
485 status = bdp->cbd_sc; in fec_enet_txq_submit_skb()
493 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); in fec_enet_txq_submit_skb()
528 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; in fec_enet_txq_submit_skb()
548 bdp->cbd_datlen = buflen; in fec_enet_txq_submit_skb()
549 bdp->cbd_bufaddr = addr; in fec_enet_txq_submit_skb()
555 bdp->cbd_sc = status; in fec_enet_txq_submit_skb()
558 bdp = fec_enet_get_nextdesc(last_bdp, fep, queue); in fec_enet_txq_submit_skb()
566 txq->cur_tx = bdp; in fec_enet_txq_submit_skb()
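For the single-buffer path, the matches at 548-566 show the fill order in fec_enet_txq_submit_skb(): length and buffer address go into the descriptor first, the status word that hands ownership to the MAC is written last, and only then is cur_tx advanced past the last descriptor used. A minimal model of that ordering follows; the bit values are illustrative constants, not the hardware's, and the DMA write barrier the driver needs is only noted in a comment.

/*
 * Ordering model only: illustrative bit values, and the barrier the driver
 * needs before handing the descriptor over is just noted in a comment.
 */
#include <stdio.h>
#include <stdint.h>

#define BD_READY 0x8000u   /* stand-in for BD_ENET_TX_READY */
#define BD_LAST  0x0800u   /* stand-in for the last/interrupt bits */

struct bufdesc {
	uint16_t cbd_datlen;
	volatile uint16_t cbd_sc;
	uint32_t cbd_bufaddr;
};

static void submit(struct bufdesc *bdp, uint32_t addr, uint16_t len)
{
	bdp->cbd_datlen = len;
	bdp->cbd_bufaddr = addr;
	/* The driver must ensure the writes above are visible to the device
	 * before the status write below marks the descriptor ready. */
	bdp->cbd_sc = BD_READY | BD_LAST;
}

int main(void)
{
	struct bufdesc bd = { 0 };

	submit(&bd, 0x1000, 1514);
	printf("sc=%#x addr=%#x len=%u\n",
	       (unsigned)bd.cbd_sc, (unsigned)bd.cbd_bufaddr, (unsigned)bd.cbd_datlen);
	return 0;
}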
577 struct bufdesc *bdp, int index, char *data, in fec_enet_txq_put_data_tso() argument
581 struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); in fec_enet_txq_put_data_tso()
587 status = bdp->cbd_sc; in fec_enet_txq_put_data_tso()
609 bdp->cbd_datlen = size; in fec_enet_txq_put_data_tso()
610 bdp->cbd_bufaddr = addr; in fec_enet_txq_put_data_tso()
630 bdp->cbd_sc = status; in fec_enet_txq_put_data_tso()
638 struct bufdesc *bdp, int index) in fec_enet_txq_put_hdr_tso() argument
642 struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); in fec_enet_txq_put_hdr_tso()
649 status = bdp->cbd_sc; in fec_enet_txq_put_hdr_tso()
673 bdp->cbd_bufaddr = dmabuf; in fec_enet_txq_put_hdr_tso()
674 bdp->cbd_datlen = hdr_len; in fec_enet_txq_put_hdr_tso()
685 bdp->cbd_sc = status; in fec_enet_txq_put_hdr_tso()
697 struct bufdesc *bdp = txq->cur_tx; in fec_enet_txq_submit_tso() local
723 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); in fec_enet_txq_submit_tso()
730 ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index); in fec_enet_txq_submit_tso()
738 bdp = fec_enet_get_nextdesc(bdp, fep, queue); in fec_enet_txq_submit_tso()
740 bdp, fep); in fec_enet_txq_submit_tso()
742 bdp, index, in fec_enet_txq_submit_tso()
753 bdp = fec_enet_get_nextdesc(bdp, fep, queue); in fec_enet_txq_submit_tso()
760 txq->cur_tx = bdp; in fec_enet_txq_submit_tso()
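The TSO path (fec_enet_txq_submit_tso() with the put_hdr/put_data helpers above) consumes, per segment, one descriptor for the rebuilt protocol headers plus one or more descriptors for that segment's slice of the payload, then advances cur_tx once at the end. The sketch below only models that descriptor accounting; all sizes are made up for the demo.

/* Descriptor accounting only; all sizes are made up for the demo. */
#include <stdio.h>

int main(void)
{
	int payload = 5000;           /* TCP payload bytes in the skb */
	int mss = 1448;               /* payload bytes per segment */
	int max_data_per_bd = 2048;   /* per-descriptor buffer limit (illustrative) */
	int used = 0;

	while (payload > 0) {
		int seg = payload < mss ? payload : mss;
		int left;

		used++;                               /* header descriptor */
		for (left = seg; left > 0; left -= max_data_per_bd)
			used++;                       /* data descriptor(s) */
		payload -= seg;
	}
	printf("descriptors consumed: %d\n", used);
	return 0;
}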
812 struct bufdesc *bdp; in fec_enet_bd_init() local
819 bdp = rxq->rx_bd_base; in fec_enet_bd_init()
824 if (bdp->cbd_bufaddr) in fec_enet_bd_init()
825 bdp->cbd_sc = BD_ENET_RX_EMPTY; in fec_enet_bd_init()
827 bdp->cbd_sc = 0; in fec_enet_bd_init()
828 bdp = fec_enet_get_nextdesc(bdp, fep, q); in fec_enet_bd_init()
832 bdp = fec_enet_get_prevdesc(bdp, fep, q); in fec_enet_bd_init()
833 bdp->cbd_sc |= BD_SC_WRAP; in fec_enet_bd_init()
841 bdp = txq->tx_bd_base; in fec_enet_bd_init()
842 txq->cur_tx = bdp; in fec_enet_bd_init()
846 bdp->cbd_sc = 0; in fec_enet_bd_init()
851 bdp->cbd_bufaddr = 0; in fec_enet_bd_init()
852 bdp = fec_enet_get_nextdesc(bdp, fep, q); in fec_enet_bd_init()
856 bdp = fec_enet_get_prevdesc(bdp, fep, q); in fec_enet_bd_init()
857 bdp->cbd_sc |= BD_SC_WRAP; in fec_enet_bd_init()
858 txq->dirty_tx = bdp; in fec_enet_bd_init()
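fec_enet_bd_init() (matches 812-858) resets every ring: RX descriptors that still own a buffer are re-armed as EMPTY and the rest cleared, TX descriptors are zeroed with cur_tx and dirty_tx rewound, and the last descriptor of each ring gets BD_SC_WRAP. A simplified stand-alone model follows; the bit values and ring size are illustrative.

/* Simplified stand-alone model; bit values and ring size are illustrative. */
#include <stdio.h>
#include <stdint.h>

#define BD_EMPTY 0x8000u   /* stand-in for BD_ENET_RX_EMPTY */
#define BD_WRAP  0x2000u   /* stand-in for BD_SC_WRAP */
#define RING     4

struct bufdesc {
	uint16_t cbd_sc;
	uint32_t cbd_bufaddr;
};

int main(void)
{
	struct bufdesc rx[RING] = { { 0, 0x100 }, { 0, 0x200 }, { 0, 0 }, { 0, 0x400 } };
	struct bufdesc tx[RING] = { 0 };
	int i;

	for (i = 0; i < RING; i++)                 /* re-arm RX slots that own a buffer */
		rx[i].cbd_sc = rx[i].cbd_bufaddr ? BD_EMPTY : 0;
	rx[RING - 1].cbd_sc |= BD_WRAP;            /* close the RX ring */

	for (i = 0; i < RING; i++) {               /* clear the TX ring */
		tx[i].cbd_sc = 0;
		tx[i].cbd_bufaddr = 0;
	}
	tx[RING - 1].cbd_sc |= BD_WRAP;            /* close the TX ring */

	for (i = 0; i < RING; i++)
		printf("rx[%d].sc=%#x  tx[%d].sc=%#x\n",
		       i, (unsigned)rx[i].cbd_sc, i, (unsigned)tx[i].cbd_sc);
	return 0;
}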
1201 struct bufdesc *bdp; in fec_enet_tx_queue() local
1216 bdp = txq->dirty_tx; in fec_enet_tx_queue()
1219 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); in fec_enet_tx_queue()
1221 while (bdp != READ_ONCE(txq->cur_tx)) { in fec_enet_tx_queue()
1224 status = READ_ONCE(bdp->cbd_sc); in fec_enet_tx_queue()
1228 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); in fec_enet_tx_queue()
1232 if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) in fec_enet_tx_queue()
1233 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, in fec_enet_tx_queue()
1234 bdp->cbd_datlen, DMA_TO_DEVICE); in fec_enet_tx_queue()
1235 bdp->cbd_bufaddr = 0; in fec_enet_tx_queue()
1237 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); in fec_enet_tx_queue()
1264 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; in fec_enet_tx_queue()
1283 txq->dirty_tx = bdp; in fec_enet_tx_queue()
1286 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); in fec_enet_tx_queue()
1298 if (bdp != txq->cur_tx && in fec_enet_tx_queue()
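The reclaim loop in fec_enet_tx_queue() (matches 1216-1298) starts at the descriptor after dirty_tx, stops as soon as a descriptor is still marked ready (the hardware still owns it) or cur_tx is reached, unmaps completed buffers unless they came from the TSO header pool, and records the last completed slot in dirty_tx. Below is a user-space model of that walk; the ring contents and bit values are invented for the demo.

/* Reclaim-walk model; ring contents and bit values are invented for the demo. */
#include <stdio.h>
#include <stdint.h>

#define BD_READY 0x8000u   /* stand-in for BD_ENET_TX_READY */
#define RING     8

struct bufdesc {
	uint16_t cbd_sc;
	uint32_t cbd_bufaddr;
};

int main(void)
{
	struct bufdesc ring[RING] = { 0 };
	int dirty_tx = 1, cur_tx = 5;
	int bdi;

	/* Slots 2..4 were submitted; slot 4 is still owned by the MAC. */
	ring[2].cbd_bufaddr = 0x100;
	ring[3].cbd_bufaddr = 0x200;
	ring[4].cbd_bufaddr = 0x300;
	ring[4].cbd_sc = BD_READY;

	bdi = (dirty_tx + 1) % RING;
	while (bdi != cur_tx) {
		if (ring[bdi].cbd_sc & BD_READY)
			break;                         /* hardware still owns it */
		printf("reclaim slot %d, unmap %#x\n", bdi, (unsigned)ring[bdi].cbd_bufaddr);
		ring[bdi].cbd_bufaddr = 0;
		dirty_tx = bdi;                        /* last completed slot */
		bdi = (bdi + 1) % RING;
	}
	printf("dirty_tx now %d\n", dirty_tx);
	return 0;
}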
1317 fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb) in fec_enet_new_rxbdp() argument
1326 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, in fec_enet_new_rxbdp()
1329 if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { in fec_enet_new_rxbdp()
1339 struct bufdesc *bdp, u32 length, bool swap) in fec_enet_copybreak() argument
1351 dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, in fec_enet_copybreak()
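fec_enet_copybreak() decides whether a received frame is small enough to be copied into a fresh buffer (after syncing the mapped area for CPU access) so the original RX buffer can go straight back onto the ring; larger frames are unmapped and replaced instead. A rough model of that decision follows; the threshold is an arbitrary stand-in for the driver's copybreak setting and no real DMA syncing happens here.

/* Copy-vs-recycle model; threshold and buffer sizes are arbitrary stand-ins. */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

static bool copybreak(char *dst, const char *rx_buf, size_t len, size_t threshold)
{
	if (len > threshold)
		return false;     /* too big: caller swaps in a new buffer instead */
	/* driver: dma_sync_single_for_cpu() before reading the mapped buffer */
	memcpy(dst, rx_buf, len);
	return true;
}

int main(void)
{
	char rx_buf[2048] = "short frame payload";
	char copy[256];

	printf("64-byte frame copied:   %d\n", copybreak(copy, rx_buf, 64, 256));
	printf("1514-byte frame copied: %d\n", copybreak(copy, rx_buf, 1514, 256));
	return 0;
}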
1373 struct bufdesc *bdp; in fec_enet_rx_queue() local
1396 bdp = rxq->cur_rx; in fec_enet_rx_queue()
1398 while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { in fec_enet_rx_queue()
1440 pkt_len = bdp->cbd_datlen; in fec_enet_rx_queue()
1443 index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep); in fec_enet_rx_queue()
1450 is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4, in fec_enet_rx_queue()
1458 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, in fec_enet_rx_queue()
1472 ebdp = (struct bufdesc_ex *)bdp; in fec_enet_rx_queue()
1515 dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr, in fec_enet_rx_queue()
1520 fec_enet_new_rxbdp(ndev, bdp, skb_new); in fec_enet_rx_queue()
1529 bdp->cbd_sc = status; in fec_enet_rx_queue()
1532 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; in fec_enet_rx_queue()
1540 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); in fec_enet_rx_queue()
1548 rxq->cur_rx = bdp; in fec_enet_rx_queue()
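The RX poll loop in fec_enet_rx_queue() (matches 1396-1548) consumes descriptors whose EMPTY bit the hardware has cleared, reads cbd_datlen, and re-arms each descriptor by setting EMPTY again while preserving the WRAP bit, finally leaving cur_rx at the first still-empty slot. A condensed model of that loop, with illustrative bit values and frame sizes:

/* Condensed RX poll model; bit values and frame sizes are illustrative. */
#include <stdio.h>
#include <stdint.h>

#define BD_EMPTY 0x8000u   /* stand-in for BD_ENET_RX_EMPTY */
#define BD_WRAP  0x2000u   /* stand-in for BD_SC_WRAP */
#define RING     4

struct bufdesc {
	uint16_t cbd_sc;
	uint16_t cbd_datlen;
};

int main(void)
{
	struct bufdesc ring[RING] = {
		{ 0,                   64 },   /* frame received */
		{ 0,                 1514 },   /* frame received */
		{ BD_EMPTY,             0 },   /* still owned by the MAC */
		{ BD_EMPTY | BD_WRAP,   0 },
	};
	int cur_rx = 0;

	while (!(ring[cur_rx].cbd_sc & BD_EMPTY)) {
		printf("frame of %u bytes at slot %d\n",
		       (unsigned)ring[cur_rx].cbd_datlen, cur_rx);
		/* re-arm: hand the descriptor back, keeping only the WRAP bit */
		ring[cur_rx].cbd_sc = (ring[cur_rx].cbd_sc & BD_WRAP) | BD_EMPTY;
		cur_rx = (cur_rx + 1) % RING;
	}
	printf("cur_rx left at slot %d\n", cur_rx);
	return 0;
}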
2675 struct bufdesc *bdp; in fec_enet_free_buffers() local
2682 bdp = rxq->rx_bd_base; in fec_enet_free_buffers()
2688 bdp->cbd_bufaddr, in fec_enet_free_buffers()
2693 bdp = fec_enet_get_nextdesc(bdp, fep, q); in fec_enet_free_buffers()
2699 bdp = txq->tx_bd_base; in fec_enet_free_buffers()
2787 struct bufdesc *bdp; in fec_enet_alloc_rxq_buffers() local
2791 bdp = rxq->rx_bd_base; in fec_enet_alloc_rxq_buffers()
2797 if (fec_enet_new_rxbdp(ndev, bdp, skb)) { in fec_enet_alloc_rxq_buffers()
2803 bdp->cbd_sc = BD_ENET_RX_EMPTY; in fec_enet_alloc_rxq_buffers()
2806 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; in fec_enet_alloc_rxq_buffers()
2810 bdp = fec_enet_get_nextdesc(bdp, fep, queue); in fec_enet_alloc_rxq_buffers()
2814 bdp = fec_enet_get_prevdesc(bdp, fep, queue); in fec_enet_alloc_rxq_buffers()
2815 bdp->cbd_sc |= BD_SC_WRAP; in fec_enet_alloc_rxq_buffers()
2828 struct bufdesc *bdp; in fec_enet_alloc_txq_buffers() local
2832 bdp = txq->tx_bd_base; in fec_enet_alloc_txq_buffers()
2838 bdp->cbd_sc = 0; in fec_enet_alloc_txq_buffers()
2839 bdp->cbd_bufaddr = 0; in fec_enet_alloc_txq_buffers()
2842 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; in fec_enet_alloc_txq_buffers()
2846 bdp = fec_enet_get_nextdesc(bdp, fep, queue); in fec_enet_alloc_txq_buffers()
2850 bdp = fec_enet_get_prevdesc(bdp, fep, queue); in fec_enet_alloc_txq_buffers()
2851 bdp->cbd_sc |= BD_SC_WRAP; in fec_enet_alloc_txq_buffers()
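fec_enet_alloc_txq_buffers() leaves the TX descriptors cleared and closes the ring by setting the WRAP bit on the last one (fec_enet_alloc_rxq_buffers() does the analogous setup on the RX side with freshly mapped skbs). In the driver this function also sets up a small per-slot bounce buffer, which does not appear in the matches above because those lines do not touch bdp. The sketch below pairs that allocation with the descriptor reset that is visible here; the bounce-buffer size and bit value are illustrative.

/* Setup model; the per-slot bounce-buffer size and bit value are illustrative. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define BD_WRAP   0x2000u   /* stand-in for BD_SC_WRAP */
#define RING      4
#define BOUNCE_SZ 512

struct bufdesc {
	uint16_t cbd_sc;
	uint32_t cbd_bufaddr;
};

int main(void)
{
	struct bufdesc ring[RING] = { 0 };
	char *tx_bounce[RING] = { 0 };
	int i;

	for (i = 0; i < RING; i++) {
		tx_bounce[i] = malloc(BOUNCE_SZ);     /* per-slot bounce buffer */
		if (!tx_bounce[i])
			goto out;                     /* driver frees everything and bails */
		ring[i].cbd_sc = 0;
		ring[i].cbd_bufaddr = 0;
	}
	ring[RING - 1].cbd_sc |= BD_WRAP;             /* close the ring */
	printf("last descriptor sc=%#x\n", (unsigned)ring[RING - 1].cbd_sc);

out:
	/* free the demo buffers (the driver keeps them until teardown) */
	for (i = 0; i < RING; i++)
		free(tx_bounce[i]);
	return 0;
}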