Lines matching refs: desc
58 #define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS))
177 struct desc *desc_tab; /* coherent */
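
This listing appears to match the Intel IXP4xx Ethernet driver (drivers/net/ethernet/xscale/ixp4xx_eth.c): one coherent table holds RX_DESCS receive descriptors followed by TX_DESCS transmit descriptors, which is exactly what POOL_ALLOC_SIZE above sizes. A minimal sketch of how such a table is typically obtained, assuming the standard kernel dma_pool API and a dma_pool created with 32-byte alignment to match the queue-manager address masking seen further down:

	/* Sketch: allocate the coherent descriptor table; desc_tab and
	 * desc_tab_phys follow the listing, the dma_pool is assumed. */
	port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
					&port->desc_tab_phys);
	if (!port->desc_tab)
		return -ENOMEM;
	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
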
198 struct desc { struct
236 (n) * sizeof(struct desc)) argument
240 ((n) + RX_DESCS) * sizeof(struct desc))
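
Source lines 236 and 240 are only the tails of the physical-address helpers. Reconstructed from those fragments (whitespace may differ), together with the virtual-pointer helpers rx_desc_ptr()/tx_desc_ptr() that the rest of the listing uses to index the same table:

#define rx_desc_phys(port, n)	((port)->desc_tab_phys + \
				 (n) * sizeof(struct desc))
#define rx_desc_ptr(port, n)	(&(port)->desc_tab[n])
#define tx_desc_phys(port, n)	((port)->desc_tab_phys + \
				 ((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])
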
612 static inline void debug_desc(u32 phys, struct desc *desc) in debug_desc() argument
617 phys, desc->next, desc->buf_len, desc->pkt_len, in debug_desc()
618 desc->data, desc->dest_id, desc->src_id, desc->flags, in debug_desc()
619 desc->qos, desc->padlen, desc->vlan_tci, in debug_desc()
620 desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2, in debug_desc()
621 desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5, in debug_desc()
622 desc->src_mac_0, desc->src_mac_1, desc->src_mac_2, in debug_desc()
623 desc->src_mac_3, desc->src_mac_4, desc->src_mac_5); in debug_desc()
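
debug_desc() prints every field, which pins down the descriptor contents. The sketch below lays them out in one plausible order totalling 32 bytes (consistent with the queue manager masking off the low 5 address bits); the real driver reorders sub-word fields between the __ARMEB__ and little-endian builds, which is omitted here:

struct desc {			/* hardware descriptor, 32 bytes */
	u32 next;		/* phys address of next descriptor (unused) */
	u16 buf_len;		/* usable buffer length */
	u16 pkt_len;		/* actual packet length */
	u32 data;		/* phys address of the data buffer */
	u8 dest_id;
	u8 src_id;
	u16 flags;
	u8 qos;
	u8 padlen;
	u16 vlan_tci;
	u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3, dst_mac_4, dst_mac_5;
	u8 src_mac_0, src_mac_1, src_mac_2, src_mac_3, src_mac_4, src_mac_5;
};
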
631 struct desc *tab; in queue_get_desc()
639 n_desc = (phys - tab_phys) / sizeof(struct desc); in queue_get_desc()
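
The division at source line 639 converts a physical address popped from a hardware queue back into a table index. A condensed sketch of queue_get_desc(), assuming the IXP4xx queue-manager accessor qmgr_get_entry() and the low 5 bits of each queue entry being NPE control bits:

static inline int queue_get_desc(unsigned int queue, struct port *port,
				 int is_tx)
{
	u32 phys, tab_phys, n_desc;
	struct desc *tab;

	phys = qmgr_get_entry(queue);
	if (!phys)
		return -1;		/* hardware queue is empty */

	phys &= ~0x1F;			/* strip NPE control bits */
	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, &tab[n_desc]);
	return n_desc;
}
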
647 struct desc *desc) in queue_put_desc() argument
649 debug_desc(phys, desc); in queue_put_desc()
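
Its counterpart pushes a physical address back to a hardware queue. A sketch, assuming qmgr_put_entry() and queues sized so the put cannot overflow:

static inline void queue_put_desc(unsigned int queue, u32 phys,
				  struct desc *desc)
{
	debug_desc(phys, desc);
	BUG_ON(phys & 0x1F);	/* descriptors are 32-byte aligned */
	qmgr_put_entry(queue, phys);
	/* No overflow check: the queues are allocated long enough to
	 * hold every descriptor that can be outstanding at once. */
}
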
657 static inline void dma_unmap_tx(struct port *port, struct desc *desc) in dma_unmap_tx() argument
660 dma_unmap_single(&port->netdev->dev, desc->data, in dma_unmap_tx()
661 desc->buf_len, DMA_TO_DEVICE); in dma_unmap_tx()
663 dma_unmap_single(&port->netdev->dev, desc->data & ~3, in dma_unmap_tx()
664 ALIGN((desc->data & 3) + desc->buf_len, 4), in dma_unmap_tx()
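
The two dma_unmap_single() calls above are alternative branches of one helper. The second reverses the TX bounce-buffer arithmetic seen later in eth_xmit(): the mapping started at the word-aligned base (data & ~3) and covered the misalignment plus the frame length, padded to a multiple of 4. Presumably the branches are selected by endianness, as in this sketch:

static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
	/* frame was mapped in place, exactly buf_len bytes */
	dma_unmap_single(&port->netdev->dev, desc->data,
			 desc->buf_len, DMA_TO_DEVICE);
#else
	/* frame sat (data & 3) bytes into a word-aligned bounce buffer */
	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
			 ALIGN((desc->data & 3) + desc->buf_len, 4),
			 DMA_TO_DEVICE);
#endif
}
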
695 struct desc *desc; in eth_poll() local
726 desc = rx_desc_ptr(port, n); in eth_poll()
739 ALIGN(NET_IP_ALIGN + desc->pkt_len, 4)); in eth_poll()
745 desc->buf_len = MAX_MRU; in eth_poll()
746 desc->pkt_len = 0; in eth_poll()
747 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); in eth_poll()
755 dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN, in eth_poll()
758 dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN, in eth_poll()
761 ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4); in eth_poll()
764 skb_put(skb, desc->pkt_len); in eth_poll()
777 desc->data = phys + NET_IP_ALIGN; in eth_poll()
779 desc->buf_len = MAX_MRU; in eth_poll()
780 desc->pkt_len = 0; in eth_poll()
781 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); in eth_poll()
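
Within eth_poll(), the same three-line re-arm sequence appears twice: once when no replacement skb could be allocated (the old buffer is simply recycled, source lines 745-747) and once after a fresh buffer has been installed at phys + NET_IP_ALIGN so the IP header lands word-aligned (lines 777-781). The unmap/sync calls correspondingly subtract NET_IP_ALIGN to get back to the mapped base. The recycle step as a standalone sketch (helper name is illustrative only):

static void rx_desc_recycle(struct port *port, int n, unsigned int rxfreeq)
{
	struct desc *desc = rx_desc_ptr(port, n);

	desc->buf_len = MAX_MRU;	/* whole buffer offered to the NPE */
	desc->pkt_len = 0;		/* NPE fills this in on receive */
	queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
}
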
802 struct desc *desc; in eth_txdone_irq() local
810 n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc); in eth_txdone_irq()
812 desc = tx_desc_ptr(port, n_desc); in eth_txdone_irq()
813 debug_desc(phys, desc); in eth_txdone_irq()
817 port->netdev->stats.tx_bytes += desc->pkt_len; in eth_txdone_irq()
819 dma_unmap_tx(port, desc); in eth_txdone_irq()
829 queue_put_desc(port->plat->txreadyq, phys, desc); in eth_txdone_irq()
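
The TX-done handler maps the popped physical address back to a descriptor index, credits the byte count, unmaps the buffer and returns the slot to the TX-ready queue for reuse. A condensed sketch (skb freeing, locking and netif wakeup omitted; phys is assumed to have its control bits already stripped):

static void tx_done_one(struct port *port, u32 phys)
{
	int n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
	struct desc *desc = tx_desc_ptr(port, n_desc);

	debug_desc(phys, desc);
	port->netdev->stats.tx_packets++;
	port->netdev->stats.tx_bytes += desc->pkt_len;
	dma_unmap_tx(port, desc);
	queue_put_desc(port->plat->txreadyq, phys, desc);
}
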
847 struct desc *desc; in eth_xmit() local
889 desc = tx_desc_ptr(port, n); in eth_xmit()
896 desc->data = phys + offset; in eth_xmit()
897 desc->buf_len = desc->pkt_len = len; in eth_xmit()
901 queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc); in eth_xmit()
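
eth_xmit() produces exactly the state that dma_unmap_tx() later reverses: on the non-__ARMEB__ path the frame is copied into a word-aligned bounce buffer but keeps its original misalignment, so the descriptor points offset bytes into the mapping. A sketch of that arithmetic (mem is assumed to be the bounce buffer already holding the frame; slot lookup and error unwinding trimmed):

static int tx_fill_desc(struct net_device *dev, struct port *port,
			int n, void *mem, struct sk_buff *skb)
{
	struct desc *desc = tx_desc_ptr(port, n);
	int offset = (uintptr_t)skb->data & 3;	/* misalignment to keep */
	int bytes = ALIGN(offset + skb->len, 4);
	dma_addr_t phys = dma_map_single(&dev->dev, mem, bytes,
					 DMA_TO_DEVICE);

	if (dma_mapping_error(&dev->dev, phys))
		return -EIO;
	desc->data = phys + offset;	/* HW reads from the true start */
	desc->buf_len = desc->pkt_len = skb->len;
	queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
	return 0;
}
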
1142 struct desc *desc = rx_desc_ptr(port, i); in init_queues() local
1154 desc->buf_len = MAX_MRU; in init_queues()
1155 desc->data = dma_map_single(&port->netdev->dev, data, in init_queues()
1157 if (dma_mapping_error(&port->netdev->dev, desc->data)) { in init_queues()
1161 desc->data += NET_IP_ALIGN; in init_queues()
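
init_queues() pre-arms every RX descriptor. Note the asymmetry that explains the arithmetic elsewhere: the buffer is mapped from its base, and NET_IP_ALIGN is added to desc->data afterwards. A sketch of the loop body (RX_BUFF_SIZE and rx_buff_tab are assumed from context; the big-endian skb-based variant is omitted):

for (i = 0; i < RX_DESCS; i++) {
	struct desc *desc = rx_desc_ptr(port, i);
	void *data = kmalloc(RX_BUFF_SIZE, GFP_KERNEL);

	if (!data)
		return -ENOMEM;
	desc->buf_len = MAX_MRU;
	desc->data = dma_map_single(&port->netdev->dev, data,
				    RX_BUFF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(&port->netdev->dev, desc->data)) {
		kfree(data);
		return -EIO;
	}
	desc->data += NET_IP_ALIGN;	/* NPE writes past the headroom */
	port->rx_buff_tab[i] = data;
}
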
1174 struct desc *desc = rx_desc_ptr(port, i); in destroy_queues() local
1178 desc->data - NET_IP_ALIGN, in destroy_queues()
1184 struct desc *desc = tx_desc_ptr(port, i); in destroy_queues() local
1187 dma_unmap_tx(port, desc); in destroy_queues()
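
destroy_queues() undoes both mappings symmetrically: each RX unmap subtracts the NET_IP_ALIGN added in init_queues(), and each TX slot still holding a frame goes through dma_unmap_tx() so whichever mapping variant was used gets reversed. A sketch of the RX side, pairing with the kmalloc() path above:

for (i = 0; i < RX_DESCS; i++) {
	struct desc *desc = rx_desc_ptr(port, i);

	if (port->rx_buff_tab[i]) {
		dma_unmap_single(&port->netdev->dev,
				 desc->data - NET_IP_ALIGN,
				 RX_BUFF_SIZE, DMA_FROM_DEVICE);
		kfree(port->rx_buff_tab[i]);
		port->rx_buff_tab[i] = NULL;
	}
}
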
1337 struct desc *desc; in eth_close() local
1341 desc = tx_desc_ptr(port, n); in eth_close()
1343 desc->buf_len = desc->pkt_len = 1; in eth_close()
1345 queue_put_desc(TX_QUEUE(port->id), phys, desc); in eth_close()
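
The curious desc->buf_len = desc->pkt_len = 1 in eth_close() belongs to queue draining: while waiting for the NPE to hand back all RX buffers, the driver injects a dummy 1-byte frame whenever the TX queue runs empty so the NPE keeps cycling. A sketch of that step, assuming the queue-manager predicate qmgr_stat_empty(); wmb() orders the descriptor writes before the queue push:

if (qmgr_stat_empty(TX_QUEUE(port->id))) {
	/* inject a dummy frame to keep the NPE processing */
	int n = queue_get_desc(port->plat->txreadyq, port, 1);

	BUG_ON(n < 0);			/* a free TX slot must exist */
	desc = tx_desc_ptr(port, n);
	phys = tx_desc_phys(port, n);
	desc->buf_len = desc->pkt_len = 1;	/* minimal dummy frame */
	wmb();				/* descriptor before queue entry */
	queue_put_desc(TX_QUEUE(port->id), phys, desc);
}
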