Lines Matching refs:tx_queue
143 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
176 struct gfar_priv_tx_q *tx_queue = NULL; in gfar_init_bds() local
185 tx_queue = priv->tx_queue[i]; in gfar_init_bds()
187 tx_queue->num_txbdfree = tx_queue->tx_ring_size; in gfar_init_bds()
188 tx_queue->dirty_tx = tx_queue->tx_bd_base; in gfar_init_bds()
189 tx_queue->cur_tx = tx_queue->tx_bd_base; in gfar_init_bds()
190 tx_queue->skb_curtx = 0; in gfar_init_bds()
191 tx_queue->skb_dirtytx = 0; in gfar_init_bds()
194 txbdp = tx_queue->tx_bd_base; in gfar_init_bds()
195 for (j = 0; j < tx_queue->tx_ring_size; j++) { in gfar_init_bds()
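
The gfar_init_bds() fragments above (lines 185-195) reduce to a single reset pattern: restore the free-descriptor count to the full ring size and rewind both the producer (cur_tx) and consumer (dirty_tx) pointers, plus their skb indices, to the start of the ring. A minimal standalone sketch in C, with struct names and field layout simplified for illustration (not the driver's real definitions; the power-of-two ring size is assumed, as implied by the TX_RING_MOD_MASK wrap at lines 2461-2462 below):

struct txbd8 { unsigned short status, length; unsigned int bufptr; };

struct tx_q_sketch {
	struct txbd8 *tx_bd_base;   /* first descriptor of the ring      */
	struct txbd8 *cur_tx;       /* producer: next descriptor to fill */
	struct txbd8 *dirty_tx;     /* consumer: next one to reclaim     */
	void **tx_skbuff;           /* one skb slot per queued frame     */
	unsigned int tx_ring_size;  /* assumed power of two              */
	int num_txbdfree;
	unsigned short skb_curtx, skb_dirtytx;
};

static void sketch_init_tx_ring(struct tx_q_sketch *q)
{
	q->num_txbdfree = q->tx_ring_size;  /* whole ring is free again */
	q->dirty_tx = q->tx_bd_base;
	q->cur_tx = q->tx_bd_base;
	q->skb_curtx = 0;
	q->skb_dirtytx = 0;
}
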
246 struct gfar_priv_tx_q *tx_queue = NULL; in gfar_alloc_skb_resources() local
251 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; in gfar_alloc_skb_resources()
268 tx_queue = priv->tx_queue[i]; in gfar_alloc_skb_resources()
269 tx_queue->tx_bd_base = vaddr; in gfar_alloc_skb_resources()
270 tx_queue->tx_bd_dma_base = addr; in gfar_alloc_skb_resources()
271 tx_queue->dev = ndev; in gfar_alloc_skb_resources()
273 addr += sizeof(struct txbd8) * tx_queue->tx_ring_size; in gfar_alloc_skb_resources()
274 vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size; in gfar_alloc_skb_resources()
289 tx_queue = priv->tx_queue[i]; in gfar_alloc_skb_resources()
290 tx_queue->tx_skbuff = in gfar_alloc_skb_resources()
291 kmalloc_array(tx_queue->tx_ring_size, in gfar_alloc_skb_resources()
292 sizeof(*tx_queue->tx_skbuff), in gfar_alloc_skb_resources()
294 if (!tx_queue->tx_skbuff) in gfar_alloc_skb_resources()
297 for (k = 0; k < tx_queue->tx_ring_size; k++) in gfar_alloc_skb_resources()
298 tx_queue->tx_skbuff[k] = NULL; in gfar_alloc_skb_resources()
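
The gfar_alloc_skb_resources() fragments (lines 268-274) carve one DMA-coherent block into per-queue descriptor rings, bumping the CPU virtual address and the bus address in lock step; that single allocation is also why free_skb_resources() (lines 1976-1977, further down) frees through tx_queue[0] alone. A sketch of the carving loop, reusing tx_q_sketch from the previous sketch; the DMA address type here is a stand-in, not the kernel's dma_addr_t:

typedef unsigned long long dma_addr_sketch_t;   /* stand-in for dma_addr_t */

static void sketch_carve_rings(struct tx_q_sketch **queues, int num_queues,
			       void *vaddr, dma_addr_sketch_t addr)
{
	int i;

	for (i = 0; i < num_queues; i++) {
		queues[i]->tx_bd_base = vaddr;  /* CPU view of this queue's ring */
		/* the matching bus address (addr) is what later lands in the
		 * controller's base registers via gfar_init_tx_rx_base()     */
		vaddr = (char *)vaddr +
			sizeof(struct txbd8) * queues[i]->tx_ring_size;
		addr += sizeof(struct txbd8) * queues[i]->tx_ring_size;
	}
	(void)addr;
}
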
332 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base); in gfar_init_tx_rx_base()
462 if (likely(priv->tx_queue[i]->txcoalescing)) in gfar_configure_coalescing()
463 gfar_write(baddr + i, priv->tx_queue[i]->txic); in gfar_configure_coalescing()
477 if (likely(priv->tx_queue[0]->txcoalescing)) in gfar_configure_coalescing()
478 gfar_write(&regs->txic, priv->tx_queue[0]->txic); in gfar_configure_coalescing()
509 tx_bytes += priv->tx_queue[i]->stats.tx_bytes; in gfar_get_stats()
510 tx_packets += priv->tx_queue[i]->stats.tx_packets; in gfar_get_stats()
564 spin_lock(&priv->tx_queue[i]->txlock); in lock_tx_qs()
572 spin_unlock(&priv->tx_queue[i]->txlock); in unlock_tx_qs()
580 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), in gfar_alloc_tx_queues()
582 if (!priv->tx_queue[i]) in gfar_alloc_tx_queues()
585 priv->tx_queue[i]->tx_skbuff = NULL; in gfar_alloc_tx_queues()
586 priv->tx_queue[i]->qindex = i; in gfar_alloc_tx_queues()
587 priv->tx_queue[i]->dev = priv->ndev; in gfar_alloc_tx_queues()
588 spin_lock_init(&(priv->tx_queue[i]->txlock)); in gfar_alloc_tx_queues()
615 kfree(priv->tx_queue[i]); in gfar_free_tx_queues()
747 if (!grp->tx_queue) in gfar_parse_group()
748 grp->tx_queue = priv->tx_queue[i]; in gfar_parse_group()
752 priv->tx_queue[i]->grp = grp; in gfar_parse_group()
1428 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; in gfar_probe()
1429 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; in gfar_probe()
1430 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; in gfar_probe()
1431 priv->tx_queue[i]->txic = DEFAULT_TXIC; in gfar_probe()
1495 i, priv->tx_queue[i]->tx_ring_size); in gfar_probe()
1893 static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) in free_skb_tx_queue() argument
1896 struct gfar_private *priv = netdev_priv(tx_queue->dev); in free_skb_tx_queue()
1899 txbdp = tx_queue->tx_bd_base; in free_skb_tx_queue()
1901 for (i = 0; i < tx_queue->tx_ring_size; i++) { in free_skb_tx_queue()
1902 if (!tx_queue->tx_skbuff[i]) in free_skb_tx_queue()
1908 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; in free_skb_tx_queue()
1916 dev_kfree_skb_any(tx_queue->tx_skbuff[i]); in free_skb_tx_queue()
1917 tx_queue->tx_skbuff[i] = NULL; in free_skb_tx_queue()
1919 kfree(tx_queue->tx_skbuff); in free_skb_tx_queue()
1920 tx_queue->tx_skbuff = NULL; in free_skb_tx_queue()
1952 struct gfar_priv_tx_q *tx_queue = NULL; in free_skb_resources() local
1960 tx_queue = priv->tx_queue[i]; in free_skb_resources()
1961 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex); in free_skb_resources()
1962 if (tx_queue->tx_skbuff) in free_skb_resources()
1963 free_skb_tx_queue(tx_queue); in free_skb_resources()
1976 priv->tx_queue[0]->tx_bd_base, in free_skb_resources()
1977 priv->tx_queue[0]->tx_bd_dma_base); in free_skb_resources()
2248 struct gfar_priv_tx_q *tx_queue = NULL; in gfar_start_xmit() local
2261 tx_queue = priv->tx_queue[rq]; in gfar_start_xmit()
2263 base = tx_queue->tx_bd_base; in gfar_start_xmit()
2264 regs = tx_queue->grp->regs; in gfar_start_xmit()
2305 if (nr_txbds > tx_queue->num_txbdfree) { in gfar_start_xmit()
2314 tx_queue->stats.tx_bytes += bytes_sent; in gfar_start_xmit()
2317 tx_queue->stats.tx_packets++; in gfar_start_xmit()
2319 txbdp = txbdp_start = tx_queue->cur_tx; in gfar_start_xmit()
2325 tx_queue->tx_ring_size); in gfar_start_xmit()
2341 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); in gfar_start_xmit()
2448 spin_lock_irqsave(&tx_queue->txlock, flags); in gfar_start_xmit()
2456 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; in gfar_start_xmit()
2461 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & in gfar_start_xmit()
2462 TX_RING_MOD_MASK(tx_queue->tx_ring_size); in gfar_start_xmit()
2464 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); in gfar_start_xmit()
2467 tx_queue->num_txbdfree -= (nr_txbds); in gfar_start_xmit()
2472 if (!tx_queue->num_txbdfree) { in gfar_start_xmit()
2479 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); in gfar_start_xmit()
2482 spin_unlock_irqrestore(&tx_queue->txlock, flags); in gfar_start_xmit()
2487 txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size); in gfar_start_xmit()
2489 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); in gfar_start_xmit()
2500 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); in gfar_start_xmit()
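
gfar_start_xmit() (lines 2305-2482) is the producer half of the ring protocol: refuse the frame if it needs more descriptors than num_txbdfree, park the skb at skb_curtx, wrap that index with the power-of-two ring mask, and shrink the free count, which the driver does under txlock. A simplified single-threaded sketch of the bookkeeping (SKETCH_RING_MOD_MASK mirrors the driver's TX_RING_MOD_MASK, i.e. size minus one):

#define SKETCH_RING_MOD_MASK(size) ((size) - 1)  /* power-of-two rings only */

static int sketch_xmit(struct tx_q_sketch *q, void *skb, int nr_txbds)
{
	if (nr_txbds > q->num_txbdfree)
		return -1;                 /* ring full: caller stops the queue */

	q->tx_skbuff[q->skb_curtx] = skb;  /* remember the skb for reclaim */
	q->skb_curtx = (q->skb_curtx + 1) &
		       SKETCH_RING_MOD_MASK(q->tx_ring_size);
	q->num_txbdfree -= nr_txbds;       /* under txlock in the driver */
	return 0;
}
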
2601 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) in gfar_clean_tx_ring() argument
2603 struct net_device *dev = tx_queue->dev; in gfar_clean_tx_ring()
2608 struct txbd8 *base = tx_queue->tx_bd_base; in gfar_clean_tx_ring()
2611 int tx_ring_size = tx_queue->tx_ring_size; in gfar_clean_tx_ring()
2615 int tqi = tx_queue->qindex; in gfar_clean_tx_ring()
2621 bdp = tx_queue->dirty_tx; in gfar_clean_tx_ring()
2622 skb_dirtytx = tx_queue->skb_dirtytx; in gfar_clean_tx_ring()
2624 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { in gfar_clean_tx_ring()
2683 tx_queue->tx_skbuff[skb_dirtytx] = NULL; in gfar_clean_tx_ring()
2689 spin_lock_irqsave(&tx_queue->txlock, flags); in gfar_clean_tx_ring()
2690 tx_queue->num_txbdfree += nr_txbds; in gfar_clean_tx_ring()
2691 spin_unlock_irqrestore(&tx_queue->txlock, flags); in gfar_clean_tx_ring()
2695 if (tx_queue->num_txbdfree && in gfar_clean_tx_ring()
2701 tx_queue->skb_dirtytx = skb_dirtytx; in gfar_clean_tx_ring()
2702 tx_queue->dirty_tx = bdp; in gfar_clean_tx_ring()
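
gfar_clean_tx_ring() (lines 2621-2702) is the matching consumer: walk forward from skb_dirtytx while queued skbs remain, release each slot, and return the descriptors to num_txbdfree under txlock so the stack can restart a stopped queue. A sketch, again single-threaded, that skips the DMA unmap and skb free, omits the driver's hardware-completion check on each descriptor, and assumes a fixed descriptor count per frame for simplicity:

static int sketch_clean_tx_ring(struct tx_q_sketch *q, int nr_txbds)
{
	int howmany = 0;

	while (q->tx_skbuff[q->skb_dirtytx]) {
		/* the driver dma-unmaps the buffers and frees the skb with
		 * dev_kfree_skb_any() before releasing the slot            */
		q->tx_skbuff[q->skb_dirtytx] = NULL;
		q->skb_dirtytx = (q->skb_dirtytx + 1) &
				 SKETCH_RING_MOD_MASK(q->tx_ring_size);
		q->num_txbdfree += nr_txbds;   /* under txlock in the driver */
		howmany++;
	}
	return howmany;                        /* frames reclaimed this pass */
}

The gfar_poll_tx_sq() and gfar_poll_tx() fragments that follow gate this call on the same tx_skbuff[skb_dirtytx] test, so the NAPI path only enters the loop when at least one frame is still outstanding.
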
3020 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue; in gfar_poll_tx_sq() local
3029 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) in gfar_poll_tx_sq()
3030 gfar_clean_tx_ring(tx_queue); in gfar_poll_tx_sq()
3111 struct gfar_priv_tx_q *tx_queue = NULL; in gfar_poll_tx() local
3121 tx_queue = priv->tx_queue[i]; in gfar_poll_tx()
3123 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { in gfar_poll_tx()
3124 gfar_clean_tx_ring(tx_queue); in gfar_poll_tx()