Lines matching refs: rx_queue (each entry is the source line number, the matching line, and the enclosing function)
119 static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
142 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
155 static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, in gfar_init_rxbdp() argument
163 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) in gfar_init_rxbdp()
176 struct gfar_priv_rx_q *rx_queue = NULL; in gfar_init_bds() local
206 rx_queue = priv->rx_queue[i]; in gfar_init_bds()
208 rx_queue->next_to_clean = 0; in gfar_init_bds()
209 rx_queue->next_to_use = 0; in gfar_init_bds()
210 rx_queue->next_to_alloc = 0; in gfar_init_bds()
215 gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue)); in gfar_init_bds()
217 rx_queue->rfbptr = rfbptr; in gfar_init_bds()
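
The gfar_init_rxbdp() and gfar_init_bds() matches above (lines 155-217) show the ring bring-up: the three cursors (next_to_clean / next_to_use / next_to_alloc) are reset to zero, each buffer descriptor is initialized, and the descriptor at rx_bd_base + rx_ring_size - 1 is treated specially so the DMA engine wraps back to the base. Below is a minimal, compilable model of that pattern; the struct layout, flag values, and names are illustrative stand-ins, not the driver's real struct rxbd8.

    /* Build: cc -o ring ring.c */
    #include <stdio.h>
    #include <stdint.h>

    #define RING_SIZE     8       /* stands in for rx_queue->rx_ring_size */
    #define BD_FLAG_EMPTY 0x8000  /* hypothetical "owned by hardware" bit */
    #define BD_FLAG_WRAP  0x2000  /* hypothetical "last BD, wrap to base" bit */

    struct rxbd {
        uint16_t flags;
        uint32_t buf_addr;
    };

    struct rx_ring {
        struct rxbd bd[RING_SIZE];  /* rx_bd_base .. rx_bd_base + size - 1 */
        int next_to_use;            /* next BD to hand to hardware */
        int next_to_clean;          /* next BD to check for a completion */
        int next_to_alloc;          /* next slot needing a fresh buffer */
    };

    static void init_rxbd(struct rx_ring *r, struct rxbd *bd, uint32_t buf)
    {
        bd->buf_addr = buf;
        bd->flags = BD_FLAG_EMPTY;
        /* Like line 163 above: the last descriptor tells the DMA
         * engine to wrap back to the base of the ring. */
        if (bd == &r->bd[RING_SIZE - 1])
            bd->flags |= BD_FLAG_WRAP;
    }

    int main(void)
    {
        struct rx_ring r = { .next_to_use = 0, .next_to_clean = 0,
                             .next_to_alloc = 0 };

        for (int i = 0; i < RING_SIZE; i++)
            init_rxbd(&r, &r.bd[i], 0x1000u * (i + 1));
        for (int i = 0; i < RING_SIZE; i++)
            printf("bd[%d] flags=0x%04x%s\n", i, (unsigned)r.bd[i].flags,
                   (r.bd[i].flags & BD_FLAG_WRAP) ? " (wrap)" : "");
        return 0;
    }
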
230 struct gfar_priv_rx_q *rx_queue = NULL; in gfar_alloc_skb_resources() local
238 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; in gfar_alloc_skb_resources()
262 rx_queue = priv->rx_queue[i]; in gfar_alloc_skb_resources()
263 rx_queue->rx_bd_base = vaddr; in gfar_alloc_skb_resources()
264 rx_queue->rx_bd_dma_base = addr; in gfar_alloc_skb_resources()
265 rx_queue->ndev = ndev; in gfar_alloc_skb_resources()
266 rx_queue->dev = dev; in gfar_alloc_skb_resources()
267 addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; in gfar_alloc_skb_resources()
268 vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; in gfar_alloc_skb_resources()
286 rx_queue = priv->rx_queue[i]; in gfar_alloc_skb_resources()
287 rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size, in gfar_alloc_skb_resources()
288 sizeof(*rx_queue->rx_buff), in gfar_alloc_skb_resources()
290 if (!rx_queue->rx_buff) in gfar_alloc_skb_resources()
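
The gfar_alloc_skb_resources() matches (230-290) show one coherent DMA block being carved into per-queue descriptor rings: both the CPU pointer (vaddr) and the bus address (addr) advance in lockstep by sizeof(struct rxbd8) * rx_ring_size per queue, and each queue also gets a kcalloc'd rx_buff shadow array with one entry per descriptor. A simplified userspace sketch of that carving follows, with plain calloc in place of dma_alloc_coherent() and the bus address pretended equal to the CPU address.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    #define NUM_QUEUES 2
    #define RING_SIZE  8

    struct rxbd { uint16_t flags; uint32_t buf; };

    struct rx_queue {
        struct rxbd *rx_bd_base;   /* CPU view of this queue's BD ring */
        uintptr_t rx_bd_dma_base;  /* device (bus) view of the same ring */
        void **rx_buff;            /* shadow array, one slot per BD */
    };

    int main(void)
    {
        struct rx_queue q[NUM_QUEUES];
        size_t total = (size_t)NUM_QUEUES * RING_SIZE * sizeof(struct rxbd);
        /* Stand-in for dma_alloc_coherent(): one block, one bus address. */
        void *vaddr = calloc(1, total);
        uintptr_t addr = (uintptr_t)vaddr; /* pretend bus == CPU address */

        if (!vaddr)
            return 1;
        for (int i = 0; i < NUM_QUEUES; i++) {
            q[i].rx_bd_base = vaddr;
            q[i].rx_bd_dma_base = addr;
            /* Advance both views by this queue's slice, as at 267-268. */
            addr += sizeof(struct rxbd) * RING_SIZE;
            vaddr = (char *)vaddr + sizeof(struct rxbd) * RING_SIZE;
            /* Shadow bookkeeping array, as at 287-290. */
            q[i].rx_buff = calloc(RING_SIZE, sizeof(*q[i].rx_buff));
            if (!q[i].rx_buff)
                return 1;
            printf("queue %d: bd ring at %p, bus 0x%lx\n",
                   i, (void *)q[i].rx_bd_base,
                   (unsigned long)q[i].rx_bd_dma_base);
        }
        for (int i = 0; i < NUM_QUEUES; i++)
            free(q[i].rx_buff);
        free(q[0].rx_bd_base); /* the single block, freed once */
        return 0;
    }
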
317 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base); in gfar_init_tx_rx_base()
330 gfar_write(baddr, priv->rx_queue[i]->rx_ring_size | in gfar_init_rqprm()
436 if (likely(priv->rx_queue[i]->rxcoalescing)) in gfar_configure_coalescing()
437 gfar_write(baddr + i, priv->rx_queue[i]->rxic); in gfar_configure_coalescing()
448 if (unlikely(priv->rx_queue[0]->rxcoalescing)) in gfar_configure_coalescing()
449 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic); in gfar_configure_coalescing()
466 rx_packets += priv->rx_queue[i]->stats.rx_packets; in gfar_get_stats()
467 rx_bytes += priv->rx_queue[i]->stats.rx_bytes; in gfar_get_stats()
468 rx_dropped += priv->rx_queue[i]->stats.rx_dropped; in gfar_get_stats()
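
The gfar_get_stats() matches (466-468) show that packet, byte, and drop counters live per RX queue and are only summed when the stack asks for device-wide totals. A trivial standalone illustration of that aggregation, with made-up numbers:

    #include <stdio.h>

    #define NUM_QUEUES 4

    struct rx_stats { unsigned long rx_packets, rx_bytes, rx_dropped; };

    int main(void)
    {
        /* Each RX queue keeps its own counters (hypothetical values). */
        struct rx_stats per_q[NUM_QUEUES] = {
            { 10, 1500, 0 }, { 7, 900, 1 }, { 0, 0, 0 }, { 3, 128, 0 },
        };
        struct rx_stats total = { 0, 0, 0 };

        /* Totals are computed on demand, as in gfar_get_stats(). */
        for (int i = 0; i < NUM_QUEUES; i++) {
            total.rx_packets += per_q[i].rx_packets;
            total.rx_bytes   += per_q[i].rx_bytes;
            total.rx_dropped += per_q[i].rx_dropped;
        }
        printf("rx_packets=%lu rx_bytes=%lu rx_dropped=%lu\n",
               total.rx_packets, total.rx_bytes, total.rx_dropped);
        return 0;
    }
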
558 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), in gfar_alloc_rx_queues()
560 if (!priv->rx_queue[i]) in gfar_alloc_rx_queues()
563 priv->rx_queue[i]->qindex = i; in gfar_alloc_rx_queues()
564 priv->rx_queue[i]->ndev = priv->ndev; in gfar_alloc_rx_queues()
582 kfree(priv->rx_queue[i]); in gfar_free_rx_queues()
697 if (!grp->rx_queue) in gfar_parse_group()
698 grp->rx_queue = priv->rx_queue[i]; in gfar_parse_group()
702 priv->rx_queue[i]->grp = grp; in gfar_parse_group()
1395 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; in gfar_probe()
1396 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; in gfar_probe()
1397 priv->rx_queue[i]->rxic = DEFAULT_RXIC; in gfar_probe()
1458 i, priv->rx_queue[i]->rx_ring_size); in gfar_probe()
1531 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex; in gfar_filer_config_wol()
1983 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) in free_skb_rx_queue() argument
1987 struct rxbd8 *rxbdp = rx_queue->rx_bd_base; in free_skb_rx_queue()
1989 if (rx_queue->skb) in free_skb_rx_queue()
1990 dev_kfree_skb(rx_queue->skb); in free_skb_rx_queue()
1992 for (i = 0; i < rx_queue->rx_ring_size; i++) { in free_skb_rx_queue()
1993 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i]; in free_skb_rx_queue()
2002 dma_unmap_single(rx_queue->dev, rxb->dma, in free_skb_rx_queue()
2009 kfree(rx_queue->rx_buff); in free_skb_rx_queue()
2010 rx_queue->rx_buff = NULL; in free_skb_rx_queue()
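
The free_skb_rx_queue() matches (1983-2010) show teardown running in a fixed order: any half-assembled skb carried in rx_queue->skb is dropped first, then every still-mapped buffer is unmapped and released, and finally the rx_buff shadow array itself is freed and NULLed. A sketch of that ordering with stand-in types, where malloc/free replace the skb and DMA mapping APIs:

    #include <stdio.h>
    #include <stdlib.h>

    #define RING_SIZE 4

    struct rx_buff { void *page; };

    struct rx_queue {
        void *skb;                 /* partially assembled packet, if any */
        struct rx_buff *rx_buff;   /* shadow array, one entry per BD */
    };

    static void free_rx_queue(struct rx_queue *q)
    {
        /* 1. Drop the in-progress packet left from the last poll. */
        free(q->skb);
        q->skb = NULL;
        /* 2. Release every buffer (the driver also calls
         *    dma_unmap_single() here before freeing the page). */
        for (int i = 0; i < RING_SIZE; i++) {
            free(q->rx_buff[i].page);
            q->rx_buff[i].page = NULL;
        }
        /* 3. Free the shadow array and mark the queue torn down. */
        free(q->rx_buff);
        q->rx_buff = NULL;
    }

    int main(void)
    {
        struct rx_queue q = {
            .skb = malloc(64),
            .rx_buff = calloc(RING_SIZE, sizeof(struct rx_buff)),
        };

        if (!q.rx_buff)
            return 1;
        for (int i = 0; i < RING_SIZE; i++)
            q.rx_buff[i].page = malloc(4096);
        free_rx_queue(&q);
        puts("queue torn down");
        return 0;
    }
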
2019 struct gfar_priv_rx_q *rx_queue = NULL; in free_skb_resources() local
2034 rx_queue = priv->rx_queue[i]; in free_skb_resources()
2035 if (rx_queue->rx_buff) in free_skb_resources()
2036 free_skb_rx_queue(rx_queue); in free_skb_resources()
2785 static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue) in gfar_rx_alloc_err() argument
2787 struct gfar_private *priv = netdev_priv(rx_queue->ndev); in gfar_rx_alloc_err()
2790 netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n"); in gfar_rx_alloc_err()
2794 static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue, in gfar_alloc_rx_buffs() argument
2801 i = rx_queue->next_to_use; in gfar_alloc_rx_buffs()
2802 bdp = &rx_queue->rx_bd_base[i]; in gfar_alloc_rx_buffs()
2803 rxb = &rx_queue->rx_buff[i]; in gfar_alloc_rx_buffs()
2808 if (unlikely(!gfar_new_page(rx_queue, rxb))) { in gfar_alloc_rx_buffs()
2809 gfar_rx_alloc_err(rx_queue); in gfar_alloc_rx_buffs()
2815 gfar_init_rxbdp(rx_queue, bdp, in gfar_alloc_rx_buffs()
2822 if (unlikely(++i == rx_queue->rx_ring_size)) { in gfar_alloc_rx_buffs()
2824 bdp = rx_queue->rx_bd_base; in gfar_alloc_rx_buffs()
2825 rxb = rx_queue->rx_buff; in gfar_alloc_rx_buffs()
2829 rx_queue->next_to_use = i; in gfar_alloc_rx_buffs()
2830 rx_queue->next_to_alloc = i; in gfar_alloc_rx_buffs()
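
The gfar_alloc_rx_buffs() matches (2794-2830) show the refill loop: it starts at next_to_use, bails out early if a page cannot be allocated (gfar_rx_alloc_err() just counts the failure, and the short ring is retried on a later poll), wraps the index back to the base at rx_ring_size, and leaves next_to_use and next_to_alloc pointing at the first unfilled slot. A compilable model of that loop, with malloc standing in for gfar_new_page():

    #include <stdio.h>
    #include <stdlib.h>

    #define RING_SIZE 8

    struct rxbd { void *buf; };
    struct rx_buff { void *page; };

    struct rx_queue {
        struct rxbd bd[RING_SIZE];
        struct rx_buff rx_buff[RING_SIZE];
        int next_to_use;
        int next_to_alloc;
    };

    /* Refill up to alloc_cnt descriptors; stop early on alloc failure. */
    static void alloc_rx_buffs(struct rx_queue *q, int alloc_cnt)
    {
        int i = q->next_to_use;

        while (alloc_cnt--) {
            void *page = malloc(4096); /* stand-in for gfar_new_page() */
            if (!page)
                break; /* leave the ring short; retried on the next poll */
            q->rx_buff[i].page = page;
            q->bd[i].buf = page;
            if (++i == RING_SIZE) /* wrap at the end of the ring */
                i = 0;
        }
        q->next_to_use = i;
        q->next_to_alloc = i;
    }

    int main(void)
    {
        struct rx_queue q = { .next_to_use = 6, .next_to_alloc = 6 };

        alloc_rx_buffs(&q, 5);  /* fills 6, 7, then wraps to 0, 1, 2 */
        printf("next_to_use=%d\n", q.next_to_use); /* prints 3 */
        return 0;   /* demo only: pages are reclaimed at exit */
    }
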
2974 static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue, in gfar_get_next_rxbuff() argument
2977 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean]; in gfar_get_next_rxbuff()
2986 gfar_rx_alloc_err(rx_queue); in gfar_get_next_rxbuff()
2993 dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset, in gfar_get_next_rxbuff()
2998 gfar_reuse_rx_page(rx_queue, rxb); in gfar_get_next_rxbuff()
3001 dma_unmap_page(rx_queue->dev, rxb->dma, in gfar_get_next_rxbuff()
3071 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) in gfar_clean_rx_ring() argument
3073 struct net_device *ndev = rx_queue->ndev; in gfar_clean_rx_ring()
3077 struct sk_buff *skb = rx_queue->skb; in gfar_clean_rx_ring()
3078 int cleaned_cnt = gfar_rxbd_unused(rx_queue); in gfar_clean_rx_ring()
3082 i = rx_queue->next_to_clean; in gfar_clean_rx_ring()
3088 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt); in gfar_clean_rx_ring()
3092 bdp = &rx_queue->rx_bd_base[i]; in gfar_clean_rx_ring()
3101 skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb); in gfar_clean_rx_ring()
3108 if (unlikely(++i == rx_queue->rx_ring_size)) in gfar_clean_rx_ring()
3111 rx_queue->next_to_clean = i; in gfar_clean_rx_ring()
3123 rx_queue->stats.rx_dropped++; in gfar_clean_rx_ring()
3131 skb_record_rx_queue(skb, rx_queue->qindex); in gfar_clean_rx_ring()
3136 napi_gro_receive(&rx_queue->grp->napi_rx, skb); in gfar_clean_rx_ring()
3142 rx_queue->skb = skb; in gfar_clean_rx_ring()
3144 rx_queue->stats.rx_packets += total_pkts; in gfar_clean_rx_ring()
3145 rx_queue->stats.rx_bytes += total_bytes; in gfar_clean_rx_ring()
3148 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt); in gfar_clean_rx_ring()
3152 u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue); in gfar_clean_rx_ring()
3154 gfar_write(rx_queue->rfbptr, bdp_dma); in gfar_clean_rx_ring()
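
The gfar_clean_rx_ring() matches (3071-3154) outline the NAPI receive loop: start at next_to_clean, refill consumed descriptors in batches once enough have accumulated, stop when the budget is spent or the hardware still owns the next descriptor, stash a partially built skb in rx_queue->skb across calls, and account packets/bytes at the end. A condensed, runnable model of that control flow; REFILL_BATCH and bd_ready() are illustrative stand-ins, not driver names:

    #include <stdio.h>

    #define RING_SIZE    16
    #define REFILL_BATCH 8  /* stand-in for the driver's batch threshold */

    struct rx_queue {
        int next_to_clean;
        unsigned long rx_packets, rx_bytes;
    };

    /* Pretend the first 10 descriptors hold completed 64-byte frames. */
    static int bd_ready(int i) { return i < 10; }

    static int clean_rx_ring(struct rx_queue *q, int budget, int cleaned_cnt)
    {
        int i = q->next_to_clean, howmany = 0;

        while (howmany < budget) {
            if (cleaned_cnt >= REFILL_BATCH) {
                /* batch-refill the descriptors already consumed */
                printf("refill %d descriptors\n", cleaned_cnt);
                cleaned_cnt = 0;
            }
            if (!bd_ready(i))
                break; /* hardware owns the next descriptor: stop */
            howmany++;
            cleaned_cnt++;
            q->rx_packets++;
            q->rx_bytes += 64;
            if (++i == RING_SIZE)
                i = 0;
        }
        q->next_to_clean = i;
        if (cleaned_cnt)
            printf("final refill of %d descriptors\n", cleaned_cnt);
        return howmany; /* work done, reported back to NAPI */
    }

    int main(void)
    {
        struct rx_queue q = { .next_to_clean = 0 };
        int done = clean_rx_ring(&q, 32, 0);

        printf("work_done=%d packets=%lu bytes=%lu\n",
               done, q.rx_packets, q.rx_bytes);
        return 0;
    }
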
3165 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue; in gfar_poll_rx_sq() local
3173 work_done = gfar_clean_rx_ring(rx_queue, budget); in gfar_poll_rx_sq()
3225 struct gfar_priv_rx_q *rx_queue = NULL; in gfar_poll_rx() local
3247 rx_queue = priv->rx_queue[i]; in gfar_poll_rx()
3249 gfar_clean_rx_ring(rx_queue, budget_per_q); in gfar_poll_rx()
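
The gfar_poll_rx() matches (3225-3249) show one NAPI context servicing several RX queues, so the poll budget must be split into a per-queue share before each gfar_clean_rx_ring() call. A minimal sketch of one such split policy, rounding up so the budget is not under-used; the exact division in the driver (and its tracking of queues with leftover work) may differ:

    #include <stdio.h>

    #define NUM_QUEUES 3

    int main(void)
    {
        int budget = 64;
        /* Ceiling division: 64 across 3 queues gives each a share of 22. */
        int budget_per_q = (budget + NUM_QUEUES - 1) / NUM_QUEUES;

        for (int i = 0; i < NUM_QUEUES; i++)
            printf("queue %d polled with budget %d\n", i, budget_per_q);
        return 0;
    }
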
3669 struct gfar_priv_rx_q *rx_queue = NULL; in gfar_update_link_state() local
3729 rx_queue = priv->rx_queue[i]; in gfar_update_link_state()
3730 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue); in gfar_update_link_state()
3731 gfar_write(rx_queue->rfbptr, bdp_dma); in gfar_update_link_state()