rxq               138 arch/arm/mach-ixp4xx/fsg-setup.c 		.rxq		= 3,
rxq               142 arch/arm/mach-ixp4xx/fsg-setup.c 		.rxq		= 4,
rxq               278 arch/arm/mach-ixp4xx/goramo_mlr.c 		.rxq		= 3,
rxq               282 arch/arm/mach-ixp4xx/goramo_mlr.c 		.rxq		= 4,
rxq               102 arch/arm/mach-ixp4xx/include/mach/platform.h 	u8 rxq;		/* configurable, currently 0 - 31 only */
rxq               193 arch/arm/mach-ixp4xx/ixdp425-setup.c 		.rxq		= 3,
rxq               197 arch/arm/mach-ixp4xx/ixdp425-setup.c 		.rxq		= 4,
rxq               171 arch/arm/mach-ixp4xx/nas100d-setup.c 		.rxq		= 3,
rxq               191 arch/arm/mach-ixp4xx/nslu2-setup.c 		.rxq		= 3,
rxq               176 arch/arm/mach-ixp4xx/omixp-setup.c 		.rxq		= 3,
rxq               180 arch/arm/mach-ixp4xx/omixp-setup.c 		.rxq		= 4,
rxq               130 arch/arm/mach-ixp4xx/vulcan-setup.c 		.rxq		= 3,
rxq               135 arch/arm/mach-ixp4xx/vulcan-setup.c 		.rxq		= 4,
rxq               673 drivers/atm/ambassador.c   amb_rxq * rxq = &dev->rxq[pool];
rxq               678 drivers/atm/ambassador.c   spin_lock_irqsave (&rxq->lock, flags);
rxq               680 drivers/atm/ambassador.c   if (rxq->pending < rxq->maximum) {
rxq               681 drivers/atm/ambassador.c     PRINTD (DBG_RX, "RX in slot %p", rxq->in.ptr);
rxq               683 drivers/atm/ambassador.c     *rxq->in.ptr = *rx;
rxq               684 drivers/atm/ambassador.c     rxq->pending++;
rxq               685 drivers/atm/ambassador.c     rxq->in.ptr = NEXTQ (rxq->in.ptr, rxq->in.start, rxq->in.limit);
rxq               687 drivers/atm/ambassador.c     wr_mem (dev, offsetof(amb_mem, mb.adapter.rx_address[pool]), virt_to_bus (rxq->in.ptr));
rxq               689 drivers/atm/ambassador.c     spin_unlock_irqrestore (&rxq->lock, flags);
rxq               692 drivers/atm/ambassador.c     spin_unlock_irqrestore (&rxq->lock, flags);
rxq               698 drivers/atm/ambassador.c   amb_rxq * rxq = &dev->rxq[pool];
rxq               703 drivers/atm/ambassador.c   spin_lock_irqsave (&rxq->lock, flags);
rxq               705 drivers/atm/ambassador.c   if (rxq->pending && (rxq->out.ptr->status || rxq->out.ptr->length)) {
rxq               707 drivers/atm/ambassador.c     rx_complete (dev, rxq->out.ptr);
rxq               709 drivers/atm/ambassador.c     rxq->out.ptr->status = 0;
rxq               710 drivers/atm/ambassador.c     rxq->out.ptr->length = 0;
rxq               712 drivers/atm/ambassador.c     rxq->pending--;
rxq               713 drivers/atm/ambassador.c     rxq->out.ptr = NEXTQ (rxq->out.ptr, rxq->out.start, rxq->out.limit);
rxq               715 drivers/atm/ambassador.c     if (rxq->pending < rxq->low)
rxq               716 drivers/atm/ambassador.c       rxq->low = rxq->pending;
rxq               717 drivers/atm/ambassador.c     spin_unlock_irqrestore (&rxq->lock, flags);
rxq               720 drivers/atm/ambassador.c     if (!rxq->pending && rxq->buffers_wanted)
rxq               721 drivers/atm/ambassador.c       rxq->emptied++;
rxq               722 drivers/atm/ambassador.c     spin_unlock_irqrestore (&rxq->lock, flags);
rxq               731 drivers/atm/ambassador.c   amb_rxq * rxq = &dev->rxq[pool];
rxq               741 drivers/atm/ambassador.c   if (rxq->pending > rxq->buffers_wanted) {
rxq               748 drivers/atm/ambassador.c     while (rxq->pending > rxq->buffers_wanted)
rxq               769 drivers/atm/ambassador.c   amb_rxq * rxq;
rxq               776 drivers/atm/ambassador.c   rxq = &dev->rxq[pool];
rxq               777 drivers/atm/ambassador.c   while (rxq->pending < rxq->maximum && rxq->pending < rxq->buffers_wanted) {
rxq               779 drivers/atm/ambassador.c     struct sk_buff * skb = alloc_skb (rxq->buffer_size, priority);
rxq              1092 drivers/atm/ambassador.c       if ((unsigned int) rxtp->max_sdu <= dev->rxq[pool].buffer_size) {
rxq              1094 drivers/atm/ambassador.c 		pool, rxtp->max_sdu, dev->rxq[pool].buffer_size);
rxq              1178 drivers/atm/ambassador.c     if (!dev->rxq[pool].buffers_wanted)
rxq              1179 drivers/atm/ambassador.c       dev->rxq[pool].buffers_wanted = rx_lats;
rxq              1180 drivers/atm/ambassador.c     dev->rxq[pool].buffers_wanted += 1;
rxq              1274 drivers/atm/ambassador.c     dev->rxq[pool].buffers_wanted -= 1;
rxq              1275 drivers/atm/ambassador.c     if (dev->rxq[pool].buffers_wanted == rx_lats) {
rxq              1276 drivers/atm/ambassador.c       dev->rxq[pool].buffers_wanted = 0;
rxq              1443 drivers/atm/ambassador.c       amb_rxq * r = &dev->rxq[pool];
rxq              1454 drivers/atm/ambassador.c       amb_rxq * r = &dev->rxq[pool];
rxq              1575 drivers/atm/ambassador.c     amb_rxq * rxq = &dev->rxq[pool];
rxq              1577 drivers/atm/ambassador.c     rxq->buffer_size = rx_buffer_sizes[pool];
rxq              1578 drivers/atm/ambassador.c     rxq->buffers_wanted = 0;
rxq              1580 drivers/atm/ambassador.c     rxq->pending = 0;
rxq              1581 drivers/atm/ambassador.c     rxq->low = rxs[pool] - 1;
rxq              1582 drivers/atm/ambassador.c     rxq->emptied = 0;
rxq              1583 drivers/atm/ambassador.c     rxq->maximum = rxs[pool] - 1;
rxq              1585 drivers/atm/ambassador.c     rxq->in.start = in;
rxq              1586 drivers/atm/ambassador.c     rxq->in.ptr = in;
rxq              1587 drivers/atm/ambassador.c     rxq->in.limit = in + rxs[pool];
rxq              1589 drivers/atm/ambassador.c     memory = rxq->in.limit;
rxq              1592 drivers/atm/ambassador.c     rxq->out.start = out;
rxq              1593 drivers/atm/ambassador.c     rxq->out.ptr = out;
rxq              1594 drivers/atm/ambassador.c     rxq->out.limit = out + rxs[pool];
rxq              1596 drivers/atm/ambassador.c     memory = rxq->out.limit;
rxq              1988 drivers/atm/ambassador.c     a.rec_struct[pool].buffer_start = bus_addr (dev->rxq[pool].in.start);
rxq              1989 drivers/atm/ambassador.c     a.rec_struct[pool].buffer_end   = bus_addr (dev->rxq[pool].in.limit);
rxq              1990 drivers/atm/ambassador.c     a.rec_struct[pool].rx_start     = bus_addr (dev->rxq[pool].out.start);
rxq              1991 drivers/atm/ambassador.c     a.rec_struct[pool].rx_end       = bus_addr (dev->rxq[pool].out.limit);
rxq              1992 drivers/atm/ambassador.c     a.rec_struct[pool].buffer_size = cpu_to_be32 (dev->rxq[pool].buffer_size);
rxq              2155 drivers/atm/ambassador.c 	spin_lock_init (&dev->rxq[pool].lock);
rxq               620 drivers/atm/ambassador.h   amb_rxq          rxq[NUM_RX_POOLS];
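The ambassador.c hits implement a classic producer/consumer descriptor ring: a spinlock-protected "in" queue with pending/maximum accounting and wrap-around stepping. A minimal sketch of that discipline, with illustrative names rather than the driver's types:

    #include <linux/errno.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct ring {
        spinlock_t lock;
        unsigned int pending, maximum;
        u32 *in_ptr, *in_start, *in_limit;  /* slots live in [start, limit) */
    };

    /* advance with wrap-around, as the driver's NEXTQ macro does */
    #define NEXTQ(p, start, limit) ((p) + 1 == (limit) ? (start) : (p) + 1)

    static int ring_give(struct ring *r, u32 desc)
    {
        unsigned long flags;
        int ret = -EBUSY;

        spin_lock_irqsave(&r->lock, flags);
        if (r->pending < r->maximum) {      /* room left in the ring */
            *r->in_ptr = desc;              /* publish the descriptor */
            r->pending++;
            r->in_ptr = NEXTQ(r->in_ptr, r->in_start, r->in_limit);
            ret = 0;                        /* caller then kicks the adapter */
        }
        spin_unlock_irqrestore(&r->lock, flags);
        return ret;
    }
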
rxq              1089 drivers/atm/fore200e.c     struct host_rxq*        rxq = &fore200e->host_rxq;
rxq              1096 drivers/atm/fore200e.c 	entry = &rxq->host_entry[ rxq->head ];
rxq              1127 drivers/atm/fore200e.c 	FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
rxq              2138 drivers/atm/fore200e.c     struct host_rxq*     rxq =  &fore200e->host_rxq;
rxq              2146 drivers/atm/fore200e.c 				       &rxq->status,
rxq              2155 drivers/atm/fore200e.c 				       &rxq->rpd,
rxq              2160 drivers/atm/fore200e.c 	fore200e_dma_chunk_free(fore200e, &rxq->status);
rxq              2170 drivers/atm/fore200e.c 	rxq->host_entry[ i ].status = 
rxq              2171 drivers/atm/fore200e.c 	                     FORE200E_INDEX(rxq->status.align_addr, enum status, i);
rxq              2172 drivers/atm/fore200e.c 	rxq->host_entry[ i ].rpd = 
rxq              2173 drivers/atm/fore200e.c 	                     FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
rxq              2174 drivers/atm/fore200e.c 	rxq->host_entry[ i ].rpd_dma = 
rxq              2175 drivers/atm/fore200e.c 	                     FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
rxq              2176 drivers/atm/fore200e.c 	rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
rxq              2178 drivers/atm/fore200e.c 	*rxq->host_entry[ i ].status = STATUS_FREE;
rxq              2180 drivers/atm/fore200e.c 	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i), 
rxq              2183 drivers/atm/fore200e.c 	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
rxq              2188 drivers/atm/fore200e.c     rxq->head = 0;
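The fore200e init hits carve one status word and one receive descriptor per host entry out of two DMA-able arrays, then mark every slot free. A self-contained sketch of that slot indexing (names illustrative, not the driver's FORE200E_INDEX machinery):

    #include <stddef.h>

    enum status { STATUS_FREE = 0, STATUS_COMPLETE = 1 };

    struct host_entry {
        enum status *status;    /* points into the shared status array */
        void *rpd;              /* points into the descriptor array */
    };

    static void rx_entries_init(struct host_entry *e, enum status *status_base,
                                char *rpd_base, size_t rpd_size, int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            e[i].status = &status_base[i];
            e[i].rpd = rpd_base + i * rpd_size;
            *e[i].status = STATUS_FREE;     /* slot handed to the adapter */
        }
    }
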
rxq               107 drivers/infiniband/hw/cxgb3/iwch_cm.c static struct sk_buff_head rxq;
rxq              2173 drivers/infiniband/hw/cxgb3/iwch_cm.c 	while ((skb = skb_dequeue(&rxq))) {
rxq              2204 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_queue_tail(&rxq, skb);
rxq              2245 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_queue_head_init(&rxq);
rxq               140 drivers/infiniband/hw/cxgb4/cm.c static struct sk_buff_head rxq;
rxq              4299 drivers/infiniband/hw/cxgb4/cm.c 	while ((skb = skb_dequeue(&rxq))) {
rxq              4353 drivers/infiniband/hw/cxgb4/cm.c 	skb_queue_tail(&rxq, skb);
rxq              4455 drivers/infiniband/hw/cxgb4/cm.c 	skb_queue_head_init(&rxq);
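Both the cxgb3 and cxgb4 CM hits use a single file-scope sk_buff_head as a deferral queue: the hot path appends with skb_queue_tail() and a worker drains with skb_dequeue(). Those helpers take the queue's internal lock, so producer and consumer need no extra serialization. A minimal sketch (process_one() is a hypothetical stand-in for the drivers' CPL demux):

    #include <linux/init.h>
    #include <linux/skbuff.h>
    #include <linux/workqueue.h>

    static struct sk_buff_head rxq;

    static void process_one(struct sk_buff *skb);   /* hypothetical handler */

    static void process_work(struct work_struct *work)
    {
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&rxq)))   /* drains until empty */
            process_one(skb);
    }
    static DECLARE_WORK(skb_work, process_work);

    static int sched_skb(struct sk_buff *skb)   /* called from the hot path */
    {
        skb_queue_tail(&rxq, skb);
        schedule_work(&skb_work);
        return 0;
    }

    static int __init demux_init(void)
    {
        skb_queue_head_init(&rxq);  /* as both drivers do at module init */
        return 0;
    }
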
rxq               134 drivers/infiniband/hw/hfi1/vnic.h 	struct hfi1_vnic_rx_queue rxq[HFI1_NUM_VNIC_CTXT];
rxq               438 drivers/infiniband/hw/hfi1/vnic_main.c static inline int hfi1_vnic_decap_skb(struct hfi1_vnic_rx_queue *rxq,
rxq               441 drivers/infiniband/hw/hfi1/vnic_main.c 	struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;
rxq               449 drivers/infiniband/hw/hfi1/vnic_main.c 		vinfo->stats[rxq->idx].rx_oversize++;
rxq               451 drivers/infiniband/hw/hfi1/vnic_main.c 		vinfo->stats[rxq->idx].rx_runt++;
rxq               457 drivers/infiniband/hw/hfi1/vnic_main.c static inline struct sk_buff *hfi1_vnic_get_skb(struct hfi1_vnic_rx_queue *rxq)
rxq               462 drivers/infiniband/hw/hfi1/vnic_main.c 	skb = skb_dequeue(&rxq->skbq);
rxq               475 drivers/infiniband/hw/hfi1/vnic_main.c static void hfi1_vnic_handle_rx(struct hfi1_vnic_rx_queue *rxq,
rxq               478 drivers/infiniband/hw/hfi1/vnic_main.c 	struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;
rxq               486 drivers/infiniband/hw/hfi1/vnic_main.c 		skb = hfi1_vnic_get_skb(rxq);
rxq               490 drivers/infiniband/hw/hfi1/vnic_main.c 		rc = hfi1_vnic_decap_skb(rxq, skb);
rxq               492 drivers/infiniband/hw/hfi1/vnic_main.c 		hfi1_vnic_update_rx_counters(vinfo, rxq->idx, skb, rc);
rxq               499 drivers/infiniband/hw/hfi1/vnic_main.c 		skb->protocol = eth_type_trans(skb, rxq->netdev);
rxq               501 drivers/infiniband/hw/hfi1/vnic_main.c 		napi_gro_receive(&rxq->napi, skb);
rxq               509 drivers/infiniband/hw/hfi1/vnic_main.c 	struct hfi1_vnic_rx_queue *rxq = container_of(napi,
rxq               511 drivers/infiniband/hw/hfi1/vnic_main.c 	struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;
rxq               514 drivers/infiniband/hw/hfi1/vnic_main.c 	v_dbg("napi %d budget %d\n", rxq->idx, budget);
rxq               515 drivers/infiniband/hw/hfi1/vnic_main.c 	hfi1_vnic_handle_rx(rxq, &work_done, budget);
rxq               517 drivers/infiniband/hw/hfi1/vnic_main.c 	v_dbg("napi %d work_done %d\n", rxq->idx, work_done);
rxq               528 drivers/infiniband/hw/hfi1/vnic_main.c 	struct hfi1_vnic_rx_queue *rxq;
rxq               563 drivers/infiniband/hw/hfi1/vnic_main.c 	rxq = &vinfo->rxq[q_idx];
rxq               566 drivers/infiniband/hw/hfi1/vnic_main.c 		skb_queue_purge(&rxq->skbq);
rxq               570 drivers/infiniband/hw/hfi1/vnic_main.c 	if (unlikely(skb_queue_len(&rxq->skbq) > HFI1_VNIC_RCV_Q_SIZE)) {
rxq               583 drivers/infiniband/hw/hfi1/vnic_main.c 	skb_queue_tail(&rxq->skbq, skb);
rxq               585 drivers/infiniband/hw/hfi1/vnic_main.c 	if (napi_schedule_prep(&rxq->napi)) {
rxq               587 drivers/infiniband/hw/hfi1/vnic_main.c 		__napi_schedule(&rxq->napi);
rxq               606 drivers/infiniband/hw/hfi1/vnic_main.c 		struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i];
rxq               608 drivers/infiniband/hw/hfi1/vnic_main.c 		skb_queue_head_init(&rxq->skbq);
rxq               609 drivers/infiniband/hw/hfi1/vnic_main.c 		napi_enable(&rxq->napi);
rxq               634 drivers/infiniband/hw/hfi1/vnic_main.c 		struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i];
rxq               636 drivers/infiniband/hw/hfi1/vnic_main.c 		napi_disable(&rxq->napi);
rxq               637 drivers/infiniband/hw/hfi1/vnic_main.c 		skb_queue_purge(&rxq->skbq);
rxq               839 drivers/infiniband/hw/hfi1/vnic_main.c 		struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i];
rxq               841 drivers/infiniband/hw/hfi1/vnic_main.c 		rxq->idx = i;
rxq               842 drivers/infiniband/hw/hfi1/vnic_main.c 		rxq->vinfo = vinfo;
rxq               843 drivers/infiniband/hw/hfi1/vnic_main.c 		rxq->netdev = netdev;
rxq               844 drivers/infiniband/hw/hfi1/vnic_main.c 		netif_napi_add(netdev, &rxq->napi, hfi1_vnic_napi, 64);
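The hfi1 vnic hits pair a per-queue skb list with a NAPI context: the receive path queues skbs and calls napi_schedule_prep()/__napi_schedule(), while the poll function dequeues up to budget and feeds GRO. A cut-down sketch of that shape (signatures match this listing's era, where netif_napi_add() still took a weight):

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    struct vrxq {                       /* illustrative, not the driver's */
        struct sk_buff_head skbq;
        struct napi_struct napi;
        struct net_device *netdev;
    };

    static int vrxq_poll(struct napi_struct *napi, int budget)
    {
        struct vrxq *rxq = container_of(napi, struct vrxq, napi);
        struct sk_buff *skb;
        int work_done = 0;

        while (work_done < budget && (skb = skb_dequeue(&rxq->skbq))) {
            skb->protocol = eth_type_trans(skb, rxq->netdev);
            napi_gro_receive(&rxq->napi, skb);
            work_done++;
        }
        if (work_done < budget)         /* queue drained under budget */
            napi_complete_done(napi, work_done);
        return work_done;
    }

    static void vrxq_setup(struct vrxq *rxq, struct net_device *netdev)
    {
        rxq->netdev = netdev;
        skb_queue_head_init(&rxq->skbq);
        netif_napi_add(netdev, &rxq->napi, vrxq_poll, 64);
    }
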
rxq               105 drivers/mailbox/omap-mailbox.c 	struct omap_mbox_queue	*rxq;
rxq               290 drivers/mailbox/omap-mailbox.c 	struct omap_mbox_queue *mq = mbox->rxq;
rxq               310 drivers/mailbox/omap-mailbox.c 	schedule_work(&mbox->rxq->work);
rxq               365 drivers/mailbox/omap-mailbox.c 	mbox->rxq = mq;
rxq               383 drivers/mailbox/omap-mailbox.c 	mbox_queue_free(mbox->rxq);
rxq               391 drivers/mailbox/omap-mailbox.c 	flush_work(&mbox->rxq->work);
rxq               392 drivers/mailbox/omap-mailbox.c 	mbox_queue_free(mbox->rxq);
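The omap-mailbox hits show the teardown ordering that matters for a work-based rx bottom half: flush the work before freeing the queue, so no handler runs on freed memory. A hedged sketch with illustrative types:

    #include <linux/interrupt.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct mbox_queue {                 /* stand-in for omap_mbox_queue */
        struct work_struct work;
    };

    static irqreturn_t mbox_irq(int irq, void *p)
    {
        struct mbox_queue *mq = p;

        schedule_work(&mq->work);       /* defer rx to process context */
        return IRQ_HANDLED;
    }

    static void mbox_shutdown(struct mbox_queue *mq)
    {
        flush_work(&mq->work);          /* in-flight rx work finishes first */
        kfree(mq);                      /* only then release the queue */
    }
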
rxq               289 drivers/net/dsa/mv88e6xxx/hwtstamp.c 			       struct sk_buff_head *rxq)
rxq               300 drivers/net/dsa/mv88e6xxx/hwtstamp.c 	spin_lock_irqsave(&rxq->lock, flags);
rxq               301 drivers/net/dsa/mv88e6xxx/hwtstamp.c 	skb_queue_splice_tail_init(rxq, &received);
rxq               302 drivers/net/dsa/mv88e6xxx/hwtstamp.c 	spin_unlock_irqrestore(&rxq->lock, flags);
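The mv88e6xxx hit drains a shared rx queue in one shot: everything is spliced onto a private list under the queue's own lock, then processed with the lock dropped. A minimal sketch:

    #include <linux/skbuff.h>
    #include <linux/spinlock.h>

    static void drain_rxq(struct sk_buff_head *rxq)
    {
        struct sk_buff_head received;
        struct sk_buff *skb;
        unsigned long flags;

        __skb_queue_head_init(&received);   /* private, unlocked list */

        spin_lock_irqsave(&rxq->lock, flags);
        skb_queue_splice_tail_init(rxq, &received);     /* O(1) move */
        spin_unlock_irqrestore(&rxq->lock, flags);

        while ((skb = __skb_dequeue(&received)))
            consume_skb(skb);   /* real code matches/timestamps here */
    }
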
rxq               545 drivers/net/ethernet/alacritech/slic.h 	struct slic_rx_queue rxq;
rxq               121 drivers/net/ethernet/alacritech/slicoss.c static unsigned int slic_get_free_rx_descs(struct slic_rx_queue *rxq)
rxq               123 drivers/net/ethernet/alacritech/slicoss.c 	return slic_get_free_queue_descs(rxq->put_idx, rxq->done_idx, rxq->len);
rxq               397 drivers/net/ethernet/alacritech/slicoss.c 	struct slic_rx_queue *rxq = &sdev->rxq;
rxq               406 drivers/net/ethernet/alacritech/slicoss.c 	while (slic_get_free_rx_descs(rxq) > SLIC_MAX_REQ_RX_DESCS) {
rxq               438 drivers/net/ethernet/alacritech/slicoss.c 		buff = &rxq->rxbuffs[rxq->put_idx];
rxq               447 drivers/net/ethernet/alacritech/slicoss.c 		rxq->put_idx = slic_next_queue_idx(rxq->put_idx, rxq->len);
rxq               550 drivers/net/ethernet/alacritech/slicoss.c 	struct slic_rx_queue *rxq = &sdev->rxq;
rxq               560 drivers/net/ethernet/alacritech/slicoss.c 	while (todo && (rxq->done_idx != rxq->put_idx)) {
rxq               561 drivers/net/ethernet/alacritech/slicoss.c 		buff = &rxq->rxbuffs[rxq->done_idx];
rxq               614 drivers/net/ethernet/alacritech/slicoss.c 		rxq->done_idx = slic_next_queue_idx(rxq->done_idx, rxq->len);
rxq               919 drivers/net/ethernet/alacritech/slicoss.c 	struct slic_rx_queue *rxq = &sdev->rxq;
rxq               922 drivers/net/ethernet/alacritech/slicoss.c 	rxq->len = SLIC_NUM_RX_LES;
rxq               923 drivers/net/ethernet/alacritech/slicoss.c 	rxq->done_idx = 0;
rxq               924 drivers/net/ethernet/alacritech/slicoss.c 	rxq->put_idx = 0;
rxq               926 drivers/net/ethernet/alacritech/slicoss.c 	buff = kcalloc(rxq->len, sizeof(*buff), GFP_KERNEL);
rxq               930 drivers/net/ethernet/alacritech/slicoss.c 	rxq->rxbuffs = buff;
rxq               938 drivers/net/ethernet/alacritech/slicoss.c 	struct slic_rx_queue *rxq = &sdev->rxq;
rxq               943 drivers/net/ethernet/alacritech/slicoss.c 	for (i = 0; i < rxq->len; i++) {
rxq               944 drivers/net/ethernet/alacritech/slicoss.c 		buff = &rxq->rxbuffs[i];
rxq               955 drivers/net/ethernet/alacritech/slicoss.c 	kfree(rxq->rxbuffs);
rxq                96 drivers/net/ethernet/atheros/alx/alx.h 	struct alx_rx_queue	*rxq;
rxq               378 drivers/net/ethernet/atheros/alx/hw.c 	u32 rxq, txq, val;
rxq               381 drivers/net/ethernet/atheros/alx/hw.c 	rxq = alx_read_mem32(hw, ALX_RXQ0);
rxq               382 drivers/net/ethernet/atheros/alx/hw.c 	alx_write_mem32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN);
rxq               609 drivers/net/ethernet/atheros/alx/hw.c 	u32 mac, txq, rxq;
rxq               611 drivers/net/ethernet/atheros/alx/hw.c 	rxq = alx_read_mem32(hw, ALX_RXQ0);
rxq               612 drivers/net/ethernet/atheros/alx/hw.c 	alx_write_mem32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN);
rxq                74 drivers/net/ethernet/atheros/alx/main.c 	struct alx_rx_queue *rxq = alx->qnapi[0]->rxq;
rxq                80 drivers/net/ethernet/atheros/alx/main.c 	next = cur = rxq->write_idx;
rxq                83 drivers/net/ethernet/atheros/alx/main.c 	cur_buf = &rxq->bufs[cur];
rxq                85 drivers/net/ethernet/atheros/alx/main.c 	while (!cur_buf->skb && next != rxq->read_idx) {
rxq                86 drivers/net/ethernet/atheros/alx/main.c 		struct alx_rfd *rfd = &rxq->rfd[cur];
rxq               128 drivers/net/ethernet/atheros/alx/main.c 		cur_buf = &rxq->bufs[cur];
rxq               135 drivers/net/ethernet/atheros/alx/main.c 		rxq->write_idx = cur;
rxq               217 drivers/net/ethernet/atheros/alx/main.c static int alx_clean_rx_irq(struct alx_rx_queue *rxq, int budget)
rxq               226 drivers/net/ethernet/atheros/alx/main.c 	alx = netdev_priv(rxq->netdev);
rxq               229 drivers/net/ethernet/atheros/alx/main.c 		rrd = &rxq->rrd[rxq->rrd_read_idx];
rxq               235 drivers/net/ethernet/atheros/alx/main.c 				  RRD_SI) != rxq->read_idx ||
rxq               242 drivers/net/ethernet/atheros/alx/main.c 		rxb = &rxq->bufs[rxq->read_idx];
rxq               243 drivers/net/ethernet/atheros/alx/main.c 		dma_unmap_single(rxq->dev,
rxq               261 drivers/net/ethernet/atheros/alx/main.c 		skb->protocol = eth_type_trans(skb, rxq->netdev);
rxq               278 drivers/net/ethernet/atheros/alx/main.c 		napi_gro_receive(&rxq->np->napi, skb);
rxq               282 drivers/net/ethernet/atheros/alx/main.c 		if (++rxq->read_idx == rxq->count)
rxq               283 drivers/net/ethernet/atheros/alx/main.c 			rxq->read_idx = 0;
rxq               284 drivers/net/ethernet/atheros/alx/main.c 		if (++rxq->rrd_read_idx == rxq->count)
rxq               285 drivers/net/ethernet/atheros/alx/main.c 			rxq->rrd_read_idx = 0;
rxq               308 drivers/net/ethernet/atheros/alx/main.c 	if (np->rxq)
rxq               309 drivers/net/ethernet/atheros/alx/main.c 		work = alx_clean_rx_irq(np->rxq, budget);
rxq               468 drivers/net/ethernet/atheros/alx/main.c 		if (np->rxq) {
rxq               469 drivers/net/ethernet/atheros/alx/main.c 			np->rxq->read_idx = 0;
rxq               470 drivers/net/ethernet/atheros/alx/main.c 			np->rxq->write_idx = 0;
rxq               471 drivers/net/ethernet/atheros/alx/main.c 			np->rxq->rrd_read_idx = 0;
rxq               472 drivers/net/ethernet/atheros/alx/main.c 			alx_write_mem32(hw, ALX_RRD_ADDR_LO, np->rxq->rrd_dma);
rxq               473 drivers/net/ethernet/atheros/alx/main.c 			alx_write_mem32(hw, ALX_RFD_ADDR_LO, np->rxq->rfd_dma);
rxq               507 drivers/net/ethernet/atheros/alx/main.c static void alx_free_rxring_buf(struct alx_rx_queue *rxq)
rxq               512 drivers/net/ethernet/atheros/alx/main.c 	if (!rxq->bufs)
rxq               515 drivers/net/ethernet/atheros/alx/main.c 	for (i = 0; i < rxq->count; i++) {
rxq               516 drivers/net/ethernet/atheros/alx/main.c 		cur_buf = rxq->bufs + i;
rxq               518 drivers/net/ethernet/atheros/alx/main.c 			dma_unmap_single(rxq->dev,
rxq               529 drivers/net/ethernet/atheros/alx/main.c 	rxq->write_idx = 0;
rxq               530 drivers/net/ethernet/atheros/alx/main.c 	rxq->read_idx = 0;
rxq               531 drivers/net/ethernet/atheros/alx/main.c 	rxq->rrd_read_idx = 0;
rxq               542 drivers/net/ethernet/atheros/alx/main.c 	if (alx->qnapi[0] && alx->qnapi[0]->rxq)
rxq               543 drivers/net/ethernet/atheros/alx/main.c 		alx_free_rxring_buf(alx->qnapi[0]->rxq);
rxq               631 drivers/net/ethernet/atheros/alx/main.c static int alx_alloc_rx_ring(struct alx_priv *alx, struct alx_rx_queue *rxq,
rxq               634 drivers/net/ethernet/atheros/alx/main.c 	rxq->bufs = kcalloc(rxq->count, sizeof(struct alx_buffer), GFP_KERNEL);
rxq               635 drivers/net/ethernet/atheros/alx/main.c 	if (!rxq->bufs)
rxq               638 drivers/net/ethernet/atheros/alx/main.c 	rxq->rrd = alx->descmem.virt + offset;
rxq               639 drivers/net/ethernet/atheros/alx/main.c 	rxq->rrd_dma = alx->descmem.dma + offset;
rxq               640 drivers/net/ethernet/atheros/alx/main.c 	offset += sizeof(struct alx_rrd) * rxq->count;
rxq               642 drivers/net/ethernet/atheros/alx/main.c 	rxq->rfd = alx->descmem.virt + offset;
rxq               643 drivers/net/ethernet/atheros/alx/main.c 	rxq->rfd_dma = alx->descmem.dma + offset;
rxq               644 drivers/net/ethernet/atheros/alx/main.c 	offset += sizeof(struct alx_rfd) * rxq->count;
rxq               681 drivers/net/ethernet/atheros/alx/main.c 	offset = alx_alloc_rx_ring(alx, alx->qnapi[0]->rxq, offset);
rxq               700 drivers/net/ethernet/atheros/alx/main.c 	if (alx->qnapi[0] && alx->qnapi[0]->rxq)
rxq               701 drivers/net/ethernet/atheros/alx/main.c 		kfree(alx->qnapi[0]->rxq->bufs);
rxq               722 drivers/net/ethernet/atheros/alx/main.c 		kfree(np->rxq);
rxq               742 drivers/net/ethernet/atheros/alx/main.c 	struct alx_rx_queue *rxq;
rxq               779 drivers/net/ethernet/atheros/alx/main.c 	rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
rxq               780 drivers/net/ethernet/atheros/alx/main.c 	if (!rxq)
rxq               783 drivers/net/ethernet/atheros/alx/main.c 	np->rxq = rxq;
rxq               784 drivers/net/ethernet/atheros/alx/main.c 	rxq->np = alx->qnapi[0];
rxq               785 drivers/net/ethernet/atheros/alx/main.c 	rxq->queue_idx = 0;
rxq               786 drivers/net/ethernet/atheros/alx/main.c 	rxq->count = alx->rx_ringsz;
rxq               787 drivers/net/ethernet/atheros/alx/main.c 	rxq->netdev = alx->dev;
rxq               788 drivers/net/ethernet/atheros/alx/main.c 	rxq->dev = &alx->hw.pdev->dev;
rxq               868 drivers/net/ethernet/atheros/alx/main.c 		if (np->txq && np->rxq)
rxq               874 drivers/net/ethernet/atheros/alx/main.c 		else if (np->rxq)
rxq               876 drivers/net/ethernet/atheros/alx/main.c 				np->rxq->queue_idx);
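The alx hits follow the usual two-index rx scheme: a refill loop fills the empty slots between write_idx and read_idx with freshly mapped skbs and publishes each DMA address in the free-descriptor (RFD) ring. A cut-down sketch, with a stand-in struct in place of alx_rx_queue:

    #include <linux/dma-mapping.h>
    #include <linux/netdevice.h>

    struct rx_ring {                    /* illustrative */
        struct net_device *netdev;
        struct device *dev;
        struct { struct sk_buff *skb; } *bufs;
        __le64 *rfd;                    /* hardware free-descriptor ring */
        unsigned int count, buf_size;
        unsigned int read_idx, write_idx;
    };

    static unsigned int ring_refill(struct rx_ring *rxq, gfp_t gfp)
    {
        unsigned int cur = rxq->write_idx, n = 0;

        while (!rxq->bufs[cur].skb &&
               (cur + 1) % rxq->count != rxq->read_idx) {
            struct sk_buff *skb;
            dma_addr_t dma;

            skb = __netdev_alloc_skb(rxq->netdev, rxq->buf_size, gfp);
            if (!skb)
                break;
            dma = dma_map_single(rxq->dev, skb->data,
                                 rxq->buf_size, DMA_FROM_DEVICE);
            if (dma_mapping_error(rxq->dev, dma)) {
                dev_kfree_skb(skb);
                break;
            }
            rxq->bufs[cur].skb = skb;
            rxq->rfd[cur] = cpu_to_le64(dma);   /* publish to hardware */
            cur = (cur + 1) % rxq->count;
            n++;
        }
        if (n)
            rxq->write_idx = cur;   /* then ring the producer doorbell */
        return n;
    }
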
rxq              1190 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	u32 mac, txq, rxq;
rxq              1197 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	AT_READ_REG(hw, REG_RXQ_CTRL, &rxq);
rxq              1201 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	rxq |= RXQ_CTRL_EN;
rxq              1215 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq);
rxq              2424 drivers/net/ethernet/broadcom/bcmsysport.c 	u32 txq, rxq;
rxq              2438 drivers/net/ethernet/broadcom/bcmsysport.c 	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
rxq              2439 drivers/net/ethernet/broadcom/bcmsysport.c 		rxq = 1;
rxq              2445 drivers/net/ethernet/broadcom/bcmsysport.c 	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
rxq              2559 drivers/net/ethernet/broadcom/bcmsysport.c 		 priv->irq0, priv->irq1, txq, rxq);
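The bcmsysport hits size the netdev from device tree: an optional "systemport,num-rxq" property, with a fallback of one rx queue, feeds alloc_etherdev_mqs(). A sketch of that probe step (the txq default is assumed, not taken from this listing):

    #include <linux/etherdevice.h>
    #include <linux/of.h>

    static struct net_device *sysport_alloc(struct device_node *dn,
                                            size_t priv_size)
    {
        u32 txq, rxq;

        if (of_property_read_u32(dn, "systemport,num-txq", &txq))
            txq = 32;   /* driver default ring count (assumed) */
        if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
            rxq = 1;    /* property absent: a single rx queue */

        return alloc_etherdev_mqs(priv_size, txq, rxq);
    }
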
rxq              1930 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c 			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
rxq              1936 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c 			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
rxq              1942 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c 			cur_query_entry->index = vfq_stat_id(vf, rxq);
rxq               634 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.rcq_addr = fp->rx_comp_mapping;
rxq               635 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
rxq               636 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.rxq_addr = fp->rx_desc_mapping;
rxq               637 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.sge_addr = fp->rx_sge_mapping;
rxq               638 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.vf_sb = fp_idx;
rxq               639 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
rxq               640 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
rxq               641 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.mtu = bp->dev->mtu;
rxq               642 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.buf_sz = fp->rx_buf_size;
rxq               643 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
rxq               644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.tpa_agg_sz = tpa_agg_size;
rxq               645 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
rxq               646 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
rxq               648 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.flags = flags;
rxq               649 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.drop_flags = 0;
rxq               650 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
rxq               651 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 	req->rxq.stat_id = -1; /* No stats at the moment */
rxq              1595 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			q->sb_idx = setup_q->rxq.vf_sb;
rxq              1598 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			init_p->rx.hc_rate = setup_q->rxq.hc_rate;
rxq              1599 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
rxq              1600 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
rxq              1604 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
rxq              1608 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			setup_p->gen_params.mtu = setup_q->rxq.mtu;
rxq              1611 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			rxq_params->drop_flags = setup_q->rxq.drop_flags;
rxq              1612 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			rxq_params->dscr_map = setup_q->rxq.rxq_addr;
rxq              1613 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			rxq_params->sge_map = setup_q->rxq.sge_addr;
rxq              1614 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			rxq_params->rcq_map = setup_q->rxq.rcq_addr;
rxq              1615 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
rxq              1616 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			rxq_params->buf_sz = setup_q->rxq.buf_sz;
rxq              1617 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
rxq              1618 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
rxq              1619 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
rxq              1621 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 				setup_q->rxq.cache_line_log;
rxq              1622 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c 			rxq_params->sb_cq_index = setup_q->rxq.sb_index;
rxq               273 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h 	} rxq;
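The max_sge_pkt lines in the bnx2x vfpf hits compute how many pages an MTU-sized packet spans, rounded up to a whole number of SGE entries (the mask continuation line itself contains no "rxq", so it is absent from this listing). A self-contained check of that arithmetic, with an assumed page size and PAGES_PER_SGE:

    #include <assert.h>

    #define PAGE_SZ        4096u
    #define PAGES_PER_SGE  2u       /* assumed */

    static unsigned int sge_pages(unsigned int mtu)
    {
        /* SGE_PAGE_ALIGN(mtu) >> SGE_PAGE_SHIFT */
        unsigned int pages = (mtu + PAGE_SZ - 1) / PAGE_SZ;

        /* round up to a multiple of PAGES_PER_SGE */
        return (pages + PAGES_PER_SGE - 1) & ~(PAGES_PER_SGE - 1);
    }

    int main(void)
    {
        assert(sge_pages(1500) == 2);   /* 1 page -> one full SGE */
        assert(sge_pages(9000) == 4);   /* 3 pages -> 4 (two SGEs) */
        return 0;
    }
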
rxq              4621 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.rfs_ring_tbl_idx = cpu_to_le16(fltr->rxq);
rxq              4624 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		vnic = &bp->vnic_info[fltr->rxq + 1];
rxq              11115 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	new_fltr->rxq = rxq_index;
rxq              11145 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				if (rps_may_expire_flow(bp->dev, fltr->rxq,
rxq              1103 drivers/net/ethernet/broadcom/bnxt/bnxt.h 	u16			rxq;
rxq               994 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	fs->ring_cookie = fltr->rxq;
rxq               140 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c 	xdp.rxq = &rxr->xdp_rxq;
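The bnxt hits use fltr->rxq both to program the aRFS steering destination and to ask RPS whether the flow has gone stale. The expiry side boils down to rps_may_expire_flow(); a minimal sketch with an illustrative filter type:

    #include <linux/netdevice.h>

    struct ntuple_fltr {    /* illustrative, not bnxt's struct */
        u16 rxq;            /* destination ring */
        u32 flow_id;
        u16 sw_id;
    };

    static bool fltr_expired(struct net_device *dev, struct ntuple_fltr *f)
    {
    #ifdef CONFIG_RFS_ACCEL
        return rps_may_expire_flow(dev, f->rxq, f->flow_id, f->sw_id);
    #else
        return false;
    #endif
    }
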
rxq               184 drivers/net/ethernet/brocade/bna/bna.h 		(q0) = rxp->rxq.single.only;				\
rxq               188 drivers/net/ethernet/brocade/bna/bna.h 		(q0) = rxp->rxq.slr.large;				\
rxq               189 drivers/net/ethernet/brocade/bna/bna.h 		(q1) = rxp->rxq.slr.small;				\
rxq               192 drivers/net/ethernet/brocade/bna/bna.h 		(q0) = rxp->rxq.hds.data;				\
rxq               193 drivers/net/ethernet/brocade/bna/bna.h 		(q1) = rxp->rxq.hds.hdr;				\
rxq              1764 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	struct bna_rxq *rxq = NULL;
rxq              1766 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	rxq = list_first_entry(&rx_mod->rxq_free_q, struct bna_rxq, qe);
rxq              1767 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	list_del(&rxq->qe);
rxq              1770 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	return rxq;
rxq              1774 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
rxq              1776 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
rxq              1836 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		rxp->rxq.single.only = q0;
rxq              1837 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		rxp->rxq.single.reserved = NULL;
rxq              1840 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		rxp->rxq.slr.large = q0;
rxq              1841 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		rxp->rxq.slr.small = q1;
rxq              1844 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		rxp->rxq.hds.data = q0;
rxq              1845 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		rxp->rxq.hds.hdr = q1;
rxq              1853 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_rxq_qpt_setup(struct bna_rxq *rxq,
rxq              1866 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
rxq              1867 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
rxq              1868 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
rxq              1869 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	rxq->qpt.page_count = page_count;
rxq              1870 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	rxq->qpt.page_size = page_size;
rxq              1872 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
rxq              1873 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	rxq->rcb->sw_q = page_mem->kva;
rxq              1878 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	for (i = 0; i < rxq->qpt.page_count; i++) {
rxq              1879 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		rxq->rcb->sw_qpt[i] = kva;
rxq              1883 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
rxq              1885 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
rxq              2038 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 	rx_mod->rxq = (struct bna_rxq *)
rxq              2073 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		rxq_ptr = &rx_mod->rxq[index];
rxq              2390 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		q0->rcb->rxq = q0;
rxq              2416 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 			q1->rcb->rxq = q1;
rxq              2512 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		rxp->rxq.slr.large = NULL;
rxq              2513 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		rxp->rxq.slr.small = NULL;
rxq               553 drivers/net/ethernet/brocade/bna/bna_types.h 	struct bna_rxq *rxq;
rxq               690 drivers/net/ethernet/brocade/bna/bna_types.h 	union	bna_rxq_u	rxq;
rxq               832 drivers/net/ethernet/brocade/bna/bna_types.h 	struct bna_rxq *rxq;		/* BFI_MAX_RXQ entries */
rxq               266 drivers/net/ethernet/brocade/bna/bnad.c 	order = get_order(rcb->rxq->buffer_size);
rxq               272 drivers/net/ethernet/brocade/bna/bnad.c 		unmap_q->map_size = rcb->rxq->buffer_size;
rxq               274 drivers/net/ethernet/brocade/bna/bnad.c 		if (rcb->rxq->multi_buffer) {
rxq               276 drivers/net/ethernet/brocade/bna/bnad.c 			unmap_q->map_size = rcb->rxq->buffer_size;
rxq               281 drivers/net/ethernet/brocade/bna/bnad.c 				(rcb->rxq->buffer_size > 2048) ?
rxq               371 drivers/net/ethernet/brocade/bna/bnad.c 			rcb->rxq->rxbuf_alloc_failed++;
rxq               380 drivers/net/ethernet/brocade/bna/bnad.c 			rcb->rxq->rxbuf_map_failed++;
rxq               422 drivers/net/ethernet/brocade/bna/bnad.c 	buff_sz = rcb->rxq->buffer_size;
rxq               434 drivers/net/ethernet/brocade/bna/bnad.c 			rcb->rxq->rxbuf_alloc_failed++;
rxq               443 drivers/net/ethernet/brocade/bna/bnad.c 			rcb->rxq->rxbuf_map_failed++;
rxq               684 drivers/net/ethernet/brocade/bna/bnad.c 			rcb->rxq->rx_packets_with_error++;
rxq               694 drivers/net/ethernet/brocade/bna/bnad.c 		rcb->rxq->rx_packets++;
rxq               695 drivers/net/ethernet/brocade/bna/bnad.c 		rcb->rxq->rx_bytes += totlen;
rxq              2394 drivers/net/ethernet/brocade/bna/bnad.c 				rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
rxq              2396 drivers/net/ethernet/brocade/bna/bnad.c 					rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
rxq              2399 drivers/net/ethernet/brocade/bna/bnad.c 					rcb[1]->rxq) {
rxq              2402 drivers/net/ethernet/brocade/bna/bnad.c 						ccb->rcb[1]->rxq->rx_packets;
rxq              2405 drivers/net/ethernet/brocade/bna/bnad.c 						ccb->rcb[1]->rxq->rx_bytes;
rxq               655 drivers/net/ethernet/brocade/bna/bnad_ethtool.c 					rcb[1]->rxq) {
rxq               740 drivers/net/ethernet/brocade/bna/bnad_ethtool.c 				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
rxq               765 drivers/net/ethernet/brocade/bna/bnad_ethtool.c 				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
rxq               791 drivers/net/ethernet/brocade/bna/bnad_ethtool.c 					rcb[0]->rxq) {
rxq               794 drivers/net/ethernet/brocade/bna/bnad_ethtool.c 					buf[bi++] = rcb->rxq->rx_packets;
rxq               795 drivers/net/ethernet/brocade/bna/bnad_ethtool.c 					buf[bi++] = rcb->rxq->rx_bytes;
rxq               796 drivers/net/ethernet/brocade/bna/bnad_ethtool.c 					buf[bi++] = rcb->rxq->
rxq               798 drivers/net/ethernet/brocade/bna/bnad_ethtool.c 					buf[bi++] = rcb->rxq->
rxq               800 drivers/net/ethernet/brocade/bna/bnad_ethtool.c 					buf[bi++] = rcb->rxq->rxbuf_map_failed;
rxq               806 drivers/net/ethernet/brocade/bna/bnad_ethtool.c 					rcb[1]->rxq) {
rxq               809 drivers/net/ethernet/brocade/bna/bnad_ethtool.c 					buf[bi++] = rcb->rxq->rx_packets;
rxq               810 drivers/net/ethernet/brocade/bna/bnad_ethtool.c 					buf[bi++] = rcb->rxq->rx_bytes;
rxq               811 drivers/net/ethernet/brocade/bna/bnad_ethtool.c 					buf[bi++] = rcb->rxq->
rxq               813 drivers/net/ethernet/brocade/bna/bnad_ethtool.c 					buf[bi++] = rcb->rxq->
rxq               815 drivers/net/ethernet/brocade/bna/bnad_ethtool.c 					buf[bi++] = rcb->rxq->rxbuf_map_failed;
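The bna hits hang one or two hardware queues off each rx path through a tagged union (single / small-large / header-data split), dispatched by the GET_RXQS-style macro at the top of the cluster. A sketch of the same shape as a plain function:

    struct rxq;     /* opaque here */

    enum rxp_type { RXP_SINGLE, RXP_SLR, RXP_HDS };

    struct rxp {
        enum rxp_type type;
        union {
            struct { struct rxq *only; } single;
            struct { struct rxq *large, *small; } slr;  /* small/large rings */
            struct { struct rxq *data, *hdr; } hds;     /* header split */
        } rxq;
    };

    static void get_rxqs(struct rxp *rxp, struct rxq **q0, struct rxq **q1)
    {
        *q1 = NULL;
        switch (rxp->type) {
        case RXP_SINGLE:
            *q0 = rxp->rxq.single.only;
            break;
        case RXP_SLR:
            *q0 = rxp->rxq.slr.large;
            *q1 = rxp->rxq.slr.small;
            break;
        case RXP_HDS:
            *q0 = rxp->rxq.hds.data;
            *q1 = rxp->rxq.hds.hdr;
            break;
        }
    }
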
rxq              1072 drivers/net/ethernet/cavium/liquidio/lio_ethtool.c 	lio->rxq = lio->linfo.rxpciq[0].s.q_no;
rxq              3656 drivers/net/ethernet/cavium/liquidio/lio_main.c 		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
rxq              3667 drivers/net/ethernet/cavium/liquidio/lio_main.c 		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
rxq              2154 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
rxq              2157 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
rxq               110 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	int rxq;
rxq               555 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	xdp.rxq = &rq->xdp_rxq;
rxq              3008 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	struct sge_rspq *rxq = (struct sge_rspq *)q; \
rxq              3009 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	QDESC_GET(rxq, rxq->desc, type, label); \
rxq               249 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h static inline void cudbg_fill_qdesc_rxq(const struct sge_rspq *rxq,
rxq               254 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h 	entry->qid = rxq->cntxt_id;
rxq               255 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h 	entry->desc_size = rxq->iqe_len;
rxq               256 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h 	entry->num_desc = rxq->size;
rxq               257 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h 	entry->data_size = rxq->size * rxq->iqe_len;
rxq               258 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h 	memcpy(entry->data, rxq->desc, entry->data_size);
rxq              1784 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
rxq               839 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	const struct sge_eth_rxq *rxq;
rxq               841 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	rxq = &adapter->sge.ethrxq[pi->first_qset];
rxq               848 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		rss[i] = rxq[*queues].rspq.abs_id;
rxq               107 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c 	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
rxq               124 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c 		rxq->stats.nomem++;
rxq               129 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c 		rxq->stats.imm++;
rxq               131 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c 		rxq->stats.an++;
rxq               133 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c 		rxq->stats.pkts++;
rxq              2739 drivers/net/ethernet/chelsio/cxgb4/sge.c static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
rxq              2742 drivers/net/ethernet/chelsio/cxgb4/sge.c 	struct adapter *adapter = rxq->rspq.adap;
rxq              2748 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb = napi_get_frags(&rxq->rspq.napi);
rxq              2751 drivers/net/ethernet/chelsio/cxgb4/sge.c 		rxq->stats.rx_drops++;
rxq              2762 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb_record_rx_queue(skb, rxq->rspq.idx);
rxq              2767 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
rxq              2773 drivers/net/ethernet/chelsio/cxgb4/sge.c 		rxq->stats.vlan_ex++;
rxq              2775 drivers/net/ethernet/chelsio/cxgb4/sge.c 	ret = napi_gro_frags(&rxq->rspq.napi);
rxq              2777 drivers/net/ethernet/chelsio/cxgb4/sge.c 		rxq->stats.lro_pkts++;
rxq              2779 drivers/net/ethernet/chelsio/cxgb4/sge.c 		rxq->stats.lro_merged++;
rxq              2780 drivers/net/ethernet/chelsio/cxgb4/sge.c 	rxq->stats.pkts++;
rxq              2781 drivers/net/ethernet/chelsio/cxgb4/sge.c 	rxq->stats.rx_cso++;
rxq              2833 drivers/net/ethernet/chelsio/cxgb4/sge.c 			  struct sge_eth_rxq *rxq, struct sk_buff *skb)
rxq              2842 drivers/net/ethernet/chelsio/cxgb4/sge.c 			rxq->stats.rx_drops++;
rxq              2932 drivers/net/ethernet/chelsio/cxgb4/sge.c 	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
rxq              2966 drivers/net/ethernet/chelsio/cxgb4/sge.c 		rxq->stats.bad_rx_pkts++;
rxq              2971 drivers/net/ethernet/chelsio/cxgb4/sge.c 		do_gro(rxq, si, pkt, tnl_hdr_len);
rxq              2978 drivers/net/ethernet/chelsio/cxgb4/sge.c 		rxq->stats.rx_drops++;
rxq              2985 drivers/net/ethernet/chelsio/cxgb4/sge.c 		ret = t4_rx_hststamp(adapter, rsp, rxq, skb);
rxq              3006 drivers/net/ethernet/chelsio/cxgb4/sge.c 	rxq->stats.pkts++;
rxq              3014 drivers/net/ethernet/chelsio/cxgb4/sge.c 			rxq->stats.rx_cso++;
rxq              3025 drivers/net/ethernet/chelsio/cxgb4/sge.c 			rxq->stats.rx_cso++;
rxq              3052 drivers/net/ethernet/chelsio/cxgb4/sge.c 		rxq->stats.vlan_ex++;
rxq              3139 drivers/net/ethernet/chelsio/cxgb4/sge.c 	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
rxq              3161 drivers/net/ethernet/chelsio/cxgb4/sge.c 					free_rx_bufs(q->adap, &rxq->fl, 1);
rxq              3170 drivers/net/ethernet/chelsio/cxgb4/sge.c 				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
rxq              3178 drivers/net/ethernet/chelsio/cxgb4/sge.c 				unmap_rx_buf(q->adap, &rxq->fl);
rxq              3200 drivers/net/ethernet/chelsio/cxgb4/sge.c 				restore_rx_bufs(&si, &rxq->fl, frags);
rxq              3217 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
rxq              3218 drivers/net/ethernet/chelsio/cxgb4/sge.c 		__refill_fl(q->adap, &rxq->fl);
rxq              3393 drivers/net/ethernet/chelsio/cxgb4/sge.c 			struct sge_eth_rxq *rxq;
rxq              3401 drivers/net/ethernet/chelsio/cxgb4/sge.c 				rxq = container_of(fl, struct sge_eth_rxq, fl);
rxq              3402 drivers/net/ethernet/chelsio/cxgb4/sge.c 				if (napi_reschedule(&rxq->rspq.napi))
rxq              7588 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
rxq              7600 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 				     FW_PFVF_CMD_NIQ_V(rxq));
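The cxgb4 do_gro() hits use the frag-based GRO API: borrow a linear-data-free skb from NAPI with napi_get_frags(), attach page fragments, and submit with napi_gro_frags(), whose return value distinguishes merged from delivered packets for the lro counters. A cut-down sketch for a single fragment (the counters are hypothetical):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static unsigned long stats_lro_pkts, stats_lro_merged;  /* hypothetical */

    static void gro_one_frag(struct napi_struct *napi, struct page *pg,
                             unsigned int off, unsigned int len)
    {
        struct sk_buff *skb;

        skb = napi_get_frags(napi);     /* NAPI-owned skb, no linear copy */
        if (unlikely(!skb))
            return;                     /* driver bumps rx_drops here */

        skb_fill_page_desc(skb, 0, pg, off, len);
        skb->len = len;
        skb->data_len = len;
        skb->truesize += len;
        skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (napi_gro_frags(napi) == GRO_MERGED)
            stats_lro_merged++;         /* folded into an earlier skb */
        else
            stats_lro_pkts++;           /* delivered as its own packet */
    }
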
rxq               385 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	int rxq, msi, err;
rxq               399 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	for_each_ethrxq(s, rxq) {
rxq               403 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 				  &s->ethrxq[rxq].rspq);
rxq               411 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	while (--rxq >= 0)
rxq               412 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
rxq               423 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	int rxq, msi;
rxq               427 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	for_each_ethrxq(s, rxq)
rxq               429 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 			 &s->ethrxq[rxq].rspq);
rxq               454 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	int rxq;
rxq               457 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	for_each_ethrxq(s, rxq)
rxq               458 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		qenable(&s->ethrxq[rxq].rspq);
rxq               479 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	int rxq;
rxq               481 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	for_each_ethrxq(s, rxq)
rxq               482 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		napi_disable(&s->ethrxq[rxq].rspq.napi);
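The cxgb4vf setup hits request one MSI-X vector per rx response queue and, on failure, unwind exactly the vectors already taken, in reverse. The same pattern in isolation:

    #include <linux/interrupt.h>

    struct vec {                        /* illustrative */
        unsigned int irq;
        const char *name;
        void *cookie;                   /* per-queue rspq pointer */
    };

    static int request_queue_irqs(irq_handler_t handler, struct vec *v, int n)
    {
        int rxq, err;

        for (rxq = 0; rxq < n; rxq++) {
            err = request_irq(v[rxq].irq, handler, 0,
                              v[rxq].name, v[rxq].cookie);
            if (err)
                goto unwind;
        }
        return 0;

    unwind:
        while (--rxq >= 0)              /* free only what was taken */
            free_irq(v[rxq].irq, v[rxq].cookie);
        return err;
    }
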
rxq               634 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
rxq               638 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
rxq               639 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 			err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
rxq               641 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 						 &rxq->fl, t4vf_ethrx_handler);
rxq               651 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 			rxq->rspq.idx = qs;
rxq               652 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 			memset(&rxq->stats, 0, sizeof(rxq->stats));
rxq               665 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
rxq               669 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
rxq               670 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 			IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
rxq               682 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 			rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
rxq               683 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 			EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
rxq               707 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
rxq               712 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 			rss[qs] = rxq[qs].rspq.abs_id;
rxq               739 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 					rxq[0].rspq.abs_id;
rxq              1238 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		struct sge_eth_rxq *rxq;
rxq              1241 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		rxq = &adapter->sge.ethrxq[pi->first_qset];
rxq              1243 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 			t4vf_sge_intr_msix(0, &rxq->rspq);
rxq              1244 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 			rxq++;
rxq              1805 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
rxq              1809 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
rxq              1812 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		stats->rx_csum += rxq->stats.rx_cso;
rxq              1813 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		stats->vlan_ex += rxq->stats.vlan_ex;
rxq              1815 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		stats->lro_pkts += rxq->stats.lro_pkts;
rxq              1816 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		stats->lro_merged += rxq->stats.lro_merged;
rxq              2071 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	#define R(s, v)		S3("u", s, rxq[qs].v)
rxq              2074 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
rxq              2080 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		  (rxq[qs].rspq.netdev
rxq              2081 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		   ? rxq[qs].rspq.netdev->name
rxq              2084 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		   (rxq[qs].rspq.netdev
rxq              2086 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		       netdev_priv(rxq[qs].rspq.netdev))->port_id
rxq              2096 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
rxq              2098 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		   adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
rxq              2227 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	#define R3(fmt, s, v)	S3(fmt, s, rxq[qs].v)
rxq              2231 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
rxq              2237 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		  (rxq[qs].rspq.netdev
rxq              2238 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		   ? rxq[qs].rspq.netdev->name
rxq              2792 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		struct sge_eth_rxq *rxq = &s->ethrxq[qs];
rxq              2795 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
rxq              2796 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		rxq->fl.size = 72;
rxq              1567 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
rxq              1570 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	struct adapter *adapter = rxq->rspq.adapter;
rxq              1576 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	skb = napi_get_frags(&rxq->rspq.napi);
rxq              1579 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		rxq->stats.rx_drops++;
rxq              1588 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	skb_record_rx_queue(skb, rxq->rspq.idx);
rxq              1594 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		rxq->stats.vlan_ex++;
rxq              1596 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	ret = napi_gro_frags(&rxq->rspq.napi);
rxq              1599 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		rxq->stats.lro_pkts++;
rxq              1601 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		rxq->stats.lro_merged++;
rxq              1602 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	rxq->stats.pkts++;
rxq              1603 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	rxq->stats.rx_cso++;
rxq              1621 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
rxq              1633 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		do_gro(rxq, gl, pkt);
rxq              1643 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		rxq->stats.rx_drops++;
rxq              1650 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	rxq->stats.pkts++;
rxq              1656 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			rxq->stats.rx_cso++;
rxq              1661 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			rxq->stats.rx_cso++;
rxq              1667 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		rxq->stats.vlan_ex++;
rxq              1759 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
rxq              1796 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 					free_rx_bufs(rspq->adapter, &rxq->fl,
rxq              1809 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 				BUG_ON(rxq->fl.avail == 0);
rxq              1810 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 				sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
rxq              1818 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 				unmap_rx_buf(rspq->adapter, &rxq->fl);
rxq              1842 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 				restore_rx_bufs(&gl, &rxq->fl, frag);
rxq              1872 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	    fl_cap(&rxq->fl) - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
rxq              1873 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		__refill_fl(rspq->adapter, &rxq->fl);
rxq              2097 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 				struct sge_eth_rxq *rxq;
rxq              2099 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 				rxq = container_of(fl, struct sge_eth_rxq, fl);
rxq              2100 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 				if (napi_reschedule(&rxq->rspq.napi))
rxq              2566 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	struct sge_eth_rxq *rxq = s->ethrxq;
rxq              2572 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {
rxq              2573 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		if (rxq->rspq.desc)
rxq              2574 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
rxq              1406 drivers/net/ethernet/emulex/benet/be_cmds.c 		      struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
rxq              1411 drivers/net/ethernet/emulex/benet/be_cmds.c 	struct be_dma_mem *q_mem = &rxq->dma_mem;
rxq              1438 drivers/net/ethernet/emulex/benet/be_cmds.c 		rxq->id = le16_to_cpu(resp->id);
rxq              1439 drivers/net/ethernet/emulex/benet/be_cmds.c 		rxq->created = true;
rxq              2402 drivers/net/ethernet/emulex/benet/be_cmds.h int be_cmd_rxq_create(struct be_adapter *adapter, struct be_queue_info *rxq,
rxq              2282 drivers/net/ethernet/emulex/benet/be_main.c 	struct be_queue_info *rxq = &rxo->q;
rxq              2283 drivers/net/ethernet/emulex/benet/be_main.c 	u32 frag_idx = rxq->tail;
rxq              2299 drivers/net/ethernet/emulex/benet/be_main.c 	queue_tail_inc(rxq);
rxq              2300 drivers/net/ethernet/emulex/benet/be_main.c 	atomic_dec(&rxq->used);
rxq              2593 drivers/net/ethernet/emulex/benet/be_main.c 	struct be_queue_info *rxq = &rxo->q;
rxq              2600 drivers/net/ethernet/emulex/benet/be_main.c 	page_info = &rxo->page_info_tbl[rxq->head];
rxq              2625 drivers/net/ethernet/emulex/benet/be_main.c 		rxd = queue_head_node(rxq);
rxq              2641 drivers/net/ethernet/emulex/benet/be_main.c 		queue_head_inc(rxq);
rxq              2642 drivers/net/ethernet/emulex/benet/be_main.c 		page_info = &rxo->page_info_tbl[rxq->head];
rxq              2654 drivers/net/ethernet/emulex/benet/be_main.c 		atomic_add(posted, &rxq->used);
rxq              2659 drivers/net/ethernet/emulex/benet/be_main.c 			be_rxq_notify(adapter, rxq->id, notify);
rxq              2662 drivers/net/ethernet/emulex/benet/be_main.c 	} else if (atomic_read(&rxq->used) == 0) {
rxq              2814 drivers/net/ethernet/emulex/benet/be_main.c 	struct be_queue_info *rxq = &rxo->q;
rxq              2817 drivers/net/ethernet/emulex/benet/be_main.c 	while (atomic_read(&rxq->used) > 0) {
rxq              2822 drivers/net/ethernet/emulex/benet/be_main.c 	BUG_ON(atomic_read(&rxq->used));
rxq              2823 drivers/net/ethernet/emulex/benet/be_main.c 	rxq->tail = 0;
rxq              2824 drivers/net/ethernet/emulex/benet/be_main.c 	rxq->head = 0;
rxq               299 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	xdp.rxq = &ch->xdp_rxq;
rxq               831 drivers/net/ethernet/freescale/fec_main.c 	struct fec_enet_priv_rx_q *rxq;
rxq               838 drivers/net/ethernet/freescale/fec_main.c 		rxq = fep->rx_queue[q];
rxq               839 drivers/net/ethernet/freescale/fec_main.c 		bdp = rxq->bd.base;
rxq               841 drivers/net/ethernet/freescale/fec_main.c 		for (i = 0; i < rxq->bd.ring_size; i++) {
rxq               848 drivers/net/ethernet/freescale/fec_main.c 			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
rxq               852 drivers/net/ethernet/freescale/fec_main.c 		bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
rxq               855 drivers/net/ethernet/freescale/fec_main.c 		rxq->bd.cur = rxq->bd.base;
rxq               901 drivers/net/ethernet/freescale/fec_main.c 	struct fec_enet_priv_rx_q *rxq;
rxq               905 drivers/net/ethernet/freescale/fec_main.c 		rxq = fep->rx_queue[i];
rxq               906 drivers/net/ethernet/freescale/fec_main.c 		writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
rxq              1415 drivers/net/ethernet/freescale/fec_main.c 	struct fec_enet_priv_rx_q *rxq;
rxq              1434 drivers/net/ethernet/freescale/fec_main.c 	rxq = fep->rx_queue[queue_id];
rxq              1439 drivers/net/ethernet/freescale/fec_main.c 	bdp = rxq->bd.cur;
rxq              1480 drivers/net/ethernet/freescale/fec_main.c 		index = fec_enet_get_bd_index(bdp, &rxq->bd);
rxq              1481 drivers/net/ethernet/freescale/fec_main.c 		skb = rxq->rx_skbuff[index];
rxq              1565 drivers/net/ethernet/freescale/fec_main.c 			rxq->rx_skbuff[index] = skb_new;
rxq              1590 drivers/net/ethernet/freescale/fec_main.c 		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
rxq              1596 drivers/net/ethernet/freescale/fec_main.c 		writel(0, rxq->bd.reg_desc_active);
rxq              1598 drivers/net/ethernet/freescale/fec_main.c 	rxq->bd.cur = bdp;
rxq              2749 drivers/net/ethernet/freescale/fec_main.c 	struct fec_enet_priv_rx_q *rxq;
rxq              2753 drivers/net/ethernet/freescale/fec_main.c 		rxq = fep->rx_queue[q];
rxq              2754 drivers/net/ethernet/freescale/fec_main.c 		bdp = rxq->bd.base;
rxq              2755 drivers/net/ethernet/freescale/fec_main.c 		for (i = 0; i < rxq->bd.ring_size; i++) {
rxq              2756 drivers/net/ethernet/freescale/fec_main.c 			skb = rxq->rx_skbuff[i];
rxq              2757 drivers/net/ethernet/freescale/fec_main.c 			rxq->rx_skbuff[i] = NULL;
rxq              2765 drivers/net/ethernet/freescale/fec_main.c 			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
rxq              2860 drivers/net/ethernet/freescale/fec_main.c 	struct fec_enet_priv_rx_q *rxq;
rxq              2862 drivers/net/ethernet/freescale/fec_main.c 	rxq = fep->rx_queue[queue];
rxq              2863 drivers/net/ethernet/freescale/fec_main.c 	bdp = rxq->bd.base;
rxq              2864 drivers/net/ethernet/freescale/fec_main.c 	for (i = 0; i < rxq->bd.ring_size; i++) {
rxq              2874 drivers/net/ethernet/freescale/fec_main.c 		rxq->rx_skbuff[i] = skb;
rxq              2882 drivers/net/ethernet/freescale/fec_main.c 		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
rxq              2886 drivers/net/ethernet/freescale/fec_main.c 	bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
rxq              3274 drivers/net/ethernet/freescale/fec_main.c 		struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
rxq              3275 drivers/net/ethernet/freescale/fec_main.c 		unsigned size = dsize * rxq->bd.ring_size;
rxq              3277 drivers/net/ethernet/freescale/fec_main.c 		rxq->bd.qid = i;
rxq              3278 drivers/net/ethernet/freescale/fec_main.c 		rxq->bd.base = cbd_base;
rxq              3279 drivers/net/ethernet/freescale/fec_main.c 		rxq->bd.cur = cbd_base;
rxq              3280 drivers/net/ethernet/freescale/fec_main.c 		rxq->bd.dma = bd_dma;
rxq              3281 drivers/net/ethernet/freescale/fec_main.c 		rxq->bd.dsize = dsize;
rxq              3282 drivers/net/ethernet/freescale/fec_main.c 		rxq->bd.dsize_log2 = dsize_log2;
rxq              3283 drivers/net/ethernet/freescale/fec_main.c 		rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
rxq              3286 drivers/net/ethernet/freescale/fec_main.c 		rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
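fec_main.c keeps each buffer-descriptor ring as raw memory plus the descriptor size and its log2 (dsize, dsize_log2; lines 3281-3282 above), so a descriptor's index can be recovered from its pointer with a byte difference and a shift. A hedged sketch of that arithmetic (the real helpers are fec_enet_get_bd_index() and fec_enet_get_nextdesc(), referenced in the hits; the struct and field layout here are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    struct bd_ring {
            void *base;                  /* first descriptor */
            unsigned int ring_size;      /* number of descriptors */
            unsigned int dsize;          /* descriptor size in bytes, power of two */
            unsigned int dsize_log2;     /* log2(dsize) */
    };

    static unsigned int bd_index(const struct bd_ring *bd, const void *bdp)
    {
            /* byte offset into the ring, divided by the descriptor size */
            return (unsigned int)(((uintptr_t)bdp - (uintptr_t)bd->base)
                                  >> bd->dsize_log2);
    }

    static void *bd_next(const struct bd_ring *bd, void *bdp)
    {
            unsigned int i = bd_index(bd, bdp) + 1;

            if (i == bd->ring_size)      /* wrap past the last descriptor */
                    i = 0;
            return (char *)bd->base + ((size_t)i << bd->dsize_log2);
    }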
rxq              1235 drivers/net/ethernet/freescale/gianfar.c static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
rxq              1244 drivers/net/ethernet/freescale/gianfar.c 	addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
rxq              1245 drivers/net/ethernet/freescale/gianfar.c 	if (unlikely(dma_mapping_error(rxq->dev, addr))) {
rxq              2414 drivers/net/ethernet/freescale/gianfar.c static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
rxq              2418 drivers/net/ethernet/freescale/gianfar.c 	u16 nta = rxq->next_to_alloc;
rxq              2420 drivers/net/ethernet/freescale/gianfar.c 	new_rxb = &rxq->rx_buff[nta];
rxq              2424 drivers/net/ethernet/freescale/gianfar.c 	rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
rxq              2430 drivers/net/ethernet/freescale/gianfar.c 	dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
rxq              1276 drivers/net/ethernet/freescale/gianfar.h static inline int gfar_rxbd_unused(struct gfar_priv_rx_q *rxq)
rxq              1278 drivers/net/ethernet/freescale/gianfar.h 	if (rxq->next_to_clean > rxq->next_to_use)
rxq              1279 drivers/net/ethernet/freescale/gianfar.h 		return rxq->next_to_clean - rxq->next_to_use - 1;
rxq              1281 drivers/net/ethernet/freescale/gianfar.h 	return rxq->rx_ring_size + rxq->next_to_clean - rxq->next_to_use - 1;
rxq              1284 drivers/net/ethernet/freescale/gianfar.h static inline u32 gfar_rxbd_dma_lastfree(struct gfar_priv_rx_q *rxq)
rxq              1290 drivers/net/ethernet/freescale/gianfar.h 	i = rxq->next_to_use ? rxq->next_to_use - 1 : rxq->rx_ring_size - 1;
rxq              1291 drivers/net/ethernet/freescale/gianfar.h 	bdp = &rxq->rx_bd_base[i];
rxq              1292 drivers/net/ethernet/freescale/gianfar.h 	bdp_dma = lower_32_bits(rxq->rx_bd_dma_base);
rxq              1293 drivers/net/ethernet/freescale/gianfar.h 	bdp_dma += (uintptr_t)bdp - (uintptr_t)rxq->rx_bd_base;
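gianfar.h:1276-1281 above is the standard free-slot count for a ring driven by a next_to_clean/next_to_use pair: one slot is always left empty so a full ring and an empty ring do not look alike. The quoted computation, transcribed into a standalone function with a worked example:

    static int rxbd_unused(unsigned int next_to_clean, unsigned int next_to_use,
                           unsigned int ring_size)
    {
            if (next_to_clean > next_to_use)
                    return next_to_clean - next_to_use - 1;

            return ring_size + next_to_clean - next_to_use - 1;
    }

    /*
     * ring_size = 256:
     *   next_to_clean = 10, next_to_use = 10  ->  255 free (ring empty)
     *   next_to_clean = 10, next_to_use = 9   ->  0 free   (ring full)
     */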
rxq               121 drivers/net/ethernet/hisilicon/hisi_femac.c 	struct hisi_femac_queue rxq;
rxq               212 drivers/net/ethernet/hisilicon/hisi_femac.c 	struct hisi_femac_queue *rxq = &priv->rxq;
rxq               218 drivers/net/ethernet/hisilicon/hisi_femac.c 	pos = rxq->head;
rxq               220 drivers/net/ethernet/hisilicon/hisi_femac.c 		if (!CIRC_SPACE(pos, rxq->tail, rxq->num))
rxq               222 drivers/net/ethernet/hisilicon/hisi_femac.c 		if (unlikely(rxq->skb[pos])) {
rxq               224 drivers/net/ethernet/hisilicon/hisi_femac.c 				   pos, rxq->skb[pos]);
rxq               237 drivers/net/ethernet/hisilicon/hisi_femac.c 		rxq->dma_phys[pos] = addr;
rxq               238 drivers/net/ethernet/hisilicon/hisi_femac.c 		rxq->skb[pos] = skb;
rxq               240 drivers/net/ethernet/hisilicon/hisi_femac.c 		pos = (pos + 1) % rxq->num;
rxq               242 drivers/net/ethernet/hisilicon/hisi_femac.c 	rxq->head = pos;
rxq               248 drivers/net/ethernet/hisilicon/hisi_femac.c 	struct hisi_femac_queue *rxq = &priv->rxq;
rxq               253 drivers/net/ethernet/hisilicon/hisi_femac.c 	pos = rxq->tail;
rxq               264 drivers/net/ethernet/hisilicon/hisi_femac.c 		skb = rxq->skb[pos];
rxq               269 drivers/net/ethernet/hisilicon/hisi_femac.c 		rxq->skb[pos] = NULL;
rxq               271 drivers/net/ethernet/hisilicon/hisi_femac.c 		addr = rxq->dma_phys[pos];
rxq               288 drivers/net/ethernet/hisilicon/hisi_femac.c 		pos = (pos + 1) % rxq->num;
rxq               292 drivers/net/ethernet/hisilicon/hisi_femac.c 	rxq->tail = pos;
rxq               376 drivers/net/ethernet/hisilicon/hisi_femac.c 	ret = hisi_femac_init_queue(priv->dev, &priv->rxq, RXQ_NUM);
rxq               388 drivers/net/ethernet/hisilicon/hisi_femac.c 	struct hisi_femac_queue *rxq = &priv->rxq;
rxq               393 drivers/net/ethernet/hisilicon/hisi_femac.c 	pos = rxq->tail;
rxq               394 drivers/net/ethernet/hisilicon/hisi_femac.c 	while (pos != rxq->head) {
rxq               395 drivers/net/ethernet/hisilicon/hisi_femac.c 		skb = rxq->skb[pos];
rxq               398 drivers/net/ethernet/hisilicon/hisi_femac.c 				   pos, rxq->head);
rxq               402 drivers/net/ethernet/hisilicon/hisi_femac.c 		dma_addr = rxq->dma_phys[pos];
rxq               407 drivers/net/ethernet/hisilicon/hisi_femac.c 		rxq->skb[pos] = NULL;
rxq               408 drivers/net/ethernet/hisilicon/hisi_femac.c 		pos = (pos + 1) % rxq->num;
rxq               410 drivers/net/ethernet/hisilicon/hisi_femac.c 	rxq->tail = pos;
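The hisi_femac refill loop (hisi_femac.c:212-243 above) is a textbook circular-buffer producer: it fills slots while CIRC_SPACE() reports room and publishes the new head only at the end. The CIRC_* macros from <linux/circ_buf.h> reduce to a subtract-and-mask that assumes a power-of-two size; a userspace rendition of the pattern:

    /* as in <linux/circ_buf.h>; size must be a power of two for the mask */
    #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
    #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

    struct circq {
            unsigned int head, tail, num;   /* num: power-of-two ring size */
    };

    /* hypothetical refill: returns how many slots were filled */
    static unsigned int circq_refill(struct circq *q, unsigned int budget)
    {
            unsigned int filled = 0, pos = q->head;

            while (budget && CIRC_SPACE(pos, q->tail, q->num)) {
                    /* ... allocate and install a buffer at slot pos ... */
                    pos = (pos + 1) % q->num;
                    filled++;
                    budget--;
            }
            q->head = pos;   /* publish, as the driver does at line 242 */
            return filled;
    }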
rxq                74 drivers/net/ethernet/huawei/hinic/hinic_main.c static void update_rx_stats(struct hinic_dev *nic_dev, struct hinic_rxq *rxq)
rxq                81 drivers/net/ethernet/huawei/hinic/hinic_main.c 	hinic_rxq_get_stats(rxq, &rx_stats);
rxq                91 drivers/net/ethernet/huawei/hinic/hinic_main.c 	hinic_rxq_clean_stats(rxq);
rxq                53 drivers/net/ethernet/huawei/hinic/hinic_rx.c void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
rxq                55 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
rxq                71 drivers/net/ethernet/huawei/hinic/hinic_rx.c void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
rxq                73 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
rxq                93 drivers/net/ethernet/huawei/hinic/hinic_rx.c static void rxq_stats_init(struct hinic_rxq *rxq)
rxq                95 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
rxq                98 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	hinic_rxq_clean_stats(rxq);
rxq               101 drivers/net/ethernet/huawei/hinic/hinic_rx.c static void rx_csum(struct hinic_rxq *rxq, u32 status,
rxq               104 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct net_device *netdev = rxq->netdev;
rxq               117 drivers/net/ethernet/huawei/hinic/hinic_rx.c 			rxq->rxq_stats.csum_errors++;
rxq               128 drivers/net/ethernet/huawei/hinic/hinic_rx.c static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq,
rxq               131 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
rxq               139 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);
rxq               141 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		netdev_err(rxq->netdev, "Failed to allocate Rx SKB\n");
rxq               145 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz,
rxq               166 drivers/net/ethernet/huawei/hinic/hinic_rx.c static void rx_unmap_skb(struct hinic_rxq *rxq, dma_addr_t dma_addr)
rxq               168 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
rxq               173 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz,
rxq               183 drivers/net/ethernet/huawei/hinic/hinic_rx.c static void rx_free_skb(struct hinic_rxq *rxq, struct sk_buff *skb,
rxq               186 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	rx_unmap_skb(rxq, dma_addr);
rxq               196 drivers/net/ethernet/huawei/hinic/hinic_rx.c static int rx_alloc_pkts(struct hinic_rxq *rxq)
rxq               198 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
rxq               207 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
rxq               214 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		skb = rx_alloc_skb(rxq, &dma_addr);
rxq               216 drivers/net/ethernet/huawei/hinic/hinic_rx.c 			netdev_err(rxq->netdev, "Failed to alloc Rx skb\n");
rxq               222 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
rxq               225 drivers/net/ethernet/huawei/hinic/hinic_rx.c 			rx_free_skb(rxq, skb, dma_addr);
rxq               229 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);
rxq               231 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb);
rxq               238 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		hinic_rq_update(rxq->rq, prod_idx);
rxq               248 drivers/net/ethernet/huawei/hinic/hinic_rx.c static void free_all_rx_skbs(struct hinic_rxq *rxq)
rxq               250 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_rq *rq = rxq->rq;
rxq               263 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge));
rxq               276 drivers/net/ethernet/huawei/hinic/hinic_rx.c static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb,
rxq               286 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
rxq               291 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);
rxq               293 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));
rxq               326 drivers/net/ethernet/huawei/hinic/hinic_rx.c static int rxq_recv(struct hinic_rxq *rxq, int budget)
rxq               328 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq);
rxq               329 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct net_device *netdev = rxq->netdev;
rxq               331 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_rq *rq = rxq->rq;
rxq               348 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb,
rxq               358 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);
rxq               360 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));
rxq               362 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		rx_csum(rxq, status, skb);
rxq               372 drivers/net/ethernet/huawei/hinic/hinic_rx.c 			num_wqes = rx_recv_jumbo_pkt(rxq, skb, pkt_len -
rxq               388 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		skb->protocol = eth_type_trans(skb, rxq->netdev);
rxq               390 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		napi_gro_receive(&rxq->napi, skb);
rxq               401 drivers/net/ethernet/huawei/hinic/hinic_rx.c 			(u16)(pkt_len >> rxq->rx_buff_shift) +
rxq               402 drivers/net/ethernet/huawei/hinic/hinic_rx.c 			((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
rxq               411 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
rxq               413 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		rx_alloc_pkts(rxq);
rxq               415 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	u64_stats_update_begin(&rxq->rxq_stats.syncp);
rxq               416 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	rxq->rxq_stats.pkts += pkts;
rxq               417 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	rxq->rxq_stats.bytes += rx_bytes;
rxq               418 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	u64_stats_update_end(&rxq->rxq_stats.syncp);
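hinic_rx.c:401-402 above charges each completed packet for the number of receive buffers it spanned: a divide-and-round-up by the buffer size, done with a shift because the size is a power of two (rx_buff_shift = ilog2(HINIC_RX_BUF_SZ), line 527). As a standalone helper:

    /*
     * Buffers of size (1 << shift) needed to hold pkt_len bytes;
     * equivalent to DIV_ROUND_UP(pkt_len, 1 << shift).
     */
    static unsigned int pkt_len_to_bufs(unsigned int pkt_len, unsigned int shift)
    {
            unsigned int buf_len = 1U << shift;

            return (pkt_len >> shift) + ((pkt_len & (buf_len - 1)) ? 1 : 0);
    }

    /* shift = 11 (2 KiB buffers): 1500 -> 1, 2048 -> 1, 2049 -> 2 */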
rxq               425 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi);
rxq               426 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
rxq               427 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_rq *rq = rxq->rq;
rxq               430 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	pkts = rxq_recv(rxq, budget);
rxq               442 drivers/net/ethernet/huawei/hinic/hinic_rx.c static void rx_add_napi(struct hinic_rxq *rxq)
rxq               444 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
rxq               446 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	netif_napi_add(rxq->netdev, &rxq->napi, rx_poll, nic_dev->rx_weight);
rxq               447 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	napi_enable(&rxq->napi);
rxq               450 drivers/net/ethernet/huawei/hinic/hinic_rx.c static void rx_del_napi(struct hinic_rxq *rxq)
rxq               452 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	napi_disable(&rxq->napi);
rxq               453 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	netif_napi_del(&rxq->napi);
rxq               458 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_rxq *rxq = (struct hinic_rxq *)data;
rxq               459 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_rq *rq = rxq->rq;
rxq               463 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	nic_dev = netdev_priv(rxq->netdev);
rxq               468 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	nic_dev = netdev_priv(rxq->netdev);
rxq               471 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	napi_schedule(&rxq->napi);
rxq               475 drivers/net/ethernet/huawei/hinic/hinic_rx.c static int rx_request_irq(struct hinic_rxq *rxq)
rxq               477 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
rxq               479 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_rq *rq = rxq->rq;
rxq               483 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	rx_add_napi(rxq);
rxq               490 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);
rxq               492 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		rx_del_napi(rxq);
rxq               501 drivers/net/ethernet/huawei/hinic/hinic_rx.c static void rx_free_irq(struct hinic_rxq *rxq)
rxq               503 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_rq *rq = rxq->rq;
rxq               506 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	free_irq(rq->irq, rxq);
rxq               507 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	rx_del_napi(rxq);
rxq               518 drivers/net/ethernet/huawei/hinic/hinic_rx.c int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
rxq               524 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	rxq->netdev = netdev;
rxq               525 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	rxq->rq = rq;
rxq               526 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	rxq->buf_len = HINIC_RX_BUF_SZ;
rxq               527 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	rxq->rx_buff_shift = ilog2(HINIC_RX_BUF_SZ);
rxq               529 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	rxq_stats_init(rxq);
rxq               531 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	rxq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL,
rxq               533 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	if (!rxq->irq_name)
rxq               536 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	pkts = rx_alloc_pkts(rxq);
rxq               542 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	err = rx_request_irq(rxq);
rxq               552 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	free_all_rx_skbs(rxq);
rxq               553 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	devm_kfree(&netdev->dev, rxq->irq_name);
rxq               561 drivers/net/ethernet/huawei/hinic/hinic_rx.c void hinic_clean_rxq(struct hinic_rxq *rxq)
rxq               563 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct net_device *netdev = rxq->netdev;
rxq               565 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	rx_free_irq(rxq);
rxq               567 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	free_all_rx_skbs(rxq);
rxq               568 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	devm_kfree(&netdev->dev, rxq->irq_name);
rxq                44 drivers/net/ethernet/huawei/hinic/hinic_rx.h void hinic_rxq_clean_stats(struct hinic_rxq *rxq);
rxq                46 drivers/net/ethernet/huawei/hinic/hinic_rx.h void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats);
rxq                48 drivers/net/ethernet/huawei/hinic/hinic_rx.h int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
rxq                51 drivers/net/ethernet/huawei/hinic/hinic_rx.h void hinic_clean_rxq(struct hinic_rxq *rxq);
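The writer side of hinic's per-queue statistics is visible verbatim at hinic_rx.c:415-418 above: counter bumps bracketed by u64_stats_update_begin()/u64_stats_update_end(). A reader such as hinic_rxq_get_stats() conventionally pairs that with a fetch/retry loop so 64-bit counters are never read torn on 32-bit machines. A sketch of that conventional counterpart (not the driver's exact code, and the fetch helpers have also shipped in _irq variants across kernel versions):

    #include <linux/u64_stats_sync.h>

    struct rxq_stats {
            u64 pkts, bytes;
            struct u64_stats_sync syncp;
    };

    static void rxq_stats_read(const struct rxq_stats *s, u64 *pkts, u64 *bytes)
    {
            unsigned int start;

            do {
                    start  = u64_stats_fetch_begin(&s->syncp);
                    *pkts  = s->pkts;
                    *bytes = s->bytes;
            } while (u64_stats_fetch_retry(&s->syncp, start));
    }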
rxq                40 drivers/net/ethernet/ibm/ibmveth.h #define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \
rxq                41 drivers/net/ethernet/ibm/ibmveth.h   plpar_hcall_norets(H_REGISTER_LOGICAL_LAN, ua, buflst, rxq, fltlst, mac)
rxq              2338 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	xdp.rxq = &rx_ring->xdp_rxq;
rxq              2138 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c 			    qpi->rxq.vsi_id != qci->vsi_id ||
rxq              2139 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c 			    qpi->rxq.queue_id != vsi_queue_id) {
rxq              2154 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c 					     &qpi->rxq) ||
rxq               539 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	xdp.rxq = &rx_ring->xdp_rxq;
rxq               276 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c 		vqpi->rxq.vsi_id = vqci->vsi_id;
rxq               277 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c 		vqpi->rxq.queue_id = i;
rxq               278 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c 		vqpi->rxq.ring_len = adapter->rx_rings[i].count;
rxq               279 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c 		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
rxq               280 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c 		vqpi->rxq.max_pkt_size = max_frame;
rxq               281 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c 		vqpi->rxq.databuffer_size =
rxq              1957 drivers/net/ethernet/intel/ice/ice_lib.c ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
rxq              1960 drivers/net/ethernet/intel/ice/ice_lib.c ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
rxq              1972 drivers/net/ethernet/intel/ice/ice_lib.c 	wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
rxq              1988 drivers/net/ethernet/intel/ice/ice_lib.c 	u32 txq = 0, rxq = 0;
rxq              2018 drivers/net/ethernet/intel/ice/ice_lib.c 			ice_cfg_rxq_interrupt(vsi, rxq, reg_idx,
rxq              2020 drivers/net/ethernet/intel/ice/ice_lib.c 			rxq++;
rxq              2682 drivers/net/ethernet/intel/ice/ice_lib.c 	u32 rxq = 0;
rxq              2697 drivers/net/ethernet/intel/ice/ice_lib.c 			wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
rxq              2698 drivers/net/ethernet/intel/ice/ice_lib.c 			rxq++;
rxq                41 drivers/net/ethernet/intel/ice/ice_lib.h ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
rxq              2268 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 		    qpi->rxq.vsi_id != qci->vsi_id ||
rxq              2269 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 		    qpi->rxq.queue_id != qpi->txq.queue_id ||
rxq              2272 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 		    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
rxq              2285 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 		if (qpi->rxq.ring_len > 0) {
rxq              2287 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 			vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
rxq              2288 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 			vsi->rx_rings[i]->count = qpi->rxq.ring_len;
rxq              2290 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 			if (qpi->rxq.databuffer_size != 0 &&
rxq              2291 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 			    (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
rxq              2292 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 			     qpi->rxq.databuffer_size < 1024)) {
rxq              2296 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 			vsi->rx_buf_len = qpi->rxq.databuffer_size;
rxq              2298 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 			if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
rxq              2299 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 			    qpi->rxq.max_pkt_size < 64) {
rxq              2305 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 		vsi->max_frame = qpi->rxq.max_pkt_size;
rxq              2291 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	xdp.rxq = &rx_ring->xdp_rxq;
rxq               439 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	xdp.rxq = &rx_ring->xdp_rxq;
rxq              1126 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	xdp.rxq = &rx_ring->xdp_rxq;
rxq               398 drivers/net/ethernet/marvell/mv643xx_eth.c 	struct rx_queue rxq[8];
rxq               440 drivers/net/ethernet/marvell/mv643xx_eth.c static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
rxq               442 drivers/net/ethernet/marvell/mv643xx_eth.c 	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
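mv643xx_eth.c:440-442 above recovers the parent private struct from a pointer to one element of its embedded rxq[8] array: container_of(rxq, ..., rxq[rxq->index]) subtracts the offset of that particular slot. The trick in plain C (container_of is the usual offsetof subtraction; note that a runtime array index inside offsetof is a GCC extension the kernel relies on in this exact idiom):

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rx_queue {
            int index;          /* this element's position in the array */
    };

    struct eth_private {
            long other_state;
            struct rx_queue rxq[8];
    };

    static struct eth_private *rxq_to_priv(struct rx_queue *rxq)
    {
            /* valid for any element, provided rxq->index is truthful */
            return container_of(rxq, struct eth_private, rxq[rxq->index]);
    }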
rxq               450 drivers/net/ethernet/marvell/mv643xx_eth.c static void rxq_enable(struct rx_queue *rxq)
rxq               452 drivers/net/ethernet/marvell/mv643xx_eth.c 	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
rxq               453 drivers/net/ethernet/marvell/mv643xx_eth.c 	wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
rxq               456 drivers/net/ethernet/marvell/mv643xx_eth.c static void rxq_disable(struct rx_queue *rxq)
rxq               458 drivers/net/ethernet/marvell/mv643xx_eth.c 	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
rxq               459 drivers/net/ethernet/marvell/mv643xx_eth.c 	u8 mask = 1 << rxq->index;
rxq               505 drivers/net/ethernet/marvell/mv643xx_eth.c static int rxq_process(struct rx_queue *rxq, int budget)
rxq               507 drivers/net/ethernet/marvell/mv643xx_eth.c 	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
rxq               512 drivers/net/ethernet/marvell/mv643xx_eth.c 	while (rx < budget && rxq->rx_desc_count) {
rxq               518 drivers/net/ethernet/marvell/mv643xx_eth.c 		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];
rxq               525 drivers/net/ethernet/marvell/mv643xx_eth.c 		skb = rxq->rx_skb[rxq->rx_curr_desc];
rxq               526 drivers/net/ethernet/marvell/mv643xx_eth.c 		rxq->rx_skb[rxq->rx_curr_desc] = NULL;
rxq               528 drivers/net/ethernet/marvell/mv643xx_eth.c 		rxq->rx_curr_desc++;
rxq               529 drivers/net/ethernet/marvell/mv643xx_eth.c 		if (rxq->rx_curr_desc == rxq->rx_ring_size)
rxq               530 drivers/net/ethernet/marvell/mv643xx_eth.c 			rxq->rx_curr_desc = 0;
rxq               534 drivers/net/ethernet/marvell/mv643xx_eth.c 		rxq->rx_desc_count--;
rxq               537 drivers/net/ethernet/marvell/mv643xx_eth.c 		mp->work_rx_refill |= 1 << rxq->index;
rxq               592 drivers/net/ethernet/marvell/mv643xx_eth.c 		mp->work_rx &= ~(1 << rxq->index);
rxq               597 drivers/net/ethernet/marvell/mv643xx_eth.c static int rxq_refill(struct rx_queue *rxq, int budget)
rxq               599 drivers/net/ethernet/marvell/mv643xx_eth.c 	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
rxq               603 drivers/net/ethernet/marvell/mv643xx_eth.c 	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
rxq               620 drivers/net/ethernet/marvell/mv643xx_eth.c 		rxq->rx_desc_count++;
rxq               622 drivers/net/ethernet/marvell/mv643xx_eth.c 		rx = rxq->rx_used_desc++;
rxq               623 drivers/net/ethernet/marvell/mv643xx_eth.c 		if (rxq->rx_used_desc == rxq->rx_ring_size)
rxq               624 drivers/net/ethernet/marvell/mv643xx_eth.c 			rxq->rx_used_desc = 0;
rxq               626 drivers/net/ethernet/marvell/mv643xx_eth.c 		rx_desc = rxq->rx_desc_area + rx;
rxq               633 drivers/net/ethernet/marvell/mv643xx_eth.c 		rxq->rx_skb[rx] = skb;
rxq               647 drivers/net/ethernet/marvell/mv643xx_eth.c 		mp->work_rx_refill &= ~(1 << rxq->index);
rxq              1934 drivers/net/ethernet/marvell/mv643xx_eth.c 	struct rx_queue *rxq = mp->rxq + index;
rxq              1939 drivers/net/ethernet/marvell/mv643xx_eth.c 	rxq->index = index;
rxq              1941 drivers/net/ethernet/marvell/mv643xx_eth.c 	rxq->rx_ring_size = mp->rx_ring_size;
rxq              1943 drivers/net/ethernet/marvell/mv643xx_eth.c 	rxq->rx_desc_count = 0;
rxq              1944 drivers/net/ethernet/marvell/mv643xx_eth.c 	rxq->rx_curr_desc = 0;
rxq              1945 drivers/net/ethernet/marvell/mv643xx_eth.c 	rxq->rx_used_desc = 0;
rxq              1947 drivers/net/ethernet/marvell/mv643xx_eth.c 	size = rxq->rx_ring_size * sizeof(struct rx_desc);
rxq              1950 drivers/net/ethernet/marvell/mv643xx_eth.c 		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
rxq              1952 drivers/net/ethernet/marvell/mv643xx_eth.c 		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
rxq              1954 drivers/net/ethernet/marvell/mv643xx_eth.c 		rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
rxq              1955 drivers/net/ethernet/marvell/mv643xx_eth.c 						       size, &rxq->rx_desc_dma,
rxq              1959 drivers/net/ethernet/marvell/mv643xx_eth.c 	if (rxq->rx_desc_area == NULL) {
rxq              1964 drivers/net/ethernet/marvell/mv643xx_eth.c 	memset(rxq->rx_desc_area, 0, size);
rxq              1966 drivers/net/ethernet/marvell/mv643xx_eth.c 	rxq->rx_desc_area_size = size;
rxq              1967 drivers/net/ethernet/marvell/mv643xx_eth.c 	rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
rxq              1969 drivers/net/ethernet/marvell/mv643xx_eth.c 	if (rxq->rx_skb == NULL)
rxq              1972 drivers/net/ethernet/marvell/mv643xx_eth.c 	rx_desc = rxq->rx_desc_area;
rxq              1973 drivers/net/ethernet/marvell/mv643xx_eth.c 	for (i = 0; i < rxq->rx_ring_size; i++) {
rxq              1977 drivers/net/ethernet/marvell/mv643xx_eth.c 		if (nexti == rxq->rx_ring_size)
rxq              1980 drivers/net/ethernet/marvell/mv643xx_eth.c 		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
rxq              1989 drivers/net/ethernet/marvell/mv643xx_eth.c 		iounmap(rxq->rx_desc_area);
rxq              1992 drivers/net/ethernet/marvell/mv643xx_eth.c 				  rxq->rx_desc_area,
rxq              1993 drivers/net/ethernet/marvell/mv643xx_eth.c 				  rxq->rx_desc_dma);
rxq              1999 drivers/net/ethernet/marvell/mv643xx_eth.c static void rxq_deinit(struct rx_queue *rxq)
rxq              2001 drivers/net/ethernet/marvell/mv643xx_eth.c 	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
rxq              2004 drivers/net/ethernet/marvell/mv643xx_eth.c 	rxq_disable(rxq);
rxq              2006 drivers/net/ethernet/marvell/mv643xx_eth.c 	for (i = 0; i < rxq->rx_ring_size; i++) {
rxq              2007 drivers/net/ethernet/marvell/mv643xx_eth.c 		if (rxq->rx_skb[i]) {
rxq              2008 drivers/net/ethernet/marvell/mv643xx_eth.c 			dev_consume_skb_any(rxq->rx_skb[i]);
rxq              2009 drivers/net/ethernet/marvell/mv643xx_eth.c 			rxq->rx_desc_count--;
rxq              2013 drivers/net/ethernet/marvell/mv643xx_eth.c 	if (rxq->rx_desc_count) {
rxq              2015 drivers/net/ethernet/marvell/mv643xx_eth.c 			   rxq->rx_desc_count);
rxq              2018 drivers/net/ethernet/marvell/mv643xx_eth.c 	if (rxq->index == 0 &&
rxq              2019 drivers/net/ethernet/marvell/mv643xx_eth.c 	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
rxq              2020 drivers/net/ethernet/marvell/mv643xx_eth.c 		iounmap(rxq->rx_desc_area);
rxq              2022 drivers/net/ethernet/marvell/mv643xx_eth.c 		dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
rxq              2023 drivers/net/ethernet/marvell/mv643xx_eth.c 				  rxq->rx_desc_area, rxq->rx_desc_dma);
rxq              2025 drivers/net/ethernet/marvell/mv643xx_eth.c 	kfree(rxq->rx_skb);
rxq              2287 drivers/net/ethernet/marvell/mv643xx_eth.c 			work_done += rxq_process(mp->rxq + queue, work_tbd);
rxq              2289 drivers/net/ethernet/marvell/mv643xx_eth.c 			work_done += rxq_refill(mp->rxq + queue, work_tbd);
rxq              2377 drivers/net/ethernet/marvell/mv643xx_eth.c 		struct rx_queue *rxq = mp->rxq + i;
rxq              2380 drivers/net/ethernet/marvell/mv643xx_eth.c 		addr = (u32)rxq->rx_desc_dma;
rxq              2381 drivers/net/ethernet/marvell/mv643xx_eth.c 		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
rxq              2384 drivers/net/ethernet/marvell/mv643xx_eth.c 		rxq_enable(rxq);
rxq              2443 drivers/net/ethernet/marvell/mv643xx_eth.c 				rxq_deinit(mp->rxq + i);
rxq              2447 drivers/net/ethernet/marvell/mv643xx_eth.c 		rxq_refill(mp->rxq + i, INT_MAX);
rxq              2477 drivers/net/ethernet/marvell/mv643xx_eth.c 		rxq_deinit(mp->rxq + i);
rxq              2490 drivers/net/ethernet/marvell/mv643xx_eth.c 		rxq_disable(mp->rxq + i);
rxq              2534 drivers/net/ethernet/marvell/mv643xx_eth.c 		rxq_deinit(mp->rxq + i);
rxq               125 drivers/net/ethernet/marvell/mvneta.c #define      MVNETA_CPU_RXQ_ACCESS(rxq)		 BIT(rxq)
rxq               753 drivers/net/ethernet/marvell/mvneta.c 					  struct mvneta_rx_queue *rxq,
rxq               760 drivers/net/ethernet/marvell/mvneta.c 		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
rxq               766 drivers/net/ethernet/marvell/mvneta.c 	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
rxq               772 drivers/net/ethernet/marvell/mvneta.c 					struct mvneta_rx_queue *rxq)
rxq               776 drivers/net/ethernet/marvell/mvneta.c 	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
rxq               784 drivers/net/ethernet/marvell/mvneta.c 				       struct mvneta_rx_queue *rxq,
rxq               792 drivers/net/ethernet/marvell/mvneta.c 		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
rxq               812 drivers/net/ethernet/marvell/mvneta.c 		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
rxq               818 drivers/net/ethernet/marvell/mvneta.c mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
rxq               820 drivers/net/ethernet/marvell/mvneta.c 	int rx_desc = rxq->next_desc_to_proc;
rxq               822 drivers/net/ethernet/marvell/mvneta.c 	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
rxq               823 drivers/net/ethernet/marvell/mvneta.c 	prefetch(rxq->descs + rxq->next_desc_to_proc);
rxq               824 drivers/net/ethernet/marvell/mvneta.c 	return rxq->descs + rx_desc;
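mvneta.c:818-824 above is the common "hand out the current descriptor, pre-advance the cursor, prefetch the next" helper. Assuming MVNETA_QUEUE_NEXT_DESC() is the usual wrap-to-zero increment (an assumption; the macro body is not among the hits), the shape in standalone C:

    struct rx_desc {
            unsigned char raw[32];       /* stand-in descriptor layout */
    };

    struct rx_queue {
            struct rx_desc *descs;
            int next_desc_to_proc;
            int last_desc;               /* == ring size - 1 */
    };

    static struct rx_desc *rxq_next_desc_get(struct rx_queue *q)
    {
            int cur = q->next_desc_to_proc;

            /* wrap-to-zero advance, mirroring MVNETA_QUEUE_NEXT_DESC() */
            q->next_desc_to_proc = (cur == q->last_desc) ? 0 : cur + 1;
            /* the driver additionally prefetches descs[next_desc_to_proc] */
            return &q->descs[cur];
    }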
rxq               842 drivers/net/ethernet/marvell/mvneta.c 				  struct mvneta_rx_queue *rxq,
rxq               847 drivers/net/ethernet/marvell/mvneta.c 	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
rxq               852 drivers/net/ethernet/marvell/mvneta.c 	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
rxq               899 drivers/net/ethernet/marvell/mvneta.c 				    struct mvneta_rx_queue *rxq,
rxq               904 drivers/net/ethernet/marvell/mvneta.c 	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
rxq               909 drivers/net/ethernet/marvell/mvneta.c 	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
rxq               914 drivers/net/ethernet/marvell/mvneta.c 				  struct mvneta_rx_queue *rxq)
rxq               918 drivers/net/ethernet/marvell/mvneta.c 	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
rxq               920 drivers/net/ethernet/marvell/mvneta.c 	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
rxq               925 drivers/net/ethernet/marvell/mvneta.c 				 struct mvneta_rx_queue *rxq)
rxq               929 drivers/net/ethernet/marvell/mvneta.c 	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
rxq               931 drivers/net/ethernet/marvell/mvneta.c 	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
rxq               936 drivers/net/ethernet/marvell/mvneta.c 				     struct mvneta_rx_queue *rxq)
rxq               940 drivers/net/ethernet/marvell/mvneta.c 	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
rxq               944 drivers/net/ethernet/marvell/mvneta.c 	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
rxq               949 drivers/net/ethernet/marvell/mvneta.c 				      struct mvneta_rx_queue *rxq)
rxq               953 drivers/net/ethernet/marvell/mvneta.c 	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
rxq               957 drivers/net/ethernet/marvell/mvneta.c 	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
rxq              1164 drivers/net/ethernet/marvell/mvneta.c 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
rxq              1166 drivers/net/ethernet/marvell/mvneta.c 		if (rxq->descs)
rxq              1393 drivers/net/ethernet/marvell/mvneta.c 		int rxq, txq;
rxq              1395 drivers/net/ethernet/marvell/mvneta.c 			for (rxq = 0; rxq < rxq_number; rxq++)
rxq              1396 drivers/net/ethernet/marvell/mvneta.c 				if ((rxq % max_cpu) == cpu)
rxq              1397 drivers/net/ethernet/marvell/mvneta.c 					rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
rxq              1581 drivers/net/ethernet/marvell/mvneta.c 				    struct mvneta_rx_queue *rxq, u32 value)
rxq              1583 drivers/net/ethernet/marvell/mvneta.c 	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
rxq              1591 drivers/net/ethernet/marvell/mvneta.c 				    struct mvneta_rx_queue *rxq, u32 value)
rxq              1599 drivers/net/ethernet/marvell/mvneta.c 	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
rxq              1619 drivers/net/ethernet/marvell/mvneta.c 				struct mvneta_rx_queue *rxq)
rxq              1624 drivers/net/ethernet/marvell/mvneta.c 	i = rx_desc - rxq->descs;
rxq              1625 drivers/net/ethernet/marvell/mvneta.c 	rxq->buf_virt_addr[i] = virt_addr;
rxq              1823 drivers/net/ethernet/marvell/mvneta.c 			    struct mvneta_rx_queue *rxq,
rxq              1842 drivers/net/ethernet/marvell/mvneta.c 	mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
rxq              1879 drivers/net/ethernet/marvell/mvneta.c 				 struct mvneta_rx_queue *rxq)
rxq              1883 drivers/net/ethernet/marvell/mvneta.c 	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
rxq              1885 drivers/net/ethernet/marvell/mvneta.c 		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
rxq              1890 drivers/net/ethernet/marvell/mvneta.c 						  mvneta_rxq_next_desc_get(rxq);
rxq              1902 drivers/net/ethernet/marvell/mvneta.c 	for (i = 0; i < rxq->size; i++) {
rxq              1903 drivers/net/ethernet/marvell/mvneta.c 		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
rxq              1904 drivers/net/ethernet/marvell/mvneta.c 		void *data = rxq->buf_virt_addr[i];
rxq              1915 drivers/net/ethernet/marvell/mvneta.c int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
rxq              1918 drivers/net/ethernet/marvell/mvneta.c 	int curr_desc = rxq->first_to_refill;
rxq              1921 drivers/net/ethernet/marvell/mvneta.c 	for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
rxq              1922 drivers/net/ethernet/marvell/mvneta.c 		rx_desc = rxq->descs + curr_desc;
rxq              1924 drivers/net/ethernet/marvell/mvneta.c 			if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
rxq              1926 drivers/net/ethernet/marvell/mvneta.c 				       rxq->id, i, rxq->refill_num);
rxq              1927 drivers/net/ethernet/marvell/mvneta.c 				rxq->refill_err++;
rxq              1931 drivers/net/ethernet/marvell/mvneta.c 		curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
rxq              1933 drivers/net/ethernet/marvell/mvneta.c 	rxq->refill_num -= i;
rxq              1934 drivers/net/ethernet/marvell/mvneta.c 	rxq->first_to_refill = curr_desc;
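mvneta_rx_refill_queue() (mvneta.c:1915-1934 above) refills at most 64 descriptors per call, stops at the first allocation failure, and remembers where to resume through first_to_refill/refill_num. The bookkeeping abstracted into a sketch (try_refill() is a hypothetical stand-in for mvneta_rx_refill()):

    struct refill_state {
            int first_to_refill;    /* next descriptor index to fill */
            int refill_num;         /* descriptors still owed to the ring */
            int size;               /* ring size */
    };

    static int try_refill(int desc_idx);    /* nonzero mimics alloc failure */

    static int refill_queue(struct refill_state *st)
    {
            int i, cur = st->first_to_refill;

            for (i = 0; i < st->refill_num && i < 64; i++) {
                    if (try_refill(cur))
                            break;          /* resume from cur next time */
                    cur = (cur == st->size - 1) ? 0 : cur + 1;
            }
            st->refill_num -= i;            /* i descriptors were refilled */
            st->first_to_refill = cur;
            return i;
    }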
rxq              1942 drivers/net/ethernet/marvell/mvneta.c 			  struct mvneta_rx_queue *rxq)
rxq              1951 drivers/net/ethernet/marvell/mvneta.c 	rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
rxq              1956 drivers/net/ethernet/marvell/mvneta.c 		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
rxq              1964 drivers/net/ethernet/marvell/mvneta.c 		index = rx_desc - rxq->descs;
rxq              1965 drivers/net/ethernet/marvell/mvneta.c 		page = (struct page *)rxq->buf_virt_addr[index];
rxq              1973 drivers/net/ethernet/marvell/mvneta.c 		rxq->refill_num++;
rxq              1987 drivers/net/ethernet/marvell/mvneta.c 			rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size);
rxq              1988 drivers/net/ethernet/marvell/mvneta.c 			if (unlikely(!rxq->skb)) {
rxq              1993 drivers/net/ethernet/marvell/mvneta.c 					   rxq->id);
rxq              1995 drivers/net/ethernet/marvell/mvneta.c 				rxq->skb_alloc_err++;
rxq              2005 drivers/net/ethernet/marvell/mvneta.c 			memcpy(rxq->skb->data, data + MVNETA_MH_SIZE,
rxq              2007 drivers/net/ethernet/marvell/mvneta.c 			skb_put(rxq->skb, copy_size);
rxq              2008 drivers/net/ethernet/marvell/mvneta.c 			rxq->left_size = rx_bytes - copy_size;
rxq              2010 drivers/net/ethernet/marvell/mvneta.c 			mvneta_rx_csum(pp, rx_status, rxq->skb);
rxq              2011 drivers/net/ethernet/marvell/mvneta.c 			if (rxq->left_size == 0) {
rxq              2026 drivers/net/ethernet/marvell/mvneta.c 				frag_size = min(rxq->left_size,
rxq              2028 drivers/net/ethernet/marvell/mvneta.c 				skb_add_rx_frag(rxq->skb, frag_num, page,
rxq              2033 drivers/net/ethernet/marvell/mvneta.c 				rxq->left_size -= frag_size;
rxq              2037 drivers/net/ethernet/marvell/mvneta.c 			if (unlikely(!rxq->skb)) {
rxq              2042 drivers/net/ethernet/marvell/mvneta.c 			if (!rxq->left_size) {
rxq              2054 drivers/net/ethernet/marvell/mvneta.c 				frag_num = skb_shinfo(rxq->skb)->nr_frags;
rxq              2056 drivers/net/ethernet/marvell/mvneta.c 				frag_size = min(rxq->left_size,
rxq              2058 drivers/net/ethernet/marvell/mvneta.c 				skb_add_rx_frag(rxq->skb, frag_num, page,
rxq              2065 drivers/net/ethernet/marvell/mvneta.c 				rxq->left_size -= frag_size;
rxq              2073 drivers/net/ethernet/marvell/mvneta.c 		if (rxq->left_size) {
rxq              2075 drivers/net/ethernet/marvell/mvneta.c 			       rxq->left_size);
rxq              2076 drivers/net/ethernet/marvell/mvneta.c 			dev_kfree_skb_any(rxq->skb);
rxq              2077 drivers/net/ethernet/marvell/mvneta.c 			rxq->left_size = 0;
rxq              2078 drivers/net/ethernet/marvell/mvneta.c 			rxq->skb = NULL;
rxq              2082 drivers/net/ethernet/marvell/mvneta.c 		rcvd_bytes += rxq->skb->len;
rxq              2085 drivers/net/ethernet/marvell/mvneta.c 		rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
rxq              2087 drivers/net/ethernet/marvell/mvneta.c 		napi_gro_receive(napi, rxq->skb);
rxq              2090 drivers/net/ethernet/marvell/mvneta.c 		rxq->skb = NULL;
rxq              2091 drivers/net/ethernet/marvell/mvneta.c 		rxq->left_size = 0;
rxq              2104 drivers/net/ethernet/marvell/mvneta.c 	refill = mvneta_rx_refill_queue(pp, rxq);
rxq              2107 drivers/net/ethernet/marvell/mvneta.c 	mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
rxq              2115 drivers/net/ethernet/marvell/mvneta.c 			  struct mvneta_rx_queue *rxq)
rxq              2123 drivers/net/ethernet/marvell/mvneta.c 	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
rxq              2132 drivers/net/ethernet/marvell/mvneta.c 		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
rxq              2194 drivers/net/ethernet/marvell/mvneta.c 			rxq->refill_err++;
rxq              2234 drivers/net/ethernet/marvell/mvneta.c 	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
rxq              2848 drivers/net/ethernet/marvell/mvneta.c static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
rxq              2854 drivers/net/ethernet/marvell/mvneta.c 		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
rxq              2855 drivers/net/ethernet/marvell/mvneta.c 		if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
rxq              2859 drivers/net/ethernet/marvell/mvneta.c 				   __func__, rxq->id, i, num);
rxq              2867 drivers/net/ethernet/marvell/mvneta.c 	mvneta_rxq_non_occup_desc_add(pp, rxq, i);
rxq              2894 drivers/net/ethernet/marvell/mvneta.c 			      struct mvneta_rx_queue *rxq)
rxq              2896 drivers/net/ethernet/marvell/mvneta.c 	rxq->size = pp->rx_ring_size;
rxq              2899 drivers/net/ethernet/marvell/mvneta.c 	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
rxq              2900 drivers/net/ethernet/marvell/mvneta.c 					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
rxq              2901 drivers/net/ethernet/marvell/mvneta.c 					&rxq->descs_phys, GFP_KERNEL);
rxq              2902 drivers/net/ethernet/marvell/mvneta.c 	if (!rxq->descs)
rxq              2905 drivers/net/ethernet/marvell/mvneta.c 	rxq->last_desc = rxq->size - 1;
rxq              2911 drivers/net/ethernet/marvell/mvneta.c 			       struct mvneta_rx_queue *rxq)
rxq              2914 drivers/net/ethernet/marvell/mvneta.c 	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
rxq              2915 drivers/net/ethernet/marvell/mvneta.c 	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
rxq              2918 drivers/net/ethernet/marvell/mvneta.c 	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
rxq              2919 drivers/net/ethernet/marvell/mvneta.c 	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
rxq              2923 drivers/net/ethernet/marvell/mvneta.c 		mvneta_rxq_offset_set(pp, rxq, 0);
rxq              2924 drivers/net/ethernet/marvell/mvneta.c 		mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
rxq              2927 drivers/net/ethernet/marvell/mvneta.c 		mvneta_rxq_bm_disable(pp, rxq);
rxq              2928 drivers/net/ethernet/marvell/mvneta.c 		mvneta_rxq_fill(pp, rxq, rxq->size);
rxq              2931 drivers/net/ethernet/marvell/mvneta.c 		mvneta_rxq_offset_set(pp, rxq,
rxq              2934 drivers/net/ethernet/marvell/mvneta.c 		mvneta_rxq_bm_enable(pp, rxq);
rxq              2936 drivers/net/ethernet/marvell/mvneta.c 		mvneta_rxq_long_pool_set(pp, rxq);
rxq              2937 drivers/net/ethernet/marvell/mvneta.c 		mvneta_rxq_short_pool_set(pp, rxq);
rxq              2938 drivers/net/ethernet/marvell/mvneta.c 		mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
rxq              2944 drivers/net/ethernet/marvell/mvneta.c 			   struct mvneta_rx_queue *rxq)
rxq              2949 drivers/net/ethernet/marvell/mvneta.c 	ret = mvneta_rxq_sw_init(pp, rxq);
rxq              2953 drivers/net/ethernet/marvell/mvneta.c 	mvneta_rxq_hw_init(pp, rxq);
rxq              2960 drivers/net/ethernet/marvell/mvneta.c 			      struct mvneta_rx_queue *rxq)
rxq              2962 drivers/net/ethernet/marvell/mvneta.c 	mvneta_rxq_drop_pkts(pp, rxq);
rxq              2964 drivers/net/ethernet/marvell/mvneta.c 	if (rxq->skb)
rxq              2965 drivers/net/ethernet/marvell/mvneta.c 		dev_kfree_skb_any(rxq->skb);
rxq              2967 drivers/net/ethernet/marvell/mvneta.c 	if (rxq->descs)
rxq              2969 drivers/net/ethernet/marvell/mvneta.c 				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
rxq              2970 drivers/net/ethernet/marvell/mvneta.c 				  rxq->descs,
rxq              2971 drivers/net/ethernet/marvell/mvneta.c 				  rxq->descs_phys);
rxq              2973 drivers/net/ethernet/marvell/mvneta.c 	rxq->descs             = NULL;
rxq              2974 drivers/net/ethernet/marvell/mvneta.c 	rxq->last_desc         = 0;
rxq              2975 drivers/net/ethernet/marvell/mvneta.c 	rxq->next_desc_to_proc = 0;
rxq              2976 drivers/net/ethernet/marvell/mvneta.c 	rxq->descs_phys        = 0;
rxq              2977 drivers/net/ethernet/marvell/mvneta.c 	rxq->first_to_refill   = 0;
rxq              2978 drivers/net/ethernet/marvell/mvneta.c 	rxq->refill_num        = 0;
rxq              2979 drivers/net/ethernet/marvell/mvneta.c 	rxq->skb               = NULL;
rxq              2980 drivers/net/ethernet/marvell/mvneta.c 	rxq->left_size         = 0;
rxq              3693 drivers/net/ethernet/marvell/mvneta.c 		int rxq;
rxq              3695 drivers/net/ethernet/marvell/mvneta.c 		for (rxq = 0; rxq < rxq_number; rxq++)
rxq              3696 drivers/net/ethernet/marvell/mvneta.c 			if ((rxq % max_cpu) == cpu)
rxq              3697 drivers/net/ethernet/marvell/mvneta.c 				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
rxq              3986 drivers/net/ethernet/marvell/mvneta.c 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
rxq              3987 drivers/net/ethernet/marvell/mvneta.c 		rxq->time_coal = c->rx_coalesce_usecs;
rxq              3988 drivers/net/ethernet/marvell/mvneta.c 		rxq->pkts_coal = c->rx_max_coalesced_frames;
rxq              3989 drivers/net/ethernet/marvell/mvneta.c 		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
rxq              3990 drivers/net/ethernet/marvell/mvneta.c 		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
rxq              4401 drivers/net/ethernet/marvell/mvneta.c 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
rxq              4402 drivers/net/ethernet/marvell/mvneta.c 		rxq->id = queue;
rxq              4403 drivers/net/ethernet/marvell/mvneta.c 		rxq->size = pp->rx_ring_size;
rxq              4404 drivers/net/ethernet/marvell/mvneta.c 		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
rxq              4405 drivers/net/ethernet/marvell/mvneta.c 		rxq->time_coal = MVNETA_RX_COAL_USEC;
rxq              4406 drivers/net/ethernet/marvell/mvneta.c 		rxq->buf_virt_addr
rxq              4408 drivers/net/ethernet/marvell/mvneta.c 					     rxq->size,
rxq              4409 drivers/net/ethernet/marvell/mvneta.c 					     sizeof(*rxq->buf_virt_addr),
rxq              4411 drivers/net/ethernet/marvell/mvneta.c 		if (!rxq->buf_virt_addr)
rxq              4782 drivers/net/ethernet/marvell/mvneta.c 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
rxq              4784 drivers/net/ethernet/marvell/mvneta.c 		mvneta_rxq_drop_pkts(pp, rxq);
rxq              4833 drivers/net/ethernet/marvell/mvneta.c 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
rxq              4835 drivers/net/ethernet/marvell/mvneta.c 		rxq->next_desc_to_proc = 0;
rxq              4836 drivers/net/ethernet/marvell/mvneta.c 		mvneta_rxq_hw_init(pp, rxq);
rxq                33 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
rxq               160 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
rxq               163 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
rxq               244 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
rxq              1444 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
rxq              1452 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c 	cpu = rxq / nrxqs;
rxq              1460 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c 	return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
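mvpp2_cls.c:1444-1460 above maps an RSS indirection-table entry to a global rxq id, spreading entries over the port's queues in blocks (the cpu = rxq / nrxqs hit shows the same table also encodes a CPU spread). The quoted expression as a standalone function, with a worked example:

    /* global rxq id for indirection-table entry 'entry' */
    static unsigned int rxfh_indir(unsigned int first_rxq, unsigned int nrxqs,
                                   unsigned int cpus, unsigned int entry)
    {
            return first_rxq + ((entry * nrxqs + entry / cpus) % nrxqs);
    }

    /*
     * first_rxq = 0, nrxqs = 4, cpus = 4: entry * nrxqs vanishes mod nrxqs,
     * so entries 0-3 -> queue 0, 4-7 -> queue 1, 8-11 -> queue 2, ...
     */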
rxq               860 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	int rxq;
rxq               883 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		for (rxq = 0; rxq < port->nrxqs; rxq++)
rxq               884 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
rxq               896 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		for (rxq = 0; rxq < port->nrxqs; rxq++)
rxq               897 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			mvpp2_rxq_short_pool_set(port, rxq,
rxq              1865 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
rxq              1867 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	int rx_desc = rxq->next_desc_to_proc;
rxq              1869 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
rxq              1870 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	prefetch(rxq->descs + rxq->next_desc_to_proc);
rxq              1871 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	return rxq->descs + rx_desc;
rxq              2146 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 				   struct mvpp2_rx_queue *rxq)
rxq              2150 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
rxq              2151 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
rxq              2153 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
rxq              2155 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			   rxq->pkts_coal);
rxq              2197 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 				   struct mvpp2_rx_queue *rxq)
rxq              2200 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
rxq              2203 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		rxq->time_coal =
rxq              2207 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
rxq              2210 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
rxq              2352 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			  struct mvpp2_rx_queue *rxq)
rxq              2358 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	rxq->size = port->rx_ring_size;
rxq              2361 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
rxq              2362 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
rxq              2363 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 					&rxq->descs_dma, GFP_KERNEL);
rxq              2364 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	if (!rxq->descs)
rxq              2367 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	rxq->last_desc = rxq->size - 1;
rxq              2370 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
rxq              2374 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
rxq              2376 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		rxq_dma = rxq->descs_dma;
rxq              2378 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
rxq              2380 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
rxq              2385 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
rxq              2388 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	mvpp2_rx_pkts_coal_set(port, rxq);
rxq              2389 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	mvpp2_rx_time_coal_set(port, rxq);
rxq              2392 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
rxq              2399 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 				struct mvpp2_rx_queue *rxq)
rxq              2403 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	rx_received = mvpp2_rxq_received(port, rxq->id);
rxq              2408 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
rxq              2419 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
rxq              2424 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			     struct mvpp2_rx_queue *rxq)
rxq              2428 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	mvpp2_rxq_drop_pkts(port, rxq);
rxq              2430 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	if (rxq->descs)
rxq              2432 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
rxq              2433 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 				  rxq->descs,
rxq              2434 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 				  rxq->descs_dma);
rxq              2436 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	rxq->descs             = NULL;
rxq              2437 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	rxq->last_desc         = 0;
rxq              2438 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	rxq->next_desc_to_proc = 0;
rxq              2439 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	rxq->descs_dma         = 0;
rxq              2444 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
rxq              2446 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
rxq              2919 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		    int rx_todo, struct mvpp2_rx_queue *rxq)
rxq              2928 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	rx_received = mvpp2_rxq_received(port, rxq->id);
rxq              2933 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
rxq              3011 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
rxq              3378 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		struct mvpp2_rx_queue *rxq;
rxq              3380 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		rxq = mvpp2_get_rx_queue(port, cause_rx);
rxq              3381 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		if (!rxq)
rxq              3384 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		count = mvpp2_rx(port, napi, budget, rxq);
rxq              3392 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			cause_rx &= ~(1 << rxq->logic_rxq);
rxq              4046 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		struct mvpp2_rx_queue *rxq = port->rxqs[queue];
rxq              4048 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		rxq->time_coal = c->rx_coalesce_usecs;
rxq              4049 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		rxq->pkts_coal = c->rx_max_coalesced_frames;
rxq              4050 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		mvpp2_rx_pkts_coal_set(port, rxq);
rxq              4051 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		mvpp2_rx_time_coal_set(port, rxq);
rxq              4606 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		struct mvpp2_rx_queue *rxq;
rxq              4609 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
rxq              4610 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		if (!rxq) {
rxq              4615 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		rxq->id = port->first_rxq + queue;
rxq              4616 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		rxq->port = port->id;
rxq              4617 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		rxq->logic_rxq = queue;
rxq              4619 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		port->rxqs[queue] = rxq;
rxq              4626 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		struct mvpp2_rx_queue *rxq = port->rxqs[queue];
rxq              4628 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		rxq->size = port->rx_ring_size;
rxq              4629 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
rxq              4630 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		rxq->time_coal = MVPP2_RX_COAL_USEC;
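
A note on the mvpp2 excerpts above: the deinit lines free the DMA descriptor ring and then zero every bookkeeping field, while the init lines seed per-queue size and coalescing defaults. Below is a minimal userspace model of that teardown order; malloc/calloc/free stand in for the driver's DMA-coherent allocator and all names are illustrative, not mvpp2's.

#include <stdio.h>
#include <stdlib.h>

/* Userspace model of the teardown order in the mvpp2 excerpts above:
 * release the descriptor ring storage first, then zero every bookkeeping
 * field so a stale pointer or index cannot be reused. malloc/free stand
 * in for dma_alloc_coherent/dma_free_coherent; names are illustrative. */
struct rx_desc { unsigned long long buf_addr; unsigned int len; };

struct rx_ring {
	struct rx_desc *descs;         /* plays the role of rxq->descs */
	unsigned int size;
	unsigned int next_desc_to_proc;
};

static void rx_ring_free(struct rx_ring *r)
{
	free(r->descs);                /* dma_free_coherent() in the driver */
	r->descs = NULL;
	r->size = 0;
	r->next_desc_to_proc = 0;
}

int main(void)
{
	struct rx_ring r = { .size = 128 };

	r.descs = calloc(r.size, sizeof(*r.descs));
	rx_ring_free(&r);
	printf("descs=%p size=%u\n", (void *)r.descs, r.size);
	return 0;
}
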
rxq              1325 drivers/net/ethernet/marvell/sky2.c 	unsigned rxq = rxqaddr[sky2->port];
rxq              1329 drivers/net/ethernet/marvell/sky2.c 	sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);
rxq              1332 drivers/net/ethernet/marvell/sky2.c 		if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL))
rxq              1333 drivers/net/ethernet/marvell/sky2.c 		    == sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
rxq              1338 drivers/net/ethernet/marvell/sky2.c 	sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
rxq              1341 drivers/net/ethernet/marvell/sky2.c 	sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
rxq              1478 drivers/net/ethernet/marvell/sky2.c static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
rxq              1480 drivers/net/ethernet/marvell/sky2.c 	sky2_put_idx(sky2->hw, rxq, sky2->rx_put);
rxq              1520 drivers/net/ethernet/marvell/sky2.c 	unsigned rxq = rxqaddr[sky2->port];
rxq              1524 drivers/net/ethernet/marvell/sky2.c 	sky2_qset(hw, rxq);
rxq              1528 drivers/net/ethernet/marvell/sky2.c 		sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX);
rxq              1534 drivers/net/ethernet/marvell/sky2.c 		sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);
rxq              1536 drivers/net/ethernet/marvell/sky2.c 	sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
rxq              1565 drivers/net/ethernet/marvell/sky2.c 	sky2_rx_update(sky2, rxq);
rxq              2932 drivers/net/ethernet/marvell/sky2.c 	unsigned rxq = rxqaddr[port];
rxq              2935 drivers/net/ethernet/marvell/sky2.c 	u8 fifo_rp = sky2_read8(hw, Q_ADDR(rxq, Q_RP));
rxq              2936 drivers/net/ethernet/marvell/sky2.c 	u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL));
rxq              2948 drivers/net/ethernet/marvell/sky2.c 			      fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
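
The sky2 rx-stop sequence above disables the RAM buffer, then polls until the queue's read level matches the read pointer before resetting the BMU and prefetch unit. A tiny model of that bounded poll follows; reg_read() is a hypothetical stand-in for sky2_read8(), and the simulated "level" converges after a few reads.

#include <stdio.h>

/* Bounded register poll: compare two values each iteration, but cap the
 * iteration count rather than waiting forever on stuck hardware. */
static unsigned char level;
static unsigned char reg_read(int is_read_ptr)
{
	return is_read_ptr ? 4 : ++level;
}

int main(void)
{
	int i;

	for (i = 0; i < 0xffff; i++)     /* bounded, like the driver loop */
		if (reg_read(0) == reg_read(1))
			break;
	if (i < 0xffff)
		printf("drained after %d polls\n", i + 1);
	else
		printf("timed out\n");
	return 0;
}
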
rxq               685 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	xdp.rxq = &ring->xdp_rxq;
rxq                74 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 	if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) {
rxq               139 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 	xdp.rxq = &rq->xdp_rxq;
rxq                59 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 	int			rxq;
rxq               384 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 					arfs_rule->rxq, arfs_rule->flow_id,
rxq               536 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 	dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
rxq               540 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 		priv->channel_stats[arfs_rule->rxq].rq.arfs_err++;
rxq               543 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 			  __func__, arfs_rule->filter_id, arfs_rule->rxq,
rxq               553 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 				struct mlx5_flow_handle *rule, u16 rxq)
rxq               559 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 	dst.tir_num = priv->direct_tir[rxq].tirn;
rxq               563 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 			    "Failed to modify aRFS rule destination to rq=%d\n", rxq);
rxq               593 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 				    arfs_rule->rxq);
rxq               602 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 					 u16 rxq, u32 flow_id)
rxq               612 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 	rule->rxq = rxq;
rxq               697 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 		if (arfs_rule->rxq == rxq_index) {
rxq               701 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 		arfs_rule->rxq = rxq_index;
rxq              1818 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	xdp.rxq = &rx_ring->xdp_rxq;
rxq               465 drivers/net/ethernet/qlogic/qede/qede.h 	struct qede_rx_queue	*rxq;
rxq               525 drivers/net/ethernet/qlogic/qede/qede.h int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy);
rxq               566 drivers/net/ethernet/qlogic/qede/qede.h bool qede_has_rx_work(struct qede_rx_queue *rxq);
rxq               568 drivers/net/ethernet/qlogic/qede/qede.h void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
rxq               569 drivers/net/ethernet/qlogic/qede/qede.h void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
rxq               238 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 				       struct qede_rx_queue *rxq, u8 **buf)
rxq               243 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 		sprintf(*buf, "%d: %s", rxq->rxq_id,
rxq               266 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 			qede_get_strings_stats_rxq(edev, fp->rxq, &buf);
rxq               321 drivers/net/ethernet/qlogic/qede/qede_ethtool.c static void qede_get_ethtool_stats_rxq(struct qede_rx_queue *rxq, u64 **buf)
rxq               326 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 		**buf = *((u64 *)(((void *)rxq) + qede_rqstats_arr[i].offset));
rxq               347 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 			qede_get_ethtool_stats_rxq(fp->rxq, &buf);
rxq               828 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 				rx_handle = fp->rxq->handle;
rxq               899 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 							     fp->rxq->handle);
rxq              1571 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 	struct qede_rx_queue *rxq = NULL;
rxq              1579 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 			rxq = edev->fp_array[i].rxq;
rxq              1584 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 	if (!rxq) {
rxq              1594 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 		if (!qede_has_rx_work(rxq)) {
rxq              1599 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 		hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
rxq              1600 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 		sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
rxq              1611 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 		cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
rxq              1614 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 		sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
rxq              1615 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 		sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
rxq              1621 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 				  rxq->rx_headroom);
rxq              1631 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 			qede_recycle_rx_bd_ring(rxq, 1);
rxq              1632 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 			qed_chain_recycle_consumed(&rxq->rx_comp_ring);
rxq              1637 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 		qede_recycle_rx_bd_ring(rxq, 1);
rxq              1638 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
rxq              1646 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 	qede_update_rx_prod(edev, rxq);
rxq               621 drivers/net/ethernet/qlogic/qede/qede_filter.c 		rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle;
rxq                51 drivers/net/ethernet/qlogic/qede/qede_fp.c int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
rxq                62 drivers/net/ethernet/qlogic/qede/qede_fp.c 	if (allow_lazy && likely(rxq->filled_buffers > 12)) {
rxq                63 drivers/net/ethernet/qlogic/qede/qede_fp.c 		rxq->filled_buffers--;
rxq                74 drivers/net/ethernet/qlogic/qede/qede_fp.c 	mapping = dma_map_page(rxq->dev, data, 0,
rxq                75 drivers/net/ethernet/qlogic/qede/qede_fp.c 			       PAGE_SIZE, rxq->data_direction);
rxq                76 drivers/net/ethernet/qlogic/qede/qede_fp.c 	if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
rxq                81 drivers/net/ethernet/qlogic/qede/qede_fp.c 	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
rxq                87 drivers/net/ethernet/qlogic/qede/qede_fp.c 	rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
rxq                91 drivers/net/ethernet/qlogic/qede/qede_fp.c 				     rxq->rx_headroom);
rxq                93 drivers/net/ethernet/qlogic/qede/qede_fp.c 	rxq->sw_rx_prod++;
rxq                94 drivers/net/ethernet/qlogic/qede/qede_fp.c 	rxq->filled_buffers++;
rxq               479 drivers/net/ethernet/qlogic/qede/qede_fp.c bool qede_has_rx_work(struct qede_rx_queue *rxq)
rxq               486 drivers/net/ethernet/qlogic/qede/qede_fp.c 	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
rxq               487 drivers/net/ethernet/qlogic/qede/qede_fp.c 	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
rxq               492 drivers/net/ethernet/qlogic/qede/qede_fp.c static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
rxq               494 drivers/net/ethernet/qlogic/qede/qede_fp.c 	qed_chain_consume(&rxq->rx_bd_ring);
rxq               495 drivers/net/ethernet/qlogic/qede/qede_fp.c 	rxq->sw_rx_cons++;
rxq               501 drivers/net/ethernet/qlogic/qede/qede_fp.c static inline void qede_reuse_page(struct qede_rx_queue *rxq,
rxq               504 drivers/net/ethernet/qlogic/qede/qede_fp.c 	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
rxq               508 drivers/net/ethernet/qlogic/qede/qede_fp.c 	curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
rxq               515 drivers/net/ethernet/qlogic/qede/qede_fp.c 					  rxq->rx_headroom);
rxq               517 drivers/net/ethernet/qlogic/qede/qede_fp.c 	rxq->sw_rx_prod++;
rxq               524 drivers/net/ethernet/qlogic/qede/qede_fp.c void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
rxq               529 drivers/net/ethernet/qlogic/qede/qede_fp.c 		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
rxq               530 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qede_reuse_page(rxq, curr_cons);
rxq               531 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qede_rx_bd_ring_consume(rxq);
rxq               535 drivers/net/ethernet/qlogic/qede/qede_fp.c static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
rxq               539 drivers/net/ethernet/qlogic/qede/qede_fp.c 	curr_cons->page_offset += rxq->rx_buf_seg_size;
rxq               542 drivers/net/ethernet/qlogic/qede/qede_fp.c 		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
rxq               546 drivers/net/ethernet/qlogic/qede/qede_fp.c 			curr_cons->page_offset -= rxq->rx_buf_seg_size;
rxq               551 drivers/net/ethernet/qlogic/qede/qede_fp.c 		dma_unmap_page(rxq->dev, curr_cons->mapping,
rxq               552 drivers/net/ethernet/qlogic/qede/qede_fp.c 			       PAGE_SIZE, rxq->data_direction);
rxq               559 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qede_reuse_page(rxq, curr_cons);
rxq               565 drivers/net/ethernet/qlogic/qede/qede_fp.c void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
rxq               567 drivers/net/ethernet/qlogic/qede/qede_fp.c 	u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
rxq               568 drivers/net/ethernet/qlogic/qede/qede_fp.c 	u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
rxq               581 drivers/net/ethernet/qlogic/qede/qede_fp.c 	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
rxq               616 drivers/net/ethernet/qlogic/qede/qede_fp.c 				    struct qede_rx_queue *rxq,
rxq               642 drivers/net/ethernet/qlogic/qede/qede_fp.c 			      struct qede_rx_queue *rxq,
rxq               645 drivers/net/ethernet/qlogic/qede/qede_fp.c 	struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
rxq               647 drivers/net/ethernet/qlogic/qede/qede_fp.c 	struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
rxq               656 drivers/net/ethernet/qlogic/qede/qede_fp.c 			   current_bd->page_offset + rxq->rx_headroom,
rxq               659 drivers/net/ethernet/qlogic/qede/qede_fp.c 	if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
rxq               667 drivers/net/ethernet/qlogic/qede/qede_fp.c 	qede_rx_bd_ring_consume(rxq);
rxq               670 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb->truesize += rxq->rx_buf_seg_size;
rxq               677 drivers/net/ethernet/qlogic/qede/qede_fp.c 	qede_recycle_rx_bd_ring(rxq, 1);
rxq               717 drivers/net/ethernet/qlogic/qede/qede_fp.c qede_build_skb(struct qede_rx_queue *rxq,
rxq               724 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb = build_skb(buf, rxq->rx_buf_seg_size);
rxq               734 drivers/net/ethernet/qlogic/qede/qede_fp.c 		      struct qede_rx_queue *rxq,
rxq               740 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb = qede_build_skb(rxq, bd, len, pad);
rxq               741 drivers/net/ethernet/qlogic/qede/qede_fp.c 	bd->page_offset += rxq->rx_buf_seg_size;
rxq               744 drivers/net/ethernet/qlogic/qede/qede_fp.c 		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
rxq               747 drivers/net/ethernet/qlogic/qede/qede_fp.c 			bd->page_offset -= rxq->rx_buf_seg_size;
rxq               754 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qede_reuse_page(rxq, bd);
rxq               758 drivers/net/ethernet/qlogic/qede/qede_fp.c 	qede_rx_bd_ring_consume(rxq);
rxq               765 drivers/net/ethernet/qlogic/qede/qede_fp.c 		  struct qede_rx_queue *rxq,
rxq               783 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qede_reuse_page(rxq, bd);
rxq               787 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb = qede_build_skb(rxq, bd, len, pad);
rxq               789 drivers/net/ethernet/qlogic/qede/qede_fp.c 	if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
rxq               800 drivers/net/ethernet/qlogic/qede/qede_fp.c 	qede_rx_bd_ring_consume(rxq);
rxq               806 drivers/net/ethernet/qlogic/qede/qede_fp.c 			   struct qede_rx_queue *rxq,
rxq               809 drivers/net/ethernet/qlogic/qede/qede_fp.c 	struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
rxq               813 drivers/net/ethernet/qlogic/qede/qede_fp.c 	sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
rxq               814 drivers/net/ethernet/qlogic/qede/qede_fp.c 	pad = cqe->placement_offset + rxq->rx_headroom;
rxq               816 drivers/net/ethernet/qlogic/qede/qede_fp.c 	tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons,
rxq               830 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qede_rx_bd_ring_consume(rxq);
rxq               852 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
rxq               927 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb_record_rx_queue(skb, fp->rxq->rxq_id);
rxq               928 drivers/net/ethernet/qlogic/qede/qede_fp.c 	qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
rxq               932 drivers/net/ethernet/qlogic/qede/qede_fp.c 				 struct qede_rx_queue *rxq,
rxq               938 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
rxq               950 drivers/net/ethernet/qlogic/qede/qede_fp.c 	struct qede_rx_queue *rxq = fp->rxq;
rxq               955 drivers/net/ethernet/qlogic/qede/qede_fp.c 	tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
rxq               959 drivers/net/ethernet/qlogic/qede/qede_fp.c 		dma_unmap_page(rxq->dev, tpa_info->buffer.mapping,
rxq               960 drivers/net/ethernet/qlogic/qede/qede_fp.c 			       PAGE_SIZE, rxq->data_direction);
rxq               963 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
rxq              1000 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qede_reuse_page(rxq, &tpa_info->buffer);
rxq              1055 drivers/net/ethernet/qlogic/qede/qede_fp.c 			struct qede_rx_queue *rxq,
rxq              1068 drivers/net/ethernet/qlogic/qede/qede_fp.c 	xdp.rxq = &rxq->xdp_rxq;
rxq              1086 drivers/net/ethernet/qlogic/qede/qede_fp.c 	rxq->xdp_no_pass++;
rxq              1091 drivers/net/ethernet/qlogic/qede/qede_fp.c 		if (qede_alloc_rx_buffer(rxq, true)) {
rxq              1092 drivers/net/ethernet/qlogic/qede/qede_fp.c 			qede_recycle_rx_bd_ring(rxq, 1);
rxq              1101 drivers/net/ethernet/qlogic/qede/qede_fp.c 			dma_unmap_page(rxq->dev, bd->mapping,
rxq              1108 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qede_rx_bd_ring_consume(rxq);
rxq              1118 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
rxq              1125 drivers/net/ethernet/qlogic/qede/qede_fp.c 			       struct qede_rx_queue *rxq,
rxq              1139 drivers/net/ethernet/qlogic/qede/qede_fp.c 		u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
rxq              1150 drivers/net/ethernet/qlogic/qede/qede_fp.c 		if (unlikely(qede_alloc_rx_buffer(rxq, true)))
rxq              1156 drivers/net/ethernet/qlogic/qede/qede_fp.c 		bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
rxq              1157 drivers/net/ethernet/qlogic/qede/qede_fp.c 		bd = &rxq->sw_rx_ring[bd_cons_idx];
rxq              1158 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qede_rx_bd_ring_consume(rxq);
rxq              1160 drivers/net/ethernet/qlogic/qede/qede_fp.c 		dma_unmap_page(rxq->dev, bd->mapping,
rxq              1164 drivers/net/ethernet/qlogic/qede/qede_fp.c 				   bd->data, rxq->rx_headroom, cur_size);
rxq              1183 drivers/net/ethernet/qlogic/qede/qede_fp.c 				   struct qede_rx_queue *rxq,
rxq              1189 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
rxq              1192 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
rxq              1203 drivers/net/ethernet/qlogic/qede/qede_fp.c 			       struct qede_rx_queue *rxq)
rxq              1205 drivers/net/ethernet/qlogic/qede/qede_fp.c 	struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
rxq              1216 drivers/net/ethernet/qlogic/qede/qede_fp.c 	cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
rxq              1230 drivers/net/ethernet/qlogic/qede/qede_fp.c 		return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);
rxq              1235 drivers/net/ethernet/qlogic/qede/qede_fp.c 	bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
rxq              1236 drivers/net/ethernet/qlogic/qede/qede_fp.c 	bd = &rxq->sw_rx_ring[bd_cons_idx];
rxq              1240 drivers/net/ethernet/qlogic/qede/qede_fp.c 	pad = fp_cqe->placement_offset + rxq->rx_headroom;
rxq              1244 drivers/net/ethernet/qlogic/qede/qede_fp.c 		if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe,
rxq              1255 drivers/net/ethernet/qlogic/qede/qede_fp.c 			rxq->rx_ip_frags++;
rxq              1257 drivers/net/ethernet/qlogic/qede/qede_fp.c 			rxq->rx_hw_errors++;
rxq              1263 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb = qede_rx_build_skb(edev, rxq, bd, len, pad);
rxq              1265 drivers/net/ethernet/qlogic/qede/qede_fp.c 		rxq->rx_alloc_errors++;
rxq              1266 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
rxq              1274 drivers/net/ethernet/qlogic/qede/qede_fp.c 		u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
rxq              1278 drivers/net/ethernet/qlogic/qede/qede_fp.c 			qede_recycle_rx_bd_ring(rxq, unmapped_frags);
rxq              1288 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb_record_rx_queue(skb, rxq->rxq_id);
rxq              1292 drivers/net/ethernet/qlogic/qede/qede_fp.c 	qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));
rxq              1299 drivers/net/ethernet/qlogic/qede/qede_fp.c 	struct qede_rx_queue *rxq = fp->rxq;
rxq              1304 drivers/net/ethernet/qlogic/qede/qede_fp.c 	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
rxq              1305 drivers/net/ethernet/qlogic/qede/qede_fp.c 	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
rxq              1316 drivers/net/ethernet/qlogic/qede/qede_fp.c 		rcv_pkts += qede_rx_process_cqe(edev, fp, rxq);
rxq              1317 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
rxq              1318 drivers/net/ethernet/qlogic/qede/qede_fp.c 		sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
rxq              1322 drivers/net/ethernet/qlogic/qede/qede_fp.c 	rxq->rcv_pkts += rcv_pkts;
rxq              1325 drivers/net/ethernet/qlogic/qede/qede_fp.c 	while (rxq->num_rx_buffers - rxq->filled_buffers)
rxq              1326 drivers/net/ethernet/qlogic/qede/qede_fp.c 		if (qede_alloc_rx_buffer(rxq, false))
rxq              1330 drivers/net/ethernet/qlogic/qede/qede_fp.c 	qede_update_rx_prod(edev, rxq);
rxq              1352 drivers/net/ethernet/qlogic/qede/qede_fp.c 		if (qede_has_rx_work(fp->rxq))
rxq              1394 drivers/net/ethernet/qlogic/qede/qede_fp.c 			qede_has_rx_work(fp->rxq)) ?
rxq               852 drivers/net/ethernet/qlogic/qede/qede_main.c 			if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
rxq               853 drivers/net/ethernet/qlogic/qede/qede_main.c 				xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
rxq               854 drivers/net/ethernet/qlogic/qede/qede_main.c 			kfree(fp->rxq);
rxq               913 drivers/net/ethernet/qlogic/qede/qede_main.c 			fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
rxq               914 drivers/net/ethernet/qlogic/qede/qede_main.c 			if (!fp->rxq)
rxq              1341 drivers/net/ethernet/qlogic/qede/qede_main.c 				 struct qede_rx_queue *rxq)
rxq              1345 drivers/net/ethernet/qlogic/qede/qede_main.c 	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
rxq              1349 drivers/net/ethernet/qlogic/qede/qede_main.c 		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
rxq              1353 drivers/net/ethernet/qlogic/qede/qede_main.c 			       rx_buf->mapping, PAGE_SIZE, rxq->data_direction);
rxq              1360 drivers/net/ethernet/qlogic/qede/qede_main.c static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
rxq              1363 drivers/net/ethernet/qlogic/qede/qede_main.c 	qede_free_rx_buffers(edev, rxq);
rxq              1366 drivers/net/ethernet/qlogic/qede/qede_main.c 	kfree(rxq->sw_rx_ring);
rxq              1369 drivers/net/ethernet/qlogic/qede/qede_main.c 	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
rxq              1370 drivers/net/ethernet/qlogic/qede/qede_main.c 	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
rxq              1373 drivers/net/ethernet/qlogic/qede/qede_main.c static void qede_set_tpa_param(struct qede_rx_queue *rxq)
rxq              1378 drivers/net/ethernet/qlogic/qede/qede_main.c 		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
rxq              1385 drivers/net/ethernet/qlogic/qede/qede_main.c static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
rxq              1389 drivers/net/ethernet/qlogic/qede/qede_main.c 	rxq->num_rx_buffers = edev->q_num_rx_buffers;
rxq              1391 drivers/net/ethernet/qlogic/qede/qede_main.c 	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
rxq              1393 drivers/net/ethernet/qlogic/qede/qede_main.c 	rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
rxq              1394 drivers/net/ethernet/qlogic/qede/qede_main.c 	size = rxq->rx_headroom +
rxq              1398 drivers/net/ethernet/qlogic/qede/qede_main.c 	if (rxq->rx_buf_size + size > PAGE_SIZE)
rxq              1399 drivers/net/ethernet/qlogic/qede/qede_main.c 		rxq->rx_buf_size = PAGE_SIZE - size;
rxq              1405 drivers/net/ethernet/qlogic/qede/qede_main.c 		size = size + rxq->rx_buf_size;
rxq              1406 drivers/net/ethernet/qlogic/qede/qede_main.c 		rxq->rx_buf_seg_size = roundup_pow_of_two(size);
rxq              1408 drivers/net/ethernet/qlogic/qede/qede_main.c 		rxq->rx_buf_seg_size = PAGE_SIZE;
rxq              1413 drivers/net/ethernet/qlogic/qede/qede_main.c 	size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
rxq              1414 drivers/net/ethernet/qlogic/qede/qede_main.c 	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
rxq              1415 drivers/net/ethernet/qlogic/qede/qede_main.c 	if (!rxq->sw_rx_ring) {
rxq              1428 drivers/net/ethernet/qlogic/qede/qede_main.c 					    &rxq->rx_bd_ring, NULL);
rxq              1439 drivers/net/ethernet/qlogic/qede/qede_main.c 					    &rxq->rx_comp_ring, NULL);
rxq              1444 drivers/net/ethernet/qlogic/qede/qede_main.c 	rxq->filled_buffers = 0;
rxq              1445 drivers/net/ethernet/qlogic/qede/qede_main.c 	for (i = 0; i < rxq->num_rx_buffers; i++) {
rxq              1446 drivers/net/ethernet/qlogic/qede/qede_main.c 		rc = qede_alloc_rx_buffer(rxq, false);
rxq              1456 drivers/net/ethernet/qlogic/qede/qede_main.c 		qede_set_tpa_param(rxq);
rxq              1517 drivers/net/ethernet/qlogic/qede/qede_main.c 		qede_free_mem_rxq(edev, fp->rxq);
rxq              1542 drivers/net/ethernet/qlogic/qede/qede_main.c 		rc = qede_alloc_mem_rxq(edev, fp->rxq);
rxq              1670 drivers/net/ethernet/qlogic/qede/qede_main.c 			fp->rxq->rxq_id = rxq_index++;
rxq              1674 drivers/net/ethernet/qlogic/qede/qede_main.c 				fp->rxq->data_direction = DMA_BIDIRECTIONAL;
rxq              1676 drivers/net/ethernet/qlogic/qede/qede_main.c 				fp->rxq->data_direction = DMA_FROM_DEVICE;
rxq              1677 drivers/net/ethernet/qlogic/qede/qede_main.c 			fp->rxq->dev = &edev->pdev->dev;
rxq              1680 drivers/net/ethernet/qlogic/qede/qede_main.c 			WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
rxq              1681 drivers/net/ethernet/qlogic/qede/qede_main.c 						 fp->rxq->rxq_id) < 0);
rxq              1954 drivers/net/ethernet/qlogic/qede/qede_main.c 			rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
rxq              1967 drivers/net/ethernet/qlogic/qede/qede_main.c 			bpf_prog_put(fp->rxq->xdp_prog);
rxq              2078 drivers/net/ethernet/qlogic/qede/qede_main.c 			struct qede_rx_queue *rxq = fp->rxq;
rxq              2083 drivers/net/ethernet/qlogic/qede/qede_main.c 			q_params.queue_id = rxq->rxq_id;
rxq              2089 drivers/net/ethernet/qlogic/qede/qede_main.c 			    qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
rxq              2090 drivers/net/ethernet/qlogic/qede/qede_main.c 			page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
rxq              2093 drivers/net/ethernet/qlogic/qede/qede_main.c 						   rxq->rx_buf_size,
rxq              2094 drivers/net/ethernet/qlogic/qede/qede_main.c 						   rxq->rx_bd_ring.p_phys_addr,
rxq              2104 drivers/net/ethernet/qlogic/qede/qede_main.c 			rxq->hw_rxq_prod_addr = ret_params.p_prod;
rxq              2105 drivers/net/ethernet/qlogic/qede/qede_main.c 			rxq->handle = ret_params.p_handle;
rxq              2108 drivers/net/ethernet/qlogic/qede/qede_main.c 			rxq->hw_cons_ptr = val;
rxq              2110 drivers/net/ethernet/qlogic/qede/qede_main.c 			qede_update_rx_prod(edev, rxq);
rxq              2118 drivers/net/ethernet/qlogic/qede/qede_main.c 			fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
rxq              2119 drivers/net/ethernet/qlogic/qede/qede_main.c 			if (IS_ERR(fp->rxq->xdp_prog)) {
rxq              2120 drivers/net/ethernet/qlogic/qede/qede_main.c 				rc = PTR_ERR(fp->rxq->xdp_prog);
rxq              2121 drivers/net/ethernet/qlogic/qede/qede_main.c 				fp->rxq->xdp_prog = NULL;
rxq              2566 drivers/net/ethernet/qlogic/qede/qede_main.c 			if (qede_has_rx_work(fp->rxq))
rxq              2573 drivers/net/ethernet/qlogic/qede/qede_main.c 			if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
rxq              2574 drivers/net/ethernet/qlogic/qede/qede_main.c 			    qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
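
Throughout the qede excerpts, the free-running sw_rx_prod/sw_rx_cons counters are masked with NUM_RX_BDS_MAX on every ring access, which wraps correctly only when the ring size is a power of two. A self-contained sketch of that indexing scheme, with an illustrative (not the driver's) ring size:

#include <stdio.h>

/* Free-running producer/consumer counters masked into a power-of-two
 * ring on each access, modeling sw_rx_prod & NUM_RX_BDS_MAX above. */
#define RING_SIZE 8                    /* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

struct ring {
	int slot[RING_SIZE];
	unsigned prod, cons;           /* never wrapped explicitly */
};

static void produce(struct ring *r, int v) { r->slot[r->prod++ & RING_MASK] = v; }
static int consume(struct ring *r)         { return r->slot[r->cons++ & RING_MASK]; }

int main(void)
{
	struct ring r = { {0} };
	int i;

	for (i = 0; i < RING_SIZE; i++)
		produce(&r, i);
	for (i = 0; i < RING_SIZE; i++)
		printf("%d ", consume(&r));     /* 0..7 */
	produce(&r, 42);                        /* prod wraps to slot 0 */
	printf("\nwrapped read: %d\n", consume(&r));
	return 0;
}
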
rxq               455 drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h 	struct sxgbe_rx_queue *rxq[SXGBE_RX_QUEUES];
rxq               326 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 			priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i],
rxq               581 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 				   priv->rxq[queue_num], rx_rsize);
rxq               590 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		priv->rxq[queue_num]->priv_ptr = priv;
rxq               604 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
rxq               656 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
rxq               679 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		priv->rxq[queue_num] = devm_kmalloc(priv->device,
rxq               681 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		if (!priv->rxq[queue_num])
rxq               961 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 					(priv->rxq[queue_num])->dma_rx_phy,
rxq              1135 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 				       (priv->rxq[queue_num])->irq_no,
rxq              1137 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 				       dev->name, priv->rxq[queue_num]);
rxq              1435 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	for (; priv->rxq[qnum]->cur_rx - priv->rxq[qnum]->dirty_rx > 0;
rxq              1436 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	     priv->rxq[qnum]->dirty_rx++) {
rxq              1437 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		unsigned int entry = priv->rxq[qnum]->dirty_rx % rxsize;
rxq              1440 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		p = priv->rxq[qnum]->dma_rx + entry;
rxq              1442 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		if (likely(priv->rxq[qnum]->rx_skbuff[entry] == NULL)) {
rxq              1450 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 			priv->rxq[qnum]->rx_skbuff[entry] = skb;
rxq              1451 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 			priv->rxq[qnum]->rx_skbuff_dma[entry] =
rxq              1456 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 				priv->rxq[qnum]->rx_skbuff_dma[entry];
rxq              1479 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	unsigned int entry = priv->rxq[qnum]->cur_rx;
rxq              1490 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		p = priv->rxq[qnum]->dma_rx + entry;
rxq              1497 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize;
rxq              1498 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		prefetch(priv->rxq[qnum]->dma_rx + next_entry);
rxq              1513 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		skb = priv->rxq[qnum]->rx_skbuff[entry];
rxq              1519 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		priv->rxq[qnum]->rx_skbuff[entry] = NULL;
rxq              1660 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	struct sxgbe_rx_queue *rxq = (struct sxgbe_rx_queue *)dev_id;
rxq              1661 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	struct sxgbe_priv_data *priv = rxq->priv_ptr;
rxq              1664 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, rxq->queue_no,
rxq              1668 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		priv->hw->dma->disable_dma_irq(priv->ioaddr, rxq->queue_no);
rxq              1679 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 					       rxq->queue_no, priv->rx_tc);
rxq               135 drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c 		priv->rxq[i]->irq_no = irq_of_parse_and_map(node, chan++);
rxq               136 drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c 		if (priv->rxq[i]->irq_no <= 0) {
rxq               156 drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c 		irq_dispose_mapping(priv->rxq[i]->irq_no);
rxq               283 drivers/net/ethernet/sfc/ptp.c 	struct sk_buff_head rxq;
rxq              1233 drivers/net/ethernet/sfc/ptp.c 	while ((skb = skb_dequeue(&ptp->rxq))) {
rxq              1248 drivers/net/ethernet/sfc/ptp.c 			skb_queue_head(&ptp->rxq, skb);
rxq              1364 drivers/net/ethernet/sfc/ptp.c 	efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq);
rxq              1461 drivers/net/ethernet/sfc/ptp.c 	skb_queue_head_init(&ptp->rxq);
rxq              1569 drivers/net/ethernet/sfc/ptp.c 	skb_queue_purge(&efx->ptp_data->rxq);
rxq              1694 drivers/net/ethernet/sfc/ptp.c 	skb_queue_tail(&ptp->rxq, skb);
rxq               764 drivers/net/ethernet/sfc/siena_sriov.c 	unsigned vf_rxq = req->u.mac_filter.rxq;
rxq               195 drivers/net/ethernet/sfc/vfdi.h 			u32 rxq;
rxq               997 drivers/net/ethernet/socionext/netsec.c 		xdp.rxq = &dring->xdp_rxq;
rxq              4470 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue, rxq, maxq;
rxq              4595 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	rxq = priv->plat->rx_queues_to_use;
rxq              4598 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
rxq               578 drivers/net/ethernet/ti/cpsw.c 	struct xdp_rxq_info *rxq;
rxq               583 drivers/net/ethernet/ti/cpsw.c 	rxq = &priv->xdp_rxq[ch];
rxq               585 drivers/net/ethernet/ti/cpsw.c 	ret = xdp_rxq_info_reg(rxq, priv->ndev, ch);
rxq               589 drivers/net/ethernet/ti/cpsw.c 	ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
rxq               591 drivers/net/ethernet/ti/cpsw.c 		xdp_rxq_info_unreg(rxq);
rxq               598 drivers/net/ethernet/ti/cpsw.c 	struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch];
rxq               600 drivers/net/ethernet/ti/cpsw.c 	if (!xdp_rxq_info_is_reg(rxq))
rxq               603 drivers/net/ethernet/ti/cpsw.c 	xdp_rxq_info_unreg(rxq);
rxq               736 drivers/net/ethernet/ti/cpsw.c 		xdp.rxq = &priv->xdp_rxq[ch];
rxq               663 drivers/net/ethernet/xscale/ixp4xx_eth.c 	qmgr_disable_irq(port->plat->rxq);
rxq               671 drivers/net/ethernet/xscale/ixp4xx_eth.c 	unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
rxq               687 drivers/net/ethernet/xscale/ixp4xx_eth.c 		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
rxq               693 drivers/net/ethernet/xscale/ixp4xx_eth.c 			qmgr_enable_irq(rxq);
rxq               694 drivers/net/ethernet/xscale/ixp4xx_eth.c 			if (!qmgr_stat_below_low_watermark(rxq) &&
rxq               700 drivers/net/ethernet/xscale/ixp4xx_eth.c 				qmgr_disable_irq(rxq);
rxq              1038 drivers/net/ethernet/xscale/ixp4xx_eth.c 	err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
rxq              1067 drivers/net/ethernet/xscale/ixp4xx_eth.c 	qmgr_release_queue(port->plat->rxq);
rxq              1078 drivers/net/ethernet/xscale/ixp4xx_eth.c 	qmgr_release_queue(port->plat->rxq);
rxq              1191 drivers/net/ethernet/xscale/ixp4xx_eth.c 	msg.byte5 = port->plat->rxq | 0x80;
rxq              1192 drivers/net/ethernet/xscale/ixp4xx_eth.c 	msg.byte7 = port->plat->rxq << 4;
rxq              1258 drivers/net/ethernet/xscale/ixp4xx_eth.c 	qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
rxq              1279 drivers/net/ethernet/xscale/ixp4xx_eth.c 	qmgr_disable_irq(port->plat->rxq);
rxq              1295 drivers/net/ethernet/xscale/ixp4xx_eth.c 		while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
rxq               262 drivers/net/tap.c 	__u32 rxq;
rxq               271 drivers/net/tap.c 	rxq = skb_get_hash(skb);
rxq               272 drivers/net/tap.c 	if (rxq) {
rxq               273 drivers/net/tap.c 		queue = rcu_dereference(tap->taps[rxq % numvtaps]);
rxq               278 drivers/net/tap.c 		rxq = skb_get_rx_queue(skb);
rxq               280 drivers/net/tap.c 		while (unlikely(rxq >= numvtaps))
rxq               281 drivers/net/tap.c 			rxq -= numvtaps;
rxq               283 drivers/net/tap.c 		queue = rcu_dereference(tap->taps[rxq]);
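
The tap excerpt above picks a queue from the flow hash when one exists, and otherwise folds the recorded rx queue into range by repeated subtraction instead of a modulo (the recorded value is usually already small). The same logic as a standalone function; numvtaps and the call sites here are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Queue selection mirroring the tap logic: prefer the flow hash, fall
 * back to the recorded rx queue reduced into [0, numvtaps). */
static unsigned pick_queue(uint32_t flow_hash, uint32_t recorded_rxq,
			   unsigned numvtaps)
{
	if (flow_hash)
		return flow_hash % numvtaps;
	while (recorded_rxq >= numvtaps)
		recorded_rxq -= numvtaps;
	return recorded_rxq;
}

int main(void)
{
	printf("%u %u\n", pick_queue(0xdeadbeef, 0, 4),  /* hash path: 3 */
	       pick_queue(0, 9, 4));                     /* fallback: 1 */
	return 0;
}
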
rxq              1710 drivers/net/tun.c 		xdp.rxq = &tfile->xdp_rxq;
rxq              2452 drivers/net/tun.c 		xdp->rxq = &tfile->xdp_rxq;
rxq               371 drivers/net/usb/lan78xx.c 	struct sk_buff_head	rxq;
rxq              2297 drivers/net/usb/lan78xx.c 				unlink_urbs(dev, &dev->rxq);
rxq              2675 drivers/net/usb/lan78xx.c 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
rxq              2678 drivers/net/usb/lan78xx.c 	while (!skb_queue_empty(&dev->rxq) &&
rxq              3209 drivers/net/usb/lan78xx.c 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
rxq              3218 drivers/net/usb/lan78xx.c 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
rxq              3239 drivers/net/usb/lan78xx.c 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
rxq              3303 drivers/net/usb/lan78xx.c 	state = defer_bh(dev, skb, &dev->rxq, state);
rxq              3450 drivers/net/usb/lan78xx.c 	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
rxq              3452 drivers/net/usb/lan78xx.c 			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
rxq              3460 drivers/net/usb/lan78xx.c 		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
rxq              3540 drivers/net/usb/lan78xx.c 		unlink_urbs(dev, &dev->rxq);
rxq              3739 drivers/net/usb/lan78xx.c 	skb_queue_head_init(&dev->rxq);
rxq               501 drivers/net/usb/usbnet.c 	spin_lock_irqsave (&dev->rxq.lock, lockflags);
rxq               528 drivers/net/usb/usbnet.c 			__usbnet_queue_skb(&dev->rxq, skb, rx_start);
rxq               534 drivers/net/usb/usbnet.c 	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
rxq               651 drivers/net/usb/usbnet.c 	state = defer_bh(dev, skb, &dev->rxq, state);
rxq               755 drivers/net/usb/usbnet.c 		(void) unlink_urbs (dev, &dev->rxq);
rxq               787 drivers/net/usb/usbnet.c 		unlink_urbs(dev, &dev->rxq);
rxq               790 drivers/net/usb/usbnet.c 	wait_skb_queue_empty(&dev->rxq);
rxq              1094 drivers/net/usb/usbnet.c 		unlink_urbs(dev, &dev->rxq);
rxq              1160 drivers/net/usb/usbnet.c 		unlink_urbs (dev, &dev->rxq);
rxq              1498 drivers/net/usb/usbnet.c 	for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) {
rxq              1549 drivers/net/usb/usbnet.c 		if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
rxq              1559 drivers/net/usb/usbnet.c 		int	temp = dev->rxq.qlen;
rxq              1564 drivers/net/usb/usbnet.c 			if (temp != dev->rxq.qlen)
rxq              1567 drivers/net/usb/usbnet.c 					  temp, dev->rxq.qlen);
rxq              1568 drivers/net/usb/usbnet.c 			if (dev->rxq.qlen < RX_QLEN(dev))
rxq              1699 drivers/net/usb/usbnet.c 	skb_queue_head_init (&dev->rxq);
rxq               243 drivers/net/veth.c 	int rxq;
rxq               253 drivers/net/veth.c 	rxq = skb_get_queue_mapping(skb);
rxq               254 drivers/net/veth.c 	if (rxq < rcv->real_num_rx_queues) {
rxq               255 drivers/net/veth.c 		rq = &rcv_priv->rq[rxq];
rxq               258 drivers/net/veth.c 			skb_record_rx_queue(skb, rxq);
rxq               530 drivers/net/veth.c 		xdp.rxq = &rq->xdp_rxq;
rxq               542 drivers/net/veth.c 			xdp.rxq->mem = frame->mem;
rxq               554 drivers/net/veth.c 			xdp.rxq->mem = frame->mem;
rxq               658 drivers/net/veth.c 	xdp.rxq = &rq->xdp_rxq;
rxq               670 drivers/net/veth.c 		xdp.rxq->mem = rq->xdp_mem;
rxq               681 drivers/net/veth.c 		xdp.rxq->mem = rq->xdp_mem;
rxq               277 drivers/net/virtio_net.c static int rxq2vq(int rxq)
rxq               279 drivers/net/virtio_net.c 	return rxq * 2;
rxq               688 drivers/net/virtio_net.c 		xdp.rxq = &rq->xdp_rxq;
rxq               844 drivers/net/virtio_net.c 		xdp.rxq = &rq->xdp_rxq;
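
The one-line rxq2vq() above encodes virtio-net's virtqueue layout: data queues come in interleaved rx/tx pairs, so rx queue N is virtqueue 2N and its tx partner is virtqueue 2N + 1, with the control queue (when negotiated) after all data queues. The txq2vq()/vq2rxq() helpers below mirror the companions defined in the same driver; a small demonstration of the mapping and its inverse:

#include <stdio.h>

/* virtio-net virtqueue indexing: rx/tx pairs interleaved. */
static int rxq2vq(int rxq) { return rxq * 2; }
static int txq2vq(int txq) { return txq * 2 + 1; }
static int vq2rxq(int vq)  { return vq / 2; }

int main(void)
{
	int q;

	for (q = 0; q < 3; q++)
		printf("queue %d: rx vq %d, tx vq %d\n",
		       q, rxq2vq(q), txq2vq(q));
	printf("vq 4 belongs to rx queue %d\n", vq2rxq(4));
	return 0;
}
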
rxq               655 drivers/net/wan/ixp4xx_hss.c 	unsigned int rxq = queue_ids[port->id].rx;
rxq               672 drivers/net/wan/ixp4xx_hss.c 		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
rxq               678 drivers/net/wan/ixp4xx_hss.c 			qmgr_enable_irq(rxq);
rxq               679 drivers/net/wan/ixp4xx_hss.c 			if (!qmgr_stat_empty(rxq) &&
rxq               686 drivers/net/wan/ixp4xx_hss.c 				qmgr_disable_irq(rxq);
rxq              1911 drivers/net/wireless/ath/ath6kl/htc_mbox.c 				struct list_head *rxq,
rxq              1920 drivers/net/wireless/ath/ath6kl/htc_mbox.c 	n_scat_pkt = get_queue_depth(rxq);
rxq              1923 drivers/net/wireless/ath/ath6kl/htc_mbox.c 	if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
rxq              1936 drivers/net/wireless/ath/ath6kl/htc_mbox.c 			    __func__, get_queue_depth(rxq), n_scat_pkt);
rxq              1943 drivers/net/wireless/ath/ath6kl/htc_mbox.c 		   get_queue_depth(rxq), n_scat_pkt);
rxq              1953 drivers/net/wireless/ath/ath6kl/htc_mbox.c 		packet = list_first_entry(rxq, struct htc_packet, list);
rxq              1960 drivers/net/wireless/ath/ath6kl/htc_mbox.c 			list_add(&packet->list, rxq);
rxq              2663 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	struct ipw2100_bd_queue *rxq = &priv->rx_queue;
rxq              2676 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	if (r >= rxq->entries) {
rxq              2681 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	i = (rxq->next + 1) % rxq->entries;
rxq              2753 drivers/net/wireless/intel/ipw2x00/ipw2100.c 		rxq->drv[i].status.info.field = 0;
rxq              2755 drivers/net/wireless/intel/ipw2x00/ipw2100.c 		i = (i + 1) % rxq->entries;
rxq              2760 drivers/net/wireless/intel/ipw2x00/ipw2100.c 		rxq->next = (i ? i : rxq->entries) - 1;
rxq              2763 drivers/net/wireless/intel/ipw2x00/ipw2100.c 			       IPW_MEM_HOST_SHARED_RX_WRITE_INDEX, rxq->next);
rxq              3430 drivers/net/wireless/intel/ipw2x00/ipw2200.c 				      struct ipw_rx_queue *rxq)
rxq              3435 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	spin_lock_irqsave(&rxq->lock, flags);
rxq              3437 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	INIT_LIST_HEAD(&rxq->rx_free);
rxq              3438 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	INIT_LIST_HEAD(&rxq->rx_used);
rxq              3444 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		if (rxq->pool[i].skb != NULL) {
rxq              3445 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
rxq              3447 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			dev_kfree_skb(rxq->pool[i].skb);
rxq              3448 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			rxq->pool[i].skb = NULL;
rxq              3450 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
rxq              3455 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	rxq->read = rxq->write = 0;
rxq              3456 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	rxq->free_count = 0;
rxq              3457 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	spin_unlock_irqrestore(&rxq->lock, flags);
rxq              3521 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (!priv->rxq)
rxq              3522 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		priv->rxq = ipw_rx_queue_alloc(priv);
rxq              3524 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		ipw_rx_queue_reset(priv, priv->rxq);
rxq              3525 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (!priv->rxq) {
rxq              3644 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
rxq              3655 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (priv->rxq) {
rxq              3656 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		ipw_rx_queue_free(priv, priv->rxq);
rxq              3657 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		priv->rxq = NULL;
rxq              5140 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_rx_queue *rxq = priv->rxq;
rxq              5146 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	spin_lock_irqsave(&rxq->lock, flags);
rxq              5147 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	write = rxq->write;
rxq              5148 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
rxq              5149 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		element = rxq->rx_free.next;
rxq              5153 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
rxq              5155 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		rxq->queue[rxq->write] = rxb;
rxq              5156 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
rxq              5157 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		rxq->free_count--;
rxq              5159 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	spin_unlock_irqrestore(&rxq->lock, flags);
rxq              5163 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (rxq->free_count <= RX_LOW_WATERMARK)
rxq              5167 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (write != rxq->write)
rxq              5168 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
rxq              5180 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_rx_queue *rxq = priv->rxq;
rxq              5185 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	spin_lock_irqsave(&rxq->lock, flags);
rxq              5186 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	while (!list_empty(&rxq->rx_used)) {
rxq              5187 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		element = rxq->rx_used.next;
rxq              5204 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		list_add_tail(&rxb->list, &rxq->rx_free);
rxq              5205 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		rxq->free_count++;
rxq              5207 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	spin_unlock_irqrestore(&rxq->lock, flags);
rxq              5226 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
rxq              5230 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (!rxq)
rxq              5234 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		if (rxq->pool[i].skb != NULL) {
rxq              5235 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
rxq              5237 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			dev_kfree_skb(rxq->pool[i].skb);
rxq              5241 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	kfree(rxq);
rxq              5246 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_rx_queue *rxq;
rxq              5249 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
rxq              5250 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (unlikely(!rxq)) {
rxq              5254 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	spin_lock_init(&rxq->lock);
rxq              5255 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	INIT_LIST_HEAD(&rxq->rx_free);
rxq              5256 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	INIT_LIST_HEAD(&rxq->rx_used);
rxq              5260 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
rxq              5264 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	rxq->read = rxq->write = 0;
rxq              5265 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	rxq->free_count = 0;
rxq              5267 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	return rxq;
rxq              8261 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	i = priv->rxq->read;
rxq              8263 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
rxq              8267 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		rxb = priv->rxq->queue[i];
rxq              8272 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		priv->rxq->queue[i] = NULL;
rxq              8430 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		list_add_tail(&rxb->list, &priv->rxq->rx_used);
rxq              8437 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			priv->rxq->read = i;
rxq              8443 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	priv->rxq->read = i;
rxq              11794 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (priv->rxq) {
rxq              11795 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		ipw_rx_queue_free(priv, priv->rxq);
rxq              11796 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		priv->rxq = NULL;
rxq              1191 drivers/net/wireless/intel/ipw2x00/ipw2200.h 	struct ipw_rx_queue *rxq;
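
The ipw2200 excerpts keep every receive buffer on one of two lists: rx_used for buffers without backing storage and rx_free for buffers ready to be handed to the hardware ring, with the replenish step moving entries from the former to the latter. A minimal userspace model of that step, under illustrative types: malloc() stands in for page allocation, and an allocation failure simply stops the walk so it can be retried later, as the driver does.

#include <stdio.h>
#include <stdlib.h>

/* Two-list rx buffer pool: "used" entries lack a page, "free" entries
 * carry one. The demo leaks its pages at exit, fine for a sketch. */
struct rx_buf { struct rx_buf *next; void *page; };

struct rx_pool { struct rx_buf *rx_free, *rx_used; unsigned free_count; };

static void replenish(struct rx_pool *p)
{
	while (p->rx_used) {
		struct rx_buf *b = p->rx_used;

		b->page = malloc(4096);          /* alloc_page() in the driver */
		if (!b->page)
			break;                   /* retry later on failure */
		p->rx_used = b->next;
		b->next = p->rx_free;
		p->rx_free = b;
		p->free_count++;
	}
}

int main(void)
{
	struct rx_buf bufs[4] = { {0} };
	struct rx_pool pool = { 0 };
	int i;

	for (i = 0; i < 4; i++) {                /* start with all "used" */
		bufs[i].next = pool.rx_used;
		pool.rx_used = &bufs[i];
	}
	replenish(&pool);
	printf("free_count=%u\n", pool.free_count);
	return 0;
}
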
rxq               935 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	struct il_rx_queue *rxq = &il->rxq;
rxq               940 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	spin_lock_irqsave(&rxq->lock, flags);
rxq               941 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
rxq               943 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		element = rxq->rx_free.next;
rxq               948 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		rxq->bd[rxq->write] =
rxq               950 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		rxq->queue[rxq->write] = rxb;
rxq               951 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
rxq               952 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		rxq->free_count--;
rxq               954 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	spin_unlock_irqrestore(&rxq->lock, flags);
rxq               957 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	if (rxq->free_count <= RX_LOW_WATERMARK)
rxq               962 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	if (rxq->write_actual != (rxq->write & ~0x7) ||
rxq               963 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	    abs(rxq->write - rxq->read) > 7) {
rxq               964 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		spin_lock_irqsave(&rxq->lock, flags);
rxq               965 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		rxq->need_update = 1;
rxq               966 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		spin_unlock_irqrestore(&rxq->lock, flags);
rxq               967 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		il_rx_queue_update_write_ptr(il, rxq);
rxq               982 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	struct il_rx_queue *rxq = &il->rxq;
rxq               991 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		spin_lock_irqsave(&rxq->lock, flags);
rxq               992 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		if (list_empty(&rxq->rx_used)) {
rxq               993 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			spin_unlock_irqrestore(&rxq->lock, flags);
rxq               996 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		spin_unlock_irqrestore(&rxq->lock, flags);
rxq               998 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		if (rxq->free_count > RX_LOW_WATERMARK)
rxq              1009 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			if (rxq->free_count <= RX_LOW_WATERMARK &&
rxq              1013 drivers/net/wireless/intel/iwlegacy/3945-mac.c 				       priority, rxq->free_count);
rxq              1031 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		spin_lock_irqsave(&rxq->lock, flags);
rxq              1033 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		if (list_empty(&rxq->rx_used)) {
rxq              1034 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			spin_unlock_irqrestore(&rxq->lock, flags);
rxq              1042 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		element = rxq->rx_used.next;
rxq              1048 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		list_add_tail(&rxb->list, &rxq->rx_free);
rxq              1049 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		rxq->free_count++;
rxq              1052 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		spin_unlock_irqrestore(&rxq->lock, flags);
rxq              1057 drivers/net/wireless/intel/iwlegacy/3945-mac.c il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
rxq              1061 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	spin_lock_irqsave(&rxq->lock, flags);
rxq              1062 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	INIT_LIST_HEAD(&rxq->rx_free);
rxq              1063 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	INIT_LIST_HEAD(&rxq->rx_used);
rxq              1068 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		if (rxq->pool[i].page != NULL) {
rxq              1069 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
rxq              1072 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			__il_free_pages(il, rxq->pool[i].page);
rxq              1073 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			rxq->pool[i].page = NULL;
rxq              1075 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
rxq              1080 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	rxq->read = rxq->write = 0;
rxq              1081 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	rxq->write_actual = 0;
rxq              1082 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	rxq->free_count = 0;
rxq              1083 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	spin_unlock_irqrestore(&rxq->lock, flags);
rxq              1113 drivers/net/wireless/intel/iwlegacy/3945-mac.c il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
rxq              1117 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		if (rxq->pool[i].page != NULL) {
rxq              1118 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
rxq              1121 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			__il_free_pages(il, rxq->pool[i].page);
rxq              1122 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			rxq->pool[i].page = NULL;
rxq              1126 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
rxq              1127 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			  rxq->bd_dma);
rxq              1129 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			  rxq->rb_stts, rxq->rb_stts_dma);
rxq              1130 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	rxq->bd = NULL;
rxq              1131 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	rxq->rb_stts = NULL;
rxq              1184 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	struct il_rx_queue *rxq = &il->rxq;
rxq              1194 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
rxq              1195 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	i = rxq->read;
rxq              1198 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	total_empty = r - rxq->write_actual;
rxq              1211 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		rxb = rxq->queue[i];
rxq              1218 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		rxq->queue[i] = NULL;
rxq              1264 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		spin_lock_irqsave(&rxq->lock, flags);
rxq              1274 drivers/net/wireless/intel/iwlegacy/3945-mac.c 				list_add_tail(&rxb->list, &rxq->rx_used);
rxq              1276 drivers/net/wireless/intel/iwlegacy/3945-mac.c 				list_add_tail(&rxb->list, &rxq->rx_free);
rxq              1277 drivers/net/wireless/intel/iwlegacy/3945-mac.c 				rxq->free_count++;
rxq              1280 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			list_add_tail(&rxb->list, &rxq->rx_used);
rxq              1282 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		spin_unlock_irqrestore(&rxq->lock, flags);
rxq              1290 drivers/net/wireless/intel/iwlegacy/3945-mac.c 				rxq->read = i;
rxq              1298 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	rxq->read = i;
rxq              1468 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		il_rx_queue_update_write_ptr(il, &il->rxq);
rxq              3825 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	if (il->rxq.bd)
rxq              3826 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		il3945_rx_queue_free(il, &il->rxq);
rxq               778 drivers/net/wireless/intel/iwlegacy/3945.c il3945_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
rxq               780 drivers/net/wireless/intel/iwlegacy/3945.c 	il_wr(il, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
rxq               781 drivers/net/wireless/intel/iwlegacy/3945.c 	il_wr(il, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
rxq               957 drivers/net/wireless/intel/iwlegacy/3945.c 	struct il_rx_queue *rxq = &il->rxq;
rxq               967 drivers/net/wireless/intel/iwlegacy/3945.c 	if (!rxq->bd) {
rxq               974 drivers/net/wireless/intel/iwlegacy/3945.c 		il3945_rx_queue_reset(il, rxq);
rxq               978 drivers/net/wireless/intel/iwlegacy/3945.c 	il3945_rx_init(il, rxq);
rxq               985 drivers/net/wireless/intel/iwlegacy/3945.c 	il_wr(il, FH39_RCSR_WPTR(0), rxq->write & ~7);
rxq               176 drivers/net/wireless/intel/iwlegacy/3945.h void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
rxq                84 drivers/net/wireless/intel/iwlegacy/4965-mac.c il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
rxq                88 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	spin_lock_irqsave(&rxq->lock, flags);
rxq                89 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	INIT_LIST_HEAD(&rxq->rx_free);
rxq                90 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	INIT_LIST_HEAD(&rxq->rx_used);
rxq                95 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		if (rxq->pool[i].page != NULL) {
rxq                96 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
rxq                99 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			__il_free_pages(il, rxq->pool[i].page);
rxq               100 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			rxq->pool[i].page = NULL;
rxq               102 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
rxq               106 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		rxq->queue[i] = NULL;
rxq               110 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	rxq->read = rxq->write = 0;
rxq               111 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	rxq->write_actual = 0;
rxq               112 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	rxq->free_count = 0;
rxq               113 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	spin_unlock_irqrestore(&rxq->lock, flags);
rxq               117 drivers/net/wireless/intel/iwlegacy/4965-mac.c il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
rxq               135 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8));
rxq               138 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);
rxq               182 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct il_rx_queue *rxq = &il->rxq;
rxq               195 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	if (!rxq->bd) {
rxq               202 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		il4965_rx_queue_reset(il, rxq);
rxq               206 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	il4965_rx_init(il, rxq);
rxq               210 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	rxq->need_update = 1;
rxq               211 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	il_rx_queue_update_write_ptr(il, rxq);
rxq               251 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct il_rx_queue *rxq = &il->rxq;
rxq               256 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	spin_lock_irqsave(&rxq->lock, flags);
rxq               257 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
rxq               259 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		rxb = rxq->queue[rxq->write];
rxq               263 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		element = rxq->rx_free.next;
rxq               268 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		rxq->bd[rxq->write] =
rxq               270 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		rxq->queue[rxq->write] = rxb;
rxq               271 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
rxq               272 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		rxq->free_count--;
rxq               274 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	spin_unlock_irqrestore(&rxq->lock, flags);
rxq               277 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	if (rxq->free_count <= RX_LOW_WATERMARK)
rxq               282 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	if (rxq->write_actual != (rxq->write & ~0x7)) {
rxq               283 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		spin_lock_irqsave(&rxq->lock, flags);
rxq               284 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		rxq->need_update = 1;
rxq               285 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		spin_unlock_irqrestore(&rxq->lock, flags);
rxq               286 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		il_rx_queue_update_write_ptr(il, rxq);
rxq               301 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct il_rx_queue *rxq = &il->rxq;
rxq               310 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		spin_lock_irqsave(&rxq->lock, flags);
rxq               311 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		if (list_empty(&rxq->rx_used)) {
rxq               312 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			spin_unlock_irqrestore(&rxq->lock, flags);
rxq               315 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		spin_unlock_irqrestore(&rxq->lock, flags);
rxq               317 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		if (rxq->free_count > RX_LOW_WATERMARK)
rxq               330 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			if (rxq->free_count <= RX_LOW_WATERMARK &&
rxq               336 drivers/net/wireless/intel/iwlegacy/4965-mac.c 				       rxq->free_count);
rxq               353 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		spin_lock_irqsave(&rxq->lock, flags);
rxq               355 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		if (list_empty(&rxq->rx_used)) {
rxq               356 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			spin_unlock_irqrestore(&rxq->lock, flags);
rxq               364 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		element = rxq->rx_used.next;
rxq               372 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		list_add_tail(&rxb->list, &rxq->rx_free);
rxq               373 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		rxq->free_count++;
rxq               376 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		spin_unlock_irqrestore(&rxq->lock, flags);
rxq               406 drivers/net/wireless/intel/iwlegacy/4965-mac.c il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
rxq               410 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		if (rxq->pool[i].page != NULL) {
rxq               411 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
rxq               414 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			__il_free_pages(il, rxq->pool[i].page);
rxq               415 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			rxq->pool[i].page = NULL;
rxq               419 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
rxq               420 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			  rxq->bd_dma);
rxq               422 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			  rxq->rb_stts, rxq->rb_stts_dma);
rxq               423 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	rxq->bd = NULL;
rxq               424 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	rxq->rb_stts = NULL;
rxq              4214 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct il_rx_queue *rxq = &il->rxq;
rxq              4224 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
rxq              4225 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	i = rxq->read;
rxq              4232 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	total_empty = r - rxq->write_actual;
rxq              4242 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		rxb = rxq->queue[i];
rxq              4249 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		rxq->queue[i] = NULL;
rxq              4295 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		spin_lock_irqsave(&rxq->lock, flags);
rxq              4306 drivers/net/wireless/intel/iwlegacy/4965-mac.c 				list_add_tail(&rxb->list, &rxq->rx_used);
rxq              4308 drivers/net/wireless/intel/iwlegacy/4965-mac.c 				list_add_tail(&rxb->list, &rxq->rx_free);
rxq              4309 drivers/net/wireless/intel/iwlegacy/4965-mac.c 				rxq->free_count++;
rxq              4312 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			list_add_tail(&rxb->list, &rxq->rx_used);
rxq              4314 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		spin_unlock_irqrestore(&rxq->lock, flags);
rxq              4322 drivers/net/wireless/intel/iwlegacy/4965-mac.c 				rxq->read = i;
rxq              4330 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	rxq->read = i;
rxq              4476 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		il_rx_queue_update_write_ptr(il, &il->rxq);
rxq              6740 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	if (il->rxq.bd)
rxq              6741 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		il4965_rx_queue_free(il, &il->rxq);
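
il4965_rx_allocate() (4965-mac.c lines 301-376) illustrates refilling a locked pool without allocating under the spinlock: peek at rx_used with the lock held, drop the lock for the page allocation, then retake it and re-check the list, since another context may have drained it in between. A hedged sketch of that loop, with the DMA mapping elided and my_* types invented:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

struct my_buf {
	struct list_head list;
	struct page *page;
};

struct my_pool {
	spinlock_t lock;
	struct list_head rx_used;	/* buffers needing pages */
	struct list_head rx_free;	/* buffers ready for the ring */
	u32 free_count;
};

static void my_rx_allocate(struct my_pool *p, gfp_t gfp)
{
	struct my_buf *rxb;
	struct page *page;
	unsigned long flags;

	for (;;) {
		spin_lock_irqsave(&p->lock, flags);
		if (list_empty(&p->rx_used)) {
			spin_unlock_irqrestore(&p->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&p->lock, flags);

		/* Allocate with the lock dropped; this may be slow. */
		page = alloc_pages(gfp, 0);
		if (!page)
			return;

		spin_lock_irqsave(&p->lock, flags);
		if (list_empty(&p->rx_used)) {
			/* Drained while we allocated: give the page back. */
			spin_unlock_irqrestore(&p->lock, flags);
			__free_pages(page, 0);
			return;
		}
		rxb = list_first_entry(&p->rx_used, struct my_buf, list);
		list_del(&rxb->list);
		rxb->page = page;	/* real driver maps it for DMA here */
		list_add_tail(&rxb->list, &p->rx_free);
		p->free_count++;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}
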
rxq                40 drivers/net/wireless/intel/iwlegacy/4965.h void il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
rxq                41 drivers/net/wireless/intel/iwlegacy/4965.h int il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq);
rxq                51 drivers/net/wireless/intel/iwlegacy/4965.h void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq);
rxq              2598 drivers/net/wireless/intel/iwlegacy/common.c 	struct il_rx_queue *rxq = &il->rxq;
rxq              2602 drivers/net/wireless/intel/iwlegacy/common.c 	spin_lock_init(&rxq->lock);
rxq              2603 drivers/net/wireless/intel/iwlegacy/common.c 	INIT_LIST_HEAD(&rxq->rx_free);
rxq              2604 drivers/net/wireless/intel/iwlegacy/common.c 	INIT_LIST_HEAD(&rxq->rx_used);
rxq              2607 drivers/net/wireless/intel/iwlegacy/common.c 	rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
rxq              2609 drivers/net/wireless/intel/iwlegacy/common.c 	if (!rxq->bd)
rxq              2612 drivers/net/wireless/intel/iwlegacy/common.c 	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status),
rxq              2613 drivers/net/wireless/intel/iwlegacy/common.c 					  &rxq->rb_stts_dma, GFP_KERNEL);
rxq              2614 drivers/net/wireless/intel/iwlegacy/common.c 	if (!rxq->rb_stts)
rxq              2619 drivers/net/wireless/intel/iwlegacy/common.c 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
rxq              2623 drivers/net/wireless/intel/iwlegacy/common.c 	rxq->read = rxq->write = 0;
rxq              2624 drivers/net/wireless/intel/iwlegacy/common.c 	rxq->write_actual = 0;
rxq              2625 drivers/net/wireless/intel/iwlegacy/common.c 	rxq->free_count = 0;
rxq              2626 drivers/net/wireless/intel/iwlegacy/common.c 	rxq->need_update = 0;
rxq              2630 drivers/net/wireless/intel/iwlegacy/common.c 	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
rxq              2631 drivers/net/wireless/intel/iwlegacy/common.c 			  rxq->bd_dma);
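
il_rx_queue_alloc() (common.c lines 2598-2631) pairs its two dma_alloc_coherent() calls with the usual goto unwind: if the status buffer fails, the already-allocated descriptor ring is freed before returning. Schematically, reusing the my_rxq fields from the earlier sketch:

static int my_rx_queue_alloc(struct device *dev, struct my_rxq *rxq)
{
	rxq->bd = dma_alloc_coherent(dev, 4 * MY_QUEUE_SIZE,
				     &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		return -ENOMEM;

	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
					  &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_bd;

	rxq->read = 0;
	rxq->write = 0;
	return 0;

err_bd:
	dma_free_coherent(dev, 4 * MY_QUEUE_SIZE, rxq->bd, rxq->bd_dma);
	rxq->bd = NULL;
	return -ENOMEM;
}
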
rxq              1256 drivers/net/wireless/intel/iwlegacy/common.h 	struct il_rx_queue rxq;
rxq               864 drivers/net/wireless/intel/iwlegacy/debug.c 	struct il_rx_queue *rxq = &il->rxq;
rxq               869 drivers/net/wireless/intel/iwlegacy/debug.c 	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", rxq->read);
rxq               870 drivers/net/wireless/intel/iwlegacy/debug.c 	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", rxq->write);
rxq               873 drivers/net/wireless/intel/iwlegacy/debug.c 		      rxq->free_count);
rxq               874 drivers/net/wireless/intel/iwlegacy/debug.c 	if (rxq->rb_stts) {
rxq               877 drivers/net/wireless/intel/iwlegacy/debug.c 			      le16_to_cpu(rxq->rb_stts->
rxq               450 drivers/net/wireless/intel/iwlwifi/fw/error-dump.h 	__le32 rxq;
rxq                96 drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c 		cpu_to_le64(trans_pcie->rxq->bd_dma);
rxq               142 drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c 		cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
rxq               144 drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c 		cpu_to_le64(trans_pcie->rxq->tr_tail_dma);
rxq               146 drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c 		cpu_to_le64(trans_pcie->rxq->cr_tail_dma);
rxq               154 drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c 		cpu_to_le64(trans_pcie->rxq->used_bd_dma);
rxq               205 drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c 	rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma);
rxq               206 drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c 	rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma);
rxq               207 drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c 	rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
rxq               265 drivers/net/wireless/intel/iwlwifi/pcie/internal.h 					    struct iwl_rxq *rxq)
rxq               268 drivers/net/wireless/intel/iwlwifi/pcie/internal.h 		__le16 *rb_stts = rxq->rb_stts;
rxq               272 drivers/net/wireless/intel/iwlwifi/pcie/internal.h 		struct iwl_rb_status *rb_stts = rxq->rb_stts;
rxq               515 drivers/net/wireless/intel/iwlwifi/pcie/internal.h 	struct iwl_rxq *rxq;
rxq               659 drivers/net/wireless/intel/iwlwifi/pcie/internal.h void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
rxq               662 drivers/net/wireless/intel/iwlwifi/pcie/internal.h 			    struct iwl_rxq *rxq);
rxq               176 drivers/net/wireless/intel/iwlwifi/pcie/rx.c static int iwl_rxq_space(const struct iwl_rxq *rxq)
rxq               179 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));
rxq               187 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
rxq               224 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				    struct iwl_rxq *rxq)
rxq               228 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	lockdep_assert_held(&rxq->lock);
rxq               244 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			rxq->need_update = true;
rxq               249 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq->write_actual = round_down(rxq->write, 8);
rxq               252 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			    (rxq->write_actual |
rxq               253 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			     ((FIRST_RX_QUEUE + rxq->id) << 16)));
rxq               255 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
rxq               256 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			    rxq->write_actual);
rxq               258 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
rxq               267 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
rxq               269 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		if (!rxq->need_update)
rxq               271 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		spin_lock(&rxq->lock);
rxq               272 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
rxq               273 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxq->need_update = false;
rxq               274 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		spin_unlock(&rxq->lock);
rxq               279 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				struct iwl_rxq *rxq,
rxq               283 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		struct iwl_rx_transfer_desc *bd = rxq->bd;
rxq               287 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
rxq               288 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
rxq               290 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		__le64 *bd = rxq->bd;
rxq               292 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
rxq               296 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		     (u32)rxb->vid, rxq->id, rxq->write);
rxq               303 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				  struct iwl_rxq *rxq)
rxq               318 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	spin_lock(&rxq->lock);
rxq               319 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	while (rxq->free_count) {
rxq               321 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
rxq               328 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		iwl_pcie_restock_bd(trans, rxq, rxb);
rxq               329 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
rxq               330 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxq->free_count--;
rxq               332 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	spin_unlock(&rxq->lock);
rxq               338 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	if (rxq->write_actual != (rxq->write & ~0x7)) {
rxq               339 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		spin_lock(&rxq->lock);
rxq               340 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
rxq               341 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		spin_unlock(&rxq->lock);
rxq               349 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				  struct iwl_rxq *rxq)
rxq               364 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	spin_lock(&rxq->lock);
rxq               365 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
rxq               366 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		__le32 *bd = (__le32 *)rxq->bd;
rxq               368 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxb = rxq->queue[rxq->write];
rxq               372 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
rxq               378 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
rxq               379 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxq->queue[rxq->write] = rxb;
rxq               380 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
rxq               381 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxq->free_count--;
rxq               383 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	spin_unlock(&rxq->lock);
rxq               387 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	if (rxq->write_actual != (rxq->write & ~0x7)) {
rxq               388 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		spin_lock(&rxq->lock);
rxq               389 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
rxq               390 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		spin_unlock(&rxq->lock);
rxq               406 drivers/net/wireless/intel/iwlwifi/pcie/rx.c void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
rxq               409 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		iwl_pcie_rxmq_restock(trans, rxq);
rxq               411 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		iwl_pcie_rxsq_restock(trans, rxq);
rxq               456 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			    struct iwl_rxq *rxq)
rxq               463 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		spin_lock(&rxq->lock);
rxq               464 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		if (list_empty(&rxq->rx_used)) {
rxq               465 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			spin_unlock(&rxq->lock);
rxq               468 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		spin_unlock(&rxq->lock);
rxq               475 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		spin_lock(&rxq->lock);
rxq               477 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		if (list_empty(&rxq->rx_used)) {
rxq               478 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			spin_unlock(&rxq->lock);
rxq               482 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
rxq               485 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		spin_unlock(&rxq->lock);
rxq               496 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			spin_lock(&rxq->lock);
rxq               497 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			list_add(&rxb->list, &rxq->rx_used);
rxq               498 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			spin_unlock(&rxq->lock);
rxq               503 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		spin_lock(&rxq->lock);
rxq               505 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		list_add_tail(&rxb->list, &rxq->rx_free);
rxq               506 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxq->free_count++;
rxq               508 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		spin_unlock(&rxq->lock);
rxq               634 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				      struct iwl_rxq *rxq)
rxq               640 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	lockdep_assert_held(&rxq->lock);
rxq               660 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		list_move(&rxb->list, &rxq->rx_free);
rxq               664 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
rxq               665 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq->free_count += RX_CLAIM_REQ_ALLOC;
rxq               690 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				  struct iwl_rxq *rxq)
rxq               697 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	if (rxq->bd)
rxq               699 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				  free_size * rxq->queue_size,
rxq               700 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				  rxq->bd, rxq->bd_dma);
rxq               701 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq->bd_dma = 0;
rxq               702 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq->bd = NULL;
rxq               704 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq->rb_stts_dma = 0;
rxq               705 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq->rb_stts = NULL;
rxq               707 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	if (rxq->used_bd)
rxq               709 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				  (use_rx_td ? sizeof(*rxq->cd) :
rxq               710 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				   sizeof(__le32)) * rxq->queue_size,
rxq               711 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				  rxq->used_bd, rxq->used_bd_dma);
rxq               712 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq->used_bd_dma = 0;
rxq               713 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq->used_bd = NULL;
rxq               718 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	if (rxq->tr_tail)
rxq               720 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				  rxq->tr_tail, rxq->tr_tail_dma);
rxq               721 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq->tr_tail_dma = 0;
rxq               722 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq->tr_tail = NULL;
rxq               724 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	if (rxq->cr_tail)
rxq               726 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				  rxq->cr_tail, rxq->cr_tail_dma);
rxq               727 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq->cr_tail_dma = 0;
rxq               728 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq->cr_tail = NULL;
rxq               732 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				  struct iwl_rxq *rxq)
rxq               743 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	spin_lock_init(&rxq->lock);
rxq               745 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxq->queue_size = MQ_RX_TABLE_SIZE;
rxq               747 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxq->queue_size = RX_QUEUE_SIZE;
rxq               755 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
rxq               756 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				     &rxq->bd_dma, GFP_KERNEL);
rxq               757 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	if (!rxq->bd)
rxq               761 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxq->used_bd = dma_alloc_coherent(dev,
rxq               762 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 						  (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
rxq               763 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 						  &rxq->used_bd_dma,
rxq               765 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		if (!rxq->used_bd)
rxq               769 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq->rb_stts = trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
rxq               770 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq->rb_stts_dma =
rxq               771 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
rxq               777 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
rxq               778 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 					  &rxq->tr_tail_dma, GFP_KERNEL);
rxq               779 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	if (!rxq->tr_tail)
rxq               783 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
rxq               784 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 					  &rxq->cr_tail_dma, GFP_KERNEL);
rxq               785 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	if (!rxq->cr_tail)
rxq               791 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	*rxq->cr_tail = cpu_to_le16(500);
rxq               797 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
rxq               799 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		iwl_pcie_free_rxq_dma(trans, rxq);
rxq               814 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	if (WARN_ON(trans_pcie->rxq))
rxq               817 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
rxq               819 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	if (!trans_pcie->rxq)
rxq               839 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
rxq               841 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxq->id = i;
rxq               842 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
rxq               857 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	kfree(trans_pcie->rxq);
rxq               862 drivers/net/wireless/intel/iwlwifi/pcie/rx.c static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
rxq               899 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		    (u32)(rxq->bd_dma >> 8));
rxq               903 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		    rxq->rb_stts_dma >> 4);
rxq               968 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 					 trans_pcie->rxq[i].bd_dma);
rxq               972 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 					 trans_pcie->rxq[i].used_bd_dma);
rxq               976 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 					 trans_pcie->rxq[i].rb_stts_dma);
rxq              1020 drivers/net/wireless/intel/iwlwifi/pcie/rx.c void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
rxq              1022 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	lockdep_assert_held(&rxq->lock);
rxq              1024 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	INIT_LIST_HEAD(&rxq->rx_free);
rxq              1025 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	INIT_LIST_HEAD(&rxq->rx_used);
rxq              1026 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq->free_count = 0;
rxq              1027 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq->used_count = 0;
rxq              1043 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	if (!trans_pcie->rxq) {
rxq              1048 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	def_rxq = trans_pcie->rxq;
rxq              1066 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
rxq              1068 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		spin_lock(&rxq->lock);
rxq              1074 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxq->read = 0;
rxq              1075 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxq->write = 0;
rxq              1076 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxq->write_actual = 0;
rxq              1077 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		memset(rxq->rb_stts, 0, (trans->trans_cfg->device_family >=
rxq              1081 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		iwl_pcie_rx_init_rxb_lists(rxq);
rxq              1083 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		if (!rxq->napi.poll)
rxq              1084 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
rxq              1087 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		spin_unlock(&rxq->lock);
rxq              1126 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
rxq              1128 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
rxq              1130 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	spin_lock(&trans_pcie->rxq->lock);
rxq              1131 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
rxq              1132 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	spin_unlock(&trans_pcie->rxq->lock);
rxq              1162 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	if (!trans_pcie->rxq) {
rxq              1181 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
rxq              1183 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		iwl_pcie_free_rxq_dma(trans, rxq);
rxq              1185 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		if (rxq->napi.poll)
rxq              1186 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			netif_napi_del(&rxq->napi);
rxq              1188 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	kfree(trans_pcie->rxq);
rxq              1191 drivers/net/wireless/intel/iwlwifi/pcie/rx.c static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
rxq              1195 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
rxq              1207 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				  struct iwl_rxq *rxq, bool emergency)
rxq              1214 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	list_add_tail(&rxb->list, &rxq->rx_used);
rxq              1220 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq->used_count++;
rxq              1227 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
rxq              1230 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		iwl_pcie_rx_move_to_allocator(rxq, rba);
rxq              1238 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				struct iwl_rxq *rxq,
rxq              1272 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				     rxq->id, offset);
rxq              1277 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			FH_RSCSR_RXQ_POS != rxq->id,
rxq              1279 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		     rxq->id,
rxq              1285 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			     rxq->id, offset,
rxq              1321 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		if (rxq->id == trans_pcie->def_rx_queue)
rxq              1322 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
rxq              1325 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
rxq              1326 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 					   &rxcb, rxq->id);
rxq              1377 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
rxq              1379 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			list_add_tail(&rxb->list, &rxq->rx_free);
rxq              1380 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			rxq->free_count++;
rxq              1383 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
rxq              1387 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 						  struct iwl_rxq *rxq, int i)
rxq              1396 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxb = rxq->queue[i];
rxq              1397 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxq->queue[i] = NULL;
rxq              1403 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
rxq              1405 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
rxq              1433 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	struct iwl_rxq *rxq;
rxq              1437 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
rxq              1440 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq = &trans_pcie->rxq[queue];
rxq              1443 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	spin_lock(&rxq->lock);
rxq              1446 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
rxq              1447 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	i = rxq->read;
rxq              1450 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	r &= (rxq->queue_size - 1);
rxq              1454 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
rxq              1464 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
rxq              1466 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			iwl_pcie_rx_move_to_allocator(rxq, rba);
rxq              1473 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
rxq              1475 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxb = iwl_pcie_get_rxb(trans, rxq, i);
rxq              1479 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
rxq              1481 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		i = (i + 1) & (rxq->queue_size - 1);
rxq              1490 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
rxq              1491 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			iwl_pcie_rx_allocator_get(trans, rxq);
rxq              1493 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
rxq              1495 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			iwl_pcie_rx_move_to_allocator(rxq, rba);
rxq              1500 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				if (rb_pending_alloc < rxq->queue_size / 3) {
rxq              1507 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				rxq->read = i;
rxq              1508 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				spin_unlock(&rxq->lock);
rxq              1509 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
rxq              1510 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 				iwl_pcie_rxq_restock(trans, rxq);
rxq              1517 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq->read = i;
rxq              1520 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		*rxq->cr_tail = cpu_to_le16(r);
rxq              1521 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	spin_unlock(&rxq->lock);
rxq              1536 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
rxq              1538 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	napi = &rxq->napi;
rxq              1549 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	iwl_pcie_rxq_restock(trans, rxq);
rxq              1855 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
rxq              2193 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
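
Two arithmetic idioms recur throughout the pcie/rx.c hits: free-slot accounting for a power-of-two ring (iwl_rxq_space(), lines 176-187) and doorbell batching, where the hardware write pointer only advances in multiples of eight (`rxq->write & ~0x7`, `round_down(rxq->write, 8)`). The ring math is worth seeing standalone; this userspace rendition is a sketch of the same expression, not driver code:

#include <assert.h>
#include <stdio.h>

/* One slot is kept empty so a full ring and an empty ring are
 * distinguishable; the mask works only for power-of-two sizes,
 * which is what the WARN_ON in iwl_rxq_space() enforces. */
static unsigned int ring_space(unsigned int read, unsigned int write,
			       unsigned int size)
{
	assert((size & (size - 1)) == 0);
	return (read - write - 1) & (size - 1);
}

int main(void)
{
	printf("%u\n", ring_space(0, 0, 256));	/* empty: 255 free */
	printf("%u\n", ring_space(10, 9, 256));	/* full: 0 free */
	return 0;
}
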
rxq              2332 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
rxq              2335 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
rxq              2336 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
rxq              2337 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
rxq              2597 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	if (!trans_pcie->rxq)
rxq              2605 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
rxq              2610 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 				 rxq->read);
rxq              2612 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 				 rxq->write);
rxq              2614 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 				 rxq->write_actual);
rxq              2616 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 				 rxq->need_update);
rxq              2618 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 				 rxq->free_count);
rxq              2619 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		if (rxq->rb_stts) {
rxq              2621 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 								     rxq));
rxq              2969 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	struct iwl_rxq *rxq = &trans_pcie->rxq[0];
rxq              2972 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	spin_lock(&rxq->lock);
rxq              2974 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
rxq              2976 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	for (i = rxq->read, j = 0;
rxq              2979 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
rxq              3000 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	spin_unlock(&rxq->lock);
rxq              3272 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		struct iwl_rxq *rxq = &trans_pcie->rxq[0];
rxq              3275 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 			le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
rxq              3277 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
rxq               247 drivers/net/wireless/marvell/mwl8k.c 	struct mwl8k_rx_queue rxq[MWL8K_RX_QUEUES];
rxq              1162 drivers/net/wireless/marvell/mwl8k.c 	struct mwl8k_rx_queue *rxq = priv->rxq + index;
rxq              1166 drivers/net/wireless/marvell/mwl8k.c 	rxq->rxd_count = 0;
rxq              1167 drivers/net/wireless/marvell/mwl8k.c 	rxq->head = 0;
rxq              1168 drivers/net/wireless/marvell/mwl8k.c 	rxq->tail = 0;
rxq              1172 drivers/net/wireless/marvell/mwl8k.c 	rxq->rxd = pci_zalloc_consistent(priv->pdev, size, &rxq->rxd_dma);
rxq              1173 drivers/net/wireless/marvell/mwl8k.c 	if (rxq->rxd == NULL) {
rxq              1178 drivers/net/wireless/marvell/mwl8k.c 	rxq->buf = kcalloc(MWL8K_RX_DESCS, sizeof(*rxq->buf), GFP_KERNEL);
rxq              1179 drivers/net/wireless/marvell/mwl8k.c 	if (rxq->buf == NULL) {
rxq              1180 drivers/net/wireless/marvell/mwl8k.c 		pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma);
rxq              1191 drivers/net/wireless/marvell/mwl8k.c 		rxd = rxq->rxd + (i * priv->rxd_ops->rxd_size);
rxq              1196 drivers/net/wireless/marvell/mwl8k.c 		next_dma_addr = rxq->rxd_dma + (nexti * desc_size);
rxq              1207 drivers/net/wireless/marvell/mwl8k.c 	struct mwl8k_rx_queue *rxq = priv->rxq + index;
rxq              1211 drivers/net/wireless/marvell/mwl8k.c 	while (rxq->rxd_count < MWL8K_RX_DESCS && limit--) {
rxq              1224 drivers/net/wireless/marvell/mwl8k.c 		rxq->rxd_count++;
rxq              1225 drivers/net/wireless/marvell/mwl8k.c 		rx = rxq->tail++;
rxq              1226 drivers/net/wireless/marvell/mwl8k.c 		if (rxq->tail == MWL8K_RX_DESCS)
rxq              1227 drivers/net/wireless/marvell/mwl8k.c 			rxq->tail = 0;
rxq              1228 drivers/net/wireless/marvell/mwl8k.c 		rxq->buf[rx].skb = skb;
rxq              1229 drivers/net/wireless/marvell/mwl8k.c 		dma_unmap_addr_set(&rxq->buf[rx], dma, addr);
rxq              1231 drivers/net/wireless/marvell/mwl8k.c 		rxd = rxq->rxd + (rx * priv->rxd_ops->rxd_size);
rxq              1244 drivers/net/wireless/marvell/mwl8k.c 	struct mwl8k_rx_queue *rxq = priv->rxq + index;
rxq              1247 drivers/net/wireless/marvell/mwl8k.c 	if (rxq->rxd == NULL)
rxq              1251 drivers/net/wireless/marvell/mwl8k.c 		if (rxq->buf[i].skb != NULL) {
rxq              1253 drivers/net/wireless/marvell/mwl8k.c 					 dma_unmap_addr(&rxq->buf[i], dma),
rxq              1255 drivers/net/wireless/marvell/mwl8k.c 			dma_unmap_addr_set(&rxq->buf[i], dma, 0);
rxq              1257 drivers/net/wireless/marvell/mwl8k.c 			kfree_skb(rxq->buf[i].skb);
rxq              1258 drivers/net/wireless/marvell/mwl8k.c 			rxq->buf[i].skb = NULL;
rxq              1262 drivers/net/wireless/marvell/mwl8k.c 	kfree(rxq->buf);
rxq              1263 drivers/net/wireless/marvell/mwl8k.c 	rxq->buf = NULL;
rxq              1267 drivers/net/wireless/marvell/mwl8k.c 			    rxq->rxd, rxq->rxd_dma);
rxq              1268 drivers/net/wireless/marvell/mwl8k.c 	rxq->rxd = NULL;
rxq              1321 drivers/net/wireless/marvell/mwl8k.c 	struct mwl8k_rx_queue *rxq = priv->rxq + index;
rxq              1325 drivers/net/wireless/marvell/mwl8k.c 	while (rxq->rxd_count && limit--) {
rxq              1333 drivers/net/wireless/marvell/mwl8k.c 		skb = rxq->buf[rxq->head].skb;
rxq              1337 drivers/net/wireless/marvell/mwl8k.c 		rxd = rxq->rxd + (rxq->head * priv->rxd_ops->rxd_size);
rxq              1344 drivers/net/wireless/marvell/mwl8k.c 		rxq->buf[rxq->head].skb = NULL;
rxq              1347 drivers/net/wireless/marvell/mwl8k.c 				 dma_unmap_addr(&rxq->buf[rxq->head], dma),
rxq              1349 drivers/net/wireless/marvell/mwl8k.c 		dma_unmap_addr_set(&rxq->buf[rxq->head], dma, 0);
rxq              1351 drivers/net/wireless/marvell/mwl8k.c 		rxq->head++;
rxq              1352 drivers/net/wireless/marvell/mwl8k.c 		if (rxq->head == MWL8K_RX_DESCS)
rxq              1353 drivers/net/wireless/marvell/mwl8k.c 			rxq->head = 0;
rxq              1355 drivers/net/wireless/marvell/mwl8k.c 		rxq->rxd_count--;
rxq              2470 drivers/net/wireless/marvell/mwl8k.c 	cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma);
rxq              2567 drivers/net/wireless/marvell/mwl8k.c 		iowrite32(priv->rxq[0].rxd_dma, priv->sram + off);
rxq              2570 drivers/net/wireless/marvell/mwl8k.c 		iowrite32(priv->rxq[0].rxd_dma, priv->sram + off);
rxq              2636 drivers/net/wireless/marvell/mwl8k.c 	cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma);
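
mwl8k manages its descriptor ring with explicit head/tail indices plus an rxd_count of descriptors currently owned by hardware: refill advances tail (lines 1224-1227), completion advances head (lines 1351-1355), and both wrap at MWL8K_RX_DESCS. A standalone sketch of just that bookkeeping, with the DMA and skb handling stripped out and MY_DESCS invented:

#define MY_DESCS 256

struct my_ring {
	unsigned int head;	/* next descriptor to process */
	unsigned int tail;	/* next descriptor to refill  */
	unsigned int count;	/* descriptors owned by hardware */
};

/* Refill side: claim the slot at tail, then advance and wrap. */
static int my_ring_refill_one(struct my_ring *r)
{
	unsigned int slot;

	if (r->count == MY_DESCS)
		return -1;		/* ring already full */
	slot = r->tail++;
	if (r->tail == MY_DESCS)
		r->tail = 0;
	r->count++;
	return slot;
}

/* Receive side: consume the slot at head, then advance and wrap. */
static int my_ring_process_one(struct my_ring *r)
{
	unsigned int slot;

	if (!r->count)
		return -1;		/* nothing pending */
	slot = r->head;
	if (++r->head == MY_DESCS)
		r->head = 0;
	r->count--;
	return slot;
}
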
rxq               952 drivers/net/xen-netfront.c 				 struct sk_buff_head *rxq)
rxq               958 drivers/net/xen-netfront.c 	while ((skb = __skb_dequeue(rxq)) != NULL) {
rxq               997 drivers/net/xen-netfront.c 	struct sk_buff_head rxq;
rxq              1004 drivers/net/xen-netfront.c 	skb_queue_head_init(&rxq);
rxq              1058 drivers/net/xen-netfront.c 		__skb_queue_tail(&rxq, skb);
rxq              1066 drivers/net/xen-netfront.c 	work_done -= handle_incoming_queue(queue, &rxq);
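
xen-netfront's poll loop (lines 997-1066) stages completed skbs on an on-stack sk_buff_head and delivers them only after the ring walk, keeping per-packet stack entry out of the harvesting loop. Roughly, with my_ring_next() and handle_one() as invented stand-ins for the real ring accessor and delivery work:

static int my_poll(struct my_queue *q, int budget)
{
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	int work_done = 0;

	skb_queue_head_init(&rxq);

	/* Phase 1: harvest from the shared ring, up to the NAPI budget. */
	while (work_done < budget && (skb = my_ring_next(q)) != NULL) {
		__skb_queue_tail(&rxq, skb);
		work_done++;
	}

	/* Phase 2: hand everything up the stack in one pass. */
	while ((skb = __skb_dequeue(&rxq)) != NULL)
		handle_one(q, skb);

	return work_done;
}
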
rxq               196 drivers/target/iscsi/cxgbit/cxgbit.h 	struct sk_buff_head rxq;
rxq               782 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	__skb_queue_purge(&csk->rxq);
rxq              1355 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	skb_queue_head_init(&csk->rxq);
rxq              1652 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	spin_lock_bh(&csk->rxq.lock);
rxq              1653 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	__skb_queue_tail(&csk->rxq, skb);
rxq              1654 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	spin_unlock_bh(&csk->rxq.lock);
rxq               101 drivers/target/iscsi/cxgbit/cxgbit_main.c 		spin_lock_bh(&csk->rxq.lock);
rxq               102 drivers/target/iscsi/cxgbit/cxgbit_main.c 		__skb_queue_tail(&csk->rxq, skb);
rxq               103 drivers/target/iscsi/cxgbit/cxgbit_main.c 		if (skb_queue_len(&csk->rxq) == 1)
rxq               105 drivers/target/iscsi/cxgbit/cxgbit_main.c 		spin_unlock_bh(&csk->rxq.lock);
rxq               337 drivers/target/iscsi/cxgbit/cxgbit_main.c 	spin_lock(&csk->rxq.lock);
rxq               338 drivers/target/iscsi/cxgbit/cxgbit_main.c 	__skb_queue_tail(&csk->rxq, skb);
rxq               339 drivers/target/iscsi/cxgbit/cxgbit_main.c 	if (skb_queue_len(&csk->rxq) == 1)
rxq               341 drivers/target/iscsi/cxgbit/cxgbit_main.c 	spin_unlock(&csk->rxq.lock);
rxq               566 drivers/target/iscsi/cxgbit/cxgbit_main.c 		spin_lock(&csk->rxq.lock);
rxq               567 drivers/target/iscsi/cxgbit/cxgbit_main.c 		__skb_queue_tail(&csk->rxq, skb);
rxq               568 drivers/target/iscsi/cxgbit/cxgbit_main.c 		if (skb_queue_len(&csk->rxq) == 1)
rxq               570 drivers/target/iscsi/cxgbit/cxgbit_main.c 		spin_unlock(&csk->rxq.lock);
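
The three cxgbit_main.c enqueue sites (lines 101-105, 337-341, 566-570) all use the same wakeup economy: append under rxq.lock, but wake the reader only when the queue length becomes 1, i.e. on the empty-to-non-empty transition, since a reader woken earlier will drain later arrivals anyway. As a sketch, with my_sock as an invented container for the rxq/waitq pair:

struct my_sock {
	struct sk_buff_head rxq;
	wait_queue_head_t waitq;
};

static void my_rx_enqueue(struct my_sock *sk, struct sk_buff *skb)
{
	spin_lock(&sk->rxq.lock);
	__skb_queue_tail(&sk->rxq, skb);
	if (skb_queue_len(&sk->rxq) == 1)	/* 0 -> 1: reader may sleep */
		wake_up(&sk->waitq);
	spin_unlock(&sk->rxq.lock);
}
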
rxq              1578 drivers/target/iscsi/cxgbit/cxgbit_target.c static bool cxgbit_rxq_len(struct cxgbit_sock *csk, struct sk_buff_head *rxq)
rxq              1580 drivers/target/iscsi/cxgbit/cxgbit_target.c 	spin_lock_bh(&csk->rxq.lock);
rxq              1581 drivers/target/iscsi/cxgbit/cxgbit_target.c 	if (skb_queue_len(&csk->rxq)) {
rxq              1582 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb_queue_splice_init(&csk->rxq, rxq);
rxq              1583 drivers/target/iscsi/cxgbit/cxgbit_target.c 		spin_unlock_bh(&csk->rxq.lock);
rxq              1586 drivers/target/iscsi/cxgbit/cxgbit_target.c 	spin_unlock_bh(&csk->rxq.lock);
rxq              1593 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct sk_buff_head rxq;
rxq              1595 drivers/target/iscsi/cxgbit/cxgbit_target.c 	skb_queue_head_init(&rxq);
rxq              1597 drivers/target/iscsi/cxgbit/cxgbit_target.c 	wait_event_interruptible(csk->waitq, cxgbit_rxq_len(csk, &rxq));
rxq              1602 drivers/target/iscsi/cxgbit/cxgbit_target.c 	while ((skb = __skb_dequeue(&rxq))) {
rxq              1609 drivers/target/iscsi/cxgbit/cxgbit_target.c 	__skb_queue_purge(&rxq);
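
On the consumer side, cxgbit_rxq_len() (cxgbit_target.c lines 1578-1586) splices the whole shared queue onto a caller-provided private list in one locked operation, so the subsequent per-skb processing runs lock-free. A sketch pairing with the enqueue side above, my_handle() standing in for the PDU processing:

static bool my_rxq_grab(struct my_sock *sk, struct sk_buff_head *local)
{
	bool nonempty = false;

	spin_lock_bh(&sk->rxq.lock);
	if (skb_queue_len(&sk->rxq)) {
		/* Moves every skb and re-inits the source queue. */
		skb_queue_splice_init(&sk->rxq, local);
		nonempty = true;
	}
	spin_unlock_bh(&sk->rxq.lock);

	return nonempty;
}

static void my_rx_thread(struct my_sock *sk)
{
	struct sk_buff_head local;
	struct sk_buff *skb;

	skb_queue_head_init(&local);
	wait_event_interruptible(sk->waitq, my_rxq_grab(sk, &local));

	while ((skb = __skb_dequeue(&local)) != NULL)
		my_handle(sk, skb);

	__skb_queue_purge(&local);	/* drop leftovers on error paths */
}
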
rxq               126 drivers/vhost/net.c 	struct vhost_net_buf rxq;
rxq               151 drivers/vhost/net.c static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq)
rxq               153 drivers/vhost/net.c 	if (rxq->tail != rxq->head)
rxq               154 drivers/vhost/net.c 		return rxq->queue[rxq->head];
rxq               159 drivers/vhost/net.c static int vhost_net_buf_get_size(struct vhost_net_buf *rxq)
rxq               161 drivers/vhost/net.c 	return rxq->tail - rxq->head;
rxq               164 drivers/vhost/net.c static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq)
rxq               166 drivers/vhost/net.c 	return rxq->tail == rxq->head;
rxq               169 drivers/vhost/net.c static void *vhost_net_buf_consume(struct vhost_net_buf *rxq)
rxq               171 drivers/vhost/net.c 	void *ret = vhost_net_buf_get_ptr(rxq);
rxq               172 drivers/vhost/net.c 	++rxq->head;
rxq               178 drivers/vhost/net.c 	struct vhost_net_buf *rxq = &nvq->rxq;
rxq               180 drivers/vhost/net.c 	rxq->head = 0;
rxq               181 drivers/vhost/net.c 	rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
rxq               183 drivers/vhost/net.c 	return rxq->tail;
rxq               188 drivers/vhost/net.c 	struct vhost_net_buf *rxq = &nvq->rxq;
rxq               190 drivers/vhost/net.c 	if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
rxq               191 drivers/vhost/net.c 		ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
rxq               192 drivers/vhost/net.c 				   vhost_net_buf_get_size(rxq),
rxq               194 drivers/vhost/net.c 		rxq->head = rxq->tail = 0;
rxq               211 drivers/vhost/net.c 	struct vhost_net_buf *rxq = &nvq->rxq;
rxq               213 drivers/vhost/net.c 	if (!vhost_net_buf_is_empty(rxq))
rxq               220 drivers/vhost/net.c 	return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
rxq               223 drivers/vhost/net.c static void vhost_net_buf_init(struct vhost_net_buf *rxq)
rxq               225 drivers/vhost/net.c 	rxq->head = rxq->tail = 0;
rxq               314 drivers/vhost/net.c 		vhost_net_buf_init(&n->vqs[i].rxq);
rxq              1170 drivers/vhost/net.c 			msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
rxq              1298 drivers/vhost/net.c 	n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;
rxq              1323 drivers/vhost/net.c 		vhost_net_buf_init(&n->vqs[i].rxq);
rxq              1406 drivers/vhost/net.c 	kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
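
vhost_net's vhost_net_buf (net.c lines 126-225) is a small head/tail cache in front of a ptr_ring: when it runs dry, a whole batch is pulled in with ptr_ring_consume_batched() and then served entry by entry. The shape is easy to show standalone; fetch_batch() below is a placeholder for the batched ring consume:

#include <stddef.h>

#define BATCH 64

struct buf_cache {
	void *queue[BATCH];
	int head;	/* next entry to serve */
	int tail;	/* one past the last fetched entry */
};

static int cache_is_empty(const struct buf_cache *c)
{
	return c->tail == c->head;
}

static void *cache_consume(struct buf_cache *c,
			   int (*fetch_batch)(void **dst, int n))
{
	if (cache_is_empty(c)) {
		c->head = 0;
		c->tail = fetch_batch(c->queue, BATCH);
		if (!c->tail)
			return NULL;	/* underlying ring is empty too */
	}
	return c->queue[c->head++];
}
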
rxq               323 include/linux/avf/virtchnl.h 	struct virtchnl_rxq_info rxq;
rxq              3550 include/linux/netdevice.h int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
rxq              3561 include/linux/netdevice.h __netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
rxq              3563 include/linux/netdevice.h 	return dev->_rx + rxq;
rxq                58 include/linux/usb/usbnet.h 	struct sk_buff_head	rxq;
rxq                72 include/net/xdp.h 	struct xdp_rxq_info *rxq;
rxq               104 include/net/xdp.h 	if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY)
rxq               123 include/net/xdp.h 	xdp_frame->mem = xdp->rxq->mem;
rxq               348 include/trace/events/xdp.h 		 const struct xdp_rxq_info *rxq),
rxq               350 include/trace/events/xdp.h 	TP_ARGS(xa, rxq),
rxq               357 include/trace/events/xdp.h 		__field(const struct xdp_rxq_info *,		rxq)
rxq               366 include/trace/events/xdp.h 		__entry->rxq		= rxq;
rxq               367 include/trace/events/xdp.h 		__entry->ifindex	= rxq->dev->ifindex;
rxq               366 net/bpf/test_run.c 	xdp.rxq = &rxqueue->xdp_rxq;
rxq              2624 net/core/dev.c int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
rxq              2628 net/core/dev.c 	if (rxq < 1 || rxq > dev->num_rx_queues)
rxq              2635 net/core/dev.c 						  rxq);
rxq              2640 net/core/dev.c 	dev->real_num_rx_queues = rxq;
rxq              4282 net/core/dev.c 	xdp->rxq = &rxqueue->xdp_rxq;
rxq              4852 net/core/filter.c 		return bpf_ipv4_fib_lookup(dev_net(ctx->rxq->dev), params,
rxq              4857 net/core/filter.c 		return bpf_ipv6_fib_lookup(dev_net(ctx->rxq->dev), params,
rxq              5429 net/core/filter.c 	struct net *caller_net = dev_net(ctx->rxq->dev);
rxq              5430 net/core/filter.c 	int ifindex = ctx->rxq->dev->ifindex;
rxq              5452 net/core/filter.c 	struct net *caller_net = dev_net(ctx->rxq->dev);
rxq              5453 net/core/filter.c 	int ifindex = ctx->rxq->dev->ifindex;
rxq              5475 net/core/filter.c 	struct net *caller_net = dev_net(ctx->rxq->dev);
rxq              5476 net/core/filter.c 	int ifindex = ctx->rxq->dev->ifindex;
rxq              7790 net/core/filter.c 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
rxq              7792 net/core/filter.c 				      offsetof(struct xdp_buff, rxq));
rxq              7800 net/core/filter.c 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
rxq              7802 net/core/filter.c 				      offsetof(struct xdp_buff, rxq));
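
The last two filter.c hits (lines 7790-7802) are the verifier-time rewrite that turns an xdp_md field access into two loads, first xdp_buff->rxq and then the wanted rxq member. From the BPF program side none of that machinery is visible; a minimal sketch of the access that exercises it, assuming a libbpf-style build:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_show_rxq(struct xdp_md *ctx)
{
	/* Reading rx_queue_index compiles down to the two-load
	 * rxq dereference shown in the filter.c excerpts above. */
	__u32 qid = ctx->rx_queue_index;

	bpf_printk("rx on queue %u", qid);
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
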
rxq              1525 net/core/net-sysfs.c 	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
rxq              1539 net/core/net-sysfs.c 	rxq = real_rx;
rxq              1550 net/core/net-sysfs.c 	net_rx_queue_update_kobjects(dev, rxq, 0);
rxq               417 net/core/xdp.c 	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp->handle);
rxq               207 net/xdp/xsk.c  	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
rxq               212 net/xdp/xsk.c  	return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
rxq               233 net/xdp/xsk.c  	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) {
rxq               194 samples/bpf/xdp_rxq_info_user.c 	struct record *rxq;
rxq               241 samples/bpf/xdp_rxq_info_user.c 	rec->rxq = alloc_record_per_rxq();
rxq               243 samples/bpf/xdp_rxq_info_user.c 		rec->rxq[i].cpu = alloc_record_per_cpu();
rxq               255 samples/bpf/xdp_rxq_info_user.c 		free(r->rxq[i].cpu);
rxq               257 samples/bpf/xdp_rxq_info_user.c 	free(r->rxq);
rxq               301 samples/bpf/xdp_rxq_info_user.c 		map_collect_percpu(fd, i, &rec->rxq[i]);
rxq               350 samples/bpf/xdp_rxq_info_user.c 	int rxq;
rxq               390 samples/bpf/xdp_rxq_info_user.c 	for (rxq = 0; rxq < nr_rxqs; rxq++) {
rxq               394 samples/bpf/xdp_rxq_info_user.c 		int rxq_ = rxq;
rxq               400 samples/bpf/xdp_rxq_info_user.c 		rec  =  &stats_rec->rxq[rxq];
rxq               401 samples/bpf/xdp_rxq_info_user.c 		prev = &stats_prev->rxq[rxq];
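
xdp_rxq_info_user.c keeps its stats as a two-level structure: one record per receive queue, each holding a per-CPU array (lines 194-257). A standalone sketch of that allocation and its unwind, with the sizes passed in rather than discovered from the device and sysconf():

#include <stdlib.h>

struct record {
	unsigned long long *cpu;	/* one counter per possible CPU */
};

static struct record *alloc_stats(int nr_rxqs, int nr_cpus)
{
	struct record *rxq = calloc(nr_rxqs, sizeof(*rxq));
	int i;

	if (!rxq)
		return NULL;
	for (i = 0; i < nr_rxqs; i++) {
		rxq[i].cpu = calloc(nr_cpus, sizeof(*rxq[i].cpu));
		if (!rxq[i].cpu) {
			while (i--)	/* free what was allocated so far */
				free(rxq[i].cpu);
			free(rxq);
			return NULL;
		}
	}
	return rxq;
}
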