Lines matching refs: rq

105 	struct receive_queue *rq;  member
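These hits are the receive-path plumbing of the virtio_net driver; line 105 is the per-queue array member inside struct virtnet_info. A minimal sketch of the struct receive_queue fields that the references below rely on, inferred from the hits themselves (the real structure carries further annotations, and the ewma_pkt_len type is generated by DECLARE_EWMA):

struct receive_queue {
        struct virtqueue *vq;                     /* virtqueue holding incoming buffers */
        struct napi_struct napi;                  /* per-queue NAPI context */
        struct page *pages;                       /* freelist of spare pages, chained via page->private */
        struct ewma_pkt_len mrg_avg_pkt_len;      /* average packet length, sizes mergeable buffers */
        struct page_frag alloc_frag;              /* fragment allocator for mergeable buffers */
        struct scatterlist sg[MAX_SKB_FRAGS + 2]; /* scratch scatterlist for add_recvbuf_*() */
        char name[40];                            /* virtqueue name, "input.N" */
};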
193 static void give_pages(struct receive_queue *rq, struct page *page) in give_pages() argument
199 end->private = (unsigned long)rq->pages; in give_pages()
200 rq->pages = page; in give_pages()
203 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) in get_a_page() argument
205 struct page *p = rq->pages; in get_a_page()
208 rq->pages = (struct page *)p->private; in get_a_page()
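give_pages() and get_a_page() manage that per-queue freelist: returned pages are spliced onto the list through page->private, and the allocator only falls back to alloc_page() when the list is empty. A sketch reconstructed from the hits at lines 193-208:

static void give_pages(struct receive_queue *rq, struct page *page)
{
        struct page *end;

        /* Walk to the end of the chain being returned, splice the current
         * freelist behind it, and make @page the new head. */
        for (end = page; end->private; end = (struct page *)end->private)
                ;
        end->private = (unsigned long)rq->pages;
        rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
        struct page *p = rq->pages;

        if (p) {
                rq->pages = (struct page *)p->private;
                p->private = 0;         /* private is only used for chaining */
        } else {
                p = alloc_page(gfp_mask);
        }
        return p;
}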
247 struct receive_queue *rq, in page_to_skb() argument
315 give_pages(rq, page); in page_to_skb()
332 struct receive_queue *rq, in receive_big() argument
337 struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); in receive_big()
346 give_pages(rq, page); in receive_big()
352 struct receive_queue *rq, in receive_mergeable() argument
363 struct sk_buff *head_skb = page_to_skb(vi, rq, page, offset, len, in receive_mergeable()
372 ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len); in receive_mergeable()
416 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len); in receive_mergeable()
422 ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len); in receive_mergeable()
438 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, in receive_buf() argument
454 give_pages(rq, buf); in receive_buf()
462 skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len); in receive_buf()
464 skb = receive_big(dev, vi, rq, buf, len); in receive_buf()
525 skb_mark_napi_id(skb, &rq->napi); in receive_buf()
527 napi_gro_receive(&rq->napi, skb); in receive_buf()
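receive_buf() is the single entry point for a completed buffer: it chooses the unpacking routine by the negotiated receive mode and feeds the resulting skb to GRO on the queue's NAPI context. A pared-down sketch of the dispatch in lines 438-527; receive_small() (the small-buffer path, which does not touch rq and so does not appear in this listing), the mergeable_rx_bufs/big_packets mode flags, header parsing and statistics are assumptions only indicated here:

static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
                        void *buf, unsigned int len)
{
        struct net_device *dev = vi->dev;
        struct sk_buff *skb;

        if (vi->mergeable_rx_bufs)
                skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len);
        else if (vi->big_packets)
                skb = receive_big(dev, vi, rq, buf, len);
        else
                skb = receive_small(vi, buf, len);

        if (unlikely(!skb))
                return;

        /* ... virtio-net header parsing, checksum/GSO setup, stats ... */

        skb_mark_napi_id(skb, &rq->napi);
        napi_gro_receive(&rq->napi, skb);
}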
535 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, in add_recvbuf_small() argument
549 sg_init_table(rq->sg, 2); in add_recvbuf_small()
550 sg_set_buf(rq->sg, hdr, vi->hdr_len); in add_recvbuf_small()
551 skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); in add_recvbuf_small()
553 err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); in add_recvbuf_small()
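In small-buffer mode each posted buffer is an ordinary skb: the scatterlist gets two entries, the in-skb virtio-net header followed by the linear data, and the skb itself is the token later returned by virtqueue_get_buf(). A sketch of lines 535-553; GOOD_PACKET_LEN and skb_vnet_hdr() are driver-internal names assumed here:

static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
                             gfp_t gfp)
{
        struct virtio_net_hdr_mrg_rxbuf *hdr;
        struct sk_buff *skb;
        int err;

        skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
        if (unlikely(!skb))
                return -ENOMEM;
        skb_put(skb, GOOD_PACKET_LEN);

        hdr = skb_vnet_hdr(skb);
        sg_init_table(rq->sg, 2);
        sg_set_buf(rq->sg, hdr, vi->hdr_len);           /* entry 0: header */
        skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);     /* entry 1: packet data */

        err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
        if (err < 0)
                dev_kfree_skb(skb);
        return err;
}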
560 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, in add_recvbuf_big() argument
567 sg_init_table(rq->sg, MAX_SKB_FRAGS + 2); in add_recvbuf_big()
571 first = get_a_page(rq, gfp); in add_recvbuf_big()
574 give_pages(rq, list); in add_recvbuf_big()
577 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); in add_recvbuf_big()
584 first = get_a_page(rq, gfp); in add_recvbuf_big()
586 give_pages(rq, list); in add_recvbuf_big()
593 sg_set_buf(&rq->sg[0], p, vi->hdr_len); in add_recvbuf_big()
597 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); in add_recvbuf_big()
601 err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2, in add_recvbuf_big()
604 give_pages(rq, first); in add_recvbuf_big()
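Big-packet mode posts a chain of up to MAX_SKB_FRAGS whole pages plus a first page that carries the virtio-net header and the start of the payload, all drawn from the page freelist above. A condensed sketch of lines 560-604; struct padded_vnet_hdr is a driver-internal layout assumed for the header offset:

static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
                           gfp_t gfp)
{
        struct page *first, *list = NULL;
        char *p;
        int i, err, offset;

        sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);

        /* sg[2]..sg[MAX_SKB_FRAGS + 1]: one full page each, chained via private */
        for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
                first = get_a_page(rq, gfp);
                if (!first) {
                        if (list)
                                give_pages(rq, list);
                        return -ENOMEM;
                }
                sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
                first->private = (unsigned long)list;
                list = first;
        }

        /* first page: sg[0] covers the header, sg[1] the rest of the page */
        first = get_a_page(rq, gfp);
        if (!first) {
                give_pages(rq, list);
                return -ENOMEM;
        }
        p = page_address(first);
        sg_set_buf(&rq->sg[0], p, vi->hdr_len);
        offset = sizeof(struct padded_vnet_hdr);
        sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

        first->private = (unsigned long)list;
        err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
                                  first, gfp);
        if (err < 0)
                give_pages(rq, first);

        return err;
}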
619 static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) in add_recvbuf_mergeable() argument
621 struct page_frag *alloc_frag = &rq->alloc_frag; in add_recvbuf_mergeable()
627 len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len); in add_recvbuf_mergeable()
646 sg_init_one(rq->sg, buf, len); in add_recvbuf_mergeable()
647 err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp); in add_recvbuf_mergeable()
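Mergeable mode carves buffers out of the per-queue page_frag; the requested size tracks the EWMA of recent packet lengths, and the buffer address plus its truesize are packed into the opaque ctx token given to the virtqueue. Roughly, following lines 619-647; mergeable_buf_to_ctx() is a driver helper assumed to do that packing:

static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
{
        struct page_frag *alloc_frag = &rq->alloc_frag;
        unsigned int len, hole;
        unsigned long ctx;
        char *buf;
        int err;

        len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len);
        if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
                return -ENOMEM;

        buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
        ctx = mergeable_buf_to_ctx(buf, len);   /* encode address + truesize */
        get_page(alloc_frag->page);
        alloc_frag->offset += len;

        /* If the tail of the frag is too small for another buffer, give it
         * to this one rather than leaving it unused. */
        hole = alloc_frag->size - alloc_frag->offset;
        if (hole < len) {
                len += hole;
                alloc_frag->offset += hole;
        }

        sg_init_one(rq->sg, buf, len);
        err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp);
        if (err < 0)
                put_page(virt_to_head_page(buf));

        return err;
}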
661 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, in try_fill_recv() argument
670 err = add_recvbuf_mergeable(rq, gfp); in try_fill_recv()
672 err = add_recvbuf_big(vi, rq, gfp); in try_fill_recv()
674 err = add_recvbuf_small(vi, rq, gfp); in try_fill_recv()
679 } while (rq->vq->num_free); in try_fill_recv()
680 virtqueue_kick(rq->vq); in try_fill_recv()
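try_fill_recv() keeps posting buffers of whichever flavour is active until the virtqueue runs out of free descriptors or an allocation fails, then kicks the device once; the return value tells the caller whether it ran out of memory. A compressed sketch of lines 661-680, using the same assumed mode flags as in receive_buf():

static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
                          gfp_t gfp)
{
        int err;
        bool oom;

        do {
                if (vi->mergeable_rx_bufs)
                        err = add_recvbuf_mergeable(rq, gfp);
                else if (vi->big_packets)
                        err = add_recvbuf_big(vi, rq, gfp);
                else
                        err = add_recvbuf_small(vi, rq, gfp);

                oom = err == -ENOMEM;
                if (err)
                        break;
        } while (rq->vq->num_free);
        virtqueue_kick(rq->vq);
        return !oom;
}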
687 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; in skb_recv_done() local
690 if (napi_schedule_prep(&rq->napi)) { in skb_recv_done()
692 __napi_schedule(&rq->napi); in skb_recv_done()
696 static void virtnet_napi_enable(struct receive_queue *rq) in virtnet_napi_enable() argument
698 napi_enable(&rq->napi); in virtnet_napi_enable()
704 if (napi_schedule_prep(&rq->napi)) { in virtnet_napi_enable()
705 virtqueue_disable_cb(rq->vq); in virtnet_napi_enable()
707 __napi_schedule(&rq->napi); in virtnet_napi_enable()
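The virtqueue callback and the NAPI enable path cooperate to keep interrupts suppressed while a poll is pending; virtnet_napi_enable() also kicks the poll loop by hand in case completions arrived while NAPI was disabled. A sketch of lines 687-707 (vq2rxq() maps a virtqueue back to its rx queue index):

static void skb_recv_done(struct virtqueue *rvq)
{
        struct virtnet_info *vi = rvq->vdev->priv;
        struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

        /* Schedule NAPI; suppress further interrupts if successful. */
        if (napi_schedule_prep(&rq->napi)) {
                virtqueue_disable_cb(rvq);
                __napi_schedule(&rq->napi);
        }
}

static void virtnet_napi_enable(struct receive_queue *rq)
{
        napi_enable(&rq->napi);

        /* If completions arrived while NAPI was disabled there will be no
         * further interrupt, so kick the poll loop by hand. */
        if (napi_schedule_prep(&rq->napi)) {
                virtqueue_disable_cb(rq->vq);
                local_bh_disable();
                __napi_schedule(&rq->napi);
                local_bh_enable();
        }
}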
720 struct receive_queue *rq = &vi->rq[i]; in refill_work() local
722 napi_disable(&rq->napi); in refill_work()
723 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); in refill_work()
724 virtnet_napi_enable(rq); in refill_work()
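When an atomic refill fails, refill_work() retries from process context: it disables NAPI around a GFP_KERNEL refill of each queue and reschedules itself if a queue is still empty. A sketch around lines 720-724, assuming the delayed_work member is named refill and the active queue count is curr_queue_pairs:

static void refill_work(struct work_struct *work)
{
        struct virtnet_info *vi =
                container_of(work, struct virtnet_info, refill.work);
        bool still_empty;
        int i;

        for (i = 0; i < vi->curr_queue_pairs; i++) {
                struct receive_queue *rq = &vi->rq[i];

                napi_disable(&rq->napi);
                still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
                virtnet_napi_enable(rq);

                /* If no buffers could be posted, retry later; otherwise this
                 * queue would never be refilled again. */
                if (still_empty)
                        schedule_delayed_work(&vi->refill, HZ / 2);
        }
}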
734 static int virtnet_receive(struct receive_queue *rq, int budget) in virtnet_receive() argument
736 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_receive()
741 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { in virtnet_receive()
742 receive_buf(vi, rq, buf, len); in virtnet_receive()
746 if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) { in virtnet_receive()
747 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) in virtnet_receive()
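virtnet_receive() is the budgeted drain loop shared by NAPI poll and busy poll: it pulls completed buffers off the virtqueue and, once more than half the ring is free, refills with GFP_ATOMIC, deferring to the workqueue on failure. From lines 734-747 (per-queue statistics omitted, refill work member assumed as above):

static int virtnet_receive(struct receive_queue *rq, int budget)
{
        struct virtnet_info *vi = rq->vq->vdev->priv;
        unsigned int len, received = 0;
        void *buf;

        while (received < budget &&
               (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
                receive_buf(vi, rq, buf, len);
                received++;
        }

        if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
                if (!try_fill_recv(vi, rq, GFP_ATOMIC))
                        schedule_delayed_work(&vi->refill, 0);
        }

        return received;
}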
756 struct receive_queue *rq = in virtnet_poll() local
760 received = virtnet_receive(rq, budget); in virtnet_poll()
764 r = virtqueue_enable_cb_prepare(rq->vq); in virtnet_poll()
766 if (unlikely(virtqueue_poll(rq->vq, r)) && in virtnet_poll()
768 virtqueue_disable_cb(rq->vq); in virtnet_poll()
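virtnet_poll() completes NAPI with the prepare/poll pair so that a buffer completing while callbacks are being re-armed is not lost: if virtqueue_poll() reports pending work after re-enabling, polling is rescheduled immediately. Roughly, per lines 756-768:

static int virtnet_poll(struct napi_struct *napi, int budget)
{
        struct receive_queue *rq =
                container_of(napi, struct receive_queue, napi);
        unsigned int r, received;

        received = virtnet_receive(rq, budget);

        /* Out of packets? Re-enable callbacks, but re-check for a race. */
        if (received < budget) {
                r = virtqueue_enable_cb_prepare(rq->vq);
                napi_complete_done(napi, received);
                if (unlikely(virtqueue_poll(rq->vq, r)) &&
                    napi_schedule_prep(napi)) {
                        virtqueue_disable_cb(rq->vq);
                        __napi_schedule(napi);
                }
        }

        return received;
}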
780 struct receive_queue *rq = in virtnet_busy_poll() local
782 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_busy_poll()
791 virtqueue_disable_cb(rq->vq); in virtnet_busy_poll()
794 received += virtnet_receive(rq, budget); in virtnet_busy_poll()
796 r = virtqueue_enable_cb_prepare(rq->vq); in virtnet_busy_poll()
798 if (unlikely(virtqueue_poll(rq->vq, r)) && in virtnet_busy_poll()
800 virtqueue_disable_cb(rq->vq); in virtnet_busy_poll()
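virtnet_busy_poll() applies the same drain-and-re-arm pattern from busy-poll context under CONFIG_NET_RX_BUSY_POLL, with a small fixed budget; the link-status check and the LL_FLUSH_* return codes follow the busy-poll API of that kernel generation and are assumptions here. A compact sketch of lines 780-800:

#ifdef CONFIG_NET_RX_BUSY_POLL
/* Called with local_bh_disable()d by the busy-poll core. */
static int virtnet_busy_poll(struct napi_struct *napi)
{
        struct receive_queue *rq =
                container_of(napi, struct receive_queue, napi);
        struct virtnet_info *vi = rq->vq->vdev->priv;
        int r, received = 0, budget = 4;

        if (!(vi->status & VIRTIO_NET_S_LINK_UP))
                return LL_FLUSH_FAILED;

        if (!napi_schedule_prep(napi))
                return LL_FLUSH_BUSY;

        virtqueue_disable_cb(rq->vq);

again:
        received += virtnet_receive(rq, budget);

        r = virtqueue_enable_cb_prepare(rq->vq);
        clear_bit(NAPI_STATE_SCHED, &napi->state);
        if (unlikely(virtqueue_poll(rq->vq, r)) &&
            napi_schedule_prep(napi)) {
                virtqueue_disable_cb(rq->vq);
                if (received < budget) {
                        budget -= received;
                        goto again;
                }
                __napi_schedule(napi);
        }

        return received;
}
#endif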
821 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) in virtnet_open()
823 virtnet_napi_enable(&vi->rq[i]); in virtnet_open()
1101 napi_schedule(&vi->rq[i].napi); in virtnet_netpoll()
1150 napi_disable(&vi->rq[i].napi); in virtnet_close()
1259 virtqueue_set_affinity(vi->rq[i].vq, -1); in virtnet_clean_affinity()
1284 virtqueue_set_affinity(vi->rq[i].vq, cpu); in virtnet_set_affinity()
1319 ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_get_ringparam()
1464 napi_hash_del(&vi->rq[i].napi); in virtnet_free_queues()
1465 netif_napi_del(&vi->rq[i].napi); in virtnet_free_queues()
1468 kfree(vi->rq); in virtnet_free_queues()
1477 while (vi->rq[i].pages) in free_receive_bufs()
1478 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); in free_receive_bufs()
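Teardown drains the same freelist: free_receive_bufs() pops every cached page with get_a_page() and hands it back to the page allocator. A sketch of lines 1477-1478, assuming the queue count is max_queue_pairs:

static void free_receive_bufs(struct virtnet_info *vi)
{
        int i;

        for (i = 0; i < vi->max_queue_pairs; i++) {
                /* Return every page still cached on the per-queue freelist. */
                while (vi->rq[i].pages)
                        __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
        }
}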
1486 if (vi->rq[i].alloc_frag.page) in free_receive_page_frags()
1487 put_page(vi->rq[i].alloc_frag.page); in free_receive_page_frags()
1502 struct virtqueue *vq = vi->rq[i].vq; in free_unused_bufs()
1510 give_pages(&vi->rq[i], buf); in free_unused_bufs()
1565 sprintf(vi->rq[i].name, "input.%d", i); in virtnet_find_vqs()
1567 names[rxq2vq(i)] = vi->rq[i].name; in virtnet_find_vqs()
1583 vi->rq[i].vq = vqs[rxq2vq(i)]; in virtnet_find_vqs()
1610 vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL); in virtnet_alloc_queues()
1611 if (!vi->rq) in virtnet_alloc_queues()
1616 vi->rq[i].pages = NULL; in virtnet_alloc_queues()
1617 netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, in virtnet_alloc_queues()
1619 napi_hash_add(&vi->rq[i].napi); in virtnet_alloc_queues()
1621 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); in virtnet_alloc_queues()
1622 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); in virtnet_alloc_queues()
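Per-queue state is created in virtnet_alloc_queues(): the rq array is sized for max_queue_pairs, each queue's NAPI context is registered (and hashed for busy polling), and the scratch scatterlist and packet-length EWMA are initialised. A sketch of lines 1610-1622; napi_weight and the matching send-queue setup are left out as assumptions:

static int virtnet_alloc_queues(struct virtnet_info *vi)
{
        int i;

        vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
        if (!vi->rq)
                return -ENOMEM;

        for (i = 0; i < vi->max_queue_pairs; i++) {
                vi->rq[i].pages = NULL;
                netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
                               napi_weight);
                napi_hash_add(&vi->rq[i].napi);

                sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
                ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
        }

        return 0;
}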
1668 avg = &vi->rq[queue_index].mrg_avg_pkt_len; in mergeable_rx_buffer_size_show()
1871 try_fill_recv(vi, &vi->rq[i], GFP_KERNEL); in virtnet_probe()
1874 if (vi->rq[i].vq->num_free == in virtnet_probe()
1875 virtqueue_get_vring_size(vi->rq[i].vq)) { in virtnet_probe()
1967 napi_disable(&vi->rq[i].napi); in virtnet_freeze()
1988 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) in virtnet_restore()
1992 virtnet_napi_enable(&vi->rq[i]); in virtnet_restore()
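Suspend/resume mirrors open: virtnet_freeze() quiesces each queue's NAPI, and virtnet_restore() refills every receive queue before re-enabling it, falling back to the refill work item on failure. A rough sketch of the restore-side loop around lines 1988-1992; the surrounding virtqueue re-initialisation is only indicated and the refill/queue-count members are assumed as before:

static int virtnet_restore(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;
        int i;

        /* ... re-create the virtqueues and mark the device ready ... */

        if (netif_running(vi->dev)) {
                for (i = 0; i < vi->curr_queue_pairs; i++)
                        if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
                                schedule_delayed_work(&vi->refill, 0);

                for (i = 0; i < vi->max_queue_pairs; i++)
                        virtnet_napi_enable(&vi->rq[i]);
        }

        netif_device_attach(vi->dev);
        return 0;
}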