Lines Matching refs:vq

68 	struct virtqueue *vq; member
80 	struct virtqueue *vq; member
164 static int vq2txq(struct virtqueue *vq) in vq2txq() argument
166 return (vq->index - 1) / 2; in vq2txq()
174 static int vq2rxq(struct virtqueue *vq) in vq2rxq() argument
176 return vq->index / 2; in vq2rxq()
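The vq2txq()/vq2rxq() entries above, together with rxq2vq()/txq2vq() at lines 1583-1584, imply virtio-net's queue layout: receive queue i sits at virtqueue index 2*i and send queue i at 2*i + 1, so the reverse mappings are simple divisions. A minimal sketch of that convention (the my_ names are illustrative, not the driver's):

#include <linux/virtio.h>

/* Assumed even/odd layout: rx queue i -> vq index 2*i, tx queue i -> 2*i + 1. */
static int my_rxq2vq(int rxq)
{
	return rxq * 2;
}

static int my_txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int my_vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;		/* even indices are rx */
}

static int my_vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;	/* odd indices are tx */
}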
216 static void skb_xmit_done(struct virtqueue *vq) in skb_xmit_done() argument
218 struct virtnet_info *vi = vq->vdev->priv; in skb_xmit_done()
221 virtqueue_disable_cb(vq); in skb_xmit_done()
224 netif_wake_subqueue(vi->dev, vq2txq(vq)); in skb_xmit_done()
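The skb_xmit_done() lines show the tx-completion callback pattern: suppress further callbacks and wake the matching transmit subqueue so the stack resumes queuing, leaving the actual reclaim to the transmit path. A hedged sketch of that shape, with an assumed private-data layout rather than the driver's struct virtnet_info:

#include <linux/virtio.h>
#include <linux/netdevice.h>

/* Hypothetical private data; the real driver keeps more state here. */
struct my_vnet_info {
	struct net_device *dev;
};

static void my_skb_xmit_done(struct virtqueue *vq)
{
	struct my_vnet_info *vi = vq->vdev->priv;

	/* No more completion interrupts needed until the ring runs dry again. */
	virtqueue_disable_cb(vq);

	/* Wake the tx subqueue that owns this virtqueue (odd-index mapping). */
	netif_wake_subqueue(vi->dev, (vq->index - 1) / 2);
}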
372 ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len); in receive_mergeable()
422 ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len); in receive_mergeable()
553 err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); in add_recvbuf_small()
601 err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2, in add_recvbuf_big()
647 err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp); in add_recvbuf_mergeable()
679 } while (rq->vq->num_free); in try_fill_recv()
680 virtqueue_kick(rq->vq); in try_fill_recv()
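Lines 553-680 show the receive-refill pattern: post buffers with virtqueue_add_inbuf() while the ring has free descriptors, then notify the device once per batch. A sketch of that loop follows; the buffer size and allocation policy are assumptions, and note that the mergeable path at lines 372/422/647 stores an encoded unsigned long as the cookie rather than a plain pointer, which is simplified away here.

#include <linux/virtio.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static bool my_fill_recv(struct virtqueue *rx_vq, gfp_t gfp)
{
	struct scatterlist sg;
	void *buf;
	int err;

	do {
		buf = kmalloc(PAGE_SIZE, gfp);
		if (!buf)
			break;

		sg_init_one(&sg, buf, PAGE_SIZE);
		/* Expose one writable buffer; buf is the cookie that
		 * virtqueue_get_buf() hands back on completion. */
		err = virtqueue_add_inbuf(rx_vq, &sg, 1, buf, gfp);
		if (err) {
			kfree(buf);
			break;
		}
	} while (rx_vq->num_free);

	/* One notification after the batch, as in try_fill_recv(). */
	virtqueue_kick(rx_vq);
	return !rx_vq->num_free;
}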
705 virtqueue_disable_cb(rq->vq); in virtnet_napi_enable()
736 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_receive()
741 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { in virtnet_receive()
746 if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) { in virtnet_receive()
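The virtnet_receive() lines show the rx dispatch loop: pull completed buffers up to the NAPI budget, then refill once more than half the ring is unused (the threshold at line 746). A sketch under that reading; receive_one() and refill() are stand-ins for the driver's real helpers, not its API:

#include <linux/virtio.h>

static int my_receive(struct virtqueue *rx_vq, int budget,
		      void (*receive_one)(void *buf, unsigned int len),
		      bool (*refill)(struct virtqueue *vq))
{
	unsigned int len;
	void *buf;
	int received = 0;

	while (received < budget &&
	       (buf = virtqueue_get_buf(rx_vq, &len)) != NULL) {
		receive_one(buf, len);
		received++;
	}

	/* Refill only when a meaningful chunk of the ring sits empty. */
	if (rx_vq->num_free > virtqueue_get_vring_size(rx_vq) / 2)
		refill(rx_vq);

	return received;
}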
764 r = virtqueue_enable_cb_prepare(rq->vq); in virtnet_poll()
766 if (unlikely(virtqueue_poll(rq->vq, r)) && in virtnet_poll()
768 virtqueue_disable_cb(rq->vq); in virtnet_poll()
782 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_busy_poll()
791 virtqueue_disable_cb(rq->vq); in virtnet_busy_poll()
796 r = virtqueue_enable_cb_prepare(rq->vq); in virtnet_busy_poll()
798 if (unlikely(virtqueue_poll(rq->vq, r)) && in virtnet_busy_poll()
800 virtqueue_disable_cb(rq->vq); in virtnet_busy_poll()
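Lines 764-800 show the race-free interrupt re-enable dance shared by virtnet_poll() and virtnet_busy_poll(): re-arm callbacks with virtqueue_enable_cb_prepare(), then use virtqueue_poll() to detect buffers that slipped in meanwhile and fall back to polling instead of losing them. A sketch with the NAPI plumbing assumed:

#include <linux/virtio.h>
#include <linux/netdevice.h>

static int my_poll_done(struct virtqueue *rx_vq, struct napi_struct *napi)
{
	unsigned int opaque;

	/* Snapshot ring state and ask for callbacks again. */
	opaque = virtqueue_enable_cb_prepare(rx_vq);
	napi_complete(napi);

	/* Did the device complete buffers between our last get_buf() and now? */
	if (unlikely(virtqueue_poll(rx_vq, opaque)) &&
	    napi_schedule_prep(napi)) {
		virtqueue_disable_cb(rx_vq);
		__napi_schedule(napi);
		return 1;	/* rescheduled: keep polling */
	}
	return 0;
}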
833 struct virtnet_info *vi = sq->vq->vdev->priv; in free_old_xmit_skbs()
836 while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { in free_old_xmit_skbs()
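The free_old_xmit_skbs() lines show the tx reclaim loop: every cookie virtqueue_get_buf() returns on the send queue is an skb the device has finished with, so it can be freed and its descriptors reused. A minimal sketch:

#include <linux/virtio.h>
#include <linux/skbuff.h>

static unsigned int my_free_old_xmit_skbs(struct virtqueue *tx_vq)
{
	struct sk_buff *skb;
	unsigned int len, freed = 0;

	while ((skb = virtqueue_get_buf(tx_vq, &len)) != NULL) {
		dev_kfree_skb_any(skb);
		freed++;
	}
	return freed;
}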
852 struct virtnet_info *vi = sq->vq->vdev->priv; in xmit_skb()
912 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); in xmit_skb()
958 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { in start_xmit()
960 if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { in start_xmit()
963 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { in start_xmit()
965 virtqueue_disable_cb(sq->vq); in start_xmit()
971 virtqueue_kick(sq->vq); in start_xmit()
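Lines 852-971 show the transmit path and its queue-stop heuristic: add the skb with virtqueue_add_outbuf(), stop the subqueue once fewer than 2 + MAX_SKB_FRAGS descriptors remain (the worst case one more skb could need), and only restart it if reclaim frees space while callbacks are being re-armed. A sketch under those assumptions; the scatterlist sizing and virtio-net header handling are simplified, and the my_ naming is illustrative:

#include <linux/kernel.h>
#include <linux/virtio.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev,
				 struct virtqueue *tx_vq, int qnum)
{
	struct scatterlist sg[MAX_SKB_FRAGS + 2];
	int num_sg;

	sg_init_table(sg, ARRAY_SIZE(sg));
	num_sg = skb_to_sgvec(skb, sg, 0, skb->len);
	if (num_sg < 0)
		goto drop;

	/* The skb itself is the cookie returned later by virtqueue_get_buf(). */
	if (virtqueue_add_outbuf(tx_vq, sg, num_sg, skb, GFP_ATOMIC) < 0)
		goto drop;

	if (tx_vq->num_free < 2 + MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (unlikely(!virtqueue_enable_cb_delayed(tx_vq))) {
			/* More completions arrived meanwhile: reclaim them,
			 * then recheck whether the queue can be restarted. */
			struct sk_buff *done;
			unsigned int len;

			while ((done = virtqueue_get_buf(tx_vq, &len)) != NULL)
				dev_kfree_skb_any(done);
			if (tx_vq->num_free >= 2 + MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(tx_vq);
			}
		}
	}

	virtqueue_kick(tx_vq);
	return NETDEV_TX_OK;

drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}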
1259 virtqueue_set_affinity(vi->rq[i].vq, -1); in virtnet_clean_affinity()
1260 virtqueue_set_affinity(vi->sq[i].vq, -1); in virtnet_clean_affinity()
1284 virtqueue_set_affinity(vi->rq[i].vq, cpu); in virtnet_set_affinity()
1285 virtqueue_set_affinity(vi->sq[i].vq, cpu); in virtnet_set_affinity()
1319 ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_get_ringparam()
1320 ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_get_ringparam()
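Lines 1259-1320 cover two per-queue-pair housekeeping chores: pinning a queue pair's interrupts to one CPU (with -1 clearing the hint) and reporting the fixed vring size as the ethtool ring maximum. A sketch of both; the int-cpu form of virtqueue_set_affinity() matches the calls in the listing, while newer kernels take a cpumask instead:

#include <linux/virtio.h>
#include <linux/ethtool.h>

static void my_set_queue_pair_affinity(struct virtqueue *rx_vq,
				       struct virtqueue *tx_vq, int cpu)
{
	virtqueue_set_affinity(rx_vq, cpu);	/* -1 clears the hint */
	virtqueue_set_affinity(tx_vq, cpu);
}

static void my_get_ringparam(struct virtqueue *rx_vq, struct virtqueue *tx_vq,
			     struct ethtool_ringparam *ring)
{
	/* Virtio ring sizes are fixed by the device; report them as the max. */
	ring->rx_max_pending = virtqueue_get_vring_size(rx_vq);
	ring->tx_max_pending = virtqueue_get_vring_size(tx_vq);
}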
1496 struct virtqueue *vq = vi->sq[i].vq; in free_unused_bufs() local
1497 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) in free_unused_bufs()
1502 struct virtqueue *vq = vi->rq[i].vq; in free_unused_bufs() local
1504 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { in free_unused_bufs()
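The free_unused_bufs() lines show the teardown drain: after the device has been reset, pull back every buffer that was posted but never consumed so its memory can be released. A sketch for the send queue; treating every tx cookie as an skb mirrors the tx path above, while rx cookies need their own mode-specific release (small, big, or mergeable):

#include <linux/virtio.h>
#include <linux/skbuff.h>

static void my_drain_tx_vq(struct virtqueue *tx_vq)
{
	void *buf;

	while ((buf = virtqueue_detach_unused_buf(tx_vq)) != NULL)
		dev_kfree_skb((struct sk_buff *)buf);
}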
1583 vi->rq[i].vq = vqs[rxq2vq(i)]; in virtnet_find_vqs()
1584 vi->sq[i].vq = vqs[txq2vq(i)]; in virtnet_find_vqs()
1874 if (vi->rq[i].vq->num_free == in virtnet_probe()
1875 virtqueue_get_vring_size(vi->rq[i].vq)) { in virtnet_probe()
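The virtnet_probe() lines at 1874-1875 encode a bring-up sanity check: if, after the initial refill attempt, the number of free descriptors still equals the full ring size, not a single receive buffer was accepted and probe should bail out. A one-line sketch of that predicate:

#include <linux/virtio.h>

static bool my_rx_ring_is_empty(struct virtqueue *rx_vq)
{
	/* Nothing was ever added if every descriptor is still free. */
	return rx_vq->num_free == virtqueue_get_vring_size(rx_vq);
}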