Lines Matching refs:vq
81 struct vhost_virtqueue *vq; member
85 struct vhost_virtqueue vq; member
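
The two struct hits above are the anchors for everything that follows: line 81 is a back-pointer from the zerocopy ubuf tracking block to its owning virtqueue, and line 85 is the vhost_virtqueue embedded inside vhost-net's per-queue wrapper, which is what makes the container_of() calls later in the listing work. A minimal sketch of the two containers, assuming every field other than vq (the refcount, waitqueue, and zerocopy indices are reconstructions and may differ from the real layout):

        struct vhost_net_ubuf_ref {
                /* refcounting and waiting members assumed */
                atomic_t refcount;
                wait_queue_head_t wait;
                struct vhost_virtqueue *vq;     /* line 81: owning virtqueue */
        };

        struct vhost_net_virtqueue {
                struct vhost_virtqueue vq;      /* line 85: embedded, so
                                                 * container_of() can recover
                                                 * the wrapper from a vq */
                /* zerocopy indices (upend_idx, done_idx) and the ubufs
                 * pointer are assumed members */
        };
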
116 static void vhost_net_enable_zcopy(int vq) in vhost_net_enable_zcopy() argument
118 vhost_net_zcopy_mask |= 0x1 << vq; in vhost_net_enable_zcopy()
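
Here the `vq` parameter is a queue index, not a pointer: vhost_net_enable_zcopy() just sets a per-queue bit in a module-wide mask. A sketch built from the two listed lines, with only the mask's declaration assumed:

        static int vhost_net_zcopy_mask;        /* declaration assumed */

        static void vhost_net_enable_zcopy(int vq)
        {
                /* line 118: mark queue index vq as zerocopy-capable */
                vhost_net_zcopy_mask |= 0x1 << vq;
        }
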
122 vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy) in vhost_net_ubuf_alloc() argument
133 ubufs->vq = vq; in vhost_net_ubuf_alloc()
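
vhost_net_ubuf_alloc() creates the tracking block for outstanding zerocopy buffers and stores the back-pointer seen on line 133. A sketch, assuming the error convention and eliding the refcount/waitqueue initialization:

        static struct vhost_net_ubuf_ref *
        vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
        {
                struct vhost_net_ubuf_ref *ubufs;

                if (!zcopy)
                        return NULL;    /* assumed: no tracking in copy mode */
                ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
                if (!ubufs)
                        return ERR_PTR(-ENOMEM);        /* style assumed */
                /* refcount/waitqueue initialization elided */
                ubufs->vq = vq;         /* line 133: used by the callback */
                return ubufs;
        }
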
239 struct vhost_virtqueue *vq) in vhost_zerocopy_signal_used() argument
242 container_of(vq, struct vhost_net_virtqueue, vq); in vhost_zerocopy_signal_used()
247 if (vq->heads[i].len == VHOST_DMA_FAILED_LEN) in vhost_zerocopy_signal_used()
249 if (VHOST_DMA_IS_DONE(vq->heads[i].len)) { in vhost_zerocopy_signal_used()
250 vq->heads[i].len = VHOST_DMA_CLEAR_LEN; in vhost_zerocopy_signal_used()
257 vhost_add_used_and_signal_n(vq->dev, vq, in vhost_zerocopy_signal_used()
258 &vq->heads[nvq->done_idx], add); in vhost_zerocopy_signal_used()
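
vhost_zerocopy_signal_used() is the deferred-completion half of zerocopy TX: it walks vq->heads[] from done_idx, stops at the first buffer still marked in flight (completions must be reported in order), clears the finished ones, and signals them to the guest in batches. A reconstruction from the listed lines; the loop bounds and the vhost_net_tx_err() accounting hook are assumptions:

        static void vhost_zerocopy_signal_used(struct vhost_net *net,
                                               struct vhost_virtqueue *vq)
        {
                struct vhost_net_virtqueue *nvq =
                        container_of(vq, struct vhost_net_virtqueue, vq);
                int i, add, j = 0;

                for (i = nvq->done_idx; i != nvq->upend_idx;
                     i = (i + 1) % UIO_MAXIOV) {
                        if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
                                vhost_net_tx_err(net);  /* hook assumed */
                        if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
                                vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
                                ++j;
                        } else
                                break;  /* first in-flight entry: stop */
                }
                while (j) {
                        add = min(UIO_MAXIOV - nvq->done_idx, j);
                        vhost_add_used_and_signal_n(vq->dev, vq,
                                        &vq->heads[nvq->done_idx], add);
                        nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
                        j -= add;
                }
        }
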
267 struct vhost_virtqueue *vq = ubufs->vq; in vhost_zerocopy_callback() local
273 vq->heads[ubuf->desc].len = success ? in vhost_zerocopy_callback()
285 vhost_poll_queue(&vq->poll); in vhost_zerocopy_callback()
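
vhost_zerocopy_callback() runs when the underlying device finally releases a zerocopy skb: it records success or failure in the heads[] slot reserved for that buffer and queues the TX worker so the completion gets flushed by vhost_zerocopy_signal_used(). A sketch; the ubufs refcounting and the wakeup rate-limiting in the real driver are elided, and the use of ubuf->ctx is assumed:

        static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
        {
                struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;   /* assumed */
                struct vhost_virtqueue *vq = ubufs->vq;         /* line 267 */

                /* line 273: leave the status where signal_used() looks */
                vq->heads[ubuf->desc].len = success ?
                        VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
                vhost_poll_queue(&vq->poll);                    /* line 285 */
        }
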
295 struct vhost_virtqueue *vq = &nvq->vq; in handle_tx() local
312 mutex_lock(&vq->mutex); in handle_tx()
313 sock = vq->private_data; in handle_tx()
317 vhost_disable_notify(&net->dev, vq); in handle_tx()
325 vhost_zerocopy_signal_used(net, vq); in handle_tx()
330 if (unlikely((nvq->upend_idx + vq->num - VHOST_MAX_PEND) in handle_tx()
334 head = vhost_get_vq_desc(vq, vq->iov, in handle_tx()
335 ARRAY_SIZE(vq->iov), in handle_tx()
342 if (head == vq->num) { in handle_tx()
343 if (unlikely(vhost_enable_notify(&net->dev, vq))) { in handle_tx()
344 vhost_disable_notify(&net->dev, vq); in handle_tx()
350 vq_err(vq, "Unexpected descriptor format for TX: " in handle_tx()
355 len = iov_length(vq->iov, out); in handle_tx()
356 iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len); in handle_tx()
360 vq_err(vq, "Unexpected header len for TX: " in handle_tx()
377 vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head); in handle_tx()
378 vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS; in handle_tx()
399 vhost_discard_vq_desc(vq, 1); in handle_tx()
406 vhost_add_used_and_signal(&net->dev, vq, head, 0); in handle_tx()
408 vhost_zerocopy_signal_used(net, vq); in handle_tx()
412 vhost_poll_queue(&vq->poll); in handle_tx()
417 mutex_unlock(&vq->mutex); in handle_tx()
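
handle_tx() is the transmit worker: under the vq mutex it fetches the backend socket from private_data, suppresses guest notifications while it can make progress, pulls descriptor chains with vhost_get_vq_desc(), and pushes them into the socket. head == vq->num means the ring is empty, and the enable/disable dance on lines 343-344 closes the race with a concurrent guest kick. A condensed, copy-only skeleton assembled from the listed lines; the zerocopy bookkeeping via upend_idx (lines 330, 377-378), the byte quota, and the name handle_tx_sketch are assumptions:

        static void handle_tx_sketch(struct vhost_net *net)     /* hypothetical */
        {
                struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
                struct vhost_virtqueue *vq = &nvq->vq;
                struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
                struct socket *sock;
                unsigned out, in;
                size_t len;
                int head, err;

                mutex_lock(&vq->mutex);                 /* line 312 */
                sock = vq->private_data;                /* line 313 */
                if (!sock)
                        goto out;
                vhost_disable_notify(&net->dev, vq);    /* line 317 */

                for (;;) {
                        head = vhost_get_vq_desc(vq, vq->iov,
                                                 ARRAY_SIZE(vq->iov),
                                                 &out, &in, NULL, NULL);
                        if (unlikely(head < 0))
                                break;
                        if (head == vq->num) {          /* ring empty */
                                if (unlikely(vhost_enable_notify(&net->dev, vq))) {
                                        vhost_disable_notify(&net->dev, vq);
                                        continue;       /* raced with a kick */
                                }
                                break;
                        }
                        if (in) {       /* TX chains must be out-only */
                                vq_err(vq, "Unexpected descriptor format for TX: "
                                       "out %d, in %d\n", out, in);
                                break;
                        }
                        len = iov_length(vq->iov, out);         /* line 355 */
                        iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len);
                        err = sock->ops->sendmsg(sock, &msg, len);
                        if (unlikely(err < 0)) {
                                vhost_discard_vq_desc(vq, 1);   /* retry later */
                                break;
                        }
                        vhost_add_used_and_signal(&net->dev, vq, head, 0);
                }
        out:
                mutex_unlock(&vq->mutex);               /* line 417 */
        }
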
448 static int get_rx_bufs(struct vhost_virtqueue *vq, in get_rx_bufs() argument
471 r = vhost_get_vq_desc(vq, vq->iov + seg, in get_rx_bufs()
472 ARRAY_SIZE(vq->iov) - seg, &out, in get_rx_bufs()
478 if (d == vq->num) { in get_rx_bufs()
483 vq_err(vq, "unexpected descriptor format for RX: " in get_rx_bufs()
492 heads[headcount].id = cpu_to_vhost32(vq, d); in get_rx_bufs()
493 len = iov_length(vq->iov + seg, in); in get_rx_bufs()
494 heads[headcount].len = cpu_to_vhost32(vq, len); in get_rx_bufs()
499 heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen); in get_rx_bufs()
511 vhost_discard_vq_desc(vq, headcount); in get_rx_bufs()
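
get_rx_bufs() gathers as many descriptor chains as a single packet needs (the mergeable-buffer case), recording each chain's id and byte length into heads[]; line 499 trims the final entry so the reported total matches the packet exactly, and any partial harvest is rolled back with vhost_discard_vq_desc() (line 511). A sketch under an assumed parameter list; the dirty-log bookkeeping and overflow checks of the real function are elided, and callers are assumed to pass datalen > 0:

        static int get_rx_bufs_sketch(struct vhost_virtqueue *vq, /* hypothetical */
                                      struct vring_used_elem *heads,
                                      int datalen, unsigned *iovcount,
                                      unsigned int quota)
        {
                unsigned int out, in;
                int seg = 0, headcount = 0;
                u32 len = 0;
                int d, r;

                while (datalen > 0 && headcount < quota) {
                        r = vhost_get_vq_desc(vq, vq->iov + seg,
                                              ARRAY_SIZE(vq->iov) - seg,
                                              &out, &in, NULL, NULL);
                        if (unlikely(r < 0))
                                goto err;
                        d = r;
                        if (d == vq->num) {     /* guest is out of buffers */
                                r = 0;
                                goto err;
                        }
                        if (unlikely(out || !in)) {     /* RX must be in-only */
                                vq_err(vq, "unexpected descriptor format for RX: "
                                       "out %d, in %d\n", out, in);
                                r = -EINVAL;
                                goto err;
                        }
                        heads[headcount].id = cpu_to_vhost32(vq, d);
                        len = iov_length(vq->iov + seg, in);
                        heads[headcount].len = cpu_to_vhost32(vq, len);
                        datalen -= len;
                        ++headcount;
                        seg += in;
                }
                /* Trim the last chain so the lengths sum to the packet size. */
                heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
                *iovcount = seg;
                return headcount;
        err:
                vhost_discard_vq_desc(vq, headcount);   /* return all taken */
                return r;
        }
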
520 struct vhost_virtqueue *vq = &nvq->vq; in handle_rx() local
543 mutex_lock(&vq->mutex); in handle_rx()
544 sock = vq->private_data; in handle_rx()
547 vhost_disable_notify(&net->dev, vq); in handle_rx()
552 vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ? in handle_rx()
553 vq->log : NULL; in handle_rx()
554 mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF); in handle_rx()
559 headcount = get_rx_bufs(vq, vq->heads, vhost_len, in handle_rx()
567 iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1); in handle_rx()
575 if (unlikely(vhost_enable_notify(&net->dev, vq))) { in handle_rx()
578 vhost_disable_notify(&net->dev, vq); in handle_rx()
586 iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len); in handle_rx()
602 vhost_discard_vq_desc(vq, headcount); in handle_rx()
609 vq_err(vq, "Unable to write vnet_hdr " in handle_rx()
610 "at addr %p\n", vq->iov->iov_base); in handle_rx()
621 num_buffers = cpu_to_vhost16(vq, headcount); in handle_rx()
625 vq_err(vq, "Failed num_buffers write"); in handle_rx()
626 vhost_discard_vq_desc(vq, headcount); in handle_rx()
629 vhost_add_used_and_signal_n(&net->dev, vq, vq->heads, in handle_rx()
632 vhost_log_write(vq, vq_log, log, vhost_len); in handle_rx()
635 vhost_poll_queue(&vq->poll); in handle_rx()
640 mutex_unlock(&vq->mutex); in handle_rx()
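
handle_rx() mirrors the TX side: for each packet waiting on the socket it reserves enough guest buffers via get_rx_bufs(), receives the payload into vq->iov, and, with VIRTIO_NET_F_MRG_RXBUF negotiated, writes the consumed chain count (num_buffers, line 621) into the virtio-net header before signalling all the chains at once. headcount == 0 triggers the same notify re-arm-and-recheck dance as TX; the one-byte iov_iter on line 567 is the overrun path that drains a packet too large for the available buffers; a failed header write is reported on lines 609-610. A condensed skeleton from the listed lines; peek_head_len(), the vhost_hlen member, the full get_rx_bufs() parameter list, and the header copy itself are assumptions or elisions:

        static void handle_rx_sketch(struct vhost_net *net)     /* hypothetical */
        {
                struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
                struct vhost_virtqueue *vq = &nvq->vq;
                struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
                struct vhost_log *vq_log;
                struct socket *sock;
                size_t vhost_len, total_len = 0;
                unsigned in, log;
                int sock_len, headcount, err;
                __virtio16 num_buffers;
                bool mergeable;

                mutex_lock(&vq->mutex);                 /* line 543 */
                sock = vq->private_data;
                if (!sock)
                        goto out;
                vhost_disable_notify(&net->dev, vq);
                vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
                         vq->log : NULL;                /* lines 552-553 */
                mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);

                while ((sock_len = peek_head_len(sock->sk))) {  /* assumed */
                        vhost_len = sock_len + nvq->vhost_hlen; /* assumed */
                        headcount = get_rx_bufs(vq, vq->heads, vhost_len, &in,
                                                vq_log, &log,
                                                likely(mergeable) ? UIO_MAXIOV : 1);
                        if (unlikely(headcount < 0))
                                break;
                        if (!headcount) {       /* out of buffers: re-arm */
                                if (unlikely(vhost_enable_notify(&net->dev, vq))) {
                                        vhost_disable_notify(&net->dev, vq);
                                        continue;
                                }
                                break;
                        }
                        iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
                        err = sock->ops->recvmsg(sock, &msg, sock_len,
                                                 MSG_DONTWAIT | MSG_TRUNC);
                        if (unlikely(err != sock_len)) {
                                vhost_discard_vq_desc(vq, headcount);
                                continue;       /* packet vanished or clipped */
                        }
                        /* line 621; the copy into the header area is elided */
                        num_buffers = cpu_to_vhost16(vq, headcount);
                        vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
                                                    headcount);
                        if (unlikely(vq_log))
                                vhost_log_write(vq, vq_log, log, vhost_len);
                        total_len += vhost_len;
                        if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
                                /* out of byte quota: requeue and yield */
                                vhost_poll_queue(&vq->poll);    /* line 635 */
                                break;
                        }
                }
        out:
                mutex_unlock(&vq->mutex);               /* line 640 */
        }
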
645 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, in handle_tx_kick() local
647 struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev); in handle_tx_kick()
654 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, in handle_rx_kick() local
656 struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev); in handle_rx_kick()
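
The kick handlers are thin trampolines: the vhost worker hands them a vhost_work, container_of() climbs from the work item to the virtqueue and then from vq->dev to the vhost_net device, and the real work happens in handle_tx()/handle_rx(). Reconstructed for the TX side; the poll.work member in the first container_of() is an assumption, and RX is symmetric:

        static void handle_tx_kick(struct vhost_work *work)
        {
                struct vhost_virtqueue *vq =
                        container_of(work, struct vhost_virtqueue,
                                     poll.work);        /* member assumed */
                struct vhost_net *net =
                        container_of(vq->dev, struct vhost_net, dev);

                handle_tx(net);
        }
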
695 vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq; in vhost_net_open()
696 vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq; in vhost_net_open()
697 n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick; in vhost_net_open()
698 n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick; in vhost_net_open()
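
vhost_net_open() wires the device together: the embedded vhost_virtqueues are collected into a pointer array handed to vhost_dev_init(), and each queue gets its kick handler. A sketch of just that wiring, with the vhost_dev_init() call shape assumed; the array is heap-allocated because the vhost core keeps the pointer:

        static int vhost_net_open_sketch(struct vhost_net *n)   /* hypothetical */
        {
                struct vhost_virtqueue **vqs;

                vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
                if (!vqs)
                        return -ENOMEM;

                vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;     /* 695 */
                vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;     /* 696 */
                n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
                n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;

                vhost_dev_init(&n->dev, vqs, VHOST_NET_VQ_MAX); /* assumed */
                return 0;
        }
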
718 struct vhost_virtqueue *vq) in vhost_net_disable_vq() argument
721 container_of(vq, struct vhost_net_virtqueue, vq); in vhost_net_disable_vq()
723 if (!vq->private_data) in vhost_net_disable_vq()
729 struct vhost_virtqueue *vq) in vhost_net_enable_vq() argument
732 container_of(vq, struct vhost_net_virtqueue, vq); in vhost_net_enable_vq()
736 sock = vq->private_data; in vhost_net_enable_vq()
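
vhost_net_disable_vq() and vhost_net_enable_vq() stop and start file polling on the backend socket; both treat vq->private_data as the "is a backend attached" flag, so they must run under the vq mutex. A sketch; the poll-slot lookup (an n->poll array indexed by queue position) is an assumption:

        static void vhost_net_disable_vq(struct vhost_net *n,
                                         struct vhost_virtqueue *vq)
        {
                struct vhost_net_virtqueue *nvq =
                        container_of(vq, struct vhost_net_virtqueue, vq);
                struct vhost_poll *poll = n->poll + (nvq - n->vqs); /* assumed */

                if (!vq->private_data)          /* line 723 */
                        return;                 /* nothing attached or polled */
                vhost_poll_stop(poll);
        }

        static int vhost_net_enable_vq(struct vhost_net *n,
                                       struct vhost_virtqueue *vq)
        {
                struct vhost_net_virtqueue *nvq =
                        container_of(vq, struct vhost_net_virtqueue, vq);
                struct vhost_poll *poll = n->poll + (nvq - n->vqs); /* assumed */
                struct socket *sock;

                sock = vq->private_data;        /* line 736 */
                if (!sock)
                        return 0;
                return vhost_poll_start(poll, sock->file);
        }
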
744 struct vhost_virtqueue *vq) in vhost_net_stop_vq() argument
748 mutex_lock(&vq->mutex); in vhost_net_stop_vq()
749 sock = vq->private_data; in vhost_net_stop_vq()
750 vhost_net_disable_vq(n, vq); in vhost_net_stop_vq()
751 vq->private_data = NULL; in vhost_net_stop_vq()
752 mutex_unlock(&vq->mutex); in vhost_net_stop_vq()
759 *tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq); in vhost_net_stop()
760 *rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq); in vhost_net_stop()
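
vhost_net_stop_vq() detaches a backend under the vq mutex and hands the old socket back to the caller, so its reference can be dropped outside the lock; vhost_net_stop() simply does this for both queues (lines 759-760). Reconstructed from the listed lines, with only the return statement inferred:

        static struct socket *vhost_net_stop_vq(struct vhost_net *n,
                                                struct vhost_virtqueue *vq)
        {
                struct socket *sock;

                mutex_lock(&vq->mutex);
                sock = vq->private_data;
                vhost_net_disable_vq(n, vq);    /* stop polling first */
                vq->private_data = NULL;
                mutex_unlock(&vq->mutex);
                return sock;    /* caller releases after flushing */
        }
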
766 vhost_poll_flush(&n->vqs[index].vq.poll); in vhost_net_flush_vq()
774 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
776 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
779 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
782 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
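
vhost_net_flush() first flushes the poll work for both queues, then, when zerocopy is active, waits for outstanding DMA; that is why the listing shows the TX vq mutex taken and released twice (lines 774/776 and 779/782), bracketing the wait. A sketch; the tx_flush flag, the ubufs member, and the wait primitive are all assumptions:

        static void vhost_net_flush_sketch(struct vhost_net *n) /* hypothetical */
        {
                vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
                vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
                if (n->vqs[VHOST_NET_VQ_TX].ubufs) {    /* member assumed */
                        mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
                        n->tx_flush = true;             /* flag assumed */
                        mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
                        /* wait for in-flight zerocopy DMAs; primitive assumed */
                        vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
                        mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
                        n->tx_flush = false;
                        mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
                }
        }
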
879 struct vhost_virtqueue *vq; in vhost_net_set_backend() local
893 vq = &n->vqs[index].vq; in vhost_net_set_backend()
895 mutex_lock(&vq->mutex); in vhost_net_set_backend()
898 if (!vhost_vq_access_ok(vq)) { in vhost_net_set_backend()
909 oldsock = vq->private_data; in vhost_net_set_backend()
911 ubufs = vhost_net_ubuf_alloc(vq, in vhost_net_set_backend()
918 vhost_net_disable_vq(n, vq); in vhost_net_set_backend()
919 vq->private_data = sock; in vhost_net_set_backend()
920 r = vhost_init_used(vq); in vhost_net_set_backend()
923 r = vhost_net_enable_vq(n, vq); in vhost_net_set_backend()
935 mutex_unlock(&vq->mutex); in vhost_net_set_backend()
939 mutex_lock(&vq->mutex); in vhost_net_set_backend()
940 vhost_zerocopy_signal_used(n, vq); in vhost_net_set_backend()
941 mutex_unlock(&vq->mutex); in vhost_net_set_backend()
953 vq->private_data = oldsock; in vhost_net_set_backend()
954 vhost_net_enable_vq(n, vq); in vhost_net_set_backend()
960 mutex_unlock(&vq->mutex); in vhost_net_set_backend()
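
vhost_net_set_backend() is the VHOST_NET_SET_BACKEND ioctl path and shows the full swap protocol: validate ring access under the vq mutex (line 898), allocate a fresh ubuf block for the new socket, stop polling, publish the new socket in private_data, re-initialize the used-ring state, restart polling, and on failure roll private_data back to the old socket (line 953). Stale zerocopy completions for the old backend are drained under the mutex afterwards (lines 939-941). A condensed skeleton; the device-mutex locking, the get_socket() and vhost_sock_zcopy() helpers, and the errno choices are assumptions:

        static long vhost_net_set_backend_sketch(struct vhost_net *n,
                                                 unsigned index, int fd)
        {                                       /* hypothetical name */
                struct socket *sock, *oldsock;
                struct vhost_net_ubuf_ref *ubufs;
                struct vhost_virtqueue *vq;
                int r;

                if (index >= VHOST_NET_VQ_MAX)
                        return -ENOBUFS;        /* errno assumed */
                vq = &n->vqs[index].vq;         /* line 893 */
                mutex_lock(&vq->mutex);

                if (!vhost_vq_access_ok(vq)) {  /* ring must be set up first */
                        r = -EFAULT;
                        goto err_unlock;
                }
                sock = get_socket(fd);          /* helper assumed */
                if (IS_ERR(sock)) {
                        r = PTR_ERR(sock);
                        goto err_unlock;
                }
                oldsock = vq->private_data;     /* line 909 */
                if (sock != oldsock) {
                        ubufs = vhost_net_ubuf_alloc(vq,
                                        sock && vhost_sock_zcopy(sock));
                        if (IS_ERR(ubufs)) {
                                r = PTR_ERR(ubufs);
                                goto err_unlock;
                        }
                        vhost_net_disable_vq(n, vq);
                        vq->private_data = sock;        /* publish new backend */
                        r = vhost_init_used(vq);        /* reset used-ring state */
                        if (r)
                                goto err_used;
                        r = vhost_net_enable_vq(n, vq);
                        if (r)
                                goto err_used;
                }
                mutex_unlock(&vq->mutex);
                return 0;

        err_used:
                vq->private_data = oldsock;     /* line 953: roll back */
                vhost_net_enable_vq(n, vq);
        err_unlock:
                mutex_unlock(&vq->mutex);
                return r;
        }
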
1020 mutex_lock(&n->vqs[i].vq.mutex); in vhost_net_set_features()
1021 n->vqs[i].vq.acked_features = features; in vhost_net_set_features()
1024 mutex_unlock(&n->vqs[i].vq.mutex); in vhost_net_set_features()
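
Finally, vhost_net_set_features() latches the negotiated feature bits into each virtqueue under that queue's own mutex, so handle_tx()/handle_rx() always observe a consistent snapshot via vhost_has_feature(). A sketch; the recomputation of the vnet header lengths, which the real driver also does in this loop, is elided, and the name and signature are assumed:

        static void vhost_net_set_features_sketch(struct vhost_net *n,
                                                  u64 features)
        {
                int i;

                for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
                        mutex_lock(&n->vqs[i].vq.mutex);
                        n->vqs[i].vq.acked_features = features; /* line 1021 */
                        /* per-queue header lengths updated here in the
                         * real driver */
                        mutex_unlock(&n->vqs[i].vq.mutex);
                }
        }
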