Lines matching references to n (the struct vhost_net device pointer) in drivers/vhost/net.c

157 static void vhost_net_clear_ubuf_info(struct vhost_net *n)  in vhost_net_clear_ubuf_info()  argument
162 kfree(n->vqs[i].ubuf_info); in vhost_net_clear_ubuf_info()
163 n->vqs[i].ubuf_info = NULL; in vhost_net_clear_ubuf_info()
167 static int vhost_net_set_ubuf_info(struct vhost_net *n) in vhost_net_set_ubuf_info() argument
176 n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) * in vhost_net_set_ubuf_info()
178 if (!n->vqs[i].ubuf_info) in vhost_net_set_ubuf_info()
184 vhost_net_clear_ubuf_info(n); in vhost_net_set_ubuf_info()
188 static void vhost_net_vq_reset(struct vhost_net *n) in vhost_net_vq_reset() argument
192 vhost_net_clear_ubuf_info(n); in vhost_net_vq_reset()
195 n->vqs[i].done_idx = 0; in vhost_net_vq_reset()
196 n->vqs[i].upend_idx = 0; in vhost_net_vq_reset()
197 n->vqs[i].ubufs = NULL; in vhost_net_vq_reset()
198 n->vqs[i].vhost_hlen = 0; in vhost_net_vq_reset()
199 n->vqs[i].sock_hlen = 0; in vhost_net_vq_reset()
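
The vhost_net_set_ubuf_info() / vhost_net_clear_ubuf_info() / vhost_net_vq_reset() lines above show a per-queue allocate-with-full-unwind pattern: each queue gets its own ubuf_info array, a failure part-way through frees whatever was already allocated, and clearing NULLs the pointers so the routine is safe to call again from reset paths. A minimal kernel-style sketch of that pattern follows; the my_* names, queue count, and queue length are placeholders, not the driver's actual definitions.

#include <linux/slab.h>
#include <linux/errno.h>

#define MY_VQ_MAX 2
#define MY_VQ_LEN 256

struct my_ubuf { void *ctx; };
struct my_vq   { struct my_ubuf *ubuf_info; int upend_idx, done_idx; };
struct my_net  { struct my_vq vqs[MY_VQ_MAX]; };

static void my_clear_ubuf_info(struct my_net *n)
{
    int i;

    for (i = 0; i < MY_VQ_MAX; i++) {
        kfree(n->vqs[i].ubuf_info);
        n->vqs[i].ubuf_info = NULL;   /* idempotent: safe to clear twice */
    }
}

static int my_set_ubuf_info(struct my_net *n)
{
    int i;

    for (i = 0; i < MY_VQ_MAX; i++) {
        n->vqs[i].ubuf_info = kcalloc(MY_VQ_LEN,
                                      sizeof(*n->vqs[i].ubuf_info),
                                      GFP_KERNEL);
        if (!n->vqs[i].ubuf_info)
            goto err;                 /* unwind the partial allocation */
    }
    return 0;

err:
    my_clear_ubuf_info(n);            /* NULL slots are kfree()-safe */
    return -ENOMEM;
}

static void my_vq_reset(struct my_net *n)
{
    int i;

    my_clear_ubuf_info(n);
    for (i = 0; i < MY_VQ_MAX; i++)
        n->vqs[i].upend_idx = n->vqs[i].done_idx = 0;
}

Keeping the clear routine idempotent is what lets it be called both from the error path of the setter and from the reset path without tracking which queues were actually populated.
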
677 struct vhost_net *n; in vhost_net_open() local
682 n = kmalloc(sizeof *n, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); in vhost_net_open()
683 if (!n) { in vhost_net_open()
684 n = vmalloc(sizeof *n); in vhost_net_open()
685 if (!n) in vhost_net_open()
690 kvfree(n); in vhost_net_open()
694 dev = &n->dev; in vhost_net_open()
695 vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq; in vhost_net_open()
696 vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq; in vhost_net_open()
697 n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick; in vhost_net_open()
698 n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick; in vhost_net_open()
700 n->vqs[i].ubufs = NULL; in vhost_net_open()
701 n->vqs[i].ubuf_info = NULL; in vhost_net_open()
702 n->vqs[i].upend_idx = 0; in vhost_net_open()
703 n->vqs[i].done_idx = 0; in vhost_net_open()
704 n->vqs[i].vhost_hlen = 0; in vhost_net_open()
705 n->vqs[i].sock_hlen = 0; in vhost_net_open()
709 vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev); in vhost_net_open()
710 vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev); in vhost_net_open()
712 f->private_data = n; in vhost_net_open()
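
The vhost_net_open() lines use the older open-coded large-allocation idiom: try kmalloc() quietly first, fall back to vmalloc() when physically contiguous memory is unavailable, and rely on kvfree() to release either kind, as both the error path (line 690) and vhost_net_release() (line 807) do. A condensed sketch of that idiom, with a placeholder struct; newer kernels would simply call kvmalloc().

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>        /* kvfree() */

struct my_net;               /* large, possibly page-spanning device state */

static struct my_net *my_net_alloc(size_t size)
{
    struct my_net *n;

    /* Slab first; suppress the allocation-failure warning, using the
     * same flags the listing shows for this era of the driver. */
    n = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
    if (!n) {
        n = vmalloc(size);   /* fall back to vmalloc space */
        if (!n)
            return NULL;
    }
    return n;                /* caller releases with kvfree() */
}

kvfree() works out which allocator the pointer came from, which is why the callers above can free n without remembering whether the fallback was taken.
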
717 static void vhost_net_disable_vq(struct vhost_net *n, in vhost_net_disable_vq() argument
722 struct vhost_poll *poll = n->poll + (nvq - n->vqs); in vhost_net_disable_vq()
728 static int vhost_net_enable_vq(struct vhost_net *n, in vhost_net_enable_vq() argument
733 struct vhost_poll *poll = n->poll + (nvq - n->vqs); in vhost_net_enable_vq()
743 static struct socket *vhost_net_stop_vq(struct vhost_net *n, in vhost_net_stop_vq() argument
750 vhost_net_disable_vq(n, vq); in vhost_net_stop_vq()
756 static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock, in vhost_net_stop() argument
759 *tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq); in vhost_net_stop()
760 *rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq); in vhost_net_stop()
763 static void vhost_net_flush_vq(struct vhost_net *n, int index) in vhost_net_flush_vq() argument
765 vhost_poll_flush(n->poll + index); in vhost_net_flush_vq()
766 vhost_poll_flush(&n->vqs[index].vq.poll); in vhost_net_flush_vq()
769 static void vhost_net_flush(struct vhost_net *n) in vhost_net_flush() argument
771 vhost_net_flush_vq(n, VHOST_NET_VQ_TX); in vhost_net_flush()
772 vhost_net_flush_vq(n, VHOST_NET_VQ_RX); in vhost_net_flush()
773 if (n->vqs[VHOST_NET_VQ_TX].ubufs) { in vhost_net_flush()
774 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
775 n->tx_flush = true; in vhost_net_flush()
776 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
778 vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs); in vhost_net_flush()
779 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
780 n->tx_flush = false; in vhost_net_flush()
781 atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1); in vhost_net_flush()
782 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
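
The vhost_net_flush() lines around the TX queue implement a zero-copy drain: a tx_flush flag is raised under the TX vq mutex so new transmissions stop going zero-copy, the code then waits for every outstanding ubuf reference to be put, and finally clears the flag and re-arms the refcount to 1 (line 781). Below is a kernel-style sketch of that drain, assuming a base-reference-plus-one-per-buffer counting scheme; all my_* names are placeholders and vhost's own helpers are not reproduced here.

#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/types.h>

struct my_ubufs {
    atomic_t refcount;            /* 1 base ref + one per in-flight buffer */
    wait_queue_head_t wait;
};

struct my_txq {
    struct mutex mutex;
    struct my_ubufs *ubufs;
    bool flushing;
};

static void my_tx_flush(struct my_txq *q)
{
    mutex_lock(&q->mutex);
    q->flushing = true;           /* submitters now fall back to copying */
    mutex_unlock(&q->mutex);

    /* Drop the base reference; completion handlers (not shown) do the
     * same decrement-and-wake as each in-flight buffer finishes. */
    if (atomic_dec_and_test(&q->ubufs->refcount))
        wake_up(&q->ubufs->wait);
    wait_event(q->ubufs->wait, !atomic_read(&q->ubufs->refcount));

    mutex_lock(&q->mutex);
    q->flushing = false;
    atomic_set(&q->ubufs->refcount, 1);   /* re-arm for future TX */
    mutex_unlock(&q->mutex);
}
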
788 struct vhost_net *n = f->private_data; in vhost_net_release() local
792 vhost_net_stop(n, &tx_sock, &rx_sock); in vhost_net_release()
793 vhost_net_flush(n); in vhost_net_release()
794 vhost_dev_stop(&n->dev); in vhost_net_release()
795 vhost_dev_cleanup(&n->dev, false); in vhost_net_release()
796 vhost_net_vq_reset(n); in vhost_net_release()
805 vhost_net_flush(n); in vhost_net_release()
806 kfree(n->dev.vqs); in vhost_net_release()
807 kvfree(n); in vhost_net_release()
876 static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) in vhost_net_set_backend() argument
884 mutex_lock(&n->dev.mutex); in vhost_net_set_backend()
885 r = vhost_dev_check_owner(&n->dev); in vhost_net_set_backend()
893 vq = &n->vqs[index].vq; in vhost_net_set_backend()
894 nvq = &n->vqs[index]; in vhost_net_set_backend()
918 vhost_net_disable_vq(n, vq); in vhost_net_set_backend()
923 r = vhost_net_enable_vq(n, vq); in vhost_net_set_backend()
930 n->tx_packets = 0; in vhost_net_set_backend()
931 n->tx_zcopy_err = 0; in vhost_net_set_backend()
932 n->tx_flush = false; in vhost_net_set_backend()
940 vhost_zerocopy_signal_used(n, vq); in vhost_net_set_backend()
945 vhost_net_flush_vq(n, index); in vhost_net_set_backend()
949 mutex_unlock(&n->dev.mutex); in vhost_net_set_backend()
954 vhost_net_enable_vq(n, vq); in vhost_net_set_backend()
962 mutex_unlock(&n->dev.mutex); in vhost_net_set_backend()
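
vhost_net_set_backend() follows a quiesce, swap, re-enable shape under the device mutex: verify the caller owns the device, disable polling on the queue, attach the new socket, re-enable, and roll back to the old socket if that fails; the queue is then flushed and the old socket dropped once the mutex is released. A stripped-down sketch of that shape, with placeholder my_* helpers standing in for the driver's own; the real function also validates the queue index, initializes zero-copy state, and resets the tx counters shown at lines 930-932.

#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/err.h>

struct my_vq;
struct socket;
struct my_net { struct mutex dev_mutex; };

/* Stand-ins for vhost_net_disable_vq()/vhost_net_enable_vq() and for
 * the fd-to-socket lookup the driver performs. */
void my_disable_vq(struct my_net *n, struct my_vq *vq);
int  my_enable_vq(struct my_net *n, struct my_vq *vq);
struct socket *my_get_socket(int fd);

long my_set_backend(struct my_net *n, struct my_vq *vq,
                    struct socket **backend, int fd)
{
    struct socket *sock, *old;
    long r;

    mutex_lock(&n->dev_mutex);

    sock = my_get_socket(fd);
    if (IS_ERR(sock)) {
        r = PTR_ERR(sock);
        goto err_unlock;
    }

    old = *backend;
    my_disable_vq(n, vq);         /* quiesce polling on the old socket */
    *backend = sock;
    r = my_enable_vq(n, vq);      /* start polling the new socket */
    if (r)
        goto err_rollback;

    mutex_unlock(&n->dev_mutex);
    /* flush the queue and release `old` here, outside the mutex */
    return 0;

err_rollback:
    *backend = old;
    my_enable_vq(n, vq);
err_unlock:
    mutex_unlock(&n->dev_mutex);
    return r;
}
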
966 static long vhost_net_reset_owner(struct vhost_net *n) in vhost_net_reset_owner() argument
973 mutex_lock(&n->dev.mutex); in vhost_net_reset_owner()
974 err = vhost_dev_check_owner(&n->dev); in vhost_net_reset_owner()
982 vhost_net_stop(n, &tx_sock, &rx_sock); in vhost_net_reset_owner()
983 vhost_net_flush(n); in vhost_net_reset_owner()
984 vhost_dev_reset_owner(&n->dev, memory); in vhost_net_reset_owner()
985 vhost_net_vq_reset(n); in vhost_net_reset_owner()
987 mutex_unlock(&n->dev.mutex); in vhost_net_reset_owner()
995 static int vhost_net_set_features(struct vhost_net *n, u64 features) in vhost_net_set_features() argument
1013 mutex_lock(&n->dev.mutex); in vhost_net_set_features()
1015 !vhost_log_access_ok(&n->dev)) { in vhost_net_set_features()
1016 mutex_unlock(&n->dev.mutex); in vhost_net_set_features()
1020 mutex_lock(&n->vqs[i].vq.mutex); in vhost_net_set_features()
1021 n->vqs[i].vq.acked_features = features; in vhost_net_set_features()
1022 n->vqs[i].vhost_hlen = vhost_hlen; in vhost_net_set_features()
1023 n->vqs[i].sock_hlen = sock_hlen; in vhost_net_set_features()
1024 mutex_unlock(&n->vqs[i].vq.mutex); in vhost_net_set_features()
1026 mutex_unlock(&n->dev.mutex); in vhost_net_set_features()
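
The vhost_net_set_features() lines show the locking order used when reconfiguring a live device: the device mutex is held across the whole update (and the log-access check), and each queue's own mutex is taken while its acked_features and header lengths are written, so no queue ever runs with a half-applied configuration. A minimal sketch of that ordering; the names and the single hdr_len field are placeholders for the driver's vhost_hlen/sock_hlen pair.

#include <linux/mutex.h>
#include <linux/types.h>

#define MY_VQ_MAX 2

struct my_vq  { struct mutex mutex; u64 acked_features; size_t hdr_len; };
struct my_net { struct mutex dev_mutex; struct my_vq vqs[MY_VQ_MAX]; };

static void my_set_features(struct my_net *n, u64 features, size_t hdr_len)
{
    int i;

    mutex_lock(&n->dev_mutex);            /* outer: device mutex */
    for (i = 0; i < MY_VQ_MAX; i++) {
        mutex_lock(&n->vqs[i].mutex);     /* inner: per-queue mutex */
        n->vqs[i].acked_features = features;
        n->vqs[i].hdr_len = hdr_len;
        mutex_unlock(&n->vqs[i].mutex);
    }
    mutex_unlock(&n->dev_mutex);
}
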
1030 static long vhost_net_set_owner(struct vhost_net *n) in vhost_net_set_owner() argument
1034 mutex_lock(&n->dev.mutex); in vhost_net_set_owner()
1035 if (vhost_dev_has_owner(&n->dev)) { in vhost_net_set_owner()
1039 r = vhost_net_set_ubuf_info(n); in vhost_net_set_owner()
1042 r = vhost_dev_set_owner(&n->dev); in vhost_net_set_owner()
1044 vhost_net_clear_ubuf_info(n); in vhost_net_set_owner()
1045 vhost_net_flush(n); in vhost_net_set_owner()
1047 mutex_unlock(&n->dev.mutex); in vhost_net_set_owner()
1054 struct vhost_net *n = f->private_data; in vhost_net_ioctl() local
1065 return vhost_net_set_backend(n, backend.index, backend.fd); in vhost_net_ioctl()
1076 return vhost_net_set_features(n, features); in vhost_net_ioctl()
1078 return vhost_net_reset_owner(n); in vhost_net_ioctl()
1080 return vhost_net_set_owner(n); in vhost_net_ioctl()
1082 mutex_lock(&n->dev.mutex); in vhost_net_ioctl()
1083 r = vhost_dev_ioctl(&n->dev, ioctl, argp); in vhost_net_ioctl()
1085 r = vhost_vring_ioctl(&n->dev, ioctl, argp); in vhost_net_ioctl()
1087 vhost_net_flush(n); in vhost_net_ioctl()
1088 mutex_unlock(&n->dev.mutex); in vhost_net_ioctl()
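
vhost_net_ioctl() is a thin dispatcher: the net-specific commands go to the helpers listed above (set_backend, set_features, reset_owner, set_owner), and everything else is forwarded to the generic vhost handlers under the device mutex, followed by a flush. A sketch of that shape; the my_* wrappers and command numbers are placeholders, while vhost_dev_ioctl() and vhost_vring_ioctl() are the real generic entry points declared in drivers/vhost/vhost.h. The -ENOIOCTLCMD fall-through shown is the usual vhost convention; the listing only shows that both generic handlers and a flush are reachable from the default branch.

#include <linux/mutex.h>
#include <linux/errno.h>
#include "vhost.h"               /* struct vhost_dev and the generic helpers
                                  * (only available inside drivers/vhost/) */

struct my_net { struct vhost_dev dev; };

/* Net-level helpers as referenced in the listing; bodies elided. */
long my_net_set_backend(struct my_net *n, void __user *argp);
long my_net_set_features(struct my_net *n, void __user *argp);
long my_net_reset_owner(struct my_net *n);
void my_net_flush(struct my_net *n);

/* Placeholder command numbers; the real driver decodes VHOST_* ioctls. */
#define MY_NET_SET_BACKEND 1
#define MY_SET_FEATURES    2
#define MY_RESET_OWNER     3

long my_net_ioctl(struct my_net *n, unsigned int ioctl, void __user *argp)
{
    long r;

    switch (ioctl) {
    case MY_NET_SET_BACKEND:
        return my_net_set_backend(n, argp);
    case MY_SET_FEATURES:
        return my_net_set_features(n, argp);
    case MY_RESET_OWNER:
        return my_net_reset_owner(n);
    default:
        mutex_lock(&n->dev.mutex);
        r = vhost_dev_ioctl(&n->dev, ioctl, argp);
        if (r == -ENOIOCTLCMD)
            r = vhost_vring_ioctl(&n->dev, ioctl, argp);
        else
            my_net_flush(n);
        mutex_unlock(&n->dev.mutex);
        return r;
    }
}
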