vqs               218 arch/arm64/kvm/guest.c #define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))
vqs               223 arch/arm64/kvm/guest.c 	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];
vqs               231 arch/arm64/kvm/guest.c 	memset(vqs, 0, sizeof(vqs));
vqs               236 arch/arm64/kvm/guest.c 			vqs[vq_word(vq)] |= vq_mask(vq);
vqs               238 arch/arm64/kvm/guest.c 	if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
vqs               247 arch/arm64/kvm/guest.c 	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];
vqs               258 arch/arm64/kvm/guest.c 	if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
vqs               263 arch/arm64/kvm/guest.c 		if (vq_present(vqs, vq))
vqs               277 arch/arm64/kvm/guest.c 		if (vq_present(vqs, vq) != sve_vq_available(vq))
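
The arch/arm64/kvm/guest.c hits above read and write the SVE vector-length pseudo-register: one bit per vector quantum (VQ) is packed into an array of u64 words, copied to or from userspace, and then compared against what the host actually supports. Below is a minimal userspace sketch of that packing, assuming VQ_MIN/VQ_MAX/VLS_WORDS stand in for the kernel's SVE_VQ_MIN/SVE_VQ_MAX/KVM_ARM64_SVE_VLS_WORDS and that vq_word()/vq_mask() map a VQ onto a word index and bit mask, as the vq_present() macro above implies.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bounds standing in for SVE_VQ_MIN / SVE_VQ_MAX. */
#define VQ_MIN		1
#define VQ_MAX		512
#define VLS_WORDS	(((VQ_MAX - VQ_MIN) / 64) + 1)

/* Same shape as the kernel helpers: one bit per vector quantum. */
#define vq_word(vq)	(((vq) - VQ_MIN) / 64)
#define vq_mask(vq)	((uint64_t)1 << (((vq) - VQ_MIN) % 64))
#define vq_present(vqs, vq)	(!!((vqs)[vq_word(vq)] & vq_mask(vq)))

int main(void)
{
	uint64_t vqs[VLS_WORDS] = { 0 };
	unsigned int vq;

	/* Mark VQ 1..4 (128- to 512-bit vectors) as supported. */
	for (vq = 1; vq <= 4; vq++)
		vqs[vq_word(vq)] |= vq_mask(vq);

	for (vq = VQ_MIN; vq <= 8; vq++)
		printf("vq %u present: %d\n", vq, vq_present(vqs, vq));
	return 0;
}
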
vqs               751 arch/um/drivers/virtio_uml.c 	list_for_each_entry_reverse(vq, &vdev->vqs, list)
vqs               757 arch/um/drivers/virtio_uml.c 	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
vqs               865 arch/um/drivers/virtio_uml.c 		       struct virtqueue *vqs[], vq_callback_t *callbacks[],
vqs               879 arch/um/drivers/virtio_uml.c 			vqs[i] = NULL;
vqs               883 arch/um/drivers/virtio_uml.c 		vqs[i] = vu_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
vqs               885 arch/um/drivers/virtio_uml.c 		if (IS_ERR(vqs[i])) {
vqs               886 arch/um/drivers/virtio_uml.c 			rc = PTR_ERR(vqs[i]);
vqs               891 arch/um/drivers/virtio_uml.c 	list_for_each_entry(vq, &vdev->vqs, list) {
vqs                71 drivers/block/virtio_blk.c 	struct virtio_blk_vq *vqs;
vqs               254 drivers/block/virtio_blk.c 	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
vqs               257 drivers/block/virtio_blk.c 		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
vqs               270 drivers/block/virtio_blk.c 	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
vqs               276 drivers/block/virtio_blk.c 	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
vqs               351 drivers/block/virtio_blk.c 	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
vqs               353 drivers/block/virtio_blk.c 		err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
vqs               355 drivers/block/virtio_blk.c 		err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
vqs               357 drivers/block/virtio_blk.c 		virtqueue_kick(vblk->vqs[qid].vq);
vqs               363 drivers/block/virtio_blk.c 		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
vqs               374 drivers/block/virtio_blk.c 	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
vqs               376 drivers/block/virtio_blk.c 	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
vqs               379 drivers/block/virtio_blk.c 		virtqueue_notify(vblk->vqs[qid].vq);
vqs               578 drivers/block/virtio_blk.c 	struct virtqueue **vqs;
vqs               591 drivers/block/virtio_blk.c 	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
vqs               592 drivers/block/virtio_blk.c 	if (!vblk->vqs)
vqs               597 drivers/block/virtio_blk.c 	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
vqs               598 drivers/block/virtio_blk.c 	if (!names || !callbacks || !vqs) {
vqs               605 drivers/block/virtio_blk.c 		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
vqs               606 drivers/block/virtio_blk.c 		names[i] = vblk->vqs[i].name;
vqs               610 drivers/block/virtio_blk.c 	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
vqs               615 drivers/block/virtio_blk.c 		spin_lock_init(&vblk->vqs[i].lock);
vqs               616 drivers/block/virtio_blk.c 		vblk->vqs[i].vq = vqs[i];
vqs               621 drivers/block/virtio_blk.c 	kfree(vqs);
vqs               625 drivers/block/virtio_blk.c 		kfree(vblk->vqs);
vqs               857 drivers/block/virtio_blk.c 		virtblk_queue_depth = vblk->vqs[0].vq->num_free;
vqs              1023 drivers/block/virtio_blk.c 	kfree(vblk->vqs);
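
The drivers/block/virtio_blk.c hits show the usual driver-side setup: init_vq() allocates temporary vqs/callbacks/names arrays, names each request queue, hands the arrays to virtio_find_vqs(), copies the returned virtqueue pointers into per-queue state, and frees the scratch arrays. A hedged sketch of that pattern follows; struct my_vq, my_init_vqs() and my_vq_done() are illustrative names rather than the driver's own, and error unwinding is abbreviated.

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

struct my_vq {
	struct virtqueue *vq;
	spinlock_t lock;
	char name[16];
};

static void my_vq_done(struct virtqueue *vq);	/* hypothetical completion callback */

static int my_init_vqs(struct virtio_device *vdev, struct my_vq *mvqs,
		       unsigned int num_vqs)
{
	struct virtqueue **vqs;
	vq_callback_t **callbacks;
	const char **names;
	unsigned int i;
	int err = -ENOMEM;

	/* Temporary parallel arrays consumed by virtio_find_vqs(). */
	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
	names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
	if (!vqs || !callbacks || !names)
		goto out;

	for (i = 0; i < num_vqs; i++) {
		callbacks[i] = my_vq_done;
		snprintf(mvqs[i].name, sizeof(mvqs[i].name), "req.%u", i);
		names[i] = mvqs[i].name;
	}

	/* One transport call creates all queues in a single pass. */
	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, NULL);
	if (err)
		goto out;

	/* Keep only the virtqueue pointers; the arrays were scaffolding. */
	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&mvqs[i].lock);
		mvqs[i].vq = vqs[i];
	}
out:
	kfree(names);
	kfree(callbacks);
	kfree(vqs);
	return err;
}
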
vqs              1849 drivers/char/virtio_console.c 	struct virtqueue **vqs;
vqs              1856 drivers/char/virtio_console.c 	vqs = kmalloc_array(nr_queues, sizeof(struct virtqueue *), GFP_KERNEL);
vqs              1864 drivers/char/virtio_console.c 	if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs ||
vqs              1897 drivers/char/virtio_console.c 	err = virtio_find_vqs(portdev->vdev, nr_queues, vqs,
vqs              1904 drivers/char/virtio_console.c 	portdev->in_vqs[0] = vqs[0];
vqs              1905 drivers/char/virtio_console.c 	portdev->out_vqs[0] = vqs[1];
vqs              1908 drivers/char/virtio_console.c 		portdev->c_ivq = vqs[j];
vqs              1909 drivers/char/virtio_console.c 		portdev->c_ovq = vqs[j + 1];
vqs              1913 drivers/char/virtio_console.c 			portdev->in_vqs[i] = vqs[j];
vqs              1914 drivers/char/virtio_console.c 			portdev->out_vqs[i] = vqs[j + 1];
vqs              1919 drivers/char/virtio_console.c 	kfree(vqs);
vqs              1928 drivers/char/virtio_console.c 	kfree(vqs);
vqs                51 drivers/crypto/virtio/virtio_crypto_core.c 	struct virtqueue **vqs;
vqs                65 drivers/crypto/virtio/virtio_crypto_core.c 	vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
vqs                66 drivers/crypto/virtio/virtio_crypto_core.c 	if (!vqs)
vqs                87 drivers/crypto/virtio/virtio_crypto_core.c 	ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL);
vqs                91 drivers/crypto/virtio/virtio_crypto_core.c 	vi->ctrl_vq = vqs[total_vqs - 1];
vqs                95 drivers/crypto/virtio/virtio_crypto_core.c 		vi->data_vq[i].vq = vqs[i];
vqs               106 drivers/crypto/virtio/virtio_crypto_core.c 	kfree(vqs);
vqs               116 drivers/crypto/virtio/virtio_crypto_core.c 	kfree(vqs);
vqs               120 drivers/gpu/drm/virtio/virtgpu_kms.c 	struct virtqueue *vqs[2];
vqs               163 drivers/gpu/drm/virtio/virtgpu_kms.c 	ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
vqs               168 drivers/gpu/drm/virtio/virtgpu_kms.c 	vgdev->ctrlq.vq = vqs[0];
vqs               169 drivers/gpu/drm/virtio/virtgpu_kms.c 	vgdev->cursorq.vq = vqs[1];
vqs                42 drivers/iommu/virtio-iommu.c 	struct virtqueue		*vqs[VIOMMU_NR_VQS];
vqs               160 drivers/iommu/virtio-iommu.c 	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
vqs               226 drivers/iommu/virtio-iommu.c 	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
vqs               980 drivers/iommu/virtio-iommu.c 	return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, callbacks,
vqs               989 drivers/iommu/virtio-iommu.c 	struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ];
vqs               269 drivers/misc/mic/vop/vop_main.c 	list_for_each_entry_safe(vq, n, &dev->vqs, list)
vqs               384 drivers/misc/mic/vop/vop_main.c 			struct virtqueue *vqs[],
vqs               400 drivers/misc/mic/vop/vop_main.c 			vqs[i] = NULL;
vqs               406 drivers/misc/mic/vop/vop_main.c 		vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i],
vqs               408 drivers/misc/mic/vop/vop_main.c 		if (IS_ERR(vqs[i])) {
vqs               409 drivers/misc/mic/vop/vop_main.c 			err = PTR_ERR(vqs[i]);
vqs               460 drivers/misc/mic/vop/vop_main.c 	list_for_each_entry(vq, &vdev->vdev.vqs, list)
vqs              2331 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
vqs              2333 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c 	if ((!vqs->rx_queues && !vqs->tx_queues) ||
vqs              2334 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c 	    vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
vqs              2335 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c 	    vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
vqs              2350 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c 	struct virtchnl_queue_select *vqs =
vqs              2361 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c 	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
vqs              2366 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c 	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
vqs              2372 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c 	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
vqs              2377 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c 	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
vqs              2410 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c 	struct virtchnl_queue_select *vqs =
vqs              2423 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c 	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
vqs              2428 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c 	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
vqs              2434 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c 	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
vqs              2439 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c 	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
vqs              2505 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c 	struct virtchnl_queue_select *vqs =
vqs              2519 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c 	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
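
In the i40e, iavf and ice hits, vqs is not a virtqueue array but a struct virtchnl_queue_select whose rx_queues/tx_queues members are per-VSI queue bitmaps. The i40e validation above rejects an empty selection and any value with a bit at or above I40E_MAX_VF_QUEUES, since BIT(n) is the smallest bitmap that would name queue index n. A standalone sketch of the same check, with MAX_VF_QUEUES and struct queue_select as stand-ins for the kernel names:

#include <stdbool.h>
#include <stdint.h>

#define MAX_VF_QUEUES	16U		/* stand-in for I40E_MAX_VF_QUEUES */
#define BIT(n)		(1UL << (n))

struct queue_select {			/* mirrors virtchnl_queue_select's bitmaps */
	uint32_t rx_queues;
	uint32_t tx_queues;
};

/* Accept only non-empty selections whose highest bit stays below MAX_VF_QUEUES. */
static bool validate_queue_bitmaps(const struct queue_select *sel)
{
	if ((!sel->rx_queues && !sel->tx_queues) ||
	    sel->rx_queues >= BIT(MAX_VF_QUEUES) ||
	    sel->tx_queues >= BIT(MAX_VF_QUEUES))
		return false;
	return true;
}
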
vqs               301 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c 	struct virtchnl_queue_select vqs;
vqs               310 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c 	vqs.vsi_id = adapter->vsi_res->vsi_id;
vqs               311 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c 	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
vqs               312 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c 	vqs.rx_queues = vqs.tx_queues;
vqs               315 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c 			 (u8 *)&vqs, sizeof(vqs));
vqs               326 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c 	struct virtchnl_queue_select vqs;
vqs               335 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c 	vqs.vsi_id = adapter->vsi_res->vsi_id;
vqs               336 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c 	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
vqs               337 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c 	vqs.rx_queues = vqs.tx_queues;
vqs               340 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c 			 (u8 *)&vqs, sizeof(vqs));
vqs               763 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c 	struct virtchnl_queue_select vqs;
vqs               770 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c 	vqs.vsi_id = adapter->vsi_res->vsi_id;
vqs               772 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c 	if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs,
vqs               773 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c 			     sizeof(vqs)))
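
On the VF side, the iavf lines build the matching request: BIT(n) - 1 produces a mask with bits 0..n-1 set, so tx_queues = rx_queues = BIT(num_active_queues) - 1 selects every active queue in one virtchnl message (with 4 active queues the mask is 0xf). A tiny illustrative helper, not taken from the driver:

#include <stdint.h>

/* Bitmap selecting queues 0..num_active-1; guards the undefined 32-bit shift. */
static uint32_t all_active_queues(unsigned int num_active)
{
	if (num_active >= 32)
		return UINT32_MAX;
	return (1U << num_active) - 1;
}
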
vqs              1874 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 	struct virtchnl_queue_select *vqs =
vqs              1885 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
vqs              1916 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 	struct virtchnl_queue_select *vqs =
vqs              1928 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
vqs              1933 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 	if (!vqs->rx_queues && !vqs->tx_queues) {
vqs              1938 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 	if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
vqs              1939 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 	    vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
vqs              1954 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 	q_map = vqs->rx_queues;
vqs              1956 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
vqs              1978 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 	q_map = vqs->tx_queues;
vqs              1980 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
vqs              2014 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 	struct virtchnl_queue_select *vqs =
vqs              2027 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
vqs              2032 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 	if (!vqs->rx_queues && !vqs->tx_queues) {
vqs              2037 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 	if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
vqs              2038 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 	    vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
vqs              2049 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 	if (vqs->tx_queues) {
vqs              2050 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 		q_map = vqs->tx_queues;
vqs              2056 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
vqs              2082 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 	if (vqs->rx_queues) {
vqs              2083 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 		q_map = vqs->rx_queues;
vqs              2086 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
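
The ice handlers then walk those bitmaps queue by queue, checking each selected queue id against the VSI before starting or stopping its ring. The kernel uses its own bit-iteration helpers for this; a plain-C sketch of the same walk, with enable_queue() as a hypothetical per-queue callback:

#include <stdint.h>

/* Visit each set bit in q_map below max_queues; stop on the first failure. */
static int walk_selected_queues(uint32_t q_map, unsigned int max_queues,
				int (*enable_queue)(unsigned int qid))
{
	unsigned int qid;

	for (qid = 0; qid < max_queues && qid < 32; qid++) {
		if (!(q_map & (UINT32_C(1) << qid)))
			continue;
		if (enable_queue(qid))
			return -1;	/* caller reports the failure back over virtchnl */
	}
	return 0;
}
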
vqs              2754 drivers/net/virtio_net.c 	struct virtqueue **vqs;
vqs              2768 drivers/net/virtio_net.c 	vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
vqs              2769 drivers/net/virtio_net.c 	if (!vqs)
vqs              2803 drivers/net/virtio_net.c 	ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
vqs              2809 drivers/net/virtio_net.c 		vi->cvq = vqs[total_vqs - 1];
vqs              2815 drivers/net/virtio_net.c 		vi->rq[i].vq = vqs[rxq2vq(i)];
vqs              2817 drivers/net/virtio_net.c 		vi->sq[i].vq = vqs[txq2vq(i)];
vqs              2830 drivers/net/virtio_net.c 	kfree(vqs);
vqs               916 drivers/platform/mellanox/mlxbf-tmfifo.c 					struct virtqueue *vqs[],
vqs               950 drivers/platform/mellanox/mlxbf-tmfifo.c 		vqs[i] = vq;
vqs               131 drivers/remoteproc/remoteproc_virtio.c 	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
vqs               144 drivers/remoteproc/remoteproc_virtio.c 				 struct virtqueue *vqs[],
vqs               154 drivers/remoteproc/remoteproc_virtio.c 			vqs[i] = NULL;
vqs               158 drivers/remoteproc/remoteproc_virtio.c 		vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
vqs               160 drivers/remoteproc/remoteproc_virtio.c 		if (IS_ERR(vqs[i])) {
vqs               161 drivers/remoteproc/remoteproc_virtio.c 			ret = PTR_ERR(vqs[i]);
vqs               874 drivers/rpmsg/virtio_rpmsg_bus.c 	struct virtqueue *vqs[2];
vqs               893 drivers/rpmsg/virtio_rpmsg_bus.c 	err = virtio_find_vqs(vdev, 2, vqs, vq_cbs, names, NULL);
vqs               897 drivers/rpmsg/virtio_rpmsg_bus.c 	vrp->rvq = vqs[0];
vqs               898 drivers/rpmsg/virtio_rpmsg_bus.c 	vrp->svq = vqs[1];
vqs               261 drivers/s390/virtio/virtio_ccw.c static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
vqs               289 drivers/s390/virtio/virtio_ccw.c 					(unsigned long)vqs[j]);
vqs               505 drivers/s390/virtio/virtio_ccw.c 	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
vqs               604 drivers/s390/virtio/virtio_ccw.c 					   struct virtqueue *vqs[], int nvqs,
vqs               618 drivers/s390/virtio/virtio_ccw.c 	thinint_area->indicator = get_airq_indicator(vqs, nvqs,
vqs               653 drivers/s390/virtio/virtio_ccw.c 			       struct virtqueue *vqs[],
vqs               670 drivers/s390/virtio/virtio_ccw.c 			vqs[i] = NULL;
vqs               674 drivers/s390/virtio/virtio_ccw.c 		vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i],
vqs               677 drivers/s390/virtio/virtio_ccw.c 		if (IS_ERR(vqs[i])) {
vqs               678 drivers/s390/virtio/virtio_ccw.c 			ret = PTR_ERR(vqs[i]);
vqs               679 drivers/s390/virtio/virtio_ccw.c 			vqs[i] = NULL;
vqs               694 drivers/s390/virtio/virtio_ccw.c 		ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
vqs               783 drivers/scsi/virtio_scsi.c 	struct virtqueue **vqs;
vqs               787 drivers/scsi/virtio_scsi.c 	vqs = kmalloc_array(num_vqs, sizeof(struct virtqueue *), GFP_KERNEL);
vqs               792 drivers/scsi/virtio_scsi.c 	if (!callbacks || !vqs || !names) {
vqs               807 drivers/scsi/virtio_scsi.c 	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
vqs               811 drivers/scsi/virtio_scsi.c 	virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
vqs               812 drivers/scsi/virtio_scsi.c 	virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
vqs               815 drivers/scsi/virtio_scsi.c 				 vqs[i]);
vqs               825 drivers/scsi/virtio_scsi.c 	kfree(vqs);
vqs               133 drivers/vhost/net.c 	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
vqs               274 drivers/vhost/net.c 		kfree(n->vqs[i].ubuf_info);
vqs               275 drivers/vhost/net.c 		n->vqs[i].ubuf_info = NULL;
vqs               288 drivers/vhost/net.c 		n->vqs[i].ubuf_info =
vqs               290 drivers/vhost/net.c 				      sizeof(*n->vqs[i].ubuf_info),
vqs               292 drivers/vhost/net.c 		if  (!n->vqs[i].ubuf_info)
vqs               309 drivers/vhost/net.c 		n->vqs[i].done_idx = 0;
vqs               310 drivers/vhost/net.c 		n->vqs[i].upend_idx = 0;
vqs               311 drivers/vhost/net.c 		n->vqs[i].ubufs = NULL;
vqs               312 drivers/vhost/net.c 		n->vqs[i].vhost_hlen = 0;
vqs               313 drivers/vhost/net.c 		n->vqs[i].sock_hlen = 0;
vqs               314 drivers/vhost/net.c 		vhost_net_buf_init(&n->vqs[i].rxq);
vqs               426 drivers/vhost/net.c 	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
vqs               437 drivers/vhost/net.c 	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
vqs               564 drivers/vhost/net.c 	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
vqs               587 drivers/vhost/net.c 	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
vqs               759 drivers/vhost/net.c 	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
vqs               848 drivers/vhost/net.c 	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
vqs               950 drivers/vhost/net.c 	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
vqs               998 drivers/vhost/net.c 	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
vqs               999 drivers/vhost/net.c 	struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
vqs              1097 drivers/vhost/net.c 	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
vqs              1277 drivers/vhost/net.c 	struct vhost_virtqueue **vqs;
vqs              1285 drivers/vhost/net.c 	vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
vqs              1286 drivers/vhost/net.c 	if (!vqs) {
vqs              1294 drivers/vhost/net.c 		kfree(vqs);
vqs              1298 drivers/vhost/net.c 	n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;
vqs              1302 drivers/vhost/net.c 		kfree(vqs);
vqs              1307 drivers/vhost/net.c 	n->vqs[VHOST_NET_VQ_TX].xdp = xdp;
vqs              1310 drivers/vhost/net.c 	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
vqs              1311 drivers/vhost/net.c 	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
vqs              1312 drivers/vhost/net.c 	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
vqs              1313 drivers/vhost/net.c 	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
vqs              1315 drivers/vhost/net.c 		n->vqs[i].ubufs = NULL;
vqs              1316 drivers/vhost/net.c 		n->vqs[i].ubuf_info = NULL;
vqs              1317 drivers/vhost/net.c 		n->vqs[i].upend_idx = 0;
vqs              1318 drivers/vhost/net.c 		n->vqs[i].done_idx = 0;
vqs              1319 drivers/vhost/net.c 		n->vqs[i].batched_xdp = 0;
vqs              1320 drivers/vhost/net.c 		n->vqs[i].vhost_hlen = 0;
vqs              1321 drivers/vhost/net.c 		n->vqs[i].sock_hlen = 0;
vqs              1322 drivers/vhost/net.c 		n->vqs[i].rx_ring = NULL;
vqs              1323 drivers/vhost/net.c 		vhost_net_buf_init(&n->vqs[i].rxq);
vqs              1325 drivers/vhost/net.c 	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
vqs              1359 drivers/vhost/net.c 	*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
vqs              1360 drivers/vhost/net.c 	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
vqs              1366 drivers/vhost/net.c 	vhost_poll_flush(&n->vqs[index].vq.poll);
vqs              1373 drivers/vhost/net.c 	if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
vqs              1374 drivers/vhost/net.c 		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
vqs              1376 drivers/vhost/net.c 		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
vqs              1378 drivers/vhost/net.c 		vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
vqs              1379 drivers/vhost/net.c 		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
vqs              1381 drivers/vhost/net.c 		atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
vqs              1382 drivers/vhost/net.c 		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
vqs              1406 drivers/vhost/net.c 	kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
vqs              1407 drivers/vhost/net.c 	kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
vqs              1408 drivers/vhost/net.c 	kfree(n->dev.vqs);
vqs              1507 drivers/vhost/net.c 	vq = &n->vqs[index].vq;
vqs              1508 drivers/vhost/net.c 	nvq = &n->vqs[index];
vqs              1620 drivers/vhost/net.c 		mutex_lock(&n->vqs[i].vq.mutex);
vqs              1621 drivers/vhost/net.c 		n->vqs[i].vq.acked_backend_features = features;
vqs              1622 drivers/vhost/net.c 		mutex_unlock(&n->vqs[i].vq.mutex);
vqs              1658 drivers/vhost/net.c 		mutex_lock(&n->vqs[i].vq.mutex);
vqs              1659 drivers/vhost/net.c 		n->vqs[i].vq.acked_features = features;
vqs              1660 drivers/vhost/net.c 		n->vqs[i].vhost_hlen = vhost_hlen;
vqs              1661 drivers/vhost/net.c 		n->vqs[i].sock_hlen = sock_hlen;
vqs              1662 drivers/vhost/net.c 		mutex_unlock(&n->vqs[i].vq.mutex);
vqs               200 drivers/vhost/scsi.c 	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
vqs               247 drivers/vhost/scsi.c 		vq = &vs->vqs[i].vq;
vqs               252 drivers/vhost/scsi.c 		idx = vs->vqs[i].inflight_idx;
vqs               254 drivers/vhost/scsi.c 			old_inflight[i] = &vs->vqs[i].inflights[idx];
vqs               257 drivers/vhost/scsi.c 		vs->vqs[i].inflight_idx = idx ^ 1;
vqs               258 drivers/vhost/scsi.c 		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
vqs               410 drivers/vhost/scsi.c 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
vqs               449 drivers/vhost/scsi.c 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
vqs               500 drivers/vhost/scsi.c 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
vqs               554 drivers/vhost/scsi.c 			vq = q - vs->vqs;
vqs               565 drivers/vhost/scsi.c 		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
vqs              1345 drivers/vhost/scsi.c 	vhost_poll_flush(&vs->vqs[index].vq.poll);
vqs              1401 drivers/vhost/scsi.c 		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
vqs              1461 drivers/vhost/scsi.c 			vq = &vs->vqs[i].vq;
vqs              1502 drivers/vhost/scsi.c 		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
vqs              1548 drivers/vhost/scsi.c 			vq = &vs->vqs[i].vq;
vqs              1590 drivers/vhost/scsi.c 		vq = &vs->vqs[i].vq;
vqs              1602 drivers/vhost/scsi.c 	struct vhost_virtqueue **vqs;
vqs              1612 drivers/vhost/scsi.c 	vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
vqs              1613 drivers/vhost/scsi.c 	if (!vqs)
vqs              1622 drivers/vhost/scsi.c 	vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
vqs              1623 drivers/vhost/scsi.c 	vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
vqs              1624 drivers/vhost/scsi.c 	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
vqs              1625 drivers/vhost/scsi.c 	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
vqs              1627 drivers/vhost/scsi.c 		vqs[i] = &vs->vqs[i].vq;
vqs              1628 drivers/vhost/scsi.c 		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
vqs              1630 drivers/vhost/scsi.c 	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
vqs              1657 drivers/vhost/scsi.c 	kfree(vs->dev.vqs);
vqs              1675 drivers/vhost/scsi.c 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
vqs              1800 drivers/vhost/scsi.c 	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
vqs                38 drivers/vhost/test.c 	struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
vqs                45 drivers/vhost/test.c 	struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ];
vqs               109 drivers/vhost/test.c 	struct vhost_virtqueue **vqs;
vqs               113 drivers/vhost/test.c 	vqs = kmalloc_array(VHOST_TEST_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
vqs               114 drivers/vhost/test.c 	if (!vqs) {
vqs               120 drivers/vhost/test.c 	vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
vqs               121 drivers/vhost/test.c 	n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
vqs               122 drivers/vhost/test.c 	vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
vqs               144 drivers/vhost/test.c 	*privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
vqs               149 drivers/vhost/test.c 	vhost_poll_flush(&n->vqs[index].poll);
vqs               189 drivers/vhost/test.c 		if (!vhost_vq_access_ok(&n->vqs[index])) {
vqs               196 drivers/vhost/test.c 		vq = n->vqs + index;
vqs               204 drivers/vhost/test.c 		r = vhost_vq_init_access(&n->vqs[index]);
vqs               258 drivers/vhost/test.c 	vq = &n->vqs[VHOST_TEST_VQ];
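
The vhost hits (net, scsi, test, vsock) all share one open() shape: the device structure embeds its vhost_virtqueue wrappers, open() allocates a plain array of pointers to them, sets each queue's handle_kick callback, and hands the pointer array to vhost_dev_init() (whose prototype appears in the vhost.h hits below). A hedged sketch modeled on the minimal test device; the my_* names are illustrative, and the vhost_dev_init() call itself is elided because its trailing limit arguments differ between kernel versions.

#include <linux/slab.h>
#include "vhost.h"			/* drivers/vhost/vhost.h, as the real drivers do */

enum { MY_VQ_RX, MY_VQ_TX, MY_VQ_MAX };

struct my_vhost_dev {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[MY_VQ_MAX];	/* embedded per-queue state */
};

/* Assumed to match vhost_work_fn_t. */
static void my_handle_kick(struct vhost_work *work);

static int my_open(struct my_vhost_dev *n)
{
	struct vhost_virtqueue **vqs;
	int i;

	/* The core takes an array of pointers, not the embedded structs, so
	 * devices with differently sized per-queue wrappers share one API. */
	vqs = kmalloc_array(MY_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		return -ENOMEM;

	for (i = 0; i < MY_VQ_MAX; i++) {
		vqs[i] = &n->vqs[i];
		n->vqs[i].handle_kick = my_handle_kick;
	}

	/* vhost_dev_init(&n->dev, vqs, MY_VQ_MAX, ...) follows in the real
	 * drivers; trailing arguments omitted here (version-dependent). */
	return 0;
}
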
vqs               297 drivers/vhost/vhost.c 		__vhost_vq_meta_reset(d->vqs[i]);
vqs               387 drivers/vhost/vhost.c 		vq = dev->vqs[i];
vqs               402 drivers/vhost/vhost.c 		vhost_vq_free_iovecs(dev->vqs[i]);
vqs               411 drivers/vhost/vhost.c 		vhost_vq_free_iovecs(dev->vqs[i]);
vqs               456 drivers/vhost/vhost.c 		    struct vhost_virtqueue **vqs, int nvqs,
vqs               462 drivers/vhost/vhost.c 	dev->vqs = vqs;
vqs               481 drivers/vhost/vhost.c 		vq = dev->vqs[i];
vqs               599 drivers/vhost/vhost.c 		dev->vqs[i]->umem = umem;
vqs               608 drivers/vhost/vhost.c 		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
vqs               609 drivers/vhost/vhost.c 			vhost_poll_stop(&dev->vqs[i]->poll);
vqs               610 drivers/vhost/vhost.c 			vhost_poll_flush(&dev->vqs[i]->poll);
vqs               662 drivers/vhost/vhost.c 		if (dev->vqs[i]->error_ctx)
vqs               663 drivers/vhost/vhost.c 			eventfd_ctx_put(dev->vqs[i]->error_ctx);
vqs               664 drivers/vhost/vhost.c 		if (dev->vqs[i]->kick)
vqs               665 drivers/vhost/vhost.c 			fput(dev->vqs[i]->kick);
vqs               666 drivers/vhost/vhost.c 		if (dev->vqs[i]->call_ctx)
vqs               667 drivers/vhost/vhost.c 			eventfd_ctx_put(dev->vqs[i]->call_ctx);
vqs               668 drivers/vhost/vhost.c 		vhost_vq_reset(dev, dev->vqs[i]);
vqs               761 drivers/vhost/vhost.c 		mutex_lock(&d->vqs[i]->mutex);
vqs               762 drivers/vhost/vhost.c 		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
vqs               764 drivers/vhost/vhost.c 		if (d->vqs[i]->private_data)
vqs               765 drivers/vhost/vhost.c 			ok = vq_memory_access_ok(d->vqs[i]->log_base,
vqs               769 drivers/vhost/vhost.c 		mutex_unlock(&d->vqs[i]->mutex);
vqs               970 drivers/vhost/vhost.c 		mutex_lock_nested(&d->vqs[i]->mutex, i);
vqs               977 drivers/vhost/vhost.c 		mutex_unlock(&d->vqs[i]->mutex);
vqs              1472 drivers/vhost/vhost.c 		mutex_lock(&d->vqs[i]->mutex);
vqs              1473 drivers/vhost/vhost.c 		d->vqs[i]->umem = newumem;
vqs              1474 drivers/vhost/vhost.c 		mutex_unlock(&d->vqs[i]->mutex);
vqs              1604 drivers/vhost/vhost.c 	vq = d->vqs[idx];
vqs              1734 drivers/vhost/vhost.c 		struct vhost_virtqueue *vq = d->vqs[i];
vqs              1783 drivers/vhost/vhost.c 			vq = d->vqs[i];
vqs              1804 drivers/vhost/vhost.c 			mutex_lock(&d->vqs[i]->mutex);
vqs              1805 drivers/vhost/vhost.c 			d->vqs[i]->log_ctx = d->log_ctx;
vqs              1806 drivers/vhost/vhost.c 			mutex_unlock(&d->vqs[i]->mutex);
vqs               162 drivers/vhost/vhost.h 	struct vhost_virtqueue **vqs;
vqs               179 drivers/vhost/vhost.h void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
vqs                42 drivers/vhost/vsock.c 	struct vhost_virtqueue vqs[2];
vqs                87 drivers/vhost/vsock.c 	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
vqs               234 drivers/vhost/vsock.c 	vq = &vsock->vqs[VSOCK_VQ_RX];
vqs               300 drivers/vhost/vsock.c 		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
vqs               378 drivers/vhost/vsock.c 	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
vqs               483 drivers/vhost/vsock.c 	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
vqs               484 drivers/vhost/vsock.c 		vq = &vsock->vqs[i];
vqs               515 drivers/vhost/vsock.c 	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
vqs               516 drivers/vhost/vsock.c 		vq = &vsock->vqs[i];
vqs               538 drivers/vhost/vsock.c 	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
vqs               539 drivers/vhost/vsock.c 		struct vhost_virtqueue *vq = &vsock->vqs[i];
vqs               558 drivers/vhost/vsock.c 	struct vhost_virtqueue **vqs;
vqs               569 drivers/vhost/vsock.c 	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
vqs               570 drivers/vhost/vsock.c 	if (!vqs) {
vqs               579 drivers/vhost/vsock.c 	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
vqs               580 drivers/vhost/vsock.c 	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
vqs               581 drivers/vhost/vsock.c 	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
vqs               582 drivers/vhost/vsock.c 	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
vqs               584 drivers/vhost/vsock.c 	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
vqs               603 drivers/vhost/vsock.c 	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
vqs               604 drivers/vhost/vsock.c 		if (vsock->vqs[i].handle_kick)
vqs               605 drivers/vhost/vsock.c 			vhost_poll_flush(&vsock->vqs[i].poll);
vqs               667 drivers/vhost/vsock.c 	kfree(vsock->dev.vqs);
vqs               718 drivers/vhost/vsock.c 	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
vqs               719 drivers/vhost/vsock.c 		vq = &vsock->vqs[i];
vqs               344 drivers/virtio/virtio.c 	INIT_LIST_HEAD(&dev->vqs);
vqs               465 drivers/virtio/virtio_balloon.c 	struct virtqueue *vqs[VIRTIO_BALLOON_VQ_MAX];
vqs               495 drivers/virtio/virtio_balloon.c 					 vqs, callbacks, names, NULL, NULL);
vqs               499 drivers/virtio/virtio_balloon.c 	vb->inflate_vq = vqs[VIRTIO_BALLOON_VQ_INFLATE];
vqs               500 drivers/virtio/virtio_balloon.c 	vb->deflate_vq = vqs[VIRTIO_BALLOON_VQ_DEFLATE];
vqs               504 drivers/virtio/virtio_balloon.c 		vb->stats_vq = vqs[VIRTIO_BALLOON_VQ_STATS];
vqs               524 drivers/virtio/virtio_balloon.c 		vb->free_page_vq = vqs[VIRTIO_BALLOON_VQ_FREE_PAGE];
vqs               171 drivers/virtio/virtio_input.c 	struct virtqueue *vqs[2];
vqs               177 drivers/virtio/virtio_input.c 	err = virtio_find_vqs(vi->vdev, 2, vqs, cbs, names, NULL);
vqs               180 drivers/virtio/virtio_input.c 	vi->evt = vqs[0];
vqs               181 drivers/virtio/virtio_input.c 	vi->sts = vqs[1];
vqs               342 drivers/virtio/virtio_mmio.c 	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
vqs               459 drivers/virtio/virtio_mmio.c 		       struct virtqueue *vqs[],
vqs               481 drivers/virtio/virtio_mmio.c 			vqs[i] = NULL;
vqs               485 drivers/virtio/virtio_mmio.c 		vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
vqs               487 drivers/virtio/virtio_mmio.c 		if (IS_ERR(vqs[i])) {
vqs               489 drivers/virtio/virtio_mmio.c 			return PTR_ERR(vqs[i]);
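
On the transport side (virtio_uml, vop, remoteproc, virtio_ccw, virtio_mmio and virtio_pci above), the find_vqs() implementation is a loop: slots with a NULL name are skipped, each remaining queue is created by a transport-specific setup helper, and every queue created so far is torn down on the first failure. A skeletal sketch of that loop, assuming a hypothetical my_setup_vq()/my_del_vqs() pair:

#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

/* Hypothetical transport helpers; each real transport provides its own pair. */
static struct virtqueue *my_setup_vq(struct virtio_device *vdev, unsigned int index,
				     vq_callback_t *callback, const char *name,
				     bool ctx);
static void my_del_vqs(struct virtio_device *vdev);

static int my_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
		       struct virtqueue *vqs[], vq_callback_t *callbacks[],
		       const char * const names[], const bool *ctx,
		       struct irq_affinity *desc)
{
	unsigned int i, queue_idx = 0;

	for (i = 0; i < nvqs; i++) {
		if (!names[i]) {	/* caller reserved a slot it does not use */
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = my_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			my_del_vqs(vdev);	/* unwind queues created so far */
			return PTR_ERR(vqs[i]);
		}
	}
	return 0;
}
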
vqs               203 drivers/virtio/virtio_pci_common.c 	vp_dev->vqs[index] = info;
vqs               214 drivers/virtio/virtio_pci_common.c 	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
vqs               232 drivers/virtio/virtio_pci_common.c 	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
vqs               234 drivers/virtio/virtio_pci_common.c 			int v = vp_dev->vqs[vq->index]->msix_vector;
vqs               275 drivers/virtio/virtio_pci_common.c 	kfree(vp_dev->vqs);
vqs               276 drivers/virtio/virtio_pci_common.c 	vp_dev->vqs = NULL;
vqs               280 drivers/virtio/virtio_pci_common.c 		struct virtqueue *vqs[], vq_callback_t *callbacks[],
vqs               289 drivers/virtio/virtio_pci_common.c 	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
vqs               290 drivers/virtio/virtio_pci_common.c 	if (!vp_dev->vqs)
vqs               313 drivers/virtio/virtio_pci_common.c 			vqs[i] = NULL;
vqs               323 drivers/virtio/virtio_pci_common.c 		vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
vqs               326 drivers/virtio/virtio_pci_common.c 		if (IS_ERR(vqs[i])) {
vqs               327 drivers/virtio/virtio_pci_common.c 			err = PTR_ERR(vqs[i]);
vqs               342 drivers/virtio/virtio_pci_common.c 				  vqs[i]);
vqs               354 drivers/virtio/virtio_pci_common.c 		struct virtqueue *vqs[], vq_callback_t *callbacks[],
vqs               360 drivers/virtio/virtio_pci_common.c 	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
vqs               361 drivers/virtio/virtio_pci_common.c 	if (!vp_dev->vqs)
vqs               373 drivers/virtio/virtio_pci_common.c 			vqs[i] = NULL;
vqs               376 drivers/virtio/virtio_pci_common.c 		vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
vqs               379 drivers/virtio/virtio_pci_common.c 		if (IS_ERR(vqs[i])) {
vqs               380 drivers/virtio/virtio_pci_common.c 			err = PTR_ERR(vqs[i]);
vqs               393 drivers/virtio/virtio_pci_common.c 		struct virtqueue *vqs[], vq_callback_t *callbacks[],
vqs               400 drivers/virtio/virtio_pci_common.c 	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, ctx, desc);
vqs               404 drivers/virtio/virtio_pci_common.c 	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc);
vqs               408 drivers/virtio/virtio_pci_common.c 	return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
vqs               427 drivers/virtio/virtio_pci_common.c 	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
vqs               452 drivers/virtio/virtio_pci_common.c 	    vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
vqs               456 drivers/virtio/virtio_pci_common.c 				    vp_dev->vqs[index]->msix_vector);
vqs                80 drivers/virtio/virtio_pci_common.h 	struct virtio_pci_vq_info **vqs;
vqs               131 drivers/virtio/virtio_pci_common.h 		struct virtqueue *vqs[], vq_callback_t *callbacks[],
vqs               403 drivers/virtio/virtio_pci_modern.c 			      struct virtqueue *vqs[],
vqs               410 drivers/virtio/virtio_pci_modern.c 	int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc);
vqs               418 drivers/virtio/virtio_pci_modern.c 	list_for_each_entry(vq, &vdev->vqs, list) {
vqs              1611 drivers/virtio/virtio_ring.c 	list_add_tail(&vq->vq.list, &vdev->vqs);
vqs              2085 drivers/virtio/virtio_ring.c 	list_add_tail(&vq->vq.list, &vdev->vqs);
vqs              2274 drivers/virtio/virtio_ring.c 	list_for_each_entry(_vq, &dev->vqs, list) {
vqs                46 fs/fuse/virtio_fs.c 	struct virtio_fs_vq *vqs;
vqs                65 fs/fuse/virtio_fs.c 	return &fs->vqs[vq->index];
vqs                90 fs/fuse/virtio_fs.c 	kfree(vfs->vqs);
vqs               135 fs/fuse/virtio_fs.c 		fsvq = &fs->vqs[i];
vqs               146 fs/fuse/virtio_fs.c 		fsvq = &fs->vqs[i];
vqs               203 fs/fuse/virtio_fs.c 		struct virtio_fs_vq *fsvq = &fs->vqs[i];
vqs               525 fs/fuse/virtio_fs.c 	struct virtqueue **vqs;
vqs               537 fs/fuse/virtio_fs.c 	fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL);
vqs               538 fs/fuse/virtio_fs.c 	if (!fs->vqs)
vqs               541 fs/fuse/virtio_fs.c 	vqs = kmalloc_array(fs->nvqs, sizeof(vqs[VQ_HIPRIO]), GFP_KERNEL);
vqs               545 fs/fuse/virtio_fs.c 	if (!vqs || !callbacks || !names) {
vqs               551 fs/fuse/virtio_fs.c 	snprintf(fs->vqs[VQ_HIPRIO].name, sizeof(fs->vqs[VQ_HIPRIO].name),
vqs               553 fs/fuse/virtio_fs.c 	names[VQ_HIPRIO] = fs->vqs[VQ_HIPRIO].name;
vqs               554 fs/fuse/virtio_fs.c 	INIT_WORK(&fs->vqs[VQ_HIPRIO].done_work, virtio_fs_hiprio_done_work);
vqs               555 fs/fuse/virtio_fs.c 	INIT_LIST_HEAD(&fs->vqs[VQ_HIPRIO].queued_reqs);
vqs               556 fs/fuse/virtio_fs.c 	INIT_LIST_HEAD(&fs->vqs[VQ_HIPRIO].end_reqs);
vqs               557 fs/fuse/virtio_fs.c 	INIT_DELAYED_WORK(&fs->vqs[VQ_HIPRIO].dispatch_work,
vqs               559 fs/fuse/virtio_fs.c 	spin_lock_init(&fs->vqs[VQ_HIPRIO].lock);
vqs               563 fs/fuse/virtio_fs.c 		spin_lock_init(&fs->vqs[i].lock);
vqs               564 fs/fuse/virtio_fs.c 		INIT_WORK(&fs->vqs[i].done_work, virtio_fs_requests_done_work);
vqs               565 fs/fuse/virtio_fs.c 		INIT_DELAYED_WORK(&fs->vqs[i].dispatch_work,
vqs               567 fs/fuse/virtio_fs.c 		INIT_LIST_HEAD(&fs->vqs[i].queued_reqs);
vqs               568 fs/fuse/virtio_fs.c 		INIT_LIST_HEAD(&fs->vqs[i].end_reqs);
vqs               569 fs/fuse/virtio_fs.c 		snprintf(fs->vqs[i].name, sizeof(fs->vqs[i].name),
vqs               572 fs/fuse/virtio_fs.c 		names[i] = fs->vqs[i].name;
vqs               575 fs/fuse/virtio_fs.c 	ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL);
vqs               580 fs/fuse/virtio_fs.c 		fs->vqs[i].vq = vqs[i];
vqs               586 fs/fuse/virtio_fs.c 	kfree(vqs);
vqs               588 fs/fuse/virtio_fs.c 		kfree(fs->vqs);
vqs               647 fs/fuse/virtio_fs.c 		fsvq = &fs->vqs[i];
vqs               726 fs/fuse/virtio_fs.c 	fsvq = &fs->vqs[VQ_HIPRIO];
vqs              1001 fs/fuse/virtio_fs.c 	fsvq = &fs->vqs[queue_id];
vqs              1069 fs/fuse/virtio_fs.c 		struct virtio_fs_vq *fsvq = &fs->vqs[i];
vqs              1076 fs/fuse/virtio_fs.c 	ctx.fudptr = (void **)&fs->vqs[VQ_REQUEST].fud;
vqs              1081 fs/fuse/virtio_fs.c 	fc = fs->vqs[VQ_REQUEST].fud->fc;
vqs              1084 fs/fuse/virtio_fs.c 		struct virtio_fs_vq *fsvq = &fs->vqs[i];
vqs              1115 fs/fuse/virtio_fs.c 	fsvq = &vfs->vqs[VQ_HIPRIO];
vqs               118 include/linux/virtio.h 	struct list_head vqs;
vqs               146 include/linux/virtio.h 	list_for_each_entry(vq, &vdev->vqs, list)
vqs                80 include/linux/virtio_config.h 			struct virtqueue *vqs[], vq_callback_t *callbacks[],
vqs               193 include/linux/virtio_config.h 			struct virtqueue *vqs[], vq_callback_t *callbacks[],
vqs               197 include/linux/virtio_config.h 	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, desc);
vqs               202 include/linux/virtio_config.h 			struct virtqueue *vqs[], vq_callback_t *callbacks[],
vqs               206 include/linux/virtio_config.h 	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, ctx,
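
The include/linux/virtio_config.h hits are the inline wrappers most drivers call: virtio_find_vqs() forwards to vdev->config->find_vqs() with a NULL per-queue ctx array, and virtio_find_vqs_ctx() lets the caller supply one. For a device with a small fixed queue count, a stack array is enough, as virtgpu, rpmsg and virtio_input do above; a minimal hedged sketch (the callback and queue names here are illustrative):

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void my_evt_done(struct virtqueue *vq);	/* hypothetical callbacks */
static void my_sts_done(struct virtqueue *vq);

static int my_probe_vqs(struct virtio_device *vdev,
			struct virtqueue **evt_vq, struct virtqueue **sts_vq)
{
	struct virtqueue *vqs[2];
	vq_callback_t *callbacks[] = { my_evt_done, my_sts_done };
	static const char * const names[] = { "events", "status" };
	int err;

	err = virtio_find_vqs(vdev, 2, vqs, callbacks, names, NULL);
	if (err)
		return err;

	*evt_vq = vqs[0];
	*sts_vq = vqs[1];
	return 0;
}
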
vqs               545 net/sched/sch_gred.c static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs)
vqs               550 net/sched/sch_gred.c 	nla_for_each_nested(attr, vqs, rem) {
vqs               604 net/sched/sch_gred.c 			     struct nlattr *vqs, struct netlink_ext_ack *extack)
vqs               609 net/sched/sch_gred.c 	err = nla_validate_nested_deprecated(vqs, TCA_GRED_VQ_ENTRY_MAX,
vqs               614 net/sched/sch_gred.c 	nla_for_each_nested(attr, vqs, rem) {
vqs               760 net/sched/sch_gred.c 	struct nlattr *parms, *vqs, *opts = NULL;
vqs               842 net/sched/sch_gred.c 	vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST);
vqs               843 net/sched/sch_gred.c 	if (!vqs)
vqs               891 net/sched/sch_gred.c 	nla_nest_end(skb, vqs);
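
The sch_gred.c hits are unrelated to virtio: there, vqs is the nested netlink attribute (TCA_GRED_VQ_LIST) carrying per-virtual-queue GRED parameters. It is validated with nla_validate_nested_deprecated(), walked entry by entry with nla_for_each_nested(), and rebuilt on dump between nla_nest_start_noflag() and nla_nest_end(). A hedged sketch of the parse-side walk only; struct my_table and apply_one_vq() are hypothetical:

#include <net/netlink.h>
#include <linux/pkt_sched.h>

struct my_table;				/* hypothetical qdisc state */
static void apply_one_vq(struct my_table *table, struct nlattr *entry);

/* Walk the nested TCA_GRED_VQ_LIST attribute and apply each entry. */
static void apply_vq_list(struct my_table *table, struct nlattr *vqs)
{
	struct nlattr *attr;
	int rem;

	nla_for_each_nested(attr, vqs, rem) {
		if (nla_type(attr) == TCA_GRED_VQ_ENTRY)
			apply_one_vq(table, attr);
	}
}
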
vqs                30 net/vmw_vsock/virtio_transport.c 	struct virtqueue *vqs[VSOCK_VQ_MAX];
vqs               144 net/vmw_vsock/virtio_transport.c 	vq = vsock->vqs[VSOCK_VQ_TX];
vqs               186 net/vmw_vsock/virtio_transport.c 			struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
vqs               273 net/vmw_vsock/virtio_transport.c 		struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
vqs               297 net/vmw_vsock/virtio_transport.c 	vq = vsock->vqs[VSOCK_VQ_RX];
vqs               337 net/vmw_vsock/virtio_transport.c 	vq = vsock->vqs[VSOCK_VQ_TX];
vqs               364 net/vmw_vsock/virtio_transport.c 	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
vqs               379 net/vmw_vsock/virtio_transport.c 	vq = vsock->vqs[VSOCK_VQ_RX];
vqs               433 net/vmw_vsock/virtio_transport.c 	vq = vsock->vqs[VSOCK_VQ_EVENT];
vqs               451 net/vmw_vsock/virtio_transport.c 	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
vqs               491 net/vmw_vsock/virtio_transport.c 	vq = vsock->vqs[VSOCK_VQ_EVENT];
vqs               511 net/vmw_vsock/virtio_transport.c 	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
vqs               624 net/vmw_vsock/virtio_transport.c 			      vsock->vqs, callbacks, names,
vqs               709 net/vmw_vsock/virtio_transport.c 	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
vqs               714 net/vmw_vsock/virtio_transport.c 	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
vqs                39 tools/virtio/virtio_test.c 	struct vq_info vqs[1];
vqs                93 tools/virtio/virtio_test.c 	struct vq_info *info = &dev->vqs[dev->nvqs];
vqs               301 tools/virtio/virtio_test.c 	run_test(&dev, &dev.vqs[0], delayed, 0x100000);