Lines Matching refs:vdev

32 		dev_err(&(_vq)->vq.vdev->dev,			\
49 dev_err(&_vq->vq.vdev->dev, \
119 desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1); in alloc_indirect()
174 vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT); in virtqueue_add()
175 vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, virt_to_phys(desc)); in virtqueue_add()
178 vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc)); in virtqueue_add()
208 desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT); in virtqueue_add()
209 desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg)); in virtqueue_add()
210 desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length); in virtqueue_add()
212 i = virtio16_to_cpu(_vq->vdev, desc[i].next); in virtqueue_add()
217 desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE); in virtqueue_add()
218 desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg)); in virtqueue_add()
219 desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length); in virtqueue_add()
221 i = virtio16_to_cpu(_vq->vdev, desc[i].next); in virtqueue_add()
225 desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT); in virtqueue_add()
229 vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next); in virtqueue_add()
238 avail = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) & (vq->vring.num - 1); in virtqueue_add()
239 vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head); in virtqueue_add()
244 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) + 1); in virtqueue_add()
357 old = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->num_added; in virtqueue_kick_prepare()
358 new = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx); in virtqueue_kick_prepare()
370 needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)), in virtqueue_kick_prepare()
373 needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY)); in virtqueue_kick_prepare()
435 if (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)) in detach_buf()
436 kfree(phys_to_virt(virtio64_to_cpu(vq->vq.vdev, vq->vring.desc[i].addr))); in detach_buf()
438 while (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT)) { in detach_buf()
439 i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next); in detach_buf()
443 vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head); in detach_buf()
451 return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx); in more_used()
494 i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id); in virtqueue_get_buf()
495 *len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len); in virtqueue_get_buf()
513 if (!(vq->vring.avail->flags & cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT))) { in virtqueue_get_buf()
514 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx); in virtqueue_get_buf()
540 vq->vring.avail->flags |= cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT); in virtqueue_disable_cb()
568 vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT); in virtqueue_enable_cb_prepare()
569 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx); in virtqueue_enable_cb_prepare()
589 return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx); in virtqueue_poll()
636 vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT); in virtqueue_enable_cb_delayed()
638 bufs = (u16)(virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->last_used_idx) * 3 / 4; in virtqueue_enable_cb_delayed()
639 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs); in virtqueue_enable_cb_delayed()
641 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) { in virtqueue_enable_cb_delayed()
673 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - 1); in virtqueue_detach_unused_buf()
708 struct virtio_device *vdev, in vring_new_virtqueue() argument
720 dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num); in vring_new_virtqueue()
730 vq->vq.vdev = vdev; in vring_new_virtqueue()
739 list_add_tail(&vq->vq.list, &vdev->vqs); in vring_new_virtqueue()
745 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC); in vring_new_virtqueue()
746 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); in vring_new_virtqueue()
750 vq->vring.avail->flags |= cpu_to_virtio16(vdev, VRING_AVAIL_F_NO_INTERRUPT); in vring_new_virtqueue()
755 vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1); in vring_new_virtqueue()
772 void vring_transport_features(struct virtio_device *vdev) in vring_transport_features() argument
786 __virtio_clear_bit(vdev, i); in vring_transport_features()