Searched refs:_vq (Results 1 - 3 of 3) sorted by relevance

/linux-4.1.27/drivers/virtio/
virtio_ring.c
30 #define BAD_RING(_vq, fmt, args...) \
32 dev_err(&(_vq)->vq.vdev->dev, \
33 "%s:"fmt, (_vq)->vq.name, ##args); \
37 #define START_USE(_vq) \
39 if ((_vq)->in_use) \
41 (_vq)->vq.name, (_vq)->in_use); \
42 (_vq)->in_use = __LINE__; \
44 #define END_USE(_vq) \
45 do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
47 #define BAD_RING(_vq, fmt, args...) \
49 dev_err(&_vq->vq.vdev->dev, \
50 "%s:"fmt, (_vq)->vq.name, ##args); \
51 (_vq)->broken = true; \
99 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
101 static struct vring_desc *alloc_indirect(struct virtqueue *_vq, alloc_indirect() argument
119 desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1); alloc_indirect()
123 static inline int virtqueue_add(struct virtqueue *_vq, virtqueue_add() argument
131 struct vring_virtqueue *vq = to_vvq(_vq); virtqueue_add()
168 desc = alloc_indirect(_vq, total_sg, gfp); virtqueue_add()
174 vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT); virtqueue_add()
175 vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, virt_to_phys(desc)); virtqueue_add()
178 vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc)); virtqueue_add()
208 desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT); virtqueue_add()
209 desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg)); virtqueue_add()
210 desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length); virtqueue_add()
212 i = virtio16_to_cpu(_vq->vdev, desc[i].next); virtqueue_add()
217 desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE); virtqueue_add()
218 desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg)); virtqueue_add()
219 desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length); virtqueue_add()
221 i = virtio16_to_cpu(_vq->vdev, desc[i].next); virtqueue_add()
225 desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT); virtqueue_add()
229 vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next); virtqueue_add()
238 avail = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) & (vq->vring.num - 1); virtqueue_add()
239 vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head); virtqueue_add()
244 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) + 1); virtqueue_add()
253 virtqueue_kick(_vq); virtqueue_add()
272 int virtqueue_add_sgs(struct virtqueue *_vq, virtqueue_add_sgs() argument
287 return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp); virtqueue_add_sgs()
346 bool virtqueue_kick_prepare(struct virtqueue *_vq) virtqueue_kick_prepare() argument
348 struct vring_virtqueue *vq = to_vvq(_vq); virtqueue_kick_prepare()
357 old = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->num_added; virtqueue_kick_prepare()
358 new = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx); virtqueue_kick_prepare()
370 needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)), virtqueue_kick_prepare()
373 needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY)); virtqueue_kick_prepare()
388 bool virtqueue_notify(struct virtqueue *_vq) virtqueue_notify() argument
390 struct vring_virtqueue *vq = to_vvq(_vq); virtqueue_notify()
396 if (!vq->notify(_vq)) { virtqueue_notify()
470 void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) virtqueue_get_buf() argument
472 struct vring_virtqueue *vq = to_vvq(_vq); virtqueue_get_buf()
494 i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id); virtqueue_get_buf()
495 *len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len); virtqueue_get_buf()
513 if (!(vq->vring.avail->flags & cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT))) { virtqueue_get_buf()
514 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx); virtqueue_get_buf()
536 void virtqueue_disable_cb(struct virtqueue *_vq) virtqueue_disable_cb() argument
538 struct vring_virtqueue *vq = to_vvq(_vq); virtqueue_disable_cb()
540 vq->vring.avail->flags |= cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT); virtqueue_disable_cb()
556 unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq) virtqueue_enable_cb_prepare() argument
558 struct vring_virtqueue *vq = to_vvq(_vq); virtqueue_enable_cb_prepare()
568 vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT); virtqueue_enable_cb_prepare()
569 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx); virtqueue_enable_cb_prepare()
584 bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) virtqueue_poll() argument
586 struct vring_virtqueue *vq = to_vvq(_vq); virtqueue_poll()
589 return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx); virtqueue_poll()
604 bool virtqueue_enable_cb(struct virtqueue *_vq) virtqueue_enable_cb() argument
606 unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq); virtqueue_enable_cb()
607 return !virtqueue_poll(_vq, last_used_idx); virtqueue_enable_cb()
624 bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) virtqueue_enable_cb_delayed() argument
626 struct vring_virtqueue *vq = to_vvq(_vq); virtqueue_enable_cb_delayed()
636 vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT); virtqueue_enable_cb_delayed()
638 bufs = (u16)(virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->last_used_idx) * 3 / 4; virtqueue_enable_cb_delayed()
639 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs); virtqueue_enable_cb_delayed()
641 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) { virtqueue_enable_cb_delayed()
659 void *virtqueue_detach_unused_buf(struct virtqueue *_vq) virtqueue_detach_unused_buf() argument
661 struct vring_virtqueue *vq = to_vvq(_vq); virtqueue_detach_unused_buf()
673 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - 1); virtqueue_detach_unused_buf()
685 irqreturn_t vring_interrupt(int irq, void *_vq) vring_interrupt() argument
687 struct vring_virtqueue *vq = to_vvq(_vq); vring_interrupt()
799 unsigned int virtqueue_get_vring_size(struct virtqueue *_vq) virtqueue_get_vring_size() argument
802 struct vring_virtqueue *vq = to_vvq(_vq); virtqueue_get_vring_size()
808 bool virtqueue_is_broken(struct virtqueue *_vq) virtqueue_is_broken() argument
810 struct vring_virtqueue *vq = to_vvq(_vq); virtqueue_is_broken()
822 struct virtqueue *_vq; virtio_break_device() local
824 list_for_each_entry(_vq, &dev->vqs, list) { virtio_break_device()
825 struct vring_virtqueue *vq = to_vvq(_vq); virtio_break_device()
831 void *virtqueue_get_avail(struct virtqueue *_vq) virtqueue_get_avail() argument
833 struct vring_virtqueue *vq = to_vvq(_vq); virtqueue_get_avail()
839 void *virtqueue_get_used(struct virtqueue *_vq) virtqueue_get_used() argument
841 struct vring_virtqueue *vq = to_vvq(_vq); virtqueue_get_used()
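Taken together, the virtio_ring.c hits above cover the whole split-ring API a driver sees: virtqueue_add_sgs() to post buffers, virtqueue_kick_prepare()/virtqueue_notify() (wrapped by virtqueue_kick()) to notify the host, virtqueue_get_buf() to reclaim used buffers, and virtqueue_disable_cb()/virtqueue_enable_cb() to throttle callbacks. Below is a minimal sketch of that usage pattern; demo_req, demo_submit and demo_done are invented names for illustration, and only the virtqueue_* and sg_* calls are the real API shown in the hits.

/*
 * Hypothetical driver-side sketch: post one readable + one writable buffer,
 * kick the device, and drain completions from the virtqueue callback.
 */
#include <linux/scatterlist.h>
#include <linux/virtio.h>

struct demo_req {                       /* hypothetical per-request state */
        u8 out[64];                     /* driver -> device */
        u8 in[64];                      /* device -> driver */
};

static int demo_submit(struct virtqueue *vq, struct demo_req *req)
{
        struct scatterlist out_sg, in_sg;
        struct scatterlist *sgs[] = { &out_sg, &in_sg };
        int err;

        sg_init_one(&out_sg, req->out, sizeof(req->out));
        sg_init_one(&in_sg, req->in, sizeof(req->in));

        /* 1 out sg list followed by 1 in sg list; req is the cookie that
         * virtqueue_get_buf() hands back when the device is done with it. */
        err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
        if (err)
                return err;             /* e.g. -ENOSPC when the ring is full */

        virtqueue_kick(vq);             /* kick_prepare + notify, as in the hits */
        return 0;
}

/* Virtqueue callback: drain used buffers, then re-enable callbacks and
 * re-check, closing the race that virtqueue_enable_cb()'s return value reports. */
static void demo_done(struct virtqueue *vq)
{
        struct demo_req *req;
        unsigned int len;

        do {
                virtqueue_disable_cb(vq);
                while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
                        /* complete req; len = bytes the device wrote into in[] */
                }
        } while (!virtqueue_enable_cb(vq));
}

In a real driver, demo_done would be registered as the queue's callback when the transport sets the queue up; vring_interrupt() (next hit) is what eventually invokes that callback.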
/linux-4.1.27/include/linux/
virtio_ring.h
66 irqreturn_t vring_interrupt(int irq, void *_vq);
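This header hit is the hook used by virtio transport code: vring_interrupt() is the entry point a transport's hardware interrupt handler forwards to, and it runs the queue's callback if the ring has new used buffers. A hedged sketch of that wiring follows; demo_transport, demo_irq_handler and demo_setup_irq are invented names, while vring_interrupt() and request_irq() are the real kernel interfaces.

#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_ring.h>

struct demo_transport {                 /* hypothetical single-queue transport */
        struct virtqueue *vq;
};

static irqreturn_t demo_irq_handler(int irq, void *opaque)
{
        struct demo_transport *tp = opaque;

        /* vring_interrupt() returns IRQ_NONE when the ring has no new used
         * buffers, so a shared interrupt line is reported correctly. */
        return vring_interrupt(irq, tp->vq);
}

static int demo_setup_irq(struct demo_transport *tp, int irq)
{
        return request_irq(irq, demo_irq_handler, IRQF_SHARED,
                           "demo-virtio", tp);
}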
/linux-4.1.27/tools/lguest/
lguest.c
1124 static int do_thread(void *_vq) do_thread() argument
1126 struct virtqueue *vq = _vq; do_thread()
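The lguest hit shows the userspace counterpart of the same naming convention: the virtqueue pointer is passed through the untyped void * argument of a thread entry point and cast back inside. A small pthread-based sketch of that pattern is below; lguest itself starts its queue threads differently (do_thread's int return matches a clone()-style thread function), and service_vq() here is a placeholder, not lguest's actual service loop.

#include <pthread.h>
#include <stddef.h>

struct virtqueue;                       /* opaque here; lguest defines its own */

static void service_vq(struct virtqueue *vq)
{
        /* placeholder: wait on the queue's notification and handle buffers */
        (void)vq;
}

static void *vq_thread(void *_vq)
{
        struct virtqueue *vq = _vq;     /* recover the typed pointer */

        service_vq(vq);
        return NULL;
}

/* usage: pthread_create(&tid, NULL, vq_thread, vq); */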

Completed in 141 milliseconds