Lines matching refs: vq — identifier cross-reference of the Linux split-ring implementation (drivers/virtio/virtio_ring.c)
32 dev_err(&(_vq)->vq.vdev->dev, \
33 "%s:"fmt, (_vq)->vq.name, ##args); \
41 (_vq)->vq.name, (_vq)->in_use); \
49 dev_err(&_vq->vq.vdev->dev, \
50 "%s:"fmt, (_vq)->vq.name, ##args); \
53 #define START_USE(vq) argument
54 #define END_USE(vq) argument
58 struct virtqueue vq; member
90 bool (*notify)(struct virtqueue *vq);
105 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
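The to_vvq() helper on line 105 recovers the private struct vring_virtqueue from the embedded struct virtqueue handle (the `vq` member on line 58) that drivers hold. A minimal userspace sketch of the container_of pattern it relies on, with illustrative struct names rather than the kernel's:

/* Sketch of the container_of pattern behind to_vvq(); struct names
 * here are illustrative, not from virtio_ring.c. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int id; };
struct outer { long priv; struct inner in; };

int main(void)
{
	struct outer o = { .priv = 42, .in = { .id = 7 } };
	struct inner *ip = &o.in;            /* what callers see */
	struct outer *op = container_of(ip, struct outer, in);

	printf("priv=%ld id=%d\n", op->priv, ip->id);
	return 0;
}
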
137 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_add() local
144 START_USE(vq); in virtqueue_add()
148 if (unlikely(vq->broken)) { in virtqueue_add()
149 END_USE(vq); in virtqueue_add()
158 if (vq->last_add_time_valid) in virtqueue_add()
159 WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time)) in virtqueue_add()
161 vq->last_add_time = now; in virtqueue_add()
162 vq->last_add_time_valid = true; in virtqueue_add()
166 BUG_ON(total_sg > vq->vring.num); in virtqueue_add()
169 head = vq->free_head; in virtqueue_add()
173 if (vq->indirect && total_sg > 1 && vq->vq.num_free) in virtqueue_add()
180 vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT); in virtqueue_add()
181 vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, virt_to_phys(desc)); in virtqueue_add()
184 vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc)); in virtqueue_add()
191 desc = vq->vring.desc; in virtqueue_add()
197 if (vq->vq.num_free < descs_used) { in virtqueue_add()
199 descs_used, vq->vq.num_free); in virtqueue_add()
204 vq->notify(&vq->vq); in virtqueue_add()
205 END_USE(vq); in virtqueue_add()
210 vq->vq.num_free -= descs_used; in virtqueue_add()
235 vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next); in virtqueue_add()
237 vq->free_head = i; in virtqueue_add()
240 vq->data[head] = data; in virtqueue_add()
244 avail = vq->avail_idx_shadow & (vq->vring.num - 1); in virtqueue_add()
245 vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head); in virtqueue_add()
249 virtio_wmb(vq->weak_barriers); in virtqueue_add()
250 vq->avail_idx_shadow++; in virtqueue_add()
251 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow); in virtqueue_add()
252 vq->num_added++; in virtqueue_add()
254 pr_debug("Added buffer head %i to %p\n", head, vq); in virtqueue_add()
255 END_USE(vq); in virtqueue_add()
259 if (unlikely(vq->num_added == (1 << 16) - 1)) in virtqueue_add()
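Two details in the virtqueue_add() fragment above are easy to misread. The masking on line 244 (`avail_idx_shadow & (vring.num - 1)`) substitutes for a modulo and is valid only because ring sizes are powers of two; the shadow index itself runs free as a 16-bit counter, hence the wrap guard against (1 << 16) - 1 on line 259. A standalone check of the equivalence, under the power-of-two assumption:

/* `idx & (num - 1)` equals `idx % num` only when num is a power of
 * two, which the vring layout requires. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t num = 256;                  /* ring size, power of two */

	for (uint32_t idx = 0; idx < 4u * num; idx++)
		assert((idx & (num - 1)) == (idx % num));
	return 0;
}
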
311 int virtqueue_add_outbuf(struct virtqueue *vq, in virtqueue_add_outbuf() argument
316 return virtqueue_add(vq, &sg, num, 1, 0, data, gfp); in virtqueue_add_outbuf()
333 int virtqueue_add_inbuf(struct virtqueue *vq, in virtqueue_add_inbuf() argument
338 return virtqueue_add(vq, &sg, num, 0, 1, data, gfp); in virtqueue_add_inbuf()
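virtqueue_add_outbuf() and virtqueue_add_inbuf() on lines 311 and 333 are thin single-scatterlist wrappers around virtqueue_add(). A hypothetical driver-side sketch of queuing one outgoing buffer and kicking the device; `my_vq` and `req` are placeholders and error handling is abbreviated:

/* Hypothetical driver path: queue one outgoing buffer and kick. */
#include <linux/scatterlist.h>
#include <linux/virtio.h>

static int send_one(struct virtqueue *my_vq, void *req, unsigned int len)
{
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, req, len);
	err = virtqueue_add_outbuf(my_vq, &sg, 1, req, GFP_ATOMIC);
	if (err)
		return err;          /* e.g. -ENOSPC when the ring is full */
	virtqueue_kick(my_vq);
	return 0;
}
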
355 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_kick_prepare() local
359 START_USE(vq); in virtqueue_kick_prepare()
362 virtio_mb(vq->weak_barriers); in virtqueue_kick_prepare()
364 old = vq->avail_idx_shadow - vq->num_added; in virtqueue_kick_prepare()
365 new = vq->avail_idx_shadow; in virtqueue_kick_prepare()
366 vq->num_added = 0; in virtqueue_kick_prepare()
369 if (vq->last_add_time_valid) { in virtqueue_kick_prepare()
371 vq->last_add_time)) > 100); in virtqueue_kick_prepare()
373 vq->last_add_time_valid = false; in virtqueue_kick_prepare()
376 if (vq->event) { in virtqueue_kick_prepare()
377 needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)), in virtqueue_kick_prepare()
380 needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY)); in virtqueue_kick_prepare()
382 END_USE(vq); in virtqueue_kick_prepare()
397 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_notify() local
399 if (unlikely(vq->broken)) in virtqueue_notify()
403 if (!vq->notify(_vq)) { in virtqueue_notify()
404 vq->broken = true; in virtqueue_notify()
423 bool virtqueue_kick(struct virtqueue *vq) in virtqueue_kick() argument
425 if (virtqueue_kick_prepare(vq)) in virtqueue_kick()
426 return virtqueue_notify(vq); in virtqueue_kick()
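virtqueue_kick() on line 423 is just virtqueue_kick_prepare() followed by virtqueue_notify(). The split exists so a driver can make the notify decision under its lock but issue the potentially slow, VM-exiting notify outside it. A sketch of that pattern, with a placeholder `struct my_dev`:

/* Split kick: decide under the driver lock, notify outside it. */
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>

struct my_dev {                      /* placeholder driver state */
	spinlock_t lock;
	struct virtqueue *vq;
};

static int send_locked(struct my_dev *dev, struct scatterlist *sg, void *req)
{
	unsigned long flags;
	bool notify;
	int err;

	spin_lock_irqsave(&dev->lock, flags);
	err = virtqueue_add_outbuf(dev->vq, sg, 1, req, GFP_ATOMIC);
	notify = !err && virtqueue_kick_prepare(dev->vq);
	spin_unlock_irqrestore(&dev->lock, flags);

	if (notify)
		virtqueue_notify(dev->vq);   /* may trap to the hypervisor */
	return err;
}
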
431 static void detach_buf(struct vring_virtqueue *vq, unsigned int head) in detach_buf() argument
436 vq->data[head] = NULL; in detach_buf()
442 if (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)) in detach_buf()
443 kfree(phys_to_virt(virtio64_to_cpu(vq->vq.vdev, vq->vring.desc[i].addr))); in detach_buf()
445 while (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT)) { in detach_buf()
446 i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next); in detach_buf()
447 vq->vq.num_free++; in detach_buf()
450 vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head); in detach_buf()
451 vq->free_head = head; in detach_buf()
453 vq->vq.num_free++; in detach_buf()
456 static inline bool more_used(const struct vring_virtqueue *vq) in more_used() argument
458 return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx); in more_used()
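more_used() on line 456 compares two free-running 16-bit counters: the driver's last_used_idx against the used->idx the device publishes. They are only masked by (vring.num - 1) when used as array indices (line 500), so the comparison stays correct across u16 wraparound, as this standalone check illustrates:

/* Free-running u16 indices survive wraparound. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t last_used_idx = 0xfffe;   /* driver's consumed position */
	uint16_t used_idx = 0x0001;        /* device published 3 more */

	assert(last_used_idx != used_idx); /* "more used buffers pending" */
	assert((uint16_t)(used_idx - last_used_idx) == 3);
	return 0;
}
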
479 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_buf() local
484 START_USE(vq); in virtqueue_get_buf()
486 if (unlikely(vq->broken)) { in virtqueue_get_buf()
487 END_USE(vq); in virtqueue_get_buf()
491 if (!more_used(vq)) { in virtqueue_get_buf()
493 END_USE(vq); in virtqueue_get_buf()
498 virtio_rmb(vq->weak_barriers); in virtqueue_get_buf()
500 last_used = (vq->last_used_idx & (vq->vring.num - 1)); in virtqueue_get_buf()
501 i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id); in virtqueue_get_buf()
502 *len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len); in virtqueue_get_buf()
504 if (unlikely(i >= vq->vring.num)) { in virtqueue_get_buf()
505 BAD_RING(vq, "id %u out of range\n", i); in virtqueue_get_buf()
508 if (unlikely(!vq->data[i])) { in virtqueue_get_buf()
509 BAD_RING(vq, "id %u is not a head!\n", i); in virtqueue_get_buf()
514 ret = vq->data[i]; in virtqueue_get_buf()
515 detach_buf(vq, i); in virtqueue_get_buf()
516 vq->last_used_idx++; in virtqueue_get_buf()
520 if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) { in virtqueue_get_buf()
521 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx); in virtqueue_get_buf()
522 virtio_mb(vq->weak_barriers); in virtqueue_get_buf()
526 vq->last_add_time_valid = false; in virtqueue_get_buf()
529 END_USE(vq); in virtqueue_get_buf()
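On the completion side, virtqueue_get_buf() hands back the opaque cookie stored in vq->data[] on line 240 together with the byte count the device wrote. A hypothetical handler that drains everything pending; complete_request() is a placeholder for driver logic:

/* Hypothetical completion handler: drain all finished buffers. */
#include <linux/virtio.h>

void complete_request(void *req, unsigned int len);  /* hypothetical */

static void drain_used(struct virtqueue *vq)
{
	unsigned int len;
	void *req;

	/* NULL means nothing pending (or a bad ring id was flagged). */
	while ((req = virtqueue_get_buf(vq, &len)) != NULL)
		complete_request(req, len);
}
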
545 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_disable_cb() local
547 if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) { in virtqueue_disable_cb()
548 vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_disable_cb()
549 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); in virtqueue_disable_cb()
569 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_prepare() local
572 START_USE(vq); in virtqueue_enable_cb_prepare()
579 if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { in virtqueue_enable_cb_prepare()
580 vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_enable_cb_prepare()
581 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); in virtqueue_enable_cb_prepare()
583 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx); in virtqueue_enable_cb_prepare()
584 END_USE(vq); in virtqueue_enable_cb_prepare()
600 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_poll() local
602 virtio_mb(vq->weak_barriers); in virtqueue_poll()
603 return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx); in virtqueue_poll()
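virtqueue_enable_cb_prepare() re-arms the callback and returns an opaque snapshot of last_used_idx; virtqueue_poll() on line 600 then detects buffers that raced in after the re-arm, closing the missed-wakeup window. A sketch of the resulting re-enable-or-keep-polling shape, roughly what virtio-net's NAPI completion path does:

/* Race-free callback re-enable: re-arm, then check for stragglers. */
#include <linux/virtio.h>

static bool reenable_or_keep_polling(struct virtqueue *vq)
{
	unsigned int last;

	last = virtqueue_enable_cb_prepare(vq); /* re-arm, note position */
	if (virtqueue_poll(vq, last)) {         /* work raced in? */
		virtqueue_disable_cb(vq);
		return true;                    /* caller keeps polling */
	}
	return false;                           /* safe to wait for irq */
}
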
640 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_delayed() local
643 START_USE(vq); in virtqueue_enable_cb_delayed()
650 if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { in virtqueue_enable_cb_delayed()
651 vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_enable_cb_delayed()
652 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); in virtqueue_enable_cb_delayed()
655 bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4; in virtqueue_enable_cb_delayed()
656 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs); in virtqueue_enable_cb_delayed()
657 virtio_mb(vq->weak_barriers); in virtqueue_enable_cb_delayed()
658 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) { in virtqueue_enable_cb_delayed()
659 END_USE(vq); in virtqueue_enable_cb_delayed()
663 END_USE(vq); in virtqueue_enable_cb_delayed()
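virtqueue_enable_cb_delayed() sets the used-event index so the device interrupts only after roughly three quarters of the in-flight buffers complete (line 655); the recheck on line 658 catches completions that raced past the new event index. The trigger condition is vring_need_event() from the virtio_ring UAPI header, reproduced standalone here to show the u16 window arithmetic:

/* vring_need_event(): fire when event_idx falls in [old, new_idx),
 * computed entirely in wrapping u16 arithmetic. */
#include <assert.h>
#include <stdint.h>

static int need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

int main(void)
{
	/* Driver asked to be notified once idx moves past 12; idx was 10. */
	assert(!need_event(12, 11, 10));  /* 11: not yet past the event */
	assert(need_event(12, 13, 10));   /* 13: crossed 12, fire */
	return 0;
}
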
678 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_detach_unused_buf() local
682 START_USE(vq); in virtqueue_detach_unused_buf()
684 for (i = 0; i < vq->vring.num; i++) { in virtqueue_detach_unused_buf()
685 if (!vq->data[i]) in virtqueue_detach_unused_buf()
688 buf = vq->data[i]; in virtqueue_detach_unused_buf()
689 detach_buf(vq, i); in virtqueue_detach_unused_buf()
690 vq->avail_idx_shadow--; in virtqueue_detach_unused_buf()
691 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow); in virtqueue_detach_unused_buf()
692 END_USE(vq); in virtqueue_detach_unused_buf()
696 BUG_ON(vq->vq.num_free != vq->vring.num); in virtqueue_detach_unused_buf()
698 END_USE(vq); in virtqueue_detach_unused_buf()
705 struct vring_virtqueue *vq = to_vvq(_vq); in vring_interrupt() local
707 if (!more_used(vq)) { in vring_interrupt()
708 pr_debug("virtqueue interrupt with no work for %p\n", vq); in vring_interrupt()
712 if (unlikely(vq->broken)) in vring_interrupt()
715 pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback); in vring_interrupt()
716 if (vq->vq.callback) in vring_interrupt()
717 vq->vq.callback(&vq->vq); in vring_interrupt()
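vring_interrupt() on line 705 is the transport's entry point: it filters spurious and broken-queue interrupts, then invokes the driver callback. A hypothetical per-queue handler registered with request_irq() can forward to it directly:

/* Hypothetical per-queue irq handler forwarding to vring_interrupt(). */
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_ring.h>

static irqreturn_t my_vq_irq(int irq, void *opaque)
{
	struct virtqueue *vq = opaque;

	return vring_interrupt(irq, vq);   /* IRQ_HANDLED or IRQ_NONE */
}
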
733 struct vring_virtqueue *vq; in vring_new_virtqueue() local
742 vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL); in vring_new_virtqueue()
743 if (!vq) in vring_new_virtqueue()
746 vring_init(&vq->vring, num, pages, vring_align); in vring_new_virtqueue()
747 vq->vq.callback = callback; in vring_new_virtqueue()
748 vq->vq.vdev = vdev; in vring_new_virtqueue()
749 vq->vq.name = name; in vring_new_virtqueue()
750 vq->vq.num_free = num; in vring_new_virtqueue()
751 vq->vq.index = index; in vring_new_virtqueue()
752 vq->notify = notify; in vring_new_virtqueue()
753 vq->weak_barriers = weak_barriers; in vring_new_virtqueue()
754 vq->broken = false; in vring_new_virtqueue()
755 vq->last_used_idx = 0; in vring_new_virtqueue()
756 vq->avail_flags_shadow = 0; in vring_new_virtqueue()
757 vq->avail_idx_shadow = 0; in vring_new_virtqueue()
758 vq->num_added = 0; in vring_new_virtqueue()
759 list_add_tail(&vq->vq.list, &vdev->vqs); in vring_new_virtqueue()
761 vq->in_use = false; in vring_new_virtqueue()
762 vq->last_add_time_valid = false; in vring_new_virtqueue()
765 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC); in vring_new_virtqueue()
766 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); in vring_new_virtqueue()
770 vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; in vring_new_virtqueue()
771 vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow); in vring_new_virtqueue()
775 vq->free_head = 0; in vring_new_virtqueue()
777 vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1); in vring_new_virtqueue()
778 vq->data[i] = NULL; in vring_new_virtqueue()
780 vq->data[i] = NULL; in vring_new_virtqueue()
782 return &vq->vq; in vring_new_virtqueue()
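vring_new_virtqueue() stitches the pieces together: it lays the ring out over caller-provided pages with vring_init(), seeds the descriptor free list, and latches the INDIRECT_DESC and EVENT_IDX feature bits. A hedged transport-side sketch of calling it with the parameter order this version of the file uses (index, num, vring_align, vdev, weak_barriers, pages, notify, callback, name); the notify hook and page allocation are illustrative:

/* Sketch: allocate ring pages and create one split-ring virtqueue. */
#include <linux/mm.h>
#include <linux/virtio.h>
#include <linux/virtio_ring.h>

static bool my_notify(struct virtqueue *vq)
{
	/* Poke the device doorbell here; returning false marks vq broken. */
	return true;
}

static struct virtqueue *make_vq(struct virtio_device *vdev,
				 void (*cb)(struct virtqueue *))
{
	unsigned int num = 256;    /* ring size: must be a power of two */
	void *pages;

	pages = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(vring_size(num, PAGE_SIZE)));
	if (!pages)
		return NULL;

	return vring_new_virtqueue(0 /* index */, num, PAGE_SIZE, vdev,
				   true /* weak_barriers */, pages,
				   my_notify, cb, "sketch-vq");
}
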
786 void vring_del_virtqueue(struct virtqueue *vq) in vring_del_virtqueue() argument
788 list_del(&vq->list); in vring_del_virtqueue()
789 kfree(to_vvq(vq)); in vring_del_virtqueue()
824 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_vring_size() local
826 return vq->vring.num; in virtqueue_get_vring_size()
832 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_is_broken() local
834 return vq->broken; in virtqueue_is_broken()
847 struct vring_virtqueue *vq = to_vvq(_vq); in virtio_break_device() local
848 vq->broken = true; in virtio_break_device()
855 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_avail() local
857 return vq->vring.avail; in virtqueue_get_avail()
863 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_used() local
865 return vq->vring.used; in virtqueue_get_used()