Lines Matching refs:vq

42 #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num]) argument
43 #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num]) argument
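
These two macros encode the VIRTIO_RING_F_EVENT_IDX layout: the guest's used_event word sits immediately after the avail ring's num entries, and the host's avail_event immediately after the used ring's num entries, which is why each macro indexes ring[vq->num] of the other party's structure. A minimal standalone model of those offsets (simplified userspace types; names chosen here for illustration):

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    /* Simplified legacy (split) vring layouts. */
    struct avail {
        uint16_t flags;
        uint16_t idx;
        uint16_t ring[];         /* num entries, then used_event */
    };
    struct used_elem { uint32_t id; uint32_t len; };
    struct used {
        uint16_t flags;
        uint16_t idx;
        struct used_elem ring[]; /* num entries, then avail_event */
    };

    int main(void)
    {
        unsigned num = 256;
        /* used_event: one u16 past the last avail ring entry,
         * i.e. &avail->ring[num] -- what vhost_used_event() computes. */
        printf("used_event offset:  %zu\n",
               offsetof(struct avail, ring) + num * sizeof(uint16_t));
        /* avail_event: right past the last used ring entry,
         * i.e. &used->ring[num] -- what vhost_avail_event() computes. */
        printf("avail_event offset: %zu\n",
               offsetof(struct used, ring) + num * sizeof(struct used_elem));
        return 0;
    }
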
46 static void vhost_vq_reset_user_be(struct vhost_virtqueue *vq) in vhost_vq_reset_user_be() argument
48 vq->user_be = !virtio_legacy_is_little_endian(); in vhost_vq_reset_user_be()
51 static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp) in vhost_set_vring_endian() argument
55 if (vq->private_data) in vhost_set_vring_endian()
65 vq->user_be = s.num; in vhost_set_vring_endian()
70 static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx, in vhost_get_vring_endian() argument
75 .num = vq->user_be in vhost_get_vring_endian()
84 static void vhost_init_is_le(struct vhost_virtqueue *vq) in vhost_init_is_le() argument
91 vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be; in vhost_init_is_le()
94 static void vhost_vq_reset_user_be(struct vhost_virtqueue *vq) in vhost_vq_reset_user_be() argument
98 static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp) in vhost_set_vring_endian() argument
103 static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx, in vhost_get_vring_endian() argument
109 static void vhost_init_is_le(struct vhost_virtqueue *vq) in vhost_init_is_le() argument
111 if (vhost_has_feature(vq, VIRTIO_F_VERSION_1)) in vhost_init_is_le()
112 vq->is_le = true; in vhost_init_is_le()
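
Taken together, the two #ifdef variants above reduce to one rule for vq->is_le: little-endian if the guest negotiated VIRTIO_F_VERSION_1; otherwise, with CONFIG_VHOST_CROSS_ENDIAN_LEGACY, whatever userspace requested through VHOST_SET_VRING_ENDIAN (the user_be flag), and without it, the host's native legacy byte order. A standalone model of that decision (illustrative only; parameter names are mine):

    #include <stdbool.h>
    #include <stdio.h>

    /* Model of vhost_init_is_le(): cross_endian mirrors
     * CONFIG_VHOST_CROSS_ENDIAN_LEGACY; host_le stands in for
     * virtio_legacy_is_little_endian(). */
    static bool vq_is_le(bool version_1, bool user_be,
                         bool cross_endian, bool host_le)
    {
        if (cross_endian)
            return version_1 || !user_be; /* legacy endianness is tunable */
        return version_1 || host_le;      /* legacy follows host byte order */
    }

    int main(void)
    {
        printf("v1 guest, user_be set:     %d\n", vq_is_le(true, true, true, true));
        printf("legacy guest, user_be set: %d\n", vq_is_le(false, true, true, true));
        return 0;
    }
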
255 struct vhost_virtqueue *vq) in vhost_vq_reset() argument
257 vq->num = 1; in vhost_vq_reset()
258 vq->desc = NULL; in vhost_vq_reset()
259 vq->avail = NULL; in vhost_vq_reset()
260 vq->used = NULL; in vhost_vq_reset()
261 vq->last_avail_idx = 0; in vhost_vq_reset()
262 vq->avail_idx = 0; in vhost_vq_reset()
263 vq->last_used_idx = 0; in vhost_vq_reset()
264 vq->signalled_used = 0; in vhost_vq_reset()
265 vq->signalled_used_valid = false; in vhost_vq_reset()
266 vq->used_flags = 0; in vhost_vq_reset()
267 vq->log_used = false; in vhost_vq_reset()
268 vq->log_addr = -1ull; in vhost_vq_reset()
269 vq->private_data = NULL; in vhost_vq_reset()
270 vq->acked_features = 0; in vhost_vq_reset()
271 vq->log_base = NULL; in vhost_vq_reset()
272 vq->error_ctx = NULL; in vhost_vq_reset()
273 vq->error = NULL; in vhost_vq_reset()
274 vq->kick = NULL; in vhost_vq_reset()
275 vq->call_ctx = NULL; in vhost_vq_reset()
276 vq->call = NULL; in vhost_vq_reset()
277 vq->log_ctx = NULL; in vhost_vq_reset()
278 vq->memory = NULL; in vhost_vq_reset()
279 vq->is_le = virtio_legacy_is_little_endian(); in vhost_vq_reset()
280 vhost_vq_reset_user_be(vq); in vhost_vq_reset()
332 static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq) in vhost_vq_free_iovecs() argument
334 kfree(vq->indirect); in vhost_vq_free_iovecs()
335 vq->indirect = NULL; in vhost_vq_free_iovecs()
336 kfree(vq->log); in vhost_vq_free_iovecs()
337 vq->log = NULL; in vhost_vq_free_iovecs()
338 kfree(vq->heads); in vhost_vq_free_iovecs()
339 vq->heads = NULL; in vhost_vq_free_iovecs()
345 struct vhost_virtqueue *vq; in vhost_dev_alloc_iovecs() local
349 vq = dev->vqs[i]; in vhost_dev_alloc_iovecs()
350 vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV, in vhost_dev_alloc_iovecs()
352 vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL); in vhost_dev_alloc_iovecs()
353 vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL); in vhost_dev_alloc_iovecs()
354 if (!vq->indirect || !vq->log || !vq->heads) in vhost_dev_alloc_iovecs()
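
vhost_dev_alloc_iovecs() allocates three UIO_MAXIOV-sized arrays per queue and tests all three pointers at once; the error path elided by this search has to unwind every queue touched so far, which is safe because vhost_vq_free_iovecs() NULLs the pointers and kfree(NULL) is a no-op. A userspace model of that alloc-all-then-unwind pattern (malloc stands in for kmalloc; sizes are placeholders):

    #include <stdio.h>
    #include <stdlib.h>

    #define NVQS 4

    struct vq { void *indirect, *log, *heads; };

    static void vq_free_iovecs(struct vq *vq)
    {
        free(vq->indirect); vq->indirect = NULL;
        free(vq->log);      vq->log = NULL;
        free(vq->heads);    vq->heads = NULL; /* free(NULL) is a no-op */
    }

    /* Model of the alloc-all-then-check pattern shown above. */
    static int alloc_iovecs(struct vq *vqs, int nvqs)
    {
        int i;
        for (i = 0; i < nvqs; ++i) {
            vqs[i].indirect = malloc(1024); /* placeholder sizes */
            vqs[i].log      = malloc(1024);
            vqs[i].heads    = malloc(1024);
            if (!vqs[i].indirect || !vqs[i].log || !vqs[i].heads)
                goto err;
        }
        return 0;
    err:
        /* Unwind every queue touched so far, incl. the partial one. */
        for (; i >= 0; --i)
            vq_free_iovecs(&vqs[i]);
        return -1;
    }

    int main(void)
    {
        struct vq vqs[NVQS] = {0};
        printf("alloc: %d\n", alloc_iovecs(vqs, NVQS));
        for (int i = 0; i < NVQS; ++i)
            vq_free_iovecs(&vqs[i]);
        return 0;
    }
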
376 struct vhost_virtqueue *vq; in vhost_dev_init() local
391 vq = dev->vqs[i]; in vhost_dev_init()
392 vq->log = NULL; in vhost_dev_init()
393 vq->indirect = NULL; in vhost_dev_init()
394 vq->heads = NULL; in vhost_dev_init()
395 vq->dev = dev; in vhost_dev_init()
396 mutex_init(&vq->mutex); in vhost_dev_init()
397 vhost_vq_reset(dev, vq); in vhost_dev_init()
398 if (vq->handle_kick) in vhost_dev_init()
399 vhost_poll_init(&vq->poll, vq->handle_kick, in vhost_dev_init()
628 static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num, in vq_access_ok() argument
633 size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; in vq_access_ok()
651 static int vq_log_access_ok(struct vhost_virtqueue *vq, in vq_log_access_ok() argument
654 size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; in vq_log_access_ok()
656 return vq_memory_access_ok(log_base, vq->memory, in vq_log_access_ok()
657 vhost_has_feature(vq, VHOST_F_LOG_ALL)) && in vq_log_access_ok()
658 (!vq->log_used || log_access_ok(log_base, vq->log_addr, in vq_log_access_ok()
659 sizeof *vq->used + in vq_log_access_ok()
660 vq->num * sizeof *vq->used->ring + s)); in vq_log_access_ok()
665 int vhost_vq_access_ok(struct vhost_virtqueue *vq) in vhost_vq_access_ok() argument
667 return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) && in vhost_vq_access_ok()
668 vq_log_access_ok(vq, vq->log_base); in vhost_vq_access_ok()
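
The `s = ... ? 2 : 0` term in vq_access_ok() and vq_log_access_ok() accounts for VIRTIO_RING_F_EVENT_IDX growing both rings by one trailing __virtio16 (the used_event and avail_event words from the macros at the top), so the access checks must cover those two extra bytes. A standalone sketch of the size arithmetic being validated (simplified struct layouts):

    #include <stdint.h>
    #include <stdio.h>

    struct desc { uint64_t addr; uint32_t len; uint16_t flags, next; };
    struct used_elem { uint32_t id; uint32_t len; };

    int main(void)
    {
        unsigned num = 256;
        int event_idx = 1;
        size_t s = event_idx ? 2 : 0; /* trailing event word */

        size_t desc_sz  = num * sizeof(struct desc);
        size_t avail_sz = 2 * sizeof(uint16_t)                /* flags, idx */
                        + num * sizeof(uint16_t) + s;         /* ring + used_event */
        size_t used_sz  = 2 * sizeof(uint16_t)                /* flags, idx */
                        + num * sizeof(struct used_elem) + s; /* ring + avail_event */

        printf("desc %zu, avail %zu, used %zu bytes\n",
               desc_sz, avail_sz, used_sz);
        return 0;
    }
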
739 struct vhost_virtqueue *vq; in vhost_vring_ioctl() local
752 vq = d->vqs[idx]; in vhost_vring_ioctl()
754 mutex_lock(&vq->mutex); in vhost_vring_ioctl()
760 if (vq->private_data) { in vhost_vring_ioctl()
772 vq->num = s.num; in vhost_vring_ioctl()
777 if (vq->private_data) { in vhost_vring_ioctl()
789 vq->last_avail_idx = s.num; in vhost_vring_ioctl()
791 vq->avail_idx = vq->last_avail_idx; in vhost_vring_ioctl()
795 s.num = vq->last_avail_idx; in vhost_vring_ioctl()
818 BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE); in vhost_vring_ioctl()
819 BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE); in vhost_vring_ioctl()
830 if (vq->private_data) { in vhost_vring_ioctl()
831 if (!vq_access_ok(vq, vq->num, in vhost_vring_ioctl()
841 !log_access_ok(vq->log_base, a.log_guest_addr, in vhost_vring_ioctl()
842 sizeof *vq->used + in vhost_vring_ioctl()
843 vq->num * sizeof *vq->used->ring)) { in vhost_vring_ioctl()
849 vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG)); in vhost_vring_ioctl()
850 vq->desc = (void __user *)(unsigned long)a.desc_user_addr; in vhost_vring_ioctl()
851 vq->avail = (void __user *)(unsigned long)a.avail_user_addr; in vhost_vring_ioctl()
852 vq->log_addr = a.log_guest_addr; in vhost_vring_ioctl()
853 vq->used = (void __user *)(unsigned long)a.used_user_addr; in vhost_vring_ioctl()
865 if (eventfp != vq->kick) { in vhost_vring_ioctl()
866 pollstop = (filep = vq->kick) != NULL; in vhost_vring_ioctl()
867 pollstart = (vq->kick = eventfp) != NULL; in vhost_vring_ioctl()
881 if (eventfp != vq->call) { in vhost_vring_ioctl()
882 filep = vq->call; in vhost_vring_ioctl()
883 ctx = vq->call_ctx; in vhost_vring_ioctl()
884 vq->call = eventfp; in vhost_vring_ioctl()
885 vq->call_ctx = eventfp ? in vhost_vring_ioctl()
900 if (eventfp != vq->error) { in vhost_vring_ioctl()
901 filep = vq->error; in vhost_vring_ioctl()
902 vq->error = eventfp; in vhost_vring_ioctl()
903 ctx = vq->error_ctx; in vhost_vring_ioctl()
904 vq->error_ctx = eventfp ? in vhost_vring_ioctl()
910 r = vhost_set_vring_endian(vq, argp); in vhost_vring_ioctl()
913 r = vhost_get_vring_endian(vq, idx, argp); in vhost_vring_ioctl()
919 if (pollstop && vq->handle_kick) in vhost_vring_ioctl()
920 vhost_poll_stop(&vq->poll); in vhost_vring_ioctl()
927 if (pollstart && vq->handle_kick) in vhost_vring_ioctl()
928 r = vhost_poll_start(&vq->poll, vq->kick); in vhost_vring_ioctl()
930 mutex_unlock(&vq->mutex); in vhost_vring_ioctl()
932 if (pollstop && vq->handle_kick) in vhost_vring_ioctl()
933 vhost_poll_flush(&vq->poll); in vhost_vring_ioctl()
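
vhost_vring_ioctl() is the kernel half of a client's per-ring setup sequence. A hedged userspace sketch of the matching calls, assuming vhost_fd was opened from /dev/vhost-net and VHOST_SET_OWNER plus VHOST_SET_MEM_TABLE were already issued, and that desc/avail/used point at caller-allocated ring memory (error handling trimmed):

    #include <sys/ioctl.h>
    #include <sys/eventfd.h>
    #include <linux/vhost.h>

    /* Sketch: program one virtqueue of a vhost device. */
    int setup_vring(int vhost_fd, unsigned idx, unsigned num,
                    void *desc, void *avail, void *used)
    {
        struct vhost_vring_state state = { .index = idx, .num = num };
        struct vhost_vring_addr addr = {
            .index = idx,
            .desc_user_addr  = (unsigned long)desc,
            .avail_user_addr = (unsigned long)avail,
            .used_user_addr  = (unsigned long)used,
        };
        struct vhost_vring_file kick = { .index = idx, .fd = eventfd(0, 0) };
        struct vhost_vring_file call = { .index = idx, .fd = eventfd(0, 0) };

        ioctl(vhost_fd, VHOST_SET_VRING_NUM, &state);  /* vq->num */
        state.num = 0;
        ioctl(vhost_fd, VHOST_SET_VRING_BASE, &state); /* vq->last_avail_idx */
        ioctl(vhost_fd, VHOST_SET_VRING_ADDR, &addr);  /* vq->desc/avail/used */
        ioctl(vhost_fd, VHOST_SET_VRING_KICK, &kick);  /* vq->kick */
        ioctl(vhost_fd, VHOST_SET_VRING_CALL, &call);  /* vq->call */
        return 0;
    }

Note how this lines up with the checks above: VHOST_SET_VRING_NUM and VHOST_SET_VRING_BASE are refused while vq->private_data (the backend) is set, and VHOST_SET_VRING_ADDR is validated with vq_access_ok() before the pointers are installed.
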
972 struct vhost_virtqueue *vq; in vhost_dev_ioctl() local
974 vq = d->vqs[i]; in vhost_dev_ioctl()
975 mutex_lock(&vq->mutex); in vhost_dev_ioctl()
977 if (vq->private_data && !vq_log_access_ok(vq, base)) in vhost_dev_ioctl()
980 vq->log_base = base; in vhost_dev_ioctl()
981 mutex_unlock(&vq->mutex); in vhost_dev_ioctl()
1092 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, in vhost_log_write() argument
1101 r = log_write(vq->log_base, log[i].addr, l); in vhost_log_write()
1106 if (vq->log_ctx) in vhost_log_write()
1107 eventfd_signal(vq->log_ctx, 1); in vhost_log_write()
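
vhost_log_write() walks the vhost_log entries gathered during descriptor translation and, for each, marks the touched guest pages dirty in the userspace bitmap at vq->log_base before signalling the log eventfd so migration code can rescan. A standalone model of the per-page bit arithmetic, assuming the usual 4 KiB log granularity (the in-kernel log_write() additionally does word-at-a-time user access; this is simplified):

    #include <stdint.h>
    #include <stdio.h>

    #define VHOST_PAGE_SIZE 0x1000 /* one log bit covers 4 KiB */

    /* Model of log_write(): set one bit per dirtied guest page
     * in a bitmap indexed by guest physical address. */
    static void log_write(uint8_t *log, uint64_t addr, uint64_t len)
    {
        uint64_t page = addr / VHOST_PAGE_SIZE;
        uint64_t last = (addr + len - 1) / VHOST_PAGE_SIZE;
        for (; page <= last; ++page)
            log[page / 8] |= 1u << (page % 8);
    }

    int main(void)
    {
        uint8_t log[16] = {0};
        log_write(log, 0x1ff0, 0x20);        /* straddles pages 1 and 2 */
        printf("log[0] = 0x%02x\n", log[0]); /* bits 1 and 2 set: 0x06 */
        return 0;
    }
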
1117 static int vhost_update_used_flags(struct vhost_virtqueue *vq) in vhost_update_used_flags() argument
1120 if (__put_user(cpu_to_vhost16(vq, vq->used_flags), &vq->used->flags) < 0) in vhost_update_used_flags()
1122 if (unlikely(vq->log_used)) { in vhost_update_used_flags()
1126 used = &vq->used->flags; in vhost_update_used_flags()
1127 log_write(vq->log_base, vq->log_addr + in vhost_update_used_flags()
1128 (used - (void __user *)vq->used), in vhost_update_used_flags()
1129 sizeof vq->used->flags); in vhost_update_used_flags()
1130 if (vq->log_ctx) in vhost_update_used_flags()
1131 eventfd_signal(vq->log_ctx, 1); in vhost_update_used_flags()
1136 static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event) in vhost_update_avail_event() argument
1138 if (__put_user(cpu_to_vhost16(vq, vq->avail_idx), vhost_avail_event(vq))) in vhost_update_avail_event()
1140 if (unlikely(vq->log_used)) { in vhost_update_avail_event()
1145 used = vhost_avail_event(vq); in vhost_update_avail_event()
1146 log_write(vq->log_base, vq->log_addr + in vhost_update_avail_event()
1147 (used - (void __user *)vq->used), in vhost_update_avail_event()
1148 sizeof *vhost_avail_event(vq)); in vhost_update_avail_event()
1149 if (vq->log_ctx) in vhost_update_avail_event()
1150 eventfd_signal(vq->log_ctx, 1); in vhost_update_avail_event()
1155 int vhost_init_used(struct vhost_virtqueue *vq) in vhost_init_used() argument
1159 if (!vq->private_data) { in vhost_init_used()
1160 vq->is_le = virtio_legacy_is_little_endian(); in vhost_init_used()
1164 vhost_init_is_le(vq); in vhost_init_used()
1166 r = vhost_update_used_flags(vq); in vhost_init_used()
1169 vq->signalled_used_valid = false; in vhost_init_used()
1170 if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) in vhost_init_used()
1172 r = __get_user(last_used_idx, &vq->used->idx); in vhost_init_used()
1175 vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx); in vhost_init_used()
1180 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len, in translate_desc() argument
1189 mem = vq->memory; in translate_desc()
1217 static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc) in next_desc() argument
1222 if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT))) in next_desc()
1226 next = vhost16_to_cpu(vq, desc->next); in next_desc()
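
translate_desc() resolves a guest-physical range against vq->memory, the region table userspace installs with VHOST_SET_MEM_TABLE, mapping it onto userspace addresses for the iovecs; next_desc() then yields the following chain index, or -1 once VRING_DESC_F_NEXT is clear. A standalone model of the region lookup (simplified to one region per range; the kernel version can split a range across several iov entries):

    #include <stdint.h>
    #include <stdio.h>

    struct region {                  /* mirrors struct vhost_memory_region */
        uint64_t guest_phys_addr;
        uint64_t memory_size;
        uint64_t userspace_addr;
    };

    /* Model of translate_desc(): map one guest-physical range to a
     * host-virtual address, or fail if no region covers it. */
    static int64_t translate(const struct region *mem, int nregions,
                             uint64_t addr, uint64_t len)
    {
        for (int i = 0; i < nregions; ++i) {
            const struct region *r = &mem[i];
            if (addr >= r->guest_phys_addr &&
                addr + len <= r->guest_phys_addr + r->memory_size)
                return r->userspace_addr + (addr - r->guest_phys_addr);
        }
        return -1; /* -EFAULT in the kernel */
    }

    int main(void)
    {
        struct region mem[] = { { 0x0, 0x100000, 0x7f0000000000 } };
        printf("0x2000 -> %llx\n",
               (unsigned long long)translate(mem, 1, 0x2000, 0x1000));
        return 0;
    }
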
1235 static int get_indirect(struct vhost_virtqueue *vq, in get_indirect() argument
1243 u32 len = vhost32_to_cpu(vq, indirect->len); in get_indirect()
1249 vq_err(vq, "Invalid length in indirect descriptor: " in get_indirect()
1256 ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect, in get_indirect()
1259 vq_err(vq, "Translation failure %d in indirect.\n", ret); in get_indirect()
1262 iov_iter_init(&from, READ, vq->indirect, ret, len); in get_indirect()
1272 vq_err(vq, "Indirect buffer length too big: %d\n", in get_indirect()
1280 vq_err(vq, "Loop detected: last one at %u " in get_indirect()
1287 vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n", in get_indirect()
1288 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); in get_indirect()
1291 if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) { in get_indirect()
1292 vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n", in get_indirect()
1293 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); in get_indirect()
1297 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr), in get_indirect()
1298 vhost32_to_cpu(vq, desc.len), iov + iov_count, in get_indirect()
1301 vq_err(vq, "Translation failure %d indirect idx %d\n", in get_indirect()
1306 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) { in get_indirect()
1309 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); in get_indirect()
1310 log[*log_num].len = vhost32_to_cpu(vq, desc.len); in get_indirect()
1317 vq_err(vq, "Indirect descriptor " in get_indirect()
1323 } while ((i = next_desc(vq, &desc)) != -1); in get_indirect()
1335 int vhost_get_vq_desc(struct vhost_virtqueue *vq, in vhost_get_vq_desc() argument
1348 last_avail_idx = vq->last_avail_idx; in vhost_get_vq_desc()
1349 if (unlikely(__get_user(avail_idx, &vq->avail->idx))) { in vhost_get_vq_desc()
1350 vq_err(vq, "Failed to access avail idx at %p\n", in vhost_get_vq_desc()
1351 &vq->avail->idx); in vhost_get_vq_desc()
1354 vq->avail_idx = vhost16_to_cpu(vq, avail_idx); in vhost_get_vq_desc()
1356 if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) { in vhost_get_vq_desc()
1357 vq_err(vq, "Guest moved used index from %u to %u", in vhost_get_vq_desc()
1358 last_avail_idx, vq->avail_idx); in vhost_get_vq_desc()
1363 if (vq->avail_idx == last_avail_idx) in vhost_get_vq_desc()
1364 return vq->num; in vhost_get_vq_desc()
1372 &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) { in vhost_get_vq_desc()
1373 vq_err(vq, "Failed to read head: idx %d address %p\n", in vhost_get_vq_desc()
1375 &vq->avail->ring[last_avail_idx % vq->num]); in vhost_get_vq_desc()
1379 head = vhost16_to_cpu(vq, ring_head); in vhost_get_vq_desc()
1382 if (unlikely(head >= vq->num)) { in vhost_get_vq_desc()
1383 vq_err(vq, "Guest says index %u > %u is available", in vhost_get_vq_desc()
1384 head, vq->num); in vhost_get_vq_desc()
1396 if (unlikely(i >= vq->num)) { in vhost_get_vq_desc()
1397 vq_err(vq, "Desc index is %u > %u, head = %u", in vhost_get_vq_desc()
1398 i, vq->num, head); in vhost_get_vq_desc()
1401 if (unlikely(++found > vq->num)) { in vhost_get_vq_desc()
1402 vq_err(vq, "Loop detected: last one at %u " in vhost_get_vq_desc()
1404 i, vq->num, head); in vhost_get_vq_desc()
1407 ret = __copy_from_user(&desc, vq->desc + i, sizeof desc); in vhost_get_vq_desc()
1409 vq_err(vq, "Failed to get descriptor: idx %d addr %p\n", in vhost_get_vq_desc()
1410 i, vq->desc + i); in vhost_get_vq_desc()
1413 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) { in vhost_get_vq_desc()
1414 ret = get_indirect(vq, iov, iov_size, in vhost_get_vq_desc()
1418 vq_err(vq, "Failure detected " in vhost_get_vq_desc()
1425 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr), in vhost_get_vq_desc()
1426 vhost32_to_cpu(vq, desc.len), iov + iov_count, in vhost_get_vq_desc()
1429 vq_err(vq, "Translation failure %d descriptor idx %d\n", in vhost_get_vq_desc()
1433 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) { in vhost_get_vq_desc()
1438 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); in vhost_get_vq_desc()
1439 log[*log_num].len = vhost32_to_cpu(vq, desc.len); in vhost_get_vq_desc()
1446 vq_err(vq, "Descriptor has out after in: " in vhost_get_vq_desc()
1452 } while ((i = next_desc(vq, &desc)) != -1); in vhost_get_vq_desc()
1455 vq->last_avail_idx++; in vhost_get_vq_desc()
1459 BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY)); in vhost_get_vq_desc()
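
Two of the checks above defend against a hostile guest: every descriptor index must stay below vq->num, and `++found > vq->num` bounds the walk so a cyclic next chain cannot spin the host forever. A standalone model of that bounded chain walk:

    #include <stdint.h>
    #include <stdio.h>

    #define F_NEXT 1 /* VRING_DESC_F_NEXT */

    struct desc { uint64_t addr; uint32_t len; uint16_t flags, next; };

    /* Model of the walk in vhost_get_vq_desc(): follow next pointers,
     * failing on out-of-range indices or cycles. */
    static int walk_chain(const struct desc *table, unsigned num, unsigned head)
    {
        unsigned i = head, found = 0;
        for (;;) {
            if (i >= num)
                return -1;         /* "Desc index is %u > %u" */
            if (++found > num)
                return -1;         /* "Loop detected" */
            /* ...translate table[i].addr/len into iovecs here... */
            if (!(table[i].flags & F_NEXT))
                return (int)found; /* chain length */
            i = table[i].next;     /* what next_desc() returns */
        }
    }

    int main(void)
    {
        struct desc t[4] = {
            { 0, 0, F_NEXT, 1 }, { 0, 0, F_NEXT, 0 }, /* 0 -> 1 -> 0: cycle */
        };
        printf("cycle walk: %d\n", walk_chain(t, 4, 0)); /* prints -1 */
        return 0;
    }
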
1465 void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n) in vhost_discard_vq_desc() argument
1467 vq->last_avail_idx -= n; in vhost_discard_vq_desc()
1473 int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) in vhost_add_used() argument
1476 cpu_to_vhost32(vq, head), in vhost_add_used()
1477 cpu_to_vhost32(vq, len) in vhost_add_used()
1480 return vhost_add_used_n(vq, &heads, 1); in vhost_add_used()
1484 static int __vhost_add_used_n(struct vhost_virtqueue *vq, in __vhost_add_used_n() argument
1492 start = vq->last_used_idx & (vq->num - 1); in __vhost_add_used_n()
1493 used = vq->used->ring + start; in __vhost_add_used_n()
1496 vq_err(vq, "Failed to write used id"); in __vhost_add_used_n()
1500 vq_err(vq, "Failed to write used len"); in __vhost_add_used_n()
1504 vq_err(vq, "Failed to write used"); in __vhost_add_used_n()
1507 if (unlikely(vq->log_used)) { in __vhost_add_used_n()
1511 log_write(vq->log_base, in __vhost_add_used_n()
1512 vq->log_addr + in __vhost_add_used_n()
1513 ((void __user *)used - (void __user *)vq->used), in __vhost_add_used_n()
1516 old = vq->last_used_idx; in __vhost_add_used_n()
1517 new = (vq->last_used_idx += count); in __vhost_add_used_n()
1522 if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old))) in __vhost_add_used_n()
1523 vq->signalled_used_valid = false; in __vhost_add_used_n()
1529 int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, in vhost_add_used_n() argument
1534 start = vq->last_used_idx & (vq->num - 1); in vhost_add_used_n()
1535 n = vq->num - start; in vhost_add_used_n()
1537 r = __vhost_add_used_n(vq, heads, n); in vhost_add_used_n()
1543 r = __vhost_add_used_n(vq, heads, count); in vhost_add_used_n()
1547 if (__put_user(cpu_to_vhost16(vq, vq->last_used_idx), &vq->used->idx)) { in vhost_add_used_n()
1548 vq_err(vq, "Failed to increment used idx"); in vhost_add_used_n()
1551 if (unlikely(vq->log_used)) { in vhost_add_used_n()
1553 log_write(vq->log_base, in vhost_add_used_n()
1554 vq->log_addr + offsetof(struct vring_used, idx), in vhost_add_used_n()
1555 sizeof vq->used->idx); in vhost_add_used_n()
1556 if (vq->log_ctx) in vhost_add_used_n()
1557 eventfd_signal(vq->log_ctx, 1); in vhost_add_used_n()
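
The split between the two __vhost_add_used_n() calls above handles ring wraparound: `start = last_used_idx & (vq->num - 1)` is the physical slot, `n = vq->num - start` the room left before the end, and a batch larger than n is written in two pieces, the tail first and the remainder from slot 0, before used->idx is published once. A standalone model of the split (num assumed a power of two, which vhost enforces in VHOST_SET_VRING_NUM):

    #include <stdio.h>

    /* Model of the wraparound split in vhost_add_used_n():
     * last_used is a free-running u16, num a power of two. */
    static void add_used_n(unsigned short last_used, unsigned num,
                           unsigned count)
    {
        unsigned start = last_used & (num - 1);
        unsigned n = num - start; /* room before the ring wraps */
        if (count > n) {
            printf("write %u entries at slot %u\n", n, start);
            count -= n;
            start = 0;
        }
        printf("write %u entries at slot %u\n", count, start);
    }

    int main(void)
    {
        add_used_n(254, 256, 5); /* slots 254,255 then 0,1,2 */
        return 0;
    }
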
1563 static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) in vhost_notify() argument
1573 if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) && in vhost_notify()
1574 unlikely(vq->avail_idx == vq->last_avail_idx)) in vhost_notify()
1577 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { in vhost_notify()
1579 if (__get_user(flags, &vq->avail->flags)) { in vhost_notify()
1580 vq_err(vq, "Failed to get flags"); in vhost_notify()
1583 return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT)); in vhost_notify()
1585 old = vq->signalled_used; in vhost_notify()
1586 v = vq->signalled_used_valid; in vhost_notify()
1587 new = vq->signalled_used = vq->last_used_idx; in vhost_notify()
1588 vq->signalled_used_valid = true; in vhost_notify()
1593 if (__get_user(event, vhost_used_event(vq))) { in vhost_notify()
1594 vq_err(vq, "Failed to get used event idx"); in vhost_notify()
1597 return vring_need_event(vhost16_to_cpu(vq, event), new, old); in vhost_notify()
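
The EVENT_IDX branch of vhost_notify() ends in vring_need_event(), whose modular u16 arithmetic asks whether the guest's used_event index falls within (old, new], i.e. whether any entry published since the last signal crossed the threshold the guest advertised. A standalone worked example of that comparison (same expression as the vring_need_event() helper in the virtio ring UAPI header):

    #include <stdint.h>
    #include <stdio.h>

    /* Signal iff event_idx lies in (old, new], computed modulo 2^16. */
    static int need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
    {
        return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
    }

    int main(void)
    {
        /* Guest asked to be signalled once used idx passes 10. */
        printf("%d\n", need_event(10, 12, 9)); /* 1: 10 is in (9, 12] */
        printf("%d\n", need_event(10, 9, 5));  /* 0: not reached yet */
        /* Wraparound: indices are free-running u16 counters. */
        printf("%d\n", need_event(0xfffe, 2, 0xfffa)); /* 1 */
        return 0;
    }
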
1601 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) in vhost_signal() argument
1604 if (vq->call_ctx && vhost_notify(dev, vq)) in vhost_signal()
1605 eventfd_signal(vq->call_ctx, 1); in vhost_signal()
1611 struct vhost_virtqueue *vq, in vhost_add_used_and_signal() argument
1614 vhost_add_used(vq, head, len); in vhost_add_used_and_signal()
1615 vhost_signal(dev, vq); in vhost_add_used_and_signal()
1621 struct vhost_virtqueue *vq, in vhost_add_used_and_signal_n() argument
1624 vhost_add_used_n(vq, heads, count); in vhost_add_used_and_signal_n()
1625 vhost_signal(dev, vq); in vhost_add_used_and_signal_n()
1630 bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) in vhost_enable_notify() argument
1635 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) in vhost_enable_notify()
1637 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY; in vhost_enable_notify()
1638 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { in vhost_enable_notify()
1639 r = vhost_update_used_flags(vq); in vhost_enable_notify()
1641 vq_err(vq, "Failed to enable notification at %p: %d\n", in vhost_enable_notify()
1642 &vq->used->flags, r); in vhost_enable_notify()
1646 r = vhost_update_avail_event(vq, vq->avail_idx); in vhost_enable_notify()
1648 vq_err(vq, "Failed to update avail event index at %p: %d\n", in vhost_enable_notify()
1649 vhost_avail_event(vq), r); in vhost_enable_notify()
1656 r = __get_user(avail_idx, &vq->avail->idx); in vhost_enable_notify()
1658 vq_err(vq, "Failed to check avail idx at %p: %d\n", in vhost_enable_notify()
1659 &vq->avail->idx, r); in vhost_enable_notify()
1663 return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx; in vhost_enable_notify()
1668 void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) in vhost_disable_notify() argument
1672 if (vq->used_flags & VRING_USED_F_NO_NOTIFY) in vhost_disable_notify()
1674 vq->used_flags |= VRING_USED_F_NO_NOTIFY; in vhost_disable_notify()
1675 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { in vhost_disable_notify()
1676 r = vhost_update_used_flags(vq); in vhost_disable_notify()
1678 vq_err(vq, "Failed to enable notification at %p: %d\n", in vhost_disable_notify()
1679 &vq->used->flags, r); in vhost_disable_notify()
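
vhost_disable_notify() and vhost_enable_notify() bracket the canonical worker loop: guest kicks are suppressed while the ring is being drained, and the true return from vhost_enable_notify() (the avail index moved since we last looked) closes the race where the guest adds buffers just as the host stops polling. A kernel-style sketch of that pattern, modelled loosely on the vhost-net handlers; not verbatim driver code, and the actual I/O is elided:

    /* Sketch: canonical worker loop over the APIs above. */
    static void handle_queue(struct vhost_dev *dev, struct vhost_virtqueue *vq)
    {
        unsigned out, in;
        int head, len = 0;

        mutex_lock(&vq->mutex);
        vhost_disable_notify(dev, vq);     /* busy: suppress guest kicks */
        for (;;) {
            head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                     &out, &in, NULL, NULL);
            if (head < 0)
                break;                     /* ring in a bad state */
            if (head == vq->num) {         /* drained: reopen kicks */
                if (unlikely(vhost_enable_notify(dev, vq))) {
                    /* guest raced us: new buffers arrived */
                    vhost_disable_notify(dev, vq);
                    continue;
                }
                break;
            }
            /* ...perform I/O on vq->iov[0 .. out+in)... */
            vhost_add_used_and_signal(dev, vq, head, len);
        }
        mutex_unlock(&vq->mutex);
    }
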