/* Simple test of virtio code, entirely in userspace. */
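/*
 * The guest side drives the kernel's virtio_ring code against host-side
 * vringh accessors, all within one user process (or two, with --parallel).
 * The linux/... headers below, and hooks such as __kmalloc_fake and the
 * __kfree_ignore_* globals, are expected to come from the userspace shims
 * shipped alongside this harness (e.g. tools/virtio in the kernel tree);
 * build details may vary with your copy of those shims.
 */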
#define _GNU_SOURCE
#include <sched.h>
#include <err.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/uaccess.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <fcntl.h>

#define USER_MEM (1024*1024)
void *__user_addr_min, *__user_addr_max;
void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end;
static u64 user_addr_offset;

#define RINGSIZE 256
#define ALIGN 4096

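/*
 * The sequential test drives both sides by hand, so a real notification or
 * callback would mean a bug: abort loudly rather than hang.
 */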
static bool never_notify_host(struct virtqueue *vq)
{
	abort();
}

static void never_callback_guest(struct virtqueue *vq)
{
	abort();
}

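/*
 * getrange callbacks: vringh_getdesc_user() uses these to check that a
 * guest address is valid and to learn the host range backing it; r->offset
 * is what vringh adds to a guest address to reach host memory.
 */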
static bool getrange_iov(struct vringh *vrh, u64 addr, struct vringh_range *r)
{
	if (addr < (u64)(unsigned long)__user_addr_min - user_addr_offset)
		return false;
	if (addr >= (u64)(unsigned long)__user_addr_max - user_addr_offset)
		return false;

	r->start = (u64)(unsigned long)__user_addr_min - user_addr_offset;
	r->end_incl = (u64)(unsigned long)__user_addr_max - 1 - user_addr_offset;
	r->offset = user_addr_offset;
	return true;
}

/* We return single-byte ranges, forcing vringh to split every access. */
static bool getrange_slow(struct vringh *vrh, u64 addr, struct vringh_range *r)
{
	if (addr < (u64)(unsigned long)__user_addr_min - user_addr_offset)
		return false;
	if (addr >= (u64)(unsigned long)__user_addr_max - user_addr_offset)
		return false;

	r->start = addr;
	r->end_incl = r->start;
	r->offset = user_addr_offset;
	return true;
}

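/* Guest-side state: the pipe fd is our "doorbell" to the host process. */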
struct guest_virtio_device {
	struct virtio_device vdev;
	int to_host_fd;
	unsigned long notifies;
};

static bool parallel_notify_host(struct virtqueue *vq)
{
	int rc;
	struct guest_virtio_device *gvdev;

	gvdev = container_of(vq->vdev, struct guest_virtio_device, vdev);
	rc = write(gvdev->to_host_fd, "", 1);
	if (rc < 0)
		return false;
	gvdev->notifies++;
	return true;
}

/* Fast mode: the host polls, so the guest never needs to kick it. */
static bool no_notify_host(struct virtqueue *vq)
{
	return true;
}

#define NUM_XFERS (10000000)

/* We aim for two "distant" CPUs. */
static void find_cpus(unsigned int *first, unsigned int *last)
{
	unsigned int i;

	*first = -1U;
	*last = 0;
	for (i = 0; i < 4096; i++) {
		cpu_set_t set;
		CPU_ZERO(&set);
		CPU_SET(i, &set);
		if (sched_setaffinity(getpid(), sizeof(set), &set) == 0) {
			if (i < *first)
				*first = i;
			if (i > *last)
				*last = i;
		}
	}
}

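/*
 * Fast mode never reads the descriptors themselves, so only the first step
 * of vringh_getdesc_user() is needed: fetch the next available head.
 * Returns 0 if the ring is empty, 1 with *head set, or a negative error
 * from get_user().
 */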
/* Open-coded version for fast mode. */
static inline int vringh_get_head(struct vringh *vrh, u16 *head)
{
	u16 avail_idx, i;
	int err;

	err = get_user(avail_idx, &vrh->vring.avail->idx);
	if (err)
		return err;

	if (vrh->last_avail_idx == avail_idx)
		return 0;

	/* Only get avail ring entries after they have been exposed by guest. */
	virtio_rmb(vrh->weak_barriers);

	i = vrh->last_avail_idx & (vrh->vring.num - 1);

	err = get_user(*head, &vrh->vring.avail->ring[i]);
	if (err)
		return err;

	vrh->last_avail_idx++;
	return 1;
}

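/*
 * Parallel mode: fork, with the parent as host (vringh) and the child as
 * guest (virtio_ring).  Both mmap the same file, but at different
 * addresses, so the guest-to-host address translation is really exercised;
 * a pipe in each direction stands in for interrupts and kicks.
 */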
static int parallel_test(u64 features,
			 bool (*getrange)(struct vringh *vrh,
					  u64 addr, struct vringh_range *r),
			 bool fast_vringh)
{
	void *host_map, *guest_map;
	int fd, mapsize, to_guest[2], to_host[2];
	unsigned long xfers = 0, notifies = 0, receives = 0;
	unsigned int first_cpu, last_cpu;
	cpu_set_t cpu_set;
	char buf[128];

	/* Create a real file to mmap. */
	fd = open("/tmp/vringh_test-file", O_RDWR|O_CREAT|O_TRUNC, 0600);
	if (fd < 0)
		err(1, "Opening /tmp/vringh_test-file");

	/* Extra room at the end for some data, and indirects. */
	mapsize = vring_size(RINGSIZE, ALIGN)
		+ RINGSIZE * 2 * sizeof(int)
		+ RINGSIZE * 6 * sizeof(struct vring_desc);
	mapsize = (mapsize + getpagesize() - 1) & ~(getpagesize() - 1);
	if (ftruncate(fd, mapsize) != 0)
		err(1, "ftruncate");

	/* Parent and child use separate addresses, to check our mapping logic! */
	host_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	guest_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	if (host_map == MAP_FAILED || guest_map == MAP_FAILED)
		err(1, "mmap");

	if (pipe(to_guest) != 0 || pipe(to_host) != 0)
		err(1, "pipe");

	CPU_ZERO(&cpu_set);
	find_cpus(&first_cpu, &last_cpu);
	printf("Using CPUs %u and %u\n", first_cpu, last_cpu);
	fflush(stdout);

	if (fork() != 0) {
		struct vringh vrh;
		int status, err, rlen = 0;
		char rbuf[5];

		/* We are the host: never access guest addresses! */
		munmap(guest_map, mapsize);

		__user_addr_min = host_map;
		__user_addr_max = __user_addr_min + mapsize;
		user_addr_offset = host_map - guest_map;
		assert(user_addr_offset);

		close(to_guest[0]);
		close(to_host[1]);

		vring_init(&vrh.vring, RINGSIZE, host_map, ALIGN);
		vringh_init_user(&vrh, features, RINGSIZE, true,
				 vrh.vring.desc, vrh.vring.avail, vrh.vring.used);
		CPU_SET(first_cpu, &cpu_set);
		if (sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set))
			errx(1, "Could not set affinity to cpu %u", first_cpu);

		while (xfers < NUM_XFERS) {
			struct iovec host_riov[2], host_wiov[2];
			struct vringh_iov riov, wiov;
			u16 head, written;

			if (fast_vringh) {
				for (;;) {
					err = vringh_get_head(&vrh, &head);
					if (err != 0)
						break;
					err = vringh_need_notify_user(&vrh);
					if (err < 0)
						errx(1, "vringh_need_notify_user: %i",
						     err);
					if (err) {
						write(to_guest[1], "", 1);
						notifies++;
					}
				}
				if (err != 1)
					errx(1, "vringh_get_head");
				written = 0;
				goto complete;
			} else {
				vringh_iov_init(&riov,
						host_riov,
						ARRAY_SIZE(host_riov));
				vringh_iov_init(&wiov,
						host_wiov,
						ARRAY_SIZE(host_wiov));

				err = vringh_getdesc_user(&vrh, &riov, &wiov,
							  getrange, &head);
			}
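			/*
			 * Ring empty: flush any pending used-buffer notify,
			 * then re-enable guest notifications and re-check.
			 * vringh_notify_enable_user() returning false means
			 * more buffers arrived meanwhile, so poll again
			 * instead of sleeping; otherwise block until the
			 * guest pings the pipe.
			 */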
			if (err == 0) {
				err = vringh_need_notify_user(&vrh);
				if (err < 0)
					errx(1, "vringh_need_notify_user: %i",
					     err);
				if (err) {
					write(to_guest[1], "", 1);
					notifies++;
				}

				if (!vringh_notify_enable_user(&vrh))
					continue;

				/* Swallow all notifies at once. */
				if (read(to_host[0], buf, sizeof(buf)) < 1)
					break;

				vringh_notify_disable_user(&vrh);
				receives++;
				continue;
			}
			if (err != 1)
				errx(1, "vringh_getdesc_user: %i", err);

			/* We simply copy bytes. */
			if (riov.used) {
				rlen = vringh_iov_pull_user(&riov, rbuf,
							    sizeof(rbuf));
				if (rlen != 4)
					errx(1, "vringh_iov_pull_user: %i",
					     rlen);
				assert(riov.i == riov.used);
				written = 0;
			} else {
				err = vringh_iov_push_user(&wiov, rbuf, rlen);
				if (err != rlen)
					errx(1, "vringh_iov_push_user: %i",
					     err);
				assert(wiov.i == wiov.used);
				written = err;
			}
		complete:
			xfers++;

			err = vringh_complete_user(&vrh, head, written);
			if (err != 0)
				errx(1, "vringh_complete_user: %i", err);
		}

		err = vringh_need_notify_user(&vrh);
		if (err < 0)
			errx(1, "vringh_need_notify_user: %i", err);
		if (err) {
			write(to_guest[1], "", 1);
			notifies++;
		}
		wait(&status);
		if (!WIFEXITED(status))
			errx(1, "Child died with signal %i?", WTERMSIG(status));
		if (WEXITSTATUS(status) != 0)
			errx(1, "Child exited %i?", WEXITSTATUS(status));
		printf("Host: notified %lu, pinged %lu\n", notifies, receives);
		return 0;
	} else {
		struct guest_virtio_device gvdev;
		struct virtqueue *vq;
		unsigned int *data;
		struct vring_desc *indirects;
		unsigned int finished = 0;

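		/*
		 * Layout of the shared file past the vring itself: an array
		 * of ints used as transfer buffers, then room for hand-placed
		 * indirect descriptor tables.  __kmalloc_fake (a harness
		 * hook) steers the ring code's indirect allocations into that
		 * area so the host can see them through its own mapping.
		 */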
		/* We pass sg[]s pointing into here, but we need RINGSIZE+1. */
		data = guest_map + vring_size(RINGSIZE, ALIGN);
		indirects = (void *)data + (RINGSIZE + 1) * 2 * sizeof(int);

		/* We are the guest. */
		munmap(host_map, mapsize);

		close(to_guest[1]);
		close(to_host[0]);

		gvdev.vdev.features = features;
		gvdev.to_host_fd = to_host[1];
		gvdev.notifies = 0;

		/* Pin to the *other* CPU, so host and guest run apart. */
		CPU_SET(last_cpu, &cpu_set);
		if (sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set))
			errx(1, "Could not set affinity to cpu %u", last_cpu);

		vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &gvdev.vdev, true,
					 guest_map, fast_vringh ? no_notify_host
					 : parallel_notify_host,
					 never_callback_guest, "guest vq");

		/* Don't kfree indirects. */
		__kfree_ignore_start = indirects;
		__kfree_ignore_end = indirects + RINGSIZE * 6;

		while (xfers < NUM_XFERS) {
			struct scatterlist sg[4];
			unsigned int num_sg, len;
			int *dbuf, err;
			bool output = !(xfers % 2);

			/* Consume bufs. */
			while ((dbuf = virtqueue_get_buf(vq, &len)) != NULL) {
				if (len == 4)
					assert(*dbuf == finished - 1);
				else if (!fast_vringh)
					assert(*dbuf == finished);
				finished++;
			}

			/* Produce a buffer. */
			dbuf = data + (xfers % (RINGSIZE + 1));

			if (output)
				*dbuf = xfers;
			else
				*dbuf = -1;

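			/*
			 * Rotate through four scatterlist shapes, so the same
			 * 4-byte payload is described by 1, 2, 3 or 4
			 * descriptors (or an indirect table, if negotiated).
			 */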
			switch ((xfers / sizeof(*dbuf)) % 4) {
			case 0:
				/* Nasty three-element sg list. */
				sg_init_table(sg, num_sg = 3);
				sg_set_buf(&sg[0], (void *)dbuf, 1);
				sg_set_buf(&sg[1], (void *)dbuf + 1, 2);
				sg_set_buf(&sg[2], (void *)dbuf + 3, 1);
				break;
			case 1:
				sg_init_table(sg, num_sg = 2);
				sg_set_buf(&sg[0], (void *)dbuf, 1);
				sg_set_buf(&sg[1], (void *)dbuf + 1, 3);
				break;
			case 2:
				sg_init_table(sg, num_sg = 1);
				sg_set_buf(&sg[0], (void *)dbuf, 4);
				break;
			case 3:
				sg_init_table(sg, num_sg = 4);
				sg_set_buf(&sg[0], (void *)dbuf, 1);
				sg_set_buf(&sg[1], (void *)dbuf + 1, 1);
				sg_set_buf(&sg[2], (void *)dbuf + 2, 1);
				sg_set_buf(&sg[3], (void *)dbuf + 3, 1);
				break;
			}

			/* May allocate an indirect, so force it to allocate
			 * a user address. */
			__kmalloc_fake = indirects + (xfers % RINGSIZE) * 4;
			if (output)
				err = virtqueue_add_outbuf(vq, sg, num_sg, dbuf,
							   GFP_KERNEL);
			else
				err = virtqueue_add_inbuf(vq, sg, num_sg,
							  dbuf, GFP_KERNEL);

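			/*
			 * Ring full: the guest-side mirror of the host's
			 * notification dance above.  Re-arm the callback and
			 * re-check before sleeping on the pipe, to avoid
			 * missing a wakeup.
			 */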
			if (err == -ENOSPC) {
				if (!virtqueue_enable_cb_delayed(vq))
					continue;
				/* Swallow all notifies at once. */
				if (read(to_guest[0], buf, sizeof(buf)) < 1)
					break;

				receives++;
				virtqueue_disable_cb(vq);
				continue;
			}

			if (err)
				errx(1, "virtqueue_add_in/outbuf: %i", err);

			xfers++;
			virtqueue_kick(vq);
		}

		/* Drain anything still in flight. */
		while (finished != xfers) {
			int *dbuf;
			unsigned int len;

			/* Consume bufs. */
			dbuf = virtqueue_get_buf(vq, &len);
			if (dbuf) {
				if (len == 4)
					assert(*dbuf == finished - 1);
				else
					assert(len == 0);
				finished++;
				continue;
			}

			if (!virtqueue_enable_cb_delayed(vq))
				continue;
			if (read(to_guest[0], buf, sizeof(buf)) < 1)
				break;

			receives++;
			virtqueue_disable_cb(vq);
		}

		printf("Guest: notified %lu, pinged %lu\n",
		       gvdev.notifies, receives);
		vring_del_virtqueue(vq);
		return 0;
	}
}

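/*
 * Flags may be combined: --indirect, --eventidx and --virtio-1 set the
 * corresponding feature bits; --slow-range forces single-byte getrange
 * results; --fast-vringh uses the open-coded head fetch; --parallel runs
 * the forked host/guest stress test instead of the sequential checks below.
 */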
int main(int argc, char *argv[])
{
	struct virtio_device vdev;
	struct virtqueue *vq;
	struct vringh vrh;
	struct scatterlist guest_sg[RINGSIZE], *sgs[2];
	struct iovec host_riov[2], host_wiov[2];
	struct vringh_iov riov, wiov;
	struct vring_used_elem used[RINGSIZE];
	char buf[28];
	u16 head;
	int err;
	unsigned i;
	void *ret;
	bool (*getrange)(struct vringh *vrh, u64 addr, struct vringh_range *r);
	bool fast_vringh = false, parallel = false;

	getrange = getrange_iov;
	vdev.features = 0;

	while (argv[1]) {
		if (strcmp(argv[1], "--indirect") == 0)
			__virtio_set_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC);
		else if (strcmp(argv[1], "--eventidx") == 0)
			__virtio_set_bit(&vdev, VIRTIO_RING_F_EVENT_IDX);
		else if (strcmp(argv[1], "--virtio-1") == 0)
			__virtio_set_bit(&vdev, VIRTIO_F_VERSION_1);
		else if (strcmp(argv[1], "--slow-range") == 0)
			getrange = getrange_slow;
		else if (strcmp(argv[1], "--fast-vringh") == 0)
			fast_vringh = true;
		else if (strcmp(argv[1], "--parallel") == 0)
			parallel = true;
		else
			errx(1, "Unknown arg %s", argv[1]);
		argv++;
	}

	if (parallel)
		return parallel_test(vdev.features, getrange, fast_vringh);

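	/*
	 * Sequential mode: one flat arena holds the ring and all payloads,
	 * and both sides run in this process, so guest and host addresses
	 * coincide (user_addr_offset stays 0).
	 */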
	if (posix_memalign(&__user_addr_min, PAGE_SIZE, USER_MEM) != 0)
		abort();
	__user_addr_max = __user_addr_min + USER_MEM;
	memset(__user_addr_min, 0, vring_size(RINGSIZE, ALIGN));

	/* Set up guest side. */
	vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true,
				 __user_addr_min,
				 never_notify_host, never_callback_guest,
				 "guest vq");

	/* Set up host side. */
	vring_init(&vrh.vring, RINGSIZE, __user_addr_min, ALIGN);
	vringh_init_user(&vrh, vdev.features, RINGSIZE, true,
			 vrh.vring.desc, vrh.vring.avail, vrh.vring.used);

	/* No descriptor to get yet... */
	err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
	if (err != 0)
		errx(1, "vringh_getdesc_user: %i", err);

	/* Guest puts in a descriptor. */
	memcpy(__user_addr_max - 1, "a", 1);
	sg_init_table(guest_sg, 1);
	sg_set_buf(&guest_sg[0], __user_addr_max - 1, 1);
	sg_init_table(guest_sg + 1, 1);
	sg_set_buf(&guest_sg[1], __user_addr_max - 3, 2);
	sgs[0] = &guest_sg[0];
	sgs[1] = &guest_sg[1];

	/* May allocate an indirect, so force it to allocate a user address. */
	__kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN);
	err = virtqueue_add_sgs(vq, sgs, 1, 1, &err, GFP_KERNEL);
	if (err)
		errx(1, "virtqueue_add_sgs: %i", err);
	__kmalloc_fake = NULL;

	/* Host retrieves it. */
	vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
	vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

	err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
	if (err != 1)
		errx(1, "vringh_getdesc_user: %i", err);

	assert(riov.used == 1);
	assert(riov.iov[0].iov_base == __user_addr_max - 1);
	assert(riov.iov[0].iov_len == 1);
	if (getrange != getrange_slow) {
		assert(wiov.used == 1);
		assert(wiov.iov[0].iov_base == __user_addr_max - 3);
		assert(wiov.iov[0].iov_len == 2);
	} else {
		/* getrange_slow splits the 2-byte buffer into 1-byte iovs. */
		assert(wiov.used == 2);
		assert(wiov.iov[0].iov_base == __user_addr_max - 3);
		assert(wiov.iov[0].iov_len == 1);
		assert(wiov.iov[1].iov_base == __user_addr_max - 2);
		assert(wiov.iov[1].iov_len == 1);
	}

	err = vringh_iov_pull_user(&riov, buf, 5);
	if (err != 1)
		errx(1, "vringh_iov_pull_user: %i", err);
	assert(buf[0] == 'a');
	assert(riov.i == 1);
	assert(vringh_iov_pull_user(&riov, buf, 5) == 0);

	memcpy(buf, "bcdef", 5);
	err = vringh_iov_push_user(&wiov, buf, 5);
	if (err != 2)
		errx(1, "vringh_iov_push_user: %i", err);
	assert(memcmp(__user_addr_max - 3, "bc", 2) == 0);
	assert(wiov.i == wiov.used);
	assert(vringh_iov_push_user(&wiov, buf, 5) == 0);

	/* Host is done. */
	err = vringh_complete_user(&vrh, head, err);
	if (err != 0)
		errx(1, "vringh_complete_user: %i", err);

	/* Guest should see used token now. */
	__kfree_ignore_start = __user_addr_min + vring_size(RINGSIZE, ALIGN);
	__kfree_ignore_end = __kfree_ignore_start + 1;
	ret = virtqueue_get_buf(vq, &i);
	if (ret != &err)
		errx(1, "virtqueue_get_buf: %p", ret);
	assert(i == 2);

	/* Guest puts in a huge descriptor. */
	sg_init_table(guest_sg, RINGSIZE);
	for (i = 0; i < RINGSIZE; i++) {
		sg_set_buf(&guest_sg[i],
			   __user_addr_max - USER_MEM/4, USER_MEM/4);
	}

	/* Fill contents with recognisable garbage. */
	for (i = 0; i < USER_MEM/4; i++)
		((char *)__user_addr_max - USER_MEM/4)[i] = i;

	/* This will allocate an indirect, so force it to allocate a user
	 * address. */
	__kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN);
	err = virtqueue_add_outbuf(vq, guest_sg, RINGSIZE, &err, GFP_KERNEL);
	if (err)
		errx(1, "virtqueue_add_outbuf (large): %i", err);
	__kmalloc_fake = NULL;

	/* Host picks it up (allocates a new iov). */
	vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
	vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

	err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
	if (err != 1)
		errx(1, "vringh_getdesc_user: %i", err);

	assert(riov.max_num & VRINGH_IOV_ALLOCATED);
	assert(riov.iov != host_riov);
	if (getrange != getrange_slow)
		assert(riov.used == RINGSIZE);
	else
		assert(riov.used == RINGSIZE * USER_MEM/4);

	assert(!(wiov.max_num & VRINGH_IOV_ALLOCATED));
	assert(wiov.used == 0);

	/* Pull data back out (in odd chunks), should be as expected. */
	for (i = 0; i < RINGSIZE * USER_MEM/4; i += 3) {
		err = vringh_iov_pull_user(&riov, buf, 3);
		if (err != 3 && i + err != RINGSIZE * USER_MEM/4)
			errx(1, "vringh_iov_pull_user large: %i", err);
		assert(buf[0] == (char)i);
		assert(err < 2 || buf[1] == (char)(i + 1));
		assert(err < 3 || buf[2] == (char)(i + 2));
	}
	assert(riov.i == riov.used);
	vringh_iov_cleanup(&riov);
	vringh_iov_cleanup(&wiov);

	/* Complete using the multi interface, just because we can. */
	used[0].id = head;
	used[0].len = 0;
	err = vringh_complete_multi_user(&vrh, used, 1);
	if (err)
		errx(1, "vringh_complete_multi_user(1): %i", err);

	/* Free up those descriptors. */
	ret = virtqueue_get_buf(vq, &i);
	if (ret != &err)
		errx(1, "virtqueue_get_buf: %p", ret);

	/* Add lots of descriptors. */
	sg_init_table(guest_sg, 1);
	sg_set_buf(&guest_sg[0], __user_addr_max - 1, 1);
	for (i = 0; i < RINGSIZE; i++) {
		err = virtqueue_add_outbuf(vq, guest_sg, 1, &err, GFP_KERNEL);
		if (err)
			errx(1, "virtqueue_add_outbuf (multiple): %i", err);
	}

	/* Now get many, and consume them all at once. */
	vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
	vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

	for (i = 0; i < RINGSIZE; i++) {
		err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
		if (err != 1)
			errx(1, "vringh_getdesc_user: %i", err);
		used[i].id = head;
		used[i].len = 0;
	}
	/* Make sure it wraps around the ring, to test! */
	assert(vrh.vring.used->idx % RINGSIZE != 0);
	err = vringh_complete_multi_user(&vrh, used, RINGSIZE);
	if (err)
		errx(1, "vringh_complete_multi_user: %i", err);

	/* Free those buffers. */
	for (i = 0; i < RINGSIZE; i++) {
		unsigned len;

		assert(virtqueue_get_buf(vq, &len) != NULL);
	}

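	/*
	 * The ring code never emits this shape itself, so build it by hand:
	 * add a plain four-descriptor chain, then flip three descriptors to
	 * INDIRECT and point them at tables laid out below.  The host should
	 * still see one linear 28-byte buffer.
	 */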
	/* Test a weird (but legal!) indirect layout. */
	if (__virtio_test_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
		char *data = __user_addr_max - USER_MEM/4;
		struct vring_desc *d = __user_addr_max - USER_MEM/2;
		struct vring vring;

		/* Force creation of a direct chain, which we then modify. */
		__virtio_clear_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC);
		vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true,
					 __user_addr_min,
					 never_notify_host,
					 never_callback_guest,
					 "guest vq");

		sg_init_table(guest_sg, 4);
		sg_set_buf(&guest_sg[0], d, sizeof(*d)*2);
		sg_set_buf(&guest_sg[1], d + 2, sizeof(*d)*1);
		sg_set_buf(&guest_sg[2], data + 6, 4);
		sg_set_buf(&guest_sg[3], d + 3, sizeof(*d)*3);

		err = virtqueue_add_outbuf(vq, guest_sg, 4, &err, GFP_KERNEL);
		if (err)
			errx(1, "virtqueue_add_outbuf (indirect): %i", err);

		vring_init(&vring, RINGSIZE, __user_addr_min, ALIGN);

		/* They're used in order, but double-check... */
		assert(vring.desc[0].addr == (unsigned long)d);
		assert(vring.desc[1].addr == (unsigned long)(d+2));
		assert(vring.desc[2].addr == (unsigned long)data + 6);
		assert(vring.desc[3].addr == (unsigned long)(d+3));
		vring.desc[0].flags |= VRING_DESC_F_INDIRECT;
		vring.desc[1].flags |= VRING_DESC_F_INDIRECT;
		vring.desc[3].flags |= VRING_DESC_F_INDIRECT;

		/* First indirect: 1-byte + 2-byte descriptors (data 0..2). */
		d[0].addr = (unsigned long)data;
		d[0].len = 1;
		d[0].flags = VRING_DESC_F_NEXT;
		d[0].next = 1;
		d[1].addr = (unsigned long)data + 1;
		d[1].len = 2;
		d[1].flags = 0;

		/* Second indirect: single 3-byte descriptor (data 3..5). */
		d[2].addr = (unsigned long)data + 3;
		d[2].len = 3;
		d[2].flags = 0;

		/* Third indirect: 5-, 6- and 7-byte chain (data 10..27). */
		d[3].addr = (unsigned long)data + 10;
		d[3].len = 5;
		d[3].flags = VRING_DESC_F_NEXT;
		d[3].next = 1;
		d[4].addr = (unsigned long)data + 15;
		d[4].len = 6;
		d[4].flags = VRING_DESC_F_NEXT;
		d[4].next = 2;
		d[5].addr = (unsigned long)data + 21;
		d[5].len = 7;
		d[5].flags = 0;

		/* Host picks it up (allocates a new iov). */
		vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
		vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

		err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
		if (err != 1)
			errx(1, "vringh_getdesc_user: %i", err);

		if (head != 0)
			errx(1, "vringh_getdesc_user: head %i not 0", head);

		assert(riov.max_num & VRINGH_IOV_ALLOCATED);
		if (getrange != getrange_slow)
			assert(riov.used == 7);
		else
			assert(riov.used == 28);
		err = vringh_iov_pull_user(&riov, buf, 29);
		assert(err == 28);

		/* Data should be linear. */
		for (i = 0; i < err; i++)
			assert(buf[i] == i);
		vringh_iov_cleanup(&riov);
	}

	/* Don't leak memory... */
	vring_del_virtqueue(vq);
	free(__user_addr_min);

	return 0;
}