/*
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2012 Intel, Inc.
 * Copyright (C) 2013 Intel, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/* This source file contains the implementation of a special device driver
 * that intends to provide a *very* fast communication channel between the
 * guest system and the QEMU emulator.
 *
 * Usage from the guest is simply the following (error handling simplified):
 *
 *    int  fd = open("/dev/qemu_pipe", O_RDWR);
 *    .... write() or read() through the pipe.
 *
 * This driver doesn't deal with the exact protocol used during the session.
 * It is intended to be as simple as something like:
 *
 *    // do this _just_ after opening the fd to connect to a specific
 *    // emulator service.
 *    const char*  msg = "<pipename>";
 *    if (write(fd, msg, strlen(msg)+1) < 0) {
 *       ... could not connect to <pipename> service
 *       close(fd);
 *    }
 *
 *    // after this, simply read() and write() to communicate with the
 *    // service. Exact protocol details are left as an exercise for the
 *    // reader.
 *
 * This driver is very fast because it doesn't copy any data through
 * intermediate buffers, since the emulator is capable of translating
 * guest user addresses into host ones.
 *
 * Note, however, that we must ensure that each user page involved in the
 * exchange is properly mapped during a transfer.
 */
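
/* For illustration only, a slightly fuller (hypothetical) userspace session
 * might look like the sketch below. The "<pipename>" service name and the
 * request/reply format are placeholders defined by the emulator-side
 * service, not by this driver:
 *
 *    int fd = open("/dev/qemu_pipe", O_RDWR);
 *    if (fd < 0)
 *        return -1;                               // device not available
 *
 *    const char *svc = "<pipename>";              // hypothetical service
 *    if (write(fd, svc, strlen(svc) + 1) < 0) {
 *        close(fd);                               // service not accepted
 *        return -1;
 *    }
 *
 *    write(fd, request, request_len);             // service-specific request
 *    nread = read(fd, reply, sizeof(reply));      // service-specific reply
 *    close(fd);
 */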

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/goldfish.h>

/*
 * IMPORTANT: The following constants must match the ones used and defined
 * in external/qemu/hw/goldfish_pipe.c in the Android source tree.
 */

/* pipe device registers */
#define PIPE_REG_COMMAND		0x00  /* write: value = command */
#define PIPE_REG_STATUS			0x04  /* read */
#define PIPE_REG_CHANNEL		0x08  /* read/write: channel id */
#define PIPE_REG_CHANNEL_HIGH		0x30  /* read/write: channel id */
#define PIPE_REG_SIZE			0x0c  /* read/write: buffer size */
#define PIPE_REG_ADDRESS		0x10  /* write: physical address */
#define PIPE_REG_ADDRESS_HIGH		0x34  /* write: physical address */
#define PIPE_REG_WAKES			0x14  /* read: wake flags */
#define PIPE_REG_PARAMS_ADDR_LOW	0x18  /* read/write: batch data address */
#define PIPE_REG_PARAMS_ADDR_HIGH	0x1c  /* read/write: batch data address */
#define PIPE_REG_ACCESS_PARAMS		0x20  /* write: batch access */

/* list of commands for PIPE_REG_COMMAND */
#define CMD_OPEN			1  /* open new channel */
#define CMD_CLOSE			2  /* close channel (from guest) */
#define CMD_POLL			3  /* poll read/write status */

/* List of bitflags returned in status of CMD_POLL command */
#define PIPE_POLL_IN			(1 << 0)
#define PIPE_POLL_OUT			(1 << 1)
#define PIPE_POLL_HUP			(1 << 2)

/* The following commands are related to write operations */
#define CMD_WRITE_BUFFER	4  /* send a user buffer to the emulator */
#define CMD_WAKE_ON_WRITE	5  /* tell the emulator to wake us when writing
				     is possible */

/* The following commands are related to read operations. They must be
 * listed in the same order as the corresponding write ones, since we
 * use (CMD_READ_BUFFER - CMD_WRITE_BUFFER) as a special offset
 * in goldfish_pipe_read_write() below.
 */
#define CMD_READ_BUFFER        6  /* receive a user buffer from the emulator */
#define CMD_WAKE_ON_READ       7  /* tell the emulator to wake us when reading
				   * is possible */
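
/* With the values above, the offset trick works out as follows (a small
 * sanity check, mirroring goldfish_pipe_read_write() below):
 *
 *    offset = CMD_READ_BUFFER - CMD_WRITE_BUFFER;     // 6 - 4 == 2
 *    CMD_WRITE_BUFFER  + offset == CMD_READ_BUFFER    // 4 + 2 == 6
 *    CMD_WAKE_ON_WRITE + offset == CMD_WAKE_ON_READ   // 5 + 2 == 7
 */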

/* Possible status values used to signal errors - see goldfish_pipe_error_convert */
#define PIPE_ERROR_INVAL       -1
#define PIPE_ERROR_AGAIN       -2
#define PIPE_ERROR_NOMEM       -3
#define PIPE_ERROR_IO          -4

/* Bit-flags used to signal events from the emulator */
#define PIPE_WAKE_CLOSED       (1 << 0)  /* emulator closed pipe */
#define PIPE_WAKE_READ         (1 << 1)  /* pipe can now be read from */
#define PIPE_WAKE_WRITE        (1 << 2)  /* pipe can now be written to */
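
/* For reference, issuing a single command to a given pipe boils down to the
 * register sequence below; this is exactly what goldfish_cmd_status() and
 * goldfish_cmd() implement, and is shown here only as a protocol summary:
 *
 *    gf_write_ptr(pipe, base + PIPE_REG_CHANNEL,
 *                 base + PIPE_REG_CHANNEL_HIGH);    // select the channel
 *    writel(cmd, base + PIPE_REG_COMMAND);          // issue the command
 *    status = readl(base + PIPE_REG_STATUS);        // result, if any
 */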

struct access_params {
	unsigned long channel;
	u32 size;
	unsigned long address;
	u32 cmd;
	u32 result;
	/* reserved for future extension */
	u32 flags;
};
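
/* The access_params block is used for "batched" transfers: the guest fills
 * in channel/address/size/cmd, then writes the command to
 * PIPE_REG_ACCESS_PARAMS, and the emulator stores its status in 'result'.
 * See setup_access_params_addr() and access_with_param() below.
 */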

/* The global driver data. Holds a reference to the I/O page used to
 * communicate with the emulator, and to the batch access_params block.
 * The wait queue for blocked tasks lives in each pipe instance below.
 */
struct goldfish_pipe_dev {
	spinlock_t lock;
	unsigned char __iomem *base;
	struct access_params *aps;
	int irq;
};

static struct goldfish_pipe_dev pipe_dev[1];

/* This data type models a given pipe instance */
struct goldfish_pipe {
	struct goldfish_pipe_dev *dev;
	struct mutex lock;
	unsigned long flags;
	wait_queue_head_t wake_queue;
};

/* Bit flags for the 'flags' field */
enum {
	BIT_CLOSED_ON_HOST = 0,  /* pipe closed by host */
	BIT_WAKE_ON_WRITE  = 1,  /* want to be woken on writes */
	BIT_WAKE_ON_READ   = 2,  /* want to be woken on reads */
};

static u32 goldfish_cmd_status(struct goldfish_pipe *pipe, u32 cmd)
{
	unsigned long flags;
	u32 status;
	struct goldfish_pipe_dev *dev = pipe->dev;

	spin_lock_irqsave(&dev->lock, flags);
	gf_write_ptr(pipe, dev->base + PIPE_REG_CHANNEL,
		     dev->base + PIPE_REG_CHANNEL_HIGH);
	writel(cmd, dev->base + PIPE_REG_COMMAND);
	status = readl(dev->base + PIPE_REG_STATUS);
	spin_unlock_irqrestore(&dev->lock, flags);
	return status;
}

static void goldfish_cmd(struct goldfish_pipe *pipe, u32 cmd)
{
	unsigned long flags;
	struct goldfish_pipe_dev *dev = pipe->dev;

	spin_lock_irqsave(&dev->lock, flags);
	gf_write_ptr(pipe, dev->base + PIPE_REG_CHANNEL,
		     dev->base + PIPE_REG_CHANNEL_HIGH);
	writel(cmd, dev->base + PIPE_REG_COMMAND);
	spin_unlock_irqrestore(&dev->lock, flags);
}

/* This function converts an error code returned by the emulator through
 * the PIPE_REG_STATUS i/o register into a valid negative errno value.
 */
static int goldfish_pipe_error_convert(int status)
{
	switch (status) {
	case PIPE_ERROR_AGAIN:
		return -EAGAIN;
	case PIPE_ERROR_NOMEM:
		return -ENOMEM;
	case PIPE_ERROR_IO:
		return -EIO;
	default:
		return -EINVAL;
	}
}

/*
 * Note: QEMU returns 0 for reads of unknown registers. We rely on this to
 * detect whether the emulator actually supports the access_params batch
 * mechanism: after writing the batch buffer address, we read it back and
 * check that it matches.
 */
static int valid_batchbuffer_addr(struct goldfish_pipe_dev *dev,
				  struct access_params *aps)
{
	u32 aph, apl;
	u64 paddr;

	aph = readl(dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
	apl = readl(dev->base + PIPE_REG_PARAMS_ADDR_LOW);

	paddr = ((u64)aph << 32) | apl;
	if (paddr != (__pa(aps)))
		return 0;
	return 1;
}

/* 0 on success */
static int setup_access_params_addr(struct platform_device *pdev,
					struct goldfish_pipe_dev *dev)
{
	u64 paddr;
	struct access_params *aps;

	aps = devm_kzalloc(&pdev->dev, sizeof(struct access_params), GFP_KERNEL);
	if (!aps)
		return -1;

	/* FIXME */
	paddr = __pa(aps);
	writel((u32)(paddr >> 32), dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
	writel((u32)paddr, dev->base + PIPE_REG_PARAMS_ADDR_LOW);

	if (!valid_batchbuffer_addr(dev, aps))
		return -1;

	dev->aps = aps;
	return 0;
}

/* A value that will not be set by the QEMU emulator */
#define INITIAL_BATCH_RESULT (0xdeadbeaf)

static int access_with_param(struct goldfish_pipe_dev *dev, const int cmd,
				unsigned long address, unsigned long avail,
				struct goldfish_pipe *pipe, int *status)
{
	struct access_params *aps = dev->aps;

	if (aps == NULL)
		return -1;

	aps->result = INITIAL_BATCH_RESULT;
	aps->channel = (unsigned long)pipe;
	aps->size = avail;
	aps->address = address;
	aps->cmd = cmd;
	writel(cmd, dev->base + PIPE_REG_ACCESS_PARAMS);
	/*
	 * If aps->result has not changed, the batch command failed
	 */
	if (aps->result == INITIAL_BATCH_RESULT)
		return -1;
	*status = aps->result;
	return 0;
}

/* This function is used for both reading from and writing to a given
 * pipe.
 */
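/* In outline (descriptive only, matching the code below): the user buffer is
 * processed one page-sized chunk at a time. Each chunk is first attempted as
 * a batched transfer through access_with_param(); if batching is unavailable,
 * the chunk is programmed through the individual pipe registers instead.
 * When the emulator reports PIPE_ERROR_AGAIN and the file is not in
 * non-blocking mode, the caller sleeps until the interrupt handler clears the
 * corresponding BIT_WAKE_ON_* flag and wakes it up.
 */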
static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
				    size_t bufflen, int is_write)
{
	unsigned long irq_flags;
	struct goldfish_pipe *pipe = filp->private_data;
	struct goldfish_pipe_dev *dev = pipe->dev;
	const int cmd_offset = is_write ? 0
					: (CMD_READ_BUFFER - CMD_WRITE_BUFFER);
	unsigned long address, address_end;
	int ret = 0;

	/* If the emulator already closed the pipe, no need to go further */
	if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
		return -EIO;

	/* Null reads or writes succeed */
	if (unlikely(bufflen == 0))
		return 0;

	/* Check the buffer range for access */
	if (!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
			buffer, bufflen))
		return -EFAULT;

	/* Serialize access to the pipe */
	if (mutex_lock_interruptible(&pipe->lock))
		return -ERESTARTSYS;

	address = (unsigned long)(void *)buffer;
	address_end = address + bufflen;

	while (address < address_end) {
		unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE;
		unsigned long next = page_end < address_end ? page_end
							    : address_end;
		unsigned long avail = next - address;
		int status, wakeBit;

		/* Ensure that the corresponding page is properly mapped */
		/* FIXME: this isn't safe or sufficient - use get_user_pages */
		if (is_write) {
			char c;

			/* Ensure that the page is mapped and readable */
			if (__get_user(c, (char __user *)address)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
		} else {
			/* Ensure that the page is mapped and writable */
			if (__put_user(0, (char __user *)address)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
		}

		/* Now, try to transfer the bytes in the current page */
		spin_lock_irqsave(&dev->lock, irq_flags);
		if (access_with_param(dev, CMD_WRITE_BUFFER + cmd_offset,
				address, avail, pipe, &status)) {
			gf_write_ptr(pipe, dev->base + PIPE_REG_CHANNEL,
				     dev->base + PIPE_REG_CHANNEL_HIGH);
			writel(avail, dev->base + PIPE_REG_SIZE);
			gf_write_ptr((void *)address,
				     dev->base + PIPE_REG_ADDRESS,
				     dev->base + PIPE_REG_ADDRESS_HIGH);
			writel(CMD_WRITE_BUFFER + cmd_offset,
					dev->base + PIPE_REG_COMMAND);
			status = readl(dev->base + PIPE_REG_STATUS);
		}
		spin_unlock_irqrestore(&dev->lock, irq_flags);

		if (status > 0) { /* Successful transfer */
			ret += status;
			address += status;
			continue;
		}

		if (status == 0)  /* EOF */
			break;

		/* An error occurred. If we already transferred data, just
		 * return its count. We expect the next call to return
		 * an error code.
		 */
		if (ret > 0)
			break;

		/* If the error is not PIPE_ERROR_AGAIN, or if we are in
		 * non-blocking mode, just return the error code.
		 */
		if (status != PIPE_ERROR_AGAIN ||
			(filp->f_flags & O_NONBLOCK) != 0) {
			ret = goldfish_pipe_error_convert(status);
			break;
		}

		/* We will have to wait until more data/space is available.
		 * First, mark the pipe as waiting for a specific wake signal.
		 */
		wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
		set_bit(wakeBit, &pipe->flags);

		/* Tell the emulator we're going to wait for a wake event */
		goldfish_cmd(pipe, CMD_WAKE_ON_WRITE + cmd_offset);

		/* Unlock the pipe, then wait for the wake signal */
		mutex_unlock(&pipe->lock);

		while (test_bit(wakeBit, &pipe->flags)) {
			if (wait_event_interruptible(
					pipe->wake_queue,
					!test_bit(wakeBit, &pipe->flags)))
				return -ERESTARTSYS;

			if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
				return -EIO;
		}

		/* Try to re-acquire the lock */
		if (mutex_lock_interruptible(&pipe->lock))
			return -ERESTARTSYS;

		/* Try the transfer again */
		continue;
	}
	mutex_unlock(&pipe->lock);
	return ret;
}

static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
			      size_t bufflen, loff_t *ppos)
{
	return goldfish_pipe_read_write(filp, buffer, bufflen, 0);
}

static ssize_t goldfish_pipe_write(struct file *filp,
				const char __user *buffer, size_t bufflen,
				loff_t *ppos)
{
	return goldfish_pipe_read_write(filp, (char __user *)buffer,
								bufflen, 1);
}

static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait)
{
	struct goldfish_pipe *pipe = filp->private_data;
	unsigned int mask = 0;
	int status;

	mutex_lock(&pipe->lock);

	poll_wait(filp, &pipe->wake_queue, wait);

	status = goldfish_cmd_status(pipe, CMD_POLL);

	mutex_unlock(&pipe->lock);

	if (status & PIPE_POLL_IN)
		mask |= POLLIN | POLLRDNORM;

	if (status & PIPE_POLL_OUT)
		mask |= POLLOUT | POLLWRNORM;

	if (status & PIPE_POLL_HUP)
		mask |= POLLHUP;

	if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
		mask |= POLLERR;

	return mask;
}

static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
{
	struct goldfish_pipe_dev *dev = dev_id;
	unsigned long irq_flags;
	int count = 0;

	/* We're going to read from the emulator a list of (channel, flags)
	 * pairs corresponding to the wake events that occurred on each
	 * blocked pipe (i.e. channel).
	 */
	spin_lock_irqsave(&dev->lock, irq_flags);
	for (;;) {
		/* First read the channel, 0 means the end of the list */
		struct goldfish_pipe *pipe;
		unsigned long wakes;
		unsigned long channel = 0;

#ifdef CONFIG_64BIT
		channel = (u64)readl(dev->base + PIPE_REG_CHANNEL_HIGH) << 32;

		if (channel == 0)
			break;
#endif
		channel |= readl(dev->base + PIPE_REG_CHANNEL);

		if (channel == 0)
			break;

		/* Convert channel to struct pipe pointer + read wake flags */
		wakes = readl(dev->base + PIPE_REG_WAKES);
		pipe  = (struct goldfish_pipe *)(ptrdiff_t)channel;

		/* Did the emulator just close a pipe? */
		if (wakes & PIPE_WAKE_CLOSED) {
			set_bit(BIT_CLOSED_ON_HOST, &pipe->flags);
			wakes |= PIPE_WAKE_READ | PIPE_WAKE_WRITE;
		}
		if (wakes & PIPE_WAKE_READ)
			clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
		if (wakes & PIPE_WAKE_WRITE)
			clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);

		wake_up_interruptible(&pipe->wake_queue);
		count++;
	}
	spin_unlock_irqrestore(&dev->lock, irq_flags);

	return (count == 0) ? IRQ_NONE : IRQ_HANDLED;
}

/**
 *	goldfish_pipe_open	-	open a channel to the AVD
 *	@inode: inode of device
 *	@file: file struct of opener
 *
 *	Create a new pipe link between the emulator and the user application.
 *	Each new request produces a new pipe.
 *
 *	Note: we use the pipe object's address as the channel ID (a mux). All
 *	goldfish emulations are 32-bit right now, so this is fine; a move to
 *	64-bit will need this addressing revisited.
 */
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
	struct goldfish_pipe *pipe;
	struct goldfish_pipe_dev *dev = pipe_dev;
	int32_t status;

	/* Allocate new pipe kernel object */
	pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
	if (pipe == NULL)
		return -ENOMEM;

	pipe->dev = dev;
	mutex_init(&pipe->lock);
	init_waitqueue_head(&pipe->wake_queue);

	/*
	 * Now, tell the emulator we're opening a new pipe. We use the
	 * pipe object's address as the channel identifier for simplicity.
	 */
	status = goldfish_cmd_status(pipe, CMD_OPEN);
	if (status < 0) {
		kfree(pipe);
		return status;
	}

	/* All is done, save the pipe into the file's private data field */
	file->private_data = pipe;
	return 0;
}

static int goldfish_pipe_release(struct inode *inode, struct file *filp)
{
	struct goldfish_pipe *pipe = filp->private_data;

	/* The guest is closing the channel, so tell the emulator right now */
	goldfish_cmd(pipe, CMD_CLOSE);
	kfree(pipe);
	filp->private_data = NULL;
	return 0;
}

static const struct file_operations goldfish_pipe_fops = {
	.owner = THIS_MODULE,
	.read = goldfish_pipe_read,
	.write = goldfish_pipe_write,
	.poll = goldfish_pipe_poll,
	.open = goldfish_pipe_open,
	.release = goldfish_pipe_release,
};

static struct miscdevice goldfish_pipe_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "goldfish_pipe",
	.fops = &goldfish_pipe_fops,
};

static int goldfish_pipe_probe(struct platform_device *pdev)
{
	int err;
	struct resource *r;
	struct goldfish_pipe_dev *dev = pipe_dev;

	/* not thread safe, but this should not happen */
	WARN_ON(dev->base != NULL);

	spin_lock_init(&dev->lock);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL || resource_size(r) < PAGE_SIZE) {
		dev_err(&pdev->dev, "can't allocate i/o page\n");
		return -EINVAL;
	}
	dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
	if (dev->base == NULL) {
		dev_err(&pdev->dev, "ioremap failed\n");
		return -EINVAL;
	}

	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (r == NULL) {
		err = -EINVAL;
		goto error;
	}
	dev->irq = r->start;

	err = devm_request_irq(&pdev->dev, dev->irq, goldfish_pipe_interrupt,
				IRQF_SHARED, "goldfish_pipe", dev);
	if (err) {
		dev_err(&pdev->dev, "unable to allocate IRQ\n");
		goto error;
	}

	err = misc_register(&goldfish_pipe_device);
	if (err) {
		dev_err(&pdev->dev, "unable to register device\n");
		goto error;
	}
	setup_access_params_addr(pdev, dev);
	return 0;

error:
	dev->base = NULL;
	return err;
}

static int goldfish_pipe_remove(struct platform_device *pdev)
{
	struct goldfish_pipe_dev *dev = pipe_dev;

	misc_deregister(&goldfish_pipe_device);
	dev->base = NULL;
	return 0;
}

static struct platform_driver goldfish_pipe = {
	.probe = goldfish_pipe_probe,
	.remove = goldfish_pipe_remove,
	.driver = {
		.name = "goldfish_pipe"
	}
};

module_platform_driver(goldfish_pipe);
MODULE_AUTHOR("David Turner <digit@google.com>");
MODULE_LICENSE("GPL");