/*
 * Copyright (C) 2013 - Virtual Open Systems
 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>

#include "vfio_platform_private.h"

#define DRIVER_VERSION  "0.10"
#define DRIVER_AUTHOR   "Antonios Motakis <a.motakis@virtualopensystems.com>"
#define DRIVER_DESC     "VFIO platform base module"

static LIST_HEAD(reset_list);
static DEFINE_MUTEX(driver_lock);

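/*
 * Look up a reset handler registered for the given compat string. On a
 * match, a reference is taken on the module that owns the handler so it
 * cannot be unloaded while in use; vfio_platform_put_reset() drops it.
 */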
static vfio_platform_reset_fn_t vfio_platform_lookup_reset(const char *compat,
					struct module **module)
{
	struct vfio_platform_reset_node *iter;
	vfio_platform_reset_fn_t reset_fn = NULL;

	mutex_lock(&driver_lock);
	list_for_each_entry(iter, &reset_list, link) {
		if (!strcmp(iter->compat, compat) &&
			try_module_get(iter->owner)) {
			*module = iter->owner;
			reset_fn = iter->reset;
			break;
		}
	}
	mutex_unlock(&driver_lock);
	return reset_fn;
}

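/*
 * Find a reset handler for vdev->compat. If none is registered yet, try
 * to load one through the "vfio-reset:<compat>" module alias and look
 * again; reset modules are expected to declare a matching MODULE_ALIAS()
 * so that request_module() can resolve it.
 */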
static void vfio_platform_get_reset(struct vfio_platform_device *vdev)
{
	vdev->reset = vfio_platform_lookup_reset(vdev->compat,
						&vdev->reset_module);
	if (!vdev->reset) {
		request_module("vfio-reset:%s", vdev->compat);
		vdev->reset = vfio_platform_lookup_reset(vdev->compat,
							 &vdev->reset_module);
	}
}

static void vfio_platform_put_reset(struct vfio_platform_device *vdev)
{
	if (vdev->reset)
		module_put(vdev->reset_module);
}

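/*
 * Enumerate the device's resources and describe each one as a VFIO
 * region: MEM resources become MMIO regions, mmap-able only when both
 * base and size are page aligned; IO resources become PIO regions; any
 * other resource type is rejected.
 */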
static int vfio_platform_regions_init(struct vfio_platform_device *vdev)
{
	int cnt = 0, i;

	while (vdev->get_resource(vdev, cnt))
		cnt++;

	vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region),
				GFP_KERNEL);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		struct resource *res =
			vdev->get_resource(vdev, i);

		if (!res)
			goto err;

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].flags = 0;

		switch (resource_type(res)) {
		case IORESOURCE_MEM:
			vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO;
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
			if (!(res->flags & IORESOURCE_READONLY))
				vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_WRITE;

			/*
			 * Only regions addressed with PAGE granularity may be
			 * MMAPed securely.
			 */
			if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
					!(vdev->regions[i].size & ~PAGE_MASK))
				vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;

			break;
		case IORESOURCE_IO:
			vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_PIO;
			break;
		default:
			goto err;
		}
	}

	vdev->num_regions = cnt;

	return 0;
err:
	kfree(vdev->regions);
	return -EINVAL;
}

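/*
 * Tear down the region array, unmapping any MMIO regions that the
 * read/write handlers ioremap'ed lazily.
 */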
static void vfio_platform_regions_cleanup(struct vfio_platform_device *vdev)
{
	int i;

	for (i = 0; i < vdev->num_regions; i++)
		iounmap(vdev->regions[i].ioaddr);

	vdev->num_regions = 0;
	kfree(vdev->regions);
}

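/*
 * Called on each close of the device file. The last close resets the
 * device (when a reset handler is available) and tears down region and
 * IRQ state, so the next user starts from a clean slate.
 */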
static void vfio_platform_release(void *device_data)
{
	struct vfio_platform_device *vdev = device_data;

	mutex_lock(&driver_lock);

	if (!(--vdev->refcnt)) {
		if (vdev->reset) {
			dev_info(vdev->device, "reset\n");
			vdev->reset(vdev);
		} else {
			dev_warn(vdev->device, "no reset function found!\n");
		}
		vfio_platform_regions_cleanup(vdev);
		vfio_platform_irq_cleanup(vdev);
	}

	mutex_unlock(&driver_lock);

	module_put(vdev->parent_module);
}

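/*
 * Called on each open of the device file. The first open sets up region
 * and IRQ state and performs an initial device reset; subsequent opens
 * only bump the reference count under driver_lock.
 */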
static int vfio_platform_open(void *device_data)
{
	struct vfio_platform_device *vdev = device_data;
	int ret;

	if (!try_module_get(vdev->parent_module))
		return -ENODEV;

	mutex_lock(&driver_lock);

	if (!vdev->refcnt) {
		ret = vfio_platform_regions_init(vdev);
		if (ret)
			goto err_reg;

		ret = vfio_platform_irq_init(vdev);
		if (ret)
			goto err_irq;

		if (vdev->reset) {
			dev_info(vdev->device, "reset\n");
			vdev->reset(vdev);
		} else {
			dev_warn(vdev->device, "no reset function found!\n");
		}
	}

	vdev->refcnt++;

	mutex_unlock(&driver_lock);
	return 0;

err_irq:
	vfio_platform_regions_cleanup(vdev);
err_reg:
	mutex_unlock(&driver_lock);
	/* drop the reference taken above, not one on this module */
	module_put(vdev->parent_module);
	return ret;
}

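/*
 * Device ioctls: VFIO_DEVICE_GET_INFO, VFIO_DEVICE_GET_REGION_INFO,
 * VFIO_DEVICE_GET_IRQ_INFO, VFIO_DEVICE_SET_IRQS and VFIO_DEVICE_RESET.
 * Per the VFIO ABI, each handler copies in only the fixed-size prefix of
 * the user argument and validates argsz before acting on it.
 */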
static long vfio_platform_ioctl(void *device_data,
				unsigned int cmd, unsigned long arg)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (vdev->reset)
			vdev->flags |= VFIO_DEVICE_FLAGS_RESET;
		info.flags = vdev->flags;
		info.num_regions = vdev->num_regions;
		info.num_irqs = vdev->num_irqs;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= vdev->num_regions)
			return -EINVAL;

		/* map the region index to a file offset for read/write/mmap */
		info.offset = VFIO_PLATFORM_INDEX_TO_OFFSET(info.index);
		info.size = vdev->regions[info.index].size;
		info.flags = vdev->regions[info.index].flags;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= vdev->num_irqs)
			return -EINVAL;

		info.flags = vdev->irqs[info.index].flags;
		info.count = vdev->irqs[info.index].count;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		if (hdr.index >= vdev->num_irqs)
			return -EINVAL;

		if (hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
				  VFIO_IRQ_SET_ACTION_TYPE_MASK))
			return -EINVAL;

		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
			size_t size;

			if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
				size = sizeof(uint8_t);
			else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
				size = sizeof(int32_t);
			else
				return -EINVAL;

			if (hdr.argsz - minsz < size)
				return -EINVAL;

			data = memdup_user((void __user *)(arg + minsz), size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_platform_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
						   hdr.start, hdr.count, data);

		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;

	} else if (cmd == VFIO_DEVICE_RESET) {
		if (vdev->reset)
			return vdev->reset(vdev);
		else
			return -EINVAL;
	}

	return -ENOTTY;
}

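/*
 * Read from an MMIO region on behalf of userspace. The region is
 * ioremap'ed lazily on first access; data is transferred using the
 * widest naturally aligned access possible (4, 2 or 1 bytes).
 */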
static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
				       char __user *buf, size_t count,
				       loff_t off)
{
	unsigned int done = 0;

	if (!reg->ioaddr) {
		reg->ioaddr =
			ioremap_nocache(reg->addr, reg->size);

		if (!reg->ioaddr)
			return -ENOMEM;
	}

	while (count) {
		size_t filled;

		if (count >= 4 && !(off % 4)) {
			u32 val;

			val = ioread32(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 4))
				goto err;

			filled = 4;
		} else if (count >= 2 && !(off % 2)) {
			u16 val;

			val = ioread16(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 2))
				goto err;

			filled = 2;
		} else {
			u8 val;

			val = ioread8(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 1))
				goto err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
err:
	return -EFAULT;
}

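/*
 * The file offset encodes both the region index (upper bits, recovered
 * with VFIO_PLATFORM_OFFSET_TO_INDEX) and the byte offset within that
 * region (lower bits, masked with VFIO_PLATFORM_OFFSET_MASK).
 */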
static ssize_t vfio_platform_read(void *device_data, char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

	if (index >= vdev->num_regions)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ))
		return -EINVAL;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_read_mmio(&vdev->regions[index],
							buf, count, off);
	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}

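/*
 * Write to an MMIO region on behalf of userspace; the mirror image of
 * vfio_platform_read_mmio(), with the same lazy mapping and widest
 * naturally aligned access strategy.
 */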
static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
					const char __user *buf, size_t count,
					loff_t off)
{
	unsigned int done = 0;

	if (!reg->ioaddr) {
		reg->ioaddr =
			ioremap_nocache(reg->addr, reg->size);

		if (!reg->ioaddr)
			return -ENOMEM;
	}

	while (count) {
		size_t filled;

		if (count >= 4 && !(off % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, 4))
				goto err;
			iowrite32(val, reg->ioaddr + off);

			filled = 4;
		} else if (count >= 2 && !(off % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, 2))
				goto err;
			iowrite16(val, reg->ioaddr + off);

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, 1))
				goto err;
			iowrite8(val, reg->ioaddr + off);

			filled = 1;
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
err:
	return -EFAULT;
}

static ssize_t vfio_platform_write(void *device_data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

	if (index >= vdev->num_regions)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE))
		return -EINVAL;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_write_mmio(&vdev->regions[index],
							buf, count, off);
	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}

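/*
 * Back an mmap() of an MMIO region with the device's physical pages.
 * The page offset within the region comes from the low bits of
 * vma->vm_pgoff; the mapping is non-cached, as usual for device memory.
 */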
static int vfio_platform_mmap_mmio(struct vfio_platform_region region,
				   struct vm_area_struct *vma)
{
	u64 req_len, pgoff, req_start;

	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (region.size < PAGE_SIZE || req_start + req_len > region.size)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}

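/*
 * mmap() entry point: recover the region index from vm_pgoff, then
 * refuse anything that is not a shared, page-aligned mapping that fits
 * in an mmap-capable region and matches the region's access flags.
 */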
static int vfio_platform_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (index >= vdev->num_regions)
		return -EINVAL;
	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;
	if (vma->vm_end & ~PAGE_MASK)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
			&& (vma->vm_flags & VM_READ))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
			&& (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma->vm_private_data = vdev;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_mmap_mmio(vdev->regions[index], vma);
	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}

static const struct vfio_device_ops vfio_platform_ops = {
	.name		= "vfio-platform",
	.open		= vfio_platform_open,
	.release	= vfio_platform_release,
	.ioctl		= vfio_platform_ioctl,
	.read		= vfio_platform_read,
	.write		= vfio_platform_write,
	.mmap		= vfio_platform_mmap,
};

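/*
 * Common probe path for the bus-specific vfio-platform drivers: resolve
 * the device's "compatible" string, require an IOMMU group, register the
 * device with the VFIO core and look up an optional reset handler.
 */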
int vfio_platform_probe_common(struct vfio_platform_device *vdev,
			       struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (!vdev)
		return -EINVAL;

	ret = device_property_read_string(dev, "compatible", &vdev->compat);
	if (ret) {
		pr_err("VFIO: cannot retrieve compat for %s\n", vdev->name);
		return -EINVAL;
	}

	vdev->device = dev;

	group = iommu_group_get(dev);
	if (!group) {
		pr_err("VFIO: No IOMMU group for device %s\n", vdev->name);
		return -EINVAL;
	}

	ret = vfio_add_group_dev(dev, &vfio_platform_ops, vdev);
	if (ret) {
		iommu_group_put(group);
		return ret;
	}

	vfio_platform_get_reset(vdev);

	mutex_init(&vdev->igate);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_platform_probe_common);

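/*
 * Common remove path: detach the device from the VFIO core, drop the
 * reset handler's module reference and release the IOMMU group taken at
 * probe time.
 */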
struct vfio_platform_device *vfio_platform_remove_common(struct device *dev)
{
	struct vfio_platform_device *vdev;

	vdev = vfio_del_group_dev(dev);

	if (vdev) {
		vfio_platform_put_reset(vdev);
		iommu_group_put(dev->iommu_group);
	}

	return vdev;
}
EXPORT_SYMBOL_GPL(vfio_platform_remove_common);

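/*
 * Register a device-specific reset handler. Reset modules normally do
 * not call this directly. A minimal sketch of a handler module, assuming
 * the module_vfio_reset_handler() convenience macro from
 * vfio_platform_private.h ("vendor,foo" and foo_reset() are hypothetical
 * names used only for illustration):
 *
 *	static int foo_reset(struct vfio_platform_device *vdev)
 *	{
 *		// quiesce the device here
 *		return 0;
 *	}
 *	module_vfio_reset_handler("vendor,foo", foo_reset);
 *
 * The macro registers the handler on module init, declares the
 * "vfio-reset:vendor,foo" alias that vfio_platform_get_reset() requests,
 * and unregisters the handler again on module exit.
 */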
void __vfio_platform_register_reset(struct vfio_platform_reset_node *node)
{
	mutex_lock(&driver_lock);
	list_add(&node->link, &reset_list);
	mutex_unlock(&driver_lock);
}
EXPORT_SYMBOL_GPL(__vfio_platform_register_reset);

void vfio_platform_unregister_reset(const char *compat,
				    vfio_platform_reset_fn_t fn)
{
	struct vfio_platform_reset_node *iter, *temp;

	mutex_lock(&driver_lock);
	list_for_each_entry_safe(iter, temp, &reset_list, link) {
		if (!strcmp(iter->compat, compat) && (iter->reset == fn)) {
			list_del(&iter->link);
			break;
		}
	}

	mutex_unlock(&driver_lock);
}
EXPORT_SYMBOL_GPL(vfio_platform_unregister_reset);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);