This source file includes the following definitions:
- vm_get_features
- vm_finalize_features
- vm_get
- vm_set
- vm_generation
- vm_get_status
- vm_set_status
- vm_reset
- vm_notify
- vm_interrupt
- vm_del_vq
- vm_del_vqs
- vm_setup_vq
- vm_find_vqs
- vm_bus_name
- virtio_mmio_release_dev
- virtio_mmio_probe
- virtio_mmio_remove
- vm_cmdline_set
- vm_cmdline_get_device
- vm_cmdline_get
- vm_unregister_cmdline_device
- vm_unregister_cmdline_devices
- virtio_mmio_init
- virtio_mmio_exit
#define pr_fmt(fmt) "virtio-mmio: " fmt

#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <uapi/linux/virtio_mmio.h>
#include <linux/virtio_ring.h>

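/* The alignment to use between consumer and producer parts of the vring.
 * Currently hardcoded to the page size. */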
#define VIRTIO_MMIO_VRING_ALIGN		PAGE_SIZE

#define to_virtio_mmio_device(_plat_dev) \
	container_of(_plat_dev, struct virtio_mmio_device, vdev)

struct virtio_mmio_device {
	struct virtio_device vdev;
	struct platform_device *pdev;

	void __iomem *base;
	unsigned long version;

	spinlock_t lock;
	struct list_head virtqueues;
};

struct virtio_mmio_vq_info {
	struct virtqueue *vq;

	struct list_head node;
};

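/* Configuration interface */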
static u64 vm_get_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	u64 features;

	writel(1, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	features = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);
	features <<= 32;

	writel(0, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	features |= readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);

	return features;
}

static int vm_finalize_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	vring_transport_features(vdev);

	if (vm_dev->version == 2 &&
			!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
		return -EINVAL;
	}

	writel(1, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)(vdev->features >> 32),
		vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);

	writel(0, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)vdev->features,
		vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);

	return 0;
}

static void vm_get(struct virtio_device *vdev, unsigned offset,
		void *buf, unsigned len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (vm_dev->version == 1) {
		u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			ptr[i] = readb(base + offset + i);
		return;
	}

	switch (len) {
	case 1:
		b = readb(base + offset);
		memcpy(buf, &b, sizeof b);
		break;
	case 2:
		w = cpu_to_le16(readw(base + offset));
		memcpy(buf, &w, sizeof w);
		break;
	case 4:
		l = cpu_to_le32(readl(base + offset));
		memcpy(buf, &l, sizeof l);
		break;
	case 8:
		l = cpu_to_le32(readl(base + offset));
		memcpy(buf, &l, sizeof l);
		l = cpu_to_le32(ioread32(base + offset + sizeof l));
		memcpy(buf + sizeof l, &l, sizeof l);
		break;
	default:
		BUG();
	}
}

static void vm_set(struct virtio_device *vdev, unsigned offset,
		const void *buf, unsigned len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (vm_dev->version == 1) {
		const u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			writeb(ptr[i], base + offset + i);

		return;
	}

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof b);
		writeb(b, base + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof w);
		writew(le16_to_cpu(w), base + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof l);
		writel(le32_to_cpu(l), base + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof l);
		writel(le32_to_cpu(l), base + offset);
		memcpy(&l, buf + sizeof l, sizeof l);
		writel(le32_to_cpu(l), base + offset + sizeof l);
		break;
	default:
		BUG();
	}
}

static u32 vm_generation(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	if (vm_dev->version == 1)
		return 0;
	else
		return readl(vm_dev->base + VIRTIO_MMIO_CONFIG_GENERATION);
}

static u8 vm_get_status(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	return readl(vm_dev->base + VIRTIO_MMIO_STATUS) & 0xff;
}

static void vm_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	BUG_ON(status == 0);

	writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
}

static void vm_reset(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	writel(0, vm_dev->base + VIRTIO_MMIO_STATUS);
}

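/* The notify function used when creating a virtqueue */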
static bool vm_notify(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);

	writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
	return true;
}

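/* Handle a device interrupt: acknowledge it, report config changes and
 * dispatch vring interrupts to all virtqueues. */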
static irqreturn_t vm_interrupt(int irq, void *opaque)
{
	struct virtio_mmio_device *vm_dev = opaque;
	struct virtio_mmio_vq_info *info;
	unsigned long status;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
	writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);

	if (unlikely(status & VIRTIO_MMIO_INT_CONFIG)) {
		virtio_config_changed(&vm_dev->vdev);
		ret = IRQ_HANDLED;
	}

	if (likely(status & VIRTIO_MMIO_INT_VRING)) {
		spin_lock_irqsave(&vm_dev->lock, flags);
		list_for_each_entry(info, &vm_dev->virtqueues, node)
			ret |= vring_interrupt(irq, info->vq);
		spin_unlock_irqrestore(&vm_dev->lock, flags);
	}

	return ret;
}

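/* Deactivate a virtqueue, remove it from the interrupt dispatch list and
 * release its resources. */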
static void vm_del_vq(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
	struct virtio_mmio_vq_info *info = vq->priv;
	unsigned long flags;
	unsigned int index = vq->index;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
	if (vm_dev->version == 1) {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
	}

	vring_del_virtqueue(vq);

	kfree(info);
}

static void vm_del_vqs(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtqueue *vq, *n;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vm_del_vq(vq);

	free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
}

static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
		void (*callback)(struct virtqueue *vq),
		const char *name, bool ctx)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtio_mmio_vq_info *info;
	struct virtqueue *vq;
	unsigned long flags;
	unsigned int num;
	int err;

	if (!name)
		return NULL;

	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);

	if (readl(vm_dev->base + (vm_dev->version == 1 ?
			VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) {
		err = -ENOENT;
		goto error_available;
	}

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto error_kmalloc;
	}

	num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
	if (num == 0) {
		err = -ENOENT;
		goto error_new_virtqueue;
	}

	vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev,
			true, true, ctx, vm_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto error_new_virtqueue;
	}

	writel(virtqueue_get_vring_size(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
	if (vm_dev->version == 1) {
		u64 q_pfn = virtqueue_get_desc_addr(vq) >> PAGE_SHIFT;

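		/*
		 * virtio-mmio v1 uses a 32bit QUEUE PFN. If we have something
		 * that doesn't fit in 32bit, fail the setup rather than
		 * pretending to be successful.
		 */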
		if (q_pfn >> 32) {
			dev_err(&vdev->dev,
				"platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
				0x1ULL << (32 + PAGE_SHIFT - 30));
			err = -E2BIG;
			goto error_bad_pfn;
		}

		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
		writel(q_pfn, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		u64 addr;

		addr = virtqueue_get_desc_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);

		addr = virtqueue_get_avail_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);

		addr = virtqueue_get_used_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH);

		writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
	}

	vq->priv = info;
	info->vq = vq;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_add(&info->node, &vm_dev->virtqueues);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	return vq;

error_bad_pfn:
	vring_del_virtqueue(vq);
error_new_virtqueue:
	if (vm_dev->version == 1) {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
	}
	kfree(info);
error_kmalloc:
error_available:
	return ERR_PTR(err);
}

static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[],
		vq_callback_t *callbacks[],
		const char * const names[],
		const bool *ctx,
		struct irq_affinity *desc)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	int irq = platform_get_irq(vm_dev->pdev, 0);
	int i, err, queue_idx = 0;

	if (irq < 0) {
		dev_err(&vdev->dev, "Cannot get IRQ resource\n");
		return irq;
	}

	err = request_irq(irq, vm_interrupt, IRQF_SHARED,
			dev_name(&vdev->dev), vm_dev);
	if (err)
		return err;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			vm_del_vqs(vdev);
			return PTR_ERR(vqs[i]);
		}
	}

	return 0;
}

static const char *vm_bus_name(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	return vm_dev->pdev->name;
}

static const struct virtio_config_ops virtio_mmio_config_ops = {
	.get = vm_get,
	.set = vm_set,
	.generation = vm_generation,
	.get_status = vm_get_status,
	.set_status = vm_set_status,
	.reset = vm_reset,
	.find_vqs = vm_find_vqs,
	.del_vqs = vm_del_vqs,
	.get_features = vm_get_features,
	.finalize_features = vm_finalize_features,
	.bus_name = vm_bus_name,
};

static void virtio_mmio_release_dev(struct device *_d)
{
	struct virtio_device *vdev =
			container_of(_d, struct virtio_device, dev);
	struct virtio_mmio_device *vm_dev =
			container_of(vdev, struct virtio_mmio_device, vdev);
	struct platform_device *pdev = vm_dev->pdev;

	devm_kfree(&pdev->dev, vm_dev);
}

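/* Platform device */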
static int virtio_mmio_probe(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev;
	struct resource *mem;
	unsigned long magic;
	int rc;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -EINVAL;

	if (!devm_request_mem_region(&pdev->dev, mem->start,
			resource_size(mem), pdev->name))
		return -EBUSY;

	vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
	if (!vm_dev)
		return -ENOMEM;

	vm_dev->vdev.dev.parent = &pdev->dev;
	vm_dev->vdev.dev.release = virtio_mmio_release_dev;
	vm_dev->vdev.config = &virtio_mmio_config_ops;
	vm_dev->pdev = pdev;
	INIT_LIST_HEAD(&vm_dev->virtqueues);
	spin_lock_init(&vm_dev->lock);

	vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
	if (vm_dev->base == NULL)
		return -EFAULT;

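	/* Check magic value */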
	magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
	if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
		dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
		return -ENODEV;
	}

	vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION);
	if (vm_dev->version < 1 || vm_dev->version > 2) {
		dev_err(&pdev->dev, "Version %ld not supported!\n",
				vm_dev->version);
		return -ENXIO;
	}

	vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
	if (vm_dev->vdev.id.device == 0) {
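		/*
		 * virtio-mmio device with an ID 0 is a (dummy) placeholder
		 * with no function. End probing now with no error reported.
		 */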
		return -ENODEV;
	}
	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);

	if (vm_dev->version == 1) {
		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));

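		/*
		 * In the legacy case, ensure our coherently-allocated virtio
		 * ring will be at an address expressable as a 32-bit PFN.
		 */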
		if (!rc)
			dma_set_coherent_mask(&pdev->dev,
					DMA_BIT_MASK(32 + PAGE_SHIFT));
	} else {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	}
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");

	platform_set_drvdata(pdev, vm_dev);

	rc = register_virtio_device(&vm_dev->vdev);
	if (rc)
		put_device(&vm_dev->vdev.dev);

	return rc;
}

static int virtio_mmio_remove(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);

	unregister_virtio_device(&vm_dev->vdev);

	return 0;
}

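/*
 * Devices list parameter
 *
 * Judging from the memparse()/sscanf() calls in vm_cmdline_set() below,
 * the accepted format is roughly
 *	virtio_mmio.device=<size>@<baseaddr>:<irq>[:<id>]
 * e.g. (an illustrative value only, not taken from this file)
 *	virtio_mmio.device=1K@0x100b0000:48
 */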
#if defined(CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES)

static struct device vm_cmdline_parent = {
	.init_name = "virtio-mmio-cmdline",
};

static int vm_cmdline_parent_registered;
static int vm_cmdline_id;

static int vm_cmdline_set(const char *device,
		const struct kernel_param *kp)
{
	int err;
	struct resource resources[2] = {};
	char *str;
	long long int base, size;
	unsigned int irq;
	int processed, consumed = 0;
	struct platform_device *pdev;

	size = memparse(device, &str);

	processed = sscanf(str, "@%lli:%u%n:%d%n",
			&base, &irq, &consumed,
			&vm_cmdline_id, &consumed);

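	/*
	 * sscanf() must have matched at least the base address and the IRQ
	 * (2 chunks), and nothing may be left over after the last chunk
	 * that was consumed.
	 */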
	if (processed < 2 || str[consumed])
		return -EINVAL;

	resources[0].flags = IORESOURCE_MEM;
	resources[0].start = base;
	resources[0].end = base + size - 1;

	resources[1].flags = IORESOURCE_IRQ;
	resources[1].start = resources[1].end = irq;

	if (!vm_cmdline_parent_registered) {
		err = device_register(&vm_cmdline_parent);
		if (err) {
			pr_err("Failed to register parent device!\n");
			return err;
		}
		vm_cmdline_parent_registered = 1;
	}

	pr_info("Registering device virtio-mmio.%d at 0x%llx-0x%llx, IRQ %d.\n",
			vm_cmdline_id,
			(unsigned long long)resources[0].start,
			(unsigned long long)resources[0].end,
			(int)resources[1].start);

	pdev = platform_device_register_resndata(&vm_cmdline_parent,
			"virtio-mmio", vm_cmdline_id++,
			resources, ARRAY_SIZE(resources), NULL, 0);

	return PTR_ERR_OR_ZERO(pdev);
}

static int vm_cmdline_get_device(struct device *dev, void *data)
{
	char *buffer = data;
	unsigned int len = strlen(buffer);
	struct platform_device *pdev = to_platform_device(dev);

	snprintf(buffer + len, PAGE_SIZE - len, "0x%llx@0x%llx:%llu:%d\n",
			pdev->resource[0].end - pdev->resource[0].start + 1ULL,
			(unsigned long long)pdev->resource[0].start,
			(unsigned long long)pdev->resource[1].start,
			pdev->id);
	return 0;
}

static int vm_cmdline_get(char *buffer, const struct kernel_param *kp)
{
	buffer[0] = '\0';
	device_for_each_child(&vm_cmdline_parent, buffer,
			vm_cmdline_get_device);
	return strlen(buffer) + 1;
}

static const struct kernel_param_ops vm_cmdline_param_ops = {
	.set = vm_cmdline_set,
	.get = vm_cmdline_get,
};

device_param_cb(device, &vm_cmdline_param_ops, NULL, S_IRUSR);

static int vm_unregister_cmdline_device(struct device *dev,
		void *data)
{
	platform_device_unregister(to_platform_device(dev));

	return 0;
}

static void vm_unregister_cmdline_devices(void)
{
	if (vm_cmdline_parent_registered) {
		device_for_each_child(&vm_cmdline_parent, NULL,
				vm_unregister_cmdline_device);
		device_unregister(&vm_cmdline_parent);
		vm_cmdline_parent_registered = 0;
	}
}

#else

static void vm_unregister_cmdline_devices(void)
{
}

#endif

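/* Platform driver */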
static const struct of_device_id virtio_mmio_match[] = {
	{ .compatible = "virtio,mmio", },
	{},
};
MODULE_DEVICE_TABLE(of, virtio_mmio_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id virtio_mmio_acpi_match[] = {
	{ "LNRO0005", },
	{ }
};
MODULE_DEVICE_TABLE(acpi, virtio_mmio_acpi_match);
#endif

static struct platform_driver virtio_mmio_driver = {
	.probe = virtio_mmio_probe,
	.remove = virtio_mmio_remove,
	.driver = {
		.name = "virtio-mmio",
		.of_match_table = virtio_mmio_match,
		.acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match),
	},
};

static int __init virtio_mmio_init(void)
{
	return platform_driver_register(&virtio_mmio_driver);
}

static void __exit virtio_mmio_exit(void)
{
	platform_driver_unregister(&virtio_mmio_driver);
	vm_unregister_cmdline_devices();
}

module_init(virtio_mmio_init);
module_exit(virtio_mmio_exit);

MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
MODULE_DESCRIPTION("Platform bus driver for memory mapped virtio devices");
MODULE_LICENSE("GPL");