This source file includes the following definitions:
- mdpy_find_type
- mdpy_create_config_space
- handle_pci_cfg_write
- mdev_access
- mdpy_reset
- mdpy_create
- mdpy_remove
- mdpy_read
- mdpy_write
- mdpy_mmap
- mdpy_get_region_info
- mdpy_get_irq_info
- mdpy_get_device_info
- mdpy_query_gfx_plane
- mdpy_ioctl
- mdpy_open
- mdpy_close
- resolution_show
- name_show
- description_show
- available_instances_show
- device_api_show
- mdpy_device_release
- mdpy_dev_init
- mdpy_dev_exit
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/cdev.h>
#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/sysfs.h>
#include <linux/mdev.h>
#include <linux/pci.h>
#include <drm/drm_fourcc.h>
#include "mdpy-defs.h"

#define MDPY_NAME "mdpy"
#define MDPY_CLASS_NAME "mdpy"

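/*
 * Layout of the emulated device: the virtual PCI config space starts at
 * offset 0 and is MDPY_CONFIG_SPACE_SIZE bytes long, the framebuffer is
 * exposed one page in at MDPY_MEMORY_BAR_OFFSET, and MDPY_DISPLAY_REGION
 * is the extra VFIO region index reported by the gfx-plane query below.
 */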
#define MDPY_CONFIG_SPACE_SIZE 0xff
#define MDPY_MEMORY_BAR_OFFSET PAGE_SIZE
#define MDPY_DISPLAY_REGION 16

#define STORE_LE16(addr, val) (*(u16 *)addr = val)
#define STORE_LE32(addr, val) (*(u32 *)addr = val)


MODULE_LICENSE("GPL v2");

static int max_devices = 4;
module_param_named(count, max_devices, int, 0444);
MODULE_PARM_DESC(count, "number of " MDPY_NAME " devices");


#define MDPY_TYPE_1 "vga"
#define MDPY_TYPE_2 "xga"
#define MDPY_TYPE_3 "hd"

static const struct mdpy_type {
        const char *name;
        u32 format;
        u32 bytepp;
        u32 width;
        u32 height;
} mdpy_types[] = {
        {
                .name   = MDPY_CLASS_NAME "-" MDPY_TYPE_1,
                .format = DRM_FORMAT_XRGB8888,
                .bytepp = 4,
                .width  = 640,
                .height = 480,
        }, {
                .name   = MDPY_CLASS_NAME "-" MDPY_TYPE_2,
                .format = DRM_FORMAT_XRGB8888,
                .bytepp = 4,
                .width  = 1024,
                .height = 768,
        }, {
                .name   = MDPY_CLASS_NAME "-" MDPY_TYPE_3,
                .format = DRM_FORMAT_XRGB8888,
                .bytepp = 4,
                .width  = 1920,
                .height = 1080,
        },
};

static dev_t mdpy_devt;
static struct class *mdpy_class;
static struct cdev mdpy_cdev;
static struct device mdpy_dev;
static u32 mdpy_count;


struct mdev_state {
        u8 *vconfig;
        u32 bar_mask;
        struct mutex ops_lock;
        struct mdev_device *mdev;
        struct vfio_device_info dev_info;

        const struct mdpy_type *type;
        u32 memsize;
        void *memblk;
};

static const struct mdpy_type *mdpy_find_type(struct kobject *kobj)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(mdpy_types); i++)
                if (strcmp(mdpy_types[i].name, kobj->name) == 0)
                        return mdpy_types + i;
        return NULL;
}

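/*
 * Build the virtual PCI config space for one instance: vendor/device and
 * subsystem IDs come from mdpy-defs.h (shared with userspace), BAR0 is a
 * 32-bit prefetchable memory BAR sized to the framebuffer, and a vendor
 * specific capability (ID 0x09) advertises the drm fourcc format, width
 * and height of the display.
 *
 * A minimal userspace sketch for picking up the format (assuming "fd" is
 * the VFIO device fd for this mdev and the config region starts at file
 * offset 0, as mdpy_get_region_info() below reports):
 *
 *	u32 format;
 *	pread(fd, &format, sizeof(format), MDPY_FORMAT_OFFSET);
 */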
static void mdpy_create_config_space(struct mdev_state *mdev_state)
{
        STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID],
                   MDPY_PCI_VENDOR_ID);
        STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID],
                   MDPY_PCI_DEVICE_ID);
        STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID],
                   MDPY_PCI_SUBVENDOR_ID);
        STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID],
                   MDPY_PCI_SUBDEVICE_ID);

        STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND],
                   PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
        STORE_LE16((u16 *) &mdev_state->vconfig[PCI_STATUS],
                   PCI_STATUS_CAP_LIST);
        STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE],
                   PCI_CLASS_DISPLAY_OTHER);
        mdev_state->vconfig[PCI_CLASS_REVISION] = 0x01;

        STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0],
                   PCI_BASE_ADDRESS_SPACE_MEMORY |
                   PCI_BASE_ADDRESS_MEM_TYPE_32 |
                   PCI_BASE_ADDRESS_MEM_PREFETCH);
        mdev_state->bar_mask = ~(mdev_state->memsize) + 1;


        mdev_state->vconfig[PCI_CAPABILITY_LIST] = MDPY_VENDORCAP_OFFSET;
        mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 0] = 0x09;
        mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 1] = 0x00;
        mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 2] = MDPY_VENDORCAP_SIZE;
        STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_FORMAT_OFFSET],
                   mdev_state->type->format);
        STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_WIDTH_OFFSET],
                   mdev_state->type->width);
        STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_HEIGHT_OFFSET],
                   mdev_state->type->height);
}

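/*
 * Emulate the standard PCI BAR sizing handshake on writes to BAR0: when the
 * guest writes 0xffffffff it reads back the size mask (bar_mask, i.e.
 * ~memsize + 1) in the address bits; any other value is stored as the new
 * BAR address, with the read-only low flag bits preserved.  For example,
 * the 1024x768 type rounds up to a 4 MiB framebuffer, so bar_mask is
 * 0xffc00000 and the guest derives a 4 MiB BAR from the value it reads back.
 */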
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
                                 char *buf, u32 count)
{
        struct device *dev = mdev_dev(mdev_state->mdev);
        u32 cfg_addr;

        switch (offset) {
        case PCI_BASE_ADDRESS_0:
                cfg_addr = *(u32 *)buf;

                if (cfg_addr == 0xffffffff) {
                        cfg_addr = (cfg_addr & mdev_state->bar_mask);
                } else {
                        cfg_addr &= PCI_BASE_ADDRESS_MEM_MASK;
                        if (cfg_addr)
                                dev_info(dev, "BAR0 @ 0x%x\n", cfg_addr);
                }

                cfg_addr |= (mdev_state->vconfig[offset] &
                             ~PCI_BASE_ADDRESS_MEM_MASK);
                STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
                break;
        }
}

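/*
 * Common backend for the vfio read/write paths: offsets below
 * MDPY_CONFIG_SPACE_SIZE hit the virtual config space, offsets inside
 * [MDPY_MEMORY_BAR_OFFSET, MDPY_MEMORY_BAR_OFFSET + memsize) hit the
 * framebuffer memory, and everything else is rejected.
 */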
static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
                           loff_t pos, bool is_write)
{
        struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
        struct device *dev = mdev_dev(mdev);
        int ret = 0;

        mutex_lock(&mdev_state->ops_lock);

        if (pos < MDPY_CONFIG_SPACE_SIZE) {
                if (is_write)
                        handle_pci_cfg_write(mdev_state, pos, buf, count);
                else
                        memcpy(buf, (mdev_state->vconfig + pos), count);

        } else if ((pos >= MDPY_MEMORY_BAR_OFFSET) &&
                   (pos + count <=
                    MDPY_MEMORY_BAR_OFFSET + mdev_state->memsize)) {
                /* pos is now the offset into the framebuffer */
                pos -= MDPY_MEMORY_BAR_OFFSET;
                if (is_write)
                        memcpy(mdev_state->memblk + pos, buf, count);
                else
                        memcpy(buf, mdev_state->memblk + pos, count);

        } else {
                dev_info(dev, "%s: %s @0x%llx (unhandled)\n",
                         __func__, is_write ? "WR" : "RD", pos);
                ret = -1;
                goto accessfailed;
        }

        ret = count;

accessfailed:
        mutex_unlock(&mdev_state->ops_lock);

        return ret;
}

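/*
 * "Reset" repaints the framebuffer row by row with a gray ramp (row i is
 * filled with the byte value i * 255 / height), so a freshly created or
 * reset device shows a recognizable top-to-bottom gradient.
 */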
static int mdpy_reset(struct mdev_device *mdev)
{
        struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
        u32 stride, i;

        stride = mdev_state->type->width * mdev_state->type->bytepp;
        for (i = 0; i < mdev_state->type->height; i++)
                memset(mdev_state->memblk + i * stride,
                       i * 255 / mdev_state->type->height,
                       stride);
        return 0;
}

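/*
 * Called by the mdev core when userspace creates an instance through sysfs,
 * typically something like
 *
 *	echo $UUID > .../mdpy/mdev_supported_types/mdpy-vga/create
 *
 * (the exact parent path depends on where the mdpy device registers; the
 * last two components follow from the type groups below).  Looks up the
 * requested type from the type kobject, allocates the config space and a
 * vmalloc_user() framebuffer so it can be mmap'ed later, then builds the
 * config space and paints the initial test pattern.
 */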
static int mdpy_create(struct kobject *kobj, struct mdev_device *mdev)
{
        const struct mdpy_type *type = mdpy_find_type(kobj);
        struct device *dev = mdev_dev(mdev);
        struct mdev_state *mdev_state;
        u32 fbsize;

        if (mdpy_count >= max_devices)
                return -ENOMEM;

        mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
        if (mdev_state == NULL)
                return -ENOMEM;

        mdev_state->vconfig = kzalloc(MDPY_CONFIG_SPACE_SIZE, GFP_KERNEL);
        if (mdev_state->vconfig == NULL) {
                kfree(mdev_state);
                return -ENOMEM;
        }

        if (!type)
                type = &mdpy_types[0];
        fbsize = roundup_pow_of_two(type->width * type->height * type->bytepp);

        mdev_state->memblk = vmalloc_user(fbsize);
        if (!mdev_state->memblk) {
                kfree(mdev_state->vconfig);
                kfree(mdev_state);
                return -ENOMEM;
        }
        dev_info(dev, "%s: %s (%dx%d)\n",
                 __func__, kobj->name, type->width, type->height);

        mutex_init(&mdev_state->ops_lock);
        mdev_state->mdev = mdev;
        mdev_set_drvdata(mdev, mdev_state);

        mdev_state->type = type;
        mdev_state->memsize = fbsize;
        mdpy_create_config_space(mdev_state);
        mdpy_reset(mdev);

        mdpy_count++;
        return 0;
}

static int mdpy_remove(struct mdev_device *mdev)
{
        struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
        struct device *dev = mdev_dev(mdev);

        dev_info(dev, "%s\n", __func__);

        mdev_set_drvdata(mdev, NULL);
        vfree(mdev_state->memblk);
        kfree(mdev_state->vconfig);
        kfree(mdev_state);

        mdpy_count--;
        return 0;
}

static ssize_t mdpy_read(struct mdev_device *mdev, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned int done = 0;
        int ret;

        while (count) {
                size_t filled;

                if (count >= 4 && !(*ppos % 4)) {
                        u32 val;

                        ret = mdev_access(mdev, (char *)&val, sizeof(val),
                                          *ppos, false);
                        if (ret <= 0)
                                goto read_err;

                        if (copy_to_user(buf, &val, sizeof(val)))
                                goto read_err;

                        filled = 4;
                } else if (count >= 2 && !(*ppos % 2)) {
                        u16 val;

                        ret = mdev_access(mdev, (char *)&val, sizeof(val),
                                          *ppos, false);
                        if (ret <= 0)
                                goto read_err;

                        if (copy_to_user(buf, &val, sizeof(val)))
                                goto read_err;

                        filled = 2;
                } else {
                        u8 val;

                        ret = mdev_access(mdev, (char *)&val, sizeof(val),
                                          *ppos, false);
                        if (ret <= 0)
                                goto read_err;

                        if (copy_to_user(buf, &val, sizeof(val)))
                                goto read_err;

                        filled = 1;
                }

                count -= filled;
                done += filled;
                *ppos += filled;
                buf += filled;
        }

        return done;

read_err:
        return -EFAULT;
}

static ssize_t mdpy_write(struct mdev_device *mdev, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned int done = 0;
        int ret;

        while (count) {
                size_t filled;

                if (count >= 4 && !(*ppos % 4)) {
                        u32 val;

                        if (copy_from_user(&val, buf, sizeof(val)))
                                goto write_err;

                        ret = mdev_access(mdev, (char *)&val, sizeof(val),
                                          *ppos, true);
                        if (ret <= 0)
                                goto write_err;

                        filled = 4;
                } else if (count >= 2 && !(*ppos % 2)) {
                        u16 val;

                        if (copy_from_user(&val, buf, sizeof(val)))
                                goto write_err;

                        ret = mdev_access(mdev, (char *)&val, sizeof(val),
                                          *ppos, true);
                        if (ret <= 0)
                                goto write_err;

                        filled = 2;
                } else {
                        u8 val;

                        if (copy_from_user(&val, buf, sizeof(val)))
                                goto write_err;

                        ret = mdev_access(mdev, (char *)&val, sizeof(val),
                                          *ppos, true);
                        if (ret <= 0)
                                goto write_err;

                        filled = 1;
                }
                count -= filled;
                done += filled;
                *ppos += filled;
                buf += filled;
        }

        return done;
write_err:
        return -EFAULT;
}

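/*
 * Userspace maps the framebuffer through the vfio device fd at the offset
 * reported for BAR0 / the display region (MDPY_MEMORY_BAR_OFFSET); the
 * vmalloc_user() buffer is inserted with remap_vmalloc_range_partial().
 * A minimal sketch of the userspace side (assuming "fd" is the VFIO device
 * fd and "memsize" was taken from VFIO_DEVICE_GET_REGION_INFO):
 *
 *	void *fb = mmap(NULL, memsize, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, MDPY_MEMORY_BAR_OFFSET);
 */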
static int mdpy_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
{
        struct mdev_state *mdev_state = mdev_get_drvdata(mdev);

        if (vma->vm_pgoff != MDPY_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
                return -EINVAL;
        if (vma->vm_end < vma->vm_start)
                return -EINVAL;
        if (vma->vm_end - vma->vm_start > mdev_state->memsize)
                return -EINVAL;
        if ((vma->vm_flags & VM_SHARED) == 0)
                return -EINVAL;

        return remap_vmalloc_range_partial(vma, vma->vm_start,
                                           mdev_state->memblk, 0,
                                           vma->vm_end - vma->vm_start);
}

static int mdpy_get_region_info(struct mdev_device *mdev,
                                struct vfio_region_info *region_info,
                                u16 *cap_type_id, void **cap_type)
{
        struct mdev_state *mdev_state;

        mdev_state = mdev_get_drvdata(mdev);
        if (!mdev_state)
                return -EINVAL;

        if (region_info->index >= VFIO_PCI_NUM_REGIONS &&
            region_info->index != MDPY_DISPLAY_REGION)
                return -EINVAL;

        switch (region_info->index) {
        case VFIO_PCI_CONFIG_REGION_INDEX:
                region_info->offset = 0;
                region_info->size = MDPY_CONFIG_SPACE_SIZE;
                region_info->flags = (VFIO_REGION_INFO_FLAG_READ |
                                      VFIO_REGION_INFO_FLAG_WRITE);
                break;
        case VFIO_PCI_BAR0_REGION_INDEX:
        case MDPY_DISPLAY_REGION:
                region_info->offset = MDPY_MEMORY_BAR_OFFSET;
                region_info->size = mdev_state->memsize;
                region_info->flags = (VFIO_REGION_INFO_FLAG_READ |
                                      VFIO_REGION_INFO_FLAG_WRITE |
                                      VFIO_REGION_INFO_FLAG_MMAP);
                break;
        default:
                region_info->size = 0;
                region_info->offset = 0;
                region_info->flags = 0;
        }

        return 0;
}

static int mdpy_get_irq_info(struct mdev_device *mdev,
                             struct vfio_irq_info *irq_info)
{
        irq_info->count = 0;
        return 0;
}

static int mdpy_get_device_info(struct mdev_device *mdev,
                                struct vfio_device_info *dev_info)
{
        dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
        dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
        dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
        return 0;
}

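/*
 * VFIO_DEVICE_QUERY_GFX_PLANE support, region-backed variant only: the whole
 * framebuffer is reported as a single plane (format, size, stride and the
 * MDPY_DISPLAY_REGION index), which a display client such as QEMU can mmap
 * and scan out directly; dma-buf planes are not supported.
 */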
static int mdpy_query_gfx_plane(struct mdev_device *mdev,
                                struct vfio_device_gfx_plane_info *plane)
{
        struct mdev_state *mdev_state = mdev_get_drvdata(mdev);

        if (plane->flags & VFIO_GFX_PLANE_TYPE_PROBE) {
                if (plane->flags == (VFIO_GFX_PLANE_TYPE_PROBE |
                                     VFIO_GFX_PLANE_TYPE_REGION))
                        return 0;
                return -EINVAL;
        }

        if (plane->flags != VFIO_GFX_PLANE_TYPE_REGION)
                return -EINVAL;

        plane->drm_format = mdev_state->type->format;
        plane->width = mdev_state->type->width;
        plane->height = mdev_state->type->height;
        plane->stride = (mdev_state->type->width *
                         mdev_state->type->bytepp);
        plane->size = mdev_state->memsize;
        plane->region_index = MDPY_DISPLAY_REGION;

        plane->drm_format_mod = 0;
        plane->x_pos = 0;
        plane->y_pos = 0;
        plane->x_hot = 0;
        plane->y_hot = 0;

        return 0;
}

static long mdpy_ioctl(struct mdev_device *mdev, unsigned int cmd,
                       unsigned long arg)
{
        int ret = 0;
        unsigned long minsz;
        struct mdev_state *mdev_state;

        mdev_state = mdev_get_drvdata(mdev);

        switch (cmd) {
        case VFIO_DEVICE_GET_INFO:
        {
                struct vfio_device_info info;

                minsz = offsetofend(struct vfio_device_info, num_irqs);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                ret = mdpy_get_device_info(mdev, &info);
                if (ret)
                        return ret;

                memcpy(&mdev_state->dev_info, &info, sizeof(info));

                if (copy_to_user((void __user *)arg, &info, minsz))
                        return -EFAULT;

                return 0;
        }
        case VFIO_DEVICE_GET_REGION_INFO:
        {
                struct vfio_region_info info;
                u16 cap_type_id = 0;
                void *cap_type = NULL;

                minsz = offsetofend(struct vfio_region_info, offset);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                ret = mdpy_get_region_info(mdev, &info, &cap_type_id,
                                           &cap_type);
                if (ret)
                        return ret;

                if (copy_to_user((void __user *)arg, &info, minsz))
                        return -EFAULT;

                return 0;
        }

        case VFIO_DEVICE_GET_IRQ_INFO:
        {
                struct vfio_irq_info info;

                minsz = offsetofend(struct vfio_irq_info, count);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if ((info.argsz < minsz) ||
                    (info.index >= mdev_state->dev_info.num_irqs))
                        return -EINVAL;

                ret = mdpy_get_irq_info(mdev, &info);
                if (ret)
                        return ret;

                if (copy_to_user((void __user *)arg, &info, minsz))
                        return -EFAULT;

                return 0;
        }

        case VFIO_DEVICE_QUERY_GFX_PLANE:
        {
                struct vfio_device_gfx_plane_info plane;

                minsz = offsetofend(struct vfio_device_gfx_plane_info,
                                    region_index);

                if (copy_from_user(&plane, (void __user *)arg, minsz))
                        return -EFAULT;

                if (plane.argsz < minsz)
                        return -EINVAL;

                ret = mdpy_query_gfx_plane(mdev, &plane);
                if (ret)
                        return ret;

                if (copy_to_user((void __user *)arg, &plane, minsz))
                        return -EFAULT;

                return 0;
        }

        case VFIO_DEVICE_SET_IRQS:
                return -EINVAL;

        case VFIO_DEVICE_RESET:
                return mdpy_reset(mdev);
        }
        return -ENOTTY;
}

static int mdpy_open(struct mdev_device *mdev)
{
        if (!try_module_get(THIS_MODULE))
                return -ENODEV;

        return 0;
}

static void mdpy_close(struct mdev_device *mdev)
{
        module_put(THIS_MODULE);
}

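/*
 * Per-instance sysfs attributes: each mdev gets a "vendor" group with a
 * read-only "resolution" file reporting the configured mode, e.g.
 * "1024x768" for the xga type.
 */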
static ssize_t
resolution_show(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        struct mdev_device *mdev = mdev_from_dev(dev);
        struct mdev_state *mdev_state = mdev_get_drvdata(mdev);

        return sprintf(buf, "%dx%d\n",
                       mdev_state->type->width,
                       mdev_state->type->height);
}
static DEVICE_ATTR_RO(resolution);

static struct attribute *mdev_dev_attrs[] = {
        &dev_attr_resolution.attr,
        NULL,
};

static const struct attribute_group mdev_dev_group = {
        .name  = "vendor",
        .attrs = mdev_dev_attrs,
};

const struct attribute_group *mdev_dev_groups[] = {
        &mdev_dev_group,
        NULL,
};

static ssize_t
name_show(struct kobject *kobj, struct device *dev, char *buf)
{
        return sprintf(buf, "%s\n", kobj->name);
}
MDEV_TYPE_ATTR_RO(name);

static ssize_t
description_show(struct kobject *kobj, struct device *dev, char *buf)
{
        const struct mdpy_type *type = mdpy_find_type(kobj);

        return sprintf(buf, "virtual display, %dx%d framebuffer\n",
                       type ? type->width : 0,
                       type ? type->height : 0);
}
MDEV_TYPE_ATTR_RO(description);

static ssize_t
available_instances_show(struct kobject *kobj, struct device *dev, char *buf)
{
        return sprintf(buf, "%d\n", max_devices - mdpy_count);
}
MDEV_TYPE_ATTR_RO(available_instances);

static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
                               char *buf)
{
        return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}
MDEV_TYPE_ATTR_RO(device_api);

static struct attribute *mdev_types_attrs[] = {
        &mdev_type_attr_name.attr,
        &mdev_type_attr_description.attr,
        &mdev_type_attr_device_api.attr,
        &mdev_type_attr_available_instances.attr,
        NULL,
};

static struct attribute_group mdev_type_group1 = {
        .name  = MDPY_TYPE_1,
        .attrs = mdev_types_attrs,
};

static struct attribute_group mdev_type_group2 = {
        .name  = MDPY_TYPE_2,
        .attrs = mdev_types_attrs,
};

static struct attribute_group mdev_type_group3 = {
        .name  = MDPY_TYPE_3,
        .attrs = mdev_types_attrs,
};

static struct attribute_group *mdev_type_groups[] = {
        &mdev_type_group1,
        &mdev_type_group2,
        &mdev_type_group3,
        NULL,
};

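/*
 * Callbacks handed to the mdev core below; the vfio-mdev bus driver routes
 * the VFIO device fd's read/write/ioctl/mmap to them and calls create/remove
 * from the sysfs interface.
 */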
static const struct mdev_parent_ops mdev_fops = {
        .owner = THIS_MODULE,
        .mdev_attr_groups = mdev_dev_groups,
        .supported_type_groups = mdev_type_groups,
        .create = mdpy_create,
        .remove = mdpy_remove,
        .open = mdpy_open,
        .release = mdpy_close,
        .read = mdpy_read,
        .write = mdpy_write,
        .ioctl = mdpy_ioctl,
        .mmap = mdpy_mmap,
};

static const struct file_operations vd_fops = {
        .owner = THIS_MODULE,
};

static void mdpy_device_release(struct device *dev)
{
        /* nothing to do, mdpy_dev is a static device */
}

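/*
 * Module init: allocate a char device region and a class, register one
 * parent device named "mdpy", and register it with the mdev core so the
 * supported type groups show up under mdev_supported_types in sysfs.
 */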
static int __init mdpy_dev_init(void)
{
        int ret = 0;

        ret = alloc_chrdev_region(&mdpy_devt, 0, MINORMASK + 1, MDPY_NAME);
        if (ret < 0) {
                pr_err("Error: failed to register mdpy_dev, err: %d\n", ret);
                return ret;
        }
        cdev_init(&mdpy_cdev, &vd_fops);
        cdev_add(&mdpy_cdev, mdpy_devt, MINORMASK + 1);
        pr_info("%s: major %d\n", __func__, MAJOR(mdpy_devt));

        mdpy_class = class_create(THIS_MODULE, MDPY_CLASS_NAME);
        if (IS_ERR(mdpy_class)) {
                pr_err("Error: failed to register mdpy_dev class\n");
                ret = PTR_ERR(mdpy_class);
                goto failed1;
        }
        mdpy_dev.class = mdpy_class;
        mdpy_dev.release = mdpy_device_release;
        dev_set_name(&mdpy_dev, "%s", MDPY_NAME);

        ret = device_register(&mdpy_dev);
        if (ret)
                goto failed2;

        ret = mdev_register_device(&mdpy_dev, &mdev_fops);
        if (ret)
                goto failed3;

        return 0;

failed3:
        device_unregister(&mdpy_dev);
failed2:
        class_destroy(mdpy_class);
failed1:
        cdev_del(&mdpy_cdev);
        unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
        return ret;
}

static void __exit mdpy_dev_exit(void)
{
        mdpy_dev.bus = NULL;
        mdev_unregister_device(&mdpy_dev);

        device_unregister(&mdpy_dev);
        cdev_del(&mdpy_cdev);
        unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
        class_destroy(mdpy_class);
        mdpy_class = NULL;
}

module_init(mdpy_dev_init)
module_exit(mdpy_dev_exit)