samples/vfio-mdev/mbochs.c


DEFINITIONS

This source file includes the following definitions.
  1. vbe_name
  2. mbochs_find_type
  3. mbochs_create_config_space
  4. mbochs_check_framebuffer
  5. mbochs_modes_equal
  6. handle_pci_cfg_write
  7. handle_mmio_write
  8. handle_mmio_read
  9. handle_edid_regs
  10. handle_edid_blob
  11. mdev_access
  12. mbochs_reset
  13. mbochs_create
  14. mbochs_remove
  15. mbochs_read
  16. mbochs_write
  17. __mbochs_get_page
  18. mbochs_get_page
  19. mbochs_put_pages
  20. mbochs_region_vm_fault
  21. mbochs_mmap
  22. mbochs_dmabuf_vm_fault
  23. mbochs_mmap_dmabuf
  24. mbochs_print_dmabuf
  25. mbochs_map_dmabuf
  26. mbochs_unmap_dmabuf
  27. mbochs_release_dmabuf
  28. mbochs_kmap_dmabuf
  29. mbochs_kunmap_dmabuf
  30. mbochs_dmabuf_alloc
  31. mbochs_dmabuf_find_by_mode
  32. mbochs_dmabuf_find_by_id
  33. mbochs_dmabuf_export
  34. mbochs_get_region_info
  35. mbochs_get_irq_info
  36. mbochs_get_device_info
  37. mbochs_query_gfx_plane
  38. mbochs_get_gfx_dmabuf
  39. mbochs_ioctl
  40. mbochs_open
  41. mbochs_close
  42. memory_show
  43. name_show
  44. description_show
  45. available_instances_show
  46. device_api_show
  47. mbochs_device_release
  48. mbochs_dev_init
  49. mbochs_dev_exit

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Mediated virtual PCI display host device driver
   4  *
   5  * Emulate enough of qemu stdvga to make bochs-drm.ko happy.  That is
   6  * basically the vram memory bar and the bochs dispi interface vbe
   7  * registers in the mmio register bar.  Specifically it does *not*
   8  * include any legacy vga stuff.  Device looks a lot like "qemu -device
   9  * secondary-vga".
  10  *
  11  *   (c) Gerd Hoffmann <kraxel@redhat.com>
  12  *
  13  * based on mtty driver which is:
  14  *   Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
  15  *       Author: Neo Jia <cjia@nvidia.com>
  16  *               Kirti Wankhede <kwankhede@nvidia.com>
  17  *
  18  * This program is free software; you can redistribute it and/or modify
  19  * it under the terms of the GNU General Public License version 2 as
  20  * published by the Free Software Foundation.
  21  */
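
      /*
       * Guest-visible layout (see mbochs_create_config_space() and
       * handle_mmio_read/write below):
       *
       *   BAR0: prefetchable framebuffer memory, sized per device type.
       *   BAR2: one page of MMIO; the EDID block is readable at offsets
       *         0x000-0x3ff and the bochs dispi registers appear as
       *         16-bit values at offset 0x500 + 2 * register index.
       *
       * A mode set typically programs XRES, YRES and BPP and then sets
       * VBE_DISPI_ENABLED in the ENABLE register; mbochs_check_framebuffer()
       * translates that register state into a struct mbochs_mode.
       *
       * Instances are created through the usual mdev sysfs interface,
       * roughly (type directory name taken from the mbochs_types table):
       *   echo $UUID > mdev_supported_types/mbochs-small/create
       */
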
  22 #include <linux/init.h>
  23 #include <linux/module.h>
  24 #include <linux/device.h>
  25 #include <linux/kernel.h>
  26 #include <linux/slab.h>
  27 #include <linux/vmalloc.h>
  28 #include <linux/cdev.h>
  29 #include <linux/vfio.h>
  30 #include <linux/iommu.h>
  31 #include <linux/sysfs.h>
  32 #include <linux/mdev.h>
  33 #include <linux/pci.h>
  34 #include <linux/dma-buf.h>
  35 #include <linux/highmem.h>
  36 #include <drm/drm_fourcc.h>
  37 #include <drm/drm_rect.h>
  38 #include <drm/drm_modeset_lock.h>
  39 #include <drm/drm_property.h>
  40 #include <drm/drm_plane.h>
  41 
  42 
  43 #define VBE_DISPI_INDEX_ID              0x0
  44 #define VBE_DISPI_INDEX_XRES            0x1
  45 #define VBE_DISPI_INDEX_YRES            0x2
  46 #define VBE_DISPI_INDEX_BPP             0x3
  47 #define VBE_DISPI_INDEX_ENABLE          0x4
  48 #define VBE_DISPI_INDEX_BANK            0x5
  49 #define VBE_DISPI_INDEX_VIRT_WIDTH      0x6
  50 #define VBE_DISPI_INDEX_VIRT_HEIGHT     0x7
  51 #define VBE_DISPI_INDEX_X_OFFSET        0x8
  52 #define VBE_DISPI_INDEX_Y_OFFSET        0x9
  53 #define VBE_DISPI_INDEX_VIDEO_MEMORY_64K 0xa
  54 #define VBE_DISPI_INDEX_COUNT           0xb
  55 
  56 #define VBE_DISPI_ID0                   0xB0C0
  57 #define VBE_DISPI_ID1                   0xB0C1
  58 #define VBE_DISPI_ID2                   0xB0C2
  59 #define VBE_DISPI_ID3                   0xB0C3
  60 #define VBE_DISPI_ID4                   0xB0C4
  61 #define VBE_DISPI_ID5                   0xB0C5
  62 
  63 #define VBE_DISPI_DISABLED              0x00
  64 #define VBE_DISPI_ENABLED               0x01
  65 #define VBE_DISPI_GETCAPS               0x02
  66 #define VBE_DISPI_8BIT_DAC              0x20
  67 #define VBE_DISPI_LFB_ENABLED           0x40
  68 #define VBE_DISPI_NOCLEARMEM            0x80
  69 
  70 
  71 #define MBOCHS_NAME               "mbochs"
  72 #define MBOCHS_CLASS_NAME         "mbochs"
  73 
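      /*
       * Layout of the per-device file offset space used by read/write
       * and reported by mbochs_get_region_info():
       *
       *   0             PCI config space (MBOCHS_CONFIG_SPACE_SIZE bytes)
       *   1 * PAGE_SIZE MMIO BAR (dispi registers, EDID block reads)
       *   2 * PAGE_SIZE EDID region (registers in the first half, EDID
       *                 blob starting at MBOCHS_EDID_BLOB_OFFSET)
       *   3 * PAGE_SIZE framebuffer memory BAR (mdev_state->memsize bytes)
       */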
  74 #define MBOCHS_EDID_REGION_INDEX  VFIO_PCI_NUM_REGIONS
  75 #define MBOCHS_NUM_REGIONS        (MBOCHS_EDID_REGION_INDEX+1)
  76 
  77 #define MBOCHS_CONFIG_SPACE_SIZE  0xff
  78 #define MBOCHS_MMIO_BAR_OFFSET    PAGE_SIZE
  79 #define MBOCHS_MMIO_BAR_SIZE      PAGE_SIZE
  80 #define MBOCHS_EDID_OFFSET        (MBOCHS_MMIO_BAR_OFFSET +     \
  81                                    MBOCHS_MMIO_BAR_SIZE)
  82 #define MBOCHS_EDID_SIZE          PAGE_SIZE
  83 #define MBOCHS_MEMORY_BAR_OFFSET  (MBOCHS_EDID_OFFSET + \
  84                                    MBOCHS_EDID_SIZE)
  85 
  86 #define MBOCHS_EDID_BLOB_OFFSET   (MBOCHS_EDID_SIZE/2)
  87 
  88 #define STORE_LE16(addr, val)   (*(u16 *)addr = val)
  89 #define STORE_LE32(addr, val)   (*(u32 *)addr = val)
  90 
  91 
  92 MODULE_LICENSE("GPL v2");
  93 
  94 static int max_mbytes = 256;
  95 module_param_named(count, max_mbytes, int, 0444);
   96 MODULE_PARM_DESC(count, "megabytes available to " MBOCHS_NAME " devices");
  97 
  98 
  99 #define MBOCHS_TYPE_1 "small"
 100 #define MBOCHS_TYPE_2 "medium"
 101 #define MBOCHS_TYPE_3 "large"
 102 
 103 static const struct mbochs_type {
 104         const char *name;
 105         u32 mbytes;
 106         u32 max_x;
 107         u32 max_y;
 108 } mbochs_types[] = {
 109         {
 110                 .name   = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_1,
 111                 .mbytes = 4,
 112                 .max_x  = 800,
 113                 .max_y  = 600,
 114         }, {
 115                 .name   = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_2,
 116                 .mbytes = 16,
 117                 .max_x  = 1920,
 118                 .max_y  = 1440,
 119         }, {
 120                 .name   = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_3,
 121                 .mbytes = 64,
 122                 .max_x  = 0,
 123                 .max_y  = 0,
 124         },
 125 };
 126 
 127 
 128 static dev_t            mbochs_devt;
 129 static struct class     *mbochs_class;
 130 static struct cdev      mbochs_cdev;
 131 static struct device    mbochs_dev;
 132 static int              mbochs_used_mbytes;
 133 
 134 struct vfio_region_info_ext {
 135         struct vfio_region_info          base;
 136         struct vfio_region_info_cap_type type;
 137 };
 138 
 139 struct mbochs_mode {
 140         u32 drm_format;
 141         u32 bytepp;
 142         u32 width;
 143         u32 height;
 144         u32 stride;
 145         u32 __pad;
 146         u64 offset;
 147         u64 size;
 148 };
 149 
 150 struct mbochs_dmabuf {
 151         struct mbochs_mode mode;
 152         u32 id;
 153         struct page **pages;
 154         pgoff_t pagecount;
 155         struct dma_buf *buf;
 156         struct mdev_state *mdev_state;
 157         struct list_head next;
 158         bool unlinked;
 159 };
 160 
 161 /* State of each mdev device */
 162 struct mdev_state {
 163         u8 *vconfig;
 164         u64 bar_mask[3];
 165         u32 memory_bar_mask;
 166         struct mutex ops_lock;
 167         struct mdev_device *mdev;
 168 
 169         const struct mbochs_type *type;
 170         u16 vbe[VBE_DISPI_INDEX_COUNT];
 171         u64 memsize;
 172         struct page **pages;
 173         pgoff_t pagecount;
 174         struct vfio_region_gfx_edid edid_regs;
 175         u8 edid_blob[0x400];
 176 
 177         struct list_head dmabufs;
 178         u32 active_id;
 179         u32 next_id;
 180 };
 181 
 182 static const char *vbe_name_list[VBE_DISPI_INDEX_COUNT] = {
 183         [VBE_DISPI_INDEX_ID]               = "id",
 184         [VBE_DISPI_INDEX_XRES]             = "xres",
 185         [VBE_DISPI_INDEX_YRES]             = "yres",
 186         [VBE_DISPI_INDEX_BPP]              = "bpp",
 187         [VBE_DISPI_INDEX_ENABLE]           = "enable",
 188         [VBE_DISPI_INDEX_BANK]             = "bank",
 189         [VBE_DISPI_INDEX_VIRT_WIDTH]       = "virt-width",
 190         [VBE_DISPI_INDEX_VIRT_HEIGHT]      = "virt-height",
 191         [VBE_DISPI_INDEX_X_OFFSET]         = "x-offset",
 192         [VBE_DISPI_INDEX_Y_OFFSET]         = "y-offset",
 193         [VBE_DISPI_INDEX_VIDEO_MEMORY_64K] = "video-mem",
 194 };
 195 
 196 static const char *vbe_name(u32 index)
 197 {
 198         if (index < ARRAY_SIZE(vbe_name_list))
 199                 return vbe_name_list[index];
 200         return "(invalid)";
 201 }
 202 
 203 static struct page *__mbochs_get_page(struct mdev_state *mdev_state,
 204                                       pgoff_t pgoff);
 205 static struct page *mbochs_get_page(struct mdev_state *mdev_state,
 206                                     pgoff_t pgoff);
 207 
 208 static const struct mbochs_type *mbochs_find_type(struct kobject *kobj)
 209 {
 210         int i;
 211 
 212         for (i = 0; i < ARRAY_SIZE(mbochs_types); i++)
 213                 if (strcmp(mbochs_types[i].name, kobj->name) == 0)
 214                         return mbochs_types + i;
 215         return NULL;
 216 }
 217 
 218 static void mbochs_create_config_space(struct mdev_state *mdev_state)
 219 {
 220         STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID],
 221                    0x1234);
 222         STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID],
 223                    0x1111);
 224         STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID],
 225                    PCI_SUBVENDOR_ID_REDHAT_QUMRANET);
 226         STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID],
 227                    PCI_SUBDEVICE_ID_QEMU);
 228 
 229         STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND],
 230                    PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
 231         STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE],
 232                    PCI_CLASS_DISPLAY_OTHER);
 233         mdev_state->vconfig[PCI_CLASS_REVISION] =  0x01;
 234 
 235         STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0],
 236                    PCI_BASE_ADDRESS_SPACE_MEMORY |
 237                    PCI_BASE_ADDRESS_MEM_TYPE_32  |
 238                    PCI_BASE_ADDRESS_MEM_PREFETCH);
 239         mdev_state->bar_mask[0] = ~(mdev_state->memsize) + 1;
 240 
 241         STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_2],
 242                    PCI_BASE_ADDRESS_SPACE_MEMORY |
 243                    PCI_BASE_ADDRESS_MEM_TYPE_32);
 244         mdev_state->bar_mask[2] = ~(MBOCHS_MMIO_BAR_SIZE) + 1;
 245 }
 246 
 247 static int mbochs_check_framebuffer(struct mdev_state *mdev_state,
 248                                     struct mbochs_mode *mode)
 249 {
 250         struct device *dev = mdev_dev(mdev_state->mdev);
 251         u16 *vbe = mdev_state->vbe;
 252         u32 virt_width;
 253 
 254         WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
 255 
 256         if (!(vbe[VBE_DISPI_INDEX_ENABLE] & VBE_DISPI_ENABLED))
 257                 goto nofb;
 258 
 259         memset(mode, 0, sizeof(*mode));
 260         switch (vbe[VBE_DISPI_INDEX_BPP]) {
 261         case 32:
 262                 mode->drm_format = DRM_FORMAT_XRGB8888;
 263                 mode->bytepp = 4;
 264                 break;
 265         default:
 266                 dev_info_ratelimited(dev, "%s: bpp %d not supported\n",
 267                                      __func__, vbe[VBE_DISPI_INDEX_BPP]);
 268                 goto nofb;
 269         }
 270 
 271         mode->width  = vbe[VBE_DISPI_INDEX_XRES];
 272         mode->height = vbe[VBE_DISPI_INDEX_YRES];
 273         virt_width  = vbe[VBE_DISPI_INDEX_VIRT_WIDTH];
 274         if (virt_width < mode->width)
 275                 virt_width = mode->width;
 276         mode->stride = virt_width * mode->bytepp;
 277         mode->size   = (u64)mode->stride * mode->height;
 278         mode->offset = ((u64)vbe[VBE_DISPI_INDEX_X_OFFSET] * mode->bytepp +
 279                        (u64)vbe[VBE_DISPI_INDEX_Y_OFFSET] * mode->stride);
 280 
 281         if (mode->width < 64 || mode->height < 64) {
 282                 dev_info_ratelimited(dev, "%s: invalid resolution %dx%d\n",
 283                                      __func__, mode->width, mode->height);
 284                 goto nofb;
 285         }
 286         if (mode->offset + mode->size > mdev_state->memsize) {
 287                 dev_info_ratelimited(dev, "%s: framebuffer memory overflow\n",
 288                                      __func__);
 289                 goto nofb;
 290         }
 291 
 292         return 0;
 293 
 294 nofb:
 295         memset(mode, 0, sizeof(*mode));
 296         return -EINVAL;
 297 }
 298 
 299 static bool mbochs_modes_equal(struct mbochs_mode *mode1,
 300                                struct mbochs_mode *mode2)
 301 {
 302         return memcmp(mode1, mode2, sizeof(struct mbochs_mode)) == 0;
 303 }
 304 
 305 static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
 306                                  char *buf, u32 count)
 307 {
 308         struct device *dev = mdev_dev(mdev_state->mdev);
 309         int index = (offset - PCI_BASE_ADDRESS_0) / 0x04;
 310         u32 cfg_addr;
 311 
 312         switch (offset) {
 313         case PCI_BASE_ADDRESS_0:
 314         case PCI_BASE_ADDRESS_2:
 315                 cfg_addr = *(u32 *)buf;
 316 
 317                 if (cfg_addr == 0xffffffff) {
 318                         cfg_addr = (cfg_addr & mdev_state->bar_mask[index]);
 319                 } else {
 320                         cfg_addr &= PCI_BASE_ADDRESS_MEM_MASK;
 321                         if (cfg_addr)
 322                                 dev_info(dev, "BAR #%d @ 0x%x\n",
 323                                          index, cfg_addr);
 324                 }
 325 
 326                 cfg_addr |= (mdev_state->vconfig[offset] &
 327                              ~PCI_BASE_ADDRESS_MEM_MASK);
 328                 STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
 329                 break;
 330         }
 331 }
 332 
 333 static void handle_mmio_write(struct mdev_state *mdev_state, u16 offset,
 334                               char *buf, u32 count)
 335 {
 336         struct device *dev = mdev_dev(mdev_state->mdev);
 337         int index;
 338         u16 reg16;
 339 
 340         switch (offset) {
 341         case 0x400 ... 0x41f: /* vga ioports remapped */
 342                 goto unhandled;
 343         case 0x500 ... 0x515: /* bochs dispi interface */
 344                 if (count != 2)
 345                         goto unhandled;
 346                 index = (offset - 0x500) / 2;
 347                 reg16 = *(u16 *)buf;
 348                 if (index < ARRAY_SIZE(mdev_state->vbe))
 349                         mdev_state->vbe[index] = reg16;
 350                 dev_dbg(dev, "%s: vbe write %d = %d (%s)\n",
 351                         __func__, index, reg16, vbe_name(index));
 352                 break;
 353         case 0x600 ... 0x607: /* qemu extended regs */
 354                 goto unhandled;
 355         default:
 356 unhandled:
 357                 dev_dbg(dev, "%s: @0x%03x, count %d (unhandled)\n",
 358                         __func__, offset, count);
 359                 break;
 360         }
 361 }
 362 
 363 static void handle_mmio_read(struct mdev_state *mdev_state, u16 offset,
 364                              char *buf, u32 count)
 365 {
 366         struct device *dev = mdev_dev(mdev_state->mdev);
 367         struct vfio_region_gfx_edid *edid;
 368         u16 reg16 = 0;
 369         int index;
 370 
 371         switch (offset) {
 372         case 0x000 ... 0x3ff: /* edid block */
 373                 edid = &mdev_state->edid_regs;
 374                 if (edid->link_state != VFIO_DEVICE_GFX_LINK_STATE_UP ||
 375                     offset >= edid->edid_size) {
 376                         memset(buf, 0, count);
 377                         break;
 378                 }
 379                 memcpy(buf, mdev_state->edid_blob + offset, count);
 380                 break;
 381         case 0x500 ... 0x515: /* bochs dispi interface */
 382                 if (count != 2)
 383                         goto unhandled;
 384                 index = (offset - 0x500) / 2;
 385                 if (index < ARRAY_SIZE(mdev_state->vbe))
 386                         reg16 = mdev_state->vbe[index];
 387                 dev_dbg(dev, "%s: vbe read %d = %d (%s)\n",
 388                         __func__, index, reg16, vbe_name(index));
 389                 *(u16 *)buf = reg16;
 390                 break;
 391         default:
 392 unhandled:
 393                 dev_dbg(dev, "%s: @0x%03x, count %d (unhandled)\n",
 394                         __func__, offset, count);
 395                 memset(buf, 0, count);
 396                 break;
 397         }
 398 }
 399 
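      /*
       * The EDID region is one page, split in half: the first half
       * exposes struct vfio_region_gfx_edid (only link_state and
       * edid_size are writable), the second half, starting at
       * MBOCHS_EDID_BLOB_OFFSET, holds the EDID blob supplied by
       * userspace.  The guest reads the blob back through the MMIO BAR
       * at offsets 0x000-0x3ff once the link is up.
       */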
 400 static void handle_edid_regs(struct mdev_state *mdev_state, u16 offset,
 401                              char *buf, u32 count, bool is_write)
 402 {
 403         char *regs = (void *)&mdev_state->edid_regs;
 404 
 405         if (offset + count > sizeof(mdev_state->edid_regs))
 406                 return;
 407         if (count != 4)
 408                 return;
 409         if (offset % 4)
 410                 return;
 411 
 412         if (is_write) {
 413                 switch (offset) {
 414                 case offsetof(struct vfio_region_gfx_edid, link_state):
 415                 case offsetof(struct vfio_region_gfx_edid, edid_size):
 416                         memcpy(regs + offset, buf, count);
 417                         break;
 418                 default:
 419                         /* read-only regs */
 420                         break;
 421                 }
 422         } else {
 423                 memcpy(buf, regs + offset, count);
 424         }
 425 }
 426 
 427 static void handle_edid_blob(struct mdev_state *mdev_state, u16 offset,
 428                              char *buf, u32 count, bool is_write)
 429 {
 430         if (offset + count > mdev_state->edid_regs.edid_max_size)
 431                 return;
 432         if (is_write)
 433                 memcpy(mdev_state->edid_blob + offset, buf, count);
 434         else
 435                 memcpy(buf, mdev_state->edid_blob + offset, count);
 436 }
 437 
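      /*
       * Dispatch a single read or write to the region containing @pos:
       * PCI config space, the MMIO BAR, the EDID region or the
       * framebuffer memory BAR, using the fixed offsets defined above.
       * Returns the number of bytes handled, or a negative value for
       * unhandled offsets.
       */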
 438 static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
 439                            loff_t pos, bool is_write)
 440 {
 441         struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
 442         struct device *dev = mdev_dev(mdev);
 443         struct page *pg;
 444         loff_t poff;
 445         char *map;
 446         int ret = 0;
 447 
 448         mutex_lock(&mdev_state->ops_lock);
 449 
 450         if (pos < MBOCHS_CONFIG_SPACE_SIZE) {
 451                 if (is_write)
 452                         handle_pci_cfg_write(mdev_state, pos, buf, count);
 453                 else
 454                         memcpy(buf, (mdev_state->vconfig + pos), count);
 455 
 456         } else if (pos >= MBOCHS_MMIO_BAR_OFFSET &&
 457                    pos + count <= (MBOCHS_MMIO_BAR_OFFSET +
 458                                    MBOCHS_MMIO_BAR_SIZE)) {
 459                 pos -= MBOCHS_MMIO_BAR_OFFSET;
 460                 if (is_write)
 461                         handle_mmio_write(mdev_state, pos, buf, count);
 462                 else
 463                         handle_mmio_read(mdev_state, pos, buf, count);
 464 
 465         } else if (pos >= MBOCHS_EDID_OFFSET &&
 466                    pos + count <= (MBOCHS_EDID_OFFSET +
 467                                    MBOCHS_EDID_SIZE)) {
 468                 pos -= MBOCHS_EDID_OFFSET;
 469                 if (pos < MBOCHS_EDID_BLOB_OFFSET) {
 470                         handle_edid_regs(mdev_state, pos, buf, count, is_write);
 471                 } else {
 472                         pos -= MBOCHS_EDID_BLOB_OFFSET;
 473                         handle_edid_blob(mdev_state, pos, buf, count, is_write);
 474                 }
 475 
 476         } else if (pos >= MBOCHS_MEMORY_BAR_OFFSET &&
 477                    pos + count <=
 478                    MBOCHS_MEMORY_BAR_OFFSET + mdev_state->memsize) {
  479                 pos -= MBOCHS_MEMORY_BAR_OFFSET;
 480                 poff = pos & ~PAGE_MASK;
 481                 pg = __mbochs_get_page(mdev_state, pos >> PAGE_SHIFT);
 482                 map = kmap(pg);
 483                 if (is_write)
 484                         memcpy(map + poff, buf, count);
 485                 else
 486                         memcpy(buf, map + poff, count);
 487                 kunmap(pg);
 488                 put_page(pg);
 489 
 490         } else {
 491                 dev_dbg(dev, "%s: %s @0x%llx (unhandled)\n",
 492                         __func__, is_write ? "WR" : "RD", pos);
 493                 ret = -1;
 494                 goto accessfailed;
 495         }
 496 
 497         ret = count;
 498 
 499 
 500 accessfailed:
 501         mutex_unlock(&mdev_state->ops_lock);
 502 
 503         return ret;
 504 }
 505 
 506 static int mbochs_reset(struct mdev_device *mdev)
 507 {
 508         struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
 509         u32 size64k = mdev_state->memsize / (64 * 1024);
 510         int i;
 511 
 512         for (i = 0; i < ARRAY_SIZE(mdev_state->vbe); i++)
 513                 mdev_state->vbe[i] = 0;
 514         mdev_state->vbe[VBE_DISPI_INDEX_ID] = VBE_DISPI_ID5;
 515         mdev_state->vbe[VBE_DISPI_INDEX_VIDEO_MEMORY_64K] = size64k;
 516         return 0;
 517 }
 518 
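      /*
       * Each instance charges its video memory against the module-wide
       * max_mbytes budget ("count" module parameter); the
       * available_instances attribute reports how many more devices of
       * a given type still fit.
       */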
 519 static int mbochs_create(struct kobject *kobj, struct mdev_device *mdev)
 520 {
 521         const struct mbochs_type *type = mbochs_find_type(kobj);
 522         struct device *dev = mdev_dev(mdev);
 523         struct mdev_state *mdev_state;
 524 
 525         if (!type)
 526                 type = &mbochs_types[0];
 527         if (type->mbytes + mbochs_used_mbytes > max_mbytes)
 528                 return -ENOMEM;
 529 
 530         mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
 531         if (mdev_state == NULL)
 532                 return -ENOMEM;
 533 
 534         mdev_state->vconfig = kzalloc(MBOCHS_CONFIG_SPACE_SIZE, GFP_KERNEL);
 535         if (mdev_state->vconfig == NULL)
 536                 goto err_mem;
 537 
 538         mdev_state->memsize = type->mbytes * 1024 * 1024;
 539         mdev_state->pagecount = mdev_state->memsize >> PAGE_SHIFT;
 540         mdev_state->pages = kcalloc(mdev_state->pagecount,
 541                                     sizeof(struct page *),
 542                                     GFP_KERNEL);
 543         if (!mdev_state->pages)
 544                 goto err_mem;
 545 
 546         dev_info(dev, "%s: %s, %d MB, %ld pages\n", __func__,
 547                  kobj->name, type->mbytes, mdev_state->pagecount);
 548 
 549         mutex_init(&mdev_state->ops_lock);
 550         mdev_state->mdev = mdev;
 551         mdev_set_drvdata(mdev, mdev_state);
 552         INIT_LIST_HEAD(&mdev_state->dmabufs);
 553         mdev_state->next_id = 1;
 554 
 555         mdev_state->type = type;
 556         mdev_state->edid_regs.max_xres = type->max_x;
 557         mdev_state->edid_regs.max_yres = type->max_y;
 558         mdev_state->edid_regs.edid_offset = MBOCHS_EDID_BLOB_OFFSET;
 559         mdev_state->edid_regs.edid_max_size = sizeof(mdev_state->edid_blob);
 560         mbochs_create_config_space(mdev_state);
 561         mbochs_reset(mdev);
 562 
 563         mbochs_used_mbytes += type->mbytes;
 564         return 0;
 565 
 566 err_mem:
 567         kfree(mdev_state->vconfig);
 568         kfree(mdev_state);
 569         return -ENOMEM;
 570 }
 571 
 572 static int mbochs_remove(struct mdev_device *mdev)
 573 {
 574         struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
 575 
 576         mbochs_used_mbytes -= mdev_state->type->mbytes;
 577         mdev_set_drvdata(mdev, NULL);
 578         kfree(mdev_state->pages);
 579         kfree(mdev_state->vconfig);
 580         kfree(mdev_state);
 581         return 0;
 582 }
 583 
 584 static ssize_t mbochs_read(struct mdev_device *mdev, char __user *buf,
 585                            size_t count, loff_t *ppos)
 586 {
 587         unsigned int done = 0;
 588         int ret;
 589 
 590         while (count) {
 591                 size_t filled;
 592 
 593                 if (count >= 4 && !(*ppos % 4)) {
 594                         u32 val;
 595 
 596                         ret =  mdev_access(mdev, (char *)&val, sizeof(val),
 597                                            *ppos, false);
 598                         if (ret <= 0)
 599                                 goto read_err;
 600 
 601                         if (copy_to_user(buf, &val, sizeof(val)))
 602                                 goto read_err;
 603 
 604                         filled = 4;
 605                 } else if (count >= 2 && !(*ppos % 2)) {
 606                         u16 val;
 607 
 608                         ret = mdev_access(mdev, (char *)&val, sizeof(val),
 609                                           *ppos, false);
 610                         if (ret <= 0)
 611                                 goto read_err;
 612 
 613                         if (copy_to_user(buf, &val, sizeof(val)))
 614                                 goto read_err;
 615 
 616                         filled = 2;
 617                 } else {
 618                         u8 val;
 619 
 620                         ret = mdev_access(mdev, (char *)&val, sizeof(val),
 621                                           *ppos, false);
 622                         if (ret <= 0)
 623                                 goto read_err;
 624 
 625                         if (copy_to_user(buf, &val, sizeof(val)))
 626                                 goto read_err;
 627 
 628                         filled = 1;
 629                 }
 630 
 631                 count -= filled;
 632                 done += filled;
 633                 *ppos += filled;
 634                 buf += filled;
 635         }
 636 
 637         return done;
 638 
 639 read_err:
 640         return -EFAULT;
 641 }
 642 
 643 static ssize_t mbochs_write(struct mdev_device *mdev, const char __user *buf,
 644                             size_t count, loff_t *ppos)
 645 {
 646         unsigned int done = 0;
 647         int ret;
 648 
 649         while (count) {
 650                 size_t filled;
 651 
 652                 if (count >= 4 && !(*ppos % 4)) {
 653                         u32 val;
 654 
 655                         if (copy_from_user(&val, buf, sizeof(val)))
 656                                 goto write_err;
 657 
 658                         ret = mdev_access(mdev, (char *)&val, sizeof(val),
 659                                           *ppos, true);
 660                         if (ret <= 0)
 661                                 goto write_err;
 662 
 663                         filled = 4;
 664                 } else if (count >= 2 && !(*ppos % 2)) {
 665                         u16 val;
 666 
 667                         if (copy_from_user(&val, buf, sizeof(val)))
 668                                 goto write_err;
 669 
 670                         ret = mdev_access(mdev, (char *)&val, sizeof(val),
 671                                           *ppos, true);
 672                         if (ret <= 0)
 673                                 goto write_err;
 674 
 675                         filled = 2;
 676                 } else {
 677                         u8 val;
 678 
 679                         if (copy_from_user(&val, buf, sizeof(val)))
 680                                 goto write_err;
 681 
 682                         ret = mdev_access(mdev, (char *)&val, sizeof(val),
 683                                           *ppos, true);
 684                         if (ret <= 0)
 685                                 goto write_err;
 686 
 687                         filled = 1;
 688                 }
 689                 count -= filled;
 690                 done += filled;
 691                 *ppos += filled;
 692                 buf += filled;
 693         }
 694 
 695         return done;
 696 write_err:
 697         return -EFAULT;
 698 }
 699 
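      /*
       * Framebuffer pages are allocated lazily on first use.  The
       * pages[] array keeps one reference per allocated page (dropped
       * again by mbochs_put_pages() on release); every caller of the
       * *_get_page() helpers gets an additional reference of its own.
       */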
 700 static struct page *__mbochs_get_page(struct mdev_state *mdev_state,
 701                                       pgoff_t pgoff)
 702 {
 703         WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
 704 
 705         if (!mdev_state->pages[pgoff]) {
 706                 mdev_state->pages[pgoff] =
 707                         alloc_pages(GFP_HIGHUSER | __GFP_ZERO, 0);
 708                 if (!mdev_state->pages[pgoff])
 709                         return NULL;
 710         }
 711 
 712         get_page(mdev_state->pages[pgoff]);
 713         return mdev_state->pages[pgoff];
 714 }
 715 
 716 static struct page *mbochs_get_page(struct mdev_state *mdev_state,
 717                                     pgoff_t pgoff)
 718 {
 719         struct page *page;
 720 
 721         if (WARN_ON(pgoff >= mdev_state->pagecount))
 722                 return NULL;
 723 
 724         mutex_lock(&mdev_state->ops_lock);
 725         page = __mbochs_get_page(mdev_state, pgoff);
 726         mutex_unlock(&mdev_state->ops_lock);
 727 
 728         return page;
 729 }
 730 
 731 static void mbochs_put_pages(struct mdev_state *mdev_state)
 732 {
 733         struct device *dev = mdev_dev(mdev_state->mdev);
 734         int i, count = 0;
 735 
 736         WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
 737 
 738         for (i = 0; i < mdev_state->pagecount; i++) {
 739                 if (!mdev_state->pages[i])
 740                         continue;
 741                 put_page(mdev_state->pages[i]);
 742                 mdev_state->pages[i] = NULL;
 743                 count++;
 744         }
 745         dev_dbg(dev, "%s: %d pages released\n", __func__, count);
 746 }
 747 
 748 static vm_fault_t mbochs_region_vm_fault(struct vm_fault *vmf)
 749 {
 750         struct vm_area_struct *vma = vmf->vma;
 751         struct mdev_state *mdev_state = vma->vm_private_data;
 752         pgoff_t page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 753 
 754         if (page_offset >= mdev_state->pagecount)
 755                 return VM_FAULT_SIGBUS;
 756 
 757         vmf->page = mbochs_get_page(mdev_state, page_offset);
 758         if (!vmf->page)
 759                 return VM_FAULT_SIGBUS;
 760 
 761         return 0;
 762 }
 763 
 764 static const struct vm_operations_struct mbochs_region_vm_ops = {
 765         .fault = mbochs_region_vm_fault,
 766 };
 767 
 768 static int mbochs_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
 769 {
 770         struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
 771 
 772         if (vma->vm_pgoff != MBOCHS_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
 773                 return -EINVAL;
 774         if (vma->vm_end < vma->vm_start)
 775                 return -EINVAL;
 776         if (vma->vm_end - vma->vm_start > mdev_state->memsize)
 777                 return -EINVAL;
 778         if ((vma->vm_flags & VM_SHARED) == 0)
 779                 return -EINVAL;
 780 
 781         vma->vm_ops = &mbochs_region_vm_ops;
 782         vma->vm_private_data = mdev_state;
 783         return 0;
 784 }
 785 
 786 static vm_fault_t mbochs_dmabuf_vm_fault(struct vm_fault *vmf)
 787 {
 788         struct vm_area_struct *vma = vmf->vma;
 789         struct mbochs_dmabuf *dmabuf = vma->vm_private_data;
 790 
 791         if (WARN_ON(vmf->pgoff >= dmabuf->pagecount))
 792                 return VM_FAULT_SIGBUS;
 793 
 794         vmf->page = dmabuf->pages[vmf->pgoff];
 795         get_page(vmf->page);
 796         return 0;
 797 }
 798 
 799 static const struct vm_operations_struct mbochs_dmabuf_vm_ops = {
 800         .fault = mbochs_dmabuf_vm_fault,
 801 };
 802 
 803 static int mbochs_mmap_dmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
 804 {
 805         struct mbochs_dmabuf *dmabuf = buf->priv;
 806         struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
 807 
 808         dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);
 809 
 810         if ((vma->vm_flags & VM_SHARED) == 0)
 811                 return -EINVAL;
 812 
 813         vma->vm_ops = &mbochs_dmabuf_vm_ops;
 814         vma->vm_private_data = dmabuf;
 815         return 0;
 816 }
 817 
 818 static void mbochs_print_dmabuf(struct mbochs_dmabuf *dmabuf,
 819                                 const char *prefix)
 820 {
 821         struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
 822         u32 fourcc = dmabuf->mode.drm_format;
 823 
 824         dev_dbg(dev, "%s/%d: %c%c%c%c, %dx%d, stride %d, off 0x%llx, size 0x%llx, pages %ld\n",
 825                 prefix, dmabuf->id,
 826                 fourcc ? ((fourcc >>  0) & 0xff) : '-',
 827                 fourcc ? ((fourcc >>  8) & 0xff) : '-',
 828                 fourcc ? ((fourcc >> 16) & 0xff) : '-',
 829                 fourcc ? ((fourcc >> 24) & 0xff) : '-',
 830                 dmabuf->mode.width, dmabuf->mode.height, dmabuf->mode.stride,
 831                 dmabuf->mode.offset, dmabuf->mode.size, dmabuf->pagecount);
 832 }
 833 
 834 static struct sg_table *mbochs_map_dmabuf(struct dma_buf_attachment *at,
 835                                           enum dma_data_direction direction)
 836 {
 837         struct mbochs_dmabuf *dmabuf = at->dmabuf->priv;
 838         struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
 839         struct sg_table *sg;
 840 
 841         dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);
 842 
 843         sg = kzalloc(sizeof(*sg), GFP_KERNEL);
 844         if (!sg)
 845                 goto err1;
 846         if (sg_alloc_table_from_pages(sg, dmabuf->pages, dmabuf->pagecount,
 847                                       0, dmabuf->mode.size, GFP_KERNEL) < 0)
 848                 goto err2;
 849         if (!dma_map_sg(at->dev, sg->sgl, sg->nents, direction))
 850                 goto err3;
 851 
 852         return sg;
 853 
 854 err3:
 855         sg_free_table(sg);
 856 err2:
 857         kfree(sg);
 858 err1:
 859         return ERR_PTR(-ENOMEM);
 860 }
 861 
 862 static void mbochs_unmap_dmabuf(struct dma_buf_attachment *at,
 863                                 struct sg_table *sg,
 864                                 enum dma_data_direction direction)
 865 {
 866         struct mbochs_dmabuf *dmabuf = at->dmabuf->priv;
 867         struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
 868 
 869         dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);
 870 
 871         sg_free_table(sg);
 872         kfree(sg);
 873 }
 874 
 875 static void mbochs_release_dmabuf(struct dma_buf *buf)
 876 {
 877         struct mbochs_dmabuf *dmabuf = buf->priv;
 878         struct mdev_state *mdev_state = dmabuf->mdev_state;
 879         struct device *dev = mdev_dev(mdev_state->mdev);
 880         pgoff_t pg;
 881 
 882         dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);
 883 
 884         for (pg = 0; pg < dmabuf->pagecount; pg++)
 885                 put_page(dmabuf->pages[pg]);
 886 
 887         mutex_lock(&mdev_state->ops_lock);
 888         dmabuf->buf = NULL;
 889         if (dmabuf->unlinked)
 890                 kfree(dmabuf);
 891         mutex_unlock(&mdev_state->ops_lock);
 892 }
 893 
 894 static void *mbochs_kmap_dmabuf(struct dma_buf *buf, unsigned long page_num)
 895 {
 896         struct mbochs_dmabuf *dmabuf = buf->priv;
 897         struct page *page = dmabuf->pages[page_num];
 898 
 899         return kmap(page);
 900 }
 901 
 902 static void mbochs_kunmap_dmabuf(struct dma_buf *buf, unsigned long page_num,
 903                                  void *vaddr)
 904 {
  905         kunmap(((struct mbochs_dmabuf *)buf->priv)->pages[page_num]);
 906 }
 907 
 908 static struct dma_buf_ops mbochs_dmabuf_ops = {
 909         .map_dma_buf      = mbochs_map_dmabuf,
 910         .unmap_dma_buf    = mbochs_unmap_dmabuf,
 911         .release          = mbochs_release_dmabuf,
 912         .map              = mbochs_kmap_dmabuf,
 913         .unmap            = mbochs_kunmap_dmabuf,
 914         .mmap             = mbochs_mmap_dmabuf,
 915 };
 916 
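      /*
       * A dmabuf takes its own references on the framebuffer pages
       * backing the requested mode, so the exported buffer and the
       * memory BAR share the same pages and guest screen updates are
       * visible to the importer without copying.
       */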
 917 static struct mbochs_dmabuf *mbochs_dmabuf_alloc(struct mdev_state *mdev_state,
 918                                                  struct mbochs_mode *mode)
 919 {
 920         struct mbochs_dmabuf *dmabuf;
 921         pgoff_t page_offset, pg;
 922 
 923         WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
 924 
 925         dmabuf = kzalloc(sizeof(struct mbochs_dmabuf), GFP_KERNEL);
 926         if (!dmabuf)
 927                 return NULL;
 928 
 929         dmabuf->mode = *mode;
 930         dmabuf->id = mdev_state->next_id++;
 931         dmabuf->pagecount = DIV_ROUND_UP(mode->size, PAGE_SIZE);
 932         dmabuf->pages = kcalloc(dmabuf->pagecount, sizeof(struct page *),
 933                                 GFP_KERNEL);
 934         if (!dmabuf->pages)
 935                 goto err_free_dmabuf;
 936 
 937         page_offset = dmabuf->mode.offset >> PAGE_SHIFT;
 938         for (pg = 0; pg < dmabuf->pagecount; pg++) {
 939                 dmabuf->pages[pg] = __mbochs_get_page(mdev_state,
 940                                                       page_offset + pg);
 941                 if (!dmabuf->pages[pg])
 942                         goto err_free_pages;
 943         }
 944 
 945         dmabuf->mdev_state = mdev_state;
 946         list_add(&dmabuf->next, &mdev_state->dmabufs);
 947 
 948         mbochs_print_dmabuf(dmabuf, __func__);
 949         return dmabuf;
 950 
 951 err_free_pages:
 952         while (pg > 0)
 953                 put_page(dmabuf->pages[--pg]);
 954         kfree(dmabuf->pages);
 955 err_free_dmabuf:
 956         kfree(dmabuf);
 957         return NULL;
 958 }
 959 
 960 static struct mbochs_dmabuf *
 961 mbochs_dmabuf_find_by_mode(struct mdev_state *mdev_state,
 962                            struct mbochs_mode *mode)
 963 {
 964         struct mbochs_dmabuf *dmabuf;
 965 
 966         WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
 967 
 968         list_for_each_entry(dmabuf, &mdev_state->dmabufs, next)
 969                 if (mbochs_modes_equal(&dmabuf->mode, mode))
 970                         return dmabuf;
 971 
 972         return NULL;
 973 }
 974 
 975 static struct mbochs_dmabuf *
 976 mbochs_dmabuf_find_by_id(struct mdev_state *mdev_state, u32 id)
 977 {
 978         struct mbochs_dmabuf *dmabuf;
 979 
 980         WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
 981 
 982         list_for_each_entry(dmabuf, &mdev_state->dmabufs, next)
 983                 if (dmabuf->id == id)
 984                         return dmabuf;
 985 
 986         return NULL;
 987 }
 988 
 989 static int mbochs_dmabuf_export(struct mbochs_dmabuf *dmabuf)
 990 {
 991         struct mdev_state *mdev_state = dmabuf->mdev_state;
 992         struct device *dev = mdev_dev(mdev_state->mdev);
 993         DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 994         struct dma_buf *buf;
 995 
 996         WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
 997 
 998         if (!IS_ALIGNED(dmabuf->mode.offset, PAGE_SIZE)) {
 999                 dev_info_ratelimited(dev, "%s: framebuffer not page-aligned\n",
1000                                      __func__);
1001                 return -EINVAL;
1002         }
1003 
1004         exp_info.ops = &mbochs_dmabuf_ops;
1005         exp_info.size = dmabuf->mode.size;
1006         exp_info.priv = dmabuf;
1007 
1008         buf = dma_buf_export(&exp_info);
1009         if (IS_ERR(buf)) {
1010                 dev_info_ratelimited(dev, "%s: dma_buf_export failed: %ld\n",
1011                                      __func__, PTR_ERR(buf));
1012                 return PTR_ERR(buf);
1013         }
1014 
1015         dmabuf->buf = buf;
1016         dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);
1017         return 0;
1018 }
1019 
1020 static int mbochs_get_region_info(struct mdev_device *mdev,
1021                                   struct vfio_region_info_ext *ext)
1022 {
1023         struct vfio_region_info *region_info = &ext->base;
1024         struct mdev_state *mdev_state;
1025 
1026         mdev_state = mdev_get_drvdata(mdev);
1027         if (!mdev_state)
1028                 return -EINVAL;
1029 
1030         if (region_info->index >= MBOCHS_NUM_REGIONS)
1031                 return -EINVAL;
1032 
1033         switch (region_info->index) {
1034         case VFIO_PCI_CONFIG_REGION_INDEX:
1035                 region_info->offset = 0;
1036                 region_info->size   = MBOCHS_CONFIG_SPACE_SIZE;
1037                 region_info->flags  = (VFIO_REGION_INFO_FLAG_READ |
1038                                        VFIO_REGION_INFO_FLAG_WRITE);
1039                 break;
1040         case VFIO_PCI_BAR0_REGION_INDEX:
1041                 region_info->offset = MBOCHS_MEMORY_BAR_OFFSET;
1042                 region_info->size   = mdev_state->memsize;
1043                 region_info->flags  = (VFIO_REGION_INFO_FLAG_READ  |
1044                                        VFIO_REGION_INFO_FLAG_WRITE |
1045                                        VFIO_REGION_INFO_FLAG_MMAP);
1046                 break;
1047         case VFIO_PCI_BAR2_REGION_INDEX:
1048                 region_info->offset = MBOCHS_MMIO_BAR_OFFSET;
1049                 region_info->size   = MBOCHS_MMIO_BAR_SIZE;
1050                 region_info->flags  = (VFIO_REGION_INFO_FLAG_READ  |
1051                                        VFIO_REGION_INFO_FLAG_WRITE);
1052                 break;
1053         case MBOCHS_EDID_REGION_INDEX:
1054                 ext->base.argsz = sizeof(*ext);
1055                 ext->base.offset = MBOCHS_EDID_OFFSET;
1056                 ext->base.size = MBOCHS_EDID_SIZE;
1057                 ext->base.flags = (VFIO_REGION_INFO_FLAG_READ  |
1058                                    VFIO_REGION_INFO_FLAG_WRITE |
1059                                    VFIO_REGION_INFO_FLAG_CAPS);
1060                 ext->base.cap_offset = offsetof(typeof(*ext), type);
1061                 ext->type.header.id = VFIO_REGION_INFO_CAP_TYPE;
1062                 ext->type.header.version = 1;
1063                 ext->type.header.next = 0;
1064                 ext->type.type = VFIO_REGION_TYPE_GFX;
1065                 ext->type.subtype = VFIO_REGION_SUBTYPE_GFX_EDID;
1066                 break;
1067         default:
1068                 region_info->size   = 0;
1069                 region_info->offset = 0;
1070                 region_info->flags  = 0;
1071         }
1072 
1073         return 0;
1074 }
1075 
1076 static int mbochs_get_irq_info(struct mdev_device *mdev,
1077                                struct vfio_irq_info *irq_info)
1078 {
1079         irq_info->count = 0;
1080         return 0;
1081 }
1082 
1083 static int mbochs_get_device_info(struct mdev_device *mdev,
1084                                   struct vfio_device_info *dev_info)
1085 {
1086         dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
1087         dev_info->num_regions = MBOCHS_NUM_REGIONS;
1088         dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
1089         return 0;
1090 }
1091 
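      /*
       * VFIO_DEVICE_QUERY_GFX_PLANE: a probe call (PROBE|DMABUF flags)
       * merely reports that dma-buf planes are supported.  A regular
       * query looks up (or creates) a dmabuf matching the current
       * framebuffer mode and returns its id; userspace turns that id
       * into a file descriptor via VFIO_DEVICE_GET_GFX_DMABUF.
       */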
1092 static int mbochs_query_gfx_plane(struct mdev_device *mdev,
1093                                   struct vfio_device_gfx_plane_info *plane)
1094 {
1095         struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
1096         struct device *dev = mdev_dev(mdev);
1097         struct mbochs_dmabuf *dmabuf;
1098         struct mbochs_mode mode;
1099         int ret;
1100 
1101         if (plane->flags & VFIO_GFX_PLANE_TYPE_PROBE) {
1102                 if (plane->flags == (VFIO_GFX_PLANE_TYPE_PROBE |
1103                                      VFIO_GFX_PLANE_TYPE_DMABUF))
1104                         return 0;
1105                 return -EINVAL;
1106         }
1107 
1108         if (plane->flags != VFIO_GFX_PLANE_TYPE_DMABUF)
1109                 return -EINVAL;
1110 
1111         plane->drm_format_mod = 0;
1112         plane->x_pos          = 0;
1113         plane->y_pos          = 0;
1114         plane->x_hot          = 0;
1115         plane->y_hot          = 0;
1116 
1117         mutex_lock(&mdev_state->ops_lock);
1118 
1119         ret = -EINVAL;
1120         if (plane->drm_plane_type == DRM_PLANE_TYPE_PRIMARY)
1121                 ret = mbochs_check_framebuffer(mdev_state, &mode);
1122         if (ret < 0) {
1123                 plane->drm_format     = 0;
1124                 plane->width          = 0;
1125                 plane->height         = 0;
1126                 plane->stride         = 0;
1127                 plane->size           = 0;
1128                 plane->dmabuf_id      = 0;
1129                 goto done;
1130         }
1131 
1132         dmabuf = mbochs_dmabuf_find_by_mode(mdev_state, &mode);
1133         if (!dmabuf)
 1134                 dmabuf = mbochs_dmabuf_alloc(mdev_state, &mode);
1135         if (!dmabuf) {
1136                 mutex_unlock(&mdev_state->ops_lock);
1137                 return -ENOMEM;
1138         }
1139 
1140         plane->drm_format     = dmabuf->mode.drm_format;
1141         plane->width          = dmabuf->mode.width;
1142         plane->height         = dmabuf->mode.height;
1143         plane->stride         = dmabuf->mode.stride;
1144         plane->size           = dmabuf->mode.size;
1145         plane->dmabuf_id      = dmabuf->id;
1146 
1147 done:
1148         if (plane->drm_plane_type == DRM_PLANE_TYPE_PRIMARY &&
1149             mdev_state->active_id != plane->dmabuf_id) {
1150                 dev_dbg(dev, "%s: primary: %d => %d\n", __func__,
1151                         mdev_state->active_id, plane->dmabuf_id);
1152                 mdev_state->active_id = plane->dmabuf_id;
1153         }
1154         mutex_unlock(&mdev_state->ops_lock);
1155         return 0;
1156 }
1157 
1158 static int mbochs_get_gfx_dmabuf(struct mdev_device *mdev,
1159                                  u32 id)
1160 {
1161         struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
1162         struct mbochs_dmabuf *dmabuf;
1163 
1164         mutex_lock(&mdev_state->ops_lock);
1165 
1166         dmabuf = mbochs_dmabuf_find_by_id(mdev_state, id);
1167         if (!dmabuf) {
1168                 mutex_unlock(&mdev_state->ops_lock);
1169                 return -ENOENT;
1170         }
1171 
1172         if (!dmabuf->buf)
1173                 mbochs_dmabuf_export(dmabuf);
1174 
1175         mutex_unlock(&mdev_state->ops_lock);
1176 
1177         if (!dmabuf->buf)
1178                 return -EINVAL;
1179 
1180         return dma_buf_fd(dmabuf->buf, 0);
1181 }
1182 
1183 static long mbochs_ioctl(struct mdev_device *mdev, unsigned int cmd,
1184                         unsigned long arg)
1185 {
1186         int ret = 0;
1187         unsigned long minsz, outsz;
1188 
1189         switch (cmd) {
1190         case VFIO_DEVICE_GET_INFO:
1191         {
1192                 struct vfio_device_info info;
1193 
1194                 minsz = offsetofend(struct vfio_device_info, num_irqs);
1195 
1196                 if (copy_from_user(&info, (void __user *)arg, minsz))
1197                         return -EFAULT;
1198 
1199                 if (info.argsz < minsz)
1200                         return -EINVAL;
1201 
1202                 ret = mbochs_get_device_info(mdev, &info);
1203                 if (ret)
1204                         return ret;
1205 
1206                 if (copy_to_user((void __user *)arg, &info, minsz))
1207                         return -EFAULT;
1208 
1209                 return 0;
1210         }
1211         case VFIO_DEVICE_GET_REGION_INFO:
1212         {
1213                 struct vfio_region_info_ext info;
1214 
1215                 minsz = offsetofend(typeof(info), base.offset);
1216 
1217                 if (copy_from_user(&info, (void __user *)arg, minsz))
1218                         return -EFAULT;
1219 
1220                 outsz = info.base.argsz;
1221                 if (outsz < minsz)
1222                         return -EINVAL;
1223                 if (outsz > sizeof(info))
1224                         return -EINVAL;
1225 
1226                 ret = mbochs_get_region_info(mdev, &info);
1227                 if (ret)
1228                         return ret;
1229 
1230                 if (copy_to_user((void __user *)arg, &info, outsz))
1231                         return -EFAULT;
1232 
1233                 return 0;
1234         }
1235 
1236         case VFIO_DEVICE_GET_IRQ_INFO:
1237         {
1238                 struct vfio_irq_info info;
1239 
1240                 minsz = offsetofend(struct vfio_irq_info, count);
1241 
1242                 if (copy_from_user(&info, (void __user *)arg, minsz))
1243                         return -EFAULT;
1244 
1245                 if ((info.argsz < minsz) ||
1246                     (info.index >= VFIO_PCI_NUM_IRQS))
1247                         return -EINVAL;
1248 
1249                 ret = mbochs_get_irq_info(mdev, &info);
1250                 if (ret)
1251                         return ret;
1252 
1253                 if (copy_to_user((void __user *)arg, &info, minsz))
1254                         return -EFAULT;
1255 
1256                 return 0;
1257         }
1258 
1259         case VFIO_DEVICE_QUERY_GFX_PLANE:
1260         {
1261                 struct vfio_device_gfx_plane_info plane;
1262 
1263                 minsz = offsetofend(struct vfio_device_gfx_plane_info,
1264                                     region_index);
1265 
1266                 if (copy_from_user(&plane, (void __user *)arg, minsz))
1267                         return -EFAULT;
1268 
1269                 if (plane.argsz < minsz)
1270                         return -EINVAL;
1271 
1272                 ret = mbochs_query_gfx_plane(mdev, &plane);
1273                 if (ret)
1274                         return ret;
1275 
1276                 if (copy_to_user((void __user *)arg, &plane, minsz))
1277                         return -EFAULT;
1278 
1279                 return 0;
1280         }
1281 
1282         case VFIO_DEVICE_GET_GFX_DMABUF:
1283         {
1284                 u32 dmabuf_id;
1285 
1286                 if (get_user(dmabuf_id, (__u32 __user *)arg))
1287                         return -EFAULT;
1288 
1289                 return mbochs_get_gfx_dmabuf(mdev, dmabuf_id);
1290         }
1291 
1292         case VFIO_DEVICE_SET_IRQS:
1293                 return -EINVAL;
1294 
1295         case VFIO_DEVICE_RESET:
1296                 return mbochs_reset(mdev);
1297         }
1298         return -ENOTTY;
1299 }
1300 
1301 static int mbochs_open(struct mdev_device *mdev)
1302 {
1303         if (!try_module_get(THIS_MODULE))
1304                 return -ENODEV;
1305 
1306         return 0;
1307 }
1308 
1309 static void mbochs_close(struct mdev_device *mdev)
1310 {
1311         struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
1312         struct mbochs_dmabuf *dmabuf, *tmp;
1313 
1314         mutex_lock(&mdev_state->ops_lock);
1315 
1316         list_for_each_entry_safe(dmabuf, tmp, &mdev_state->dmabufs, next) {
1317                 list_del(&dmabuf->next);
1318                 if (dmabuf->buf) {
1319                         /* free in mbochs_release_dmabuf() */
1320                         dmabuf->unlinked = true;
1321                 } else {
1322                         kfree(dmabuf);
1323                 }
1324         }
1325         mbochs_put_pages(mdev_state);
1326 
1327         mutex_unlock(&mdev_state->ops_lock);
1328         module_put(THIS_MODULE);
1329 }
1330 
1331 static ssize_t
1332 memory_show(struct device *dev, struct device_attribute *attr,
1333             char *buf)
1334 {
1335         struct mdev_device *mdev = mdev_from_dev(dev);
1336         struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
1337 
1338         return sprintf(buf, "%d MB\n", mdev_state->type->mbytes);
1339 }
1340 static DEVICE_ATTR_RO(memory);
1341 
1342 static struct attribute *mdev_dev_attrs[] = {
1343         &dev_attr_memory.attr,
1344         NULL,
1345 };
1346 
1347 static const struct attribute_group mdev_dev_group = {
1348         .name  = "vendor",
1349         .attrs = mdev_dev_attrs,
1350 };
1351 
 1352 static const struct attribute_group *mdev_dev_groups[] = {
1353         &mdev_dev_group,
1354         NULL,
1355 };
1356 
1357 static ssize_t
1358 name_show(struct kobject *kobj, struct device *dev, char *buf)
1359 {
1360         return sprintf(buf, "%s\n", kobj->name);
1361 }
1362 MDEV_TYPE_ATTR_RO(name);
1363 
1364 static ssize_t
1365 description_show(struct kobject *kobj, struct device *dev, char *buf)
1366 {
1367         const struct mbochs_type *type = mbochs_find_type(kobj);
1368 
1369         return sprintf(buf, "virtual display, %d MB video memory\n",
1370                        type ? type->mbytes  : 0);
1371 }
1372 MDEV_TYPE_ATTR_RO(description);
1373 
1374 static ssize_t
1375 available_instances_show(struct kobject *kobj, struct device *dev, char *buf)
1376 {
1377         const struct mbochs_type *type = mbochs_find_type(kobj);
1378         int count = (max_mbytes - mbochs_used_mbytes) / type->mbytes;
1379 
1380         return sprintf(buf, "%d\n", count);
1381 }
1382 MDEV_TYPE_ATTR_RO(available_instances);
1383 
1384 static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
1385                                char *buf)
1386 {
1387         return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
1388 }
1389 MDEV_TYPE_ATTR_RO(device_api);
1390 
1391 static struct attribute *mdev_types_attrs[] = {
1392         &mdev_type_attr_name.attr,
1393         &mdev_type_attr_description.attr,
1394         &mdev_type_attr_device_api.attr,
1395         &mdev_type_attr_available_instances.attr,
1396         NULL,
1397 };
1398 
1399 static struct attribute_group mdev_type_group1 = {
1400         .name  = MBOCHS_TYPE_1,
1401         .attrs = mdev_types_attrs,
1402 };
1403 
1404 static struct attribute_group mdev_type_group2 = {
1405         .name  = MBOCHS_TYPE_2,
1406         .attrs = mdev_types_attrs,
1407 };
1408 
1409 static struct attribute_group mdev_type_group3 = {
1410         .name  = MBOCHS_TYPE_3,
1411         .attrs = mdev_types_attrs,
1412 };
1413 
1414 static struct attribute_group *mdev_type_groups[] = {
1415         &mdev_type_group1,
1416         &mdev_type_group2,
1417         &mdev_type_group3,
1418         NULL,
1419 };
1420 
1421 static const struct mdev_parent_ops mdev_fops = {
1422         .owner                  = THIS_MODULE,
1423         .mdev_attr_groups       = mdev_dev_groups,
1424         .supported_type_groups  = mdev_type_groups,
1425         .create                 = mbochs_create,
1426         .remove                 = mbochs_remove,
1427         .open                   = mbochs_open,
1428         .release                = mbochs_close,
1429         .read                   = mbochs_read,
1430         .write                  = mbochs_write,
1431         .ioctl                  = mbochs_ioctl,
1432         .mmap                   = mbochs_mmap,
1433 };
1434 
1435 static const struct file_operations vd_fops = {
1436         .owner          = THIS_MODULE,
1437 };
1438 
1439 static void mbochs_device_release(struct device *dev)
1440 {
1441         /* nothing */
1442 }
1443 
1444 static int __init mbochs_dev_init(void)
1445 {
1446         int ret = 0;
1447 
1448         ret = alloc_chrdev_region(&mbochs_devt, 0, MINORMASK + 1, MBOCHS_NAME);
1449         if (ret < 0) {
1450                 pr_err("Error: failed to register mbochs_dev, err: %d\n", ret);
1451                 return ret;
1452         }
1453         cdev_init(&mbochs_cdev, &vd_fops);
1454         cdev_add(&mbochs_cdev, mbochs_devt, MINORMASK + 1);
1455         pr_info("%s: major %d\n", __func__, MAJOR(mbochs_devt));
1456 
1457         mbochs_class = class_create(THIS_MODULE, MBOCHS_CLASS_NAME);
1458         if (IS_ERR(mbochs_class)) {
1459                 pr_err("Error: failed to register mbochs_dev class\n");
1460                 ret = PTR_ERR(mbochs_class);
1461                 goto failed1;
1462         }
1463         mbochs_dev.class = mbochs_class;
1464         mbochs_dev.release = mbochs_device_release;
1465         dev_set_name(&mbochs_dev, "%s", MBOCHS_NAME);
1466 
1467         ret = device_register(&mbochs_dev);
1468         if (ret)
1469                 goto failed2;
1470 
1471         ret = mdev_register_device(&mbochs_dev, &mdev_fops);
1472         if (ret)
1473                 goto failed3;
1474 
1475         return 0;
1476 
1477 failed3:
1478         device_unregister(&mbochs_dev);
1479 failed2:
1480         class_destroy(mbochs_class);
1481 failed1:
1482         cdev_del(&mbochs_cdev);
1483         unregister_chrdev_region(mbochs_devt, MINORMASK + 1);
1484         return ret;
1485 }
1486 
1487 static void __exit mbochs_dev_exit(void)
1488 {
1489         mbochs_dev.bus = NULL;
1490         mdev_unregister_device(&mbochs_dev);
1491 
1492         device_unregister(&mbochs_dev);
1493         cdev_del(&mbochs_cdev);
1494         unregister_chrdev_region(mbochs_devt, MINORMASK + 1);
1495         class_destroy(mbochs_class);
1496         mbochs_class = NULL;
1497 }
1498 
1499 module_init(mbochs_dev_init)
1500 module_exit(mbochs_dev_exit)
