drivers/gpu/drm/udl/udl_gem.c


DEFINITIONS

This source file includes the following definitions:
  1. udl_gem_alloc_object
  2. udl_gem_create
  3. update_vm_cache_attr
  4. udl_dumb_create
  5. udl_drm_gem_mmap
  6. udl_gem_fault
  7. udl_gem_get_pages
  8. udl_gem_put_pages
  9. udl_gem_vmap
  10. udl_gem_vunmap
  11. udl_gem_free_object
  12. udl_gem_mmap

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Red Hat
 */

#include <linux/dma-buf.h>
#include <linux/vmalloc.h>

#include <drm/drm_mode.h>
#include <drm/drm_prime.h>

#include "udl_drv.h"

/* Allocate a udl GEM object; new objects default to cacheable mappings. */
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
                                            size_t size)
{
        struct udl_gem_object *obj;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (obj == NULL)
                return NULL;

        if (drm_gem_object_init(dev, &obj->base, size) != 0) {
                kfree(obj);
                return NULL;
        }

        obj->flags = UDL_BO_CACHEABLE;
        return obj;
}

static int
udl_gem_create(struct drm_file *file,
               struct drm_device *dev,
               uint64_t size,
               uint32_t *handle_p)
{
        struct udl_gem_object *obj;
        int ret;
        u32 handle;

        size = roundup(size, PAGE_SIZE);

        obj = udl_gem_alloc_object(dev, size);
        if (obj == NULL)
                return -ENOMEM;

        ret = drm_gem_handle_create(file, &obj->base, &handle);
        if (ret) {
                drm_gem_object_release(&obj->base);
                kfree(obj);
                return ret;
        }

        /* The handle now holds a reference; drop the allocation's initial one. */
        drm_gem_object_put_unlocked(&obj->base);
        *handle_p = handle;
        return 0;
}

static void update_vm_cache_attr(struct udl_gem_object *obj,
                                 struct vm_area_struct *vma)
{
        DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

        /* non-cacheable as default. */
        if (obj->flags & UDL_BO_CACHEABLE) {
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        } else if (obj->flags & UDL_BO_WC) {
                vma->vm_page_prot =
                        pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        } else {
                vma->vm_page_prot =
                        pgprot_noncached(vm_get_page_prot(vma->vm_flags));
        }
}

int udl_dumb_create(struct drm_file *file,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
        args->size = args->pitch * args->height;
        return udl_gem_create(file, dev,
                              args->size, &args->handle);
}
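
/* Worked example (illustrative numbers, not from this file): a 1024x768
 * dumb buffer at bpp = 32 gets pitch = 1024 * DIV_ROUND_UP(32, 8) = 4096
 * bytes and size = 4096 * 768 = 3145728 bytes; udl_gem_create() then
 * rounds the size up to a whole number of pages.
 */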

int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret)
                return ret;

        /* Use a mixed map so the fault handler can insert struct pages
         * with vmf_insert_page().
         */
        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        update_vm_cache_attr(to_udl_bo(vma->vm_private_data), vma);

        return ret;
}

vm_fault_t udl_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
        struct page *page;
        unsigned int page_offset;

        page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

        if (!obj->pages)
                return VM_FAULT_SIGBUS;

        page = obj->pages[page_offset];
        return vmf_insert_page(vma, vmf->address, page);
}

int udl_gem_get_pages(struct udl_gem_object *obj)
{
        struct page **pages;

        if (obj->pages)
                return 0;

        pages = drm_gem_get_pages(&obj->base);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        obj->pages = pages;

        return 0;
}

void udl_gem_put_pages(struct udl_gem_object *obj)
{
        /* The page array of an imported dma-buf is allocated by the prime
         * import code rather than by drm_gem_get_pages(), so just free it.
         */
        if (obj->base.import_attach) {
                kvfree(obj->pages);
                obj->pages = NULL;
                return;
        }

        drm_gem_put_pages(&obj->base, obj->pages, false, false);
        obj->pages = NULL;
}

int udl_gem_vmap(struct udl_gem_object *obj)
{
        int page_count = obj->base.size / PAGE_SIZE;
        int ret;

        if (obj->base.import_attach) {
                obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
                if (!obj->vmapping)
                        return -ENOMEM;
                return 0;
        }

        ret = udl_gem_get_pages(obj);
        if (ret)
                return ret;

        obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
        if (!obj->vmapping)
                return -ENOMEM;
        return 0;
}

void udl_gem_vunmap(struct udl_gem_object *obj)
{
        if (obj->base.import_attach) {
                dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
                return;
        }

        vunmap(obj->vmapping);

        udl_gem_put_pages(obj);
}

void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
        struct udl_gem_object *obj = to_udl_bo(gem_obj);

        if (obj->vmapping)
                udl_gem_vunmap(obj);

        if (gem_obj->import_attach) {
                drm_prime_gem_destroy(gem_obj, obj->sg);
                put_device(gem_obj->dev->dev);
        }

        if (obj->pages)
                udl_gem_put_pages(obj);

        drm_gem_free_mmap_offset(gem_obj);
}

/* The dumb-buffer interface does not go through GEM's direct mmap path;
 * userspace is expected to mmap() the drm fd at the fake offset returned
 * here, as with other drivers.
 */
int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
                 uint32_t handle, uint64_t *offset)
{
        struct udl_gem_object *gobj;
        struct drm_gem_object *obj;
        struct udl_device *udl = to_udl(dev);
        int ret = 0;

        mutex_lock(&udl->gem_lock);
        obj = drm_gem_object_lookup(file, handle);
        if (obj == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
        gobj = to_udl_bo(obj);

        ret = udl_gem_get_pages(gobj);
        if (ret)
                goto out;
        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto out;

        *offset = drm_vma_node_offset_addr(&gobj->base.vma_node);

out:
        drm_gem_object_put_unlocked(&gobj->base);
unlock:
        mutex_unlock(&udl->gem_lock);
        return ret;
}
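
/* Usage sketch (hypothetical userspace code, not part of this driver):
 * create a dumb buffer, ask for its fake mmap offset (which reaches
 * udl_gem_mmap() via the driver's dumb_map_offset hook), then mmap()
 * the drm fd at that offset. Error handling is omitted for brevity.
 *
 *      #include <stdint.h>
 *      #include <sys/ioctl.h>
 *      #include <sys/mman.h>
 *      #include <drm/drm.h>
 *
 *      static void *map_dumb_buffer(int drm_fd, uint32_t w, uint32_t h)
 *      {
 *              struct drm_mode_create_dumb create = {
 *                      .width = w, .height = h, .bpp = 32,
 *              };
 *              struct drm_mode_map_dumb map = { 0 };
 *
 *              ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *              map.handle = create.handle;
 *              ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *
 *              return mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *                          MAP_SHARED, drm_fd, map.offset);
 *      }
 */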
