/*
 * Copyright (C) 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */

#include <drm/drmP.h>
#include "udl_drv.h"
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
					    size_t size)
{
	struct udl_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}

	obj->flags = UDL_BO_CACHEABLE;
	return obj;
}

static int
udl_gem_create(struct drm_file *file,
	       struct drm_device *dev,
	       uint64_t size,
	       uint32_t *handle_p)
{
	struct udl_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);

	obj = udl_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		kfree(obj);
		return ret;
	}

	drm_gem_object_unreference(&obj->base);
	*handle_p = handle;
	return 0;
}

static void update_vm_cache_attr(struct udl_gem_object *obj,
				 struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

	/* non-cacheable as default. */
	if (obj->flags & UDL_BO_CACHEABLE) {
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	} else if (obj->flags & UDL_BO_WC) {
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else {
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	}
}

int udl_dumb_create(struct drm_file *file,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	args->size = args->pitch * args->height;
	return udl_gem_create(file, dev,
			      args->size, &args->handle);
}

int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	update_vm_cache_attr(to_udl_bo(vma->vm_private_data), vma);

	return ret;
}

int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
	struct page *page;
	unsigned int page_offset;
	int ret = 0;

	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	if (!obj->pages)
		return VM_FAULT_SIGBUS;

	page = obj->pages[page_offset];
	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

int udl_gem_get_pages(struct udl_gem_object *obj)
{
	struct page **pages;

	if (obj->pages)
		return 0;

	pages = drm_gem_get_pages(&obj->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	obj->pages = pages;

	return 0;
}

void udl_gem_put_pages(struct udl_gem_object *obj)
{
	if (obj->base.import_attach) {
		drm_free_large(obj->pages);
		obj->pages = NULL;
		return;
	}

	drm_gem_put_pages(&obj->base, obj->pages, false, false);
	obj->pages = NULL;
}

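/*
 * Kernel mapping of a GEM object. Imported dma-bufs are mapped through
 * the exporter with dma_buf_vmap(); natively allocated objects pin their
 * shmem pages and vmap() them into one contiguous kernel virtual range.
 */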
int udl_gem_vmap(struct udl_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int ret;

	if (obj->base.import_attach) {
		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
		if (!obj->vmapping)
			return -ENOMEM;
		return 0;
	}

	ret = udl_gem_get_pages(obj);
	if (ret)
		return ret;

	obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
	if (!obj->vmapping)
		return -ENOMEM;
	return 0;
}

void udl_gem_vunmap(struct udl_gem_object *obj)
{
	if (obj->base.import_attach) {
		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
		return;
	}

	vunmap(obj->vmapping);

	udl_gem_put_pages(obj);
}

void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct udl_gem_object *obj = to_udl_bo(gem_obj);

	if (obj->vmapping)
		udl_gem_vunmap(obj);

	if (gem_obj->import_attach) {
		drm_prime_gem_destroy(gem_obj, obj->sg);
		put_device(gem_obj->dev->dev);
	}

	if (obj->pages)
		udl_gem_put_pages(obj);

	drm_gem_free_mmap_offset(gem_obj);
}

/*
 * The dumb interface doesn't work with the GEM straight-mmap interface;
 * it expects to do the mmap on the drm fd, like normal.
 */
int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
		 uint32_t handle, uint64_t *offset)
{
	struct udl_gem_object *gobj;
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	gobj = to_udl_bo(obj);

	ret = udl_gem_get_pages(gobj);
	if (ret)
		goto out;
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&gobj->base.vma_node);

out:
	drm_gem_object_unreference(&gobj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

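/*
 * Illustrative userspace sketch (kept out of the build; it is not part
 * of this driver): how a client exercises the dumb-buffer path above.
 * DRM_IOCTL_MODE_CREATE_DUMB ends up in udl_dumb_create(),
 * DRM_IOCTL_MODE_MAP_DUMB in udl_gem_mmap(), and the final mmap() on the
 * drm fd in udl_drm_gem_mmap()/udl_gem_fault(). Only generic DRM uapi
 * structures and the libdrm drmIoctl() wrapper are used; the helper name
 * and the buffer dimensions are made up for the example.
 */
#if 0
#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>

static void *map_dumb_bo(int fd, uint32_t width, uint32_t height,
			 uint64_t *size_out)
{
	struct drm_mode_create_dumb create = {
		.width = width,
		.height = height,
		.bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };
	void *ptr;

	/* Allocate the BO; the driver fills in handle, pitch and size. */
	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return NULL;

	/* Ask the driver for the fake mmap offset of this handle. */
	map.handle = create.handle;
	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return NULL;

	/* mmap on the drm fd itself, as the comment above describes. */
	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
		   MAP_SHARED, fd, map.offset);
	if (ptr == MAP_FAILED)
		return NULL;

	*size_out = create.size;
	return ptr;
}
#endif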