/*
 * drm gem CMA (contiguous memory allocator) helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 *
 * Based on Samsung Exynos code
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: cma helpers
 *
 * The Contiguous Memory Allocator reserves a pool of memory at early boot
 * that is used to service requests for large blocks of contiguous memory.
 *
 * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
 * objects that are physically contiguous in memory. This is useful for
 * display drivers that are unable to map scattered buffers via an IOMMU.
 */
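
/*
 * Illustrative sketch (not part of the original helpers): a minimal,
 * hypothetical "foo" driver wiring the CMA helpers into its file_operations
 * and struct drm_driver. All "foo_*" names are made up; the callback
 * assignments mirror what the kerneldoc comments in this file recommend.
 *
 *        static const struct file_operations foo_fops = {
 *                .owner          = THIS_MODULE,
 *                .open           = drm_open,
 *                .release        = drm_release,
 *                .unlocked_ioctl = drm_ioctl,
 *                .mmap           = drm_gem_cma_mmap,
 *        };
 *
 *        static struct drm_driver foo_driver = {
 *                .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
 *                .fops            = &foo_fops,
 *                .gem_free_object = drm_gem_cma_free_object,
 *                .gem_vm_ops      = &drm_gem_cma_vm_ops,
 *                .dumb_create     = drm_gem_cma_dumb_create,
 *                .dumb_map_offset = drm_gem_cma_dumb_map_offset,
 *                .dumb_destroy    = drm_gem_dumb_destroy,
 *        };
 */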

/**
 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates and initializes a GEM CMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
__drm_gem_cma_create(struct drm_device *drm, size_t size)
{
        struct drm_gem_cma_object *cma_obj;
        struct drm_gem_object *gem_obj;
        int ret;

        cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
        if (!cma_obj)
                return ERR_PTR(-ENOMEM);

        gem_obj = &cma_obj->base;

        ret = drm_gem_object_init(drm, gem_obj, size);
        if (ret)
                goto error;

        ret = drm_gem_create_mmap_offset(gem_obj);
        if (ret) {
                drm_gem_object_release(gem_obj);
                goto error;
        }

        return cma_obj;

error:
        kfree(cma_obj);
        return ERR_PTR(ret);
}

/**
 * drm_gem_cma_create - allocate an object with the given size
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates a CMA GEM object and allocates a contiguous chunk of
 * memory as backing store. The backing memory has the writecombine attribute
 * set.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
                                              size_t size)
{
        struct drm_gem_cma_object *cma_obj;
        int ret;

        size = round_up(size, PAGE_SIZE);

        cma_obj = __drm_gem_cma_create(drm, size);
        if (IS_ERR(cma_obj))
                return cma_obj;

        cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
                                                &cma_obj->paddr,
                                                GFP_KERNEL | __GFP_NOWARN);
        if (!cma_obj->vaddr) {
                dev_err(drm->dev, "failed to allocate buffer with size %zu\n",
                        size);
                ret = -ENOMEM;
                goto error;
        }

        return cma_obj;

error:
        drm_gem_cma_free_object(&cma_obj->base);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);

/**
 * drm_gem_cma_create_with_handle - allocate an object with the given size and
 *     return a GEM handle to it
 * @file_priv: DRM file-private structure to register the handle for
 * @drm: DRM device
 * @size: size of the object to allocate
 * @handle: return location for the GEM handle
 *
 * This function creates a CMA GEM object, allocating a physically contiguous
 * chunk of memory as backing store. The GEM object is then added to the list
 * of objects associated with the given file and a handle to it is returned.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
drm_gem_cma_create_with_handle(struct drm_file *file_priv,
                               struct drm_device *drm, size_t size,
                               uint32_t *handle)
{
        struct drm_gem_cma_object *cma_obj;
        struct drm_gem_object *gem_obj;
        int ret;

        cma_obj = drm_gem_cma_create(drm, size);
        if (IS_ERR(cma_obj))
                return cma_obj;

        gem_obj = &cma_obj->base;

        /*
         * Allocate an ID in the IDR table where the object is registered;
         * the handle returned to userspace refers to that ID.
         */
        ret = drm_gem_handle_create(file_priv, gem_obj, handle);
        if (ret)
                goto err_handle_create;

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(gem_obj);

        return cma_obj;

err_handle_create:
        drm_gem_cma_free_object(gem_obj);

        return ERR_PTR(ret);
}

/**
 * drm_gem_cma_free_object - free resources associated with a CMA GEM object
 * @gem_obj: GEM object to free
 *
 * This function frees the backing memory of the CMA GEM object, cleans up the
 * GEM object state and frees the memory used to store the object itself.
 * Drivers using the CMA helpers should set this as their DRM driver's
 * ->gem_free_object() callback.
 */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
        struct drm_gem_cma_object *cma_obj;

        cma_obj = to_drm_gem_cma_obj(gem_obj);

        if (cma_obj->vaddr) {
                dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size,
                                      cma_obj->vaddr, cma_obj->paddr);
        } else if (gem_obj->import_attach) {
                drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
        }

        drm_gem_object_release(gem_obj);

        kfree(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
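
/*
 * Usage sketch (illustrative, not taken from this file): allocating a
 * physically contiguous buffer from driver code and dropping the reference
 * again once it is no longer needed. The final
 * drm_gem_object_unreference_unlocked() ends up in drm_gem_cma_free_object()
 * via the driver's ->gem_free_object() hook.
 *
 *        struct drm_gem_cma_object *cma_obj;
 *
 *        cma_obj = drm_gem_cma_create(drm, size);
 *        if (IS_ERR(cma_obj))
 *                return PTR_ERR(cma_obj);
 *
 *        ... access the buffer through cma_obj->vaddr, program the
 *            hardware with cma_obj->paddr ...
 *
 *        drm_gem_object_unreference_unlocked(&cma_obj->base);
 */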

/**
 * drm_gem_cma_dumb_create_internal - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This aligns the pitch and size arguments to the minimum required. This is
 * an internal helper that can be wrapped by a driver to account for hardware
 * with more specific alignment requirements. It should not be used directly
 * as the ->dumb_create() callback in a DRM driver.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
                                     struct drm_device *drm,
                                     struct drm_mode_create_dumb *args)
{
        unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        struct drm_gem_cma_object *cma_obj;

        if (args->pitch < min_pitch)
                args->pitch = min_pitch;

        if (args->size < args->pitch * args->height)
                args->size = args->pitch * args->height;

        cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
                                                 &args->handle);
        return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);

/**
 * drm_gem_cma_dumb_create - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their ->dumb_create() callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace and pass the IOCTL data along to the
 * drm_gem_cma_dumb_create_internal() function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create(struct drm_file *file_priv,
                            struct drm_device *drm,
                            struct drm_mode_create_dumb *args)
{
        struct drm_gem_cma_object *cma_obj;

        args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        args->size = args->pitch * args->height;

        cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
                                                 &args->handle);
        return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
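
/*
 * Example sketch (hypothetical driver code): hardware that needs the pitch
 * aligned to 64 bytes can wrap drm_gem_cma_dumb_create_internal() in its own
 * ->dumb_create() implementation. "foo_dumb_create" is a made-up name; the
 * internal helper only ever raises the pitch and size, never lowers them.
 *
 *        static int foo_dumb_create(struct drm_file *file_priv,
 *                                   struct drm_device *drm,
 *                                   struct drm_mode_create_dumb *args)
 *        {
 *                unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 *
 *                args->pitch = round_up(min_pitch, 64);
 *
 *                return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
 *        }
 */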

/**
 * drm_gem_cma_dumb_map_offset - return the fake mmap offset for a CMA GEM
 *     object
 * @file_priv: DRM file-private structure containing the GEM object
 * @drm: DRM device
 * @handle: GEM object handle
 * @offset: return location for the fake mmap offset
 *
 * This function looks up an object by its handle and returns the fake mmap
 * offset associated with it. Drivers using the CMA helpers should set this
 * as their DRM driver's ->dumb_map_offset() callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
                                struct drm_device *drm, u32 handle,
                                u64 *offset)
{
        struct drm_gem_object *gem_obj;

        gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
        if (!gem_obj) {
                dev_err(drm->dev, "failed to lookup GEM object\n");
                return -EINVAL;
        }

        *offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

        drm_gem_object_unreference_unlocked(gem_obj);

        return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);

const struct vm_operations_struct drm_gem_cma_vm_ops = {
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);

static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
                                struct vm_area_struct *vma)
{
        int ret;

        /*
         * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
         * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
         * the whole buffer.
         */
        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_pgoff = 0;

        ret = dma_mmap_writecombine(cma_obj->base.dev->dev, vma,
                                    cma_obj->vaddr, cma_obj->paddr,
                                    vma->vm_end - vma->vm_start);
        if (ret)
                drm_gem_vm_close(vma);

        return ret;
}

/**
 * drm_gem_cma_mmap - memory-map a CMA GEM object
 * @filp: file object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for CMA objects: In addition to the usual GEM VMA setup it
 * immediately faults in the entire object instead of using on-demand
 * faulting. Drivers which employ the CMA helpers should use this function
 * as their ->mmap() handler in the DRM device file's file_operations
 * structure.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_gem_cma_object *cma_obj;
        struct drm_gem_object *gem_obj;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret)
                return ret;

        gem_obj = vma->vm_private_data;
        cma_obj = to_drm_gem_cma_obj(gem_obj);

        return drm_gem_cma_mmap_obj(cma_obj, vma);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);

#ifdef CONFIG_DEBUG_FS
/**
 * drm_gem_cma_describe - describe a CMA GEM object for debugfs
 * @cma_obj: CMA GEM object
 * @m: debugfs file handle
 *
 * This function can be used to dump a human-readable representation of the
 * CMA GEM object into a synthetic file.
 */
void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
                          struct seq_file *m)
{
        struct drm_gem_object *obj = &cma_obj->base;
        uint64_t off;

        off = drm_vma_node_start(&obj->vma_node);

        seq_printf(m, "%2d (%2d) %08llx %pad %p %zu",
                   obj->name, obj->refcount.refcount.counter,
                   off, &cma_obj->paddr, cma_obj->vaddr, obj->size);

        seq_printf(m, "\n");
}
EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
#endif
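
/*
 * Debugfs sketch (illustrative; "foo_*" names and the driver-private object
 * pointer are invented): a driver's drm_info_list callback can dump a CMA
 * GEM object it tracks by calling drm_gem_cma_describe().
 *
 *        static int foo_fb_show(struct seq_file *m, void *data)
 *        {
 *                struct drm_info_node *node = m->private;
 *                struct foo_drm_private *priv = node->minor->dev->dev_private;
 *
 *                drm_gem_cma_describe(priv->fbdev_cma_obj, m);
 *
 *                return 0;
 *        }
 */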

/**
 * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
 *     pages for a CMA GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API. Drivers using the CMA helpers should
 * set this as their DRM driver's ->gem_prime_get_sg_table() callback.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
        struct sg_table *sgt;
        int ret;

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return NULL;

        ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
                              cma_obj->paddr, obj->size);
        if (ret < 0)
                goto out;

        return sgt;

out:
        kfree(sgt);
        return NULL;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);

/**
 * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
 *     driver's scatter/gather table of pinned pages
 * @dev: device to import into
 * @attach: DMA-BUF attachment
 * @sgt: scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Imported buffers must be physically contiguous in memory
 * (i.e. the scatter/gather table must contain a single entry). Drivers that
 * use the CMA helpers should set this as their DRM driver's
 * ->gem_prime_import_sg_table() callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
                                  struct dma_buf_attachment *attach,
                                  struct sg_table *sgt)
{
        struct drm_gem_cma_object *cma_obj;

        if (sgt->nents != 1)
                return ERR_PTR(-EINVAL);

        /* Create a CMA GEM buffer. */
        cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
        if (IS_ERR(cma_obj))
                return ERR_CAST(cma_obj);

        cma_obj->paddr = sg_dma_address(sgt->sgl);
        cma_obj->sgt = sgt;

        DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n",
                        &cma_obj->paddr, attach->dmabuf->size);

        return &cma_obj->base;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);

/**
 * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function maps a buffer imported via DRM PRIME into a userspace
 * process's address space. Drivers that use the CMA helpers should set this
 * as their DRM driver's ->gem_prime_mmap() callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
                           struct vm_area_struct *vma)
{
        struct drm_gem_cma_object *cma_obj;
        int ret;

        ret = drm_gem_mmap_obj(obj, obj->size, vma);
        if (ret < 0)
                return ret;

        cma_obj = to_drm_gem_cma_obj(obj);
        return drm_gem_cma_mmap_obj(cma_obj, vma);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
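
/*
 * PRIME wiring sketch (illustrative; extends the hypothetical foo_driver
 * from the sketch near the top of this file): the generic PRIME helpers
 * from drm_prime.c combined with the CMA-specific callbacks above.
 *
 *        static struct drm_driver foo_driver = {
 *                ...
 *                .prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
 *                .prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
 *                .gem_prime_import          = drm_gem_prime_import,
 *                .gem_prime_export          = drm_gem_prime_export,
 *                .gem_prime_get_sg_table    = drm_gem_cma_prime_get_sg_table,
 *                .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
 *                .gem_prime_vmap            = drm_gem_cma_prime_vmap,
 *                .gem_prime_vunmap          = drm_gem_cma_prime_vunmap,
 *                .gem_prime_mmap            = drm_gem_cma_prime_mmap,
 *        };
 */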

/**
 * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
 *     address space
 * @obj: GEM object
 *
 * This function maps a buffer exported via DRM PRIME into the kernel's
 * virtual address space. Since the CMA buffers are already mapped into the
 * kernel virtual address space this simply returns the cached virtual
 * address. Drivers using the CMA helpers should set this as their DRM
 * driver's ->gem_prime_vmap() callback.
 *
 * Returns:
 * The kernel virtual address of the CMA GEM object's backing store.
 */
void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
{
        struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);

        return cma_obj->vaddr;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);

/**
 * drm_gem_cma_prime_vunmap - unmap a CMA GEM object from the kernel's virtual
 *     address space
 * @obj: GEM object
 * @vaddr: kernel virtual address where the CMA GEM object was mapped
 *
 * This function removes a buffer exported via DRM PRIME from the kernel's
 * virtual address space. This is a no-op because CMA buffers cannot be
 * unmapped from kernel space. Drivers using the CMA helpers should set this
 * as their DRM driver's ->gem_prime_vunmap() callback.
 */
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        /* Nothing to do */
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);
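
/*
 * Importer-side sketch (illustrative): kernel code that holds a dma_buf
 * exported from a driver using these helpers can obtain the kernel mapping
 * through the dma-buf API; dma_buf_vmap() reaches the exporter's
 * ->gem_prime_vmap() hook, i.e. drm_gem_cma_prime_vmap() above, which hands
 * back the cached vaddr.
 *
 *        void *vaddr;
 *
 *        vaddr = dma_buf_vmap(dma_buf);
 *        if (!vaddr)
 *                return -ENOMEM;
 *
 *        ... CPU access through vaddr ...
 *
 *        dma_buf_vunmap(dma_buf, vaddr);
 */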