drivers/gpu/drm/vc4/vc4_bo.c

DEFINITIONS

This source file includes the following definitions.
  1. is_user_label
  2. vc4_bo_stats_print
  3. vc4_bo_stats_debugfs
  4. vc4_get_user_label
  5. vc4_bo_set_label
  6. bo_page_index
  7. vc4_bo_destroy
  8. vc4_bo_remove_from_cache
  9. vc4_get_cache_list_for_size
  10. vc4_bo_cache_purge
  11. vc4_bo_add_to_purgeable_pool
  12. vc4_bo_remove_from_purgeable_pool_locked
  13. vc4_bo_remove_from_purgeable_pool
  14. vc4_bo_purge
  15. vc4_bo_userspace_cache_purge
  16. vc4_bo_get_from_cache
  17. vc4_create_object
  18. vc4_bo_create
  19. vc4_dumb_create
  20. vc4_bo_cache_free_old
  21. vc4_free_object
  22. vc4_bo_cache_time_work
  23. vc4_bo_inc_usecnt
  24. vc4_bo_dec_usecnt
  25. vc4_bo_cache_time_timer
  26. vc4_prime_export
  27. vc4_fault
  28. vc4_mmap
  29. vc4_prime_mmap
  30. vc4_prime_vmap
  31. vc4_prime_import_sg_table
  32. vc4_grab_bin_bo
  33. vc4_create_bo_ioctl
  34. vc4_mmap_bo_ioctl
  35. vc4_create_shader_bo_ioctl
  36. vc4_set_tiling_ioctl
  37. vc4_get_tiling_ioctl
  38. vc4_bo_cache_init
  39. vc4_bo_cache_destroy
  40. vc4_label_bo_ioctl

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  *  Copyright © 2015 Broadcom
   4  */
   5 
   6 /**
   7  * DOC: VC4 GEM BO management support
   8  *
   9  * The VC4 GPU architecture (both scanout and rendering) has direct
  10  * access to system memory with no MMU in between.  To support it, we
  11  * use the GEM CMA helper functions to allocate contiguous ranges of
  12  * physical memory for our BOs.
  13  *
  14  * Since the CMA allocator is very slow, we keep a cache of recently
  15  * freed BOs around so that the kernel's allocation of objects for 3D
  16  * rendering can return quickly.
  17  */
  18 
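/*
 * Illustrative sketch (not part of the driver): a typical kernel-internal
 * allocate/use/free cycle against the helpers below.  vc4_bo_create()
 * first tries the kernel BO cache and only falls back to a slow CMA
 * allocation on a miss; the final reference drop goes through
 * vc4_free_object(), which parks the backing memory in the cache instead
 * of freeing it.  The RCL label is just an example type.
 *
 *	struct vc4_bo *bo = vc4_bo_create(dev, size, true, VC4_BO_TYPE_RCL);
 *
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *	// ... fill bo->base.vaddr, point the hardware at bo->base.paddr ...
 *	drm_gem_object_put_unlocked(&bo->base.base);
 */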
  19 #include <linux/dma-buf.h>
  20 
  21 #include "vc4_drv.h"
  22 #include "uapi/drm/vc4_drm.h"
  23 
  24 static const char * const bo_type_names[] = {
  25         "kernel",
  26         "V3D",
  27         "V3D shader",
  28         "dumb",
  29         "binner",
  30         "RCL",
  31         "BCL",
  32         "kernel BO cache",
  33 };
  34 
  35 static bool is_user_label(int label)
  36 {
  37         return label >= VC4_BO_TYPE_COUNT;
  38 }
  39 
  40 static void vc4_bo_stats_print(struct drm_printer *p, struct vc4_dev *vc4)
  41 {
  42         int i;
  43 
  44         for (i = 0; i < vc4->num_labels; i++) {
  45                 if (!vc4->bo_labels[i].num_allocated)
  46                         continue;
  47 
  48                 drm_printf(p, "%30s: %6dkb BOs (%d)\n",
  49                            vc4->bo_labels[i].name,
  50                            vc4->bo_labels[i].size_allocated / 1024,
  51                            vc4->bo_labels[i].num_allocated);
  52         }
  53 
  54         mutex_lock(&vc4->purgeable.lock);
  55         if (vc4->purgeable.num)
  56                 drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
  57                            vc4->purgeable.size / 1024, vc4->purgeable.num);
  58 
  59         if (vc4->purgeable.purged_num)
  60                 drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
  61                            vc4->purgeable.purged_size / 1024,
  62                            vc4->purgeable.purged_num);
  63         mutex_unlock(&vc4->purgeable.lock);
  64 }
  65 
  66 static int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
  67 {
  68         struct drm_info_node *node = (struct drm_info_node *)m->private;
  69         struct drm_device *dev = node->minor->dev;
  70         struct vc4_dev *vc4 = to_vc4_dev(dev);
  71         struct drm_printer p = drm_seq_file_printer(m);
  72 
  73         vc4_bo_stats_print(&p, vc4);
  74 
  75         return 0;
  76 }
  77 
  78 /* Takes ownership of *name and returns the appropriate slot for it in
  79  * the bo_labels[] array, extending it as necessary.
  80  *
  81  * This is inefficient and could use a hash table instead of walking
  82  * an array and strcmp()ing.  However, the assumption is that user
  83  * labeling will be infrequent (scanout buffers and other long-lived
  84  * objects, or debug driver builds), so we can live with it for now.
  85  */
  86 static int vc4_get_user_label(struct vc4_dev *vc4, const char *name)
  87 {
  88         int i;
  89         int free_slot = -1;
  90 
  91         for (i = 0; i < vc4->num_labels; i++) {
  92                 if (!vc4->bo_labels[i].name) {
  93                         free_slot = i;
  94                 } else if (strcmp(vc4->bo_labels[i].name, name) == 0) {
  95                         kfree(name);
  96                         return i;
  97                 }
  98         }
  99 
 100         if (free_slot != -1) {
 101                 WARN_ON(vc4->bo_labels[free_slot].num_allocated != 0);
 102                 vc4->bo_labels[free_slot].name = name;
 103                 return free_slot;
 104         } else {
 105                 u32 new_label_count = vc4->num_labels + 1;
 106                 struct vc4_label *new_labels =
 107                         krealloc(vc4->bo_labels,
 108                                  new_label_count * sizeof(*new_labels),
 109                                  GFP_KERNEL);
 110 
 111                 if (!new_labels) {
 112                         kfree(name);
 113                         return -1;
 114                 }
 115 
 116                 free_slot = vc4->num_labels;
 117                 vc4->bo_labels = new_labels;
 118                 vc4->num_labels = new_label_count;
 119 
 120                 vc4->bo_labels[free_slot].name = name;
 121                 vc4->bo_labels[free_slot].num_allocated = 0;
 122                 vc4->bo_labels[free_slot].size_allocated = 0;
 123 
 124                 return free_slot;
 125         }
 126 }
 127 
 128 static void vc4_bo_set_label(struct drm_gem_object *gem_obj, int label)
 129 {
 130         struct vc4_bo *bo = to_vc4_bo(gem_obj);
 131         struct vc4_dev *vc4 = to_vc4_dev(gem_obj->dev);
 132 
 133         lockdep_assert_held(&vc4->bo_lock);
 134 
 135         if (label != -1) {
 136                 vc4->bo_labels[label].num_allocated++;
 137                 vc4->bo_labels[label].size_allocated += gem_obj->size;
 138         }
 139 
 140         vc4->bo_labels[bo->label].num_allocated--;
 141         vc4->bo_labels[bo->label].size_allocated -= gem_obj->size;
 142 
 143         if (vc4->bo_labels[bo->label].num_allocated == 0 &&
 144             is_user_label(bo->label)) {
 145                 /* Free user BO label slots on last unreference.
 146                  * Slots are just where we track the stats for a given
 147                  * name, and once a name is unused we can reuse that
 148                  * slot.
 149                  */
 150                 kfree(vc4->bo_labels[bo->label].name);
 151                 vc4->bo_labels[bo->label].name = NULL;
 152         }
 153 
 154         bo->label = label;
 155 }
 156 
 157 static uint32_t bo_page_index(size_t size)
 158 {
 159         return (size / PAGE_SIZE) - 1;
 160 }
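/*
 * Worked example (assuming 4 KiB pages): a 16 KiB BO spans four pages and
 * maps to bucket index 3, i.e. bucket i of the cache's size_list holds
 * BOs of exactly (i + 1) pages.
 */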
 161 
 162 static void vc4_bo_destroy(struct vc4_bo *bo)
 163 {
 164         struct drm_gem_object *obj = &bo->base.base;
 165         struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
 166 
 167         lockdep_assert_held(&vc4->bo_lock);
 168 
 169         vc4_bo_set_label(obj, -1);
 170 
 171         if (bo->validated_shader) {
 172                 kfree(bo->validated_shader->uniform_addr_offsets);
 173                 kfree(bo->validated_shader->texture_samples);
 174                 kfree(bo->validated_shader);
 175                 bo->validated_shader = NULL;
 176         }
 177 
 178         drm_gem_cma_free_object(obj);
 179 }
 180 
 181 static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
 182 {
 183         struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
 184 
 185         lockdep_assert_held(&vc4->bo_lock);
 186         list_del(&bo->unref_head);
 187         list_del(&bo->size_head);
 188 }
 189 
 190 static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
 191                                                      size_t size)
 192 {
 193         struct vc4_dev *vc4 = to_vc4_dev(dev);
 194         uint32_t page_index = bo_page_index(size);
 195 
 196         if (vc4->bo_cache.size_list_size <= page_index) {
 197                 uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
 198                                         page_index + 1);
 199                 struct list_head *new_list;
 200                 uint32_t i;
 201 
 202                 new_list = kmalloc_array(new_size, sizeof(struct list_head),
 203                                          GFP_KERNEL);
 204                 if (!new_list)
 205                         return NULL;
 206 
 207                 /* Rebase the old cached BO lists to their new list
 208                  * head locations.
 209                  */
 210                 for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
 211                         struct list_head *old_list =
 212                                 &vc4->bo_cache.size_list[i];
 213 
 214                         if (list_empty(old_list))
 215                                 INIT_LIST_HEAD(&new_list[i]);
 216                         else
 217                                 list_replace(old_list, &new_list[i]);
 218                 }
 219                 /* And initialize the brand new BO list heads. */
 220                 for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
 221                         INIT_LIST_HEAD(&new_list[i]);
 222 
 223                 kfree(vc4->bo_cache.size_list);
 224                 vc4->bo_cache.size_list = new_list;
 225                 vc4->bo_cache.size_list_size = new_size;
 226         }
 227 
 228         return &vc4->bo_cache.size_list[page_index];
 229 }
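/*
 * Example of the resize path above (a sketch, not authoritative): with
 * size_list_size == 4, caching a freed 6-page BO (page_index 5) grows the
 * array to max(4 * 2, 5 + 1) = 8 buckets, relinks the four existing list
 * heads into the new array and initializes the four new ones.
 */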
 230 
 231 static void vc4_bo_cache_purge(struct drm_device *dev)
 232 {
 233         struct vc4_dev *vc4 = to_vc4_dev(dev);
 234 
 235         mutex_lock(&vc4->bo_lock);
 236         while (!list_empty(&vc4->bo_cache.time_list)) {
 237                 struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
 238                                                     struct vc4_bo, unref_head);
 239                 vc4_bo_remove_from_cache(bo);
 240                 vc4_bo_destroy(bo);
 241         }
 242         mutex_unlock(&vc4->bo_lock);
 243 }
 244 
 245 void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
 246 {
 247         struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
 248 
 249         mutex_lock(&vc4->purgeable.lock);
 250         list_add_tail(&bo->size_head, &vc4->purgeable.list);
 251         vc4->purgeable.num++;
 252         vc4->purgeable.size += bo->base.base.size;
 253         mutex_unlock(&vc4->purgeable.lock);
 254 }
 255 
 256 static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
 257 {
 258         struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
 259 
 260         /* list_del_init() is used here because the caller might release
 261          * the purgeable lock in order to acquire the madv one and update the
 262          * madv status.
 263          * During this short period of time a user might decide to mark
 264          * the BO as unpurgeable, and if bo->madv is set to
 265          * VC4_MADV_DONTNEED it will try to remove the BO from the
 266          * purgeable list which will fail if the ->next/prev fields
 267          * are set to LIST_POISON1/LIST_POISON2 (which is what
 268          * list_del() does).
 269          * Re-initializing the list element guarantees that list_del()
 270          * will work correctly even if it's a NOP.
 271          */
 272         list_del_init(&bo->size_head);
 273         vc4->purgeable.num--;
 274         vc4->purgeable.size -= bo->base.base.size;
 275 }
 276 
 277 void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo)
 278 {
 279         struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
 280 
 281         mutex_lock(&vc4->purgeable.lock);
 282         vc4_bo_remove_from_purgeable_pool_locked(bo);
 283         mutex_unlock(&vc4->purgeable.lock);
 284 }
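/*
 * How BOs normally enter and leave this pool (hedged sketch; the madvise
 * ioctl handler itself lives outside this file, in vc4_gem.c as assumed
 * here, and the struct layout follows uapi/drm/vc4_drm.h as assumed).
 * Userspace marks an idle BO as purgeable; once the last use count is
 * dropped, vc4_bo_dec_usecnt() moves it onto the purgeable list.  Before
 * reusing the BO, userspace flips it back to WILLNEED and checks whether
 * the contents survived:
 *
 *	struct drm_vc4_gem_madvise arg = {
 *		.handle = bo_handle,
 *		.madv = VC4_MADV_DONTNEED,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg);	// may be purged now
 *
 *	arg.madv = VC4_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg);
 *	if (!arg.retained)
 *		regenerate_contents(bo_handle);		// hypothetical helper
 */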
 285 
 286 static void vc4_bo_purge(struct drm_gem_object *obj)
 287 {
 288         struct vc4_bo *bo = to_vc4_bo(obj);
 289         struct drm_device *dev = obj->dev;
 290 
 291         WARN_ON(!mutex_is_locked(&bo->madv_lock));
 292         WARN_ON(bo->madv != VC4_MADV_DONTNEED);
 293 
 294         drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
 295 
 296         dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr);
 297         bo->base.vaddr = NULL;
 298         bo->madv = __VC4_MADV_PURGED;
 299 }
 300 
 301 static void vc4_bo_userspace_cache_purge(struct drm_device *dev)
 302 {
 303         struct vc4_dev *vc4 = to_vc4_dev(dev);
 304 
 305         mutex_lock(&vc4->purgeable.lock);
 306         while (!list_empty(&vc4->purgeable.list)) {
 307                 struct vc4_bo *bo = list_first_entry(&vc4->purgeable.list,
 308                                                      struct vc4_bo, size_head);
 309                 struct drm_gem_object *obj = &bo->base.base;
 310                 size_t purged_size = 0;
 311 
 312                 vc4_bo_remove_from_purgeable_pool_locked(bo);
 313 
 314                 /* Release the purgeable lock while we're purging the BO so
 315                  * that other people can continue inserting things in the
 316                  * purgeable pool without having to wait for all BOs to be
 317                  * purged.
 318                  */
 319                 mutex_unlock(&vc4->purgeable.lock);
 320                 mutex_lock(&bo->madv_lock);
 321 
 322                 /* Since we released the purgeable pool lock before acquiring
 323                  * the BO madv one, the user may have marked the BO as WILLNEED
 324                  * and re-used it in the meantime.
 325                  * Before purging the BO we need to make sure
 326                  * - it is still marked as DONTNEED
 327                  * - it has not been re-inserted in the purgeable list
 328                  * - it is not used by HW blocks
 329                  * If one of these conditions is not met, just skip the entry.
 330                  */
 331                 if (bo->madv == VC4_MADV_DONTNEED &&
 332                     list_empty(&bo->size_head) &&
 333                     !refcount_read(&bo->usecnt)) {
 334                         purged_size = bo->base.base.size;
 335                         vc4_bo_purge(obj);
 336                 }
 337                 mutex_unlock(&bo->madv_lock);
 338                 mutex_lock(&vc4->purgeable.lock);
 339 
 340                 if (purged_size) {
 341                         vc4->purgeable.purged_size += purged_size;
 342                         vc4->purgeable.purged_num++;
 343                 }
 344         }
 345         mutex_unlock(&vc4->purgeable.lock);
 346 }
 347 
 348 static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
 349                                             uint32_t size,
 350                                             enum vc4_kernel_bo_type type)
 351 {
 352         struct vc4_dev *vc4 = to_vc4_dev(dev);
 353         uint32_t page_index = bo_page_index(size);
 354         struct vc4_bo *bo = NULL;
 355 
 356         size = roundup(size, PAGE_SIZE);
 357 
 358         mutex_lock(&vc4->bo_lock);
 359         if (page_index >= vc4->bo_cache.size_list_size)
 360                 goto out;
 361 
 362         if (list_empty(&vc4->bo_cache.size_list[page_index]))
 363                 goto out;
 364 
 365         bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
 366                               struct vc4_bo, size_head);
 367         vc4_bo_remove_from_cache(bo);
 368         kref_init(&bo->base.base.refcount);
 369 
 370 out:
 371         if (bo)
 372                 vc4_bo_set_label(&bo->base.base, type);
 373         mutex_unlock(&vc4->bo_lock);
 374         return bo;
 375 }
 376 
 377 /**
  378  * vc4_create_object - Implementation of driver->gem_create_object.
 379  * @dev: DRM device
 380  * @size: Size in bytes of the memory the object will reference
 381  *
 382  * This lets the CMA helpers allocate object structs for us, and keep
 383  * our BO stats correct.
 384  */
 385 struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
 386 {
 387         struct vc4_dev *vc4 = to_vc4_dev(dev);
 388         struct vc4_bo *bo;
 389 
 390         bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 391         if (!bo)
 392                 return ERR_PTR(-ENOMEM);
 393 
 394         bo->madv = VC4_MADV_WILLNEED;
 395         refcount_set(&bo->usecnt, 0);
 396         mutex_init(&bo->madv_lock);
 397         mutex_lock(&vc4->bo_lock);
 398         bo->label = VC4_BO_TYPE_KERNEL;
 399         vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
 400         vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
 401         mutex_unlock(&vc4->bo_lock);
 402 
 403         return &bo->base.base;
 404 }
 405 
 406 struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
 407                              bool allow_unzeroed, enum vc4_kernel_bo_type type)
 408 {
 409         size_t size = roundup(unaligned_size, PAGE_SIZE);
 410         struct vc4_dev *vc4 = to_vc4_dev(dev);
 411         struct drm_gem_cma_object *cma_obj;
 412         struct vc4_bo *bo;
 413 
 414         if (size == 0)
 415                 return ERR_PTR(-EINVAL);
 416 
 417         /* First, try to get a vc4_bo from the kernel BO cache. */
 418         bo = vc4_bo_get_from_cache(dev, size, type);
 419         if (bo) {
 420                 if (!allow_unzeroed)
 421                         memset(bo->base.vaddr, 0, bo->base.base.size);
 422                 return bo;
 423         }
 424 
 425         cma_obj = drm_gem_cma_create(dev, size);
 426         if (IS_ERR(cma_obj)) {
 427                 /*
 428                  * If we've run out of CMA memory, kill the cache of
  429                  * CMA allocations we've got lying around and try again.
 430                  */
 431                 vc4_bo_cache_purge(dev);
 432                 cma_obj = drm_gem_cma_create(dev, size);
 433         }
 434 
 435         if (IS_ERR(cma_obj)) {
 436                 /*
 437                  * Still not enough CMA memory, purge the userspace BO
 438                  * cache and retry.
 439                  * This is sub-optimal since we purge the whole userspace
  440                  * BO cache, which forces users that want to re-use the BO to
 441                  * restore its initial content.
 442                  * Ideally, we should purge entries one by one and retry
 443                  * after each to see if CMA allocation succeeds. Or even
 444                  * better, try to find an entry with at least the same
 445                  * size.
 446                  */
 447                 vc4_bo_userspace_cache_purge(dev);
 448                 cma_obj = drm_gem_cma_create(dev, size);
 449         }
 450 
 451         if (IS_ERR(cma_obj)) {
 452                 struct drm_printer p = drm_info_printer(vc4->dev->dev);
 453                 DRM_ERROR("Failed to allocate from CMA:\n");
 454                 vc4_bo_stats_print(&p, vc4);
 455                 return ERR_PTR(-ENOMEM);
 456         }
 457         bo = to_vc4_bo(&cma_obj->base);
 458 
 459         /* By default, BOs do not support the MADV ioctl. This will be enabled
 460          * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
 461          * BOs).
 462          */
 463         bo->madv = __VC4_MADV_NOTSUPP;
 464 
 465         mutex_lock(&vc4->bo_lock);
 466         vc4_bo_set_label(&cma_obj->base, type);
 467         mutex_unlock(&vc4->bo_lock);
 468 
 469         return bo;
 470 }
 471 
 472 int vc4_dumb_create(struct drm_file *file_priv,
 473                     struct drm_device *dev,
 474                     struct drm_mode_create_dumb *args)
 475 {
 476         int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 477         struct vc4_bo *bo = NULL;
 478         int ret;
 479 
 480         if (args->pitch < min_pitch)
 481                 args->pitch = min_pitch;
 482 
 483         if (args->size < args->pitch * args->height)
 484                 args->size = args->pitch * args->height;
 485 
 486         bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
 487         if (IS_ERR(bo))
 488                 return PTR_ERR(bo);
 489 
 490         bo->madv = VC4_MADV_WILLNEED;
 491 
 492         ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
 493         drm_gem_object_put_unlocked(&bo->base.base);
 494 
 495         return ret;
 496 }
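/*
 * Userspace view of the dumb-buffer path above (a minimal sketch using
 * the generic KMS ioctl; nothing vc4-specific beyond the handler):
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create) == 0) {
 *		// create.handle, create.pitch and create.size were filled
 *		// in by vc4_dumb_create() above.
 *	}
 */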
 497 
 498 static void vc4_bo_cache_free_old(struct drm_device *dev)
 499 {
 500         struct vc4_dev *vc4 = to_vc4_dev(dev);
 501         unsigned long expire_time = jiffies - msecs_to_jiffies(1000);
 502 
 503         lockdep_assert_held(&vc4->bo_lock);
 504 
 505         while (!list_empty(&vc4->bo_cache.time_list)) {
 506                 struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
 507                                                     struct vc4_bo, unref_head);
 508                 if (time_before(expire_time, bo->free_time)) {
 509                         mod_timer(&vc4->bo_cache.time_timer,
 510                                   round_jiffies_up(jiffies +
 511                                                    msecs_to_jiffies(1000)));
 512                         return;
 513                 }
 514 
 515                 vc4_bo_remove_from_cache(bo);
 516                 vc4_bo_destroy(bo);
 517         }
 518 }
 519 
 520 /* Called on the last userspace/kernel unreference of the BO.  Returns
 521  * it to the BO cache if possible, otherwise frees it.
 522  */
 523 void vc4_free_object(struct drm_gem_object *gem_bo)
 524 {
 525         struct drm_device *dev = gem_bo->dev;
 526         struct vc4_dev *vc4 = to_vc4_dev(dev);
 527         struct vc4_bo *bo = to_vc4_bo(gem_bo);
 528         struct list_head *cache_list;
 529 
 530         /* Remove the BO from the purgeable list. */
 531         mutex_lock(&bo->madv_lock);
 532         if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt))
 533                 vc4_bo_remove_from_purgeable_pool(bo);
 534         mutex_unlock(&bo->madv_lock);
 535 
 536         mutex_lock(&vc4->bo_lock);
 537         /* If the object references someone else's memory, we can't cache it.
 538          */
 539         if (gem_bo->import_attach) {
 540                 vc4_bo_destroy(bo);
 541                 goto out;
 542         }
 543 
 544         /* Don't cache if it was publicly named. */
 545         if (gem_bo->name) {
 546                 vc4_bo_destroy(bo);
 547                 goto out;
 548         }
 549 
 550         /* If this object was partially constructed but CMA allocation
 551          * had failed, just free it. Can also happen when the BO has been
 552          * purged.
 553          */
 554         if (!bo->base.vaddr) {
 555                 vc4_bo_destroy(bo);
 556                 goto out;
 557         }
 558 
 559         cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
 560         if (!cache_list) {
 561                 vc4_bo_destroy(bo);
 562                 goto out;
 563         }
 564 
 565         if (bo->validated_shader) {
 566                 kfree(bo->validated_shader->uniform_addr_offsets);
 567                 kfree(bo->validated_shader->texture_samples);
 568                 kfree(bo->validated_shader);
 569                 bo->validated_shader = NULL;
 570         }
 571 
 572         /* Reset madv and usecnt before adding the BO to the cache. */
 573         bo->madv = __VC4_MADV_NOTSUPP;
 574         refcount_set(&bo->usecnt, 0);
 575 
 576         bo->t_format = false;
 577         bo->free_time = jiffies;
 578         list_add(&bo->size_head, cache_list);
 579         list_add(&bo->unref_head, &vc4->bo_cache.time_list);
 580 
 581         vc4_bo_set_label(&bo->base.base, VC4_BO_TYPE_KERNEL_CACHE);
 582 
 583         vc4_bo_cache_free_old(dev);
 584 
 585 out:
 586         mutex_unlock(&vc4->bo_lock);
 587 }
 588 
 589 static void vc4_bo_cache_time_work(struct work_struct *work)
 590 {
 591         struct vc4_dev *vc4 =
 592                 container_of(work, struct vc4_dev, bo_cache.time_work);
 593         struct drm_device *dev = vc4->dev;
 594 
 595         mutex_lock(&vc4->bo_lock);
 596         vc4_bo_cache_free_old(dev);
 597         mutex_unlock(&vc4->bo_lock);
 598 }
 599 
 600 int vc4_bo_inc_usecnt(struct vc4_bo *bo)
 601 {
 602         int ret;
 603 
 604         /* Fast path: if the BO is already retained by someone, no need to
 605          * check the madv status.
 606          */
 607         if (refcount_inc_not_zero(&bo->usecnt))
 608                 return 0;
 609 
 610         mutex_lock(&bo->madv_lock);
 611         switch (bo->madv) {
 612         case VC4_MADV_WILLNEED:
 613                 if (!refcount_inc_not_zero(&bo->usecnt))
 614                         refcount_set(&bo->usecnt, 1);
 615                 ret = 0;
 616                 break;
 617         case VC4_MADV_DONTNEED:
 618                 /* We shouldn't use a BO marked as purgeable if at least
 619                  * someone else retained its content by incrementing usecnt.
 620                  * Luckily the BO hasn't been purged yet, but something wrong
 621                  * is happening here. Just throw an error instead of
 622                  * authorizing this use case.
 623                  */
 624         case __VC4_MADV_PURGED:
 625                 /* We can't use a purged BO. */
 626         default:
 627                 /* Invalid madv value. */
 628                 ret = -EINVAL;
 629                 break;
 630         }
 631         mutex_unlock(&bo->madv_lock);
 632 
 633         return ret;
 634 }
 635 
 636 void vc4_bo_dec_usecnt(struct vc4_bo *bo)
 637 {
 638         /* Fast path: if the BO is still retained by someone, no need to test
 639          * the madv value.
 640          */
 641         if (refcount_dec_not_one(&bo->usecnt))
 642                 return;
 643 
 644         mutex_lock(&bo->madv_lock);
 645         if (refcount_dec_and_test(&bo->usecnt) &&
 646             bo->madv == VC4_MADV_DONTNEED)
 647                 vc4_bo_add_to_purgeable_pool(bo);
 648         mutex_unlock(&bo->madv_lock);
 649 }
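/*
 * Expected pairing for the two helpers above (a sketch of kernel-internal
 * usage): take a use count before handing the BO to the hardware so a
 * purgeable BO cannot be purged mid-job, and drop it on completion so a
 * DONTNEED BO can return to the purgeable pool.
 *
 *	ret = vc4_bo_inc_usecnt(bo);
 *	if (ret)
 *		return ret;	// already purged, or madv state is invalid
 *	// ... queue and run the job that touches the BO ...
 *	vc4_bo_dec_usecnt(bo);
 */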
 650 
 651 static void vc4_bo_cache_time_timer(struct timer_list *t)
 652 {
 653         struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);
 654 
 655         schedule_work(&vc4->bo_cache.time_work);
 656 }
 657 
  658 struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags)
 659 {
 660         struct vc4_bo *bo = to_vc4_bo(obj);
 661         struct dma_buf *dmabuf;
 662         int ret;
 663 
 664         if (bo->validated_shader) {
 665                 DRM_DEBUG("Attempting to export shader BO\n");
 666                 return ERR_PTR(-EINVAL);
 667         }
 668 
 669         /* Note: as soon as the BO is exported it becomes unpurgeable, because
  670          * no one ever decrements the usecnt even if the reference held by the
 671          * exported BO is released. This shouldn't be a problem since we don't
 672          * expect exported BOs to be marked as purgeable.
 673          */
 674         ret = vc4_bo_inc_usecnt(bo);
 675         if (ret) {
 676                 DRM_ERROR("Failed to increment BO usecnt\n");
 677                 return ERR_PTR(ret);
 678         }
 679 
 680         dmabuf = drm_gem_prime_export(obj, flags);
 681         if (IS_ERR(dmabuf))
 682                 vc4_bo_dec_usecnt(bo);
 683 
 684         return dmabuf;
 685 }
 686 
 687 vm_fault_t vc4_fault(struct vm_fault *vmf)
 688 {
 689         struct vm_area_struct *vma = vmf->vma;
 690         struct drm_gem_object *obj = vma->vm_private_data;
 691         struct vc4_bo *bo = to_vc4_bo(obj);
 692 
 693         /* The only reason we would end up here is when user-space accesses
  694          * the BO's memory after it's been purged.
 695          */
 696         mutex_lock(&bo->madv_lock);
 697         WARN_ON(bo->madv != __VC4_MADV_PURGED);
 698         mutex_unlock(&bo->madv_lock);
 699 
 700         return VM_FAULT_SIGBUS;
 701 }
 702 
 703 int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
 704 {
 705         struct drm_gem_object *gem_obj;
 706         unsigned long vm_pgoff;
 707         struct vc4_bo *bo;
 708         int ret;
 709 
 710         ret = drm_gem_mmap(filp, vma);
 711         if (ret)
 712                 return ret;
 713 
 714         gem_obj = vma->vm_private_data;
 715         bo = to_vc4_bo(gem_obj);
 716 
 717         if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
  718                 DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
 719                 return -EINVAL;
 720         }
 721 
 722         if (bo->madv != VC4_MADV_WILLNEED) {
  723                 DRM_DEBUG("mmapping of %s BO not allowed\n",
 724                           bo->madv == VC4_MADV_DONTNEED ?
 725                           "purgeable" : "purged");
 726                 return -EINVAL;
 727         }
 728 
 729         /*
 730          * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
 731          * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
 732          * the whole buffer.
 733          */
 734         vma->vm_flags &= ~VM_PFNMAP;
 735 
 736         /* This ->vm_pgoff dance is needed to make all parties happy:
 737          * - dma_mmap_wc() uses ->vm_pgoff as an offset within the allocated
 738          *   mem-region, hence the need to set it to zero (the value set by
 739          *   the DRM core is a virtual offset encoding the GEM object-id)
 740          * - the mmap() core logic needs ->vm_pgoff to be restored to its
 741          *   initial value before returning from this function because it
  742          *   encodes the offset of this GEM in the dev->anon_inode pseudo-file
  743          *   and this information will be used when we invalidate userspace
  744          *   mappings with drm_vma_node_unmap() (called from vc4_bo_purge()).
 745          */
 746         vm_pgoff = vma->vm_pgoff;
 747         vma->vm_pgoff = 0;
 748         ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
 749                           bo->base.paddr, vma->vm_end - vma->vm_start);
 750         vma->vm_pgoff = vm_pgoff;
 751 
 752         if (ret)
 753                 drm_gem_vm_close(vma);
 754 
 755         return ret;
 756 }
 757 
 758 int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 759 {
 760         struct vc4_bo *bo = to_vc4_bo(obj);
 761 
 762         if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
  763                 DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
 764                 return -EINVAL;
 765         }
 766 
 767         return drm_gem_cma_prime_mmap(obj, vma);
 768 }
 769 
 770 void *vc4_prime_vmap(struct drm_gem_object *obj)
 771 {
 772         struct vc4_bo *bo = to_vc4_bo(obj);
 773 
 774         if (bo->validated_shader) {
  775                 DRM_DEBUG("mmapping of shader BOs not allowed.\n");
 776                 return ERR_PTR(-EINVAL);
 777         }
 778 
 779         return drm_gem_cma_prime_vmap(obj);
 780 }
 781 
 782 struct drm_gem_object *
 783 vc4_prime_import_sg_table(struct drm_device *dev,
 784                           struct dma_buf_attachment *attach,
 785                           struct sg_table *sgt)
 786 {
 787         struct drm_gem_object *obj;
 788 
 789         obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
 790         if (IS_ERR(obj))
 791                 return obj;
 792 
 793         return obj;
 794 }
 795 
 796 static int vc4_grab_bin_bo(struct vc4_dev *vc4, struct vc4_file *vc4file)
 797 {
 798         int ret;
 799 
 800         if (!vc4->v3d)
 801                 return -ENODEV;
 802 
 803         if (vc4file->bin_bo_used)
 804                 return 0;
 805 
 806         ret = vc4_v3d_bin_bo_get(vc4, &vc4file->bin_bo_used);
 807         if (ret)
 808                 return ret;
 809 
 810         return 0;
 811 }
 812 
 813 int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
 814                         struct drm_file *file_priv)
 815 {
 816         struct drm_vc4_create_bo *args = data;
 817         struct vc4_file *vc4file = file_priv->driver_priv;
 818         struct vc4_dev *vc4 = to_vc4_dev(dev);
 819         struct vc4_bo *bo = NULL;
 820         int ret;
 821 
 822         ret = vc4_grab_bin_bo(vc4, vc4file);
 823         if (ret)
 824                 return ret;
 825 
  826         /*
  827          * Ask for a zeroed BO (allow_unzeroed == false): a BO recycled from
  828          * the kernel BO cache may still hold another user's data.
  829          */
 830         bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D);
 831         if (IS_ERR(bo))
 832                 return PTR_ERR(bo);
 833 
 834         bo->madv = VC4_MADV_WILLNEED;
 835 
 836         ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
 837         drm_gem_object_put_unlocked(&bo->base.base);
 838 
 839         return ret;
 840 }
 841 
 842 int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
 843                       struct drm_file *file_priv)
 844 {
 845         struct drm_vc4_mmap_bo *args = data;
 846         struct drm_gem_object *gem_obj;
 847 
 848         gem_obj = drm_gem_object_lookup(file_priv, args->handle);
 849         if (!gem_obj) {
 850                 DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
 851                 return -EINVAL;
 852         }
 853 
 854         /* The mmap offset was set up at BO allocation time. */
 855         args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
 856 
 857         drm_gem_object_put_unlocked(gem_obj);
 858         return 0;
 859 }
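/*
 * Userspace sketch of the create + map flow served by the two ioctls
 * above (field names per uapi/drm/vc4_drm.h as assumed here):
 *
 *	struct drm_vc4_create_bo create = { .size = 65536 };
 *	struct drm_vc4_mmap_bo map = { 0 };
 *	void *ptr;
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_CREATE_BO, &create);
 *	map.handle = create.handle;
 *	drmIoctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 */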
 860 
 861 int
 862 vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
 863                            struct drm_file *file_priv)
 864 {
 865         struct drm_vc4_create_shader_bo *args = data;
 866         struct vc4_file *vc4file = file_priv->driver_priv;
 867         struct vc4_dev *vc4 = to_vc4_dev(dev);
 868         struct vc4_bo *bo = NULL;
 869         int ret;
 870 
 871         if (args->size == 0)
 872                 return -EINVAL;
 873 
 874         if (args->size % sizeof(u64) != 0)
 875                 return -EINVAL;
 876 
 877         if (args->flags != 0) {
 878                 DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
 879                 return -EINVAL;
 880         }
 881 
 882         if (args->pad != 0) {
 883                 DRM_INFO("Pad set: 0x%08x\n", args->pad);
 884                 return -EINVAL;
 885         }
 886 
 887         ret = vc4_grab_bin_bo(vc4, vc4file);
 888         if (ret)
 889                 return ret;
 890 
 891         bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
 892         if (IS_ERR(bo))
 893                 return PTR_ERR(bo);
 894 
 895         bo->madv = VC4_MADV_WILLNEED;
 896 
 897         if (copy_from_user(bo->base.vaddr,
 898                              (void __user *)(uintptr_t)args->data,
 899                              args->size)) {
 900                 ret = -EFAULT;
 901                 goto fail;
 902         }
  903         /* Clear the rest of the memory, which may still hold stale data
  904          * if the BO came from the kernel BO cache.
  905          */
 906         memset(bo->base.vaddr + args->size, 0,
 907                bo->base.base.size - args->size);
 908 
 909         bo->validated_shader = vc4_validate_shader(&bo->base);
 910         if (!bo->validated_shader) {
 911                 ret = -EINVAL;
 912                 goto fail;
 913         }
 914 
  915         /* We have to create the handle after validation, to avoid
  916          * races with userspace doing things like mmapping the shader BO.
  917          */
 918         ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
 919 
 920 fail:
 921         drm_gem_object_put_unlocked(&bo->base.base);
 922 
 923         return ret;
 924 }
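/*
 * Userspace sketch for the shader path above (hedged; struct layout per
 * uapi/drm/vc4_drm.h as assumed here).  The QPU code is copied in and
 * validated at create time, before a handle exists, so the resulting BO
 * can never be mapped writable.  qpu_code/code_size are hypothetical.
 *
 *	struct drm_vc4_create_shader_bo shader = {
 *		.size = code_size,			// multiple of 8 bytes
 *		.data = (uintptr_t)qpu_code,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_VC4_CREATE_SHADER_BO, &shader) == 0)
 *		submit_with_shader(shader.handle);	// hypothetical helper
 */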
 925 
 926 /**
 927  * vc4_set_tiling_ioctl() - Sets the tiling modifier for a BO.
 928  * @dev: DRM device
 929  * @data: ioctl argument
 930  * @file_priv: DRM file for this fd
 931  *
 932  * The tiling state of the BO decides the default modifier of an fb if
 933  * no specific modifier was set by userspace, and the return value of
 934  * vc4_get_tiling_ioctl() (so that userspace can treat a BO it
 935  * received from dmabuf as the same tiling format as the producer
 936  * used).
 937  */
 938 int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
 939                          struct drm_file *file_priv)
 940 {
 941         struct drm_vc4_set_tiling *args = data;
 942         struct drm_gem_object *gem_obj;
 943         struct vc4_bo *bo;
 944         bool t_format;
 945 
 946         if (args->flags != 0)
 947                 return -EINVAL;
 948 
 949         switch (args->modifier) {
 950         case DRM_FORMAT_MOD_NONE:
 951                 t_format = false;
 952                 break;
 953         case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
 954                 t_format = true;
 955                 break;
 956         default:
 957                 return -EINVAL;
 958         }
 959 
 960         gem_obj = drm_gem_object_lookup(file_priv, args->handle);
 961         if (!gem_obj) {
 962                 DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
 963                 return -ENOENT;
 964         }
 965         bo = to_vc4_bo(gem_obj);
 966         bo->t_format = t_format;
 967 
 968         drm_gem_object_put_unlocked(gem_obj);
 969 
 970         return 0;
 971 }
 972 
 973 /**
 974  * vc4_get_tiling_ioctl() - Gets the tiling modifier for a BO.
 975  * @dev: DRM device
 976  * @data: ioctl argument
 977  * @file_priv: DRM file for this fd
 978  *
 979  * Returns the tiling modifier for a BO as set by vc4_set_tiling_ioctl().
 980  */
 981 int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
 982                          struct drm_file *file_priv)
 983 {
 984         struct drm_vc4_get_tiling *args = data;
 985         struct drm_gem_object *gem_obj;
 986         struct vc4_bo *bo;
 987 
 988         if (args->flags != 0 || args->modifier != 0)
 989                 return -EINVAL;
 990 
 991         gem_obj = drm_gem_object_lookup(file_priv, args->handle);
 992         if (!gem_obj) {
 993                 DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
 994                 return -ENOENT;
 995         }
 996         bo = to_vc4_bo(gem_obj);
 997 
 998         if (bo->t_format)
 999                 args->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
1000         else
1001                 args->modifier = DRM_FORMAT_MOD_NONE;
1002 
1003         drm_gem_object_put_unlocked(gem_obj);
1004 
1005         return 0;
1006 }
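/*
 * Userspace sketch for the tiling pair above (hedged; field names per
 * uapi/drm/vc4_drm.h as assumed here): a producer records the T-tiled
 * layout on the BO, and a consumer that imported the same BO can query
 * it back instead of guessing the modifier.
 *
 *	struct drm_vc4_set_tiling set = {
 *		.handle = bo_handle,
 *		.modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
 *	};
 *	struct drm_vc4_get_tiling get = { .handle = bo_handle };
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_SET_TILING, &set);
 *	drmIoctl(fd, DRM_IOCTL_VC4_GET_TILING, &get);
 *	// get.modifier now reads back DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED
 */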
1007 
1008 int vc4_bo_cache_init(struct drm_device *dev)
1009 {
1010         struct vc4_dev *vc4 = to_vc4_dev(dev);
1011         int i;
1012 
1013         /* Create the initial set of BO labels that the kernel will
1014          * use.  This lets us avoid a bunch of string reallocation in
1015          * the kernel's draw and BO allocation paths.
1016          */
1017         vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels),
1018                                  GFP_KERNEL);
1019         if (!vc4->bo_labels)
1020                 return -ENOMEM;
1021         vc4->num_labels = VC4_BO_TYPE_COUNT;
1022 
1023         BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT);
1024         for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
1025                 vc4->bo_labels[i].name = bo_type_names[i];
1026 
1027         mutex_init(&vc4->bo_lock);
1028 
1029         vc4_debugfs_add_file(dev, "bo_stats", vc4_bo_stats_debugfs, NULL);
1030 
1031         INIT_LIST_HEAD(&vc4->bo_cache.time_list);
1032 
1033         INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
1034         timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);
1035 
1036         return 0;
1037 }
1038 
1039 void vc4_bo_cache_destroy(struct drm_device *dev)
1040 {
1041         struct vc4_dev *vc4 = to_vc4_dev(dev);
1042         int i;
1043 
1044         del_timer(&vc4->bo_cache.time_timer);
1045         cancel_work_sync(&vc4->bo_cache.time_work);
1046 
1047         vc4_bo_cache_purge(dev);
1048 
1049         for (i = 0; i < vc4->num_labels; i++) {
1050                 if (vc4->bo_labels[i].num_allocated) {
1051                         DRM_ERROR("Destroying BO cache with %d %s "
1052                                   "BOs still allocated\n",
1053                                   vc4->bo_labels[i].num_allocated,
1054                                   vc4->bo_labels[i].name);
1055                 }
1056 
1057                 if (is_user_label(i))
1058                         kfree(vc4->bo_labels[i].name);
1059         }
1060         kfree(vc4->bo_labels);
1061 }
1062 
1063 int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
1064                        struct drm_file *file_priv)
1065 {
1066         struct vc4_dev *vc4 = to_vc4_dev(dev);
1067         struct drm_vc4_label_bo *args = data;
1068         char *name;
1069         struct drm_gem_object *gem_obj;
1070         int ret = 0, label;
1071 
1072         if (!args->len)
1073                 return -EINVAL;
1074 
1075         name = strndup_user(u64_to_user_ptr(args->name), args->len + 1);
1076         if (IS_ERR(name))
1077                 return PTR_ERR(name);
1078 
1079         gem_obj = drm_gem_object_lookup(file_priv, args->handle);
1080         if (!gem_obj) {
1081                 DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
1082                 kfree(name);
1083                 return -ENOENT;
1084         }
1085 
1086         mutex_lock(&vc4->bo_lock);
1087         label = vc4_get_user_label(vc4, name);
1088         if (label != -1)
1089                 vc4_bo_set_label(gem_obj, label);
1090         else
1091                 ret = -ENOMEM;
1092         mutex_unlock(&vc4->bo_lock);
1093 
1094         drm_gem_object_put_unlocked(gem_obj);
1095 
1096         return ret;
1097 }
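/*
 * Userspace sketch for the labeling ioctl above (hedged; field names per
 * uapi/drm/vc4_drm.h as assumed here).  The label only affects the
 * accounting printed by vc4_bo_stats_print() in the bo_stats debugfs
 * entry.
 *
 *	const char *label = "scanout";
 *	struct drm_vc4_label_bo arg = {
 *		.handle = bo_handle,
 *		.len = strlen(label),
 *		.name = (uintptr_t)label,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_LABEL_BO, &arg);
 */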
