drivers/gpu/drm/panfrost/panfrost_gem.c


DEFINITIONS

This source file includes the following definitions.
  1. panfrost_gem_free_object
  2. panfrost_gem_mapping_get
  3. panfrost_gem_teardown_mapping
  4. panfrost_gem_mapping_release
  5. panfrost_gem_mapping_put
  6. panfrost_gem_teardown_mappings
  7. panfrost_gem_open
  8. panfrost_gem_close
  9. panfrost_gem_pin
  10. panfrost_gem_create_object
  11. panfrost_gem_create_with_handle
  12. panfrost_gem_prime_import_sg_table

// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/panfrost_drm.h>
#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"

/* Called by the DRM core on the last userspace/kernel unreference of the
 * BO.
 */
static void panfrost_gem_free_object(struct drm_gem_object *obj)
{
        struct panfrost_gem_object *bo = to_panfrost_bo(obj);
        struct panfrost_device *pfdev = obj->dev->dev_private;

        /*
         * Make sure the BO is no longer inserted in the shrinker list before
         * taking care of the destruction itself. If we don't do that we have a
         * race condition between this function and what's done in
         * panfrost_gem_shrinker_scan().
         */
        mutex_lock(&pfdev->shrinker_lock);
        list_del_init(&bo->base.madv_list);
        mutex_unlock(&pfdev->shrinker_lock);

        /*
         * If we still have mappings attached to the BO, there's a problem in
         * our refcounting.
         */
        WARN_ON_ONCE(!list_empty(&bo->mappings.list));

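        /*
         * Heap BOs are backed by one sg_table per 2MB chunk (sizes are
         * rounded up to 2MB in panfrost_gem_create_with_handle()), filled
         * in lazily by the GPU fault handler; unmap and free whichever
         * chunks were actually populated.
         */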
        if (bo->sgts) {
                int i;
                int n_sgt = bo->base.base.size / SZ_2M;

                for (i = 0; i < n_sgt; i++) {
                        if (bo->sgts[i].sgl) {
                                dma_unmap_sg(pfdev->dev, bo->sgts[i].sgl,
                                             bo->sgts[i].nents, DMA_BIDIRECTIONAL);
                                sg_free_table(&bo->sgts[i]);
                        }
                }
                kfree(bo->sgts);
        }

        drm_gem_shmem_free_object(obj);
}

struct panfrost_gem_mapping *
panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
                         struct panfrost_file_priv *priv)
{
        struct panfrost_gem_mapping *iter, *mapping = NULL;

        mutex_lock(&bo->mappings.lock);
        list_for_each_entry(iter, &bo->mappings.list, node) {
                if (iter->mmu == &priv->mmu) {
                        kref_get(&iter->refcount);
                        mapping = iter;
                        break;
                }
        }
        mutex_unlock(&bo->mappings.lock);

        return mapping;
}

static void
panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
{
        struct panfrost_file_priv *priv;

        if (mapping->active)
                panfrost_mmu_unmap(mapping);

        priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
        spin_lock(&priv->mm_lock);
        if (drm_mm_node_allocated(&mapping->mmnode))
                drm_mm_remove_node(&mapping->mmnode);
        spin_unlock(&priv->mm_lock);
}

static void panfrost_gem_mapping_release(struct kref *kref)
{
        struct panfrost_gem_mapping *mapping;

        mapping = container_of(kref, struct panfrost_gem_mapping, refcount);

        panfrost_gem_teardown_mapping(mapping);
        drm_gem_object_put_unlocked(&mapping->obj->base.base);
        kfree(mapping);
}

void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
{
        if (!mapping)
                return;

        kref_put(&mapping->refcount, panfrost_gem_mapping_release);
}
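
/*
 * Usage sketch (illustrative, not part of the driver): job submission
 * resolves each BO to this file's mapping to learn its GPU virtual
 * address, then drops the reference once the job is done with it:
 *
 *      mapping = panfrost_gem_mapping_get(bo, file_priv);
 *      if (!mapping)
 *              return -EINVAL;
 *      gpu_va = mapping->mmnode.start << PAGE_SHIFT;
 *      ...
 *      panfrost_gem_mapping_put(mapping);
 */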

void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
{
        struct panfrost_gem_mapping *mapping;

        mutex_lock(&bo->mappings.lock);
        list_for_each_entry(mapping, &bo->mappings.list, node)
                panfrost_gem_teardown_mapping(mapping);
        mutex_unlock(&bo->mappings.lock);
}

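/*
 * Called by the DRM core (via drm_gem_object_funcs.open) whenever a handle
 * to the BO is created on a given DRM file: this is where the BO is given
 * a GPU VA in that file's address space.
 */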
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
        int ret;
        size_t size = obj->size;
        u64 align;
        struct panfrost_gem_object *bo = to_panfrost_bo(obj);
        unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
        struct panfrost_file_priv *priv = file_priv->driver_priv;
        struct panfrost_gem_mapping *mapping;

        mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
        if (!mapping)
                return -ENOMEM;

        INIT_LIST_HEAD(&mapping->node);
        kref_init(&mapping->refcount);
        drm_gem_object_get(obj);
        mapping->obj = bo;

        /*
         * Executable buffers cannot cross a 16MB boundary as the program
         * counter is 24 bits wide. We assume executable buffers will be
         * less than 16MB, and aligning executable buffers to their size
         * will avoid crossing a 16MB boundary.
         */
        if (!bo->noexec)
                align = size >> PAGE_SHIFT;
        else
                align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
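
        /*
         * Worked example (illustrative, assuming 4K pages): a 1MB
         * executable BO gets align = 256 pages, so its GPU VA is
         * 1MB-aligned and the whole BO fits inside a single 16MB window.
         * Non-executable BOs of 2MB or more are 2MB-aligned, which lets
         * the MMU use larger block mappings where possible.
         */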

        mapping->mmu = &priv->mmu;
        spin_lock(&priv->mm_lock);
        ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
                                         size >> PAGE_SHIFT, align, color, 0);
        spin_unlock(&priv->mm_lock);
        if (ret)
                goto err;

        if (!bo->is_heap) {
                ret = panfrost_mmu_map(mapping);
                if (ret)
                        goto err;
        }

        mutex_lock(&bo->mappings.lock);
        WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
        list_add_tail(&mapping->node, &bo->mappings.list);
        mutex_unlock(&bo->mappings.lock);

err:
        if (ret)
                panfrost_gem_mapping_put(mapping);
        return ret;
}

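/*
 * Called by the DRM core (via drm_gem_object_funcs.close) when a handle to
 * the BO is dropped on a DRM file: the file's mapping is unlinked here and
 * its GPU VA range is freed once the last reference goes away.
 */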
void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
        struct panfrost_file_priv *priv = file_priv->driver_priv;
        struct panfrost_gem_object *bo = to_panfrost_bo(obj);
        struct panfrost_gem_mapping *mapping = NULL, *iter;

        mutex_lock(&bo->mappings.lock);
        list_for_each_entry(iter, &bo->mappings.list, node) {
                if (iter->mmu == &priv->mmu) {
                        mapping = iter;
                        list_del(&iter->node);
                        break;
                }
        }
        mutex_unlock(&bo->mappings.lock);

        panfrost_gem_mapping_put(mapping);
}

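/*
 * Heap BOs have their backing pages faulted in on demand by the GPU MMU
 * handler, so there is no stable page array to pin (e.g. for dma-buf
 * export); refuse the operation for them.
 */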
static int panfrost_gem_pin(struct drm_gem_object *obj)
{
        if (to_panfrost_bo(obj)->is_heap)
                return -EINVAL;

        return drm_gem_shmem_pin(obj);
}

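/*
 * Object-function table attached to every panfrost BO by
 * panfrost_gem_create_object() below: panfrost overrides free, open,
 * close and pin to manage per-file GPU mappings, and the remaining
 * operations fall through to the generic shmem helpers.
 */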
static const struct drm_gem_object_funcs panfrost_gem_funcs = {
        .free = panfrost_gem_free_object,
        .open = panfrost_gem_open,
        .close = panfrost_gem_close,
        .print_info = drm_gem_shmem_print_info,
        .pin = panfrost_gem_pin,
        .unpin = drm_gem_shmem_unpin,
        .get_sg_table = drm_gem_shmem_get_sg_table,
        .vmap = drm_gem_shmem_vmap,
        .vunmap = drm_gem_shmem_vunmap,
        .vm_ops = &drm_gem_shmem_vm_ops,
};

/**
 * panfrost_gem_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the GEM helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
        struct panfrost_gem_object *obj;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return NULL;

        INIT_LIST_HEAD(&obj->mappings.list);
        mutex_init(&obj->mappings.lock);
        obj->base.base.funcs = &panfrost_gem_funcs;

        return &obj->base.base;
}

struct panfrost_gem_object *
panfrost_gem_create_with_handle(struct drm_file *file_priv,
                                struct drm_device *dev, size_t size,
                                u32 flags,
                                uint32_t *handle)
{
        int ret;
        struct drm_gem_shmem_object *shmem;
        struct panfrost_gem_object *bo;

        /* Round up heap allocations to 2MB to keep fault handling simple */
        if (flags & PANFROST_BO_HEAP)
                size = roundup(size, SZ_2M);

        shmem = drm_gem_shmem_create(dev, size);
        if (IS_ERR(shmem))
                return ERR_CAST(shmem);

        bo = to_panfrost_bo(&shmem->base);
        bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
        bo->is_heap = !!(flags & PANFROST_BO_HEAP);

        /*
         * Register the object in the IDR table; the resulting handle is
         * the ID userspace uses to refer to it.
         */
        ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
        /* Drop the reference from allocation - the handle holds it now. */
        drm_gem_object_put_unlocked(&shmem->base);
        if (ret)
                return ERR_PTR(ret);

        return bo;
}
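
/*
 * Usage sketch (illustrative, not part of the driver): the CREATE_BO
 * ioctl builds on this helper roughly as follows, returning both the
 * handle and the GPU VA chosen by panfrost_gem_open():
 *
 *      bo = panfrost_gem_create_with_handle(file, dev, args->size,
 *                                           args->flags, &args->handle);
 *      if (IS_ERR(bo))
 *              return PTR_ERR(bo);
 *      mapping = panfrost_gem_mapping_get(bo, priv);
 *      args->offset = mapping->mmnode.start << PAGE_SHIFT;
 *      panfrost_gem_mapping_put(mapping);
 */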

struct drm_gem_object *
panfrost_gem_prime_import_sg_table(struct drm_device *dev,
                                   struct dma_buf_attachment *attach,
                                   struct sg_table *sgt)
{
        struct drm_gem_object *obj;
        struct panfrost_gem_object *bo;

        obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        bo = to_panfrost_bo(obj);
        bo->noexec = true;

        return obj;
}
