drivers/gpu/drm/panfrost/panfrost_drv.c


DEFINITIONS

This source file includes the following definitions:
  1. panfrost_ioctl_get_param
  2. panfrost_ioctl_create_bo
  3. panfrost_lookup_bos
  4. panfrost_copy_in_sync
  5. panfrost_ioctl_submit
  6. panfrost_ioctl_wait_bo
  7. panfrost_ioctl_mmap_bo
  8. panfrost_ioctl_get_bo_offset
  9. panfrost_ioctl_madvise
  10. panfrost_unstable_ioctl_check
  11. panfrost_drm_mm_color_adjust
  12. panfrost_open
  13. panfrost_postclose
  14. panfrost_probe
  15. panfrost_remove

// SPDX-License-Identifier: GPL-2.0
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */

#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pagemap.h>
#include <linux/pm_runtime.h>
#include <drm/panfrost_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
#include "panfrost_job.h"
#include "panfrost_gpu.h"
#include "panfrost_perfcnt.h"

static bool unstable_ioctls;
module_param_unsafe(unstable_ioctls, bool, 0600);

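/*
 * GET_PARAM ioctl: report one of the GPU feature/ID values cached in
 * pfdev->features. Array-valued parameters (texture and job-slot features)
 * are indexed by the requested parameter number.
 */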
static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file)
{
        struct drm_panfrost_get_param *param = data;
        struct panfrost_device *pfdev = ddev->dev_private;

        if (param->pad != 0)
                return -EINVAL;

#define PANFROST_FEATURE(name, member)                  \
        case DRM_PANFROST_PARAM_ ## name:               \
                param->value = pfdev->features.member;  \
                break
#define PANFROST_FEATURE_ARRAY(name, member, max)                       \
        case DRM_PANFROST_PARAM_ ## name ## 0 ...                       \
                DRM_PANFROST_PARAM_ ## name ## max:                     \
                param->value = pfdev->features.member[param->param -    \
                        DRM_PANFROST_PARAM_ ## name ## 0];              \
                break

        switch (param->param) {
                PANFROST_FEATURE(GPU_PROD_ID, id);
                PANFROST_FEATURE(GPU_REVISION, revision);
                PANFROST_FEATURE(SHADER_PRESENT, shader_present);
                PANFROST_FEATURE(TILER_PRESENT, tiler_present);
                PANFROST_FEATURE(L2_PRESENT, l2_present);
                PANFROST_FEATURE(STACK_PRESENT, stack_present);
                PANFROST_FEATURE(AS_PRESENT, as_present);
                PANFROST_FEATURE(JS_PRESENT, js_present);
                PANFROST_FEATURE(L2_FEATURES, l2_features);
                PANFROST_FEATURE(CORE_FEATURES, core_features);
                PANFROST_FEATURE(TILER_FEATURES, tiler_features);
                PANFROST_FEATURE(MEM_FEATURES, mem_features);
                PANFROST_FEATURE(MMU_FEATURES, mmu_features);
                PANFROST_FEATURE(THREAD_FEATURES, thread_features);
                PANFROST_FEATURE(MAX_THREADS, max_threads);
                PANFROST_FEATURE(THREAD_MAX_WORKGROUP_SZ,
                                thread_max_workgroup_sz);
                PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ,
                                thread_max_barrier_sz);
                PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features);
                PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3);
                PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
                PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
                PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc);
        default:
                return -EINVAL;
        }

        return 0;
}

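/*
 * CREATE_BO ioctl: allocate a GEM object, return a handle to it and the GPU
 * virtual address at which it is mapped in the caller's address space.
 * Heap BOs must always be created with PANFROST_BO_NOEXEC set.
 */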
static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct panfrost_file_priv *priv = file->driver_priv;
        struct panfrost_gem_object *bo;
        struct drm_panfrost_create_bo *args = data;
        struct panfrost_gem_mapping *mapping;

        if (!args->size || args->pad ||
            (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
                return -EINVAL;

        /* Heaps should never be executable */
        if ((args->flags & PANFROST_BO_HEAP) &&
            !(args->flags & PANFROST_BO_NOEXEC))
                return -EINVAL;

        bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags,
                                             &args->handle);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        mapping = panfrost_gem_mapping_get(bo, priv);
        if (!mapping) {
                drm_gem_object_put_unlocked(&bo->base.base);
                return -EINVAL;
        }

        args->offset = mapping->mmnode.start << PAGE_SHIFT;
        panfrost_gem_mapping_put(mapping);

        return 0;
}

/**
 * panfrost_lookup_bos() - Sets up job->bos[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve handles from userspace to BOs and attach them to job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
static int
panfrost_lookup_bos(struct drm_device *dev,
                  struct drm_file *file_priv,
                  struct drm_panfrost_submit *args,
                  struct panfrost_job *job)
{
        struct panfrost_file_priv *priv = file_priv->driver_priv;
        struct panfrost_gem_object *bo;
        unsigned int i;
        int ret;

        job->bo_count = args->bo_handle_count;

        if (!job->bo_count)
                return 0;

        job->implicit_fences = kvmalloc_array(job->bo_count,
                                  sizeof(struct dma_fence *),
                                  GFP_KERNEL | __GFP_ZERO);
        if (!job->implicit_fences)
                return -ENOMEM;

        ret = drm_gem_objects_lookup(file_priv,
                                     (void __user *)(uintptr_t)args->bo_handles,
                                     job->bo_count, &job->bos);
        if (ret)
                return ret;

        job->mappings = kvmalloc_array(job->bo_count,
                                       sizeof(struct panfrost_gem_mapping *),
                                       GFP_KERNEL | __GFP_ZERO);
        if (!job->mappings)
                return -ENOMEM;

        for (i = 0; i < job->bo_count; i++) {
                struct panfrost_gem_mapping *mapping;

                bo = to_panfrost_bo(job->bos[i]);
                mapping = panfrost_gem_mapping_get(bo, priv);
                if (!mapping) {
                        ret = -EINVAL;
                        break;
                }

                atomic_inc(&bo->gpu_usecount);
                job->mappings[i] = mapping;
        }

        return ret;
}

/**
 * panfrost_copy_in_sync() - Sets up job->in_fences[] with the sync objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve syncobjs from userspace to fences and attach them to job.
 *
 * Note that this function doesn't need to unreference the fences on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
static int
panfrost_copy_in_sync(struct drm_device *dev,
                  struct drm_file *file_priv,
                  struct drm_panfrost_submit *args,
                  struct panfrost_job *job)
{
        u32 *handles;
        int ret = 0;
        int i;

        job->in_fence_count = args->in_sync_count;

        if (!job->in_fence_count)
                return 0;

        job->in_fences = kvmalloc_array(job->in_fence_count,
                                        sizeof(struct dma_fence *),
                                        GFP_KERNEL | __GFP_ZERO);
        if (!job->in_fences) {
                DRM_DEBUG("Failed to allocate job in fences\n");
                return -ENOMEM;
        }

        handles = kvmalloc_array(job->in_fence_count, sizeof(u32), GFP_KERNEL);
        if (!handles) {
                ret = -ENOMEM;
                DRM_DEBUG("Failed to allocate incoming syncobj handles\n");
                goto fail;
        }

        if (copy_from_user(handles,
                           (void __user *)(uintptr_t)args->in_syncs,
                           job->in_fence_count * sizeof(u32))) {
                ret = -EFAULT;
                DRM_DEBUG("Failed to copy in syncobj handles\n");
                goto fail;
        }

        for (i = 0; i < job->in_fence_count; i++) {
                ret = drm_syncobj_find_fence(file_priv, handles[i], 0, 0,
                                             &job->in_fences[i]);
                if (ret == -EINVAL)
                        goto fail;
        }

fail:
        kvfree(handles);
        return ret;
}

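/*
 * SUBMIT ioctl: build a panfrost_job from the ioctl arguments (job chain
 * address, requirements, input syncobjs and BO handles) and push it to the
 * job queue. If an output syncobj was supplied, its fence is replaced with
 * the job's render-done fence.
 */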
static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct panfrost_device *pfdev = dev->dev_private;
        struct drm_panfrost_submit *args = data;
        struct drm_syncobj *sync_out = NULL;
        struct panfrost_job *job;
        int ret = 0;

        if (!args->jc)
                return -EINVAL;

        if (args->requirements && args->requirements != PANFROST_JD_REQ_FS)
                return -EINVAL;

        if (args->out_sync > 0) {
                sync_out = drm_syncobj_find(file, args->out_sync);
                if (!sync_out)
                        return -ENODEV;
        }

        job = kzalloc(sizeof(*job), GFP_KERNEL);
        if (!job) {
                ret = -ENOMEM;
                goto fail_out_sync;
        }

        kref_init(&job->refcount);

        job->pfdev = pfdev;
        job->jc = args->jc;
        job->requirements = args->requirements;
        job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
        job->file_priv = file->driver_priv;

        ret = panfrost_copy_in_sync(dev, file, args, job);
        if (ret)
                goto fail_job;

        ret = panfrost_lookup_bos(dev, file, args, job);
        if (ret)
                goto fail_job;

        ret = panfrost_job_push(job);
        if (ret)
                goto fail_job;

        /* Update the return sync object for the job */
        if (sync_out)
                drm_syncobj_replace_fence(sync_out, job->render_done_fence);

fail_job:
        panfrost_job_put(job);
fail_out_sync:
        if (sync_out)
                drm_syncobj_put(sync_out);

        return ret;
}

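/*
 * WAIT_BO ioctl: wait, up to an absolute timeout, for all fences attached
 * to a BO's reservation object to signal.
 */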
static int
panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        long ret;
        struct drm_panfrost_wait_bo *args = data;
        struct drm_gem_object *gem_obj;
        unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);

        if (args->pad)
                return -EINVAL;

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj)
                return -ENOENT;

        ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
                                                  true, timeout);
        if (!ret)
                ret = timeout ? -ETIMEDOUT : -EBUSY;

        drm_gem_object_put_unlocked(gem_obj);

        return ret;
}

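/*
 * MMAP_BO ioctl: create a fake mmap offset for a BO so userspace can map it
 * via mmap() on the DRM fd. Heap BOs are rejected since their pages are not
 * pinned.
 */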
static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_panfrost_mmap_bo *args = data;
        struct drm_gem_object *gem_obj;
        int ret;

        if (args->flags != 0) {
                DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
                return -EINVAL;
        }

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
                return -ENOENT;
        }

        /* Don't allow mmapping of heap objects as pages are not pinned. */
        if (to_panfrost_bo(gem_obj)->is_heap) {
                ret = -EINVAL;
                goto out;
        }

        ret = drm_gem_create_mmap_offset(gem_obj);
        if (ret == 0)
                args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

out:
        drm_gem_object_put_unlocked(gem_obj);
        return ret;
}

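/*
 * GET_BO_OFFSET ioctl: return the GPU virtual address at which a BO is
 * mapped in the caller's address space.
 */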
static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct panfrost_file_priv *priv = file_priv->driver_priv;
        struct drm_panfrost_get_bo_offset *args = data;
        struct panfrost_gem_mapping *mapping;
        struct drm_gem_object *gem_obj;
        struct panfrost_gem_object *bo;

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
                return -ENOENT;
        }
        bo = to_panfrost_bo(gem_obj);

        mapping = panfrost_gem_mapping_get(bo, priv);
        drm_gem_object_put_unlocked(gem_obj);

        if (!mapping)
                return -EINVAL;

        args->offset = mapping->mmnode.start << PAGE_SHIFT;
        panfrost_gem_mapping_put(mapping);
        return 0;
}

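/*
 * MADVISE ioctl: mark a BO as needed or unneeded. A BO marked DONTNEED is
 * put on the shrinker list so its pages can be reclaimed under memory
 * pressure; only BOs mapped solely by the calling FD may be marked
 * purgeable.
 */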
static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv)
{
        struct panfrost_file_priv *priv = file_priv->driver_priv;
        struct drm_panfrost_madvise *args = data;
        struct panfrost_device *pfdev = dev->dev_private;
        struct drm_gem_object *gem_obj;
        struct panfrost_gem_object *bo;
        int ret = 0;

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
                return -ENOENT;
        }

        bo = to_panfrost_bo(gem_obj);

        mutex_lock(&pfdev->shrinker_lock);
        mutex_lock(&bo->mappings.lock);
        if (args->madv == PANFROST_MADV_DONTNEED) {
                struct panfrost_gem_mapping *first;

                first = list_first_entry(&bo->mappings.list,
                                         struct panfrost_gem_mapping,
                                         node);

                /*
                 * If we want to mark the BO purgeable, there must be only one
                 * user: the caller FD.
                 * We could do something smarter and mark the BO purgeable only
                 * when all its users have marked it purgeable, but globally
                 * visible/shared BOs are likely to never be marked purgeable
                 * anyway, so let's not bother.
                 */
                if (!list_is_singular(&bo->mappings.list) ||
                    WARN_ON_ONCE(first->mmu != &priv->mmu)) {
                        ret = -EINVAL;
                        goto out_unlock_mappings;
                }
        }

        args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);

        if (args->retained) {
                if (args->madv == PANFROST_MADV_DONTNEED)
                        list_add_tail(&bo->base.madv_list,
                                      &pfdev->shrinker_list);
                else if (args->madv == PANFROST_MADV_WILLNEED)
                        list_del_init(&bo->base.madv_list);
        }

out_unlock_mappings:
        mutex_unlock(&bo->mappings.lock);
        mutex_unlock(&pfdev->shrinker_lock);

        drm_gem_object_put_unlocked(gem_obj);
        return ret;
}

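/*
 * Ioctls that are not yet stable call this helper; they are only available
 * when the unstable_ioctls module parameter is set.
 */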
int panfrost_unstable_ioctl_check(void)
{
        if (!unstable_ioctls)
                return -ENOSYS;

        return 0;
}

#define PFN_4G          (SZ_4G >> PAGE_SHIFT)
#define PFN_4G_MASK     (PFN_4G - 1)
#define PFN_16M         (SZ_16M >> PAGE_SHIFT)

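/*
 * Color-adjust callback for the per-FD GPU VA allocator: executable
 * mappings (color lacking PANFROST_BO_NOEXEC) must not start or end on a
 * 4 GiB boundary, are pushed out of the last 16 MiB below a boundary, and
 * are clamped so they do not cross into the next 4 GiB segment.
 */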
static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
                                         unsigned long color,
                                         u64 *start, u64 *end)
{
        /* Executable buffers can't start or end on a 4GB boundary */
        if (!(color & PANFROST_BO_NOEXEC)) {
                u64 next_seg;

                if ((*start & PFN_4G_MASK) == 0)
                        (*start)++;

                if ((*end & PFN_4G_MASK) == 0)
                        (*end)--;

                next_seg = ALIGN(*start, PFN_4G);
                if (next_seg - *start <= PFN_16M)
                        *start = next_seg + 1;

                *end = min(*end, ALIGN(*start, PFN_4G) - 1);
        }
}

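/*
 * Per-FD open: allocate the file private data, set up a 32 MiB..4 GiB GPU
 * VA range using the color-adjust callback above, allocate MMU page tables
 * and set up job submission state via panfrost_job_open().
 */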
static int
panfrost_open(struct drm_device *dev, struct drm_file *file)
{
        int ret;
        struct panfrost_device *pfdev = dev->dev_private;
        struct panfrost_file_priv *panfrost_priv;

        panfrost_priv = kzalloc(sizeof(*panfrost_priv), GFP_KERNEL);
        if (!panfrost_priv)
                return -ENOMEM;

        panfrost_priv->pfdev = pfdev;
        file->driver_priv = panfrost_priv;

        spin_lock_init(&panfrost_priv->mm_lock);

        /* 4G enough for now. can be 48-bit */
        drm_mm_init(&panfrost_priv->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
        panfrost_priv->mm.color_adjust = panfrost_drm_mm_color_adjust;

        ret = panfrost_mmu_pgtable_alloc(panfrost_priv);
        if (ret)
                goto err_pgtable;

        ret = panfrost_job_open(panfrost_priv);
        if (ret)
                goto err_job;

        return 0;

err_job:
        panfrost_mmu_pgtable_free(panfrost_priv);
err_pgtable:
        drm_mm_takedown(&panfrost_priv->mm);
        kfree(panfrost_priv);
        return ret;
}

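/*
 * Per-FD close: tear down perfcnt, job and MMU state, then release the GPU
 * VA range and free the file private data.
 */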
static void
panfrost_postclose(struct drm_device *dev, struct drm_file *file)
{
        struct panfrost_file_priv *panfrost_priv = file->driver_priv;

        panfrost_perfcnt_close(file);
        panfrost_job_close(panfrost_priv);

        panfrost_mmu_pgtable_free(panfrost_priv);
        drm_mm_takedown(&panfrost_priv->mm);
        kfree(panfrost_priv);
}

/* DRM_AUTH is required on SUBMIT for now, while all clients share a single
 * address space.  Note that render nodes would be able to submit jobs that
 * could access BOs from clients authenticated with the master node.
 */
static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
#define PANFROST_IOCTL(n, func, flags) \
        DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags)

        PANFROST_IOCTL(SUBMIT,          submit,         DRM_RENDER_ALLOW | DRM_AUTH),
        PANFROST_IOCTL(WAIT_BO,         wait_bo,        DRM_RENDER_ALLOW),
        PANFROST_IOCTL(CREATE_BO,       create_bo,      DRM_RENDER_ALLOW),
        PANFROST_IOCTL(MMAP_BO,         mmap_bo,        DRM_RENDER_ALLOW),
        PANFROST_IOCTL(GET_PARAM,       get_param,      DRM_RENDER_ALLOW),
        PANFROST_IOCTL(GET_BO_OFFSET,   get_bo_offset,  DRM_RENDER_ALLOW),
        PANFROST_IOCTL(PERFCNT_ENABLE,  perfcnt_enable, DRM_RENDER_ALLOW),
        PANFROST_IOCTL(PERFCNT_DUMP,    perfcnt_dump,   DRM_RENDER_ALLOW),
        PANFROST_IOCTL(MADVISE,         madvise,        DRM_RENDER_ALLOW),
};

DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops);

/*
 * Panfrost driver version:
 * - 1.0 - initial interface
 * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
 */
static struct drm_driver panfrost_drm_driver = {
        .driver_features        = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
        .open                   = panfrost_open,
        .postclose              = panfrost_postclose,
        .ioctls                 = panfrost_drm_driver_ioctls,
        .num_ioctls             = ARRAY_SIZE(panfrost_drm_driver_ioctls),
        .fops                   = &panfrost_drm_driver_fops,
        .name                   = "panfrost",
        .desc                   = "panfrost DRM",
        .date                   = "20180908",
        .major                  = 1,
        .minor                  = 1,

        .gem_create_object      = panfrost_gem_create_object,
        .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
        .gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
        .gem_prime_mmap         = drm_gem_prime_mmap,
};

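/*
 * Platform probe: allocate the panfrost_device, initialize the hardware and
 * devfreq, enable runtime PM with a 50 ms autosuspend delay, then register
 * the DRM device and the GEM shrinker.
 */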
static int panfrost_probe(struct platform_device *pdev)
{
        struct panfrost_device *pfdev;
        struct drm_device *ddev;
        int err;

        pfdev = devm_kzalloc(&pdev->dev, sizeof(*pfdev), GFP_KERNEL);
        if (!pfdev)
                return -ENOMEM;

        pfdev->pdev = pdev;
        pfdev->dev = &pdev->dev;

        platform_set_drvdata(pdev, pfdev);

        /* Allocate and initialize the DRM device. */
        ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
        if (IS_ERR(ddev))
                return PTR_ERR(ddev);

        ddev->dev_private = pfdev;
        pfdev->ddev = ddev;

        mutex_init(&pfdev->shrinker_lock);
        INIT_LIST_HEAD(&pfdev->shrinker_list);

        err = panfrost_device_init(pfdev);
        if (err) {
                if (err != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "Fatal error during GPU init\n");
                goto err_out0;
        }

        err = panfrost_devfreq_init(pfdev);
        if (err) {
                if (err != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "Fatal error during devfreq init\n");
                goto err_out1;
        }

        pm_runtime_set_active(pfdev->dev);
        pm_runtime_mark_last_busy(pfdev->dev);
        pm_runtime_enable(pfdev->dev);
        pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
        pm_runtime_use_autosuspend(pfdev->dev);

        /*
         * Register the DRM device with the core and the connectors with
         * sysfs
         */
        err = drm_dev_register(ddev, 0);
        if (err < 0)
                goto err_out2;

        panfrost_gem_shrinker_init(ddev);

        return 0;

err_out2:
        pm_runtime_disable(pfdev->dev);
        panfrost_devfreq_fini(pfdev);
err_out1:
        panfrost_device_fini(pfdev);
err_out0:
        drm_dev_put(ddev);
        return err;
}

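/*
 * Platform remove: undo panfrost_probe(); unregister the DRM device and the
 * shrinker, then shut down devfreq, the device and runtime PM.
 */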
static int panfrost_remove(struct platform_device *pdev)
{
        struct panfrost_device *pfdev = platform_get_drvdata(pdev);
        struct drm_device *ddev = pfdev->ddev;

        drm_dev_unregister(ddev);
        panfrost_gem_shrinker_cleanup(ddev);

        pm_runtime_get_sync(pfdev->dev);
        panfrost_devfreq_fini(pfdev);
        panfrost_device_fini(pfdev);
        pm_runtime_put_sync_suspend(pfdev->dev);
        pm_runtime_disable(pfdev->dev);

        drm_dev_put(ddev);
        return 0;
}

static const struct of_device_id dt_match[] = {
        { .compatible = "arm,mali-t604" },
        { .compatible = "arm,mali-t624" },
        { .compatible = "arm,mali-t628" },
        { .compatible = "arm,mali-t720" },
        { .compatible = "arm,mali-t760" },
        { .compatible = "arm,mali-t820" },
        { .compatible = "arm,mali-t830" },
        { .compatible = "arm,mali-t860" },
        { .compatible = "arm,mali-t880" },
        {}
};
MODULE_DEVICE_TABLE(of, dt_match);

static const struct dev_pm_ops panfrost_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(panfrost_device_suspend, panfrost_device_resume, NULL)
};

static struct platform_driver panfrost_driver = {
        .probe          = panfrost_probe,
        .remove         = panfrost_remove,
        .driver         = {
                .name   = "panfrost",
                .pm     = &panfrost_pm_ops,
                .of_match_table = dt_match,
        },
};
module_platform_driver(panfrost_driver);

MODULE_AUTHOR("Panfrost Project Developers");
MODULE_DESCRIPTION("Panfrost DRM Driver");
MODULE_LICENSE("GPL v2");
