root/drivers/gpu/drm/msm/msm_gem_vma.c

DEFINITIONS

This source file includes the following definitions:
  1. msm_gem_address_space_destroy
  2. msm_gem_address_space_put
  3. msm_gem_purge_vma
  4. msm_gem_unmap_vma
  5. msm_gem_map_vma
  6. msm_gem_close_vma
  7. msm_gem_init_vma
  8. msm_gem_address_space_create
  9. msm_gem_address_space_create_a2xx

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"

/* Final kref release: tear down the allocator and the backing MMU */
static void
msm_gem_address_space_destroy(struct kref *kref)
{
	struct msm_gem_address_space *aspace = container_of(kref,
			struct msm_gem_address_space, kref);

	drm_mm_takedown(&aspace->mm);
	if (aspace->mmu)
		aspace->mmu->funcs->destroy(aspace->mmu);
	kfree(aspace);
}

void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
	if (aspace)
		kref_put(&aspace->kref, msm_gem_address_space_destroy);
}
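
/*
 * Reference-counting sketch (illustrative only, not part of the
 * driver): the creator holds one reference from kref_init(), and each
 * vma bound to the address space holds another via kref_get() in
 * msm_gem_init_vma(), so the allocator and MMU are only torn down once
 * every user is gone:
 *
 *	aspace = msm_gem_address_space_create(dev, domain, "gpu");
 *	msm_gem_init_vma(aspace, &vma, npages);	// +1 ref
 *	...
 *	msm_gem_close_vma(aspace, &vma);	// -1 ref
 *	msm_gem_address_space_put(aspace);	// last ref frees
 */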

/* Actually unmap memory for the vma */
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	unsigned size = vma->node.size << PAGE_SHIFT;

	/* Don't purge a vma that is still in use; warn so the leak is visible */
	if (WARN_ON(vma->inuse > 0))
		return;

	/* Don't do anything if the memory isn't mapped */
	if (!vma->mapped)
		return;

	if (aspace->mmu)
		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);

	vma->mapped = false;
}

/* Drop a usage reference for the mapping */
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	if (!WARN_ON(!vma->iova))
		vma->inuse--;
}
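
/*
 * Note that msm_gem_unmap_vma() only drops the usage count; the actual
 * page-table teardown is deferred to msm_gem_purge_vma(), which only
 * proceeds once inuse has reached zero.  A sketch of the expected
 * pairing (illustrative, not from this file):
 *
 *	msm_gem_map_vma(aspace, &vma, prot, sgt, npages);	// inuse++
 *	...	// GPU work using the mapping
 *	msm_gem_unmap_vma(aspace, &vma);	// inuse--
 *	msm_gem_purge_vma(aspace, &vma);	// unmaps iff inuse == 0
 */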

/* Map the backing pages into the address space and take a use reference */
int
msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int npages)
{
	unsigned size = npages << PAGE_SHIFT;
	int ret = 0;

	if (WARN_ON(!vma->iova))
		return -EINVAL;

	/* Increase the usage counter */
	vma->inuse++;

	if (vma->mapped)
		return 0;

	vma->mapped = true;

	if (aspace && aspace->mmu)
		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
				size, prot);

	if (ret)
		vma->mapped = false;

	return ret;
}
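
/*
 * The prot argument takes the IOMMU_* permission flags from
 * <linux/iommu.h>.  A hedged example of mapping a writable buffer,
 * assuming 'sgt' and 'npages' describe already-pinned backing pages:
 *
 *	int prot = IOMMU_READ | IOMMU_WRITE;
 *	int ret = msm_gem_map_vma(aspace, &vma, prot, sgt, npages);
 *	if (ret)
 *		return ret;
 */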

/* Close an iova.  Warn if it is still in use */
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	if (WARN_ON(vma->inuse > 0 || vma->mapped))
		return;

	spin_lock(&aspace->lock);
	if (vma->iova)
		drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	msm_gem_address_space_put(aspace);
}

/* Initialize a new vma and allocate an iova for it */
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int npages)
{
	int ret;

	if (WARN_ON(vma->iova))
		return -EBUSY;

	spin_lock(&aspace->lock);
	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
	spin_unlock(&aspace->lock);

	if (ret)
		return ret;

	vma->iova = vma->node.start << PAGE_SHIFT;
	vma->mapped = false;

	kref_get(&aspace->kref);

	return 0;
}
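
/*
 * The drm_mm range allocator here is managed in units of whole pages,
 * so node.start is a page number and the byte iova is recovered by
 * shifting with PAGE_SHIFT.  End-to-end vma lifecycle (illustrative
 * sketch; error handling trimmed):
 *
 *	struct msm_gem_vma vma = { 0 };
 *
 *	msm_gem_init_vma(aspace, &vma, npages);	// allocates vma.iova
 *	msm_gem_map_vma(aspace, &vma, prot, sgt, npages);
 *	...
 *	msm_gem_unmap_vma(aspace, &vma);
 *	msm_gem_purge_vma(aspace, &vma);
 *	msm_gem_close_vma(aspace, &vma);	// releases the iova
 */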

struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
		const char *name)
{
	struct msm_gem_address_space *aspace;
	u64 size = domain->geometry.aperture_end -
		domain->geometry.aperture_start;

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->name = name;
	aspace->mmu = msm_iommu_new(dev, domain);

	drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
		size >> PAGE_SHIFT);

	kref_init(&aspace->kref);

	return aspace;
}
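
/*
 * Sketch of how a caller might construct an address space from an
 * IOMMU domain (illustrative; the aperture bounds are whatever range
 * the GPU should be allowed to address):
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *	domain->geometry.aperture_start = SZ_16M;
 *	domain->geometry.aperture_end = 0xffffffff;
 *	aspace = msm_gem_address_space_create(&pdev->dev, domain, "gpu");
 *	if (IS_ERR(aspace))
 *		return PTR_ERR(aspace);
 */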

struct msm_gem_address_space *
msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
		const char *name, uint64_t va_start, uint64_t va_end)
{
	struct msm_gem_address_space *aspace;
	u64 size = va_end - va_start;

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->name = name;
	aspace->mmu = msm_gpummu_new(dev, gpu);

	drm_mm_init(&aspace->mm, (va_start >> PAGE_SHIFT),
		size >> PAGE_SHIFT);

	kref_init(&aspace->kref);

	return aspace;
}
