This source file includes the following definitions:
- radeon_gem_prime_get_sg_table
- radeon_gem_prime_vmap
- radeon_gem_prime_vunmap
- radeon_gem_prime_import_sg_table
- radeon_gem_prime_pin
- radeon_gem_prime_unpin
- radeon_gem_prime_export
   1 
   2 
   3 
   4 
   5 
   6 
   7 
   8 
   9 
  10 
  11 
  12 
  13 
  14 
  15 
  16 
  17 
  18 
  19 
  20 
  21 
  22 
  23 
  24 
  25 
  26 
  27 #include <linux/dma-buf.h>
  28 
  29 #include <drm/drm_prime.h>
  30 #include <drm/radeon_drm.h>
  31 
  32 #include "radeon.h"
  33 
  34 struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
  35 {
  36         struct radeon_bo *bo = gem_to_radeon_bo(obj);
  37         int npages = bo->tbo.num_pages;
  38 
  39         return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
  40 }
  41 
  42 void *radeon_gem_prime_vmap(struct drm_gem_object *obj)
  43 {
  44         struct radeon_bo *bo = gem_to_radeon_bo(obj);
  45         int ret;
  46 
  47         ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
  48                           &bo->dma_buf_vmap);
  49         if (ret)
  50                 return ERR_PTR(ret);
  51 
  52         return bo->dma_buf_vmap.virtual;
  53 }
  54 
  55 void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
  56 {
  57         struct radeon_bo *bo = gem_to_radeon_bo(obj);
  58 
  59         ttm_bo_kunmap(&bo->dma_buf_vmap);
  60 }
  61 
/*
 * Wrap an imported dma-buf's sg table in a new GEM object.
 *
 * The new BO is created in the GTT domain, shares the exporter's
 * reservation object (resv) and is tracked on the device's GEM object
 * list.  Returns the new GEM object or ERR_PTR on allocation failure.
 */
struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
                                                        struct dma_buf_attachment *attach,
                                                        struct sg_table *sg)
{
        struct dma_resv *resv = attach->dmabuf->resv;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_bo *bo;
        int ret;

        /*
         * Hold the exporter's reservation lock while creating the BO so
         * it is born with a consistently locked resv shared with the
         * dma-buf.
         */
        dma_resv_lock(resv, NULL);
        ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false,
                               RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
        dma_resv_unlock(resv);
        if (ret)
                return ERR_PTR(ret);

        /* Make the BO visible on the device-wide GEM object list. */
        mutex_lock(&rdev->gem.mutex);
        list_add_tail(&bo->list, &rdev->gem.objects);
        mutex_unlock(&rdev->gem.mutex);

        /* Imported BOs start life as shared (the exporter holds a ref). */
        bo->prime_shared_count = 1;
        return &bo->tbo.base;
}
  85 
  86 int radeon_gem_prime_pin(struct drm_gem_object *obj)
  87 {
  88         struct radeon_bo *bo = gem_to_radeon_bo(obj);
  89         int ret = 0;
  90 
  91         ret = radeon_bo_reserve(bo, false);
  92         if (unlikely(ret != 0))
  93                 return ret;
  94 
  95         
  96         ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
  97         if (likely(ret == 0))
  98                 bo->prime_shared_count++;
  99 
 100         radeon_bo_unreserve(bo);
 101         return ret;
 102 }
 103 
 104 void radeon_gem_prime_unpin(struct drm_gem_object *obj)
 105 {
 106         struct radeon_bo *bo = gem_to_radeon_bo(obj);
 107         int ret = 0;
 108 
 109         ret = radeon_bo_reserve(bo, false);
 110         if (unlikely(ret != 0))
 111                 return;
 112 
 113         radeon_bo_unpin(bo);
 114         if (bo->prime_shared_count)
 115                 bo->prime_shared_count--;
 116         radeon_bo_unreserve(bo);
 117 }
 118 
 119 
 120 struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
 121                                         int flags)
 122 {
 123         struct radeon_bo *bo = gem_to_radeon_bo(gobj);
 124         if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
 125                 return ERR_PTR(-EPERM);
 126         return drm_gem_prime_export(gobj, flags);
 127 }