drivers/gpu/drm/udl/udl_dmabuf.c


DEFINITIONS

This source file includes the following definitions.
  1. udl_attach_dma_buf
  2. udl_detach_dma_buf
  3. udl_map_dma_buf
  4. udl_unmap_dma_buf
  5. udl_dmabuf_kmap
  6. udl_dmabuf_kunmap
  7. udl_dmabuf_mmap
  8. udl_gem_prime_export
  9. udl_prime_create
  10. udl_gem_prime_import

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * udl_dmabuf.c
 *
 * Copyright (c) 2014 The Chromium OS Authors
 */

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

#include <drm/drm_prime.h>

#include "udl_drv.h"

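/*
 * Per-attachment state: each importer gets its own copy of the
 * object's scatterlist (sgt), plus the direction it was last mapped
 * in, so a repeat map call with the same direction can return the
 * cached table.
 */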
struct udl_drm_dmabuf_attachment {
        struct sg_table sgt;
        enum dma_data_direction dir;
        bool is_mapped;
};

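/*
 * Allocate per-attachment state; the actual mapping is deferred to
 * udl_map_dma_buf().
 */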
static int udl_attach_dma_buf(struct dma_buf *dmabuf,
                              struct dma_buf_attachment *attach)
{
        struct udl_drm_dmabuf_attachment *udl_attach;

        DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
                        attach->dmabuf->size);

        udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL);
        if (!udl_attach)
                return -ENOMEM;

        udl_attach->dir = DMA_NONE;
        attach->priv = udl_attach;

        return 0;
}

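/*
 * Tear down any DMA mapping made for this attachment and free its
 * per-attachment state.
 */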
static void udl_detach_dma_buf(struct dma_buf *dmabuf,
                               struct dma_buf_attachment *attach)
{
        struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
        struct sg_table *sgt;

        if (!udl_attach)
                return;

        DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
                        attach->dmabuf->size);

        sgt = &udl_attach->sgt;

        /* Unmap with the same entry count that was passed to dma_map_sg(). */
        if (udl_attach->dir != DMA_NONE)
                dma_unmap_sg(attach->dev, sgt->sgl, sgt->orig_nents,
                                udl_attach->dir);

        sg_free_table(sgt);
        kfree(udl_attach);
        attach->priv = NULL;
}

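/*
 * Build (or reuse) the scatterlist for this attachment and map it for
 * DMA.  The object's page-backed sg table is cloned into the
 * attachment's private table so that each importer can be mapped
 * independently.
 */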
static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
                                        enum dma_data_direction dir)
{
        struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
        struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
        struct drm_device *dev = obj->base.dev;
        struct udl_device *udl = dev->dev_private;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt = NULL;
        unsigned int i;
        int page_count;
        int nents, ret;

        DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev),
                        attach->dmabuf->size, dir);

        /* just return current sgt if already requested. */
        if (udl_attach->dir == dir && udl_attach->is_mapped)
                return &udl_attach->sgt;

        if (!obj->pages) {
                ret = udl_gem_get_pages(obj);
                if (ret) {
                        DRM_ERROR("failed to get pages.\n");
                        return ERR_PTR(ret);
                }
        }

        page_count = obj->base.size / PAGE_SIZE;
        obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
        if (IS_ERR(obj->sg)) {
                DRM_ERROR("failed to allocate sgt.\n");
                return ERR_CAST(obj->sg);
        }

        sgt = &udl_attach->sgt;

        ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
        if (ret) {
                DRM_ERROR("failed to alloc sgt.\n");
                return ERR_PTR(-ENOMEM);
        }

        mutex_lock(&udl->gem_lock);

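        /*
         * Copy the object's scatterlist entries into the attachment's
         * private table; the copy (not obj->sg) is what gets DMA-mapped.
         */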
        rd = obj->sg->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        if (dir != DMA_NONE) {
                nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
                if (!nents) {
                        DRM_ERROR("failed to map sgl with iommu.\n");
                        sg_free_table(sgt);
                        sgt = ERR_PTR(-EIO);
                        goto err_unlock;
                }
        }

        udl_attach->is_mapped = true;
        udl_attach->dir = dir;
        attach->priv = udl_attach;

err_unlock:
        mutex_unlock(&udl->gem_lock);
        return sgt;
}

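/*
 * The real unmapping happens in udl_detach_dma_buf(), where the cached
 * table is torn down; this hook only logs the request.
 */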
static void udl_unmap_dma_buf(struct dma_buf_attachment *attach,
                              struct sg_table *sgt,
                              enum dma_data_direction dir)
{
        /* Nothing to do. */
        DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n", dev_name(attach->dev),
                        attach->dmabuf->size, dir);
}

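/*
 * CPU access (kmap/mmap) is not implemented for udl; the hooks below
 * are stubs that report no mapping.
 */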
static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
        /* TODO */

        return NULL;
}

static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
                              unsigned long page_num, void *addr)
{
        /* TODO */
}

static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
                           struct vm_area_struct *vma)
{
        /* TODO */

        return -EINVAL;
}

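/* dma-buf exporter callbacks used for buffers exported by udl. */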
static const struct dma_buf_ops udl_dmabuf_ops = {
        .attach                 = udl_attach_dma_buf,
        .detach                 = udl_detach_dma_buf,
        .map_dma_buf            = udl_map_dma_buf,
        .unmap_dma_buf          = udl_unmap_dma_buf,
        .map                    = udl_dmabuf_kmap,
        .unmap                  = udl_dmabuf_kunmap,
        .mmap                   = udl_dmabuf_mmap,
        .release                = drm_gem_dmabuf_release,
};

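/*
 * Wrap a udl GEM object in a dma-buf so other drivers can import it;
 * the GEM object itself is stashed in exp_info.priv.  This is wired up
 * as the driver's ->gem_prime_export hook (see udl_drv.c).
 */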
struct dma_buf *udl_gem_prime_export(struct drm_gem_object *obj, int flags)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &udl_dmabuf_ops;
        exp_info.size = obj->size;
        exp_info.flags = flags;
        exp_info.priv = obj;

        return drm_gem_dmabuf_export(obj->dev, &exp_info);
}

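/*
 * Create a udl GEM object backed by an imported scatterlist and fill
 * its page array from the sg entries.
 */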
static int udl_prime_create(struct drm_device *dev,
                            size_t size,
                            struct sg_table *sg,
                            struct udl_gem_object **obj_p)
{
        struct udl_gem_object *obj;
        int npages;

        npages = size / PAGE_SIZE;

        *obj_p = NULL;
        obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
        if (!obj)
                return -ENOMEM;

        obj->sg = sg;
        obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (obj->pages == NULL) {
                DRM_ERROR("obj pages is NULL %d\n", npages);
                /* Drop the object allocated above instead of leaking it. */
                drm_gem_object_put_unlocked(&obj->base);
                return -ENOMEM;
        }

        drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);

        *obj_p = obj;
        return 0;
}

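/*
 * Import a foreign dma-buf: attach to it, map it bidirectionally, and
 * wrap the resulting scatterlist in a udl GEM object.  A device
 * reference is held for the lifetime of the import and dropped again
 * on every failure path.
 */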
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
                                struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sg;
        struct udl_gem_object *uobj;
        int ret;

        /* need to attach */
        get_device(dev->dev);
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach)) {
                put_device(dev->dev);
                return ERR_CAST(attach);
        }

        get_dma_buf(dma_buf);

        sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
                goto fail_detach;
        }

        ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
        if (ret)
                goto fail_unmap;

        uobj->base.import_attach = attach;
        uobj->flags = UDL_BO_WC;

        return &uobj->base;

fail_unmap:
        dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);
        put_device(dev->dev);
        return ERR_PTR(ret);
}
