drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c

DEFINITIONS

This source file includes the following definitions:
  1. dma_buf_to_obj
  2. i915_gem_map_dma_buf
  3. i915_gem_unmap_dma_buf
  4. i915_gem_dmabuf_vmap
  5. i915_gem_dmabuf_vunmap
  6. i915_gem_dmabuf_kmap
  7. i915_gem_dmabuf_kunmap
  8. i915_gem_dmabuf_mmap
  9. i915_gem_begin_cpu_access
  10. i915_gem_end_cpu_access
  11. i915_gem_prime_export
  12. i915_gem_object_get_pages_dmabuf
  13. i915_gem_object_put_pages_dmabuf
  14. i915_gem_prime_import

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2012 Red Hat Inc
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/dma-resv.h>

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
        return to_intel_bo(buf->priv);
}

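/*
 * Pin the object's backing store and hand the importer an independent
 * copy of the scatterlist, mapped for its device. The pages remain
 * pinned until i915_gem_unmap_dma_buf() releases the mapping again.
 */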
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
                                             enum dma_data_direction dir)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
        struct sg_table *st;
        struct scatterlist *src, *dst;
        int ret, i;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto err;

        /* Copy sg so that we make an independent mapping */
        st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (st == NULL) {
                ret = -ENOMEM;
                goto err_unpin_pages;
        }

        ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
        if (ret)
                goto err_free;

        src = obj->mm.pages->sgl;
        dst = st->sgl;
        for (i = 0; i < obj->mm.pages->nents; i++) {
                sg_set_page(dst, sg_page(src), src->length, 0);
                dst = sg_next(dst);
                src = sg_next(src);
        }

        if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
                ret = -ENOMEM;
                goto err_free_sg;
        }

        return st;

err_free_sg:
        sg_free_table(st);
err_free:
        kfree(st);
err_unpin_pages:
        i915_gem_object_unpin_pages(obj);
err:
        return ERR_PTR(ret);
}

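/* Undo i915_gem_map_dma_buf(): unmap, free the sg_table copy and unpin. */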
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *sg,
                                   enum dma_data_direction dir)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

        dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
        sg_free_table(sg);
        kfree(sg);

        i915_gem_object_unpin_pages(obj);
}

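/*
 * Map the entire object into the kernel address space, using ordinary
 * write-back cacheable pages; undone in i915_gem_dmabuf_vunmap().
 */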
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

        return i915_gem_object_pin_map(obj, I915_MAP_WB);
}

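/*
 * Flush any CPU writes out of the cache before dropping the mapping, so
 * that they become visible to the device.
 */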
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

        i915_gem_object_flush_map(obj);
        i915_gem_object_unpin_map(obj);
}

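/*
 * Map a single page for CPU access. Only objects backed by struct pages
 * can be kmapped; the pages stay pinned until the matching kunmap.
 */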
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct page *page;

        if (page_num >= obj->base.size >> PAGE_SHIFT)
                return NULL;

        if (!i915_gem_object_has_struct_page(obj))
                return NULL;

        if (i915_gem_object_pin_pages(obj))
                return NULL;

        /* Synchronisation is left to the caller (via .begin_cpu_access()) */
        page = i915_gem_object_get_page(obj, page_num);
        if (IS_ERR(page))
                goto err_unpin;

        return kmap(page);

err_unpin:
        i915_gem_object_unpin_pages(obj);
        return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

        kunmap(virt_to_page(addr));
        i915_gem_object_unpin_pages(obj);
}

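/*
 * Forward mmap to the shmem file backing the object, swapping the vma's
 * file reference over to it so that faults are serviced by shmem.
 */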
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        int ret;

        if (obj->base.size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!obj->base.filp)
                return -ENODEV;

        ret = call_mmap(obj->base.filp, vma);
        if (ret)
                return ret;

        fput(vma->vm_file);
        vma->vm_file = get_file(obj->base.filp);

        return 0;
}

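/*
 * Move the object into the CPU domain before direct CPU access. 'write'
 * is set for DMA_TO_DEVICE and DMA_BIDIRECTIONAL, as the CPU may then
 * dirty the pages.
 */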
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
        int err;

        err = i915_gem_object_pin_pages(obj);
        if (err)
                return err;

        err = i915_gem_object_lock_interruptible(obj);
        if (err)
                goto out;

        err = i915_gem_object_set_to_cpu_domain(obj, write);
        i915_gem_object_unlock(obj);

out:
        i915_gem_object_unpin_pages(obj);
        return err;
}

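/*
 * When CPU access ends, flush any CPU writes back to memory by moving
 * the object into the (coherent) GTT domain.
 */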
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        int err;

        err = i915_gem_object_pin_pages(obj);
        if (err)
                return err;

        err = i915_gem_object_lock_interruptible(obj);
        if (err)
                goto out;

        err = i915_gem_object_set_to_gtt_domain(obj, false);
        i915_gem_object_unlock(obj);

out:
        i915_gem_object_unpin_pages(obj);
        return err;
}

static const struct dma_buf_ops i915_dmabuf_ops = {
        .map_dma_buf = i915_gem_map_dma_buf,
        .unmap_dma_buf = i915_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .map = i915_gem_dmabuf_kmap,
        .unmap = i915_gem_dmabuf_kunmap,
        .mmap = i915_gem_dmabuf_mmap,
        .vmap = i915_gem_dmabuf_vmap,
        .vunmap = i915_gem_dmabuf_vunmap,
        .begin_cpu_access = i915_gem_begin_cpu_access,
        .end_cpu_access = i915_gem_end_cpu_access,
};

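/*
 * Export a GEM object as a dma-buf, sharing our reservation object with
 * the dma-buf so that fences are common to both.
 */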
struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &i915_dmabuf_ops;
        exp_info.size = gem_obj->size;
        exp_info.flags = flags;
        exp_info.priv = gem_obj;
        exp_info.resv = obj->base.resv;

        if (obj->ops->dmabuf_export) {
                int ret = obj->ops->dmabuf_export(obj);

                if (ret)
                        return ERR_PTR(ret);
        }

        return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}

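/*
 * For imported objects the exporter owns the backing store; we acquire
 * our pages by mapping the attachment created at import time.
 */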
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
        struct sg_table *pages;
        unsigned int sg_page_sizes;

        pages = dma_buf_map_attachment(obj->base.import_attach,
                                       DMA_BIDIRECTIONAL);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        sg_page_sizes = i915_sg_page_sizes(pages->sgl);

        __i915_gem_object_set_pages(obj, pages, sg_page_sizes);

        return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
                                             struct sg_table *pages)
{
        dma_buf_unmap_attachment(obj->base.import_attach, pages,
                                 DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
        .get_pages = i915_gem_object_get_pages_dmabuf,
        .put_pages = i915_gem_object_put_pages_dmabuf,
};

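/*
 * Import a dma-buf as a GEM object. Re-importing one of our own buffers
 * short-circuits to the original object; a foreign buffer gets a new
 * proxy object that sources its pages from the attachment.
 */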
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                                             struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct drm_i915_gem_object *obj;
        int ret;

        /* is this one of our own objects? */
        if (dma_buf->ops == &i915_dmabuf_ops) {
                obj = dma_buf_to_obj(dma_buf);
                /* is it from our device? */
                if (obj->base.dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own gem
                         * increases the refcount on the gem itself instead of
                         * the f_count of the dmabuf.
                         */
                        return &i915_gem_object_get(obj)->base;
                }
        }

        /* need to attach */
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        get_dma_buf(dma_buf);

        obj = i915_gem_object_alloc();
        if (obj == NULL) {
                ret = -ENOMEM;
                goto fail_detach;
        }

        drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
        i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
        obj->base.import_attach = attach;
        obj->base.resv = dma_buf->resv;

        /*
         * We use GTT as shorthand for a coherent domain, one that is
         * neither in the GPU cache nor in the CPU cache, where all
         * writes are immediately visible in memory. (That's not strictly
         * true, but it's close! There are internal buffers such as the
         * write-combined buffer or a delay through the chipset for GTT
         * writes that do require us to treat GTT as a separate cache domain.)
         */
        obj->read_domains = I915_GEM_DOMAIN_GTT;
        obj->write_domain = 0;

        return &obj->base;

fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif
