root/drivers/gpu/drm/i915/gem/i915_gem_phys.c


DEFINITIONS

This source file includes the following definitions:
  1. i915_gem_object_get_pages_phys
  2. i915_gem_object_put_pages_phys
  3. phys_release
  4. i915_gem_object_attach_phys

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm.h> /* for drm_legacy.h! */
#include <drm/drm_cache.h>
#include <drm/drm_legacy.h> /* for drm_pci.h! */
#include <drm/drm_pci.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

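/*
 * Allocate a single physically contiguous, DMA-coherent buffer covering the
 * whole object, copy the current shmem contents into it, and publish the
 * result as a one-entry sg_table.
 */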
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
        struct address_space *mapping = obj->base.filp->f_mapping;
        struct scatterlist *sg;
        struct sg_table *st;
        dma_addr_t dma;
        void *vaddr;
        void *dst;
        int i;

        if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
                return -EINVAL;

        /*
         * Always aligning to the object size allows a single allocation
         * to handle all possible callers, and given typical object sizes,
         * the alignment of the buddy allocation will naturally match.
         */
        vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev,
                                   roundup_pow_of_two(obj->base.size),
                                   &dma, GFP_KERNEL);
        if (!vaddr)
                return -ENOMEM;

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
                goto err_pci;

        if (sg_alloc_table(st, 1, GFP_KERNEL))
                goto err_st;

        sg = st->sgl;
        sg->offset = 0;
        sg->length = obj->base.size;

        sg_assign_page(sg, (struct page *)vaddr);
        sg_dma_address(sg) = dma;
        sg_dma_len(sg) = obj->base.size;

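        /*
         * Copy the object's existing shmem backing store, page by page, into
         * the new contiguous buffer, flushing each destination page out of
         * the CPU caches as it is written.
         */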
        dst = vaddr;
        for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                struct page *page;
                void *src;

                page = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(page))
                        goto err_st;

                src = kmap_atomic(page);
                memcpy(dst, src, PAGE_SIZE);
                drm_clflush_virt_range(dst, PAGE_SIZE);
                kunmap_atomic(src);

                put_page(page);
                dst += PAGE_SIZE;
        }

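        /* Flush the chipset write buffers so the copied data is visible to the GT. */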
        intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

        __i915_gem_object_set_pages(obj, st, sg->length);

        return 0;

err_st:
        sg_free_table(st); /* harmless no-op if the table was never allocated */
        kfree(st);
err_pci:
        dma_free_coherent(&obj->base.dev->pdev->dev,
                          roundup_pow_of_two(obj->base.size),
                          vaddr, dma);
        return -ENOMEM;
}

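/*
 * Tear down the contiguous allocation: if the object is marked dirty, copy
 * its contents back into the shmem pages first, then free the sg_table and
 * the DMA-coherent buffer.
 */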
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
                               struct sg_table *pages)
{
        dma_addr_t dma = sg_dma_address(pages->sgl);
        void *vaddr = sg_page(pages->sgl);

        __i915_gem_object_release_shmem(obj, pages, false);

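        /*
         * Write any modified contents back to the original shmem pages so
         * they are not lost when the contiguous copy is freed.
         */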
        if (obj->mm.dirty) {
                struct address_space *mapping = obj->base.filp->f_mapping;
                void *src = vaddr;
                int i;

                for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                        struct page *page;
                        char *dst;

                        page = shmem_read_mapping_page(mapping, i);
                        if (IS_ERR(page))
                                continue;

                        dst = kmap_atomic(page);
                        drm_clflush_virt_range(src, PAGE_SIZE);
                        memcpy(dst, src, PAGE_SIZE);
                        kunmap_atomic(dst);

                        set_page_dirty(page);
                        if (obj->mm.madv == I915_MADV_WILLNEED)
                                mark_page_accessed(page);
                        put_page(page);

                        src += PAGE_SIZE;
                }
                obj->mm.dirty = false;
        }

        sg_free_table(pages);
        kfree(pages);

        dma_free_coherent(&obj->base.dev->pdev->dev,
                          roundup_pow_of_two(obj->base.size),
                          vaddr, dma);
}

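/* Release the reference on the backing shmem file when the object is destroyed. */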
static void phys_release(struct drm_i915_gem_object *obj)
{
        fput(obj->base.filp);
}

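/* Backing-store hooks used once an object has been converted to phys. */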
static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
        .get_pages = i915_gem_object_get_pages_phys,
        .put_pages = i915_gem_object_put_pages_phys,

        .release = phys_release,
};

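/*
 * Convert a shmem-backed object into one backed by a single contiguous
 * physical allocation. The existing backing pages are swapped for a fresh
 * contiguous copy; the requested alignment must not exceed the object size,
 * since, as noted above, the power-of-two sized allocation is expected to
 * provide the alignment naturally.
 */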
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
        struct sg_table *pages;
        int err;

        if (align > obj->base.size)
                return -EINVAL;

        if (obj->ops == &i915_gem_phys_ops)
                return 0;

        if (obj->ops != &i915_gem_shmem_ops)
                return -EINVAL;

        err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
        if (err)
                return err;

        mutex_lock(&obj->mm.lock);

        if (obj->mm.madv != I915_MADV_WILLNEED) {
                err = -EFAULT;
                goto err_unlock;
        }

        if (obj->mm.quirked) {
                err = -EFAULT;
                goto err_unlock;
        }

        if (obj->mm.mapping) {
                err = -EBUSY;
                goto err_unlock;
        }

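        /*
         * Take the shmem pages away, switch to the phys ops and repopulate:
         * the new get_pages hook copies the old contents into the contiguous
         * buffer before the original shmem pages are released below.
         */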
        pages = __i915_gem_object_unset_pages(obj);

        obj->ops = &i915_gem_phys_ops;

        err = ____i915_gem_object_get_pages(obj);
        if (err)
                goto err_xfer;

        /* Perma-pin (until release) the physical set of pages */
        __i915_gem_object_pin_pages(obj);

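        /* The contents have been copied, so the old shmem pages can go. */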
        if (!IS_ERR_OR_NULL(pages))
                i915_gem_shmem_ops.put_pages(obj, pages);
        mutex_unlock(&obj->mm.lock);
        return 0;

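        /* On failure, restore the original shmem ops and backing pages. */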
err_xfer:
        obj->ops = &i915_gem_shmem_ops;
        if (!IS_ERR_OR_NULL(pages)) {
                unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);

                __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
        }
err_unlock:
        mutex_unlock(&obj->mm.lock);
        return err;
}
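/*
 * Illustrative sketch only (not part of this file): a caller that needs a
 * physically contiguous object would convert it before first use, roughly:
 *
 *	err = i915_gem_object_attach_phys(obj, alignment);
 *	if (err)
 *		return err;
 *
 * after which the object's pages describe one DMA-coherent allocation that
 * stays pinned until release. The call site and "alignment" value shown here
 * are hypothetical.
 */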

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif
