drivers/gpu/drm/i915/gem/i915_gem_internal.c


DEFINITIONS

This source file includes the following definitions:
  1. internal_free_pages
  2. i915_gem_object_get_pages_internal
  3. i915_gem_object_put_pages_internal
  4. i915_gem_object_create_internal

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"

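/*
 * GFP modifiers for the allocation loop below: QUIET lets speculative
 * high-order allocations fail fast and silently, while MAYFAIL lets the
 * final single-page attempt retry harder yet still return NULL instead
 * of invoking the OOM killer.
 */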
#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
#define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN)

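/*
 * Free every page referenced by the table, then the table itself. The
 * sg_page() check matters on the error path, where the final entry is
 * marked as the end with a NULL page (see the err: label in
 * i915_gem_object_get_pages_internal() below).
 */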
static void internal_free_pages(struct sg_table *st)
{
        struct scatterlist *sg;

        for (sg = st->sgl; sg; sg = __sg_next(sg)) {
                if (sg_page(sg))
                        __free_pages(sg_page(sg), get_order(sg->length));
        }

        sg_free_table(st);
        kfree(st);
}

static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct sg_table *st;
        struct scatterlist *sg;
        unsigned int sg_page_sizes;
        unsigned int npages;
        int max_order;
        gfp_t gfp;

        max_order = MAX_ORDER;
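        /*
         * swiotlb bounce buffering cannot map DMA segments larger than
         * its slab, so cap the allocation order to the largest segment
         * swiotlb reports it can handle.
         */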
#ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl()) {
                unsigned int max_segment;

                max_segment = swiotlb_max_segment();
                if (max_segment) {
                        max_segment = max_t(unsigned int, max_segment,
                                            PAGE_SIZE) >> PAGE_SHIFT;
                        max_order = min(max_order, ilog2(max_segment));
                }
        }
#endif

        gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
        if (IS_I965GM(i915) || IS_I965G(i915)) {
                /* 965gm cannot relocate objects above 4GiB. */
                gfp &= ~__GFP_HIGHMEM;
                gfp |= __GFP_DMA32;
        }

create_st:
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
                return -ENOMEM;

        npages = obj->base.size / PAGE_SIZE;
        if (sg_alloc_table(st, npages, GFP_KERNEL)) {
                kfree(st);
                return -ENOMEM;
        }

        sg = st->sgl;
        st->nents = 0;
        sg_page_sizes = 0;

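        /*
         * Fill the scatterlist with the largest chunks the page allocator
         * will give us: start at the highest useful order and step down on
         * failure. Only the final order-0 attempt is allowed to retry hard
         * (MAYFAIL), and its failure aborts the whole allocation.
         */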
        do {
                int order = min(fls(npages) - 1, max_order);
                struct page *page;

                do {
                        page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),
                                           order);
                        if (page)
                                break;
                        if (!order--)
                                goto err;

                        /* Limit subsequent allocations as well */
                        max_order = order;
                } while (1);

                sg_set_page(sg, page, PAGE_SIZE << order, 0);
                sg_page_sizes |= PAGE_SIZE << order;
                st->nents++;

                npages -= 1 << order;
                if (!npages) {
                        sg_mark_end(sg);
                        break;
                }

                sg = __sg_next(sg);
        } while (1);

        if (i915_gem_gtt_prepare_pages(obj, st)) {
                /* Failed to dma-map; try again with single page sg segments */
                if (get_order(st->sgl->length)) {
                        internal_free_pages(st);
                        max_order = 0;
                        goto create_st;
                }
                goto err;
        }

        /*
         * Mark the pages as dontneed whilst they are still pinned. As soon
         * as they are unpinned they are allowed to be reaped by the shrinker,
         * and the caller is expected to repopulate - the contents of this
         * object are only valid whilst active and pinned.
         */
        obj->mm.madv = I915_MADV_DONTNEED;

        __i915_gem_object_set_pages(obj, st, sg_page_sizes);

        return 0;

err:
        sg_set_page(sg, NULL, 0, 0);
        sg_mark_end(sg);
        internal_free_pages(st);

        return -ENOMEM;
}

static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
                                               struct sg_table *pages)
{
        i915_gem_gtt_finish_pages(obj, pages);
        internal_free_pages(pages);

        obj->mm.dirty = false;
        obj->mm.madv = I915_MADV_WILLNEED;
}

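/*
 * Internal objects are backed by normal struct pages and, being marked
 * I915_MADV_DONTNEED while allocated, are eligible for the shrinker to
 * reap as soon as they are unpinned.
 */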
static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
        .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
                 I915_GEM_OBJECT_IS_SHRINKABLE,
        .get_pages = i915_gem_object_get_pages_internal,
        .put_pages = i915_gem_object_put_pages_internal,
};

/**
 * i915_gem_object_create_internal: create an object with volatile pages
 * @i915: the i915 device
 * @size: the size in bytes of backing storage to allocate for the object
 *
 * Creates a new object that wraps some internal memory for private use.
 * This object is not backed by swappable storage, and as such its contents
 * are volatile and only valid whilst pinned. If the object is reaped by the
 * shrinker, its pages and data will be discarded. Equally, it is not a full
 * GEM object and so not valid for access from userspace. This makes it useful
 * for hardware interfaces like ringbuffers (which are pinned from the time
 * the request is written to the time the hardware stops accessing it), but
 * not for contexts (which need to be preserved when not active for later
 * reuse). Note that it is not cleared upon allocation.
 */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
                                phys_addr_t size)
{
        struct drm_i915_gem_object *obj;
        unsigned int cache_level;

        GEM_BUG_ON(!size);
        GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

        if (overflows_type(size, obj->base.size))
                return ERR_PTR(-E2BIG);

        obj = i915_gem_object_alloc();
        if (!obj)
                return ERR_PTR(-ENOMEM);

        drm_gem_private_object_init(&i915->drm, &obj->base, size);
        i915_gem_object_init(obj, &i915_gem_object_internal_ops);

        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->write_domain = I915_GEM_DOMAIN_CPU;

        cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
        i915_gem_object_set_cache_coherency(obj, cache_level);

        return obj;
}
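
/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * a typical caller creates an internal object, pins a CPU mapping for
 * the duration of the hardware access, and releases everything
 * afterwards. i915_gem_object_pin_map(), i915_gem_object_unpin_map()
 * and i915_gem_object_put() are the usual i915 GEM object helpers;
 * locking and error handling are pared down to the bare minimum.
 *
 *      struct drm_i915_gem_object *obj;
 *      void *vaddr;
 *
 *      obj = i915_gem_object_create_internal(i915, SZ_64K);
 *      if (IS_ERR(obj))
 *              return PTR_ERR(obj);
 *
 *      // Pinning a map allocates the pages via get_pages() above; the
 *      // contents are uninitialised and only valid while pinned.
 *      vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *      if (IS_ERR(vaddr)) {
 *              i915_gem_object_put(obj);
 *              return PTR_ERR(vaddr);
 *      }
 *
 *      ... use vaddr while pinned ...
 *
 *      i915_gem_object_unpin_map(obj);
 *      i915_gem_object_put(obj);
 */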
