This source file includes the following definitions:
- huge_free_pages
- huge_get_pages
- huge_put_pages
- huge_gem_object
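
These helpers build a fake "huge" GEM object for i915 selftests: the object reports a large GTT (dma) size while being backed by only a small number of real pages repeated through its scatterlist.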
/*
 * SPDX-License-Identifier: GPL-2.0
 *
 * Copyright © 2016 Intel Corporation
 */

#include "i915_scatterlist.h"

#include "huge_gem_object.h"

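/*
 * Only the first nreal scatterlist entries carry pages we allocated;
 * the remainder alias them, so free exactly nreal pages.
 */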
static void huge_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)
{
	unsigned long nreal = obj->scratch / PAGE_SIZE;
	struct scatterlist *sg;

	for (sg = pages->sgl; sg && nreal--; sg = __sg_next(sg))
		__free_page(sg_page(sg));

	sg_free_table(pages);
	kfree(pages);
}

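/*
 * Allocate nreal real pages, then wrap the rest of the scatterlist
 * back onto those pages so the object appears to be obj->base.size
 * bytes while consuming only obj->scratch bytes of memory.
 */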
static int huge_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
	const unsigned long nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct scatterlist *sg, *src, *end;
	struct sg_table *pages;
	unsigned long n;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return -ENOMEM;

	if (sg_alloc_table(pages, npages, GFP)) {
		kfree(pages);
		return -ENOMEM;
	}

	/* Fill the first nreal entries with freshly allocated pages. */
	sg = pages->sgl;
	for (n = 0; n < nreal; n++) {
		struct page *page;

		page = alloc_page(GFP | __GFP_HIGHMEM);
		if (!page) {
			sg_mark_end(sg);
			goto err;
		}

		sg_set_page(sg, page, PAGE_SIZE, 0);
		sg = __sg_next(sg);
	}

	/*
	 * Point every remaining entry back at the real pages, cycling
	 * through them until the table is full.
	 */
	if (nreal < npages) {
		for (end = sg, src = pages->sgl; sg; sg = __sg_next(sg)) {
			sg_set_page(sg, sg_page(src), PAGE_SIZE, 0);
			src = __sg_next(src);
			if (src == end)
				src = pages->sgl;
		}
	}

	if (i915_gem_gtt_prepare_pages(obj, pages))
		goto err;

	__i915_gem_object_set_pages(obj, pages, PAGE_SIZE);

	return 0;

err:
	huge_free_pages(obj, pages);

	return -ENOMEM;
#undef GFP
}
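/*
 * Release the DMA mapping set up by i915_gem_gtt_prepare_pages(),
 * then drop the real pages and the scatterlist.
 */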
static void huge_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	huge_free_pages(obj, pages);

	obj->mm.dirty = false;
}
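/* The fake object is page-backed and may be reclaimed by the shrinker. */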
static const struct drm_i915_gem_object_ops huge_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = huge_get_pages,
	.put_pages = huge_put_pages,
};
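/*
 * Create an object that claims to be dma_size bytes for GTT purposes
 * while only ever allocating phys_size bytes of system memory; the
 * physical size is stashed in obj->scratch for the page hooks above.
 */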
struct drm_i915_gem_object *
huge_gem_object(struct drm_i915_private *i915,
		phys_addr_t phys_size,
		dma_addr_t dma_size)
{
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	GEM_BUG_ON(!phys_size || phys_size > dma_size);
	GEM_BUG_ON(!IS_ALIGNED(phys_size, PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(dma_size, I915_GTT_PAGE_SIZE));

	if (overflows_type(dma_size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
	i915_gem_object_init(obj, &huge_ops);

	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);
	obj->scratch = phys_size;

	return obj;
}
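
/*
 * Example (illustrative): a selftest can request a GTT footprint far
 * larger than the memory it actually consumes, e.g.
 *
 *	obj = huge_gem_object(i915, PAGE_SIZE, SZ_1G);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	...
 *	i915_gem_object_put(obj);
 */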