This source file includes the following definitions.
- order_to_index
- order_to_size
- alloc_buffer_page
- free_buffer_page
- alloc_largest_available
- ion_system_heap_allocate
- ion_system_heap_free
- ion_system_heap_shrink
- ion_system_heap_destroy_pools
- ion_system_heap_create_pools
- __ion_system_heap_create
- ion_system_heap_create
- ion_system_contig_heap_allocate
- ion_system_contig_heap_free
- __ion_system_contig_heap_create
- ion_system_contig_heap_create
#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "ion.h"

#define NUM_ORDERS ARRAY_SIZE(orders)

static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
                                     __GFP_NORETRY) & ~__GFP_RECLAIM;
static gfp_t low_order_gfp_flags = GFP_HIGHUSER | __GFP_ZERO;
static const unsigned int orders[] = {8, 4, 0};

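/*
 * One page pool is kept per entry in orders[]; order_to_index() maps an
 * allocation order to its slot in that pools[] array and BUGs if the
 * order is not one of the supported values.
 */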
static int order_to_index(unsigned int order)
{
        int i;

        for (i = 0; i < NUM_ORDERS; i++)
                if (order == orders[i])
                        return i;
        BUG();
        return -1;
}

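/*
 * order_to_size() converts an allocation order to bytes: with 4 KiB pages,
 * order 8 is 1 MiB, order 4 is 64 KiB and order 0 is a single page.
 */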
static inline unsigned int order_to_size(int order)
{
        return PAGE_SIZE << order;
}

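/*
 * A system heap is a generic ion_heap plus one page pool per supported
 * order.
 */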
struct ion_system_heap {
        struct ion_heap heap;
        struct ion_page_pool *pools[NUM_ORDERS];
};

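/* Take a page of the given order from the matching per-order pool. */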
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
                                      struct ion_buffer *buffer,
                                      unsigned long order)
{
        struct ion_page_pool *pool = heap->pools[order_to_index(order)];

        return ion_page_pool_alloc(pool);
}

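/*
 * Give a page back to its per-order pool. Pages released on behalf of the
 * shrinker (ION_PRIV_FLAG_SHRINKER_FREE) bypass the pools and go straight
 * back to the page allocator.
 */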
static void free_buffer_page(struct ion_system_heap *heap,
                             struct ion_buffer *buffer, struct page *page)
{
        struct ion_page_pool *pool;
        unsigned int order = compound_order(page);

        if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
                __free_pages(page, order);
                return;
        }

        pool = heap->pools[order_to_index(order)];

        ion_page_pool_free(pool, page);
}

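/*
 * Walk orders[] from largest to smallest and return the first page that
 * fits within both the remaining size and max_order and can actually be
 * allocated.
 */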
static struct page *alloc_largest_available(struct ion_system_heap *heap,
                                            struct ion_buffer *buffer,
                                            unsigned long size,
                                            unsigned int max_order)
{
        struct page *page;
        int i;

        for (i = 0; i < NUM_ORDERS; i++) {
                if (size < order_to_size(orders[i]))
                        continue;
                if (max_order < orders[i])
                        continue;

                page = alloc_buffer_page(heap, buffer, orders[i]);
                if (!page)
                        continue;

                return page;
        }

        return NULL;
}

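/*
 * Build a buffer out of variable-order pages and describe it with an
 * sg_table. Requests larger than half of system RAM are rejected up front,
 * and max_order shrinks with each successful allocation so the buffer is
 * filled from larger to smaller orders.
 */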
static int ion_system_heap_allocate(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    unsigned long size,
                                    unsigned long flags)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        struct sg_table *table;
        struct scatterlist *sg;
        struct list_head pages;
        struct page *page, *tmp_page;
        int i = 0;
        unsigned long size_remaining = PAGE_ALIGN(size);
        unsigned int max_order = orders[0];

        if (size / PAGE_SIZE > totalram_pages() / 2)
                return -ENOMEM;

        INIT_LIST_HEAD(&pages);
        while (size_remaining > 0) {
                page = alloc_largest_available(sys_heap, buffer, size_remaining,
                                               max_order);
                if (!page)
                        goto free_pages;
                list_add_tail(&page->lru, &pages);
                size_remaining -= page_size(page);
                max_order = compound_order(page);
                i++;
        }
        table = kmalloc(sizeof(*table), GFP_KERNEL);
        if (!table)
                goto free_pages;

        if (sg_alloc_table(table, i, GFP_KERNEL))
                goto free_table;

        sg = table->sgl;
        list_for_each_entry_safe(page, tmp_page, &pages, lru) {
                sg_set_page(sg, page, page_size(page), 0);
                sg = sg_next(sg);
                list_del(&page->lru);
        }

        buffer->sg_table = table;
        return 0;

free_table:
        kfree(table);
free_pages:
        list_for_each_entry_safe(page, tmp_page, &pages, lru)
                free_buffer_page(sys_heap, buffer, page);
        return -ENOMEM;
}

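/*
 * Free a buffer page by page. Unless the buffer is being torn down by the
 * shrinker, its pages are zeroed first and then recycled through the pools.
 */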
static void ion_system_heap_free(struct ion_buffer *buffer)
{
        struct ion_system_heap *sys_heap = container_of(buffer->heap,
                                                        struct ion_system_heap,
                                                        heap);
        struct sg_table *table = buffer->sg_table;
        struct scatterlist *sg;
        int i;

        if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
                ion_heap_buffer_zero(buffer);

        for_each_sg(table->sgl, sg, table->nents, i)
                free_buffer_page(sys_heap, buffer, sg_page(sg));
        sg_free_table(table);
        kfree(table);
}

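/*
 * Shrinker hook: with nr_to_scan == 0 only report how many pages the pools
 * could free; otherwise drain the pools until nr_to_scan pages have been
 * freed and return the number actually released.
 */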
static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
                                  int nr_to_scan)
{
        struct ion_page_pool *pool;
        struct ion_system_heap *sys_heap;
        int nr_total = 0;
        int i, nr_freed;
        int only_scan = 0;

        sys_heap = container_of(heap, struct ion_system_heap, heap);

        if (!nr_to_scan)
                only_scan = 1;

        for (i = 0; i < NUM_ORDERS; i++) {
                pool = sys_heap->pools[i];

                if (only_scan) {
                        nr_total += ion_page_pool_shrink(pool,
                                                         gfp_mask,
                                                         nr_to_scan);
                } else {
                        nr_freed = ion_page_pool_shrink(pool,
                                                        gfp_mask,
                                                        nr_to_scan);
                        nr_to_scan -= nr_freed;
                        nr_total += nr_freed;
                        if (nr_to_scan <= 0)
                                break;
                }
        }
        return nr_total;
}

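/* Kernel and user mapping are handled by the generic ion_heap_* helpers. */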
static struct ion_heap_ops system_heap_ops = {
        .allocate = ion_system_heap_allocate,
        .free = ion_system_heap_free,
        .map_kernel = ion_heap_map_kernel,
        .unmap_kernel = ion_heap_unmap_kernel,
        .map_user = ion_heap_map_user,
        .shrink = ion_system_heap_shrink,
};

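/* Destroy whichever pools were created; unused (NULL) slots are skipped. */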
static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
{
        int i;

        for (i = 0; i < NUM_ORDERS; i++)
                if (pools[i])
                        ion_page_pool_destroy(pools[i]);
}

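/*
 * Create one pool per order. Orders above 4 use gfp flags that avoid
 * direct reclaim and allocation warnings, so a failed high-order
 * allocation stays cheap and the caller can fall back to a smaller order.
 */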
static int ion_system_heap_create_pools(struct ion_page_pool **pools)
{
        int i;

        for (i = 0; i < NUM_ORDERS; i++) {
                struct ion_page_pool *pool;
                gfp_t gfp_flags = low_order_gfp_flags;

                if (orders[i] > 4)
                        gfp_flags = high_order_gfp_flags;

                pool = ion_page_pool_create(gfp_flags, orders[i]);
                if (!pool)
                        goto err_create_pool;
                pools[i] = pool;
        }

        return 0;

err_create_pool:
        ion_system_heap_destroy_pools(pools);
        return -ENOMEM;
}

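/*
 * Allocate and initialise the system heap, including its page pools.
 * ION_HEAP_FLAG_DEFER_FREE lets the ION core free buffers asynchronously.
 */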
static struct ion_heap *__ion_system_heap_create(void)
{
        struct ion_system_heap *heap;

        heap = kzalloc(sizeof(*heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->heap.ops = &system_heap_ops;
        heap->heap.type = ION_HEAP_TYPE_SYSTEM;
        heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;

        if (ion_system_heap_create_pools(heap->pools))
                goto free_heap;

        return &heap->heap;

free_heap:
        kfree(heap);
        return ERR_PTR(-ENOMEM);
}

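/* Register the system heap with the ION core at device_initcall time. */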
static int ion_system_heap_create(void)
{
        struct ion_heap *heap;

        heap = __ion_system_heap_create();
        if (IS_ERR(heap))
                return PTR_ERR(heap);
        heap->name = "ion_system_heap";

        ion_device_add_heap(heap);

        return 0;
}
device_initcall(ion_system_heap_create);

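/*
 * Contiguous variant: allocate one physically contiguous block of
 * get_order(len) pages, split it into order-0 pages, and immediately free
 * the tail pages beyond the page-aligned length.
 */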
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
                                           struct ion_buffer *buffer,
                                           unsigned long len,
                                           unsigned long flags)
{
        int order = get_order(len);
        struct page *page;
        struct sg_table *table;
        unsigned long i;
        int ret;

        page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order);
        if (!page)
                return -ENOMEM;

        split_page(page, order);

        len = PAGE_ALIGN(len);
        for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
                __free_page(page + i);

        table = kmalloc(sizeof(*table), GFP_KERNEL);
        if (!table) {
                ret = -ENOMEM;
                goto free_pages;
        }

        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret)
                goto free_table;

        sg_set_page(table->sgl, page, len, 0);

        buffer->sg_table = table;

        return 0;

free_table:
        kfree(table);
free_pages:
        for (i = 0; i < len >> PAGE_SHIFT; i++)
                __free_page(page + i);

        return ret;
}

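/* Return every page of the contiguous buffer to the page allocator. */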
static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
        struct sg_table *table = buffer->sg_table;
        struct page *page = sg_page(table->sgl);
        unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
        unsigned long i;

        for (i = 0; i < pages; i++)
                __free_page(page + i);
        sg_free_table(table);
        kfree(table);
}

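/*
 * The contiguous heap keeps no pools and has no shrink op; freed pages go
 * straight back to the buddy allocator.
 */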
static struct ion_heap_ops kmalloc_ops = {
        .allocate = ion_system_contig_heap_allocate,
        .free = ion_system_contig_heap_free,
        .map_kernel = ion_heap_map_kernel,
        .unmap_kernel = ion_heap_unmap_kernel,
        .map_user = ion_heap_map_user,
};

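/* Allocate and initialise the contiguous system heap descriptor. */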
static struct ion_heap *__ion_system_contig_heap_create(void)
{
        struct ion_heap *heap;

        heap = kzalloc(sizeof(*heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->ops = &kmalloc_ops;
        heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
        heap->name = "ion_system_contig_heap";

        return heap;
}

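/* Register the contiguous system heap with the ION core. */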
static int ion_system_contig_heap_create(void)
{
        struct ion_heap *heap;

        heap = __ion_system_contig_heap_create();
        if (IS_ERR(heap))
                return PTR_ERR(heap);

        ion_device_add_heap(heap);

        return 0;
}
device_initcall(ion_system_contig_heap_create);
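/*
 * Rough userspace sketch (an assumption, not part of this file): with the
 * legacy /dev/ion character device and the staging ION uapi, a client
 * would allocate from one of the heaps registered above roughly like this.
 * The field names, the ioctl and the use_dmabuf() helper are illustrative
 * and may not match every kernel version.
 *
 *      struct ion_allocation_data data = {
 *              .len = 1 * 1024 * 1024,     // 1 MiB buffer
 *              .heap_id_mask = ~0u,        // any heap; real code queries heap ids
 *              .flags = 0,
 *      };
 *      int ionfd = open("/dev/ion", O_RDWR);
 *
 *      if (ionfd >= 0 && ioctl(ionfd, ION_IOC_ALLOC, &data) == 0)
 *              use_dmabuf(data.fd);        // data.fd is a dma-buf file descriptor
 */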