/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

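/*
 * Pages are pooled at these orders: with 4K pages that is 1M, 64K and 4K
 * chunks.  High-order allocations are opportunistic: they must not sleep,
 * retry or warn, so the allocator can quickly fall back to a smaller order
 * instead of stalling in the page allocator.
 */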
static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
				     __GFP_NORETRY) & ~__GFP_WAIT;
static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);
static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static inline unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool *pools[0];
};

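/*
 * Allocate one chunk of the requested order.  Only uncached buffers are
 * served from the per-order pools; cached buffers go straight to the page
 * allocator and are synced for the device before use.
 */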
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
	struct page *page;

	if (!cached) {
		page = ion_page_pool_alloc(pool);
	} else {
		gfp_t gfp_flags = low_order_gfp_flags;

		if (order > 4)
			gfp_flags = high_order_gfp_flags;
		page = alloc_pages(gfp_flags | __GFP_COMP, order);
		if (!page)
			return NULL;
		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
					  DMA_BIDIRECTIONAL);
	}

	return page;
}

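/*
 * Return a chunk to its per-order pool, or release it to the system.
 * Cached pages never came from a pool, and pages freed on behalf of the
 * pool shrinker (ION_PRIV_FLAG_SHRINKER_FREE) must not be recycled back
 * into the pool being reclaimed.
 */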
static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page)
{
	unsigned int order = compound_order(page);
	bool cached = ion_buffer_cached(buffer);

	if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];

		ion_page_pool_free(pool, page);
	} else {
		__free_pages(page, order);
	}
}

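/*
 * Walk the supported orders from largest to smallest and hand back the
 * first chunk that both fits in the remaining size and can actually be
 * allocated.  max_order caps the search so the caller's loop never retries
 * an order larger than the last one that succeeded.
 */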
static struct page *alloc_largest_available(struct ion_system_heap *heap,
					    struct ion_buffer *buffer,
					    unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		return page;
	}

	return NULL;
}

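/*
 * Build the buffer as a list of variable-order chunks, largest first, then
 * describe the result in a freshly allocated sg_table with one entry per
 * chunk.  Requests larger than half of system RAM are rejected up front
 * rather than being allowed to push the system deep into reclaim.
 */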
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i = 0;
	unsigned long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];

	if (align > PAGE_SIZE)
		return -EINVAL;

	if (size / PAGE_SIZE > totalram_pages / 2)
		return -ENOMEM;

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		page = alloc_largest_available(sys_heap, buffer, size_remaining,
					       max_order);
		if (!page)
			goto free_pages;
		list_add_tail(&page->lru, &pages);
		size_remaining -= PAGE_SIZE << compound_order(page);
		max_order = compound_order(page);
		i++;
	}
	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto free_pages;

	if (sg_alloc_table(table, i, GFP_KERNEL))
		goto free_table;

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	buffer->priv_virt = table;
	return 0;

free_table:
	kfree(table);
free_pages:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		free_buffer_page(sys_heap, buffer, page);
	return -ENOMEM;
}

static void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_system_heap *sys_heap = container_of(buffer->heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->sg_table;
	bool cached = ion_buffer_cached(buffer);
	struct scatterlist *sg;
	int i;

	/*
	 * Uncached pages come from the page pools, so zero them before
	 * returning them for security purposes (other allocations are
	 * zeroed at alloc time).
	 */
	if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
		ion_heap_buffer_zero(buffer);

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg));
	sg_free_table(table);
	kfree(table);
}

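/*
 * The sg_table was fully built at allocation time, so mapping for DMA is
 * just handing it out; there is nothing to tear down on unmap.
 */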
static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

static void ion_system_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
}

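/*
 * Shrinker hook: forward the scan request to each order's page pool and
 * report the combined result.
 */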
static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
				  int nr_to_scan)
{
	struct ion_system_heap *sys_heap;
	int nr_total = 0;
	int i;

	sys_heap = container_of(heap, struct ion_system_heap, heap);

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
	}

	return nr_total;
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
	.shrink = ion_system_heap_shrink,
};

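/*
 * debugfs hook: report, for each order, how many pooled pages sit on the
 * highmem and lowmem free lists and how much memory that represents.
 */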
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
				      void *unused)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
			   pool->high_count, pool->order,
			   (PAGE_SIZE << pool->order) * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
			   pool->low_count, pool->order,
			   (PAGE_SIZE << pool->order) * pool->low_count);
	}
	return 0;
}

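/*
 * Create the system heap and one page pool per supported order.  The heap
 * uses deferred freeing, so buffer teardown (including the zeroing done in
 * ion_system_heap_free) happens off the freeing client's critical path.
 *
 * A minimal registration sketch, assuming an ion_device *idev obtained
 * elsewhere from ion_device_create() (illustrative only):
 *
 *	struct ion_heap *heap = ion_system_heap_create(NULL);
 *
 *	if (!IS_ERR(heap)) {
 *		heap->id = ION_HEAP_TYPE_SYSTEM;	// id chosen by platform
 *		ion_device_add_heap(idev, heap);
 *	}
 */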
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_system_heap *heap;
	int i;

	heap = kzalloc(sizeof(struct ion_system_heap) +
		       sizeof(struct ion_page_pool *) * num_orders,
		       GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;
		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto destroy_pools;
		heap->pools[i] = pool;
	}

	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;

destroy_pools:
	while (i--)
		ion_page_pool_destroy(heap->pools[i]);
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++)
		ion_page_pool_destroy(sys_heap->pools[i]);
	kfree(sys_heap);
}

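/*
 * The contiguous variant allocates one physically contiguous run, then
 * uses split_page() so that each 4K page can be freed individually: the
 * tail pages beyond the (page-aligned) requested length are returned to
 * the system immediately, and the error path can free page by page.
 */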
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	int order = get_order(len);
	struct page *page;
	struct sg_table *table;
	unsigned long i;
	int ret;

	if (align > (PAGE_SIZE << order))
		return -EINVAL;

	page = alloc_pages(low_order_gfp_flags, order);
	if (!page)
		return -ENOMEM;

	split_page(page, order);

	len = PAGE_ALIGN(len);
	for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
		__free_page(page + i);

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto free_pages;
	}

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto free_table;

	sg_set_page(table->sgl, page, len, 0);

	buffer->priv_virt = table;

	ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);

	return 0;

free_table:
	kfree(table);
free_pages:
	for (i = 0; i < len >> PAGE_SHIFT; i++)
		__free_page(page + i);

	return ret;
}

static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->priv_virt;
	struct page *page = sg_page(table->sgl);
	unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
	unsigned long i;

	for (i = 0; i < pages; i++)
		__free_page(page + i);
	sg_free_table(table);
	kfree(table);
}

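/*
 * The buffer is physically contiguous, so its physical address is simply
 * that of the first page and its length is the buffer size.
 */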
static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	struct sg_table *table = buffer->priv_virt;
	struct page *page = sg_page(table->sgl);

	*addr = page_to_phys(page);
	*len = buffer->size;
	return 0;
}

static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						       struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
					     struct ion_buffer *buffer)
{
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}