This source file includes the following definitions:
- ion_cma_allocate
- ion_cma_free
- __ion_cma_heap_create
- __ion_add_cma_heaps
- ion_add_cma_heaps
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/cma.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>

#include "ion.h"

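/*
 * Per-heap state: embeds the generic ION heap and records which CMA
 * area backs allocations from it.
 */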
struct ion_cma_heap {
	struct ion_heap heap;
	struct cma *cma;
};

#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)

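/*
 * Allocate a physically contiguous buffer from the heap's CMA area,
 * zero it, and describe it with a single-entry sg_table.
 */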
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
			    unsigned long len,
			    unsigned long flags)
{
	struct ion_cma_heap *cma_heap = to_cma_heap(heap);
	struct sg_table *table;
	struct page *pages;
	unsigned long size = PAGE_ALIGN(len);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	int ret;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
	if (!pages)
		return -ENOMEM;

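	/*
	 * cma_alloc() does not zero the memory for us.  Highmem pages have
	 * no permanent kernel mapping, so clear them one page at a time via
	 * kmap_atomic(); lowmem can be cleared in one pass through
	 * page_address().
	 */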
	if (PageHighMem(pages)) {
		unsigned long nr_clear_pages = nr_pages;
		struct page *page = pages;

		while (nr_clear_pages > 0) {
			void *vaddr = kmap_atomic(page);

			memset(vaddr, 0, PAGE_SIZE);
			kunmap_atomic(vaddr);
			page++;
			nr_clear_pages--;
		}
	} else {
		memset(page_address(pages), 0, size);
	}

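	/*
	 * The allocation is one contiguous chunk, so a single scatterlist
	 * entry covers the whole buffer.
	 */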
	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		goto err;

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto free_mem;

	sg_set_page(table->sgl, pages, size, 0);

	buffer->priv_virt = pages;
	buffer->sg_table = table;
	return 0;

free_mem:
	kfree(table);
err:
	cma_release(cma_heap->cma, pages, nr_pages);
	return -ENOMEM;
}

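/* Return the pages to the CMA area and release the buffer's sg_table. */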
static void ion_cma_free(struct ion_buffer *buffer)
{
	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
	struct page *pages = buffer->priv_virt;
	unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;

	cma_release(cma_heap->cma, pages, nr_pages);

	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

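/*
 * CMA buffers are physically contiguous, so the generic ion_heap helpers
 * can handle the kernel and userspace mappings.
 */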
static struct ion_heap_ops ion_cma_ops = {
	.allocate = ion_cma_allocate,
	.free = ion_cma_free,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};

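/* Wrap a CMA area in an ION heap of type ION_HEAP_TYPE_DMA. */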
static struct ion_heap *__ion_cma_heap_create(struct cma *cma)
{
	struct ion_cma_heap *cma_heap;

	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return ERR_PTR(-ENOMEM);

	cma_heap->heap.ops = &ion_cma_ops;
	cma_heap->cma = cma;
	cma_heap->heap.type = ION_HEAP_TYPE_DMA;
	return &cma_heap->heap;
}

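/*
 * cma_for_each_area() callback: create a heap for one CMA area, name it
 * after that area, and register it with the ION core.
 */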
static int __ion_add_cma_heaps(struct cma *cma, void *data)
{
	struct ion_heap *heap;

	heap = __ion_cma_heap_create(cma);
	if (IS_ERR(heap))
		return PTR_ERR(heap);

	heap->name = cma_get_name(cma);

	ion_device_add_heap(heap);
	return 0;
}

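/* Register one ION heap per CMA area declared on the system at boot. */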
static int ion_add_cma_heaps(void)
{
	cma_for_each_area(__ion_add_cma_heaps, NULL);
	return 0;
}
device_initcall(ion_add_cma_heaps);