This source file includes the following definitions:
- dma_common_find_pages
- __dma_common_pages_remap
- dma_common_pages_remap
- dma_common_contiguous_remap
- dma_common_free_remap
- early_coherent_pool
- dma_atomic_pool_gfp
- dma_atomic_pool_init
- dma_in_atomic_pool
- dma_alloc_from_pool
- dma_free_from_pool
- arch_dma_alloc
- arch_dma_free
- arch_dma_coherent_to_pfn
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (c) 2014 The Linux Foundation
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

struct page **dma_common_find_pages(void *cpu_addr)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || area->flags != VM_DMA_COHERENT)
		return NULL;
	return area->pages;
}
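
/*
 * Sketch (hypothetical helper, not part of this file's API): translate a
 * page offset inside a remapped DMA buffer back to its backing page,
 * e.g. for a fault-based mmap implementation.
 */
static struct page *example_vaddr_to_page(void *cpu_addr, unsigned long pgoff)
{
	struct page **pages = dma_common_find_pages(cpu_addr);

	return pages ? pages[pgoff] : NULL;
}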

static struct vm_struct *__dma_common_pages_remap(struct page **pages,
		size_t size, pgprot_t prot, const void *caller)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_DMA_COHERENT, caller);
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area;
}

/*
 * Remaps an array of PAGE_SIZE pages into another vm_area.
 * Cannot be used in non-sleeping contexts
 */
void *dma_common_pages_remap(struct page **pages, size_t size,
		pgprot_t prot, const void *caller)
{
	struct vm_struct *area;

	area = __dma_common_pages_remap(pages, size, prot, caller);
	if (!area)
		return NULL;

	area->pages = pages;

	return area->addr;
}
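
/*
 * Sketch (assumed caller, not in this file): stitch two discontiguous
 * pages into one virtually contiguous, uncached kernel mapping.  Like
 * the remap helpers themselves, this must run in sleepable context.
 */
static void *example_remap_two_pages(struct page *a, struct page *b)
{
	struct page *pages[2] = { a, b };

	return dma_common_pages_remap(pages, 2 * PAGE_SIZE,
			pgprot_noncached(PAGE_KERNEL),
			__builtin_return_address(0));
}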

/*
 * Remaps an allocated contiguous region into another vm_area.
 * Cannot be used in non-sleeping contexts
 */
void *dma_common_contiguous_remap(struct page *page, size_t size,
		pgprot_t prot, const void *caller)
{
	int i;
	struct page **pages;
	struct vm_struct *area;

	pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		pages[i] = nth_page(page, i);

	area = __dma_common_pages_remap(pages, size, prot, caller);

	kfree(pages);

	if (!area)
		return NULL;
	return area->addr;
}
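
/*
 * Sketch (hypothetical caller): remap an order-2 physically contiguous
 * allocation with an uncached protection.  Note that the temporary page
 * array above is sized by allocation order, so it can be larger than
 * strictly needed when size is not a power-of-two number of pages.
 */
static void *example_remap_contiguous(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	return dma_common_contiguous_remap(page, 4 * PAGE_SIZE,
			pgprot_noncached(PAGE_KERNEL),
			__builtin_return_address(0));
}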

/*
 * Unmaps a range previously mapped by dma_common_*_remap
 */
void dma_common_free_remap(void *cpu_addr, size_t size)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || area->flags != VM_DMA_COHERENT) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
	vunmap(cpu_addr);
}
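
/*
 * Sketch: teardown matching example_remap_contiguous() above.
 * dma_common_free_remap() only removes the kernel mapping; the backing
 * pages must still be freed by their owner.
 */
static void example_unmap_contiguous(void *cpu_addr, struct page *page)
{
	dma_common_free_remap(cpu_addr, 4 * PAGE_SIZE);
	__free_pages(page, 2);
}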

#ifdef CONFIG_DMA_DIRECT_REMAP
static struct gen_pool *atomic_pool __ro_after_init;

#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
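
/*
 * The pool defaults to 256 KiB and can be resized on the kernel command
 * line, e.g. "coherent_pool=2M"; memparse() accepts the usual K/M/G
 * suffixes.
 */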

static gfp_t dma_atomic_pool_gfp(void)
{
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		return GFP_DMA;
	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		return GFP_DMA32;
	return GFP_KERNEL;
}
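
/*
 * The pool backs atomic allocations for any device, so it is placed in
 * the most restrictive zone available: memory from ZONE_DMA (or
 * ZONE_DMA32) is also addressable by devices with narrow DMA masks.
 */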

static int __init dma_atomic_pool_init(void)
{
	unsigned int pool_size_order = get_order(atomic_pool_size);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	int ret;

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order, false);
	else
		page = alloc_pages(dma_atomic_pool_gfp(), pool_size_order);
	if (!page)
		goto out;

	arch_dma_prep_coherent(page, atomic_pool_size);

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto free_page;

	addr = dma_common_contiguous_remap(page, atomic_pool_size,
					   pgprot_dmacoherent(PAGE_KERNEL),
					   __builtin_return_address(0));
	if (!addr)
		goto destroy_genpool;

	ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
				page_to_phys(page), atomic_pool_size, -1);
	if (ret)
		goto remove_mapping;
	gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);

	pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
		atomic_pool_size / 1024);
	return 0;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
postcore_initcall(dma_atomic_pool_init);
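
/*
 * postcore_initcall runs early in boot, well before normal driver
 * probing, so the pool is in place by the time the first atomic
 * coherent allocation can occur.
 */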

bool dma_in_atomic_pool(void *start, size_t size)
{
	if (unlikely(!atomic_pool))
		return false;

	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = pfn_to_page(__phys_to_pfn(phys));
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

bool dma_free_from_pool(void *start, size_t size)
{
	if (!dma_in_atomic_pool(start, size))
		return false;
	gen_pool_free(atomic_pool, (unsigned long)start, size);
	return true;
}
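
/*
 * Sketch: a round trip through the pool from atomic context.  Note that
 * the gfp flags argument is accepted but unused by dma_alloc_from_pool()
 * here; placement was fixed when the pool was populated at boot.
 */
static void example_pool_roundtrip(void)
{
	struct page *page;
	void *buf = dma_alloc_from_pool(PAGE_SIZE, &page, GFP_ATOMIC);

	if (buf)
		dma_free_from_pool(buf, PAGE_SIZE);
}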

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flags, unsigned long attrs)
{
	struct page *page = NULL;
	void *ret;

	size = PAGE_ALIGN(size);

	if (!gfpflags_allow_blocking(flags)) {
		ret = dma_alloc_from_pool(size, &page, flags);
		if (!ret)
			return NULL;
		goto done;
	}

	page = __dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
	if (!page)
		return NULL;

	/* remove any dirty cache lines on the kernel alias */
	arch_dma_prep_coherent(page, size);

	/* create a coherent mapping */
	ret = dma_common_contiguous_remap(page, size,
			dma_pgprot(dev, PAGE_KERNEL, attrs),
			__builtin_return_address(0));
	if (!ret) {
		__dma_direct_free_pages(dev, size, page);
		return ret;
	}

	memset(ret, 0, size);
done:
	*dma_handle = phys_to_dma(dev, page_to_phys(page));
	return ret;
}
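
/*
 * Sketch (hypothetical driver code): on architectures selecting
 * CONFIG_DMA_DIRECT_REMAP, a non-blocking dma_alloc_coherent() call
 * ends up in arch_dma_alloc() above and is served from the atomic pool.
 */
static void *example_atomic_coherent(struct device *dev, dma_addr_t *dma)
{
	return dma_alloc_coherent(dev, SZ_4K, dma, GFP_ATOMIC);
}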

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	if (!dma_free_from_pool(vaddr, PAGE_ALIGN(size))) {
		phys_addr_t phys = dma_to_phys(dev, dma_handle);
		struct page *page = pfn_to_page(__phys_to_pfn(phys));

		vunmap(vaddr);
		__dma_direct_free_pages(dev, size, page);
	}
}
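
/*
 * The matching teardown for the sketch above: dma_free_coherent()
 * reaches arch_dma_free(), which either hands pool memory back to the
 * genpool or unmaps the remapped region and frees the direct allocation.
 */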

long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
		dma_addr_t dma_addr)
{
	return __phys_to_pfn(dma_to_phys(dev, dma_addr));
}
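
/*
 * This PFN translation backs the generic mmap/get_sgtable helpers, so
 * user-space mappings of a coherent buffer are built from it.
 */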
#endif /* CONFIG_DMA_DIRECT_REMAP */