This source file includes the following definitions:
- show_pools
- dma_pool_create
- pool_initialise_page
- pool_alloc_page
- is_page_busy
- pool_free_page
- dma_pool_destroy
- dma_pool_alloc
- pool_find_page
- dma_pool_free
- dmam_pool_release
- dmam_pool_match
- dmam_pool_create
- dmam_pool_destroy
/*
 * DMA Pool allocator
 *
 * Carves pages of DMA-coherent memory into small, fixed-size blocks,
 * so drivers that need many tiny consistent buffers (descriptors,
 * transfer headers) don't burn a whole page apiece.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;	/* pages backing the pool */
	spinlock_t lock;		/* protects page_list and pages */
	size_t size;			/* size of one block */
	struct device *dev;		/* device doing the DMA */
	size_t allocation;		/* bytes per backing page */
	size_t boundary;		/* blocks never cross this boundary */
	char name[32];			/* pool name, for diagnostics */
	struct list_head pools;		/* node in dev->dma_pools */
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;	/* node in pool->page_list */
	void *vaddr;			/* kernel virtual address */
	dma_addr_t dma;			/* DMA (bus) address */
	unsigned int in_use;		/* blocks currently allocated */
	unsigned int offset;		/* head of the free-block chain */
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, 0444, show_pools, NULL);
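
/*
 * Reading the resulting sysfs attribute (e.g. /sys/devices/.../pools,
 * path hypothetical) yields the header plus one line per pool:
 *
 *	poolinfo - 0.1
 *	buffer-2048         12   32 2048  2
 *
 * i.e. name, blocks in use, total blocks, block size, and page count,
 * per the scnprintf() format above; the sample values are illustrative.
 */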

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc() may be used to allocate memory.
 * Such memory will all have "consistent" DMA mappings, accessible by the
 * device and its driver without using cache flushing primitives.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary. This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not
 * crossing boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0)
		return NULL;
	else if (size < 4)
		size = 4;

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get
	 * corrupted.  pools_reg_lock ensures that there is not a race
	 * between dma_pool_create() and dma_pool_destroy(), or within
	 * dma_pool_create() itself, when it creates the sysfs entry.
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
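
/*
 * Illustrative usage (a minimal sketch, not part of this file): a driver
 * typically creates one pool per object type at probe time.  The device
 * pointer, name, and sizes below are hypothetical.
 *
 *	struct dma_pool *pool;
 *
 *	pool = dma_pool_create("qtd", &pdev->dev, 64, 32, 4096);
 *	if (!pool)
 *		return -ENOMEM;
 *
 * Here blocks are 64 bytes, aligned to 32 bytes, and never cross a 4 KB
 * boundary; pair this with dma_pool_destroy() at remove time.
 */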

/*
 * Thread the page's blocks into a free chain: each free block stores the
 * offset of the next free block, jumping ahead where needed so that no
 * block straddles a 'boundary' line.
 */
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
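
/*
 * Worked example (values hypothetical): with size = 96, boundary = 256
 * and allocation = 4096, the chain built above runs
 * 0 -> 96 -> 256 -> 352 -> 512 -> ...; a block at offset 192 is skipped
 * because 192 + 96 = 288 would cross the 256-byte boundary line.
 */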

/*
 * Allocate a new backing page of coherent memory and thread its
 * free-block chain.
 */
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	bool empty = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				pr_err("dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
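
/*
 * Note the behaviour above: if a page still has blocks in use, the error
 * is logged and only the struct dma_page bookkeeping is freed; the
 * coherent memory itself is deliberately not returned to the system
 * while a device might still be addressing it.
 */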

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(gfpflags_allow_blocking(mem_flags));

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in the first 4 bytes of the block */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_alloc %s, %p (corrupted)\n",
					pool->name, retval);
			else
				pr_err("dma_pool_alloc %s, %p (corrupted)\n",
				       pool->name, retval);

			/*
			 * Dump the whole block so the corruption is
			 * visible in the log, even the first 4 bytes
			 * that are not checked above.
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
				       data, pool->size, 1);
			break;
		}
	}
	if (!(mem_flags & __GFP_ZERO))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (want_init_on_alloc(mem_flags))
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
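
/*
 * Illustrative usage (a minimal sketch, not part of this file; 'pool' as
 * created in the earlier hypothetical example).  The driver writes
 * through 'cpu' while the device addresses the block via 'dma':
 *
 *	dma_addr_t dma;
 *	void *cpu;
 *
 *	cpu = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (!cpu)
 *		return -ENOMEM;
 */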

/*
 * Map a dma address back to the page it was allocated from, by a linear
 * walk of the pool's page list.
 */
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			pr_err("dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
#ifdef DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%pad\n",
				pool->name, vaddr, &dma);
		else
			pr_err("dma_pool_free %s, %p (bad vaddr)/%pad\n",
			       pool->name, vaddr, &dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %pad already free\n",
					pool->name, &dma);
			else
				pr_err("dma_pool_free %s, dma %pad already free\n",
				       pool->name, &dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
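
/*
 * Illustrative usage (sketch, continuing the earlier hypothetical
 * example): both values returned by dma_pool_alloc() must be passed
 * back, since debug builds cross-check the vaddr/dma pair above:
 *
 *	dma_pool_free(pool, cpu, dma);
 */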

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  A DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
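
/*
 * Illustrative usage (sketch, hypothetical names): the devres-managed
 * variant needs no matching destroy call in the driver's remove path:
 *
 *	pool = dmam_pool_create("rx_bufs", &pdev->dev, 512, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 */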

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);
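
/*
 * Note (not from this file): calling dmam_pool_destroy() explicitly is
 * rarely needed, since devres runs dmam_pool_release() automatically on
 * driver detach; it exists for drivers that must tear a pool down early.
 */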