This source file includes following definitions.
- arch_dma_alloc
- arch_dma_free
- arch_sync_dma_for_device
1
2
3
4
5 #include <linux/mm.h>
6 #include <linux/init.h>
7 #include <linux/dma-noncoherent.h>
8 #include <linux/module.h>
9 #include <asm/cacheflush.h>
10 #include <asm/addrspace.h>
11
/*
 * Allocate a coherent DMA buffer.
 *
 * Strategy: grab zeroed, naturally cache-coherent pages, push any dirty
 * cache lines back to memory, then hand the caller an uncached mapping of
 * the same physical range via ioremap_nocache().  The CPU-visible return
 * value is the *uncached* alias; the cached linear-map address is only
 * used internally for the physical address and for freeing.
 *
 * Returns the uncached virtual address, or NULL on failure.
 */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	void *ret, *ret_nocache;
	int order = get_order(size);

	/* DMA buffers must hand out zeroed memory. */
	gfp |= __GFP_ZERO;

	ret = (void *)__get_free_pages(gfp, order);
	if (!ret)
		return NULL;

	/*
	 * Pages from the page allocator may have data still in the cache,
	 * so flush/invalidate them before the device (or the uncached
	 * alias below) can observe stale contents.
	 */
	arch_sync_dma_for_device(dev, virt_to_phys(ret), size,
			DMA_BIDIRECTIONAL);

	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
	if (!ret_nocache) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

	/*
	 * Split the high-order allocation into individual pages so that
	 * arch_dma_free() can release them one at a time with
	 * __free_pages(..., 0).
	 */
	split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);

	/* Report the bus address, adjusted by the device's PFN offset. */
	*dma_handle = virt_to_phys(ret);
	if (!WARN_ON(!dev))
		*dma_handle -= PFN_PHYS(dev->dma_pfn_offset);

	return ret_nocache;
}
45
/*
 * Free a buffer obtained from arch_dma_alloc().
 *
 * Reconstructs the physical PFN from the bus address (undoing the
 * dma_pfn_offset adjustment made at allocation time), releases each page
 * individually — the allocation was split_page()'d in arch_dma_alloc(),
 * so order-0 frees are required — and finally tears down the uncached
 * ioremap alias that was handed to the caller as @vaddr.
 */
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	int order = get_order(size);
	unsigned long pfn = (dma_handle >> PAGE_SHIFT);
	int k;

	/* Translate the bus address back to a physical PFN. */
	if (!WARN_ON(!dev))
		pfn += dev->dma_pfn_offset;

	/* Pages were split at alloc time, so free them one by one. */
	for (k = 0; k < (1 << order); k++)
		__free_pages(pfn_to_page(pfn + k), 0);

	/* Drop the uncached mapping created by ioremap_nocache(). */
	iounmap(vaddr);
}
61
62 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
63 size_t size, enum dma_data_direction dir)
64 {
65 void *addr = sh_cacheop_vaddr(phys_to_virt(paddr));
66
67 switch (dir) {
68 case DMA_FROM_DEVICE:
69 __flush_invalidate_region(addr, size);
70 break;
71 case DMA_TO_DEVICE:
72 __flush_wback_region(addr, size);
73 break;
74 case DMA_BIDIRECTIONAL:
75 __flush_purge_region(addr, size);
76 break;
77 default:
78 BUG();
79 }
80 }