This source file includes the following definitions:
- arch_sync_dma_for_device
- arch_sync_dma_for_cpu
- arch_dma_prep_coherent
- uncached_kernel_address
- cached_kernel_address
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/cache.h>
#include <asm/cacheflush.h>

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	void *vaddr = phys_to_virt(paddr);

	switch (dir) {
	case DMA_FROM_DEVICE:
		invalidate_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	case DMA_TO_DEVICE:
		/*
		 * Only a writeback is needed here, but the Nios II flush
		 * both writes back and invalidates, so fall through to the
		 * bidirectional case.
		 */
	case DMA_BIDIRECTIONAL:
		flush_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	default:
		BUG();
	}
}

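For context, a hedged sketch of how this hook is typically reached: on a cache-incoherent Nios II system, the generic dma-direct code calls arch_sync_dma_for_device() when a driver maps a streaming buffer. The device, buffer, and function name below are illustrative and not part of this file.

static int example_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/*
	 * Writes the cached buffer back to memory before the device reads
	 * it; on this arch that ends up in arch_sync_dma_for_device().
	 */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... hand `handle` to the device and start the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
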
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	void *vaddr = phys_to_virt(paddr);

	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		invalidate_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	case DMA_TO_DEVICE:
		/* Nothing to do: the device only read the buffer. */
		break;
	default:
		BUG();
	}
}

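Conversely, arch_sync_dma_for_cpu() is what runs underneath dma_sync_single_for_cpu() or dma_unmap_single() for a DMA_FROM_DEVICE mapping. A hedged, illustrative sketch (the function name and buffer are assumed to come from a calling driver):

static void example_complete_rx(struct device *dev, dma_addr_t handle,
		void *buf, size_t len)
{
	/*
	 * Invalidates the cached copy of `buf` so the CPU sees the data the
	 * device just wrote (arch_sync_dma_for_cpu() underneath).
	 */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... inspect buf ... */

	/* Give ownership of the buffer back to the device. */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}
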
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	unsigned long start = (unsigned long)page_address(page);

	/* Write back and invalidate any cached lines covering the buffer. */
	flush_dcache_range(start, start + size);
}

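arch_dma_prep_coherent() and the two address helpers below sit on the coherent-allocation path; from a driver's point of view this is simply dma_alloc_coherent(). A minimal sketch, with a hypothetical function name:

static void *example_alloc_ring(struct device *dev, size_t size,
		dma_addr_t *dma)
{
	/*
	 * The generic allocator flushes the freshly allocated pages
	 * (arch_dma_prep_coherent) and returns a CPU pointer aliased
	 * through the uncached I/O region (uncached_kernel_address).
	 */
	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}
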
void *uncached_kernel_address(void *ptr)
{
	unsigned long addr = (unsigned long)ptr;

	/* Alias the buffer through the uncached I/O region. */
	addr |= CONFIG_NIOS2_IO_REGION_BASE;

	return (void *)addr;
}

void *cached_kernel_address(void *ptr)
{
	unsigned long addr = (unsigned long)ptr;

	/* Drop the uncached alias and map back into the cached kernel region. */
	addr &= ~CONFIG_NIOS2_IO_REGION_BASE;
	addr |= CONFIG_NIOS2_KERNEL_REGION_BASE;

	return (void *)addr;
}
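
To illustrate the aliasing arithmetic above, assume the common Nios II layout of CONFIG_NIOS2_KERNEL_REGION_BASE = 0xc0000000 and CONFIG_NIOS2_IO_REGION_BASE = 0xe0000000; both are board-configurable, so treat these values only as an example:

/*
 * cached kernel alias:        0xc0123000
 * uncached_kernel_address():  0xc0123000 |  0xe0000000              = 0xe0123000
 * cached_kernel_address():   (0xe0123000 & ~0xe0000000) | 0xc0000000 = 0xc0123000
 *
 * Both addresses refer to the same physical memory; only the caching
 * attribute of the access differs.
 */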