This source file includes the following definitions:
- page_set_nocache
- page_clear_nocache
- arch_dma_alloc
- arch_dma_free
- arch_sync_dma_for_device
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18 #include <linux/dma-noncoherent.h>
19 #include <linux/pagewalk.h>
20
21 #include <asm/cpuinfo.h>
22 #include <asm/spr_defs.h>
23 #include <asm/tlbflush.h>
24
/*
 * Mark one kernel-mapping PTE as cache-inhibited and push its cached
 * data out to memory.
 *
 * Invoked via walk_page_range() for each PTE in a range being turned
 * into an uncached DMA mapping.  Always returns 0 so the walk covers
 * the whole range.
 */
static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Invalidate the page's TLB entry so the CI bit takes effect on
	 * the next CPU access.
	 */
	flush_tlb_page(NULL, addr);

	/* Flush the page out of the dcache, one cache block at a time. */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}
46
/* Page-walk callbacks used by arch_dma_alloc() to uncache a range. */
static const struct mm_walk_ops set_nocache_walk_ops = {
	.pte_entry = page_set_nocache,
};
50
/*
 * Restore a normal (cached) mapping for one PTE: clear the
 * cache-inhibit bit and drop the stale TLB entry.  Mirrors
 * page_set_nocache(); used when a DMA allocation is freed.
 */
static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/* Invalidate the TLB entry so the cleared CI bit takes effect. */
	flush_tlb_page(NULL, addr);

	return 0;
}
65
/* Page-walk callbacks used by arch_dma_free() to re-cache a range. */
static const struct mm_walk_ops clear_nocache_walk_ops = {
	.pte_entry = page_clear_nocache,
};
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86 void *
87 arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
88 gfp_t gfp, unsigned long attrs)
89 {
90 unsigned long va;
91 void *page;
92
93 page = alloc_pages_exact(size, gfp | __GFP_ZERO);
94 if (!page)
95 return NULL;
96
97
98 *dma_handle = __pa(page);
99
100 va = (unsigned long)page;
101
102
103
104
105
106 if (walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
107 NULL)) {
108 free_pages_exact(page, size);
109 return NULL;
110 }
111
112 return (void *)va;
113 }
114
/*
 * Free a buffer obtained from arch_dma_alloc(): restore cached
 * mappings over the range, then release the pages.  The walk already
 * succeeded over this exact range at allocation time, so a failure
 * here is unexpected and only triggers a warning.
 */
void
arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long va = (unsigned long)vaddr;

	WARN_ON(walk_page_range(&init_mm, va, va + size,
			&clear_nocache_walk_ops, NULL));

	free_pages_exact(vaddr, size);
}
127
128 void arch_sync_dma_for_device(struct device *dev, phys_addr_t addr, size_t size,
129 enum dma_data_direction dir)
130 {
131 unsigned long cl;
132 struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
133
134 switch (dir) {
135 case DMA_TO_DEVICE:
136
137 for (cl = addr; cl < addr + size;
138 cl += cpuinfo->dcache_block_size)
139 mtspr(SPR_DCBFR, cl);
140 break;
141 case DMA_FROM_DEVICE:
142
143 for (cl = addr; cl < addr + size;
144 cl += cpuinfo->dcache_block_size)
145 mtspr(SPR_DCBIR, cl);
146 break;
147 default:
148
149
150
151
152
153 break;
154 }
155 }