This source file includes the following definitions:
- ioremap_page
- unmap_area_sections
- remap_area_sections
- __uc32_ioremap_pfn_caller
- __uc32_ioremap_caller
- __uc32_ioremap_pfn
- __uc32_ioremap
- __uc32_ioremap_cached
- __uc32_iounmap
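
A minimal driver-side usage sketch of the mapping helpers defined below; the physical base address, mapping size, and register offset are illustrative assumptions, not values taken from this file:

        /* Hypothetical caller: map a device, write one register, unmap. */
        void __iomem *regs;

        regs = __uc32_ioremap(0xee000000UL, SZ_4K); /* assumed device base and size */
        if (regs) {
                writel(0x1, regs + 0x04);           /* assumed register offset */
                __uc32_iounmap(regs);
        }
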
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>

#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#include <mach/map.h>
#include "mm.h"

/*
 * Flag set in vm_struct->flags to mark I/O regions that were mapped
 * with 4MB section entries by the ioremap code below.
 */
#define VM_UNICORE_SECTION_MAPPING      0x80000000

/*
 * Map a single page of I/O memory: install one PTE for virt -> phys
 * with the protection bits supplied by mtype.
 */
int ioremap_page(unsigned long virt, unsigned long phys,
                 const struct mem_type *mtype)
{
        return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
                                  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

/*
 * Section mappings are created and torn down 4MB at a time, bypassing
 * the page tables that vmalloc/ioremap normally use, so they need
 * dedicated setup and teardown helpers.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
        unsigned long addr = virt, end = virt + (size & ~(SZ_4M - 1));
        pgd_t *pgd;

        flush_cache_vunmap(addr, end);
        pgd = pgd_offset_k(addr);
        do {
                pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr);

                pmd = *pmdp;
                if (!pmd_none(pmd)) {
                        /*
                         * Clear the PMD from the page table so the
                         * section mapping disappears.
                         */
                        pmd_clear(pmdp);

                        /*
                         * Free the page table, if the PMD pointed at
                         * one rather than at a section mapping.
                         */
                        if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
                                pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
                }

                addr += PGDIR_SIZE;
                pgd++;
        } while (addr < end);

        flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
                    size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;

        /*
         * Remove and free any PTE-based mapping that may already exist
         * in this range before installing section entries.
         */
        unmap_area_sections(virt, size);

        pgd = pgd_offset_k(addr);
        do {
                pmd_t *pmd = pmd_offset((pud_t *)pgd, addr);

                set_pmd(pmd, __pmd(__pfn_to_phys(pfn) | type->prot_sect));
                pfn += SZ_4M >> PAGE_SHIFT;
                flush_pmd_entry(pmd);

                addr += PGDIR_SIZE;
                pgd++;
        } while (addr < end);

        return 0;
}

void __iomem *__uc32_ioremap_pfn_caller(unsigned long pfn,
        unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
        const struct mem_type *type;
        int err;
        unsigned long addr;
        struct vm_struct *area;

        /*
         * High mappings must be section aligned.
         */
        if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SECTION_MASK))
                return NULL;

        /*
         * Don't allow RAM to be mapped.
         */
        if (pfn_valid(pfn)) {
                WARN(1, "BUG: Your driver calls ioremap() on\n"
                        "system memory. This leads to architecturally\n"
                        "unpredictable behaviour, and ioremap() will fail in\n"
                        "the next kernel release. Please fix your driver.\n");
                return NULL;
        }

        type = get_mem_type(mtype);
        if (!type)
                return NULL;

        /*
         * Page align the mapping size, taking account of any offset.
         */
        size = PAGE_ALIGN(offset + size);

        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        addr = (unsigned long)area->addr;

        /*
         * Use section mappings when the physical address, size and
         * virtual address are all section aligned; otherwise fall back
         * to ordinary page mappings.
         */
        if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
                area->flags |= VM_UNICORE_SECTION_MAPPING;
                err = remap_area_sections(addr, pfn, size, type);
        } else
                err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
                                         __pgprot(type->prot_pte));

        if (err) {
                vunmap((void *)addr);
                return NULL;
        }

        flush_cache_vmap(addr, addr + size);
        return (void __iomem *) (offset + addr);
}

void __iomem *__uc32_ioremap_caller(unsigned long phys_addr, size_t size,
        unsigned int mtype, void *caller)
{
        unsigned long last_addr;
        unsigned long offset = phys_addr & ~PAGE_MASK;
        unsigned long pfn = __phys_to_pfn(phys_addr);

        /*
         * Don't allow wraparound or zero size.
         */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype, caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__uc32_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
                   unsigned int mtype)
{
        return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype,
                        __builtin_return_address(0));
}
EXPORT_SYMBOL(__uc32_ioremap_pfn);

void __iomem *
__uc32_ioremap(unsigned long phys_addr, size_t size)
{
        return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE,
                        __builtin_return_address(0));
}
EXPORT_SYMBOL(__uc32_ioremap);

void __iomem *
__uc32_ioremap_cached(unsigned long phys_addr, size_t size)
{
        return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE_CACHED,
                        __builtin_return_address(0));
}
EXPORT_SYMBOL(__uc32_ioremap_cached);

void __uc32_iounmap(volatile void __iomem *io_addr)
{
        void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
        struct vm_struct *vm;

        /*
         * If this is a section-based mapping, tear down the section
         * entries ourselves; the VM subsystem only knows how to unmap
         * page-based mappings.
         */
        vm = find_vm_area(addr);
        if (vm && (vm->flags & VM_IOREMAP) &&
                (vm->flags & VM_UNICORE_SECTION_MAPPING))
                unmap_area_sections((unsigned long)vm->addr, vm->size);

        vunmap(addr);
}
EXPORT_SYMBOL(__uc32_iounmap);