This source file includes the following definitions (a brief usage sketch follows the list):
- find_static_vm_paddr
- find_static_vm_vaddr
- add_static_vm_early
- ioremap_page
- __check_vmalloc_seq
- unmap_area_sections
- remap_area_sections
- remap_area_supersections
- __arm_ioremap_pfn_caller
- __arm_ioremap_caller
- __arm_ioremap_pfn
- ioremap
- ioremap_cache
- ioremap_cached
- ioremap_wc
- __arm_ioremap_exec
- arch_memremap_wb
- __iounmap
- iounmap
- pci_ioremap_set_mem_type
- pci_ioremap_io
- pci_remap_cfgspace
- early_ioremap_init
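
A typical consumer of this API maps a device's MMIO window once, accesses it only through the read*/write* accessors, and unmaps it on teardown. A minimal sketch, assuming a hypothetical device at EXAMPLE_PHYS_BASE with a control register at offset 0x04:

static int example_probe(void)
{
	void __iomem *regs;

	/* Request a device-type (MT_DEVICE) mapping of one page. */
	regs = ioremap(EXAMPLE_PHYS_BASE, SZ_4K);
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x04);	/* poke the control register */

	iounmap(regs);			/* release the mapping */
	return 0;
}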
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/early_ioremap.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"

LIST_HEAD(static_vmlist);

static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
			size_t size, unsigned int mtype)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;

		if (vm->phys_addr > paddr ||
			paddr + size - 1 > vm->phys_addr + vm->size - 1)
			continue;

		return svm;
	}

	return NULL;
}

struct static_vm *find_static_vm_vaddr(void *vaddr)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;

		/* static_vmlist is kept sorted in ascending vaddr order */
		if (vm->addr > vaddr)
			break;

		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
			return svm;
	}

	return NULL;
}

void __init add_static_vm_early(struct static_vm *svm)
{
	struct static_vm *curr_svm;
	struct vm_struct *vm;
	void *vaddr;

	vm = &svm->vm;
	vm_area_add_early(vm);
	vaddr = vm->addr;

	list_for_each_entry(curr_svm, &static_vmlist, list) {
		vm = &curr_svm->vm;

		/* find the insertion point that keeps the list sorted */
		if (vm->addr > vaddr)
			break;
	}
	list_add_tail(&svm->list, &curr_svm->list);
}

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

/*
 * Copy the kernel-side vmalloc area PGD entries from init_mm into
 * @mm, retrying until init_mm's vmalloc sequence number is seen to
 * be stable so that a concurrent update cannot be missed.
 */
void __check_vmalloc_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.vmalloc_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.vmalloc_seq = seq;
	} while (seq != init_mm.context.vmalloc_seq);
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)

/*
 * Section mapping support is only safe on uniprocessor, non-LPAE
 * kernels: on SMP, if a region is iounmapped and then remapped,
 * other CPUs may keep using a stale copy of the section mapping
 * until their next context switch, and LPAE uses a different page
 * table format for which this pmd manipulation is not valid.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the vmalloc sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.vmalloc_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-unmap.
	 */
	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
		__check_vmalloc_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif
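
/*
 * Worked example for the supersection encoding above (illustrative):
 * a supersection maps 16MB and extends physical addressing beyond
 * 32 bits, placing PA[31:24] in descriptor bits [31:24] and PA[35:32]
 * in bits [23:20]. With 4K pages, pfn >> (32 - PAGE_SHIFT) is
 * pfn >> 20, which is exactly PA[35:32]: e.g. pfn 0x180000 is
 * paddr 0x1_8000_0000, so (pfn >> 20) & 0xf == 0x1 lands in bits
 * [23:20]. The loop's 8 iterations of two pmd writes replicate the
 * descriptor across the 16 consecutive first-level entries covering
 * the 16MB range, as the architecture requires.
 */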

static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;
	phys_addr_t paddr = __pfn_to_phys(pfn);

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
		struct static_vm *svm;

		svm = find_static_vm_paddr(paddr, size, mtype);
		if (svm) {
			addr = (unsigned long)svm->vm.addr;
			addr += paddr - svm->vm.phys_addr;
			return (void __iomem *) (offset + addr);
		}
	}

	/*
	 * Don't allow RAM to be mapped with mismatched attributes - this
	 * causes problems with ARMv6+.
	 */
	if (WARN_ON(pfn_valid(pfn) && mtype != MT_MEMORY_RW))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = paddr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	      cpu_is_xsc3()) && pfn >= 0x100000 &&
	    !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, paddr,
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	phys_addr_t last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
	__alias(ioremap_cached);

void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
EXPORT_SYMBOL(ioremap_cached);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
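
/*
 * Illustrative only: the variant chosen should match the device's
 * access semantics. Registers want the default device type, while a
 * streaming buffer such as a framebuffer is usually better served by
 * a write-combining mapping ("reg_base", "fb_base" and "fb_size" are
 * hypothetical):
 *
 *	void __iomem *regs = ioremap(reg_base, SZ_4K);
 *	void __iomem *fb = ioremap_wc(fb_base, fb_size);
 *
 * Both returns must be checked for NULL and both mappings released
 * with iounmap().
 */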

/*
 * Remap physical memory with an executable memory type. Needed when
 * the kernel wants to run code placed in memory outside the normal
 * linear mapping (e.g. SRAM).
 */
void __iomem *
__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY_RWX;
	else
		mtype = MT_MEMORY_RWX_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
	return (__force void *)arch_ioremap_caller(phys_addr, size,
						   MT_MEMORY_RW,
						   __builtin_return_address(0));
}

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct static_vm *svm;

	/* If this is a static mapping, we must leave it alone */
	svm = find_static_vm_vaddr(addr);
	if (svm)
		return;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	{
		struct vm_struct *vm;

		vm = find_vm_area(addr);

		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
			unmap_area_sections((unsigned long)vm->addr, vm->size);
	}
#endif

	vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void iounmap(volatile void __iomem *cookie)
{
	arch_iounmap(cookie);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_PCI
static int pci_ioremap_mem_type = MT_DEVICE;

void pci_ioremap_set_mem_type(int mem_type)
{
	pci_ioremap_mem_type = mem_type;
}

int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
	BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);

	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
				  PCI_IO_VIRT_BASE + offset + SZ_64K,
				  phys_addr,
				  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);

void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif
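
/*
 * Illustrative only: a PCI host controller driver would typically call
 * pci_ioremap_io() to map one 64K I/O window at offset 0 of the PCI
 * I/O virtual area ("EXAMPLE_IO_PHYS" is hypothetical):
 *
 *	ret = pci_ioremap_io(0, EXAMPLE_IO_PHYS);
 *	if (ret)
 *		return ret;
 *
 * inb()/outb() on port numbers falling inside that 64K window then
 * resolve to the mapped physical region.
 */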

/*
 * Must be called after early_fixmap_init().
 */
void __init early_ioremap_init(void)
{
	early_ioremap_setup();
}