This source file includes the following definitions:
- noalign_setup
- adjust_cr
- get_mem_type
- build_mem_type_table
- early_pte_alloc
- alloc_init_pte
- alloc_init_section
- create_mapping
- early_vmalloc
- sanity_check_meminfo
- prepare_page_table
- uc32_mm_memblock_reserve
- devicemaps_init
- map_lowmem
- paging_init
- setup_mm_for_reboot
- update_mmu_cache
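
/*
 * MMU and page-table setup for the UniCore32 (PKUnity) architecture:
 * memory-type tables, kernel/lowmem mappings, and the vectors page.
 */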
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>

#include <mach/map.h>

#include "mm.h"

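/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */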
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

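/* Cached pmd covering the vectors page at 0xffff0000 (set up in paging_init()). */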
pmd_t *top_pmd;

pgprot_t pgprot_user;
EXPORT_SYMBOL(pgprot_user);

pgprot_t pgprot_kernel;
EXPORT_SYMBOL(pgprot_kernel);

static int __init noalign_setup(char *__unused)
{
        cr_alignment &= ~CR_A;
        cr_no_alignment &= ~CR_A;
        set_cr(cr_alignment);
        return 1;
}
__setup("noalign", noalign_setup);

void adjust_cr(unsigned long mask, unsigned long set)
{
        unsigned long flags;

        mask &= ~CR_A;

        set &= mask;

        local_irq_save(flags);

        cr_no_alignment = (cr_no_alignment & ~mask) | set;
        cr_alignment = (cr_alignment & ~mask) | set;

        set_cr((get_cr() & ~mask) | set);

        local_irq_restore(flags);
}

struct map_desc {
        unsigned long virtual;
        unsigned long pfn;
        unsigned long length;
        unsigned int type;
};

#define PROT_PTE_DEVICE         (PTE_PRESENT | PTE_YOUNG | \
                                 PTE_DIRTY | PTE_READ | PTE_WRITE)
#define PROT_SECT_DEVICE        (PMD_TYPE_SECT | PMD_PRESENT | \
                                 PMD_SECT_READ | PMD_SECT_WRITE)

static struct mem_type mem_types[] = {
        [MT_DEVICE] = {
                .prot_pte = PROT_PTE_DEVICE,
                .prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT,
                .prot_sect = PROT_SECT_DEVICE,
        },
        [MT_KUSER] = {
                .prot_pte = PTE_PRESENT | PTE_YOUNG | PTE_DIRTY |
                            PTE_CACHEABLE | PTE_READ | PTE_EXEC,
                .prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT,
                .prot_sect = PROT_SECT_DEVICE,
        },
        [MT_HIGH_VECTORS] = {
                .prot_pte = PTE_PRESENT | PTE_YOUNG | PTE_DIRTY |
                            PTE_CACHEABLE | PTE_READ | PTE_WRITE |
                            PTE_EXEC,
                .prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT,
        },
        [MT_MEMORY] = {
                .prot_pte = PTE_PRESENT | PTE_YOUNG | PTE_DIRTY |
                            PTE_WRITE | PTE_EXEC,
                .prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT,
                .prot_sect = PMD_TYPE_SECT | PMD_PRESENT | PMD_SECT_CACHEABLE |
                             PMD_SECT_READ | PMD_SECT_WRITE | PMD_SECT_EXEC,
        },
        [MT_ROM] = {
                .prot_sect = PMD_TYPE_SECT | PMD_PRESENT | PMD_SECT_CACHEABLE |
                             PMD_SECT_READ,
        },
};

const struct mem_type *get_mem_type(unsigned int type)
{
        return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);

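/*
 * Set up the default page protection values for userspace
 * (pgprot_user) and kernel (pgprot_kernel) mappings.
 */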
static void __init build_mem_type_table(void)
{
        pgprot_user = __pgprot(PTE_PRESENT | PTE_YOUNG | PTE_CACHEABLE);
        pgprot_kernel = __pgprot(PTE_PRESENT | PTE_YOUNG |
                                 PTE_DIRTY | PTE_READ | PTE_WRITE |
                                 PTE_EXEC | PTE_CACHEABLE);
}

#define vectors_base()  (vectors_high() ? 0xffff0000 : 0)

static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
                                      unsigned long prot)
{
        if (pmd_none(*pmd)) {
                size_t size = PTRS_PER_PTE * sizeof(pte_t);
                pte_t *pte = memblock_alloc(size, size);

                if (!pte)
                        panic("%s: Failed to allocate %zu bytes align=%zx\n",
                              __func__, size, size);

                __pmd_populate(pmd, __pa(pte) | prot);
        }
        BUG_ON(pmd_bad(*pmd));
        return pte_offset_kernel(pmd, addr);
}

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, unsigned long pfn,
                                  const struct mem_type *type)
{
        pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
        do {
                set_pte(pte, pfn_pte(pfn, __pgprot(type->prot_pte)));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
                                      unsigned long end, unsigned long phys,
                                      const struct mem_type *type)
{
        pmd_t *pmd = pmd_offset((pud_t *)pgd, addr);

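        /*
         * Try a section mapping: addr, end and phys must all be
         * aligned to a section boundary; otherwise fall back to
         * page-sized mappings.
         */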
        if (((addr | end | phys) & ~SECTION_MASK) == 0) {
                pmd_t *p = pmd;

                do {
                        set_pmd(pmd, __pmd(phys | type->prot_sect));
                        phys += SECTION_SIZE;
                } while (pmd++, addr += SECTION_SIZE, addr != end);

                flush_pmd_entry(p);
        } else {
                alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
        }
}

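/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections.
 */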
static void __init create_mapping(struct map_desc *md)
{
        unsigned long phys, addr, length, end;
        const struct mem_type *type;
        pgd_t *pgd;

        if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
                printk(KERN_WARNING "BUG: not creating mapping for "
                       "0x%08llx at 0x%08lx in user region\n",
                       __pfn_to_phys((u64)md->pfn), md->virtual);
                return;
        }

        if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
            md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
                printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
                       "overlaps vmalloc space\n",
                       __pfn_to_phys((u64)md->pfn), md->virtual);
        }

        type = &mem_types[md->type];

        addr = md->virtual & PAGE_MASK;
        phys = (unsigned long)__pfn_to_phys(md->pfn);
        length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

        if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
                printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
                       "be mapped using pages, ignoring.\n",
                       __pfn_to_phys(md->pfn), addr);
                return;
        }

        pgd = pgd_offset_k(addr);
        end = addr + length;
        do {
                unsigned long next = pgd_addr_end(addr, end);

                alloc_init_section(pgd, addr, next, phys, type);

                phys += next - addr;
                addr = next;
        } while (pgd++, addr != end);
}

static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);

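/*
 * vmalloc=size forces the vmalloc area to be exactly 'size' bytes.
 * This can be used to increase (or decrease) the vmalloc area;
 * the default is 128 MiB.
 */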
static int __init early_vmalloc(char *arg)
{
        unsigned long vmalloc_reserve = memparse(arg, NULL);

        if (vmalloc_reserve < SZ_16M) {
                vmalloc_reserve = SZ_16M;
                printk(KERN_WARNING
                       "vmalloc area too small, limiting to %luMB\n",
                       vmalloc_reserve >> 20);
        }

        if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
                vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
                printk(KERN_WARNING
                       "vmalloc area is too big, limiting to %luMB\n",
                       vmalloc_reserve >> 20);
        }

        vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
        return 0;
}
early_param("vmalloc", early_vmalloc);

static phys_addr_t lowmem_limit __initdata = SZ_1G;

static void __init sanity_check_meminfo(void)
{
        int i, j;

        lowmem_limit = __pa(vmalloc_min - 1) + 1;
        memblock_set_current_limit(lowmem_limit);

        for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
                struct membank *bank = &meminfo.bank[j];
                *bank = meminfo.bank[i];
                j++;
        }
        meminfo.nr_banks = j;
}

static inline void prepare_page_table(void)
{
        unsigned long addr;
        phys_addr_t end;

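        /*
         * Clear out all the mappings below the kernel image.
         */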
        for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));

        for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));

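        /*
         * Find the end of the first block of lowmem.
         */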
        end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
        if (end >= lowmem_limit)
                end = lowmem_limit;

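        /*
         * Clear out all the kernel space mappings, except for the first
         * memory bank, up to the end of the vmalloc region.
         */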
        for (addr = __phys_to_virt(end);
             addr < VMALLOC_END; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));
}

void __init uc32_mm_memblock_reserve(void)
{
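        /*
         * Reserve the initial page tables (swapper_pg_dir);
         * they are already in use.
         */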
        memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));
}

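/*
 * Set up the device mappings.  Since the page tables for all mappings
 * above VMALLOC_END are cleared here, any early debug device mappings
 * in that range are removed as well.
 */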
static void __init devicemaps_init(void)
{
        struct map_desc map;
        unsigned long addr;
        void *vectors;

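        /*
         * Allocate the vectors page early.
         */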
        vectors = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!vectors)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, PAGE_SIZE, PAGE_SIZE);

        for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));

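        /*
         * Create a mapping for the machine vectors at the high-vectors
         * location (VECTORS_BASE).
         */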
        map.pfn = __phys_to_pfn(virt_to_phys(vectors));
        map.virtual = VECTORS_BASE;
        map.length = PAGE_SIZE;
        map.type = MT_HIGH_VECTORS;
        create_mapping(&map);

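        /*
         * Map the same vectors page read-only at KUSER_VECPAGE_BASE
         * so that user space can reach it (MT_KUSER).
         */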
        map.pfn = __phys_to_pfn(virt_to_phys(vectors));
        map.virtual = KUSER_VECPAGE_BASE;
        map.length = PAGE_SIZE;
        map.type = MT_KUSER;
        create_mapping(&map);

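        /*
         * Finally flush the TLB and caches to ensure that we're in a
         * consistent state.
         */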
        local_flush_tlb_all();
        flush_cache_all();
}

static void __init map_lowmem(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg) {
                phys_addr_t start = reg->base;
                phys_addr_t end = start + reg->size;
                struct map_desc map;

                if (end > lowmem_limit)
                        end = lowmem_limit;
                if (start >= end)
                        break;

                map.pfn = __phys_to_pfn(start);
                map.virtual = __phys_to_virt(start);
                map.length = end - start;
                map.type = MT_MEMORY;

                create_mapping(&map);
        }
}

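/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page.
 */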
void __init paging_init(void)
{
        void *zero_page;

        build_mem_type_table();
        sanity_check_meminfo();
        prepare_page_table();
        map_lowmem();
        devicemaps_init();

        top_pmd = pmd_off_k(0xffff0000);

        zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!zero_page)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, PAGE_SIZE, PAGE_SIZE);

        bootmem_init();

        empty_zero_page = virt_to_page(zero_page);
        __flush_dcache_page(NULL, empty_zero_page);
}

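/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This ensures predictable results when the MMU
 * is turned off.
 */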
void setup_mm_for_reboot(void)
{
        unsigned long base_pmdval;
        pgd_t *pgd;
        int i;

        pgd = current->active_mm->pgd;

        base_pmdval = PMD_SECT_WRITE | PMD_SECT_READ | PMD_TYPE_SECT;

        for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
                unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
                pmd_t *pmd;

                pmd = pmd_off(pgd, i << PGDIR_SHIFT);
                set_pmd(pmd, __pmd(pmdval));
                flush_pmd_entry(pmd);
        }

        local_flush_tlb_all();
}

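/*
 * Take care of cache coherency when a PTE is installed or changed:
 * write back any dirty kernel-side D-cache lines for the page, and
 * flush the I-cache for executable mappings of page-cache pages.
 */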
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
                      pte_t *ptep)
{
        unsigned long pfn = pte_pfn(*ptep);
        struct address_space *mapping;
        struct page *page;

        if (!pfn_valid(pfn))
                return;

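        /*
         * The zero page is never written to, so never has any dirty
         * cache lines, and therefore never needs to be flushed.
         */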
        page = pfn_to_page(pfn);
        if (page == ZERO_PAGE(0))
                return;

        mapping = page_mapping_file(page);
        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                __flush_dcache_page(mapping, page);
        if (mapping)
                if (vma->vm_flags & VM_EXEC)
                        __flush_icache_all();
}