This source file includes the following definitions:
- virt_to_kpte
- highmem_init
- highmem_setup
- paging_init
- setup_memory
- mem_init
- page_is_ram
- page_is_ram
- mm_cmdline_setup
- mmu_init_hw
- mmu_init
- early_get_page
- zalloc_maybe_bootmem
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/export.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

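/* Set once mem_init() has completed; later allocations may then use the slab. */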
int mem_init_done;

#ifndef CONFIG_MMU
unsigned int __page_offset;
EXPORT_SYMBOL(__page_offset);
#endif

char *klimit = _end;

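/*
 * Physical start and size of main memory, and the portion of it mapped
 * as lowmem.  Filled in from the memblock regions during early boot.
 */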
unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_size;
EXPORT_SYMBOL(memory_size);
unsigned long lowmem_size;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), vaddr),
				 vaddr);
}

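/*
 * Set up the page table backing the PKMAP area and the kmap fixmap PTE
 * so that kmap()/kmap_atomic() can map highmem pages.
 */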
static void __init highmem_init(void)
{
	pr_debug("%x\n", (u32)PKMAP_BASE);
	map_page(PKMAP_BASE, 0, 0);
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
}

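/* Hand every non-reserved highmem page over to the buddy allocator. */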
static void highmem_setup(void)
{
	unsigned long pfn;

	for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
		struct page *page = pfn_to_page(pfn);

		/* Only pages that memblock has not reserved are freed. */
		if (!memblock_is_reserved(pfn << PAGE_SHIFT))
			free_highmem_page(page);
	}
}
#endif /* CONFIG_HIGHMEM */

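/*
 * paging_init() clears the fixmap entries and tells the zone allocator
 * how many pages each zone contains.
 */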
static void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
#ifdef CONFIG_MMU
	int idx;

	/* Invalidate every fixmap slot before use. */
	for (idx = 0; idx < __end_of_fixed_addresses; idx++)
		clear_fixmap(idx);
#endif

	memset(zones_size, 0, sizeof(zones_size));

#ifdef CONFIG_HIGHMEM
	highmem_init();

	zones_size[ZONE_DMA] = max_low_pfn;
	zones_size[ZONE_HIGHMEM] = max_pfn;
#else
	zones_size[ZONE_DMA] = max_pfn;
#endif

	free_area_init_nodes(zones_size);
}

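/*
 * Discover main memory (on no-MMU configurations), reserve the kernel
 * image, compute the PFN limits and register memory with the node code
 * before calling paging_init().
 */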
void __init setup_memory(void)
{
	struct memblock_region *reg;

#ifndef CONFIG_MMU
	u32 kernel_align_start, kernel_align_size;

	/* Find the main memory region, i.e. the one the kernel text lives in. */
	for_each_memblock(memory, reg) {
		memory_start = (u32)reg->base;
		lowmem_size = reg->size;
		if ((memory_start <= (u32)_text) &&
		    ((u32)_text <= (memory_start + lowmem_size - 1))) {
			memory_size = lowmem_size;
			PAGE_OFFSET = memory_start;
			pr_info("%s: Main mem: 0x%x, size 0x%08x\n",
				__func__, (u32)memory_start,
				(u32)memory_size);
			break;
		}
	}

	if (!memory_start || !memory_size) {
		panic("%s: Missing memory setting 0x%08x, size=0x%08x\n",
		      __func__, (u32)memory_start, (u32)memory_size);
	}

	/* Reserve the page-aligned region occupied by the kernel image. */
	kernel_align_start = PAGE_DOWN((u32)_text);
	kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
	pr_info("%s: kernel addr:0x%08x-0x%08x size=0x%08x\n",
		__func__, kernel_align_start,
		kernel_align_start + kernel_align_size, kernel_align_size);
	memblock_reserve(kernel_align_start, kernel_align_size);
#endif

	/* PFN limits: lowmem ends at max_low_pfn, all of RAM at max_pfn. */
	min_low_pfn = memory_start >> PAGE_SHIFT;
	max_mapnr = memory_size >> PAGE_SHIFT;
	max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
	max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;

	pr_info("%s: max_mapnr: %#lx\n", __func__, max_mapnr);
	pr_info("%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
	pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
	pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);

	/* Tell the core mm which node each memory region belongs to. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;

		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		memblock_set_node(start_pfn << PAGE_SHIFT,
				  (end_pfn - start_pfn) << PAGE_SHIFT,
				  &memblock.memory, 0);
	}

	sparse_memory_present_with_active_regions(0);

	paging_init();
}

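/*
 * Release boot memory to the buddy allocator and print the kernel's
 * virtual memory layout.
 */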
void __init mem_init(void)
{
	high_memory = (void *)__va(memory_start + lowmem_size - 1);

	/* This puts all memblock memory onto the free lists. */
	memblock_free_all();
#ifdef CONFIG_HIGHMEM
	highmem_setup();
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_MMU
	pr_info("Kernel virtual memory layout:\n");
	pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif
	pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
		ioremap_bot, ioremap_base);
	pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
		(unsigned long)VMALLOC_START, VMALLOC_END);
#endif
	mem_init_done = 1;
}

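/*
 * page_is_ram(): with an MMU only pfns below max_low_pfn count as RAM;
 * the no-MMU variant defers to __range_ok().
 */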
#ifndef CONFIG_MMU
int page_is_ram(unsigned long pfn)
{
	return __range_ok(pfn, 0);
}
#else
int page_is_ram(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

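/*
 * Honour a "mem=" kernel command-line option by shrinking the first
 * memblock memory region to the requested size.
 */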
static void mm_cmdline_setup(void)
{
	unsigned long maxmem = 0;
	char *p = cmd_line;

	/* Look for a mem= option on the command line. */
	p = strstr(cmd_line, "mem=");
	if (p) {
		p += 4;
		maxmem = memparse(p, &p);
		if (maxmem && memory_size > maxmem) {
			memory_size = maxmem;
			memblock.memory.regions[0].size = memory_size;
		}
	}
}

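/*
 * mmu_init_hw() does the chip-specific initialization of the MMU
 * hardware; here that is only the zone protection setup.
 */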
static void __init mmu_init_hw(void)
{
	/* Program the zone protection register (rzpr), using r11 as scratch. */
	__asm__ __volatile__ ("ori r11, r0, 0x10000000;" \
				"mts rzpr, r11;"
				: : : "r11");
}

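/*
 * mmu_init() sanity-checks the memblock layout, records the main memory
 * parameters, reserves the kernel image (and initrd, if any), and sets up
 * the MMU hardware and kernel mappings.
 */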
asmlinkage void __init mmu_init(void)
{
	unsigned int kstart, ksize;

	if (!memblock.reserved.cnt) {
		pr_emerg("Error memory count\n");
		machine_restart(NULL);
	}

	if ((u32) memblock.memory.regions[0].size < 0x400000) {
		pr_emerg("Memory must be greater than 4MB\n");
		machine_restart(NULL);
	}

	if ((u32) memblock.memory.regions[0].size < kernel_tlb) {
		pr_emerg("Kernel size is greater than memory node\n");
		machine_restart(NULL);
	}

	/* Record the main memory parameters from the first memblock region. */
	memory_start = (u32) memblock.memory.regions[0].base;
	lowmem_size = memory_size = (u32) memblock.memory.regions[0].size;

	if (lowmem_size > CONFIG_LOWMEM_SIZE) {
		lowmem_size = CONFIG_LOWMEM_SIZE;
#ifndef CONFIG_HIGHMEM
		memory_size = lowmem_size;
#endif
	}

	mm_cmdline_setup();	/* Apply any mem= limit from the command line. */

	/* Reserve the physical memory occupied by the kernel image. */
	kstart = __pa(CONFIG_KERNEL_START);
	ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
	memblock_reserve(kstart, ksize);

#if defined(CONFIG_BLK_DEV_INITRD)
	/* Reserve the initrd so it is not handed to the page allocator. */
	if (initrd_start) {
		unsigned long size;
		size = initrd_end - initrd_start;
		memblock_reserve(__virt_to_phys(initrd_start), size);
	}
#endif /* CONFIG_BLK_DEV_INITRD */

	/* Initialize the MMU hardware. */
	mmu_init_hw();

	/* Map RAM into the kernel's linear mapping. */
	mapin_ram();


	/* Decide where the ioremap area starts. */
#ifdef CONFIG_HIGHMEM
	ioremap_base = ioremap_bot = PKMAP_BASE;
#else
	ioremap_base = ioremap_bot = FIXADDR_START;
#endif

	/* Initialize MMU context management. */
	mmu_context_init();

	/* Limit memblock allocations to the lowmem mapped so far. */
	memblock_set_current_limit(memory_start + lowmem_size - 1);
}

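/* Early page allocator, used before the normal allocators are available. */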
void __init *early_get_page(void)
{
	/*
	 * Allocations are restricted to the range below
	 * memory_start + kernel_tlb.
	 */
	return memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
				MEMBLOCK_LOW_LIMIT, memory_start + kernel_tlb,
				NUMA_NO_NODE);
}

#endif /* CONFIG_MMU */

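/*
 * Zeroed allocation that works both before and after the core mm is up:
 * kzalloc() once mem_init() has run, memblock otherwise.
 */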
void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
	void *p;

	if (mem_init_done) {
		p = kzalloc(size, mask);
	} else {
		p = memblock_alloc(size, SMP_CACHE_BYTES);
		if (!p)
			panic("%s: Failed to allocate %zu bytes\n",
			      __func__, size);
	}

	return p;
}