/*
 * linux/arch/unicore32/mm/init.c
 *
 * Copyright (C) 2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>

#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <mach/map.h>

#include "mm.h"

static unsigned long phys_initrd_start __initdata = 0x01000000;
static unsigned long phys_initrd_size __initdata = SZ_8M;

static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);

/*
 * This keeps memory configuration data used by a couple of memory
 * initialisation functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by uc32_add_memory().
 */
struct meminfo meminfo;

void show_mem(unsigned int filter)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, i;
	struct meminfo *mi = &meminfo;

	printk(KERN_DEFAULT "Mem-info:\n");
	show_free_areas(filter);

	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}

	printk(KERN_DEFAULT "%d pages of RAM\n", total);
	printk(KERN_DEFAULT "%d free pages\n", free);
	printk(KERN_DEFAULT "%d reserved pages\n", reserved);
	printk(KERN_DEFAULT "%d slab pages\n", slab);
	printk(KERN_DEFAULT "%d pages shared\n", shared);
	printk(KERN_DEFAULT "%d pages swap cached\n", cached);
}

static void __init find_limits(unsigned long *min, unsigned long *max_low,
	unsigned long *max_high)
{
	struct meminfo *mi = &meminfo;
	int i;

	*min = -1UL;
	*max_low = *max_high = 0;

	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned long start, end;

		start = bank_pfn_start(bank);
		end = bank_pfn_end(bank);

		if (*min > start)
			*min = start;
		if (*max_high < end)
			*max_high = end;
		if (bank->highmem)
			continue;
		if (*max_low < end)
			*max_low = end;
	}
}

static void __init uc32_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				     __pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
	}

	/* Reserve the lowmem memblock reserved regions in bootmem. */
	for_each_memblock(reserved, reg) {
		unsigned long start = memblock_region_reserved_base_pfn(reg);
		unsigned long end = memblock_region_reserved_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		reserve_bootmem(__pfn_to_phys(start),
				(end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
	}
}
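/*
 * A worked example of the bitmap sizing in uc32_bootmem_init() above
 * (the 128 MiB figure is an assumed configuration, not a UniCore32
 * requirement): bootmem tracks every page frame with one bit, so
 * 128 MiB of lowmem with 4 KiB pages gives
 *
 *	end_pfn - start_pfn = SZ_128M / PAGE_SIZE = 32768 frames
 *	bitmap size         = 32768 bits / 8      = 4096 bytes
 *
 * and bootmem_bootmap_pages() therefore returns a single page.
 */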
static void __init uc32_bootmem_free(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
	}

	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	arch_adjust_zones(zone_size, zhole_size);

	free_area_init_node(0, zone_size, min, zhole_size);
}

int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);

static void uc32_memory_present(void)
{
}

static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

void __init uc32_memblock_init(struct meminfo *mi)
{
	int i;

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]),
		meminfo_cmp, NULL);

	for (i = 0; i < mi->nr_banks; i++)
		memblock_add(mi->bank[i].start, mi->bank[i].size);

	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(_text), _end - _text);

#ifdef CONFIG_BLK_DEV_INITRD
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	uc32_mm_memblock_reserve();

	memblock_allow_resize();
	memblock_dump_all();
}
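/*
 * The initrd reservation above is normally driven from the kernel
 * command line via early_initrd(); a sketch of the expected usage
 * (the address is illustrative, not a fixed platform layout):
 *
 *	initrd=0x02000000,8M
 *
 * memparse() accepts K/M/G suffixes, so this overrides the built-in
 * defaults with phys_initrd_start = 0x02000000 and phys_initrd_size
 * = SZ_8M before uc32_memblock_init() reserves the region.
 */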
void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	uc32_bootmem_init(min, max_low);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(1);
#endif
	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	uc32_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	uc32_bootmem_free(min, max_low, max_high);

	high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
	 * the system, not the maximum PFN.
	 */
	max_low_pfn = max_low - PHYS_PFN_OFFSET;
	max_pfn = max_high - PHYS_PFN_OFFSET;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn);

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the
 * memory map.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];

		bank_start = bank_pfn_start(bank);

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end < bank_start)
			free_memmap(prev_bank_end, bank_start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
	}
}
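/*
 * A worked example of the savings from free_unused_memmap(), under
 * assumed values (two banks ending/starting at pfn 0x8000 and
 * 0x10000, MAX_ORDER_NR_PAGES = 1024, sizeof(struct page) = 32):
 * ALIGN(0x8000, 1024) is already 0x8000, so free_memmap() releases
 * the mem_map entries for the 32768-pfn hole, i.e. 32768 * 32 bytes
 * = 1 MiB of memmap for a 128 MiB hole in physical memory.  Only
 * whole pages are freed, since free_memmap() rounds the start of the
 * span up and the end down to PAGE_SIZE.
 */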
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

	free_unused_memmap(&meminfo);

	/* this will put all unused low memory onto the freelists */
	free_all_bootmem();

	mem_init_print_info(NULL);
	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
		"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
		"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
		"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n",

		VECTORS_BASE, VECTORS_BASE + PAGE_SIZE,
		DIV_ROUND_UP(PAGE_SIZE, SZ_1K),
		VMALLOC_START, VMALLOC_END,
		DIV_ROUND_UP((VMALLOC_END - VMALLOC_START), SZ_1M),
		PAGE_OFFSET, (unsigned long)high_memory,
		DIV_ROUND_UP(((unsigned long)high_memory - PAGE_OFFSET),
			SZ_1M),
		MODULES_VADDR, MODULES_END,
		DIV_ROUND_UP((MODULES_END - MODULES_VADDR), SZ_1M),

		__init_begin, __init_end,
		DIV_ROUND_UP((__init_end - __init_begin), SZ_1K),
		_stext, _etext,
		DIV_ROUND_UP((_etext - _stext), SZ_1K),
		_sdata, _edata,
		DIV_ROUND_UP((_edata - _sdata), SZ_1K));

	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		free_reserved_area((void *)start, (void *)end, -1, "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif
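/*
 * A usage sketch for the "keepinitrd" parameter above: booting with
 *
 *	keepinitrd
 *
 * on the kernel command line sets keep_initrd, so free_initrd_mem()
 * leaves the initrd image resident after init has run, at the cost
 * of keeping the phys_initrd_size reservation out of the page
 * allocator.
 */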