root/arch/mips/kernel/setup.c

DEFINITIONS

This source file includes the following definitions:
  1. add_memory_region
  2. detect_memory_region
  3. rd_start_early
  4. rd_size_early
  5. init_initrd
  6. maybe_bswap_initrd
  7. finalize_initrd
  8. init_initrd
  9. bootmem_init
  10. bootmem_init
  11. early_parse_mem
  12. early_parse_memmap
  13. early_parse_elfcorehdr
  14. mips_parse_crashkernel
  15. request_crashkernel
  16. mips_parse_crashkernel
  17. request_crashkernel
  18. check_kernel_sections_mem
  19. arch_mem_init
  20. resource_init
  21. prefill_possible_map
  22. prefill_possible_map
  23. setup_arch
  24. debugfs_mips
  25. setcoherentio
  26. setnocoherentio

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007  Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/decompress/generic.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/bugs.h>
#include <asm/cache.h>
#include <asm/cdmm.h>
#include <asm/cpu.h>
#include <asm/debug.h>
#include <asm/dma-coherence.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/prom.h>

#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
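/*
 * Buffer into which a device tree blob may be appended to the vmlinux
 * ELF image by the build or boot tooling.
 */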
const char __section(.appended_dtb) __appended_dtb[0x100000];
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);

static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];

#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

/*
 * mips_io_port_base is the beginning of the address space to which x86-style
 * I/O ports are mapped.
 */
unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static struct resource bss_resource = { .name = "Kernel bss", };

static void *detect_magic __initdata = detect_memory_region;

#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
unsigned long ARCH_PFN_OFFSET;
EXPORT_SYMBOL(ARCH_PFN_OFFSET);
#endif

void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
{
        /*
         * Note: This function only exists for historical reasons;
         * new code should use memblock_add or memblock_add_node instead.
         */

        /*
         * If the region reaches the top of the physical address space, adjust
         * the size slightly so that (start + size) doesn't overflow
         */
        if (start + size - 1 == PHYS_ADDR_MAX)
                --size;

        /* Sanity check */
        if (start + size < start) {
                pr_warn("Trying to add an invalid memory region, skipped\n");
                return;
        }

        if (start < PHYS_OFFSET)
                return;

        memblock_add(start, size);
        /* Reserve any memory except the ordinary RAM ranges. */
        switch (type) {
        case BOOT_MEM_RAM:
                break;

        case BOOT_MEM_NOMAP: /* Discard the range from the system. */
                memblock_remove(start, size);
                break;

        default: /* Reserve the rest of the memory types at boot time */
                memblock_reserve(start, size);
                break;
        }
}

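/*
 * Probe the amount of installed RAM: detect_magic lives in the kernel
 * image near the base of RAM, so on systems where memory aliases (wraps
 * around) past the installed size, reading at dm + size returns the
 * same data once size reaches the real amount of memory.
 */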
void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
{
        void *dm = &detect_magic;
        phys_addr_t size;

        for (size = sz_min; size < sz_max; size <<= 1) {
                if (!memcmp(dm, dm + size, sizeof(detect_magic)))
                        break;
        }

        pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
                ((unsigned long long) size) / SZ_1M,
                (unsigned long long) start,
                ((unsigned long long) sz_min) / SZ_1M,
                ((unsigned long long) sz_max) / SZ_1M);

        add_memory_region(start, size, BOOT_MEM_RAM);
}

/*
 * Manage initrd
 */
#ifdef CONFIG_BLK_DEV_INITRD

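/*
 * "rd_start=<addr>" and "rd_size=<size>" describe an initrd that the
 * bootloader has already placed in memory, e.g.
 * "rd_start=0x84000000 rd_size=8M" (example values; the address is
 * board specific).
 */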
static int __init rd_start_early(char *p)
{
        unsigned long start = memparse(p, &p);

#ifdef CONFIG_64BIT
        /* Guess if the sign extension was forgotten by the bootloader */
        if (start < XKPHYS)
                start = (int)start;
#endif
        initrd_start = start;
        initrd_end += start;
        return 0;
}
early_param("rd_start", rd_start_early);

static int __init rd_size_early(char *p)
{
        initrd_end += memparse(p, &p);
        return 0;
}
early_param("rd_size", rd_size_early);

/* Returns the next free PFN after the initrd. */
static unsigned long __init init_initrd(void)
{
        unsigned long end;

        /*
         * Board-specific code or the command line parser should have
         * already set up initrd_start and initrd_end. In these cases
         * perform sanity checks and use them if all looks good.
         */
        if (!initrd_start || initrd_end <= initrd_start)
                goto disable;

        if (initrd_start & ~PAGE_MASK) {
                pr_err("initrd start must be page aligned\n");
                goto disable;
        }
        if (initrd_start < PAGE_OFFSET) {
                pr_err("initrd start < PAGE_OFFSET\n");
                goto disable;
        }

        /*
         * Sanitize initrd addresses. For example, firmware can't
         * know whether it needs to pass them as 64-bit values when
         * the kernel has been built as pure 32-bit. We also need to
         * switch from KSEG0 to XKPHYS addresses now, so that the
         * code can safely use __pa().
         */
        end = __pa(initrd_end);
        initrd_end = (unsigned long)__va(end);
        initrd_start = (unsigned long)__va(__pa(initrd_start));

        ROOT_DEV = Root_RAM0;
        return PFN_UP(end);
disable:
        initrd_start = 0;
        initrd_end = 0;
        return 0;
}

/*
 * In some conditions (e.g. a big endian bootloader with a little endian
 * kernel), the initrd might appear byte swapped.  Try to detect this and
 * byte swap it if needed.
 */
static void __init maybe_bswap_initrd(void)
{
#if defined(CONFIG_CPU_CAVIUM_OCTEON)
        u64 buf;

        /* Check for the CPIO signature */
        if (!memcmp((void *)initrd_start, "070701", 6))
                return;

        /* Check for a compressed initrd */
        if (decompress_method((unsigned char *)initrd_start, 8, NULL))
                return;

        /* Try again with a byte swapped header */
        buf = swab64p((u64 *)initrd_start);
        if (!memcmp(&buf, "070701", 6) ||
            decompress_method((unsigned char *)(&buf), 8, NULL)) {
                unsigned long i;

                pr_info("Byteswapped initrd detected\n");
                for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
                        swab64s((u64 *)i);
        }
#endif
}

static void __init finalize_initrd(void)
{
        unsigned long size = initrd_end - initrd_start;

        if (size == 0) {
                printk(KERN_INFO "Initrd not found or empty");
                goto disable;
        }
        if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
                printk(KERN_ERR "Initrd extends beyond end of memory");
                goto disable;
        }

        maybe_bswap_initrd();

        memblock_reserve(__pa(initrd_start), size);
        initrd_below_start_ok = 1;

        pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
                initrd_start, size);
        return;
disable:
        printk(KERN_CONT " - disabling initrd\n");
        initrd_start = 0;
        initrd_end = 0;
}

#else  /* !CONFIG_BLK_DEV_INITRD */

static unsigned long __init init_initrd(void)
{
        return 0;
}

#define finalize_initrd()       do {} while (0)

#endif

/*
 * Initialize the bootmem allocator. It also sets up initrd-related data
 * if needed.
 */
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))
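
/*
 * On these NUMA platforms the per-node memory map is set up by
 * platform code; only the initrd needs handling here.
 */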
static void __init bootmem_init(void)
{
        init_initrd();
        finalize_initrd();
}

#else  /* !(CONFIG_SGI_IP27 || (CONFIG_CPU_LOONGSON3 && CONFIG_NUMA)) */

static void __init bootmem_init(void)
{
        struct memblock_region *mem;
        phys_addr_t ramstart, ramend;

        ramstart = memblock_start_of_DRAM();
        ramend = memblock_end_of_DRAM();

        /*
         * Sanity check any INITRD first. We don't take it into account
         * for bootmem setup initially; we rely on the end of the kernel
         * code as our memory range starting point. Once bootmem is
         * initialized we will reserve the area used for the initrd.
         */
        init_initrd();

        /* Reserve memory occupied by the kernel. */
        memblock_reserve(__pa_symbol(&_text),
                        __pa_symbol(&_end) - __pa_symbol(&_text));

        /* max_low_pfn is not a number of pages but the end pfn of low mem */

#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
        ARCH_PFN_OFFSET = PFN_UP(ramstart);
#else
        /*
         * Reserve any memory between the start of RAM and PHYS_OFFSET
         */
        if (ramstart > PHYS_OFFSET)
                memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);

        if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) {
                pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
                        (unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)),
                        (unsigned long)(PFN_UP(ramstart) - ARCH_PFN_OFFSET));
        }
#endif

        min_low_pfn = ARCH_PFN_OFFSET;
        max_pfn = PFN_DOWN(ramend);
        for_each_memblock(memory, mem) {
                unsigned long start = memblock_region_memory_base_pfn(mem);
                unsigned long end = memblock_region_memory_end_pfn(mem);

                /*
                 * Skip highmem here so we get an accurate max_low_pfn if low
                 * memory stops short of high memory.
                 * If the region overlaps HIGHMEM_START, end is clipped so
                 * max_pfn excludes the highmem portion.
                 */
                if (memblock_is_nomap(mem))
                        continue;
                if (start >= PFN_DOWN(HIGHMEM_START))
                        continue;
                if (end > PFN_DOWN(HIGHMEM_START))
                        end = PFN_DOWN(HIGHMEM_START);
                if (end > max_low_pfn)
                        max_low_pfn = end;
        }

        if (min_low_pfn >= max_low_pfn)
                panic("Incorrect memory mapping !!!");

        if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
                highstart_pfn = PFN_DOWN(HIGHMEM_START);
                highend_pfn = max_pfn;
#else
                max_low_pfn = PFN_DOWN(HIGHMEM_START);
                max_pfn = max_low_pfn;
#endif
        }

        /*
         * In any case, the memory regions added to memblock
         * (highmem/lowmem, available/reserved, etc.) are considered
         * present, so inform sparsemem about them.
         */
        memblocks_present();

        /*
         * Reserve initrd memory if needed.
         */
        finalize_initrd();
}

#endif  /* CONFIG_SGI_IP27 || (CONFIG_CPU_LOONGSON3 && CONFIG_NUMA) */

static int usermem __initdata;

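/*
 * "mem=<size>[@<start>]" overrides the firmware-provided memory map,
 * e.g. "mem=256M@0x0" (example values).
 */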
static int __init early_parse_mem(char *p)
{
        phys_addr_t start, size;

        /*
         * If a user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                usermem = 1;
                memblock_remove(memblock_start_of_DRAM(),
                        memblock_end_of_DRAM() - memblock_start_of_DRAM());
        }
        start = 0;
        size = memparse(p, &p);
        if (*p == '@')
                start = memparse(p + 1, &p);

        add_memory_region(start, size, BOOT_MEM_RAM);

        return 0;
}
early_param("mem", early_parse_mem);

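/*
 * "memmap=<size>@<start>" adds a RAM region and "memmap=<size>$<start>"
 * reserves one; the x86-only "exactmap" and "<size>#<start>" (ACPI data)
 * forms are rejected below.
 */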
static int __init early_parse_memmap(char *p)
{
        char *oldp;
        u64 start_at, mem_size;

        if (!p)
                return -EINVAL;

        if (!strncmp(p, "exactmap", 8)) {
                pr_err("\"memmap=exactmap\" invalid on MIPS\n");
                return 0;
        }

        oldp = p;
        mem_size = memparse(p, &p);
        if (p == oldp)
                return -EINVAL;

        if (*p == '@') {
                start_at = memparse(p+1, &p);
                add_memory_region(start_at, mem_size, BOOT_MEM_RAM);
        } else if (*p == '#') {
                pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
                return -EINVAL;
        } else if (*p == '$') {
                start_at = memparse(p+1, &p);
                add_memory_region(start_at, mem_size, BOOT_MEM_RESERVED);
        } else {
                pr_err("\"memmap\" invalid format!\n");
                return -EINVAL;
        }

        if (*p == '\0') {
                usermem = 1;
                return 0;
        } else
                return -EINVAL;
}
early_param("memmap", early_parse_memmap);

#ifdef CONFIG_PROC_VMCORE
unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
static int __init early_parse_elfcorehdr(char *p)
{
        struct memblock_region *mem;

        setup_elfcorehdr = memparse(p, &p);

        for_each_memblock(memory, mem) {
                unsigned long start = mem->base;
                unsigned long end = start + mem->size;
                if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
                        /*
                         * Reserve from the elf core header to the end of
                         * the memory segment; that should all be kdump
                         * reserved memory.
                         */
                        setup_elfcorehdr_size = end - setup_elfcorehdr;
                        break;
                }
        }
        /*
         * If we don't find it in the memory map, then we shouldn't
         * have to worry about it, as the new kernel won't use it.
         */
        return 0;
}
early_param("elfcorehdr", early_parse_elfcorehdr);
#endif

#ifdef CONFIG_KEXEC
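/*
 * Reserve memory for a crash/capture kernel, as requested via the
 * standard "crashkernel=<size>[@<offset>]" parameter, e.g.
 * "crashkernel=64M@0x2000000" (example values).
 */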
static void __init mips_parse_crashkernel(void)
{
        unsigned long long total_mem;
        unsigned long long crash_size, crash_base;
        int ret;

        total_mem = memblock_phys_mem_size();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret != 0 || crash_size <= 0)
                return;

        if (!memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 0)) {
                pr_warn("Invalid memory region reserved for crash kernel\n");
                return;
        }

        crashk_res.start = crash_base;
        crashk_res.end   = crash_base + crash_size - 1;
}

static void __init request_crashkernel(struct resource *res)
{
        int ret;

        if (crashk_res.start == crashk_res.end)
                return;

        ret = request_resource(res, &crashk_res);
        if (!ret)
                pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
                        (unsigned long)((crashk_res.end -
                                         crashk_res.start + 1) >> 20),
                        (unsigned long)(crashk_res.start >> 20));
}
#else /* !defined(CONFIG_KEXEC) */
static void __init mips_parse_crashkernel(void)
{
}

static void __init request_crashkernel(struct resource *res)
{
}
#endif /* !defined(CONFIG_KEXEC) */

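/*
 * Some bootloaders hand over a memory map that does not cover the
 * range occupied by the kernel image itself; add it back so it can be
 * reserved.
 */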
static void __init check_kernel_sections_mem(void)
{
        phys_addr_t start = PFN_PHYS(PFN_DOWN(__pa_symbol(&_text)));
        phys_addr_t size = PFN_PHYS(PFN_UP(__pa_symbol(&_end))) - start;

        if (!memblock_is_region_memory(start, size)) {
                pr_info("Kernel sections are not in the memory maps\n");
                memblock_add(start, size);
        }
}

#define USE_PROM_CMDLINE        IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
#define USE_DTB_CMDLINE         IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
#define EXTEND_WITH_PROM        IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)
#define BUILTIN_EXTEND_WITH_PROM        \
        IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)

/*
 * arch_mem_init - initialize memory management subsystem
 *
 *  o plat_mem_setup() detects the memory configuration and will record detected
 *    memory areas using add_memory_region.
 *
 * At this stage the memory configuration of the system is known to the
 * kernel, but the generic memory management system is still entirely
 * uninitialized.
 *
 *  o bootmem_init()
 *  o sparse_init()
 *  o paging_init()
 *  o dma_contiguous_reserve()
 *
 * At this stage the bootmem allocator is ready to use.
 *
 * NOTE: historically plat_mem_setup did the entire platform initialization.
 *       This was rather impractical because it meant plat_mem_setup had to
 * get by without any kind of memory allocator.  To keep old code from
 * breaking, plat_setup was renamed to plat_mem_setup and a second platform
 * initialization hook for everything else was introduced.
 */
static void __init arch_mem_init(char **cmdline_p)
{
        extern void plat_mem_setup(void);

        /*
         * Initialize boot_command_line to an innocuous but non-empty string in
         * order to prevent early_init_dt_scan_chosen() from copying
         * CONFIG_CMDLINE into it without our knowledge. We handle
         * CONFIG_CMDLINE ourselves below & don't want to duplicate its
         * content because repeating arguments can be problematic.
         */
        strlcpy(boot_command_line, " ", COMMAND_LINE_SIZE);

        /* call board setup routine */
        plat_mem_setup();
        memblock_set_bottom_up(true);

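        /*
         * Assemble the final command line: CONFIG_CMDLINE_OVERRIDE forces
         * the built-in string, otherwise the bootloader (arcs_cmdline) and
         * DTB command lines are combined according to the selected
         * CONFIG_MIPS_CMDLINE_* policy, optionally extended by the
         * built-in command line.
         */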
#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
        strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
        if ((USE_PROM_CMDLINE && arcs_cmdline[0]) ||
            (USE_DTB_CMDLINE && !boot_command_line[0]))
                strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);

        if (EXTEND_WITH_PROM && arcs_cmdline[0]) {
                if (boot_command_line[0])
                        strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
                strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
        }

#if defined(CONFIG_CMDLINE_BOOL)
        if (builtin_cmdline[0]) {
                if (boot_command_line[0])
                        strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
                strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
        }

        if (BUILTIN_EXTEND_WITH_PROM && arcs_cmdline[0]) {
                if (boot_command_line[0])
                        strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
                strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
        }
#endif
#endif
        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);

        *cmdline_p = command_line;

        parse_early_param();

        if (usermem)
                pr_info("User-defined physical RAM map overwrite\n");

        check_kernel_sections_mem();

        early_init_fdt_reserve_self();
        early_init_fdt_scan_reserved_mem();

#ifndef CONFIG_NUMA
        memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
#endif
        bootmem_init();

        /*
         * Prevent memblock from allocating high memory.
         * This cannot be done before max_low_pfn is detected, so up to
         * this point it is only possible to reserve physical memory
         * with memblock_reserve; memblock_alloc* can be used
         * only after this point.
         */
        memblock_set_current_limit(PFN_PHYS(max_low_pfn));

#ifdef CONFIG_PROC_VMCORE
        if (setup_elfcorehdr && setup_elfcorehdr_size) {
                printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
                       setup_elfcorehdr, setup_elfcorehdr_size);
                memblock_reserve(setup_elfcorehdr, setup_elfcorehdr_size);
        }
#endif

        mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
        if (crashk_res.start != crashk_res.end)
                memblock_reserve(crashk_res.start,
                                 crashk_res.end - crashk_res.start + 1);
#endif
        device_tree_init();
        sparse_init();
        plat_swiotlb_setup();

        dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

        /* Reserve for hibernation. */
        memblock_reserve(__pa_symbol(&__nosave_begin),
                __pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));

        fdt_init_reserved_mem();

        memblock_dump_all();

        early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
}

static void __init resource_init(void)
{
        struct memblock_region *region;

        if (UNCAC_BASE != IO_BASE)
                return;

        code_resource.start = __pa_symbol(&_text);
        code_resource.end = __pa_symbol(&_etext) - 1;
        data_resource.start = __pa_symbol(&_etext);
        data_resource.end = __pa_symbol(&_edata) - 1;
        bss_resource.start = __pa_symbol(&__bss_start);
        bss_resource.end = __pa_symbol(&__bss_stop) - 1;

        for_each_memblock(memory, region) {
                phys_addr_t start = PFN_PHYS(memblock_region_memory_base_pfn(region));
                phys_addr_t end = PFN_PHYS(memblock_region_memory_end_pfn(region)) - 1;
                struct resource *res;

                res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
                if (!res)
                        panic("%s: Failed to allocate %zu bytes\n", __func__,
                              sizeof(struct resource));

                res->start = start;
                res->end = end;
                res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
                res->name = "System RAM";

                request_resource(&iomem_resource, res);

                /*
                 * We don't know which RAM region contains kernel data,
                 * so we try it repeatedly and let the resource manager
                 * test it.
                 */
                request_resource(res, &code_resource);
                request_resource(res, &data_resource);
                request_resource(res, &bss_resource);
                request_crashkernel(res);
        }
}

#ifdef CONFIG_SMP
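/*
 * Mark CPUs up to nr_cpu_ids as possible and clamp nr_cpu_ids so that
 * per-CPU data is only set up for CPUs that can ever come online.
 */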
static void __init prefill_possible_map(void)
{
        int i, possible = num_possible_cpus();

        if (possible > nr_cpu_ids)
                possible = nr_cpu_ids;

        for (i = 0; i < possible; i++)
                set_cpu_possible(i, true);
        for (; i < NR_CPUS; i++)
                set_cpu_possible(i, false);

        nr_cpu_ids = possible;
}
#else
static inline void prefill_possible_map(void) {}
#endif

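/*
 * Top-level MIPS architecture setup: probe the CPU, run firmware and
 * device tree initialization, pick a console, initialize memory and
 * bring up caches and paging.
 */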
void __init setup_arch(char **cmdline_p)
{
        cpu_probe();
        mips_cm_probe();
        prom_init();

        setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
        setup_early_printk();
#endif
        cpu_report();
        check_bugs_early();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

        arch_mem_init(cmdline_p);

        resource_init();
        plat_smp_setup();
        prefill_possible_map();

        cpu_cache_init();
        paging_init();
}

unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;

#ifdef CONFIG_USE_OF
unsigned long fw_passed_dtb;
#endif

#ifdef CONFIG_DEBUG_FS
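/* Create the common "mips" directory in debugfs used by MIPS-specific code. */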
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
{
        mips_debugfs_dir = debugfs_create_dir("mips", NULL);
        return 0;
}
arch_initcall(debugfs_mips);
#endif

#ifdef CONFIG_DMA_MAYBE_COHERENT
/* User defined DMA coherency from command line. */
enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0;  /* Actual hardware supported DMA coherency setting. */

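/*
 * "coherentio" / "nocoherentio" let the user force hardware or software
 * DMA cache coherency from the kernel command line.
 */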
static int __init setcoherentio(char *str)
{
        coherentio = IO_COHERENCE_ENABLED;
        pr_info("Hardware DMA cache coherency (command line)\n");
        return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
        coherentio = IO_COHERENCE_DISABLED;
        pr_info("Software DMA cache coherency (command line)\n");
        return 0;
}
early_param("nocoherentio", setnocoherentio);
#endif
