/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *   {engebret|bergner}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <stdarg.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/kexec.h>
#include <linux/irq.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>

#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/smp.h>
#include <asm/mmu.h>
#include <asm/paca.h>
#include <asm/pgtable.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/kexec.h>
#include <asm/opal.h>
#include <asm/fadump.h>
#include <asm/debug.h>

#include <mm/mmu_decl.h>

#ifdef DEBUG
#define DBG(fmt...) printk(KERN_ERR fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_PPC64
int __initdata iommu_is_off;
int __initdata iommu_force_on;
unsigned long tce_alloc_start, tce_alloc_end;
u64 ppc64_rma_size;
#endif
static phys_addr_t first_memblock_size;
static int __initdata boot_cpu_count;

static int __init early_parse_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = PAGE_ALIGN(memparse(p, &p));
	DBG("memory limit = 0x%llx\n", memory_limit);

	return 0;
}
early_param("mem", early_parse_mem);
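/*
 * For example (illustrative value): booting with "mem=512M" makes
 * memparse() return 0x20000000; memparse() also accepts the usual
 * K/M/G suffixes, and the result is rounded up to a page boundary
 * by PAGE_ALIGN() before being stored in memory_limit.
 */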
/*
 * overlaps_initrd - check for overlap with page aligned extension of
 * initrd.
 */
static inline int overlaps_initrd(unsigned long start, unsigned long size)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (!initrd_start)
		return 0;

	return	(start + size) > _ALIGN_DOWN(initrd_start, PAGE_SIZE) &&
		start <= _ALIGN_UP(initrd_end, PAGE_SIZE);
#else
	return 0;
#endif
}

/**
 * move_device_tree - move tree to an unused area, if needed.
 *
 * The device tree may be allocated beyond our memory limit, or inside the
 * crash kernel region for kdump, or within the page aligned range of initrd.
 * If so, move it out of the way.
 */
static void __init move_device_tree(void)
{
	unsigned long start, size;
	void *p;

	DBG("-> move_device_tree\n");

	start = __pa(initial_boot_params);
	size = fdt_totalsize(initial_boot_params);

	if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
	    overlaps_crashkernel(start, size) ||
	    overlaps_initrd(start, size)) {
		p = __va(memblock_alloc(size, PAGE_SIZE));
		memcpy(p, initial_boot_params, size);
		initial_boot_params = p;
		DBG("Moved device tree to 0x%p\n", p);
	}

	DBG("<- move_device_tree\n");
}

/*
 * ibm,pa-features is a per-cpu property that contains a string of
 * attribute descriptors, each of which has a 2 byte header plus up
 * to 254 bytes worth of processor attribute bits.  First header
 * byte specifies the number of bytes following the header.
 * Second header byte is an "attribute-specifier" type, of which
 * zero is the only currently-defined value.
 * Implementation:  Pass in the byte and bit offset for the feature
 * that we are interested in.  The function will return -1 if the
 * pa-features property is missing, or a 1/0 to indicate if the feature
 * is supported/not supported.  Note that the bit numbers are
 * big-endian to match the definition in PAPR.
 */
static struct ibm_pa_feature {
	unsigned long	cpu_features;	/* CPU_FTR_xxx bit */
	unsigned long	mmu_features;	/* MMU_FTR_xxx bit */
	unsigned int	cpu_user_ftrs;	/* PPC_FEATURE_xxx bit */
	unsigned int	cpu_user_ftrs2;	/* PPC_FEATURE2_xxx bit */
	unsigned char	pabyte;		/* byte number in ibm,pa-features */
	unsigned char	pabit;		/* bit number (big-endian) */
	unsigned char	invert;		/* if 1, pa bit set => clear feature */
} ibm_pa_features[] __initdata = {
	{0, 0, PPC_FEATURE_HAS_MMU, 0,		0, 0, 0},
	{0, 0, PPC_FEATURE_HAS_FPU, 0,		0, 1, 0},
	{CPU_FTR_CTRL, 0, 0, 0,			0, 3, 0},
	{CPU_FTR_NOEXECUTE, 0, 0, 0,		0, 6, 0},
	{CPU_FTR_NODSISRALIGN, 0, 0, 0,		1, 1, 1},
	{0, MMU_FTR_CI_LARGE_PAGE, 0, 0,	1, 2, 0},
	{CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
	/*
	 * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
	 * we don't want to turn on TM here, so we use the *_COMP versions
	 * which are 0 if the kernel doesn't support TM.
	 */
	{CPU_FTR_TM_COMP, 0, 0,
	 PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
};
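/*
 * Worked example (illustrative bytes, not from real firmware): a
 * property value of 02 00 80 40 is a single descriptor with length
 * 0x02, type 0x00 and attribute bytes 0x80 0x40.  For an entry with
 * pabyte = 0, pabit = 0, scan_features() below computes
 * (0x80 >> 7) & 1 == 1, so the corresponding feature is turned on;
 * with pabyte = 1, pabit = 1 it computes (0x40 >> 6) & 1 == 1.
 */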
static void __init scan_features(unsigned long node, const unsigned char *ftrs,
				 unsigned long tablelen,
				 struct ibm_pa_feature *fp,
				 unsigned long ft_size)
{
	unsigned long i, len, bit;

	/* find descriptor with type == 0 */
	for (;;) {
		if (tablelen < 3)
			return;
		len = 2 + ftrs[0];
		if (tablelen < len)
			return;		/* descriptor 0 not found */
		if (ftrs[1] == 0)
			break;
		tablelen -= len;
		ftrs += len;
	}

	/* loop over bits we know about */
	for (i = 0; i < ft_size; ++i, ++fp) {
		if (fp->pabyte >= ftrs[0])
			continue;
		bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
		if (bit ^ fp->invert) {
			cur_cpu_spec->cpu_features |= fp->cpu_features;
			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
			cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
			cur_cpu_spec->mmu_features |= fp->mmu_features;
		} else {
			cur_cpu_spec->cpu_features &= ~fp->cpu_features;
			cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
			cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
			cur_cpu_spec->mmu_features &= ~fp->mmu_features;
		}
	}
}

static void __init check_cpu_pa_features(unsigned long node)
{
	const unsigned char *pa_ftrs;
	int tablelen;

	pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
	if (pa_ftrs == NULL)
		return;

	scan_features(node, pa_ftrs, tablelen,
		      ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
}

#ifdef CONFIG_PPC_STD_MMU_64
static void __init init_mmu_slb_size(unsigned long node)
{
	const __be32 *slb_size_ptr;

	slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL) ? :
			of_get_flat_dt_prop(node, "ibm,slb-size", NULL);

	if (slb_size_ptr)
		mmu_slb_size = be32_to_cpup(slb_size_ptr);
}
#else
#define init_mmu_slb_size(node) do { } while(0)
#endif

static struct feature_property {
	const char *name;
	u32 min_value;
	unsigned long cpu_feature;
	unsigned long cpu_user_ftr;
} feature_properties[] __initdata = {
#ifdef CONFIG_ALTIVEC
	{"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
	{"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	/* Yes, this _really_ is ibm,vmx == 2 to enable VSX */
	{"ibm,vmx", 2, CPU_FTR_VSX, PPC_FEATURE_HAS_VSX},
#endif /* CONFIG_VSX */
#ifdef CONFIG_PPC64
	{"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP},
	{"ibm,purr", 1, CPU_FTR_PURR, 0},
	{"ibm,spurr", 1, CPU_FTR_SPURR, 0},
#endif /* CONFIG_PPC64 */
};
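/*
 * Example (illustrative): a cpu node carrying ibm,vmx = <2> satisfies
 * both the "ibm,vmx" >= 1 entry and the "ibm,vmx" >= 2 entry above, so
 * check_cpu_feature_properties() below enables Altivec as well as VSX,
 * when both are configured in.
 */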
#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
static inline void identical_pvr_fixup(unsigned long node)
{
	unsigned int pvr;
	const char *model = of_get_flat_dt_prop(node, "model", NULL);

	/*
	 * Since 440GR(x)/440EP(x) processors have the same pvr,
	 * we check the node path and set bit 28 in the cur_cpu_spec
	 * pvr for EP(x) processor version. This bit is always 0 in
	 * the "real" pvr. Then we call identify_cpu again with
	 * the new logical pvr to enable FPU support.
	 */
	if (model && strstr(model, "440EP")) {
		pvr = cur_cpu_spec->pvr_value | 0x8;
		identify_cpu(0, pvr);
		DBG("Using logical pvr %x for %s\n", pvr, model);
	}
}
#else
#define identical_pvr_fixup(node) do { } while(0)
#endif

static void __init check_cpu_feature_properties(unsigned long node)
{
	unsigned long i;
	struct feature_property *fp = feature_properties;
	const __be32 *prop;

	for (i = 0; i < ARRAY_SIZE(feature_properties); ++i, ++fp) {
		prop = of_get_flat_dt_prop(node, fp->name, NULL);
		if (prop && be32_to_cpup(prop) >= fp->min_value) {
			cur_cpu_spec->cpu_features |= fp->cpu_feature;
			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr;
		}
	}
}

static int __init early_init_dt_scan_cpus(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be32 *prop;
	const __be32 *intserv;
	int i, nthreads;
	int len;
	int found = -1;
	int found_thread = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Get physical cpuid */
	intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		intserv = of_get_flat_dt_prop(node, "reg", &len);

	nthreads = len / sizeof(int);

	/*
	 * Now see if any of these threads match our boot cpu.
	 * NOTE: This must match the parsing done in smp_setup_cpu_maps.
	 */
	for (i = 0; i < nthreads; i++) {
		/*
		 * version 2 of the kexec param format adds the phys cpuid of
		 * booted proc.
		 */
		if (fdt_version(initial_boot_params) >= 2) {
			if (be32_to_cpu(intserv[i]) ==
			    fdt_boot_cpuid_phys(initial_boot_params)) {
				found = boot_cpu_count;
				found_thread = i;
			}
		} else {
			/*
			 * Check if it's the boot-cpu, set its hw index now;
			 * unfortunately this format did not support booting
			 * off secondary threads.
			 */
			if (of_get_flat_dt_prop(node,
					"linux,boot-cpu", NULL) != NULL)
				found = boot_cpu_count;
		}
#ifdef CONFIG_SMP
		/* logical cpu id is always 0 on UP kernels */
		boot_cpu_count++;
#endif
	}

	/* Not the boot CPU */
	if (found < 0)
		return 0;

	DBG("boot cpu: logical %d physical %d\n", found,
	    be32_to_cpu(intserv[found_thread]));
	boot_cpuid = found;
	set_hard_smp_processor_id(found, be32_to_cpu(intserv[found_thread]));

	/*
	 * PAPR defines "logical" PVR values for cpus that
	 * meet various levels of the architecture:
	 *	0x0f000001	Architecture version 2.04
	 *	0x0f000002	Architecture version 2.05
	 * If the cpu-version property in the cpu node contains
	 * such a value, we call identify_cpu again with the
	 * logical PVR value in order to use the cpu feature
	 * bits appropriate for the architecture level.
	 *
	 * A POWER6 partition in "POWER6 architected" mode
	 * uses the 0x0f000002 PVR value; in POWER5+ mode
	 * it uses 0x0f000001.
	 */
	prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
	if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000)
		identify_cpu(0, be32_to_cpup(prop));

	identical_pvr_fixup(node);

	check_cpu_feature_properties(node);
	check_cpu_pa_features(node);
	init_mmu_slb_size(node);

#ifdef CONFIG_PPC64
	if (nthreads > 1)
		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
	else
		cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
#endif
	return 0;
}
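/*
 * Example (illustrative): an SMT2 cpu node containing
 *	device_type = "cpu";
 *	reg = <0x20>;
 *	ibm,ppc-interrupt-server#s = <0x20 0x21>;
 * yields nthreads = 2 above; if fdt_boot_cpuid_phys() returns 0x21,
 * we booted on the second thread and found_thread ends up as 1.
 */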
static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
						const char *uname,
						int depth, void *data)
{
	const unsigned long *lprop; /* All these set by kernel, so no need to convert endian */

	/* Use common scan routine to determine if this is the chosen node */
	if (early_init_dt_scan_chosen(node, uname, depth, data) == 0)
		return 0;

#ifdef CONFIG_PPC64
	/* check if iommu is forced on or off */
	if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
		iommu_is_off = 1;
	if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
		iommu_force_on = 1;
#endif

	/* mem=x on the command line is the preferred mechanism */
	lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
	if (lprop)
		memory_limit = *lprop;

#ifdef CONFIG_PPC64
	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
	if (lprop)
		tce_alloc_start = *lprop;
	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
	if (lprop)
		tce_alloc_end = *lprop;
#endif

#ifdef CONFIG_KEXEC
	lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
	if (lprop)
		crashk_res.start = *lprop;

	lprop = of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
	if (lprop)
		crashk_res.end = crashk_res.start + *lprop - 1;
#endif

	/* break now */
	return 1;
}

#ifdef CONFIG_PPC_PSERIES
/*
 * Interpret the ibm,dynamic-memory property in the
 * /ibm,dynamic-reconfiguration-memory node.
 * This contains a list of memory blocks along with NUMA affinity
 * information.
 */
static int __init early_init_dt_scan_drconf_memory(unsigned long node)
{
	const __be32 *dm, *ls, *usm;
	int l;
	unsigned long n, flags;
	u64 base, size, memblock_size;
	unsigned int is_kexec_kdump = 0, rngs;

	ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
	if (ls == NULL || l < dt_root_size_cells * sizeof(__be32))
		return 0;
	memblock_size = dt_mem_next_cell(dt_root_size_cells, &ls);

	dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
	if (dm == NULL || l < sizeof(__be32))
		return 0;

	n = of_read_number(dm++, 1);	/* number of entries */
	if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(__be32))
		return 0;

	/* check if this is a kexec/kdump kernel. */
	usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory", &l);
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		base = dt_mem_next_cell(dt_root_addr_cells, &dm);
		flags = of_read_number(&dm[3], 1);
		/* skip DRC index, pad, assoc. list index, flags */
		dm += 4;
		/* skip this block if the reserved bit is set in flags
		   or if the block is not assigned to this partition */
		if ((flags & DRCONF_MEM_RESERVED) ||
		    !(flags & DRCONF_MEM_ASSIGNED))
			continue;
		size = memblock_size;
		rngs = 1;
		if (is_kexec_kdump) {
			/*
			 * For each memblock in ibm,dynamic-memory, a
			 * corresponding entry in linux,drconf-usable-memory
			 * contains a counter 'p' followed by 'p' (base, size)
			 * pairs.  Now read the counter from the
			 * linux,drconf-usable-memory property.
			 */
			rngs = dt_mem_next_cell(dt_root_size_cells, &usm);
			if (!rngs) /* there are no (base, size) pairs */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = dt_mem_next_cell(dt_root_addr_cells,
							&usm);
				size = dt_mem_next_cell(dt_root_size_cells,
							&usm);
			}
			if (iommu_is_off) {
				if (base >= 0x80000000ul)
					continue;
				if ((base + size) > 0x80000000ul)
					size = 0x80000000ul - base;
			}
			memblock_add(base, size);
		} while (--rngs);
	}
	memblock_dump_all();
	return 0;
}
#else
#define early_init_dt_scan_drconf_memory(node)	0
#endif /* CONFIG_PPC_PSERIES */
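/*
 * Example layout (illustrative, with dt_root_addr_cells = 2): each
 * ibm,dynamic-memory entry parsed above is six cells,
 *	<base-hi base-lo drc-index pad assoc-index flags>
 * preceded by a single cell holding the number of entries.  In a
 * kexec/kdump kernel, each linux,drconf-usable-memory entry is a
 * counter N followed by N (base, size) pairs.
 */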
static int __init early_init_dt_scan_memory_ppc(unsigned long node,
						const char *uname,
						int depth, void *data)
{
	if (depth == 1 &&
	    strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
		return early_init_dt_scan_drconf_memory(node);

	return early_init_dt_scan_memory(node, uname, depth, data);
}

/*
 * For a relocatable kernel, we need to get the memstart_addr first,
 * then use it to calculate the virtual kernel start address. This has
 * to happen at a very early stage (before machine_init). In this case,
 * we just want to get the memstart_addr and do not want to modify the
 * memblock yet, so introduce a variable to skip the memblock_add().
 */
#ifdef CONFIG_RELOCATABLE
static int add_mem_to_memblock = 1;
#else
#define add_mem_to_memblock 1
#endif

void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
#ifdef CONFIG_PPC64
	if (iommu_is_off) {
		if (base >= 0x80000000ul)
			return;
		if ((base + size) > 0x80000000ul)
			size = 0x80000000ul - base;
	}
#endif
	/* Keep track of the beginning of memory -and- the size of
	 * the very first block in the device-tree as it represents
	 * the RMA on ppc64 server
	 */
	if (base < memstart_addr) {
		memstart_addr = base;
		first_memblock_size = size;
	}

	/* Add the chunk to the MEMBLOCK list */
	if (add_mem_to_memblock)
		memblock_add(base, size);
}

static void __init early_reserve_mem_dt(void)
{
	unsigned long i, dt_root;
	int len;
	const __be32 *prop;

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	dt_root = of_get_flat_dt_root();

	prop = of_get_flat_dt_prop(dt_root, "reserved-ranges", &len);

	if (!prop)
		return;

	DBG("Found new-style reserved-ranges\n");

	/* Each reserved range is an (address,size) pair, 2 cells each,
	 * totalling 4 cells per range. */
	for (i = 0; i < len / (sizeof(*prop) * 4); i++) {
		u64 base, size;

		base = of_read_number(prop + (i * 4) + 0, 2);
		size = of_read_number(prop + (i * 4) + 2, 2);

		if (size) {
			DBG("reserving: %llx -> %llx\n", base, size);
			memblock_reserve(base, size);
		}
	}
}
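/*
 * Example (illustrative): with the 2 + 2 cell layout assumed above, a
 * property of
 *	reserved-ranges = <0x0 0x01000000 0x0 0x00010000>;
 * makes early_reserve_mem_dt() reserve 64kB starting at 16MB
 * (base 0x1000000, size 0x10000).
 */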
static void __init early_reserve_mem(void)
{
	__be64 *reserve_map;

	reserve_map = (__be64 *)(((unsigned long)initial_boot_params) +
			fdt_off_mem_rsvmap(initial_boot_params));

	/* Look for the new "reserved-ranges" property in the DT */
	early_reserve_mem_dt();

#ifdef CONFIG_BLK_DEV_INITRD
	/* Then reserve the initrd, if any */
	if (initrd_start && (initrd_end > initrd_start)) {
		memblock_reserve(_ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
			_ALIGN_UP(initrd_end, PAGE_SIZE) -
			_ALIGN_DOWN(initrd_start, PAGE_SIZE));
	}
#endif /* CONFIG_BLK_DEV_INITRD */

#ifdef CONFIG_PPC32
	/*
	 * Handle the case where we might be booting from an old kexec
	 * image that set up the mem_rsvmap as pairs of 32-bit values
	 */
	if (be64_to_cpup(reserve_map) > 0xffffffffull) {
		u32 base_32, size_32;
		__be32 *reserve_map_32 = (__be32 *)reserve_map;

		DBG("Found old 32-bit reserve map\n");

		while (1) {
			base_32 = be32_to_cpup(reserve_map_32++);
			size_32 = be32_to_cpup(reserve_map_32++);
			if (size_32 == 0)
				break;
			DBG("reserving: %x -> %x\n", base_32, size_32);
			memblock_reserve(base_32, size_32);
		}
		return;
	}
#endif
}
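/*
 * Example (illustrative): a legacy 32-bit map whose first entry is
 * { base = 0x01c00000, size = 0x00400000 } occupies the first eight
 * bytes of mem_rsvmap; read as a single __be64 those bytes give
 * 0x01c0000000400000, which is > 0xffffffff, so the 32-bit path
 * above is taken and 4MB at 28MB gets reserved.
 */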
void __init early_init_devtree(void *params)
{
	phys_addr_t limit;

	DBG(" -> early_init_devtree(%p)\n", params);

	/* Too early to BUG_ON(), do it by hand */
	if (!early_init_dt_verify(params))
		panic("BUG: Failed verifying flat device tree, bad version?");

#ifdef CONFIG_PPC_RTAS
	/* Some machines might need RTAS info for debugging, grab it now. */
	of_scan_flat_dt(early_init_dt_scan_rtas, NULL);
#endif

#ifdef CONFIG_PPC_POWERNV
	/* Some machines might need OPAL info for debugging, grab it now. */
	of_scan_flat_dt(early_init_dt_scan_opal, NULL);
#endif

#ifdef CONFIG_FA_DUMP
	/* scan tree to see if dump is active during last boot */
	of_scan_flat_dt(early_init_dt_scan_fw_dump, NULL);
#endif

	/* Retrieve various pieces of information from the /chosen node of
	 * the device-tree, including the platform type, initrd location and
	 * size, TCE reserve, and more ...
	 */
	of_scan_flat_dt(early_init_dt_scan_chosen_ppc, boot_command_line);

	/* Scan memory nodes and rebuild MEMBLOCKs */
	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);

	parse_early_param();

	/* make sure we've parsed cmdline for mem= before this */
	if (memory_limit)
		first_memblock_size = min_t(u64, first_memblock_size, memory_limit);
	setup_initial_memory_limit(memstart_addr, first_memblock_size);
	/* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
	memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
	/* If relocatable, reserve first 32k for interrupt vectors etc. */
	if (PHYSICAL_START > MEMORY_START)
		memblock_reserve(MEMORY_START, 0x8000);
	reserve_kdump_trampoline();
#ifdef CONFIG_FA_DUMP
	/*
	 * If we fail to reserve memory for firmware-assisted dump then
	 * fall back to kexec based kdump.
	 */
	if (fadump_reserve_mem() == 0)
#endif
		reserve_crashkernel();
	early_reserve_mem();

	/* Ensure that total memory size is page-aligned. */
	limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE);
	memblock_enforce_memory_limit(limit);

	memblock_allow_resize();
	memblock_dump_all();

	DBG("Phys. mem: %llx\n", memblock_phys_mem_size());

	/* We may need to relocate the flat tree, do it now.
	 * FIXME .. and the initrd too? */
	move_device_tree();

	allocate_pacas();

	DBG("Scanning CPUs ...\n");

	/* Retrieve CPU related information from the flat tree
	 * (altivec support, boot CPU ID, ...)
	 */
	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
	if (boot_cpuid < 0) {
		printk("Failed to identify boot CPU!\n");
		BUG();
	}

#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
	/* We'll later wait for secondaries to check in; there are
	 * NCPUS-1 non-boot CPUs  :-)
	 */
	spinning_secondaries = boot_cpu_count - 1;
#endif

#ifdef CONFIG_PPC_POWERNV
	/* Scan and build the list of machine check recoverable ranges */
	of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL);
#endif

	DBG(" <- early_init_devtree()\n");
}

#ifdef CONFIG_RELOCATABLE
/*
 * This function runs before early_init_devtree, so we have to init
 * initial_boot_params.
 */
void __init early_get_first_memblock_info(void *params, phys_addr_t *size)
{
	/* Setup flat device-tree pointer */
	initial_boot_params = params;

	/*
	 * Scan the memory nodes with add_mem_to_memblock set to 0 so
	 * that the memblock is left untouched.
	 */
	add_mem_to_memblock = 0;
	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
	add_mem_to_memblock = 1;

	if (size)
		*size = first_memblock_size;
}
#endif

/*******
 *
 * New implementation of the OF "find" APIs, return a refcounted
 * object, call of_node_put() when done.  The device tree and list
 * are protected by a rw_lock.
 *
 * Note that property management will need some locking as well,
 * this isn't dealt with yet.
 *
 *******/

/**
 * of_get_ibm_chip_id - Returns the IBM "chip-id" of a device
 * @np: device node of the device
 *
 * This looks for a property "ibm,chip-id" in the node or any
 * of its parents and returns its content, or -1 if it cannot
 * be found.
 */
int of_get_ibm_chip_id(struct device_node *np)
{
	of_node_get(np);
	while (np) {
		u32 chip_id;

		/*
		 * Skiboot may produce memory nodes that contain more than one
		 * cell in chip-id, we only read the first one here.
		 */
		if (!of_property_read_u32(np, "ibm,chip-id", &chip_id)) {
			of_node_put(np);
			return chip_id;
		}

		np = of_get_next_parent(np);
	}
	return -1;
}
EXPORT_SYMBOL(of_get_ibm_chip_id);
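/*
 * Example (illustrative): on PowerNV, firmware typically puts
 *	ibm,chip-id = <0x0>;
 * on nodes such as cpu or xscom nodes; a node without its own copy
 * picks the value up from the nearest parent that carries one.
 */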
/**
 * cpu_to_chip_id - Return the cpu's chip-id
 * @cpu: The logical cpu number.
 *
 * Return the value of the ibm,chip-id property corresponding to the given
 * logical cpu number. If the chip-id cannot be found, returns -1.
 */
int cpu_to_chip_id(int cpu)
{
	struct device_node *np;
	int chip_id;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		return -1;

	/* Drop our reference only once we are done with the node */
	chip_id = of_get_ibm_chip_id(np);
	of_node_put(np);

	return chip_id;
}
EXPORT_SYMBOL(cpu_to_chip_id);

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return (int)phys_id == get_hard_smp_processor_id(cpu);
}