/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *,
			      struct proc_info_list *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram	mem_res[0]
#define kernel_code	mem_res[1]
#define kernel_data	mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

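/*
 * Decode the CPU architecture version.  v7-M parts are fixed at ARMv7M;
 * everything else is derived from the main ID register layout, falling
 * back to the VMSA/PMSA fields of ID_MMFR0 for cores that implement the
 * revised CPUID scheme.
 */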
#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

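/*
 * An instruction cache can alias when one way spans more than a page,
 * i.e. (line size * number of sets) > PAGE_SIZE.  Work this out from
 * CCSIDR on ARMv7, or from the aliasing bit in the ARMv6 cache type
 * register.
 */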
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

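/*
 * Decode the cache type register and record the D-cache and I-cache
 * types in the global 'cacheid' mask used by the cache maintenance code.
 */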
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch == CPU_ARCH_ARMv7M) {
		cacheid = 0;
	} else if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

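/*
 * Derive additional ELF hwcaps from the CPUID feature registers:
 * hardware integer divide (ID_ISAR0), LPAE (ID_MMFR0) and the v8
 * Crypto/CRC32 extensions (ID_ISAR5).
 */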
static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24-bit MPIDR value space into a
	 * much smaller set of index values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
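	/*
	 * For example, with MPIDRs {0x000, 0x001, 0x100, 0x101} the mask
	 * is 0x101, bits[] = {1, 1, 0} and shift_aff[] = {0, 7, 14}, so
	 * the four CPUs hash to the distinct indices 0..3 of a 2-bit
	 * (four entry) table.
	 */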
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
				mpidr_hash.shift_aff[0],
				mpidr_hash.shift_aff[1],
				mpidr_hash.shift_aff[2],
				mpidr_hash.mask,
				mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

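/*
 * Identify the boot CPU: look up its proc_info entry, latch the
 * architecture version, copy the per-CPU-type function tables and set
 * up the initial hwcaps, cache id and exception stacks.
 */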
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

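/*
 * Register a block of RAM with memblock, after page-aligning it and
 * clipping it to the supported physical address range and PHYS_OFFSET.
 */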
int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure aligned_start + size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather
		 * than 4GB.  This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Drop the region if alignment has reduced it to zero size.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
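/*
 * e.g. "mem=64M@0x80000000" tells the kernel to use 64MB of RAM
 * starting at physical address 0x80000000.
 */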

static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end = virt_to_phys(_etext - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = memblock_virt_alloc(sizeof(*res), 0);
		res->name = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines never have the lp0, lp1 or lp2 parallel port
	 * regions, so only claim them when the machine description
	 * asks for it.
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * Customize platform devices, or add new ones.  On DT based
	 * machines, we fall back to populating the machine from the
	 * device tree when no init_machine callback is provided;
	 * otherwise every machine would need one.
	 */
	of_iommu_init();
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
				     NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserve memory area for the crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by a dump capture
 * kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

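/*
 * setup_arch() is the architecture specific boot-time entry point: it
 * identifies the CPU and machine, parses the early parameters, sets up
 * the memory map and page tables, registers the standard resources and
 * prepares SMP and the interrupt handling hooks.
 */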
void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
	setup_dma_zone(mdesc);
	sanity_check_meminfo();
	arm_memblock_init(mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

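/*
 * /proc/cpuinfo show routine: one stanza per online CPU, followed by
 * the machine's Hardware/Revision/Serial lines.
 */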
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};