/*
 * Copyright (C) 2005-2012 Imagination Technologies Ltd.
 *
 * This file contains the architecture-dependent parts of system setup.
 *
 */

#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/pfn.h>
#include <linux/root_dev.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/start_kernel.h>
#include <linux/string.h>

#include <asm/cachepart.h>
#include <asm/clock.h>
#include <asm/core_reg.h>
#include <asm/cpu.h>
#include <asm/da.h>
#include <asm/highmem.h>
#include <asm/hwthread.h>
#include <asm/l2cache.h>
#include <asm/mach/arch.h>
#include <asm/metag_mem.h>
#include <asm/metag_regs.h>
#include <asm/mmu.h>
#include <asm/mmzone.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/traps.h>

/* Priv protect as many registers as possible. */
#define DEFAULT_PRIV	(TXPRIVEXT_COPRO_BITS		| \
			 TXPRIVEXT_TXTRIGGER_BIT	| \
			 TXPRIVEXT_TXGBLCREG_BIT	| \
			 TXPRIVEXT_ILOCK_BIT		| \
			 TXPRIVEXT_TXITACCYC_BIT	| \
			 TXPRIVEXT_TXDIVTIME_BIT	| \
			 TXPRIVEXT_TXAMAREGX_BIT	| \
			 TXPRIVEXT_TXTIMERI_BIT		| \
			 TXPRIVEXT_TXSTATUS_BIT		| \
			 TXPRIVEXT_TXDISABLE_BIT)

/* Meta2 specific bits. */
#ifdef CONFIG_METAG_META12
#define META2_PRIV	0
#else
#define META2_PRIV	(TXPRIVEXT_TXTIMER_BIT		| \
			 TXPRIVEXT_TRACE_BIT)
#endif

/* Unaligned access checking bits. */
#ifdef CONFIG_METAG_UNALIGNED
#define UNALIGNED_PRIV	TXPRIVEXT_ALIGNREW_BIT
#else
#define UNALIGNED_PRIV	0
#endif

#define PRIV_BITS	(DEFAULT_PRIV			| \
			 META2_PRIV			| \
			 UNALIGNED_PRIV)

/*
 * Protect access to:
 * 0x06000000-0x07ffffff Direct mapped region
 * 0x05000000-0x05ffffff MMU table region (Meta1)
 * 0x04400000-0x047fffff Cache flush region
 * 0x84000000-0x87ffffff Core cache memory region (Meta2)
 *
 * Allow access to:
 * 0x80000000-0x81ffffff Core code memory region (Meta2)
 */
#ifdef CONFIG_METAG_META12
#define PRIVSYSR_BITS	TXPRIVSYSR_ALL_BITS
#else
#define PRIVSYSR_BITS	(TXPRIVSYSR_ALL_BITS & ~TXPRIVSYSR_CORECODE_BIT)
#endif

/* Protect all 0x02xxxxxx and 0x048xxxxx. */
#define PIOREG_BITS	0xffffffff

/*
 * Protect all 0x04000xx0 (system events)
 * except write combiner flush and write fence (system events 4 and 5).
 */
#define PSYREG_BITS	0xfffffffb


extern char _heap_start[];

#ifdef CONFIG_DA_CONSOLE
/* Our early channel based console driver */
extern struct console dash_console;
#endif

const struct machine_desc *machine_desc __initdata;

/*
 * Map a Linux CPU number to a hardware thread ID.
 * In SMP this will be set up with the correct mapping at startup; in UP this
 * will map to the HW thread on which we are running.
 */
u8 cpu_2_hwthread_id[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = BAD_HWTHREAD_ID
};
EXPORT_SYMBOL_GPL(cpu_2_hwthread_id);

/*
 * Map a hardware thread ID to a Linux CPU number.
 * In SMP this will be fleshed out with the correct CPU ID for a particular
 * hardware thread. In UP this will be initialised with the boot CPU ID.
 */
u8 hwthread_id_2_cpu[4] __read_mostly = {
	[0 ... 3] = BAD_CPU_ID
};

/* The relative offset of the MMU mapped memory (from ldlk or bootloader)
 * to the real physical memory.  This is needed as we have to use the
 * physical addresses in the MMU tables (pte entries), and not the virtual
 * addresses.
 * This variable is used in the __pa() and __va() macros, and should
 * probably only be used via them.
 */
unsigned int meta_memoffset;
EXPORT_SYMBOL(meta_memoffset);
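
/*
 * Roughly speaking (see the real __pa()/__va() definitions in asm/page.h
 * for the exact form):
 *
 *	__pa(vaddr) ~= (unsigned long)(vaddr) - meta_memoffset
 *	__va(paddr) ~= (void *)((unsigned long)(paddr) + meta_memoffset)
 *
 * i.e. meta_memoffset is the (virtual - physical) delta of the kernel
 * mapping, computed in setup_arch() below.
 */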

static char __initdata *original_cmd_line;

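/*
 * Per-CPU pointer to this hardware thread's Thread Binary Interface (TBI)
 * block.  It is filled in for the boot CPU in setup_arch() below and is
 * exported to other code through pTBI_get().
 */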
DEFINE_PER_CPU(PTBI, pTBI);

/*
 * Mappings are specified as "CPU_ID:HWTHREAD_ID", e.g.
 *
 *	"hwthread_map=0:1,1:2,2:3,3:0"
 *
 *	Linux CPU ID	HWTHREAD_ID
 *	---------------------------
 *	    0		      1
 *	    1		      2
 *	    2		      3
 *	    3		      0
 */
static int __init parse_hwthread_map(char *p)
{
	int cpu;

	while (*p) {
		cpu = (*p++) - '0';
		if (cpu < 0 || cpu > 9)
			goto err_cpu;

		p++;		/* skip colon */
		cpu_2_hwthread_id[cpu] = (*p++) - '0';
		if (cpu_2_hwthread_id[cpu] >= 4)
			goto err_thread;
		hwthread_id_2_cpu[cpu_2_hwthread_id[cpu]] = cpu;

		if (*p == ',')
			p++;		/* skip comma */
	}

	return 0;
err_cpu:
	pr_err("%s: hwthread_map cpu argument out of range\n", __func__);
	return -EINVAL;
err_thread:
	pr_err("%s: hwthread_map thread argument out of range\n", __func__);
	return -EINVAL;
}
early_param("hwthread_map", parse_hwthread_map);

void __init dump_machine_table(void)
{
	struct machine_desc *p;
	const char **compat;

	pr_info("Available machine support:\n\tNAME\t\tCOMPATIBLE LIST\n");
	for_each_machine_desc(p) {
		pr_info("\t%s\t[", p->name);
		for (compat = p->dt_compat; compat && *compat; ++compat)
			printk(" '%s'", *compat);
		printk(" ]\n");
	}

	pr_info("\nPlease check your kernel config and/or bootloader.\n");

	hard_processor_halt(HALT_PANIC);
}

#ifdef CONFIG_METAG_HALT_ON_PANIC
static int metag_panic_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	hard_processor_halt(HALT_PANIC);
	return NOTIFY_DONE;
}

static struct notifier_block metag_panic_block = {
	.notifier_call	= metag_panic_event,
};
#endif

void __init setup_arch(char **cmdline_p)
{
	unsigned long start_pfn;
	unsigned long text_start = (unsigned long)(&_stext);
	unsigned long cpu = smp_processor_id();
	unsigned long heap_start, heap_end;
	unsigned long start_pte;
	PTBI _pTBI;
	PTBISEG p_heap;
	int heap_id, i;

	metag_cache_probe();

	metag_da_probe();
#ifdef CONFIG_DA_CONSOLE
	if (metag_da_enabled()) {
		/* An early channel based console driver */
		register_console(&dash_console);
		add_preferred_console("ttyDA", 1, NULL);
	}
#endif

	/* try interpreting the argument as a device tree */
	machine_desc = setup_machine_fdt(original_cmd_line);
	/* if it doesn't look like a device tree it must be a command line */
	if (!machine_desc) {
#ifdef CONFIG_METAG_BUILTIN_DTB
		/* try the embedded device tree */
		machine_desc = setup_machine_fdt(__dtb_start);
		if (!machine_desc)
			panic("Invalid embedded device tree.");
#else
		/* use the default machine description */
		machine_desc = default_machine_desc();
#endif
#ifndef CONFIG_CMDLINE_FORCE
		/* append the bootloader cmdline to any builtin fdt cmdline */
		if (boot_command_line[0] && original_cmd_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, original_cmd_line,
			COMMAND_LINE_SIZE);
#endif
	}
	setup_meta_clocks(machine_desc->clocks);

	*cmdline_p = boot_command_line;
	parse_early_param();

	/*
	 * Make sure we don't alias in dcache or icache
	 */
	check_for_cache_aliasing(cpu);


#ifdef CONFIG_METAG_HALT_ON_PANIC
	atomic_notifier_chain_register(&panic_notifier_list,
				       &metag_panic_block);
#endif

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	if (!(__core_reg_get(TXSTATUS) & TXSTATUS_PSTAT_BIT))
		panic("Privilege must be enabled for this thread.");

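	/*
	 * Ask the TBI library for this hardware thread's TBI block pointer;
	 * the interrupt/trap handlers are hooked up through it below.
	 */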
	_pTBI = __TBI(TBID_ISTAT_BIT);

	per_cpu(pTBI, cpu) = _pTBI;

	if (!per_cpu(pTBI, cpu))
		panic("No TBI found!");

	/*
	 * Initialize all interrupt vectors to our copy of __TBIUnExpXXX,
	 * rather than the version from the bootloader. This makes call
	 * stacks easier to understand and may allow us to unmap the
	 * bootloader at some point.
	 */
	for (i = 0; i <= TBID_SIGNUM_MAX; i++)
		_pTBI->fnSigs[i] = __TBIUnExpXXX;

	/* A Meta requirement is that the kernel is loaded (virtually)
	 * at PAGE_OFFSET.
	 */
	if (PAGE_OFFSET != text_start)
		panic("Kernel not loaded at PAGE_OFFSET (%#x) but at %#lx.",
		      PAGE_OFFSET, text_start);

	start_pte = mmu_read_second_level_page(text_start);

	/*
	 * Kernel pages should have the PRIV bit set by the bootloader.
	 */
	if (!(start_pte & _PAGE_KERNEL))
		panic("kernel pte does not have PRIV set");

	/*
	 * See __pa and __va in include/asm/page.h.
	 * This value is negative when running in local space but the
	 * calculations work anyway.
	 */
	meta_memoffset = text_start - (start_pte & PAGE_MASK);

	/* Now let's look at the heap space */
	heap_id = (__TBIThreadId() & TBID_THREAD_BITS)
		+ TBID_SEG(0, TBID_SEGSCOPE_LOCAL, TBID_SEGTYPE_HEAP);

	p_heap = __TBIFindSeg(NULL, heap_id);

	if (!p_heap)
		panic("Could not find heap from TBI!");

	/* The heap begins at the first full page after the kernel data. */
	heap_start = (unsigned long) &_heap_start;

	/* The heap ends at the end of the heap segment specified with
	 * ldlk.
	 */
	if (is_global_space(text_start)) {
		pr_debug("WARNING: running in global space!\n");
		heap_end = (unsigned long)p_heap->pGAddr + p_heap->Bytes;
	} else {
		heap_end = (unsigned long)p_heap->pLAddr + p_heap->Bytes;
	}

	ROOT_DEV = Root_RAM0;

	/* init_mm is the mm struct used for the first task.  It is then
	 * cloned for all other tasks spawned from that task.
	 *
	 * Note - we are using the virtual addresses here.
	 */
	init_mm.start_code = (unsigned long)(&_stext);
	init_mm.end_code = (unsigned long)(&_etext);
	init_mm.end_data = (unsigned long)(&_edata);
	init_mm.brk = (unsigned long)heap_start;

	min_low_pfn = PFN_UP(__pa(text_start));
	max_low_pfn = PFN_DOWN(__pa(heap_end));

	pfn_base = min_low_pfn;

	/* Round max_pfn up to a 4MB boundary. The free_bootmem_node()
	 * call later makes sure to keep the rounded-up pages marked reserved.
	 */
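	/*
	 * For example, with 4 KiB pages and MAX_ORDER == 10 this rounds
	 * max_pfn up to the next multiple of 1024 pages, i.e. 4 MiB.
	 */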
	max_pfn = max_low_pfn + ((1 << MAX_ORDER) - 1);
	max_pfn &= ~((1 << MAX_ORDER) - 1);

	start_pfn = PFN_UP(__pa(heap_start));

	if (min_low_pfn & ((1 << MAX_ORDER) - 1)) {
		/* Theoretically, we could expand the space that the
		 * bootmem allocator covers - much as we do for the
		 * 'high' address, and then tell the bootmem system
		 * that the lowest chunk is 'not available'.  Right
		 * now it is just much easier to constrain the
		 * user to always MAX_ORDER align their kernel space.
		 */

		panic("Kernel must be %d byte aligned, currently at %#lx.",
		      1 << (MAX_ORDER + PAGE_SHIFT),
		      min_low_pfn << PAGE_SHIFT);
	}

#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	high_memory = (void *) __va(PFN_PHYS(highstart_pfn));
#else
	high_memory = (void *)__va(PFN_PHYS(max_pfn));
#endif

	paging_init(heap_end);

	setup_priv();

	/* Set up the boot CPU's mapping. The rest will be set up below. */
	cpu_2_hwthread_id[smp_processor_id()] = hard_processor_id();
	hwthread_id_2_cpu[hard_processor_id()] = smp_processor_id();

	unflatten_and_copy_device_tree();

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	if (machine_desc->init_early)
		machine_desc->init_early();
}

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
	else
		of_platform_populate(NULL, of_default_bus_match_table, NULL,
				     NULL);
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_PROC_FS
/*
 *	Get CPU information for use by the procfs.
 */
static const char *get_cpu_capabilities(unsigned int txenable)
{
#ifdef CONFIG_METAG_META21
	/* See CORE_ID in META HTP.GP TRM - Architecture Overview 2.1.238 */
	int coreid = metag_in32(METAC_CORE_ID);
	unsigned int dsp_type = (coreid >> 3) & 7;
	unsigned int fpu_type = (coreid >> 7) & 3;

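	/*
	 * Combine the two fields into a single switch value: bits 0-2 hold
	 * the DSP type and bits 3-4 hold the FPU type, both from CORE_ID.
	 */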
	switch (dsp_type | fpu_type << 3) {
	case (0x00): return "EDSP";
	case (0x01): return "DSP";
	case (0x08): return "EDSP+LFPU";
	case (0x09): return "DSP+LFPU";
	case (0x10): return "EDSP+FPU";
	case (0x11): return "DSP+FPU";
	}
	return "UNKNOWN";

#else
	if (!(txenable & TXENABLE_CLASS_BITS))
		return "DSP";
	else
		return "";
#endif
}

static int show_cpuinfo(struct seq_file *m, void *v)
{
	const char *cpu;
	unsigned int txenable, thread_id, major, minor;
	unsigned long clockfreq = get_coreclock();
#ifdef CONFIG_SMP
	int i;
	unsigned long lpj;
#endif

	cpu = "META";

	txenable = __core_reg_get(TXENABLE);
	major = (txenable & TXENABLE_MAJOR_REV_BITS) >> TXENABLE_MAJOR_REV_S;
	minor = (txenable & TXENABLE_MINOR_REV_BITS) >> TXENABLE_MINOR_REV_S;
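	/* The hardware thread number lives in bits 9:8 of TXENABLE. */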
	thread_id = (txenable >> 8) & 0x3;

#ifdef CONFIG_SMP
	for_each_online_cpu(i) {
		lpj = per_cpu(cpu_data, i).loops_per_jiffy;
		txenable = core_reg_read(TXUCT_ID, TXENABLE_REGNUM,
							cpu_2_hwthread_id[i]);

		seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n"
			      "Clocking:\t%lu.%1luMHz\n"
			      "BogoMips:\t%lu.%02lu\n"
			      "Calibration:\t%lu loops\n"
			      "Capabilities:\t%s\n\n",
			      cpu, major, minor, i,
			      clockfreq / 1000000, (clockfreq / 100000) % 10,
			      lpj / (500000 / HZ), (lpj / (5000 / HZ)) % 100,
			      lpj,
			      get_cpu_capabilities(txenable));
	}
#else
	seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n"
		   "Clocking:\t%lu.%1luMHz\n"
		   "BogoMips:\t%lu.%02lu\n"
		   "Calibration:\t%lu loops\n"
		   "Capabilities:\t%s\n",
		   cpu, major, minor, thread_id,
		   clockfreq / 1000000, (clockfreq / 100000) % 10,
		   loops_per_jiffy / (500000 / HZ),
		   (loops_per_jiffy / (5000 / HZ)) % 100,
		   loops_per_jiffy,
		   get_cpu_capabilities(txenable));
#endif /* CONFIG_SMP */

#ifdef CONFIG_METAG_L2C
	if (meta_l2c_is_present()) {
		seq_printf(m, "L2 cache:\t%s\n"
			      "L2 cache size:\t%d KB\n",
			      meta_l2c_is_enabled() ? "enabled" : "disabled",
			      meta_l2c_size() >> 10);
	}
#endif
	return 0;
}

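/*
 * Minimal seq_file iterator for /proc/cpuinfo: there is only a single
 * record, so c_start() yields a non-NULL token for position 0 only and
 * c_next() always ends the iteration.
 */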
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return (void *)(*pos == 0);
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	return NULL;
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next  = c_next,
	.stop  = c_stop,
	.show  = show_cpuinfo,
};
#endif /* CONFIG_PROC_FS */

void __init metag_start_kernel(char *args)
{
	/* Zero the timer register so timestamps are from the point at
	 * which the kernel started running.
	 */
	__core_reg_set(TXTIMER, 0);

	/* Clear the bss. */
	memset(__bss_start, 0,
	       (unsigned long)__bss_stop - (unsigned long)__bss_start);

	/* Remember the boot argument (cmdline or DTB pointer) for use in setup_arch() */
	original_cmd_line = args;

	current_thread_info()->cpu = hard_processor_id();

	start_kernel();
}

/**
 * setup_priv() - Set up privilege protection registers.
 *
 * Set up privilege protection registers such as TXPRIVEXT to prevent userland
 * from touching our precious registers and sensitive memory areas.
 */
void setup_priv(void)
{
	unsigned int offset = hard_processor_id() << TXPRIVREG_STRIDE_S;

	__core_reg_set(TXPRIVEXT, PRIV_BITS);

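	/*
	 * The memory-mapped privilege registers for this thread sit at a
	 * fixed stride above the thread 0 (T0*) register addresses, hence
	 * the offset computed above.
	 */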
	metag_out32(PRIVSYSR_BITS, T0PRIVSYSR + offset);
	metag_out32(PIOREG_BITS,   T0PIOREG   + offset);
	metag_out32(PSYREG_BITS,   T0PSYREG   + offset);
}

PTBI pTBI_get(unsigned int cpu)
{
	return per_cpu(pTBI, cpu);
}
EXPORT_SYMBOL(pTBI_get);

#if defined(CONFIG_METAG_DSP) && defined(CONFIG_METAG_FPU)
static char capabilities[] = "dsp fpu";
#elif defined(CONFIG_METAG_DSP)
static char capabilities[] = "dsp";
#elif defined(CONFIG_METAG_FPU)
static char capabilities[] = "fpu";
#else
static char capabilities[] = "";
#endif

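/*
 * Expose the capability string above through sysctl as
 * /proc/sys/kernel/capabilities (read-only).
 */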
static struct ctl_table caps_kern_table[] = {
	{
		.procname	= "capabilities",
		.data		= capabilities,
		.maxlen		= sizeof(capabilities),
		.mode		= 0444,
		.proc_handler	= proc_dostring,
	},
	{}
};

static struct ctl_table caps_root_table[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= caps_kern_table,
	},
	{}
};

static int __init capabilities_register_sysctl(void)
{
	struct ctl_table_header *caps_table_header;

	caps_table_header = register_sysctl_table(caps_root_table);
	if (!caps_table_header) {
		pr_err("Unable to register CAPABILITIES sysctl\n");
		return -ENOMEM;
	}

	return 0;
}

core_initcall(capabilities_register_sysctl);
