/*
 *  ARM64 Specific Low-Level ACPI Boot Support
 *
 *  Copyright (C) 2013-2014, Linaro Ltd.
 *	Author: Al Stone <al.stone@linaro.org>
 *	Author: Graeme Gregory <graeme.gregory@linaro.org>
 *	Author: Hanjun Guo <hanjun.guo@linaro.org>
 *	Author: Tomasz Nowicki <tomasz.nowicki@linaro.org>
 *	Author: Naresh Bhat <naresh.bhat@linaro.org>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/smp.h>

#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/smp_plat.h>

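/*
 * ACPI is disabled by default on arm64; acpi_boot_table_init() below
 * enables it only when the firmware-provided tables pass the sanity
 * checks, or when acpi=force is given on the command line.
 */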
int acpi_noirq = 1;		/* skip ACPI IRQ initialization */
int acpi_disabled = 1;
EXPORT_SYMBOL(acpi_disabled);

int acpi_pci_disabled = 1;	/* skip ACPI PCI scan and IRQ initialization */
EXPORT_SYMBOL(acpi_pci_disabled);

/* Processors with enabled flag and sane MPIDR */
static int enabled_cpus;

/* Whether the boot CPU's MPIDR was found in the MADT */
static bool bootcpu_valid __initdata;

static bool param_acpi_off __initdata;
static bool param_acpi_force __initdata;

static int __init parse_acpi(char *arg)
{
	if (!arg)
		return -EINVAL;

	/* "acpi=off" disables both ACPI table parsing and interpreter */
	if (strcmp(arg, "off") == 0)
		param_acpi_off = true;
	else if (strcmp(arg, "force") == 0) /* force ACPI to be enabled */
		param_acpi_force = true;
	else
		return -EINVAL;	/* Core will print when we return error */

	return 0;
}
early_param("acpi", parse_acpi);
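
/*
 * Example usage on the kernel command line:
 *   acpi=off	never use ACPI, even if tables are present
 *   acpi=force	use ACPI even when a non-trivial device tree was provided
 * With no "acpi=" option, ACPI is used only when the device tree contains
 * nothing but a /chosen node (see acpi_boot_table_init() below).
 */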

static int __init dt_scan_depth1_nodes(unsigned long node,
				       const char *uname, int depth,
				       void *data)
{
	/*
	 * Return 1 as soon as we encounter a node at depth 1 that is
	 * not the /chosen node.
	 */
	if (depth == 1 && (strcmp(uname, "chosen") != 0))
		return 1;
	return 0;
}

/*
 * __acpi_map_table() will be called before paging_init(), so early_ioremap()
 * or early_memremap() should be used here for the ACPI table mapping.
 */
char *__init __acpi_map_table(unsigned long phys, unsigned long size)
{
	if (!size)
		return NULL;

	return early_memremap(phys, size);
}

void __init __acpi_unmap_table(char *map, unsigned long size)
{
	if (!map || !size)
		return;

	early_memunmap(map, size);
}

/**
 * acpi_map_gic_cpu_interface - generate a logical CPU number and map it
 * to the MPIDR described by the GICC structure
 * @processor: GICC entry parsed from the MADT
 */
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
	int i;
	u64 mpidr = processor->arm_mpidr & MPIDR_HWID_BITMASK;
	bool enabled = !!(processor->flags & ACPI_MADT_ENABLED);

	if (mpidr == INVALID_HWID) {
		pr_info("Skip MADT cpu entry with invalid MPIDR\n");
		return;
	}

	total_cpus++;
	if (!enabled)
		return;

	if (enabled_cpus >= NR_CPUS) {
		pr_warn("NR_CPUS limit of %d reached, Processor %d/0x%llx ignored.\n",
			NR_CPUS, total_cpus, mpidr);
		return;
	}

	/* Check if GICC structure of boot CPU is available in the MADT */
	if (cpu_logical_map(0) == mpidr) {
		if (bootcpu_valid) {
			pr_err("Firmware bug, duplicate CPU MPIDR: 0x%llx in MADT\n",
			       mpidr);
			return;
		}

		bootcpu_valid = true;
	}

	/*
	 * Duplicate MPIDRs are a recipe for disaster. Scan
	 * all initialized entries and check for
	 * duplicates. If any is found just ignore the CPU.
	 */
	for (i = 1; i < enabled_cpus; i++) {
		if (cpu_logical_map(i) == mpidr) {
			pr_err("Firmware bug, duplicate CPU MPIDR: 0x%llx in MADT\n",
			       mpidr);
			return;
		}
	}

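	/*
	 * Secondary CPUs are only brought up via PSCI here, so there is
	 * nothing more to do if firmware did not advertise PSCI support.
	 */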
	if (!acpi_psci_present())
		return;

	cpu_ops[enabled_cpus] = cpu_get_ops("psci");
	/* CPU 0 was already initialized */
	if (enabled_cpus) {
		if (!cpu_ops[enabled_cpus])
			return;

		if (cpu_ops[enabled_cpus]->cpu_init(NULL, enabled_cpus))
			return;

		/* map the logical cpu id to cpu MPIDR */
		cpu_logical_map(enabled_cpus) = mpidr;
	}

	enabled_cpus++;
}

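/*
 * MADT parsing callback: validate each GICC subtable and hand it to
 * acpi_map_gic_cpu_interface() to assign a logical CPU number.
 */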
static int __init
acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
				const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;

	processor = (struct acpi_madt_generic_interrupt *)header;

	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);
	acpi_map_gic_cpu_interface(processor);
	return 0;
}

/* Parse GIC cpu interface entries in MADT for SMP init */
void __init acpi_init_cpus(void)
{
	int count, i;

	/*
	 * do a partial walk of MADT to determine how many CPUs
	 * we have including disabled CPUs, and get information
	 * we need for SMP init
	 */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
			acpi_parse_gic_cpu_interface, 0);

	if (!count) {
		pr_err("No GIC CPU interface entries present\n");
		return;
	} else if (count < 0) {
		pr_err("Error parsing GIC CPU interface entry\n");
		return;
	}

	if (!bootcpu_valid) {
		pr_err("MADT missing boot CPU MPIDR, not enabling secondaries\n");
		return;
	}

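	/*
	 * Every CPU that was assigned a logical id above can be booted
	 * later, so mark those as possible.
	 */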
	for (i = 0; i < enabled_cpus; i++)
		set_cpu_possible(i, true);

	/* Make boot-up look pretty */
	pr_info("%d CPUs enabled, %d CPUs total\n", enabled_cpus, total_cpus);
}

/*
 * acpi_fadt_sanity_check() - Check FADT presence and carry out sanity
 *			      checks on it
 *
 * Return 0 on success, <0 on failure
 */
static int __init acpi_fadt_sanity_check(void)
{
	struct acpi_table_header *table;
	struct acpi_table_fadt *fadt;
	acpi_status status;
	acpi_size tbl_size;
	int ret = 0;

	/*
	 * FADT is required on arm64; retrieve it to check its presence
	 * and carry out revision and ACPI HW reduced compliancy tests
	 */
	status = acpi_get_table_with_size(ACPI_SIG_FADT, 0, &table, &tbl_size);
	if (ACPI_FAILURE(status)) {
		const char *msg = acpi_format_exception(status);

		pr_err("Failed to get FADT table, %s\n", msg);
		return -ENODEV;
	}

	fadt = (struct acpi_table_fadt *)table;

	/*
	 * The revision in the table header is the FADT major revision; the
	 * FADT minor revision was introduced in ACPI 5.1. Only FADT 5.1 or
	 * newer carries the GIC and SMP boot protocol configuration data we
	 * need, so reject anything older.
	 */
	if (table->revision < 5 ||
	   (table->revision == 5 && fadt->minor_revision < 1)) {
		pr_err("Unsupported FADT revision %d.%d, should be 5.1+\n",
		       table->revision, fadt->minor_revision);
		ret = -EINVAL;
		goto out;
	}

	if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) {
		pr_err("FADT not ACPI hardware reduced compliant\n");
		ret = -EINVAL;
	}

out:
	/*
	 * acpi_get_table_with_size() creates a FADT table mapping that
	 * must be released after parsing and before resuming boot.
	 */
	early_acpi_os_unmap_memory(table, tbl_size);
	return ret;
}

/*
 * acpi_boot_table_init() called from setup_arch(), always.
 *	1. find RSDP and get its address, and then find XSDT
 *	2. extract all tables and checksum them
 *	3. check ACPI FADT revision
 *	4. check ACPI FADT HW reduced flag
 *
 * We can parse ACPI boot-time tables such as MADT after
 * this function is called.
 *
 * On return ACPI is enabled if either:
 *
 * - ACPI tables are initialized and sanity checks passed
 * - acpi=force was passed on the command line and ACPI was not disabled
 *   explicitly through the acpi=off command line parameter
 *
 * Otherwise, ACPI is disabled when this function returns.
 */
void __init acpi_boot_table_init(void)
{
	/*
	 * Enable ACPI instead of device tree unless
	 * - ACPI has been disabled explicitly (acpi=off), or
	 * - the device tree is not empty (it has more than just a /chosen node)
	 *   and ACPI has not been force enabled (acpi=force)
	 */
	if (param_acpi_off ||
	    (!param_acpi_force && of_scan_flat_dt(dt_scan_depth1_nodes, NULL)))
		return;

	/*
	 * ACPI is disabled at this point. Enable it in order to parse
	 * the ACPI tables and carry out sanity checks
	 */
	enable_acpi();

	/*
	 * If ACPI tables are initialized and FADT sanity checks passed,
	 * leave ACPI enabled and carry on booting; otherwise disable ACPI
	 * on initialization error.
	 * If acpi=force was passed on the command line it forces ACPI
	 * to be enabled even if its initialization failed.
	 */
	if (acpi_table_init() || acpi_fadt_sanity_check()) {
		pr_err("Failed to init ACPI tables\n");
		if (!param_acpi_force)
			disable_acpi();
	}
}

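/*
 * Map the MADT and hand it to the GICv2 driver so the interrupt
 * controller can be initialized from ACPI, then drop the early
 * table mapping again.
 */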
void __init acpi_gic_init(void)
{
	struct acpi_table_header *table;
	acpi_status status;
	acpi_size tbl_size;
	int err;

	if (acpi_disabled)
		return;

	status = acpi_get_table_with_size(ACPI_SIG_MADT, 0, &table, &tbl_size);
	if (ACPI_FAILURE(status)) {
		const char *msg = acpi_format_exception(status);

		pr_err("Failed to get MADT table, %s\n", msg);
		return;
	}

	err = gic_v2_acpi_init(table);
	if (err)
		pr_err("Failed to initialize GIC IRQ controller\n");

	early_acpi_os_unmap_memory((char *)table, tbl_size);
}