/*
 * apei-base.c - ACPI Platform Error Interface (APEI) supporting
 * infrastructure
 *
 * APEI allows reporting errors (for example, from the chipset) to
 * the operating system. This improves NMI handling especially. In
 * addition, it supports error serialization and error injection.
 *
 * For more information about APEI, please refer to ACPI Specification
 * version 4.0, chapter 17.
 *
 * This file contains common functions used by more than one APEI
 * table, including the framework of the interpreter for ERST and
 * EINJ, and resource management for APEI registers.
 *
 * Copyright (C) 2009, Intel Corp.
 *	Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include "apei-internal.h"

#define APEI_PFX "APEI: "

/*
 * APEI ERST (Error Record Serialization Table) and EINJ (Error
 * INJection) interpreter framework.
 */

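/*
 * If this flag is set in an action entry, the bits outside the mask
 * are preserved when writing the register (read-modify-write).
 */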
#define APEI_EXEC_PRESERVE_REGISTER	0x1

void apei_exec_ctx_init(struct apei_exec_context *ctx,
			struct apei_exec_ins_type *ins_table,
			u32 instructions,
			struct acpi_whea_header *action_table,
			u32 entries)
{
	ctx->ins_table = ins_table;
	ctx->instructions = instructions;
	ctx->action_table = action_table;
	ctx->entries = entries;
}
EXPORT_SYMBOL_GPL(apei_exec_ctx_init);

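/*
 * Read the register described by a serialization instruction entry
 * and apply the entry's bit offset and mask to extract the value.
 */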
int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
{
	int rc;

	rc = apei_read(val, &entry->register_region);
	if (rc)
		return rc;
	*val >>= entry->register_region.bit_offset;
	*val &= entry->mask;

	return 0;
}

int apei_exec_read_register(struct apei_exec_context *ctx,
			    struct acpi_whea_header *entry)
{
	int rc;
	u64 val = 0;

	rc = __apei_exec_read_register(entry, &val);
	if (rc)
		return rc;
	ctx->value = val;

	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_read_register);

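/*
 * Read the register, then record in ctx->value whether the extracted
 * value matches the expected value in the table entry.
 */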
int apei_exec_read_register_value(struct apei_exec_context *ctx,
				  struct acpi_whea_header *entry)
{
	int rc;

	rc = apei_exec_read_register(ctx, entry);
	if (rc)
		return rc;
	ctx->value = (ctx->value == entry->value);

	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_read_register_value);

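/*
 * Write a value to the register described by a serialization
 * instruction entry, honoring the mask, the bit offset and the
 * PRESERVE_REGISTER flag (read-modify-write of the unrelated bits).
 */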
int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
{
	int rc;

	val &= entry->mask;
	val <<= entry->register_region.bit_offset;
	if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
		u64 valr = 0;
		rc = apei_read(&valr, &entry->register_region);
		if (rc)
			return rc;
		valr &= ~(entry->mask << entry->register_region.bit_offset);
		val |= valr;
	}
	rc = apei_write(val, &entry->register_region);

	return rc;
}

int apei_exec_write_register(struct apei_exec_context *ctx,
			     struct acpi_whea_header *entry)
{
	return __apei_exec_write_register(entry, ctx->value);
}
EXPORT_SYMBOL_GPL(apei_exec_write_register);

int apei_exec_write_register_value(struct apei_exec_context *ctx,
				   struct acpi_whea_header *entry)
{
	int rc;

	ctx->value = entry->value;
	rc = apei_exec_write_register(ctx, entry);

	return rc;
}
EXPORT_SYMBOL_GPL(apei_exec_write_register_value);

int apei_exec_noop(struct apei_exec_context *ctx,
		   struct acpi_whea_header *entry)
{
	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_noop);

/*
 * Interpret the specified action: go through the whole action table
 * and execute all instructions belonging to the action.
 */
int __apei_exec_run(struct apei_exec_context *ctx, u8 action,
		    bool optional)
{
	int rc = -ENOENT;
	u32 i, ip;
	struct acpi_whea_header *entry;
	apei_exec_ins_func_t run;

	ctx->ip = 0;

	/*
	 * "ip" is the instruction pointer of the current instruction,
	 * "ctx->ip" specifies the next instruction to be executed, and
	 * an instruction's "run" function may change "ctx->ip" to
	 * implement "goto" semantics.
	 */
rewind:
	ip = 0;
	for (i = 0; i < ctx->entries; i++) {
		entry = &ctx->action_table[i];
		if (entry->action != action)
			continue;
		if (ip == ctx->ip) {
			if (entry->instruction >= ctx->instructions ||
			    !ctx->ins_table[entry->instruction].run) {
				pr_warning(FW_WARN APEI_PFX
			"Invalid action table, unknown instruction type: %d\n",
					   entry->instruction);
				return -EINVAL;
			}
			run = ctx->ins_table[entry->instruction].run;
			rc = run(ctx, entry);
			if (rc < 0)
				return rc;
			else if (rc != APEI_EXEC_SET_IP)
				ctx->ip++;
		}
		ip++;
		if (ctx->ip < ip)
			goto rewind;
	}

	return !optional && rc < 0 ? rc : 0;
}
EXPORT_SYMBOL_GPL(__apei_exec_run);

201
202typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
203				      struct acpi_whea_header *entry,
204				      void *data);
205
206static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
207				    apei_exec_entry_func_t func,
208				    void *data,
209				    int *end)
210{
211	u8 ins;
212	int i, rc;
213	struct acpi_whea_header *entry;
214	struct apei_exec_ins_type *ins_table = ctx->ins_table;
215
216	for (i = 0; i < ctx->entries; i++) {
217		entry = ctx->action_table + i;
218		ins = entry->instruction;
219		if (end)
220			*end = i;
221		if (ins >= ctx->instructions || !ins_table[ins].run) {
222			pr_warning(FW_WARN APEI_PFX
223			"Invalid action table, unknown instruction type: %d\n",
224				   ins);
225			return -EINVAL;
226		}
227		rc = func(ctx, entry, data);
228		if (rc)
229			return rc;
230	}
231
232	return 0;
233}
234
235static int pre_map_gar_callback(struct apei_exec_context *ctx,
236				struct acpi_whea_header *entry,
237				void *data)
238{
239	u8 ins = entry->instruction;
240
241	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
242		return apei_map_generic_address(&entry->register_region);
243
244	return 0;
245}
246
/*
 * Pre-map all GARs in the action table so that they can be accessed
 * in the NMI handler.
 */
int apei_exec_pre_map_gars(struct apei_exec_context *ctx)
{
	int rc, end;

	rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback,
				      NULL, &end);
	if (rc) {
		struct apei_exec_context ctx_unmap;
		memcpy(&ctx_unmap, ctx, sizeof(*ctx));
		ctx_unmap.entries = end;
		apei_exec_post_unmap_gars(&ctx_unmap);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars);

static int post_unmap_gar_callback(struct apei_exec_context *ctx,
				   struct acpi_whea_header *entry,
				   void *data)
{
	u8 ins = entry->instruction;

	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
		apei_unmap_generic_address(&entry->register_region);

	return 0;
}

/* Post-unmap all GARs in the action table. */
int apei_exec_post_unmap_gars(struct apei_exec_context *ctx)
{
	return apei_exec_for_each_entry(ctx, post_unmap_gar_callback,
					NULL, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars);

/*
 * Resource management for GARs in APEI
 */
struct apei_res {
	struct list_head list;
	unsigned long start;
	unsigned long end;
};

/* Collect all resources requested, to avoid conflict */
struct apei_resources apei_resources_all = {
	.iomem = LIST_HEAD_INIT(apei_resources_all.iomem),
	.ioport = LIST_HEAD_INIT(apei_resources_all.ioport),
};

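/*
 * Add the range [start, start + size) to res_list, merging it with
 * any existing ranges that overlap or are adjacent to it.
 */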
static int apei_res_add(struct list_head *res_list,
			unsigned long start, unsigned long size)
{
	struct apei_res *res, *resn, *res_ins = NULL;
	unsigned long end = start + size;

	if (end <= start)
		return 0;
repeat:
	list_for_each_entry_safe(res, resn, res_list, list) {
		if (res->start > end || res->end < start)
			continue;
		else if (end <= res->end && start >= res->start) {
			kfree(res_ins);
			return 0;
		}
		list_del(&res->list);
		res->start = start = min(res->start, start);
		res->end = end = max(res->end, end);
		kfree(res_ins);
		res_ins = res;
		goto repeat;
	}

	if (res_ins)
		list_add(&res_ins->list, res_list);
	else {
		res_ins = kmalloc(sizeof(*res), GFP_KERNEL);
		if (!res_ins)
			return -ENOMEM;
		res_ins->start = start;
		res_ins->end = end;
		list_add(&res_ins->list, res_list);
	}

	return 0;
}

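/*
 * Remove from res_list1 everything that is covered by res_list2,
 * splitting ranges that are only partially covered.
 */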
static int apei_res_sub(struct list_head *res_list1,
			struct list_head *res_list2)
{
	struct apei_res *res1, *resn1, *res2, *res;
	res1 = list_entry(res_list1->next, struct apei_res, list);
	resn1 = list_entry(res1->list.next, struct apei_res, list);
	while (&res1->list != res_list1) {
		list_for_each_entry(res2, res_list2, list) {
			if (res1->start >= res2->end ||
			    res1->end <= res2->start)
				continue;
			else if (res1->end <= res2->end &&
				 res1->start >= res2->start) {
				list_del(&res1->list);
				kfree(res1);
				break;
			} else if (res1->end > res2->end &&
				   res1->start < res2->start) {
				res = kmalloc(sizeof(*res), GFP_KERNEL);
				if (!res)
					return -ENOMEM;
				res->start = res2->end;
				res->end = res1->end;
				res1->end = res2->start;
				list_add(&res->list, &res1->list);
				resn1 = res;
			} else {
				if (res1->start < res2->start)
					res1->end = res2->start;
				else
					res1->start = res2->end;
			}
		}
		res1 = resn1;
		resn1 = list_entry(resn1->list.next, struct apei_res, list);
	}

	return 0;
}

static void apei_res_clean(struct list_head *res_list)
{
	struct apei_res *res, *resn;

	list_for_each_entry_safe(res, resn, res_list, list) {
		list_del(&res->list);
		kfree(res);
	}
}

void apei_resources_fini(struct apei_resources *resources)
{
	apei_res_clean(&resources->iomem);
	apei_res_clean(&resources->ioport);
}
EXPORT_SYMBOL_GPL(apei_resources_fini);

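/* Merge all ranges in resources2 into resources1. */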
static int apei_resources_merge(struct apei_resources *resources1,
				struct apei_resources *resources2)
{
	int rc;
	struct apei_res *res;

	list_for_each_entry(res, &resources2->iomem, list) {
		rc = apei_res_add(&resources1->iomem, res->start,
				  res->end - res->start);
		if (rc)
			return rc;
	}
	list_for_each_entry(res, &resources2->ioport, list) {
		rc = apei_res_add(&resources1->ioport, res->start,
				  res->end - res->start);
		if (rc)
			return rc;
	}

	return 0;
}

int apei_resources_add(struct apei_resources *resources,
		       unsigned long start, unsigned long size,
		       bool iomem)
{
	if (iomem)
		return apei_res_add(&resources->iomem, start, size);
	else
		return apei_res_add(&resources->ioport, start, size);
}
EXPORT_SYMBOL_GPL(apei_resources_add);

/*
 * EINJ has two groups of GARs (EINJ table entries and trigger table
 * entries), so the common resources are subtracted from the trigger
 * table resources before they are requested a second time.
 */
int apei_resources_sub(struct apei_resources *resources1,
		       struct apei_resources *resources2)
{
	int rc;

	rc = apei_res_sub(&resources1->iomem, &resources2->iomem);
	if (rc)
		return rc;
	return apei_res_sub(&resources1->ioport, &resources2->ioport);
}
EXPORT_SYMBOL_GPL(apei_resources_sub);

static int apei_get_res_callback(__u64 start, __u64 size, void *data)
{
	struct apei_resources *resources = data;
	return apei_res_add(&resources->iomem, start, size);
}

static int apei_get_nvs_resources(struct apei_resources *resources)
{
	return acpi_nvs_for_each_region(apei_get_res_callback, resources);
}

int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
				     void *data), void *data);

static int apei_get_arch_resources(struct apei_resources *resources)
{
	return arch_apei_filter_addr(apei_get_res_callback, resources);
}

/*
 * The IO memory/port resource management mechanism is used to check
 * whether the memory/port areas used by GARs conflict with normal
 * memory or with the IO memory/ports of devices.
 */
int apei_resources_request(struct apei_resources *resources,
			   const char *desc)
{
	struct apei_res *res, *res_bak = NULL;
	struct resource *r;
	struct apei_resources nvs_resources, arch_res;
	int rc;

	rc = apei_resources_sub(resources, &apei_resources_all);
	if (rc)
		return rc;

	/*
	 * Some firmware uses the ACPI NVS region, which has been marked
	 * as busy, so exclude it from the APEI resources to avoid a
	 * false conflict.
	 */
	apei_resources_init(&nvs_resources);
	rc = apei_get_nvs_resources(&nvs_resources);
	if (rc)
		goto nvs_res_fini;
	rc = apei_resources_sub(resources, &nvs_resources);
	if (rc)
		goto nvs_res_fini;

	if (arch_apei_filter_addr) {
		apei_resources_init(&arch_res);
		rc = apei_get_arch_resources(&arch_res);
		if (rc)
			goto arch_res_fini;
		rc = apei_resources_sub(resources, &arch_res);
		if (rc)
			goto arch_res_fini;
	}

	rc = -EINVAL;
	list_for_each_entry(res, &resources->iomem, list) {
		r = request_mem_region(res->start, res->end - res->start,
				       desc);
		if (!r) {
			pr_err(APEI_PFX
		"Can not request [mem %#010llx-%#010llx] for %s registers\n",
			       (unsigned long long)res->start,
			       (unsigned long long)res->end - 1, desc);
			res_bak = res;
			goto err_unmap_iomem;
		}
	}

	list_for_each_entry(res, &resources->ioport, list) {
		r = request_region(res->start, res->end - res->start, desc);
		if (!r) {
			pr_err(APEI_PFX
		"Can not request [io  %#06llx-%#06llx] for %s registers\n",
			       (unsigned long long)res->start,
			       (unsigned long long)res->end - 1, desc);
			res_bak = res;
			goto err_unmap_ioport;
		}
	}

	rc = apei_resources_merge(&apei_resources_all, resources);
	if (rc) {
		pr_err(APEI_PFX "Fail to merge resources!\n");
		goto err_unmap_ioport;
	}

	return 0;
err_unmap_ioport:
	list_for_each_entry(res, &resources->ioport, list) {
		if (res == res_bak)
			break;
		release_region(res->start, res->end - res->start);
	}
	res_bak = NULL;
err_unmap_iomem:
	list_for_each_entry(res, &resources->iomem, list) {
		if (res == res_bak)
			break;
		release_mem_region(res->start, res->end - res->start);
	}
arch_res_fini:
	/* arch_res is only initialized when arch_apei_filter_addr is set */
	if (arch_apei_filter_addr)
		apei_resources_fini(&arch_res);
nvs_res_fini:
	apei_resources_fini(&nvs_resources);
	return rc;
}
EXPORT_SYMBOL_GPL(apei_resources_request);

void apei_resources_release(struct apei_resources *resources)
{
	int rc;
	struct apei_res *res;

	list_for_each_entry(res, &resources->iomem, list)
		release_mem_region(res->start, res->end - res->start);
	list_for_each_entry(res, &resources->ioport, list)
		release_region(res->start, res->end - res->start);

	rc = apei_resources_sub(&apei_resources_all, resources);
	if (rc)
		pr_err(APEI_PFX "Fail to sub resources!\n");
}
EXPORT_SYMBOL_GPL(apei_resources_release);

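/*
 * Validate a Generic Address Structure: return its physical address
 * and the access width (in bits) to use when accessing it, fixing up
 * some common firmware bugs in the reported access size.
 */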
static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
				u32 *access_bit_width)
{
	u32 bit_width, bit_offset, access_size_code, space_id;

	bit_width = reg->bit_width;
	bit_offset = reg->bit_offset;
	access_size_code = reg->access_width;
	space_id = reg->space_id;
	*paddr = get_unaligned(&reg->address);
	if (!*paddr) {
		pr_warning(FW_BUG APEI_PFX
			   "Invalid physical address in GAR [0x%llx/%u/%u/%u/%u]\n",
			   *paddr, bit_width, bit_offset, access_size_code,
			   space_id);
		return -EINVAL;
	}

	if (access_size_code < 1 || access_size_code > 4) {
		pr_warning(FW_BUG APEI_PFX
			   "Invalid access size code in GAR [0x%llx/%u/%u/%u/%u]\n",
			   *paddr, bit_width, bit_offset, access_size_code,
			   space_id);
		return -EINVAL;
	}
	*access_bit_width = 1UL << (access_size_code + 2);

	/* Fixup common BIOS bug */
	if (bit_width == 32 && bit_offset == 0 && (*paddr & 0x03) == 0 &&
	    *access_bit_width < 32)
		*access_bit_width = 32;
	else if (bit_width == 64 && bit_offset == 0 && (*paddr & 0x07) == 0 &&
	    *access_bit_width < 64)
		*access_bit_width = 64;

	if ((bit_width + bit_offset) > *access_bit_width) {
		pr_warning(FW_BUG APEI_PFX
			   "Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n",
			   *paddr, bit_width, bit_offset, access_size_code,
			   space_id);
		return -EINVAL;
	}

	if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
	    space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
		pr_warning(FW_BUG APEI_PFX
			   "Invalid address space type in GAR [0x%llx/%u/%u/%u/%u]\n",
			   *paddr, bit_width, bit_offset, access_size_code,
			   space_id);
		return -EINVAL;
	}

	return 0;
}

int apei_map_generic_address(struct acpi_generic_address *reg)
{
	int rc;
	u32 access_bit_width;
	u64 address;

	rc = apei_check_gar(reg, &address, &access_bit_width);
	if (rc)
		return rc;
	return acpi_os_map_generic_address(reg);
}
EXPORT_SYMBOL_GPL(apei_map_generic_address);

/* read GAR in interrupt (including NMI) or process context */
int apei_read(u64 *val, struct acpi_generic_address *reg)
{
	int rc;
	u32 access_bit_width;
	u64 address;
	acpi_status status;

	rc = apei_check_gar(reg, &address, &access_bit_width);
	if (rc)
		return rc;

	*val = 0;
	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		status = acpi_os_read_memory((acpi_physical_address) address,
					       val, access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	case ACPI_ADR_SPACE_SYSTEM_IO:
		status = acpi_os_read_port(address, (u32 *)val,
					   access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(apei_read);

/* write GAR in interrupt (including NMI) or process context */
int apei_write(u64 val, struct acpi_generic_address *reg)
{
	int rc;
	u32 access_bit_width;
	u64 address;
	acpi_status status;

	rc = apei_check_gar(reg, &address, &access_bit_width);
	if (rc)
		return rc;

	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		status = acpi_os_write_memory((acpi_physical_address) address,
						val, access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	case ACPI_ADR_SPACE_SYSTEM_IO:
		status = acpi_os_write_port(address, val, access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(apei_write);

static int collect_res_callback(struct apei_exec_context *ctx,
				struct acpi_whea_header *entry,
				void *data)
{
	struct apei_resources *resources = data;
	struct acpi_generic_address *reg = &entry->register_region;
	u8 ins = entry->instruction;
	u32 access_bit_width;
	u64 paddr;
	int rc;

	if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
		return 0;

	rc = apei_check_gar(reg, &paddr, &access_bit_width);
	if (rc)
		return rc;

	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		return apei_res_add(&resources->iomem, paddr,
				    access_bit_width / 8);
	case ACPI_ADR_SPACE_SYSTEM_IO:
		return apei_res_add(&resources->ioport, paddr,
				    access_bit_width / 8);
	default:
		return -EINVAL;
	}
}

/*
 * The same register may be used by multiple instructions in GARs, so
 * the resources are collected first and then requested.
 */
int apei_exec_collect_resources(struct apei_exec_context *ctx,
				struct apei_resources *resources)
{
	return apei_exec_for_each_entry(ctx, collect_res_callback,
					resources, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_collect_resources);

struct dentry *apei_get_debugfs_dir(void)
{
	static struct dentry *dapei;

	if (!dapei)
		dapei = debugfs_create_dir("apei", NULL);

	return dapei;
}
EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);

int __weak arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr,
				  void *data)
{
	return 1;
}
EXPORT_SYMBOL_GPL(arch_apei_enable_cmcff);

void __weak arch_apei_report_mem_error(int sev,
				       struct cper_sec_mem_err *mem_err)
{
}
EXPORT_SYMBOL_GPL(arch_apei_report_mem_error);

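/*
 * Tell the firmware that the OS supports APEI by evaluating _OSC
 * under \_SB with the WHEA UUID.
 */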
int apei_osc_setup(void)
{
	static u8 whea_uuid_str[] = "ed855e0c-6c90-47bf-a62a-26de0fc5ad5c";
	acpi_handle handle;
	u32 capbuf[3];
	struct acpi_osc_context context = {
		.uuid_str	= whea_uuid_str,
		.rev		= 1,
		.cap.length	= sizeof(capbuf),
		.cap.pointer	= capbuf,
	};

	capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
	capbuf[OSC_SUPPORT_DWORD] = 1;
	capbuf[OSC_CONTROL_DWORD] = 0;

	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
	    || ACPI_FAILURE(acpi_run_osc(handle, &context)))
		return -EIO;
	else {
		kfree(context.ret.pointer);
		return 0;
	}
}
EXPORT_SYMBOL_GPL(apei_osc_setup);