/*
 * edac_device.c
 * (C) 2007 www.douglaskthompson.com
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Doug Thompson <norsk5@xmission.com>
 *
 * edac_device API implementation
 * 19 Jan 2007
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
#include <asm/uaccess.h>
#include <asm/page.h>

#include "edac_core.h"
#include "edac_module.h"
/* Lock for the list 'edac_device_list'; manipulation of this list
 * is protected by the 'device_ctls_mutex' lock
 */
static DEFINE_MUTEX(device_ctls_mutex);
static LIST_HEAD(edac_device_list);

#ifdef CONFIG_EDAC_DEBUG
static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
{
	edac_dbg(3, "\tedac_dev = %p dev_idx=%d\n",
		 edac_dev, edac_dev->dev_idx);
	edac_dbg(4, "\tedac_dev->edac_check = %p\n", edac_dev->edac_check);
	edac_dbg(3, "\tdev = %p\n", edac_dev->dev);
	edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
		 edac_dev->mod_name, edac_dev->ctl_name);
	edac_dbg(3, "\tpvt_info = %p\n\n", edac_dev->pvt_info);
}
#endif				/* CONFIG_EDAC_DEBUG */

/*
 * edac_device_alloc_ctl_info()
 *	Allocate a new edac device control info structure
 *
 *	The control structure is allocated in one complete chunk
 *	from the OS. It is in turn sub-allocated to the
 *	various objects that compose the structure
 *
 *	The structure has a 'nr_instances' array within itself.
 *	Each instance represents a major component
 *		Example:  L1 cache and L2 cache are 2 instance components
 *
 *	Within each instance is an array of 'nr_blocks' block offsets
 */
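/*
 * A minimal usage sketch (illustrative only): a hypothetical driver with
 * one "l2cache" instance of four "bank" blocks, no extra block attributes
 * and a small private struct might allocate its control structure roughly
 * like this.  'struct my_pvt' and 'my_edac_check' are assumptions invented
 * for the example, not part of this API:
 *
 *	struct edac_device_ctl_info *edac_dev;
 *
 *	edac_dev = edac_device_alloc_ctl_info(sizeof(struct my_pvt),
 *					      "l2cache", 1, "bank", 4,
 *					      0, NULL, 0,
 *					      edac_device_alloc_index());
 *	if (!edac_dev)
 *		return -ENOMEM;
 *
 *	edac_dev->edac_check = my_edac_check;
 *	(then fill in dev, mod_name, ctl_name and dev_name, and call
 *	 edac_device_add_device(), see further below)
 */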
struct edac_device_ctl_info *edac_device_alloc_ctl_info(
	unsigned sz_private,
	char *edac_device_name, unsigned nr_instances,
	char *edac_block_name, unsigned nr_blocks,
	unsigned offset_value,		/* zero, 1, or other based offset */
	struct edac_dev_sysfs_block_attribute *attrib_spec, unsigned nr_attrib,
	int device_index)
{
	struct edac_device_ctl_info *dev_ctl;
	struct edac_device_instance *dev_inst, *inst;
	struct edac_device_block *dev_blk, *blk_p, *blk;
	struct edac_dev_sysfs_block_attribute *dev_attrib, *attrib_p, *attrib;
	unsigned total_size;
	unsigned count;
	unsigned instance, block, attr;
	void *pvt, *p;
	int err;

	edac_dbg(4, "instances=%d blocks=%d\n", nr_instances, nr_blocks);

	/* Calculate the size of memory we need to allocate AND
	 * determine the offsets of the various item arrays
	 * (instance, block, attrib) from the start of an allocated structure.
	 * We want the alignment of each item (instance, block, attrib)
	 * to be at least as stringent as what the compiler would
	 * provide if we could simply hardcode everything into a single struct.
	 */
	p = NULL;
	dev_ctl = edac_align_ptr(&p, sizeof(*dev_ctl), 1);

	/* Calc the 'end' offset past the end of ONE ctl_info structure,
	 * which will become the start of the 'instance' array
	 */
	dev_inst = edac_align_ptr(&p, sizeof(*dev_inst), nr_instances);

	/* Calc the 'end' offset past the instance array within the ctl_info,
	 * which will become the start of the block array
	 */
	count = nr_instances * nr_blocks;
	dev_blk = edac_align_ptr(&p, sizeof(*dev_blk), count);

	/* Calc the 'end' offset past the dev_blk array,
	 * which will become the start of the attrib array, if any.
	 */
	/* calc how many nr_attrib we need */
	if (nr_attrib > 0)
		count *= nr_attrib;
	dev_attrib = edac_align_ptr(&p, sizeof(*dev_attrib), count);

	/* Calc the 'end' offset past the attributes array */
	pvt = edac_align_ptr(&p, sz_private, 1);

	/* 'pvt' now points to where the private data area is.
	 * At this point 'pvt' (like dev_inst, dev_blk and dev_attrib)
	 * is baselined at ZERO
	 */
	total_size = ((unsigned long)pvt) + sz_private;
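
	/*
	 * Worked example of the zero-based layout above (the sizes are made
	 * up for illustration and assume edac_align_ptr() needs to add no
	 * padding): with sizeof(*dev_ctl) == 600, sizeof(*dev_inst) == 80,
	 * nr_instances == 2, nr_blocks == 4, sizeof(*dev_blk) == 120,
	 * nr_attrib == 1, sizeof(*dev_attrib) == 40 and sz_private == 16,
	 * the successive edac_align_ptr() calls yield roughly:
	 *
	 *	dev_ctl    = 0
	 *	dev_inst   = 600
	 *	dev_blk    = 600 + 2*80   = 760
	 *	dev_attrib = 760 + 8*120  = 1720
	 *	pvt        = 1720 + 8*40  = 2040
	 *	total_size = 2040 + 16    = 2056
	 *
	 * i.e. every pointer is an offset from an imaginary base of zero,
	 * which is materialized by the single kzalloc() below.
	 */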

	/* Allocate the amount of memory for the set of control structures */
	dev_ctl = kzalloc(total_size, GFP_KERNEL);
	if (dev_ctl == NULL)
		return NULL;

	/* Adjust pointers so they point within the actual memory we
	 * just allocated rather than an imaginary chunk of memory
	 * located at address 0.
	 * 'dev_ctl' points to REAL memory, while the others are
	 * ZERO based and thus need to be adjusted to point within
	 * the allocated memory.
	 */
	dev_inst = (struct edac_device_instance *)
		(((char *)dev_ctl) + ((unsigned long)dev_inst));
	dev_blk = (struct edac_device_block *)
		(((char *)dev_ctl) + ((unsigned long)dev_blk));
	dev_attrib = (struct edac_dev_sysfs_block_attribute *)
		(((char *)dev_ctl) + ((unsigned long)dev_attrib));
	pvt = sz_private ? (((char *)dev_ctl) + ((unsigned long)pvt)) : NULL;

	/* Begin storing the information into the control info structure */
	dev_ctl->dev_idx = device_index;
	dev_ctl->nr_instances = nr_instances;
	dev_ctl->instances = dev_inst;
	dev_ctl->pvt_info = pvt;

	/* Default logging of CEs and UEs */
	dev_ctl->log_ce = 1;
	dev_ctl->log_ue = 1;

	/* Name of this edac device */
	snprintf(dev_ctl->name, sizeof(dev_ctl->name), "%s", edac_device_name);

	edac_dbg(4, "edac_dev=%p next after end=%p\n",
		 dev_ctl, pvt + sz_private);

	/* Initialize every Instance */
	for (instance = 0; instance < nr_instances; instance++) {
		inst = &dev_inst[instance];
		inst->ctl = dev_ctl;
		inst->nr_blocks = nr_blocks;
		blk_p = &dev_blk[instance * nr_blocks];
		inst->blocks = blk_p;

		/* name of this instance */
		snprintf(inst->name, sizeof(inst->name),
			 "%s%u", edac_device_name, instance);

		/* Initialize every block in each instance */
		for (block = 0; block < nr_blocks; block++) {
			blk = &blk_p[block];
			blk->instance = inst;
			snprintf(blk->name, sizeof(blk->name),
				 "%s%d", edac_block_name, block+offset_value);

			edac_dbg(4, "instance=%d inst_p=%p block=#%d block_p=%p name='%s'\n",
				 instance, inst, block, blk, blk->name);

			/* if there are NO attributes OR no attribute pointer
			 * then continue on to the next block iteration
			 */
			if ((nr_attrib == 0) || (attrib_spec == NULL))
				continue;

			/* setup the attribute array for this block */
			blk->nr_attribs = nr_attrib;
			attrib_p = &dev_attrib[block*nr_instances*nr_attrib];
			blk->block_attributes = attrib_p;

			edac_dbg(4, "THIS BLOCK_ATTRIB=%p\n",
				 blk->block_attributes);

			/* Initialize every user-specified attribute in this
			 * block with the data the caller passed in.
			 * Each block gets its own copy of the pointers
			 * and its unique 'value'
			 */
			for (attr = 0; attr < nr_attrib; attr++) {
				attrib = &attrib_p[attr];

				/* populate the unique per-attrib structure
				 * with the code pointers and info
				 */
				attrib->attr = attrib_spec[attr].attr;
				attrib->show = attrib_spec[attr].show;
				attrib->store = attrib_spec[attr].store;

				attrib->block = blk;	/* up link */

				edac_dbg(4, "alloc-attrib=%p attrib_name='%s' attrib-spec=%p spec-name=%s\n",
					 attrib, attrib->attr.name,
					 &attrib_spec[attr],
					 attrib_spec[attr].attr.name
					);
			}
		}
	}

	/* Mark this instance as merely ALLOCATED */
	dev_ctl->op_state = OP_ALLOC;

	/*
	 * Initialize the 'root' kobj for the edac_device controller
	 */
	err = edac_device_register_sysfs_main_kobj(dev_ctl);
	if (err) {
		kfree(dev_ctl);
		return NULL;
	}

	/* At this point the root kobj is valid; to 'free' the object,
	 * edac_device_unregister_sysfs_main_kobj() must be called,
	 * which performs the kobj unregistration.  The actual free
	 * happens later, in the kobject release callback.
	 */

	return dev_ctl;
}
EXPORT_SYMBOL_GPL(edac_device_alloc_ctl_info);

/*
 * edac_device_free_ctl_info()
 *	frees the memory allocated by the edac_device_alloc_ctl_info()
 *	function
 */
void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info)
{
	edac_device_unregister_sysfs_main_kobj(ctl_info);
}
EXPORT_SYMBOL_GPL(edac_device_free_ctl_info);

/*
 * find_edac_device_by_dev
 *	scans the edac_device list for a specific 'struct device *'
 *
 *	lock to be held prior to call:	device_ctls_mutex
 *
 *	Return:
 *		pointer to control structure managing 'dev'
 *		NULL if not found on list
 */
static struct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev)
{
	struct edac_device_ctl_info *edac_dev;
	struct list_head *item;

	edac_dbg(0, "\n");

	list_for_each(item, &edac_device_list) {
		edac_dev = list_entry(item, struct edac_device_ctl_info, link);

		if (edac_dev->dev == dev)
			return edac_dev;
	}

	return NULL;
}

/*
 * add_edac_dev_to_global_list
 *	Before calling this function, caller must
 *	assign a unique value to edac_dev->dev_idx.
 *
 *	lock to be held prior to call:	device_ctls_mutex
 *
 *	Return:
 *		0 on success
 *		1 on failure.
 */
static int add_edac_dev_to_global_list(struct edac_device_ctl_info *edac_dev)
{
	struct list_head *item, *insert_before;
	struct edac_device_ctl_info *rover;

	insert_before = &edac_device_list;

	/* Determine if already on the list */
	rover = find_edac_device_by_dev(edac_dev->dev);
	if (unlikely(rover != NULL))
		goto fail0;

	/* Insert in ascending order by 'dev_idx', so find position */
	list_for_each(item, &edac_device_list) {
		rover = list_entry(item, struct edac_device_ctl_info, link);

		if (rover->dev_idx >= edac_dev->dev_idx) {
			if (unlikely(rover->dev_idx == edac_dev->dev_idx))
				goto fail1;

			insert_before = item;
			break;
		}
	}

	list_add_tail_rcu(&edac_dev->link, insert_before);
	return 0;

fail0:
	edac_printk(KERN_WARNING, EDAC_MC,
			"%s (%s) %s %s already assigned %d\n",
			dev_name(rover->dev), edac_dev_name(rover),
			rover->mod_name, rover->ctl_name, rover->dev_idx);
	return 1;

fail1:
	edac_printk(KERN_WARNING, EDAC_MC,
			"bug in low-level driver: attempt to assign\n"
			"    duplicate dev_idx %d in %s()\n", rover->dev_idx,
			__func__);
	return 1;
}

/*
 * del_edac_device_from_global_list
 */
static void del_edac_device_from_global_list(struct edac_device_ctl_info
						*edac_device)
{
	list_del_rcu(&edac_device->link);

	/* these are for safe removal of devices from the global list while
	 * NMI handlers may be traversing the list
	 */
	synchronize_rcu();
	INIT_LIST_HEAD(&edac_device->link);
}
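
/*
 * For reference, a lockless reader of 'edac_device_list' (for example one
 * running in NMI context, which cannot take 'device_ctls_mutex') would pair
 * with the list_add_tail_rcu()/list_del_rcu() + synchronize_rcu() calls
 * above roughly as sketched below.  This is illustrative only; no such
 * reader exists in this file:
 *
 *	struct edac_device_ctl_info *edac_dev;
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(edac_dev, &edac_device_list, link) {
 *		if (edac_dev->dev == dev)
 *			break;
 *	}
 *	rcu_read_unlock();
 */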

/*
 * edac_device_workq_function
 *	performs the operation scheduled by a workq request
 *
 *	this workq is embedded within an edac_device_ctl_info
 *	structure that needs to be polled for possible error events.
 *
 *	This operation is to acquire the list mutex lock
 *	(thus preventing insertion or deletion)
 *	and then call the device's poll function IFF this device is
 *	running polled and there is a poll function defined.
 */
static void edac_device_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = to_delayed_work(work_req);
	struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work);

	mutex_lock(&device_ctls_mutex);

	/* If we are being removed, bail out immediately */
	if (edac_dev->op_state == OP_OFFLINE) {
		mutex_unlock(&device_ctls_mutex);
		return;
	}

	/* Only poll controllers that are running polled and have a check */
	if ((edac_dev->op_state == OP_RUNNING_POLL) &&
		(edac_dev->edac_check != NULL)) {
			edac_dev->edac_check(edac_dev);
	}

	mutex_unlock(&device_ctls_mutex);

	/* Reschedule the workq for the next time period.
	 * If the period is one second, round to the next whole second
	 * boundary to save timers from firing all over the period
	 * between integral seconds.
	 */
	if (edac_dev->poll_msec == 1000)
		queue_delayed_work(edac_workqueue, &edac_dev->work,
				round_jiffies_relative(edac_dev->delay));
	else
		queue_delayed_work(edac_workqueue, &edac_dev->work,
				edac_dev->delay);
}
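
/*
 * An illustrative edac_check callback of the kind invoked above, for a
 * hypothetical polled device.  'my_read_and_clear_status' and the
 * MY_STATUS_* bits are assumptions invented for the example; a real driver
 * reads its own hardware error registers here and reports the result
 * through edac_device_handle_ce()/edac_device_handle_ue():
 *
 *	static void my_edac_check(struct edac_device_ctl_info *edac_dev)
 *	{
 *		u32 status = my_read_and_clear_status(edac_dev->pvt_info);
 *
 *		if (status & MY_STATUS_CE)
 *			edac_device_handle_ce(edac_dev, 0, 0,
 *						"single-bit error");
 *		if (status & MY_STATUS_UE)
 *			edac_device_handle_ue(edac_dev, 0, 0,
 *						"double-bit error");
 *	}
 */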

/*
 * edac_device_workq_setup
 *	initialize a workq item for this edac_device instance
 *	passing in the new delay period in msec
 */
void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
				unsigned msec)
{
	edac_dbg(0, "\n");

	/* take the arg 'msec' and set it into the control structure
	 * to be used in the time period calculation,
	 * then calculate the number of jiffies that it represents
	 */
	edac_dev->poll_msec = msec;
	edac_dev->delay = msecs_to_jiffies(msec);

	INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);

	/* optimize here for the 1 second case, which will be the normal
	 * value, to fire ON the 1 second time event.  This helps reduce
	 * all sorts of timers firing on a sub-second basis, while they
	 * are happy to fire together exactly on the 1 second boundary.
	 */
	if (edac_dev->poll_msec == 1000)
		queue_delayed_work(edac_workqueue, &edac_dev->work,
				round_jiffies_relative(edac_dev->delay));
	else
		queue_delayed_work(edac_workqueue, &edac_dev->work,
				edac_dev->delay);
}
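
/*
 * A small numeric illustration of the rounding above (the HZ value is only
 * an example): with HZ == 250, msecs_to_jiffies(1000) == 250 jiffies, and
 * round_jiffies_relative(250) adjusts that relative delay so the timer
 * expires on a whole-second jiffies boundary.  All controllers polling at
 * 1000 msec therefore tend to wake up together once per second instead of
 * scattering wakeups across the second:
 *
 *	unsigned long delay = msecs_to_jiffies(1000);
 *
 *	queue_delayed_work(edac_workqueue, &edac_dev->work,
 *			   round_jiffies_relative(delay));
 */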

/*
 * edac_device_workq_teardown
 *	stop the workq processing on this edac_dev
 */
void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
{
	if (!edac_dev->edac_check)
		return;

	edac_dev->op_state = OP_OFFLINE;

	cancel_delayed_work_sync(&edac_dev->work);
	flush_workqueue(edac_workqueue);
}

/*
 * edac_device_reset_delay_period
 *
 *	need to stop any outstanding workq queued up at this time
 *	because we will be resetting the sleep time.
 *	Then restart the workq on the new delay
 */
void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
					unsigned long value)
{
	/* cancel the current workq request, without the mutex lock */
	edac_device_workq_teardown(edac_dev);

	/* acquire the mutex before doing the workq setup */
	mutex_lock(&device_ctls_mutex);

	/* restart the workq request, with new delay value */
	edac_device_workq_setup(edac_dev, value);

	mutex_unlock(&device_ctls_mutex);
}
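
/*
 * For illustration, a caller that wants to change the polling interval at
 * run time (for instance from a module parameter or a sysfs 'store'
 * handler) would simply do something like the following; the 2000 msec
 * value is an arbitrary example:
 *
 *	edac_device_reset_delay_period(edac_dev, 2000);
 *
 * which tears down the currently queued work and requeues it with the new
 * delay under 'device_ctls_mutex'.
 */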

/*
 * edac_device_alloc_index: Allocate a unique device index number
 *
 * Return:
 *	allocated index number
 */
int edac_device_alloc_index(void)
{
	static atomic_t device_indexes = ATOMIC_INIT(0);

	return atomic_inc_return(&device_indexes) - 1;
}
EXPORT_SYMBOL_GPL(edac_device_alloc_index);

/**
 * edac_device_add_device: Insert the 'edac_dev' structure into the
 * edac_device global list and create sysfs entries associated with the
 * edac_device structure.
 * @edac_dev: pointer to the edac_device structure to be added to the list
 *
 * Return:
 *	0	Success
 *	!0	Failure
 */
int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
{
	edac_dbg(0, "\n");

#ifdef CONFIG_EDAC_DEBUG
	if (edac_debug_level >= 3)
		edac_device_dump_device(edac_dev);
#endif
	mutex_lock(&device_ctls_mutex);

	if (add_edac_dev_to_global_list(edac_dev))
		goto fail0;

	/* set load time so that error rate can be tracked */
	edac_dev->start_time = jiffies;

	/* create this instance's sysfs entries */
	if (edac_device_create_sysfs(edac_dev)) {
		edac_device_printk(edac_dev, KERN_WARNING,
					"failed to create sysfs device\n");
		goto fail1;
	}

	/* If there IS a check routine, then we are running POLLED */
	if (edac_dev->edac_check != NULL) {
		/* This instance is NOW RUNNING */
		edac_dev->op_state = OP_RUNNING_POLL;

		/*
		 * enable workq processing on this instance,
		 * default = 1000 msec
		 */
		edac_device_workq_setup(edac_dev, 1000);
	} else {
		edac_dev->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_device_printk(edac_dev, KERN_INFO,
		"Giving out device to module %s controller %s: DEV %s (%s)\n",
		edac_dev->mod_name, edac_dev->ctl_name, edac_dev->dev_name,
		edac_op_state_to_string(edac_dev->op_state));

	mutex_unlock(&device_ctls_mutex);
	return 0;

fail1:
	/* Some error, so remove the entry from the list */
	del_edac_device_from_global_list(edac_dev);

fail0:
	mutex_unlock(&device_ctls_mutex);
	return 1;
}
EXPORT_SYMBOL_GPL(edac_device_add_device);
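
/*
 * Putting the pieces together: a hypothetical platform driver's probe and
 * remove paths would typically use this API in roughly the following
 * order.  Everything named 'my_*' is an assumption invented for the
 * sketch, and error handling is abbreviated:
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct edac_device_ctl_info *edac_dev;
 *
 *		edac_dev = edac_device_alloc_ctl_info(0, "my_ecc", 1,
 *						      "blk", 1, 0, NULL, 0,
 *						      edac_device_alloc_index());
 *		if (!edac_dev)
 *			return -ENOMEM;
 *
 *		edac_dev->dev = &pdev->dev;
 *		edac_dev->mod_name = "my_edac";
 *		edac_dev->ctl_name = "my_ecc";
 *		edac_dev->dev_name = dev_name(&pdev->dev);
 *		edac_dev->edac_check = my_edac_check;
 *
 *		if (edac_device_add_device(edac_dev)) {
 *			edac_device_free_ctl_info(edac_dev);
 *			return -ENODEV;
 *		}
 *		platform_set_drvdata(pdev, edac_dev);
 *		return 0;
 *	}
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		edac_device_del_device(&pdev->dev);
 *		edac_device_free_ctl_info(platform_get_drvdata(pdev));
 *		return 0;
 *	}
 */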

/**
 * edac_device_del_device:
 *	Remove sysfs entries for specified edac_device structure and
 *	then remove edac_device structure from global list
 *
 * @dev:
 *	Pointer to 'struct device' representing edac_device
 *	structure to remove.
 *
 * Return:
 *	Pointer to removed edac_device structure,
 *	OR NULL if device not found.
 */
struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
{
	struct edac_device_ctl_info *edac_dev;

	edac_dbg(0, "\n");

	mutex_lock(&device_ctls_mutex);

	/* Find the structure on the list, if not there, then leave */
	edac_dev = find_edac_device_by_dev(dev);
	if (edac_dev == NULL) {
		mutex_unlock(&device_ctls_mutex);
		return NULL;
	}

	/* mark this instance as OFFLINE */
	edac_dev->op_state = OP_OFFLINE;

	/* deregister from global list */
	del_edac_device_from_global_list(edac_dev);

	mutex_unlock(&device_ctls_mutex);

	/* clear workq processing on this instance */
	edac_device_workq_teardown(edac_dev);

	/* Tear down the sysfs entries for this instance */
	edac_device_remove_sysfs(edac_dev);

	edac_printk(KERN_INFO, EDAC_MC,
		"Removed device %d for %s %s: DEV %s\n",
		edac_dev->dev_idx,
		edac_dev->mod_name, edac_dev->ctl_name, edac_dev_name(edac_dev));

	return edac_dev;
}
EXPORT_SYMBOL_GPL(edac_device_del_device);

static inline int edac_device_get_log_ce(struct edac_device_ctl_info *edac_dev)
{
	return edac_dev->log_ce;
}

static inline int edac_device_get_log_ue(struct edac_device_ctl_info *edac_dev)
{
	return edac_dev->log_ue;
}

static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
					*edac_dev)
{
	return edac_dev->panic_on_ue;
}

/*
 * edac_device_handle_ce
 *	perform a common output and handling of an 'edac_dev' CE event
 */
void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
			int inst_nr, int block_nr, const char *msg)
{
	struct edac_device_instance *instance;
	struct edac_device_block *block = NULL;

	if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: 'instance' out of range "
				"(%d >= %d)\n", inst_nr,
				edac_dev->nr_instances);
		return;
	}

	instance = edac_dev->instances + inst_nr;

	if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: instance %d 'block' "
				"out of range (%d >= %d)\n",
				inst_nr, block_nr,
				instance->nr_blocks);
		return;
	}

	if (instance->nr_blocks > 0) {
		block = instance->blocks + block_nr;
		block->counters.ce_count++;
	}

	/* Propagate the count up the 'totals' tree */
	instance->counters.ce_count++;
	edac_dev->counters.ce_count++;

	if (edac_device_get_log_ce(edac_dev))
		edac_device_printk(edac_dev, KERN_WARNING,
				"CE: %s instance: %s block: %s '%s'\n",
				edac_dev->ctl_name, instance->name,
				block ? block->name : "N/A", msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ce);

/*
 * edac_device_handle_ue
 *	perform a common output and handling of an 'edac_dev' UE event
 */
void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
			int inst_nr, int block_nr, const char *msg)
{
	struct edac_device_instance *instance;
	struct edac_device_block *block = NULL;

	if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: 'instance' out of range "
				"(%d >= %d)\n", inst_nr,
				edac_dev->nr_instances);
		return;
	}

	instance = edac_dev->instances + inst_nr;

	if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: instance %d 'block' "
				"out of range (%d >= %d)\n",
				inst_nr, block_nr,
				instance->nr_blocks);
		return;
	}

	if (instance->nr_blocks > 0) {
		block = instance->blocks + block_nr;
		block->counters.ue_count++;
	}

	/* Propagate the count up the 'totals' tree */
	instance->counters.ue_count++;
	edac_dev->counters.ue_count++;

	if (edac_device_get_log_ue(edac_dev))
		edac_device_printk(edac_dev, KERN_EMERG,
				"UE: %s instance: %s block: %s '%s'\n",
				edac_dev->ctl_name, instance->name,
				block ? block->name : "N/A", msg);

	if (edac_device_get_panic_on_ue(edac_dev))
		panic("EDAC %s: UE instance: %s block %s '%s'\n",
			edac_dev->ctl_name, instance->name,
			block ? block->name : "N/A", msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ue);
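
/*
 * For completeness, an interrupt-driven controller (one registered with no
 * edac_check routine, hence OP_RUNNING_INTERRUPT) reports errors from its
 * ISR instead of from the polling workqueue.  The IRQ wiring and the
 * 'my_*' names below are assumptions for illustration only:
 *
 *	static irqreturn_t my_ecc_isr(int irq, void *dev_id)
 *	{
 *		struct edac_device_ctl_info *edac_dev = dev_id;
 *
 *		if (my_error_was_uncorrectable(edac_dev->pvt_info))
 *			edac_device_handle_ue(edac_dev, 0, 0, "bus UE");
 *		else
 *			edac_device_handle_ce(edac_dev, 0, 0, "bus CE");
 *
 *		return IRQ_HANDLED;
 *	}
 */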