/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *  Copyright (C) 2011	Don Zickus Red Hat, Inc.
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/nmi.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <linux/export.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <linux/atomic.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>

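/*
 * One lock-protected handler list per NMI type (NMI_LOCAL, NMI_UNKNOWN,
 * NMI_SERR and NMI_IO_CHECK); nmi_handle() walks the list for its type.
 */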
struct nmi_desc {
	spinlock_t lock;
	struct list_head head;
};

static struct nmi_desc nmi_desc[NMI_MAX] =
{
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
		.head = LIST_HEAD_INIT(nmi_desc[0].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
		.head = LIST_HEAD_INIT(nmi_desc[1].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock),
		.head = LIST_HEAD_INIT(nmi_desc[2].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock),
		.head = LIST_HEAD_INIT(nmi_desc[3].head),
	},
};

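/*
 * Per-CPU NMI bookkeeping: 'normal' counts events claimed by NMI_LOCAL
 * handlers, 'unknown' counts unclaimed NMIs, 'external' counts SERR/IOCHK
 * events reported through port 0x61, and 'swallow' counts back-to-back
 * NMIs that were intentionally dropped.
 */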
struct nmi_stats {
	unsigned int normal;
	unsigned int unknown;
	unsigned int external;
	unsigned int swallow;
};

static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);

static int ignore_nmis;

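/*
 * When set, an NMI that no handler claims causes a panic.  Set at boot
 * via the "unknown_nmi_panic" parameter below; on x86 this flag is
 * normally also writable at run time through the kernel.unknown_nmi_panic
 * sysctl (assumption based on the usual sysctl wiring, which is not part
 * of this file).
 */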
int unknown_nmi_panic;
/*
 * Serializes access to the NMI reason port (0x61); must only be taken
 * from within the NMI handler.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);

static int __init setup_unknown_nmi_panic(char *str)
{
	unknown_nmi_panic = 1;
	return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);

#define nmi_to_desc(type) (&nmi_desc[type])

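/*
 * An NMI handler that runs longer than this (in nanoseconds) triggers a
 * rate-limited warning.  The threshold is exported through debugfs; with
 * the usual debugfs mount it shows up as
 * /sys/kernel/debug/x86/nmi_longest_ns.
 */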
static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC;

static int __init nmi_warning_debugfs(void)
{
	debugfs_create_u64("nmi_longest_ns", 0644,
			arch_debugfs_dir, &nmi_longest_ns);
	return 0;
}
fs_initcall(nmi_warning_debugfs);

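/*
 * Report the new maximum duration from irq_work context rather than from
 * the NMI handler itself, since printing from NMI context is not safe.
 */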
static void nmi_max_handler(struct irq_work *w)
{
	struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
	int remainder_ns, decimal_msecs;
	u64 whole_msecs = ACCESS_ONCE(a->max_duration);

	remainder_ns = do_div(whole_msecs, (1000 * 1000));
	decimal_msecs = remainder_ns / 1000;

	printk_ratelimited(KERN_INFO
		"INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
		a->handler, whole_msecs, decimal_msecs);
}

static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *a;
	int handled = 0;

	rcu_read_lock();

	/*
	 * NMIs are edge-triggered, which means if you have enough
	 * of them concurrently, you can lose some because only one
	 * can be latched at any given time.  Walk the whole list
	 * to handle those situations.
	 */
	list_for_each_entry_rcu(a, &desc->head, list) {
		int thishandled;
		u64 delta;

		delta = sched_clock();
		thishandled = a->handler(type, regs);
		handled += thishandled;
		delta = sched_clock() - delta;
		trace_nmi_handler(a->handler, (int)delta, thishandled);

		if (delta < nmi_longest_ns || delta < a->max_duration)
			continue;

		a->max_duration = delta;
		irq_work_queue(&a->irq_work);
	}

	rcu_read_unlock();

	/* return total number of NMI events handled */
	return handled;
}
NOKPROBE_SYMBOL(nmi_handle);

int __register_nmi_handler(unsigned int type, struct nmiaction *action)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	unsigned long flags;

	if (!action->handler)
		return -EINVAL;

	init_irq_work(&action->irq_work, nmi_max_handler);

	spin_lock_irqsave(&desc->lock, flags);

	/*
	 * Most handlers of type NMI_UNKNOWN never decline an NMI; they
	 * just assume the NMI is theirs.  This is only a sanity check
	 * to manage expectations.
	 */
	WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
	WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
	WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));

	/*
	 * Some handlers need to run first, otherwise a fake event could
	 * confuse the other handlers (kdump uses this flag).
	 */
	if (action->flags & NMI_FLAG_FIRST)
		list_add_rcu(&action->list, &desc->head);
	else
		list_add_tail_rcu(&action->list, &desc->head);

	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
EXPORT_SYMBOL(__register_nmi_handler);

void unregister_nmi_handler(unsigned int type, const char *name)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *n;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);

	list_for_each_entry_rcu(n, &desc->head, list) {
		/*
		 * The name passed in to describe the NMI handler
		 * is used as the lookup key.
		 */
		if (!strcmp(n->name, name)) {
			WARN(in_nmi(),
				"Trying to free NMI (%s) from NMI context!\n", n->name);
			list_del_rcu(&n->list);
			break;
		}
	}

	spin_unlock_irqrestore(&desc->lock, flags);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);
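
/*
 * Illustrative sketch (not part of this file): a typical user registers a
 * handler through the register_nmi_handler() wrapper from <asm/nmi.h> and
 * tears it down by name.  The names my_nmi_handler, my_device_caused_nmi
 * and "my_nmi" below are made up for the example; the handler is assumed
 * to return NMI_HANDLED when it claimed the event and NMI_DONE otherwise,
 * using the constants from <asm/nmi.h>:
 *
 *	static int my_nmi_handler(unsigned int type, struct pt_regs *regs)
 *	{
 *		if (!my_device_caused_nmi())	// hypothetical check
 *			return NMI_DONE;	// not ours, keep walking the list
 *		// ... service the event ...
 *		return NMI_HANDLED;
 *	}
 *
 *	// Registration, e.g. from a module init path:
 *	//	register_nmi_handler(NMI_LOCAL, my_nmi_handler, 0, "my_nmi");
 *	// and the matching teardown, never from NMI context:
 *	//	unregister_nmi_handler(NMI_LOCAL, "my_nmi");
 */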

static void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
	/* check to see if anyone registered against these types of errors */
	if (nmi_handle(NMI_SERR, regs, false))
		return;

	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	/*
	 * On some machines, PCI SERR line is used to report memory
	 * errors. EDAC makes use of it.
	 */
#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");

	/* Clear and disable the PCI SERR error line. */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(pci_serr_error);

static void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	/* check to see if anyone registered against these types of errors */
	if (nmi_handle(NMI_IO_CHECK, regs, false))
		return;

	pr_emerg(
	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());
	show_regs(regs);

	if (panic_on_io_nmi)
		panic("NMI IOCK error: Not continuing");

	/* Re-enable the IOCK line and wait for about two seconds */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);

	i = 20000;
	while (--i) {
		touch_nmi_watchdog();
		udelay(100);
	}

	reason &= ~NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(io_check_error);

static void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	int handled;

	/*
	 * Use 'false' as back-to-back NMIs are dealt with one level up.
	 * Of course this makes having multiple 'unknown' handlers useless
	 * as only the first one is ever run (unless it can actually determine
	 * if it caused the NMI)
	 */
	handled = nmi_handle(NMI_UNKNOWN, regs, false);
	if (handled) {
		__this_cpu_add(nmi_stats.unknown, handled);
		return;
	}

	__this_cpu_add(nmi_stats.unknown, 1);

	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	pr_emerg("Do you have a strange power saving mode enabled?\n");
	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");
}
NOKPROBE_SYMBOL(unknown_nmi_error);

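/*
 * Per-CPU back-to-back NMI tracking: last_nmi_rip remembers where the
 * previous NMI interrupted this CPU, and swallow_nmi flags that a
 * following unknown NMI may have already been handled and can be dropped.
 */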
static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);

static void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int handled;
	bool b2b = false;

	/*
	 * CPU-specific NMIs must be processed before non-CPU-specific ones,
	 * otherwise we may lose them, because a CPU-specific NMI cannot be
	 * detected/processed on other CPUs.
	 */

	/*
	 * Back-to-back NMIs are interesting because they can either
	 * be two NMIs or more than two NMIs (anything over two is dropped
	 * due to NMIs being edge-triggered).  If this is the second half
	 * of a back-to-back NMI, assume we dropped things and process
	 * more handlers.  Otherwise, reset the 'swallow' NMI behaviour.
	 */
	if (regs->ip == __this_cpu_read(last_nmi_rip))
		b2b = true;
	else
		__this_cpu_write(swallow_nmi, false);

	__this_cpu_write(last_nmi_rip, regs->ip);

	handled = nmi_handle(NMI_LOCAL, regs, b2b);
	__this_cpu_add(nmi_stats.normal, handled);
	if (handled) {
		/*
		 * There are cases when an NMI handler handles multiple
		 * events in the current NMI.  One of these events may
		 * be queued for the next NMI.  Because the event is
		 * already handled, the next NMI will result in an unknown
		 * NMI.  Instead, let's flag this for a potential NMI to
		 * swallow.
		 */
		if (handled > 1)
			__this_cpu_write(swallow_nmi, true);
		return;
	}

	/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
	raw_spin_lock(&nmi_reason_lock);
	reason = x86_platform.get_nmi_reason();

	if (reason & NMI_REASON_MASK) {
		if (reason & NMI_REASON_SERR)
			pci_serr_error(reason, regs);
		else if (reason & NMI_REASON_IOCHK)
			io_check_error(reason, regs);
#ifdef CONFIG_X86_32
		/*
		 * Reassert NMI in case it became active
		 * meanwhile as it's edge-triggered:
		 */
		reassert_nmi();
#endif
		__this_cpu_add(nmi_stats.external, 1);
		raw_spin_unlock(&nmi_reason_lock);
		return;
	}
	raw_spin_unlock(&nmi_reason_lock);

	/*
	 * Only one NMI can be latched at a time.  To handle
	 * this we may process multiple nmi handlers at once to
	 * cover the case where an NMI is dropped.  The downside
	 * to this approach is we may process an NMI prematurely,
	 * while its real NMI is sitting latched.  This will cause
	 * an unknown NMI on the next run of the NMI processing.
	 *
	 * We tried to flag that condition above, by setting the
	 * swallow_nmi flag when we process more than one event.
	 * This condition is also only present on the second half
	 * of a back-to-back NMI, so we flag that condition too.
	 *
	 * If both are true, we assume we already processed this
	 * NMI previously and we swallow it.  Otherwise we reset
	 * the logic.
	 *
	 * There are scenarios where we may accidentally swallow
	 * a 'real' unknown NMI.  For example, while processing
	 * a perf NMI another perf NMI comes in along with a
	 * 'real' unknown NMI.  These two NMIs get combined into
	 * one (as described above).  When the next NMI gets
	 * processed, it will be flagged by perf as handled, but
	 * no one will know that there was a 'real' unknown NMI sent
	 * also.  As a result it gets swallowed.  Or if the first
	 * perf NMI returns two events handled then the second
	 * NMI will get eaten by the logic below, again losing a
	 * 'real' unknown NMI.  But this is the best we can do
	 * for now.
	 */
	if (b2b && __this_cpu_read(swallow_nmi))
		__this_cpu_add(nmi_stats.swallow, 1);
	else
		unknown_nmi_error(reason, regs);
}
NOKPROBE_SYMBOL(default_do_nmi);

/*
 * An NMI can page fault or hit a breakpoint, which will cause it to lose
 * its NMI context with the CPU when the breakpoint or page fault does an IRET.
 *
 * As a result, NMIs can nest if NMIs get unmasked due to an IRET during
 * NMI processing.  On x86_64, the asm glue protects us from nested NMIs
 * if the outer NMI came from kernel mode, but we can still nest if the
 * outer NMI came from user mode.
 *
 * To handle these nested NMIs, we have three states:
 *
 *  1) not running
 *  2) executing
 *  3) latched
 *
 * When no NMI is in progress, it is in the "not running" state.
 * When an NMI comes in, it goes into the "executing" state.
 * Normally, if another NMI is triggered, it does not interrupt
 * the running NMI and the HW will simply latch it so that when
 * the first NMI finishes, it will restart the second NMI.
 * (Note, the latch is binary, thus multiple NMIs triggering,
 *  when one is running, are ignored. Only one NMI is restarted.)
 *
 * If an NMI executes an iret, another NMI can preempt it. We do not
 * want to allow this new NMI to run, but we want to execute it when the
 * first one finishes.  We set the state to "latched", and the exit of
 * the first NMI will perform a dec_return; if the result is zero
 * (NOT_RUNNING), then it will simply exit the NMI handler.  If not, the
 * dec_return will have set the state to NMI_EXECUTING (what we want it
 * to be when we are running).  In this case, we simply jump back to
 * rerun the NMI handler again, and restart the 'latched' NMI.
 *
 * No trap (breakpoint or page fault) should be hit before nmi_restart,
 * thus there is no race between the first check of state for NOT_RUNNING
 * and setting it to NMI_EXECUTING.  The HW will prevent nested NMIs
 * at this point.
 *
 * In case the NMI takes a page fault, we need to save off the CR2
 * because the NMI could have preempted another page fault and corrupted
 * the CR2 that is about to be read.  As nested NMIs must be restarted
 * and they cannot take breakpoints or page faults, the update of the
 * CR2 must be done before converting the NMI state back to NOT_RUNNING.
 * Otherwise, there would be a race of another nested NMI coming in
 * after setting state to NOT_RUNNING but before updating the nmi_cr2.
 */
enum nmi_states {
	NMI_NOT_RUNNING = 0,
	NMI_EXECUTING,
	NMI_LATCHED,
};
static DEFINE_PER_CPU(enum nmi_states, nmi_state);
static DEFINE_PER_CPU(unsigned long, nmi_cr2);
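
/*
 * Illustrative trace of the state machine above for one nested NMI:
 *
 *	NMI #1 arrives:		NMI_NOT_RUNNING -> NMI_EXECUTING
 *	NMI #2 preempts during an IRET window:	NMI_EXECUTING -> NMI_LATCHED
 *	NMI #1 exits, dec_return -> NMI_EXECUTING (non-zero), so it jumps
 *	back to nmi_restart and services the latched NMI.
 *	That pass exits, dec_return -> NMI_NOT_RUNNING (zero), and do_nmi()
 *	returns.
 */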

#ifdef CONFIG_X86_64
/*
 * On x86_64, we need to handle breakpoint -> NMI -> breakpoint.  Without
 * some care, the inner breakpoint will clobber the outer breakpoint's
 * stack.
 *
 * If a breakpoint is being processed on the debug stack and an NMI comes
 * in that also hits a breakpoint, the stack pointer will be set to the
 * same fixed address as the interrupted breakpoint, corrupting that
 * stack.  To handle this case, check whether the stack that was
 * interrupted is the debug stack, and if so, change the IDT so that new
 * breakpoints will use the current stack and not switch to the fixed
 * address.  On return from the NMI, switch back to the original IDT.
 */
static DEFINE_PER_CPU(int, update_debug_stack);
#endif


dotraplinkage notrace void
do_nmi(struct pt_regs *regs, long error_code)
{
	if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
		this_cpu_write(nmi_state, NMI_LATCHED);
		return;
	}
	this_cpu_write(nmi_state, NMI_EXECUTING);
	this_cpu_write(nmi_cr2, read_cr2());
nmi_restart:

#ifdef CONFIG_X86_64
	/*
	 * If we interrupted a breakpoint, it is possible that
	 * the NMI handler will hit breakpoints too.  We need to
	 * change the IDT so that breakpoints that happen here
	 * continue to use the NMI stack.
	 */
	if (unlikely(is_debug_stack(regs->sp))) {
		debug_stack_set_zero();
		this_cpu_write(update_debug_stack, 1);
	}
#endif

	nmi_enter();

	inc_irq_stat(__nmi_count);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();

#ifdef CONFIG_X86_64
	if (unlikely(this_cpu_read(update_debug_stack))) {
		debug_stack_reset();
		this_cpu_write(update_debug_stack, 0);
	}
#endif

	if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
		write_cr2(this_cpu_read(nmi_cr2));
	if (this_cpu_dec_return(nmi_state))
		goto nmi_restart;
}
NOKPROBE_SYMBOL(do_nmi);

void stop_nmi(void)
{
	ignore_nmis++;
}

void restart_nmi(void)
{
	ignore_nmis--;
}

/* reset the back-to-back NMI logic */
void local_touch_nmi(void)
{
	__this_cpu_write(last_nmi_rip, 0);
}
EXPORT_SYMBOL_GPL(local_touch_nmi);