/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *  Copyright (C) 2011  Don Zickus Red Hat, Inc.
 *
 *  Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/nmi.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <linux/export.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <linux/atomic.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>

struct nmi_desc {
        spinlock_t lock;
        struct list_head head;
};

static struct nmi_desc nmi_desc[NMI_MAX] =
{
        {
                .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
                .head = LIST_HEAD_INIT(nmi_desc[0].head),
        },
        {
                .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
                .head = LIST_HEAD_INIT(nmi_desc[1].head),
        },
        {
                .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock),
                .head = LIST_HEAD_INIT(nmi_desc[2].head),
        },
        {
                .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock),
                .head = LIST_HEAD_INIT(nmi_desc[3].head),
        },
};

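/*
 * The descriptors above are indexed by the NMI type constants from
 * <asm/nmi.h> (NMI_LOCAL, NMI_UNKNOWN, NMI_SERR and NMI_IO_CHECK), so
 * each class of NMI gets its own handler chain.
 */
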
struct nmi_stats {
        unsigned int normal;
        unsigned int unknown;
        unsigned int external;
        unsigned int swallow;
};

static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);

static int ignore_nmis;

int unknown_nmi_panic;
/*
 * Prevent NMI reason port (0x61) being accessed simultaneously, can
 * only be used in NMI handler.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);

static int __init setup_unknown_nmi_panic(char *str)
{
        unknown_nmi_panic = 1;
        return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);

#define nmi_to_desc(type) (&nmi_desc[type])

static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC;
static int __init nmi_warning_debugfs(void)
{
        debugfs_create_u64("nmi_longest_ns", 0644,
                        arch_debugfs_dir, &nmi_longest_ns);
        return 0;
}
fs_initcall(nmi_warning_debugfs);

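/*
 * With debugfs mounted in the usual place this knob typically shows up as
 * /sys/kernel/debug/x86/nmi_longest_ns; writing a larger value there raises
 * the threshold for the "took too long to run" warning in nmi_handle().
 */
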
static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
{
        struct nmi_desc *desc = nmi_to_desc(type);
        struct nmiaction *a;
        int handled = 0;

        rcu_read_lock();

        /*
         * NMIs are edge-triggered, which means if you have enough
         * of them concurrently, you can lose some because only one
         * can be latched at any given time.  Walk the whole list
         * to handle those situations.
         */
        list_for_each_entry_rcu(a, &desc->head, list) {
                u64 before, delta, whole_msecs;
                int remainder_ns, decimal_msecs, thishandled;

                before = local_clock();
                thishandled = a->handler(type, regs);
                handled += thishandled;
                delta = local_clock() - before;
                trace_nmi_handler(a->handler, (int)delta, thishandled);

                if (delta < nmi_longest_ns)
                        continue;

                nmi_longest_ns = delta;
                /* do_div() divides in place and returns the remainder */
                whole_msecs = delta;
                remainder_ns = do_div(whole_msecs, (1000 * 1000));
                decimal_msecs = remainder_ns / 1000;
                printk_ratelimited(KERN_INFO
                        "INFO: NMI handler (%ps) took too long to run: "
                        "%lld.%03d msecs\n", a->handler, whole_msecs,
                        decimal_msecs);
        }

        rcu_read_unlock();

        /* return total number of NMI events handled */
        return handled;
}

int __register_nmi_handler(unsigned int type, struct nmiaction *action)
{
        struct nmi_desc *desc = nmi_to_desc(type);
        unsigned long flags;

        if (!action->handler)
                return -EINVAL;

        spin_lock_irqsave(&desc->lock, flags);

        /*
         * most handlers of type NMI_UNKNOWN never return because
         * they just assume the NMI is theirs.  Just a sanity check
         * to manage expectations
         */
        WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
        WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
        WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));

        /*
         * some handlers need to be executed first otherwise a fake
         * event confuses some handlers (kdump uses this flag)
         */
        if (action->flags & NMI_FLAG_FIRST)
                list_add_rcu(&action->list, &desc->head);
        else
                list_add_tail_rcu(&action->list, &desc->head);

        spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
}
EXPORT_SYMBOL(__register_nmi_handler);

void unregister_nmi_handler(unsigned int type, const char *name)
{
        struct nmi_desc *desc = nmi_to_desc(type);
        struct nmiaction *n;
        unsigned long flags;

        spin_lock_irqsave(&desc->lock, flags);

        list_for_each_entry_rcu(n, &desc->head, list) {
                /*
                 * the name passed in to describe the nmi handler
                 * is used as the lookup key
                 */
                if (!strcmp(n->name, name)) {
                        WARN(in_nmi(),
                                "Trying to free NMI (%s) from NMI context!\n", n->name);
                        list_del_rcu(&n->list);
                        break;
                }
        }

        spin_unlock_irqrestore(&desc->lock, flags);
        synchronize_rcu();
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);

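/*
 * Illustrative sketch (not compiled in): a typical client registers on the
 * NMI_LOCAL chain with the register_nmi_handler() wrapper from <asm/nmi.h>
 * and removes itself by name.  example_nmi_handler(), the hypothetical
 * example_event_pending() check and the "example" name are placeholders.
 */
#if 0
static int example_nmi_handler(unsigned int type, struct pt_regs *regs)
{
        if (!example_event_pending())   /* hypothetical ownership check */
                return NMI_DONE;        /* not ours, let other handlers look */

        /* ... service the hardware event ... */
        return NMI_HANDLED;
}

static int __init example_init(void)
{
        /* flags == 0 appends to the chain; NMI_FLAG_FIRST would prepend */
        return register_nmi_handler(NMI_LOCAL, example_nmi_handler, 0,
                                    "example");
}

static void __exit example_exit(void)
{
        unregister_nmi_handler(NMI_LOCAL, "example");
}
#endif
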
static __kprobes void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
        /* check to see if anyone registered against these types of errors */
        if (nmi_handle(NMI_SERR, regs, false))
                return;

        pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
                 reason, smp_processor_id());

        /*
         * On some machines, PCI SERR line is used to report memory
         * errors.  EDAC makes use of it.
         */
#if defined(CONFIG_EDAC)
        if (edac_handler_set()) {
                edac_atomic_assert_error();
                return;
        }
#endif

        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        pr_emerg("Dazed and confused, but trying to continue\n");

        /* Clear and disable the PCI SERR error line. */
        reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
        outb(reason, NMI_REASON_PORT);
}

static __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
        unsigned long i;

        /* check to see if anyone registered against these types of errors */
        if (nmi_handle(NMI_IO_CHECK, regs, false))
                return;

        pr_emerg(
        "NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
                 reason, smp_processor_id());
        show_regs(regs);

        if (panic_on_io_nmi)
                panic("NMI IOCK error: Not continuing");

        /* Re-enable the IOCK line, wait for a few seconds */
        reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
        outb(reason, NMI_REASON_PORT);

        i = 20000;
        while (--i) {
                touch_nmi_watchdog();
                udelay(100);
        }

        reason &= ~NMI_REASON_CLEAR_IOCHK;
        outb(reason, NMI_REASON_PORT);
}

static __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
        int handled;

        /*
         * Use 'false' as back-to-back NMIs are dealt with one level up.
         * Of course this makes having multiple 'unknown' handlers useless
         * as only the first one is ever run (unless it can actually determine
         * if it caused the NMI)
         */
        handled = nmi_handle(NMI_UNKNOWN, regs, false);
        if (handled) {
                __this_cpu_add(nmi_stats.unknown, handled);
                return;
        }

        __this_cpu_add(nmi_stats.unknown, 1);

        pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
                 reason, smp_processor_id());

        pr_emerg("Do you have a strange power saving mode enabled?\n");
        if (unknown_nmi_panic || panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        pr_emerg("Dazed and confused, but trying to continue\n");
}

static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);

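/*
 * last_nmi_rip remembers where the previous NMI interrupted the CPU; if the
 * next NMI lands on the exact same instruction it is treated as the second
 * half of a back-to-back pair.  swallow_nmi flags that a follow-up "unknown"
 * NMI may safely be dropped (see default_do_nmi() below).
 */
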
static __kprobes void default_do_nmi(struct pt_regs *regs)
{
        unsigned char reason = 0;
        int handled;
        bool b2b = false;

        /*
         * CPU-specific NMI must be processed before non-CPU-specific
         * NMI, otherwise we may lose it, because the CPU-specific
         * NMI can not be detected/processed on other CPUs.
         */

        /*
         * Back-to-back NMIs are interesting because they can either
         * be two NMIs or more than two NMIs (anything over two is dropped
         * due to NMI being edge-triggered).  If this is the second half
         * of the back-to-back NMI, assume we dropped things and process
         * more handlers.  Otherwise reset the 'swallow' NMI behaviour.
         */
        if (regs->ip == __this_cpu_read(last_nmi_rip))
                b2b = true;
        else
                __this_cpu_write(swallow_nmi, false);

        __this_cpu_write(last_nmi_rip, regs->ip);

        handled = nmi_handle(NMI_LOCAL, regs, b2b);
        __this_cpu_add(nmi_stats.normal, handled);
        if (handled) {
                /*
                 * There are cases when an NMI handler handles multiple
                 * events in the current NMI.  One of these events may
                 * be queued for the next NMI.  Because the event is
                 * already handled, the next NMI will result in an unknown
                 * NMI.  Instead let's flag this for a potential NMI to
                 * swallow.
                 */
                if (handled > 1)
                        __this_cpu_write(swallow_nmi, true);
                return;
        }

        /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
        raw_spin_lock(&nmi_reason_lock);
        reason = x86_platform.get_nmi_reason();

        if (reason & NMI_REASON_MASK) {
                if (reason & NMI_REASON_SERR)
                        pci_serr_error(reason, regs);
                else if (reason & NMI_REASON_IOCHK)
                        io_check_error(reason, regs);
#ifdef CONFIG_X86_32
                /*
                 * Reassert NMI in case it became active
                 * meanwhile as it's edge-triggered:
                 */
                reassert_nmi();
#endif
                __this_cpu_add(nmi_stats.external, 1);
                raw_spin_unlock(&nmi_reason_lock);
                return;
        }
        raw_spin_unlock(&nmi_reason_lock);

        /*
         * Only one NMI can be latched at a time.  To handle
         * this we may process multiple nmi handlers at once to
         * cover the case where an NMI is dropped.  The downside
         * to this approach is we may process an NMI prematurely,
         * while its real NMI is sitting latched.  This will cause
         * an unknown NMI on the next run of the NMI processing.
         *
         * We tried to flag that condition above, by setting the
         * swallow_nmi flag when we process more than one event.
         * This condition is also only present on the second half
         * of a back-to-back NMI, so we flag that condition too.
         *
         * If both are true, we assume we already processed this
         * NMI previously and we swallow it.  Otherwise we reset
         * the 'swallow' state.
         *
         * There are scenarios where we may accidentally swallow
         * a 'real' unknown NMI.  For example, while processing
         * a perf NMI another perf NMI comes in along with a
         * 'real' unknown NMI.  These two NMIs get combined into
         * one (as described above).  When the next NMI gets
         * processed, it will be flagged by perf as handled, but
         * no one will know that there was a 'real' unknown NMI sent
         * also.  As a result it gets swallowed.  Or if the first
         * perf NMI returns two events handled then the second
         * NMI will get eaten by the logic below, again losing a
         * 'real' unknown NMI.  But this is the best we can do
         * for now.
         */
        if (b2b && __this_cpu_read(swallow_nmi))
                __this_cpu_add(nmi_stats.swallow, 1);
        else
                unknown_nmi_error(reason, regs);
}

/*
 * NMIs can hit breakpoints, which will cause them to lose their
 * NMI context with the CPU when the breakpoint handler does an iret.
 */
#ifdef CONFIG_X86_32
/*
 * For i386, NMIs use the same stack as the kernel, and we can
 * add a workaround to the iret problem in C (preventing nested
 * NMIs if an NMI takes a trap). Simply have 3 states the NMI
 * can be in:
 *
 *  1) not running
 *  2) executing
 *  3) latched
 *
 * When no NMI is in progress, it is in the "not running" state.
 * When an NMI comes in, it goes into the "executing" state.
 * Normally, if another NMI is triggered, it does not interrupt
 * the running NMI and the HW will simply latch it so that when
 * the first NMI finishes, it will restart the second NMI.
 * (Note, the latch is binary, thus multiple NMIs triggering,
 *  when one is running, are ignored. Only one NMI is restarted.)
 *
 * If an NMI hits a breakpoint that executes an iret, another
 * NMI can preempt it. We do not want to allow this new NMI
 * to run, but we want to execute it when the first one finishes.
 * We set the state to "latched", and the exit of the first NMI will
 * perform a dec_return; if the result is zero (NOT_RUNNING), then
 * it will simply exit the NMI handler. If not, the dec_return
 * would have set the state to NMI_EXECUTING (what we want it to
 * be when we are running). In this case, we simply jump back
 * to rerun the NMI handler again, and restart the 'latched' NMI.
 *
 * No trap (breakpoint or page fault) should be hit before nmi_restart,
 * thus there is no race between the first check of state for NOT_RUNNING
 * and setting it to NMI_EXECUTING. The HW will prevent nested NMIs
 * from occurring.
 *
 * In case the NMI takes a page fault, we need to save off the CR2
 * because the NMI could have preempted another page fault and corrupted
 * the CR2 that is about to be read. As nested NMIs must be restarted
 * and they can not take breakpoints or page faults, the update of the
 * CR2 must be done before converting the nmi state back to NOT_RUNNING.
 * Otherwise, there would be a race of another nested NMI coming in
 * after setting state to NOT_RUNNING but before updating the nmi_cr2.
 */
enum nmi_states {
        NMI_NOT_RUNNING = 0,
        NMI_EXECUTING,
        NMI_LATCHED,
};
static DEFINE_PER_CPU(enum nmi_states, nmi_state);
static DEFINE_PER_CPU(unsigned long, nmi_cr2);

#define nmi_nesting_preprocess(regs)                                    \
        do {                                                            \
                if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {     \
                        this_cpu_write(nmi_state, NMI_LATCHED);        \
                        return;                                         \
                }                                                       \
                this_cpu_write(nmi_state, NMI_EXECUTING);              \
                this_cpu_write(nmi_cr2, read_cr2());                    \
        } while (0);                                                    \
        nmi_restart:

#define nmi_nesting_postprocess()                                       \
        do {                                                            \
                if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))    \
                        write_cr2(this_cpu_read(nmi_cr2));             \
                if (this_cpu_dec_return(nmi_state))                     \
                        goto nmi_restart;                               \
        } while (0)

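/*
 * Rough sketch of how do_nmi() behaves on i386 once the two macros above
 * are expanded (pseudo-code, for orientation only):
 *
 *      if (nmi_state != NMI_NOT_RUNNING) {     // nested NMI: just latch it
 *              nmi_state = NMI_LATCHED;
 *              return;
 *      }
 *      nmi_state = NMI_EXECUTING;
 *      nmi_cr2 = read_cr2();
 * nmi_restart:
 *      ... handle the NMI ...
 *      if (nmi_cr2 != read_cr2())
 *              write_cr2(nmi_cr2);             // undo a nested NMI's page fault
 *      if (this_cpu_dec_return(nmi_state))     // still EXECUTING: a latched
 *              goto nmi_restart;               // NMI arrived, run it now
 */
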
#else /* x86_64 */
/*
 * On x86_64 things are a bit more difficult. This has the same problem
 * where an NMI hitting a breakpoint that calls iret will remove the
 * NMI context, allowing a nested NMI to enter. What makes this more
 * difficult is that both NMIs and breakpoints have their own stack.
 * When a new NMI or breakpoint is executed, the stack is set to a fixed
 * point. If an NMI is nested, it will have its stack set at that same
 * fixed address that the first NMI had, and will start corrupting the
 * stack. This is handled in entry_64.S, but the same problem exists with
 * the breakpoint stack.
 *
 * If a breakpoint is being processed and the debug stack is in use, and
 * an NMI comes in and also hits a breakpoint, the stack pointer
 * will be set to the same fixed address as the breakpoint that was
 * interrupted, causing that stack to be corrupted. To handle this case,
 * check if the stack that was interrupted is the debug stack, and if
 * so, change the IDT so that new breakpoints will use the current stack
 * and not switch to the fixed address. On return of the NMI, switch back
 * to the original IDT.
 */
static DEFINE_PER_CPU(int, update_debug_stack);

static inline void nmi_nesting_preprocess(struct pt_regs *regs)
{
        /*
         * If we interrupted a breakpoint, it is possible that
         * the nmi handler will have breakpoints too. We need to
         * change the IDT such that breakpoints that happen here
         * continue to use the NMI stack.
         */
        if (unlikely(is_debug_stack(regs->sp))) {
                debug_stack_set_zero();
                this_cpu_write(update_debug_stack, 1);
        }
}

static inline void nmi_nesting_postprocess(void)
{
        if (unlikely(this_cpu_read(update_debug_stack))) {
                debug_stack_reset();
                this_cpu_write(update_debug_stack, 0);
        }
}
#endif

dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
        nmi_nesting_preprocess(regs);

        nmi_enter();

        inc_irq_stat(__nmi_count);

        if (!ignore_nmis)
                default_do_nmi(regs);

        nmi_exit();

        /* On i386, may loop back to preprocess */
        nmi_nesting_postprocess();
}

void stop_nmi(void)
{
        ignore_nmis++;
}

void restart_nmi(void)
{
        ignore_nmis--;
}

/* reset the back-to-back NMI logic */
void local_touch_nmi(void)
{
        __this_cpu_write(last_nmi_rip, 0);
}
EXPORT_SYMBOL_GPL(local_touch_nmi);