Now that the NMI handlers are broken into lists, increment the appropriate
stats for each list. This allows us to see what is going on when they
get printed out in the next patch.
Signed-off-by: Don Zickus <dzickus@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1317409584-23662-6-git-send-email-dzickus@redhat.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
+/*
+ * Per-CPU counters recording how many NMIs were accounted to each
+ * handler category; a later patch prints these for diagnostics.
+ */
+struct nmi_stats {
+ unsigned int normal;	/* credited from the NMI_LOCAL handler list */
+ unsigned int unknown;	/* credited from the NMI_UNKNOWN handler list */
+ unsigned int external;	/* bumped in the nmi_reason handling path */
+ unsigned int swallow;	/* back-to-back NMIs dropped when swallow_nmi is set */
+};
+
+static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);
+
static int ignore_nmis;
int unknown_nmi_panic;
static int ignore_nmis;
int unknown_nmi_panic;
* if it caused the NMI)
*/
handled = nmi_handle(NMI_UNKNOWN, regs, false);
* if it caused the NMI)
*/
handled = nmi_handle(NMI_UNKNOWN, regs, false);
+ if (handled) {
+ __this_cpu_add(nmi_stats.unknown, handled);
+ }
+
+ __this_cpu_add(nmi_stats.unknown, 1);
+
#ifdef CONFIG_MCA
/*
* Might actually be able to figure out what the guilty party
#ifdef CONFIG_MCA
/*
* Might actually be able to figure out what the guilty party
__this_cpu_write(last_nmi_rip, regs->ip);
handled = nmi_handle(NMI_LOCAL, regs, b2b);
__this_cpu_write(last_nmi_rip, regs->ip);
handled = nmi_handle(NMI_LOCAL, regs, b2b);
+ __this_cpu_add(nmi_stats.normal, handled);
if (handled) {
/*
* There are cases when a NMI handler handles multiple
if (handled) {
/*
* There are cases when a NMI handler handles multiple
*/
reassert_nmi();
#endif
*/
reassert_nmi();
#endif
+ __this_cpu_add(nmi_stats.external, 1);
raw_spin_unlock(&nmi_reason_lock);
return;
}
raw_spin_unlock(&nmi_reason_lock);
return;
}
* for now.
*/
if (b2b && __this_cpu_read(swallow_nmi))
* for now.
*/
if (b2b && __this_cpu_read(swallow_nmi))
+ __this_cpu_add(nmi_stats.swallow, 1);
else
unknown_nmi_error(reason, regs);
}
else
unknown_nmi_error(reason, regs);
}