/*
 * local apic based NMI watchdog for various CPUs.
 *
 * This file also handles reservation of performance counters for coordination
 * with other users (like oprofile).
 *
 * Note that these events normally don't tick when the CPU idles. This means
 * the frequency varies with CPU load.
 *
 * Original code for K7/P6 written by Keith Owens
 */
#include <linux/percpu.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/kprobes.h>

#include <asm/perf_event.h>
/*
 * this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0.
 *
 * It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66
/*
 * perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evtsel_nmi_owner tracks the ownership of the event selection
 * - different performance counters/ event selection may be reserved for
 *   different subsystems this reservation system just tries to coordinate
 *   things a bit
 */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);
43 /* converts an msr to an appropriate reservation bit */
44 static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
46 /* returns the bit offset of the performance counter register */
47 switch (boot_cpu_data.x86_vendor) {
49 return msr - MSR_K7_PERFCTR0;
50 case X86_VENDOR_INTEL:
51 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
52 return msr - MSR_ARCH_PERFMON_PERFCTR0;
54 switch (boot_cpu_data.x86) {
56 return msr - MSR_P6_PERFCTR0;
58 return msr - MSR_P4_BPU_PERFCTR0;
65 * converts an msr to an appropriate reservation bit
66 * returns the bit offset of the event selection register
68 static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
70 /* returns the bit offset of the event selection register */
71 switch (boot_cpu_data.x86_vendor) {
73 return msr - MSR_K7_EVNTSEL0;
74 case X86_VENDOR_INTEL:
75 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
76 return msr - MSR_ARCH_PERFMON_EVENTSEL0;
78 switch (boot_cpu_data.x86) {
80 return msr - MSR_P6_EVNTSEL0;
82 return msr - MSR_P4_BSU_ESCR0;
89 /* checks for a bit availability (hack for oprofile) */
90 int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
92 BUG_ON(counter > NMI_MAX_COUNTER_BITS);
94 return !test_bit(counter, perfctr_nmi_owner);
96 EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
98 int reserve_perfctr_nmi(unsigned int msr)
100 unsigned int counter;
102 counter = nmi_perfctr_msr_to_bit(msr);
103 /* register not managed by the allocator? */
104 if (counter > NMI_MAX_COUNTER_BITS)
107 if (!test_and_set_bit(counter, perfctr_nmi_owner))
111 EXPORT_SYMBOL(reserve_perfctr_nmi);
113 void release_perfctr_nmi(unsigned int msr)
115 unsigned int counter;
117 counter = nmi_perfctr_msr_to_bit(msr);
118 /* register not managed by the allocator? */
119 if (counter > NMI_MAX_COUNTER_BITS)
122 clear_bit(counter, perfctr_nmi_owner);
124 EXPORT_SYMBOL(release_perfctr_nmi);
126 int reserve_evntsel_nmi(unsigned int msr)
128 unsigned int counter;
130 counter = nmi_evntsel_msr_to_bit(msr);
131 /* register not managed by the allocator? */
132 if (counter > NMI_MAX_COUNTER_BITS)
135 if (!test_and_set_bit(counter, evntsel_nmi_owner))
139 EXPORT_SYMBOL(reserve_evntsel_nmi);
141 void release_evntsel_nmi(unsigned int msr)
143 unsigned int counter;
145 counter = nmi_evntsel_msr_to_bit(msr);
146 /* register not managed by the allocator? */
147 if (counter > NMI_MAX_COUNTER_BITS)
150 clear_bit(counter, evntsel_nmi_owner);
152 EXPORT_SYMBOL(release_evntsel_nmi);