arch/x86/kernel/cpu/perfctr-watchdog.c
/*
 * local apic based NMI watchdog for various CPUs.
 *
 * This file also handles reservation of performance counters for coordination
 * with other users (like oprofile).
 *
 * Note that these events normally don't tick when the CPU idles. This means
 * the frequency varies with CPU load.
 *
 * Original code for K7/P6 written by Keith Owens
 */

#include <linux/percpu.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <asm/nmi.h>
#include <linux/kprobes.h>

#include <asm/apic.h>
#include <asm/perf_event.h>

/*
 * This number is derived from the offset of Intel's MSR_P4_CRU_ESCR5
 * register from MSR_P4_BSU_ESCR0.
 *
 * It will be the max for all platforms (for now).
 */
#define NMI_MAX_COUNTER_BITS 66

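/*
 * Worked out for reference, assuming the usual msr-index.h values (this
 * note is illustrative, not a definition): MSR_P4_CRU_ESCR5 is 0x3e1 and
 * MSR_P4_BSU_ESCR0 is 0x3a0, so the largest bit offset is 0x41 = 65 and
 * 66 bits are enough to cover indices 0..65.
 */
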
/*
 * perfctr_nmi_owner tracks the ownership of the perfctr registers;
 * evntsel_nmi_owner tracks the ownership of the event selection registers.
 * Different performance counters and event selection registers may be
 * reserved by different subsystems; this reservation system just tries to
 * coordinate things a little.
 */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return msr - MSR_K7_PERFCTR0;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return msr - MSR_ARCH_PERFMON_PERFCTR0;

		switch (boot_cpu_data.x86) {
		case 6:
			return msr - MSR_P6_PERFCTR0;
		case 15:
			return msr - MSR_P4_BPU_PERFCTR0;
		}
	}
	return 0;
}
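
/*
 * Example of the mapping above (values per the standard msr-index.h
 * definitions, shown only for illustration): on AMD, MSR_K7_PERFCTR0
 * through MSR_K7_PERFCTR3 map to bits 0..3; with X86_FEATURE_ARCH_PERFMON,
 * MSR_ARCH_PERFMON_PERFCTR0 + n maps to bit n.  Any other vendor/MSR
 * combination falls through and returns 0.
 */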

/*
 * converts an msr to an appropriate reservation bit
 * returns the bit offset of the event selection register
 */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return msr - MSR_K7_EVNTSEL0;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return msr - MSR_ARCH_PERFMON_EVENTSEL0;

		switch (boot_cpu_data.x86) {
		case 6:
			return msr - MSR_P6_EVNTSEL0;
		case 15:
			return msr - MSR_P4_BSU_ESCR0;
		}
	}
	return 0;
}

/* checks whether a bit is available (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return !test_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);

int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return 1;

	if (!test_and_set_bit(counter, perfctr_nmi_owner))
		return 1;
	return 0;
}
EXPORT_SYMBOL(reserve_perfctr_nmi);
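
/*
 * Note, for callers: reserve_perfctr_nmi()/reserve_evntsel_nmi() return 1
 * when the register was successfully reserved and 0 when somebody else
 * already owns it (the opposite of the usual 0-on-success convention).
 * A hypothetical caller, shown only for illustration, would check:
 *
 *	if (!reserve_perfctr_nmi(msr))
 *		goto fail;
 */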

void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return;

	clear_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(release_perfctr_nmi);

int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return 1;

	if (!test_and_set_bit(counter, evntsel_nmi_owner))
		return 1;
	return 0;
}
EXPORT_SYMBOL(reserve_evntsel_nmi);

void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return;

	clear_bit(counter, evntsel_nmi_owner);
}
EXPORT_SYMBOL(release_evntsel_nmi);
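
#if 0
/*
 * Illustrative sketch only, kept out of the build: how a subsystem (for
 * example a perfctr-based NMI watchdog or oprofile) might pair these
 * helpers.  The choice of the K7 counter 0 MSRs and the example_*
 * function names are hypothetical, not part of this file's API.
 */
static int example_grab_counter0(void)
{
	if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0))
		return 0;	/* counter already owned by someone else */

	if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0)) {
		release_perfctr_nmi(MSR_K7_PERFCTR0);
		return 0;	/* event select already owned */
	}

	/* ... program MSR_K7_EVNTSEL0 / MSR_K7_PERFCTR0 here ... */

	return 1;
}

static void example_drop_counter0(void)
{
	release_evntsel_nmi(MSR_K7_EVNTSEL0);
	release_perfctr_nmi(MSR_K7_PERFCTR0);
}
#endif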