perf/x86/intel: Move PMU ACK to after LBR read
arch/x86/kernel/cpu/perf_event_intel.c (firefly-linux-kernel-4.4.55.git)
1 /*
2  * Per core/cpu state
3  *
4  * Used to coordinate shared registers between HT threads or
5  * among events on a single PMU.
6  */
7
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10 #include <linux/stddef.h>
11 #include <linux/types.h>
12 #include <linux/init.h>
13 #include <linux/slab.h>
14 #include <linux/export.h>
15 #include <linux/watchdog.h>
16
17 #include <asm/cpufeature.h>
18 #include <asm/hardirq.h>
19 #include <asm/apic.h>
20
21 #include "perf_event.h"
22
23 /*
24  * Intel PerfMon, used on Core and later.
25  */
26 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
27 {
28         [PERF_COUNT_HW_CPU_CYCLES]              = 0x003c,
29         [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
30         [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x4f2e,
31         [PERF_COUNT_HW_CACHE_MISSES]            = 0x412e,
32         [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c4,
33         [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c5,
34         [PERF_COUNT_HW_BUS_CYCLES]              = 0x013c,
35         [PERF_COUNT_HW_REF_CPU_CYCLES]          = 0x0300, /* pseudo-encoding */
36 };
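
/*
 * These values use the PERFEVTSELx layout: event select in bits 7:0 and
 * unit mask in bits 15:8.  E.g. 0x412e is event 0x2e with umask 0x41
 * (LONGEST_LAT_CACHE.MISS).  0x0300 has no hardware encoding; the
 * fixed-counter constraints below map it onto fixed counter 2
 * (CPU_CLK_UNHALTED.REF).
 */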
37
38 static struct event_constraint intel_core_event_constraints[] __read_mostly =
39 {
40         INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
41         INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
42         INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
43         INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
44         INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
45         INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
46         EVENT_CONSTRAINT_END
47 };
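
/*
 * The second argument of INTEL_EVENT_CONSTRAINT() is a counter bitmask:
 * 0x2 restricts the event to PMC1, 0x1 to PMC0, 0x3 to either of the
 * first two counters.  FIXED_EVENT_CONSTRAINT(event, n), used below,
 * binds a pseudo-encoded event to fixed counter n.
 */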
48
49 static struct event_constraint intel_core2_event_constraints[] __read_mostly =
50 {
51         FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
52         FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
53         FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
54         INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
55         INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
56         INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
57         INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
58         INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
59         INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
60         INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
61         INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
62         INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
63         INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
64         EVENT_CONSTRAINT_END
65 };
66
67 static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
68 {
69         FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
70         FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
71         FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
72         INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
73         INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
74         INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
75         INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
76         INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
77         INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
78         INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
79         INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
80         EVENT_CONSTRAINT_END
81 };
82
83 static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
84 {
85         /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
86         INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
87         INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
88         EVENT_EXTRA_END
89 };
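
/*
 * OFFCORE_RESPONSE (event 0xb7, umask 0x01) takes a second qualifier
 * that the PMU code writes into MSR_OFFCORE_RSP_0; 0xffff is the mask
 * of qualifier bits userspace may set through perf_event_attr::config1
 * (exposed as the "offcore_rsp" format attribute).  Roughly, using the
 * NHM_* bits defined further down, demand data reads with any L3
 * response would be:
 *
 *	perf stat -e cpu/event=0xb7,umask=0x1,offcore_rsp=0xf701/ ...
 */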
90
91 static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
92 {
93         FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
94         FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
95         FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
96         INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
97         INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
98         INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
99         INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
100         EVENT_CONSTRAINT_END
101 };
102
103 static struct event_constraint intel_snb_event_constraints[] __read_mostly =
104 {
105         FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
106         FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
107         FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
108         INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
109         INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
110         INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
111         INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
112         INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
113         INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
114         INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
117
118         INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
119         INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
120         INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
121         INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
122
123         EVENT_CONSTRAINT_END
124 };
125
126 static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
127 {
128         FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
129         FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
130         FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
131         INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
132         INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
133         INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
134         INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
135         INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
136         INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
137         INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
138         INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
139         INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
140         INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
141
142         INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
143         INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
144         INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
145         INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
146
147         EVENT_CONSTRAINT_END
148 };
149
150 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
151 {
152         /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
153         INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
154         INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
155         INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
156         EVENT_EXTRA_END
157 };
158
159 static struct event_constraint intel_v1_event_constraints[] __read_mostly =
160 {
161         EVENT_CONSTRAINT_END
162 };
163
164 static struct event_constraint intel_gen_event_constraints[] __read_mostly =
165 {
166         FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
167         FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
168         FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
169         EVENT_CONSTRAINT_END
170 };
171
172 static struct event_constraint intel_slm_event_constraints[] __read_mostly =
173 {
174         FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
175         FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
176         FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
177         EVENT_CONSTRAINT_END
178 };
179
180 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
181         /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
182         INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
183         INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
184         INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
185         EVENT_EXTRA_END
186 };
187
188 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
189         /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
190         INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
191         INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
192         INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
193         EVENT_EXTRA_END
194 };
195
196 EVENT_ATTR_STR(mem-loads,       mem_ld_nhm,     "event=0x0b,umask=0x10,ldlat=3");
197 EVENT_ATTR_STR(mem-loads,       mem_ld_snb,     "event=0xcd,umask=0x1,ldlat=3");
198 EVENT_ATTR_STR(mem-stores,      mem_st_snb,     "event=0xcd,umask=0x2");
199
200 struct attribute *nhm_events_attrs[] = {
201         EVENT_PTR(mem_ld_nhm),
202         NULL,
203 };
204
205 struct attribute *snb_events_attrs[] = {
206         EVENT_PTR(mem_ld_snb),
207         EVENT_PTR(mem_st_snb),
208         NULL,
209 };
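
/*
 * EVENT_ATTR_STR() exports these strings under
 * /sys/bus/event_source/devices/cpu/events/, so the aliases can be used
 * directly, e.g.:
 *
 *	perf mem record ./workload
 *	perf record -e cpu/mem-loads,ldlat=30/P ./workload
 *
 * ldlat is the minimum load latency, in core cycles, that PEBS must
 * observe before it records a sample (MSR_PEBS_LD_LAT_THRESHOLD).
 */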
210
211 static struct event_constraint intel_hsw_event_constraints[] = {
212         FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
213         FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
214         FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
215         INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
216         INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
217         INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
218         /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
219         INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
220         /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
221         INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
222         /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
223         INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
224
225         INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
226         INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
227         INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
228         INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
229
230         EVENT_CONSTRAINT_END
231 };
232
233 struct event_constraint intel_bdw_event_constraints[] = {
234         FIXED_EVENT_CONSTRAINT(0x00c0, 0),      /* INST_RETIRED.ANY */
235         FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
236         FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
237         INTEL_UEVENT_CONSTRAINT(0x148, 0x4),    /* L1D_PEND_MISS.PENDING */
238         INTEL_EVENT_CONSTRAINT(0xa3, 0x4),      /* CYCLE_ACTIVITY.* */
239         EVENT_CONSTRAINT_END
240 };
241
242 static u64 intel_pmu_event_map(int hw_event)
243 {
244         return intel_perfmon_event_map[hw_event];
245 }
246
247 #define SNB_DMND_DATA_RD        (1ULL << 0)
248 #define SNB_DMND_RFO            (1ULL << 1)
249 #define SNB_DMND_IFETCH         (1ULL << 2)
250 #define SNB_DMND_WB             (1ULL << 3)
251 #define SNB_PF_DATA_RD          (1ULL << 4)
252 #define SNB_PF_RFO              (1ULL << 5)
253 #define SNB_PF_IFETCH           (1ULL << 6)
254 #define SNB_LLC_DATA_RD         (1ULL << 7)
255 #define SNB_LLC_RFO             (1ULL << 8)
256 #define SNB_LLC_IFETCH          (1ULL << 9)
257 #define SNB_BUS_LOCKS           (1ULL << 10)
258 #define SNB_STRM_ST             (1ULL << 11)
259 #define SNB_OTHER               (1ULL << 15)
260 #define SNB_RESP_ANY            (1ULL << 16)
261 #define SNB_NO_SUPP             (1ULL << 17)
262 #define SNB_LLC_HITM            (1ULL << 18)
263 #define SNB_LLC_HITE            (1ULL << 19)
264 #define SNB_LLC_HITS            (1ULL << 20)
265 #define SNB_LLC_HITF            (1ULL << 21)
266 #define SNB_LOCAL               (1ULL << 22)
267 #define SNB_REMOTE              (0xffULL << 23)
268 #define SNB_SNP_NONE            (1ULL << 31)
269 #define SNB_SNP_NOT_NEEDED      (1ULL << 32)
270 #define SNB_SNP_MISS            (1ULL << 33)
271 #define SNB_NO_FWD              (1ULL << 34)
272 #define SNB_SNP_FWD             (1ULL << 35)
273 #define SNB_HITM                (1ULL << 36)
274 #define SNB_NON_DRAM            (1ULL << 37)
275
276 #define SNB_DMND_READ           (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
277 #define SNB_DMND_WRITE          (SNB_DMND_RFO|SNB_LLC_RFO)
278 #define SNB_DMND_PREFETCH       (SNB_PF_DATA_RD|SNB_PF_RFO)
279
280 #define SNB_SNP_ANY             (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
281                                  SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
282                                  SNB_HITM)
283
284 #define SNB_DRAM_ANY            (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
285 #define SNB_DRAM_REMOTE         (SNB_REMOTE|SNB_SNP_ANY)
286
287 #define SNB_L3_ACCESS           SNB_RESP_ANY
288 #define SNB_L3_MISS             (SNB_DRAM_ANY|SNB_NON_DRAM)
289
290 static __initconst const u64 snb_hw_cache_extra_regs
291                                 [PERF_COUNT_HW_CACHE_MAX]
292                                 [PERF_COUNT_HW_CACHE_OP_MAX]
293                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
294 {
295  [ C(LL  ) ] = {
296         [ C(OP_READ) ] = {
297                 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
298                 [ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
299         },
300         [ C(OP_WRITE) ] = {
301                 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
302                 [ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
303         },
304         [ C(OP_PREFETCH) ] = {
305                 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
306                 [ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
307         },
308  },
309  [ C(NODE) ] = {
310         [ C(OP_READ) ] = {
311                 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
312                 [ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
313         },
314         [ C(OP_WRITE) ] = {
315                 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
316                 [ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
317         },
318         [ C(OP_PREFETCH) ] = {
319                 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
320                 [ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
321         },
322  },
323 };
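
/*
 * Each value above is an MSR_OFFCORE_RSP_x payload; the generic cache
 * map below routes the corresponding slots to event 0x01b7/0x01bb, and
 * the extra-reg machinery (intel_snb_extra_regs) programs the MSR.  An
 * LL read miss, for instance, becomes SNB_DMND_READ|SNB_L3_MISS in
 * MSR_OFFCORE_RSP_0.
 */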
324
325 static __initconst const u64 snb_hw_cache_event_ids
326                                 [PERF_COUNT_HW_CACHE_MAX]
327                                 [PERF_COUNT_HW_CACHE_OP_MAX]
328                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
329 {
330  [ C(L1D) ] = {
331         [ C(OP_READ) ] = {
332                 [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS        */
333                 [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT              */
334         },
335         [ C(OP_WRITE) ] = {
336                 [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES       */
337                 [ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT        */
338         },
339         [ C(OP_PREFETCH) ] = {
340                 [ C(RESULT_ACCESS) ] = 0x0,
341                 [ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS          */
342         },
343  },
344  [ C(L1I ) ] = {
345         [ C(OP_READ) ] = {
346                 [ C(RESULT_ACCESS) ] = 0x0,
347                 [ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
348         },
349         [ C(OP_WRITE) ] = {
350                 [ C(RESULT_ACCESS) ] = -1,
351                 [ C(RESULT_MISS)   ] = -1,
352         },
353         [ C(OP_PREFETCH) ] = {
354                 [ C(RESULT_ACCESS) ] = 0x0,
355                 [ C(RESULT_MISS)   ] = 0x0,
356         },
357  },
358  [ C(LL  ) ] = {
359         [ C(OP_READ) ] = {
360                 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
361                 [ C(RESULT_ACCESS) ] = 0x01b7,
362                 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
363                 [ C(RESULT_MISS)   ] = 0x01b7,
364         },
365         [ C(OP_WRITE) ] = {
366                 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
367                 [ C(RESULT_ACCESS) ] = 0x01b7,
368                 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
369                 [ C(RESULT_MISS)   ] = 0x01b7,
370         },
371         [ C(OP_PREFETCH) ] = {
372                 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
373                 [ C(RESULT_ACCESS) ] = 0x01b7,
374                 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
375                 [ C(RESULT_MISS)   ] = 0x01b7,
376         },
377  },
378  [ C(DTLB) ] = {
379         [ C(OP_READ) ] = {
380                 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
381                 [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
382         },
383         [ C(OP_WRITE) ] = {
384                 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
385                 [ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
386         },
387         [ C(OP_PREFETCH) ] = {
388                 [ C(RESULT_ACCESS) ] = 0x0,
389                 [ C(RESULT_MISS)   ] = 0x0,
390         },
391  },
392  [ C(ITLB) ] = {
393         [ C(OP_READ) ] = {
394                 [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT         */
395                 [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK    */
396         },
397         [ C(OP_WRITE) ] = {
398                 [ C(RESULT_ACCESS) ] = -1,
399                 [ C(RESULT_MISS)   ] = -1,
400         },
401         [ C(OP_PREFETCH) ] = {
402                 [ C(RESULT_ACCESS) ] = -1,
403                 [ C(RESULT_MISS)   ] = -1,
404         },
405  },
406  [ C(BPU ) ] = {
407         [ C(OP_READ) ] = {
408                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
409                 [ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
410         },
411         [ C(OP_WRITE) ] = {
412                 [ C(RESULT_ACCESS) ] = -1,
413                 [ C(RESULT_MISS)   ] = -1,
414         },
415         [ C(OP_PREFETCH) ] = {
416                 [ C(RESULT_ACCESS) ] = -1,
417                 [ C(RESULT_MISS)   ] = -1,
418         },
419  },
420  [ C(NODE) ] = {
421         [ C(OP_READ) ] = {
422                 [ C(RESULT_ACCESS) ] = 0x01b7,
423                 [ C(RESULT_MISS)   ] = 0x01b7,
424         },
425         [ C(OP_WRITE) ] = {
426                 [ C(RESULT_ACCESS) ] = 0x01b7,
427                 [ C(RESULT_MISS)   ] = 0x01b7,
428         },
429         [ C(OP_PREFETCH) ] = {
430                 [ C(RESULT_ACCESS) ] = 0x01b7,
431                 [ C(RESULT_MISS)   ] = 0x01b7,
432         },
433  },
434
435 };
436
437 /*
438  * Notes on the events:
439  * - data reads do not include code reads (comparable to earlier tables)
440  * - data counts include speculative execution (except L1 write, dtlb, bpu)
441  * - remote node access includes remote memory, remote cache, remote mmio.
442  * - prefetches are not included in the counts because they are not
443  *   reliably counted.
444  */
445
446 #define HSW_DEMAND_DATA_RD              BIT_ULL(0)
447 #define HSW_DEMAND_RFO                  BIT_ULL(1)
448 #define HSW_ANY_RESPONSE                BIT_ULL(16)
449 #define HSW_SUPPLIER_NONE               BIT_ULL(17)
450 #define HSW_L3_MISS_LOCAL_DRAM          BIT_ULL(22)
451 #define HSW_L3_MISS_REMOTE_HOP0         BIT_ULL(27)
452 #define HSW_L3_MISS_REMOTE_HOP1         BIT_ULL(28)
453 #define HSW_L3_MISS_REMOTE_HOP2P        BIT_ULL(29)
454 #define HSW_L3_MISS                     (HSW_L3_MISS_LOCAL_DRAM| \
455                                          HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
456                                          HSW_L3_MISS_REMOTE_HOP2P)
457 #define HSW_SNOOP_NONE                  BIT_ULL(31)
458 #define HSW_SNOOP_NOT_NEEDED            BIT_ULL(32)
459 #define HSW_SNOOP_MISS                  BIT_ULL(33)
460 #define HSW_SNOOP_HIT_NO_FWD            BIT_ULL(34)
461 #define HSW_SNOOP_HIT_WITH_FWD          BIT_ULL(35)
462 #define HSW_SNOOP_HITM                  BIT_ULL(36)
463 #define HSW_SNOOP_NON_DRAM              BIT_ULL(37)
464 #define HSW_ANY_SNOOP                   (HSW_SNOOP_NONE| \
465                                          HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
466                                          HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
467                                          HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
468 #define HSW_SNOOP_DRAM                  (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
469 #define HSW_DEMAND_READ                 HSW_DEMAND_DATA_RD
470 #define HSW_DEMAND_WRITE                HSW_DEMAND_RFO
471 #define HSW_L3_MISS_REMOTE              (HSW_L3_MISS_REMOTE_HOP0|\
472                                          HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
473 #define HSW_LLC_ACCESS                  HSW_ANY_RESPONSE
474
475 #define BDW_L3_MISS_LOCAL               BIT(26)
476 #define BDW_L3_MISS                     (BDW_L3_MISS_LOCAL| \
477                                          HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
478                                          HSW_L3_MISS_REMOTE_HOP2P)
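
/*
 * Broadwell reports local-DRAM L3 misses in bit 26 rather than bit 22
 * as Haswell does, so only the L3-miss composite needs overriding; the
 * remote-hop bits are shared with Haswell.
 */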
479
480
481 static __initconst const u64 hsw_hw_cache_event_ids
482                                 [PERF_COUNT_HW_CACHE_MAX]
483                                 [PERF_COUNT_HW_CACHE_OP_MAX]
484                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
485 {
486  [ C(L1D ) ] = {
487         [ C(OP_READ) ] = {
488                 [ C(RESULT_ACCESS) ] = 0x81d0,  /* MEM_UOPS_RETIRED.ALL_LOADS */
489                 [ C(RESULT_MISS)   ] = 0x151,   /* L1D.REPLACEMENT */
490         },
491         [ C(OP_WRITE) ] = {
492                 [ C(RESULT_ACCESS) ] = 0x82d0,  /* MEM_UOPS_RETIRED.ALL_STORES */
493                 [ C(RESULT_MISS)   ] = 0x0,
494         },
495         [ C(OP_PREFETCH) ] = {
496                 [ C(RESULT_ACCESS) ] = 0x0,
497                 [ C(RESULT_MISS)   ] = 0x0,
498         },
499  },
500  [ C(L1I ) ] = {
501         [ C(OP_READ) ] = {
502                 [ C(RESULT_ACCESS) ] = 0x0,
503                 [ C(RESULT_MISS)   ] = 0x280,   /* ICACHE.MISSES */
504         },
505         [ C(OP_WRITE) ] = {
506                 [ C(RESULT_ACCESS) ] = -1,
507                 [ C(RESULT_MISS)   ] = -1,
508         },
509         [ C(OP_PREFETCH) ] = {
510                 [ C(RESULT_ACCESS) ] = 0x0,
511                 [ C(RESULT_MISS)   ] = 0x0,
512         },
513  },
514  [ C(LL  ) ] = {
515         [ C(OP_READ) ] = {
516                 [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
517                 [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
518         },
519         [ C(OP_WRITE) ] = {
520                 [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
521                 [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
522         },
523         [ C(OP_PREFETCH) ] = {
524                 [ C(RESULT_ACCESS) ] = 0x0,
525                 [ C(RESULT_MISS)   ] = 0x0,
526         },
527  },
528  [ C(DTLB) ] = {
529         [ C(OP_READ) ] = {
530                 [ C(RESULT_ACCESS) ] = 0x81d0,  /* MEM_UOPS_RETIRED.ALL_LOADS */
531                 [ C(RESULT_MISS)   ] = 0x108,   /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
532         },
533         [ C(OP_WRITE) ] = {
534                 [ C(RESULT_ACCESS) ] = 0x82d0,  /* MEM_UOPS_RETIRED.ALL_STORES */
535                 [ C(RESULT_MISS)   ] = 0x149,   /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
536         },
537         [ C(OP_PREFETCH) ] = {
538                 [ C(RESULT_ACCESS) ] = 0x0,
539                 [ C(RESULT_MISS)   ] = 0x0,
540         },
541  },
542  [ C(ITLB) ] = {
543         [ C(OP_READ) ] = {
544                 [ C(RESULT_ACCESS) ] = 0x6085,  /* ITLB_MISSES.STLB_HIT */
545                 [ C(RESULT_MISS)   ] = 0x185,   /* ITLB_MISSES.MISS_CAUSES_A_WALK */
546         },
547         [ C(OP_WRITE) ] = {
548                 [ C(RESULT_ACCESS) ] = -1,
549                 [ C(RESULT_MISS)   ] = -1,
550         },
551         [ C(OP_PREFETCH) ] = {
552                 [ C(RESULT_ACCESS) ] = -1,
553                 [ C(RESULT_MISS)   ] = -1,
554         },
555  },
556  [ C(BPU ) ] = {
557         [ C(OP_READ) ] = {
558                 [ C(RESULT_ACCESS) ] = 0xc4,    /* BR_INST_RETIRED.ALL_BRANCHES */
559                 [ C(RESULT_MISS)   ] = 0xc5,    /* BR_MISP_RETIRED.ALL_BRANCHES */
560         },
561         [ C(OP_WRITE) ] = {
562                 [ C(RESULT_ACCESS) ] = -1,
563                 [ C(RESULT_MISS)   ] = -1,
564         },
565         [ C(OP_PREFETCH) ] = {
566                 [ C(RESULT_ACCESS) ] = -1,
567                 [ C(RESULT_MISS)   ] = -1,
568         },
569  },
570  [ C(NODE) ] = {
571         [ C(OP_READ) ] = {
572                 [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
573                 [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
574         },
575         [ C(OP_WRITE) ] = {
576                 [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
577                 [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
578         },
579         [ C(OP_PREFETCH) ] = {
580                 [ C(RESULT_ACCESS) ] = 0x0,
581                 [ C(RESULT_MISS)   ] = 0x0,
582         },
583  },
584 };
585
586 static __initconst const u64 hsw_hw_cache_extra_regs
587                                 [PERF_COUNT_HW_CACHE_MAX]
588                                 [PERF_COUNT_HW_CACHE_OP_MAX]
589                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
590 {
591  [ C(LL  ) ] = {
592         [ C(OP_READ) ] = {
593                 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
594                                        HSW_LLC_ACCESS,
595                 [ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
596                                        HSW_L3_MISS|HSW_ANY_SNOOP,
597         },
598         [ C(OP_WRITE) ] = {
599                 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
600                                        HSW_LLC_ACCESS,
601                 [ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
602                                        HSW_L3_MISS|HSW_ANY_SNOOP,
603         },
604         [ C(OP_PREFETCH) ] = {
605                 [ C(RESULT_ACCESS) ] = 0x0,
606                 [ C(RESULT_MISS)   ] = 0x0,
607         },
608  },
609  [ C(NODE) ] = {
610         [ C(OP_READ) ] = {
611                 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
612                                        HSW_L3_MISS_LOCAL_DRAM|
613                                        HSW_SNOOP_DRAM,
614                 [ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
615                                        HSW_L3_MISS_REMOTE|
616                                        HSW_SNOOP_DRAM,
617         },
618         [ C(OP_WRITE) ] = {
619                 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
620                                        HSW_L3_MISS_LOCAL_DRAM|
621                                        HSW_SNOOP_DRAM,
622                 [ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
623                                        HSW_L3_MISS_REMOTE|
624                                        HSW_SNOOP_DRAM,
625         },
626         [ C(OP_PREFETCH) ] = {
627                 [ C(RESULT_ACCESS) ] = 0x0,
628                 [ C(RESULT_MISS)   ] = 0x0,
629         },
630  },
631 };
632
633 static __initconst const u64 westmere_hw_cache_event_ids
634                                 [PERF_COUNT_HW_CACHE_MAX]
635                                 [PERF_COUNT_HW_CACHE_OP_MAX]
636                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
637 {
638  [ C(L1D) ] = {
639         [ C(OP_READ) ] = {
640                 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
641                 [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
642         },
643         [ C(OP_WRITE) ] = {
644                 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
645                 [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
646         },
647         [ C(OP_PREFETCH) ] = {
648                 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
649                 [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
650         },
651  },
652  [ C(L1I ) ] = {
653         [ C(OP_READ) ] = {
654                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
655                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
656         },
657         [ C(OP_WRITE) ] = {
658                 [ C(RESULT_ACCESS) ] = -1,
659                 [ C(RESULT_MISS)   ] = -1,
660         },
661         [ C(OP_PREFETCH) ] = {
662                 [ C(RESULT_ACCESS) ] = 0x0,
663                 [ C(RESULT_MISS)   ] = 0x0,
664         },
665  },
666  [ C(LL  ) ] = {
667         [ C(OP_READ) ] = {
668                 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
669                 [ C(RESULT_ACCESS) ] = 0x01b7,
670                 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
671                 [ C(RESULT_MISS)   ] = 0x01b7,
672         },
673         /*
674          * Use RFO, not WRITEBACK, because a write miss would typically occur
675          * on RFO.
676          */
677         [ C(OP_WRITE) ] = {
678                 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
679                 [ C(RESULT_ACCESS) ] = 0x01b7,
680                 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
681                 [ C(RESULT_MISS)   ] = 0x01b7,
682         },
683         [ C(OP_PREFETCH) ] = {
684                 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
685                 [ C(RESULT_ACCESS) ] = 0x01b7,
686                 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
687                 [ C(RESULT_MISS)   ] = 0x01b7,
688         },
689  },
690  [ C(DTLB) ] = {
691         [ C(OP_READ) ] = {
692                 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
693                 [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
694         },
695         [ C(OP_WRITE) ] = {
696                 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
697                 [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
698         },
699         [ C(OP_PREFETCH) ] = {
700                 [ C(RESULT_ACCESS) ] = 0x0,
701                 [ C(RESULT_MISS)   ] = 0x0,
702         },
703  },
704  [ C(ITLB) ] = {
705         [ C(OP_READ) ] = {
706                 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
707                 [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY              */
708         },
709         [ C(OP_WRITE) ] = {
710                 [ C(RESULT_ACCESS) ] = -1,
711                 [ C(RESULT_MISS)   ] = -1,
712         },
713         [ C(OP_PREFETCH) ] = {
714                 [ C(RESULT_ACCESS) ] = -1,
715                 [ C(RESULT_MISS)   ] = -1,
716         },
717  },
718  [ C(BPU ) ] = {
719         [ C(OP_READ) ] = {
720                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
721                 [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
722         },
723         [ C(OP_WRITE) ] = {
724                 [ C(RESULT_ACCESS) ] = -1,
725                 [ C(RESULT_MISS)   ] = -1,
726         },
727         [ C(OP_PREFETCH) ] = {
728                 [ C(RESULT_ACCESS) ] = -1,
729                 [ C(RESULT_MISS)   ] = -1,
730         },
731  },
732  [ C(NODE) ] = {
733         [ C(OP_READ) ] = {
734                 [ C(RESULT_ACCESS) ] = 0x01b7,
735                 [ C(RESULT_MISS)   ] = 0x01b7,
736         },
737         [ C(OP_WRITE) ] = {
738                 [ C(RESULT_ACCESS) ] = 0x01b7,
739                 [ C(RESULT_MISS)   ] = 0x01b7,
740         },
741         [ C(OP_PREFETCH) ] = {
742                 [ C(RESULT_ACCESS) ] = 0x01b7,
743                 [ C(RESULT_MISS)   ] = 0x01b7,
744         },
745  },
746 };
747
748 /*
749  * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
750  * See IA32 SDM Vol 3B 30.6.1.3
751  */
752
753 #define NHM_DMND_DATA_RD        (1 << 0)
754 #define NHM_DMND_RFO            (1 << 1)
755 #define NHM_DMND_IFETCH         (1 << 2)
756 #define NHM_DMND_WB             (1 << 3)
757 #define NHM_PF_DATA_RD          (1 << 4)
758 #define NHM_PF_DATA_RFO         (1 << 5)
759 #define NHM_PF_IFETCH           (1 << 6)
760 #define NHM_OFFCORE_OTHER       (1 << 7)
761 #define NHM_UNCORE_HIT          (1 << 8)
762 #define NHM_OTHER_CORE_HIT_SNP  (1 << 9)
763 #define NHM_OTHER_CORE_HITM     (1 << 10)
764                                 /* reserved */
765 #define NHM_REMOTE_CACHE_FWD    (1 << 12)
766 #define NHM_REMOTE_DRAM         (1 << 13)
767 #define NHM_LOCAL_DRAM          (1 << 14)
768 #define NHM_NON_DRAM            (1 << 15)
769
770 #define NHM_LOCAL               (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
771 #define NHM_REMOTE              (NHM_REMOTE_DRAM)
772
773 #define NHM_DMND_READ           (NHM_DMND_DATA_RD)
774 #define NHM_DMND_WRITE          (NHM_DMND_RFO|NHM_DMND_WB)
775 #define NHM_DMND_PREFETCH       (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
776
777 #define NHM_L3_HIT      (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
778 #define NHM_L3_MISS     (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
779 #define NHM_L3_ACCESS   (NHM_L3_HIT|NHM_L3_MISS)
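
/*
 * Example composition: demand-data-read L3 misses are counted with
 * NHM_DMND_READ|NHM_L3_MISS = 0x0001|0xf000 = 0xf001 in MSR_OFFCORE_RSP_x.
 */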
780
781 static __initconst const u64 nehalem_hw_cache_extra_regs
782                                 [PERF_COUNT_HW_CACHE_MAX]
783                                 [PERF_COUNT_HW_CACHE_OP_MAX]
784                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
785 {
786  [ C(LL  ) ] = {
787         [ C(OP_READ) ] = {
788                 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
789                 [ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
790         },
791         [ C(OP_WRITE) ] = {
792                 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
793                 [ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
794         },
795         [ C(OP_PREFETCH) ] = {
796                 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
797                 [ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
798         },
799  },
800  [ C(NODE) ] = {
801         [ C(OP_READ) ] = {
802                 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
803                 [ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
804         },
805         [ C(OP_WRITE) ] = {
806                 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
807                 [ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
808         },
809         [ C(OP_PREFETCH) ] = {
810                 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
811                 [ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
812         },
813  },
814 };
815
816 static __initconst const u64 nehalem_hw_cache_event_ids
817                                 [PERF_COUNT_HW_CACHE_MAX]
818                                 [PERF_COUNT_HW_CACHE_OP_MAX]
819                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
820 {
821  [ C(L1D) ] = {
822         [ C(OP_READ) ] = {
823                 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
824                 [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
825         },
826         [ C(OP_WRITE) ] = {
827                 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
828                 [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
829         },
830         [ C(OP_PREFETCH) ] = {
831                 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
832                 [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
833         },
834  },
835  [ C(L1I ) ] = {
836         [ C(OP_READ) ] = {
837                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
838                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
839         },
840         [ C(OP_WRITE) ] = {
841                 [ C(RESULT_ACCESS) ] = -1,
842                 [ C(RESULT_MISS)   ] = -1,
843         },
844         [ C(OP_PREFETCH) ] = {
845                 [ C(RESULT_ACCESS) ] = 0x0,
846                 [ C(RESULT_MISS)   ] = 0x0,
847         },
848  },
849  [ C(LL  ) ] = {
850         [ C(OP_READ) ] = {
851                 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
852                 [ C(RESULT_ACCESS) ] = 0x01b7,
853                 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
854                 [ C(RESULT_MISS)   ] = 0x01b7,
855         },
856         /*
857          * Use RFO, not WRITEBACK, because a write miss would typically occur
858          * on RFO.
859          */
860         [ C(OP_WRITE) ] = {
861                 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
862                 [ C(RESULT_ACCESS) ] = 0x01b7,
863                 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
864                 [ C(RESULT_MISS)   ] = 0x01b7,
865         },
866         [ C(OP_PREFETCH) ] = {
867                 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
868                 [ C(RESULT_ACCESS) ] = 0x01b7,
869                 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
870                 [ C(RESULT_MISS)   ] = 0x01b7,
871         },
872  },
873  [ C(DTLB) ] = {
874         [ C(OP_READ) ] = {
875                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
876                 [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
877         },
878         [ C(OP_WRITE) ] = {
879                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
880                 [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
881         },
882         [ C(OP_PREFETCH) ] = {
883                 [ C(RESULT_ACCESS) ] = 0x0,
884                 [ C(RESULT_MISS)   ] = 0x0,
885         },
886  },
887  [ C(ITLB) ] = {
888         [ C(OP_READ) ] = {
889                 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
890                 [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
891         },
892         [ C(OP_WRITE) ] = {
893                 [ C(RESULT_ACCESS) ] = -1,
894                 [ C(RESULT_MISS)   ] = -1,
895         },
896         [ C(OP_PREFETCH) ] = {
897                 [ C(RESULT_ACCESS) ] = -1,
898                 [ C(RESULT_MISS)   ] = -1,
899         },
900  },
901  [ C(BPU ) ] = {
902         [ C(OP_READ) ] = {
903                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
904                 [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
905         },
906         [ C(OP_WRITE) ] = {
907                 [ C(RESULT_ACCESS) ] = -1,
908                 [ C(RESULT_MISS)   ] = -1,
909         },
910         [ C(OP_PREFETCH) ] = {
911                 [ C(RESULT_ACCESS) ] = -1,
912                 [ C(RESULT_MISS)   ] = -1,
913         },
914  },
915  [ C(NODE) ] = {
916         [ C(OP_READ) ] = {
917                 [ C(RESULT_ACCESS) ] = 0x01b7,
918                 [ C(RESULT_MISS)   ] = 0x01b7,
919         },
920         [ C(OP_WRITE) ] = {
921                 [ C(RESULT_ACCESS) ] = 0x01b7,
922                 [ C(RESULT_MISS)   ] = 0x01b7,
923         },
924         [ C(OP_PREFETCH) ] = {
925                 [ C(RESULT_ACCESS) ] = 0x01b7,
926                 [ C(RESULT_MISS)   ] = 0x01b7,
927         },
928  },
929 };
930
931 static __initconst const u64 core2_hw_cache_event_ids
932                                 [PERF_COUNT_HW_CACHE_MAX]
933                                 [PERF_COUNT_HW_CACHE_OP_MAX]
934                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
935 {
936  [ C(L1D) ] = {
937         [ C(OP_READ) ] = {
938                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
939                 [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
940         },
941         [ C(OP_WRITE) ] = {
942                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
943                 [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
944         },
945         [ C(OP_PREFETCH) ] = {
946                 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
947                 [ C(RESULT_MISS)   ] = 0,
948         },
949  },
950  [ C(L1I ) ] = {
951         [ C(OP_READ) ] = {
952                 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
953                 [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
954         },
955         [ C(OP_WRITE) ] = {
956                 [ C(RESULT_ACCESS) ] = -1,
957                 [ C(RESULT_MISS)   ] = -1,
958         },
959         [ C(OP_PREFETCH) ] = {
960                 [ C(RESULT_ACCESS) ] = 0,
961                 [ C(RESULT_MISS)   ] = 0,
962         },
963  },
964  [ C(LL  ) ] = {
965         [ C(OP_READ) ] = {
966                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
967                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
968         },
969         [ C(OP_WRITE) ] = {
970                 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
971                 [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
972         },
973         [ C(OP_PREFETCH) ] = {
974                 [ C(RESULT_ACCESS) ] = 0,
975                 [ C(RESULT_MISS)   ] = 0,
976         },
977  },
978  [ C(DTLB) ] = {
979         [ C(OP_READ) ] = {
980                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
981                 [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
982         },
983         [ C(OP_WRITE) ] = {
984                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
985                 [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
986         },
987         [ C(OP_PREFETCH) ] = {
988                 [ C(RESULT_ACCESS) ] = 0,
989                 [ C(RESULT_MISS)   ] = 0,
990         },
991  },
992  [ C(ITLB) ] = {
993         [ C(OP_READ) ] = {
994                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
995                 [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
996         },
997         [ C(OP_WRITE) ] = {
998                 [ C(RESULT_ACCESS) ] = -1,
999                 [ C(RESULT_MISS)   ] = -1,
1000         },
1001         [ C(OP_PREFETCH) ] = {
1002                 [ C(RESULT_ACCESS) ] = -1,
1003                 [ C(RESULT_MISS)   ] = -1,
1004         },
1005  },
1006  [ C(BPU ) ] = {
1007         [ C(OP_READ) ] = {
1008                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
1009                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
1010         },
1011         [ C(OP_WRITE) ] = {
1012                 [ C(RESULT_ACCESS) ] = -1,
1013                 [ C(RESULT_MISS)   ] = -1,
1014         },
1015         [ C(OP_PREFETCH) ] = {
1016                 [ C(RESULT_ACCESS) ] = -1,
1017                 [ C(RESULT_MISS)   ] = -1,
1018         },
1019  },
1020 };
1021
1022 static __initconst const u64 atom_hw_cache_event_ids
1023                                 [PERF_COUNT_HW_CACHE_MAX]
1024                                 [PERF_COUNT_HW_CACHE_OP_MAX]
1025                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1026 {
1027  [ C(L1D) ] = {
1028         [ C(OP_READ) ] = {
1029                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
1030                 [ C(RESULT_MISS)   ] = 0,
1031         },
1032         [ C(OP_WRITE) ] = {
1033                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
1034                 [ C(RESULT_MISS)   ] = 0,
1035         },
1036         [ C(OP_PREFETCH) ] = {
1037                 [ C(RESULT_ACCESS) ] = 0x0,
1038                 [ C(RESULT_MISS)   ] = 0,
1039         },
1040  },
1041  [ C(L1I ) ] = {
1042         [ C(OP_READ) ] = {
1043                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
1044                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
1045         },
1046         [ C(OP_WRITE) ] = {
1047                 [ C(RESULT_ACCESS) ] = -1,
1048                 [ C(RESULT_MISS)   ] = -1,
1049         },
1050         [ C(OP_PREFETCH) ] = {
1051                 [ C(RESULT_ACCESS) ] = 0,
1052                 [ C(RESULT_MISS)   ] = 0,
1053         },
1054  },
1055  [ C(LL  ) ] = {
1056         [ C(OP_READ) ] = {
1057                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
1058                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
1059         },
1060         [ C(OP_WRITE) ] = {
1061                 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
1062                 [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
1063         },
1064         [ C(OP_PREFETCH) ] = {
1065                 [ C(RESULT_ACCESS) ] = 0,
1066                 [ C(RESULT_MISS)   ] = 0,
1067         },
1068  },
1069  [ C(DTLB) ] = {
1070         [ C(OP_READ) ] = {
1071                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
1072                 [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
1073         },
1074         [ C(OP_WRITE) ] = {
1075                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
1076                 [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
1077         },
1078         [ C(OP_PREFETCH) ] = {
1079                 [ C(RESULT_ACCESS) ] = 0,
1080                 [ C(RESULT_MISS)   ] = 0,
1081         },
1082  },
1083  [ C(ITLB) ] = {
1084         [ C(OP_READ) ] = {
1085                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
1086                 [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
1087         },
1088         [ C(OP_WRITE) ] = {
1089                 [ C(RESULT_ACCESS) ] = -1,
1090                 [ C(RESULT_MISS)   ] = -1,
1091         },
1092         [ C(OP_PREFETCH) ] = {
1093                 [ C(RESULT_ACCESS) ] = -1,
1094                 [ C(RESULT_MISS)   ] = -1,
1095         },
1096  },
1097  [ C(BPU ) ] = {
1098         [ C(OP_READ) ] = {
1099                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
1100                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
1101         },
1102         [ C(OP_WRITE) ] = {
1103                 [ C(RESULT_ACCESS) ] = -1,
1104                 [ C(RESULT_MISS)   ] = -1,
1105         },
1106         [ C(OP_PREFETCH) ] = {
1107                 [ C(RESULT_ACCESS) ] = -1,
1108                 [ C(RESULT_MISS)   ] = -1,
1109         },
1110  },
1111 };
1112
1113 static struct extra_reg intel_slm_extra_regs[] __read_mostly =
1114 {
1115         /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1116         INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
1117         INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
1118         EVENT_EXTRA_END
1119 };
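
/*
 * Unlike the big cores, which select the second response MSR with a
 * separate event code (0x01bb), Silvermont reaches MSR_OFFCORE_RSP_1
 * through umask 0x02 on the same event (0x02b7 above).
 */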
1120
1121 #define SLM_DMND_READ           SNB_DMND_DATA_RD
1122 #define SLM_DMND_WRITE          SNB_DMND_RFO
1123 #define SLM_DMND_PREFETCH       (SNB_PF_DATA_RD|SNB_PF_RFO)
1124
1125 #define SLM_SNP_ANY             (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
1126 #define SLM_LLC_ACCESS          SNB_RESP_ANY
1127 #define SLM_LLC_MISS            (SLM_SNP_ANY|SNB_NON_DRAM)
1128
1129 static __initconst const u64 slm_hw_cache_extra_regs
1130                                 [PERF_COUNT_HW_CACHE_MAX]
1131                                 [PERF_COUNT_HW_CACHE_OP_MAX]
1132                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1133 {
1134  [ C(LL  ) ] = {
1135         [ C(OP_READ) ] = {
1136                 [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
1137                 [ C(RESULT_MISS)   ] = 0,
1138         },
1139         [ C(OP_WRITE) ] = {
1140                 [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
1141                 [ C(RESULT_MISS)   ] = SLM_DMND_WRITE|SLM_LLC_MISS,
1142         },
1143         [ C(OP_PREFETCH) ] = {
1144                 [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
1145                 [ C(RESULT_MISS)   ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
1146         },
1147  },
1148 };
1149
1150 static __initconst const u64 slm_hw_cache_event_ids
1151                                 [PERF_COUNT_HW_CACHE_MAX]
1152                                 [PERF_COUNT_HW_CACHE_OP_MAX]
1153                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1154 {
1155  [ C(L1D) ] = {
1156         [ C(OP_READ) ] = {
1157                 [ C(RESULT_ACCESS) ] = 0,
1158                 [ C(RESULT_MISS)   ] = 0x0104, /* LD_DCU_MISS */
1159         },
1160         [ C(OP_WRITE) ] = {
1161                 [ C(RESULT_ACCESS) ] = 0,
1162                 [ C(RESULT_MISS)   ] = 0,
1163         },
1164         [ C(OP_PREFETCH) ] = {
1165                 [ C(RESULT_ACCESS) ] = 0,
1166                 [ C(RESULT_MISS)   ] = 0,
1167         },
1168  },
1169  [ C(L1I ) ] = {
1170         [ C(OP_READ) ] = {
1171                 [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
1172                 [ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
1173         },
1174         [ C(OP_WRITE) ] = {
1175                 [ C(RESULT_ACCESS) ] = -1,
1176                 [ C(RESULT_MISS)   ] = -1,
1177         },
1178         [ C(OP_PREFETCH) ] = {
1179                 [ C(RESULT_ACCESS) ] = 0,
1180                 [ C(RESULT_MISS)   ] = 0,
1181         },
1182  },
1183  [ C(LL  ) ] = {
1184         [ C(OP_READ) ] = {
1185                 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1186                 [ C(RESULT_ACCESS) ] = 0x01b7,
1187                 [ C(RESULT_MISS)   ] = 0,
1188         },
1189         [ C(OP_WRITE) ] = {
1190                 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1191                 [ C(RESULT_ACCESS) ] = 0x01b7,
1192                 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1193                 [ C(RESULT_MISS)   ] = 0x01b7,
1194         },
1195         [ C(OP_PREFETCH) ] = {
1196                 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1197                 [ C(RESULT_ACCESS) ] = 0x01b7,
1198                 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1199                 [ C(RESULT_MISS)   ] = 0x01b7,
1200         },
1201  },
1202  [ C(DTLB) ] = {
1203         [ C(OP_READ) ] = {
1204                 [ C(RESULT_ACCESS) ] = 0,
1205                 [ C(RESULT_MISS)   ] = 0x0804, /* LD_DTLB_MISS */
1206         },
1207         [ C(OP_WRITE) ] = {
1208                 [ C(RESULT_ACCESS) ] = 0,
1209                 [ C(RESULT_MISS)   ] = 0,
1210         },
1211         [ C(OP_PREFETCH) ] = {
1212                 [ C(RESULT_ACCESS) ] = 0,
1213                 [ C(RESULT_MISS)   ] = 0,
1214         },
1215  },
1216  [ C(ITLB) ] = {
1217         [ C(OP_READ) ] = {
1218                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1219                 [ C(RESULT_MISS)   ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
1220         },
1221         [ C(OP_WRITE) ] = {
1222                 [ C(RESULT_ACCESS) ] = -1,
1223                 [ C(RESULT_MISS)   ] = -1,
1224         },
1225         [ C(OP_PREFETCH) ] = {
1226                 [ C(RESULT_ACCESS) ] = -1,
1227                 [ C(RESULT_MISS)   ] = -1,
1228         },
1229  },
1230  [ C(BPU ) ] = {
1231         [ C(OP_READ) ] = {
1232                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1233                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1234         },
1235         [ C(OP_WRITE) ] = {
1236                 [ C(RESULT_ACCESS) ] = -1,
1237                 [ C(RESULT_MISS)   ] = -1,
1238         },
1239         [ C(OP_PREFETCH) ] = {
1240                 [ C(RESULT_ACCESS) ] = -1,
1241                 [ C(RESULT_MISS)   ] = -1,
1242         },
1243  },
1244 };
1245
1246 /*
1247  * Use from PMIs, where the LBRs are already disabled; the non-PMI path, intel_pmu_disable_all() below, also turns the LBRs off.
1248  */
1249 static void __intel_pmu_disable_all(void)
1250 {
1251         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1252
1253         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1254
1255         if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1256                 intel_pmu_disable_bts();
1257         else
1258                 intel_bts_disable_local();
1259
1260         intel_pmu_pebs_disable_all();
1261 }
1262
1263 static void intel_pmu_disable_all(void)
1264 {
1265         __intel_pmu_disable_all();
1266         intel_pmu_lbr_disable_all();
1267 }
1268
1269 static void __intel_pmu_enable_all(int added, bool pmi)
1270 {
1271         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1272
1273         intel_pmu_pebs_enable_all();
1274         intel_pmu_lbr_enable_all(pmi);
1275         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
1276                         x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
1277
1278         if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1279                 struct perf_event *event =
1280                         cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
1281
1282                 if (WARN_ON_ONCE(!event))
1283                         return;
1284
1285                 intel_pmu_enable_bts(event->hw.config);
1286         } else
1287                 intel_bts_enable_local();
1288 }
1289
1290 static void intel_pmu_enable_all(int added)
1291 {
1292         __intel_pmu_enable_all(added, false);
1293 }
1294
1295 /*
1296  * Workaround for:
1297  *   Intel Errata AAK100 (model 26)
1298  *   Intel Errata AAP53  (model 30)
1299  *   Intel Errata BD53   (model 44)
1300  *
1301  * The official story:
1302  *   These chips need to be 'reset' when adding counters by programming the
1303  *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
1304  *   in sequence on the same PMC or on different PMCs.
1305  *
1306  * In practice it appears some of these events do in fact count, and
1307  * we need to program all 4 events.
1308  */
1309 static void intel_pmu_nhm_workaround(void)
1310 {
1311         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1312         static const unsigned long nhm_magic[4] = {
1313                 0x4300B5,
1314                 0x4300D2,
1315                 0x4300B1,
1316                 0x4300B1
1317         };
1318         struct perf_event *event;
1319         int i;
1320
1321         /*
1322          * The errata require the following steps:
1323          * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
1324          * 2) Configure 4 PERFEVTSELx with the magic events and clear
1325          *    the corresponding PMCx;
1326          * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
1327          * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
1328          * 5) Clear the 4 pairs of PERFEVTSELx and PMCx;
1329          */
1330
1331         /*
1332          * The steps we actually take differ slightly from the above:
1333          * A) To reduce MSR operations, we skip step 1); those MSRs are
1334          *    already cleared before this function is called;
1335          * B) Call x86_perf_event_update() to save PMCx before configuring
1336          *    PERFEVTSELx with the magic number;
1337          * C) For step 5), we clear a PERFEVTSELx only when it is not
1338          *    currently in use;
1339          * D) Call x86_perf_event_set_period() to restore PMCx.
1340          */
1341
1342         /* We always operate on 4 pairs of PERF counters */
1343         for (i = 0; i < 4; i++) {
1344                 event = cpuc->events[i];
1345                 if (event)
1346                         x86_perf_event_update(event);
1347         }
1348
1349         for (i = 0; i < 4; i++) {
1350                 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
1351                 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
1352         }
1353
1354         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
1355         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
1356
1357         for (i = 0; i < 4; i++) {
1358                 event = cpuc->events[i];
1359
1360                 if (event) {
1361                         x86_perf_event_set_period(event);
1362                         __x86_pmu_enable_event(&event->hw,
1363                                         ARCH_PERFMON_EVENTSEL_ENABLE);
1364                 } else
1365                         wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
1366         }
1367 }
1368
1369 static void intel_pmu_nhm_enable_all(int added)
1370 {
1371         if (added)
1372                 intel_pmu_nhm_workaround();
1373         intel_pmu_enable_all(added);
1374 }
1375
1376 static inline u64 intel_pmu_get_status(void)
1377 {
1378         u64 status;
1379
1380         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1381
1382         return status;
1383 }
1384
1385 static inline void intel_pmu_ack_status(u64 ack)
1386 {
1387         wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1388 }
1389
1390 static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
1391 {
1392         int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
1393         u64 ctrl_val, mask;
1394
1395         mask = 0xfULL << (idx * 4);
1396
1397         rdmsrl(hwc->config_base, ctrl_val);
1398         ctrl_val &= ~mask;
1399         wrmsrl(hwc->config_base, ctrl_val);
1400 }
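
/*
 * Worked example: with INTEL_PMC_IDX_FIXED == 32, a fixed-counter event on
 * fixed counter 1 has hwc->idx == 33, so idx == 1 and mask == 0xf0. The
 * read-modify-write above then clears only the four control bits (ring-0
 * enable, ring-3 enable, ANY, PMI) of that counter in
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL, leaving the other counters untouched.
 */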
1401
1402 static inline bool event_is_checkpointed(struct perf_event *event)
1403 {
1404         return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
1405 }
1406
1407 static void intel_pmu_disable_event(struct perf_event *event)
1408 {
1409         struct hw_perf_event *hwc = &event->hw;
1410         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1411
1412         if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
1413                 intel_pmu_disable_bts();
1414                 intel_pmu_drain_bts_buffer();
1415                 return;
1416         }
1417
1418         cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
1419         cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
1420         cpuc->intel_cp_status &= ~(1ull << hwc->idx);
1421
1422         /*
1423          * The LBR must be disabled before touching the actual event,
1424          * because any event may be combined with LBR.
1425          */
1426         if (needs_branch_stack(event))
1427                 intel_pmu_lbr_disable(event);
1428
1429         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1430                 intel_pmu_disable_fixed(hwc);
1431                 return;
1432         }
1433
1434         x86_pmu_disable_event(event);
1435
1436         if (unlikely(event->attr.precise_ip))
1437                 intel_pmu_pebs_disable(event);
1438 }
1439
1440 static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
1441 {
1442         int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
1443         u64 ctrl_val, bits, mask;
1444
1445         /*
1446          * Enable IRQ generation (0x8),
1447          * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1448          * if requested:
1449          */
1450         bits = 0x8ULL;
1451         if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1452                 bits |= 0x2;
1453         if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1454                 bits |= 0x1;
1455
1456         /*
1457          * ANY bit is supported in v3 and up
1458          */
1459         if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
1460                 bits |= 0x4;
1461
1462         bits <<= (idx * 4);
1463         mask = 0xfULL << (idx * 4);
1464
1465         rdmsrl(hwc->config_base, ctrl_val);
1466         ctrl_val &= ~mask;
1467         ctrl_val |= bits;
1468         wrmsrl(hwc->config_base, ctrl_val);
1469 }
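
/*
 * Worked example: counting both rings with PMI enabled on fixed counter 0
 * gives bits == 0x8 | 0x2 | 0x1 == 0xb and mask == 0xf, so the low nibble
 * of MSR_ARCH_PERFMON_FIXED_CTR_CTRL becomes 0xb while the ~mask step
 * preserves the fields of the other fixed counters.
 */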
1470
1471 static void intel_pmu_enable_event(struct perf_event *event)
1472 {
1473         struct hw_perf_event *hwc = &event->hw;
1474         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1475
1476         if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
1477                 if (!__this_cpu_read(cpu_hw_events.enabled))
1478                         return;
1479
1480                 intel_pmu_enable_bts(hwc->config);
1481                 return;
1482         }
1483         /*
1484          * The LBR must be enabled before the actual event,
1485          * because any event may be combined with LBR.
1486          */
1487         if (needs_branch_stack(event))
1488                 intel_pmu_lbr_enable(event);
1489
1490         if (event->attr.exclude_host)
1491                 cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
1492         if (event->attr.exclude_guest)
1493                 cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
1494
1495         if (unlikely(event_is_checkpointed(event)))
1496                 cpuc->intel_cp_status |= (1ull << hwc->idx);
1497
1498         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1499                 intel_pmu_enable_fixed(hwc);
1500                 return;
1501         }
1502
1503         if (unlikely(event->attr.precise_ip))
1504                 intel_pmu_pebs_enable(event);
1505
1506         __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
1507 }
1508
1509 /*
1510  * Save and restart an expired event. Called from NMI context,
1511  * so it has to be careful about preempting normal event ops:
1512  */
1513 int intel_pmu_save_and_restart(struct perf_event *event)
1514 {
1515         x86_perf_event_update(event);
1516         /*
1517          * For a checkpointed counter always reset back to 0.  This
1518          * avoids a situation where the counter overflows, aborts the
1519          * transaction and is then set back to shortly before the
1520          * overflow, and overflows and aborts again.
1521          */
1522         if (unlikely(event_is_checkpointed(event))) {
1523                 /* No race with NMIs because the counter should not be armed */
1524                 wrmsrl(event->hw.event_base, 0);
1525                 local64_set(&event->hw.prev_count, 0);
1526         }
1527         return x86_perf_event_set_period(event);
1528 }
1529
1530 static void intel_pmu_reset(void)
1531 {
1532         struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
1533         unsigned long flags;
1534         int idx;
1535
1536         if (!x86_pmu.num_counters)
1537                 return;
1538
1539         local_irq_save(flags);
1540
1541         pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
1542
1543         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1544                 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
1545                 wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
1546         }
1547         for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
1548                 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1549
1550         if (ds)
1551                 ds->bts_index = ds->bts_buffer_base;
1552
1553         /* Ack all overflows and disable fixed counters */
1554         if (x86_pmu.version >= 2) {
1555                 intel_pmu_ack_status(intel_pmu_get_status());
1556                 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1557         }
1558
1559         /* Reset LBRs and LBR freezing */
1560         if (x86_pmu.lbr_nr) {
1561                 update_debugctlmsr(get_debugctlmsr() &
1562                         ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
1563         }
1564
1565         local_irq_restore(flags);
1566 }
1567
1568 /*
1569  * This handler is triggered by the local APIC, so the APIC IRQ handling
1570  * rules apply:
1571  */
1572 static int intel_pmu_handle_irq(struct pt_regs *regs)
1573 {
1574         struct perf_sample_data data;
1575         struct cpu_hw_events *cpuc;
1576         int bit, loops;
1577         u64 status;
1578         int handled;
1579
1580         cpuc = this_cpu_ptr(&cpu_hw_events);
1581
1582         /*
1583          * There is no known reason not to always do the late ACK,
1584          * but just in case, make it opt-in.
1585          */
1586         if (!x86_pmu.late_ack)
1587                 apic_write(APIC_LVTPC, APIC_DM_NMI);
1588         __intel_pmu_disable_all();
1589         handled = intel_pmu_drain_bts_buffer();
1590         handled += intel_bts_interrupt();
1591         status = intel_pmu_get_status();
1592         if (!status)
1593                 goto done;
1594
1595         loops = 0;
1596 again:
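        /*
         * Note the ordering: the LBR stack is read before the status is
         * acked. On CPUs that freeze LBRs on PMI, the ack (a write to
         * MSR_CORE_PERF_GLOBAL_OVF_CTRL) clears LBRS_FROZEN and un-freezes
         * the LBRs, so acking first would let the handler's own branches
         * overwrite the entries we want to sample.
         */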
1597         intel_pmu_lbr_read();
1598         intel_pmu_ack_status(status);
1599         if (++loops > 100) {
1600                 static bool warned = false;
1601                 if (!warned) {
1602                         WARN(1, "perfevents: irq loop stuck!\n");
1603                         perf_event_print_debug();
1604                         warned = true;
1605                 }
1606                 intel_pmu_reset();
1607                 goto done;
1608         }
1609
1610         inc_irq_stat(apic_perf_irqs);
1611
1612
1613         /*
1614          * Ignore a range of extra bits in status that do not indicate
1615          * overflow by themselves.
1616          */
1617         status &= ~(GLOBAL_STATUS_COND_CHG |
1618                     GLOBAL_STATUS_ASIF |
1619                     GLOBAL_STATUS_LBRS_FROZEN);
1620         if (!status)
1621                 goto done;
1622
1623         /*
1624          * PEBS overflow sets bit 62 in the global status register
1625          */
1626         if (__test_and_clear_bit(62, (unsigned long *)&status)) {
1627                 handled++;
1628                 x86_pmu.drain_pebs(regs);
1629         }
1630
1631         /*
1632          * Intel PT
1633          */
1634         if (__test_and_clear_bit(55, (unsigned long *)&status)) {
1635                 handled++;
1636                 intel_pt_interrupt();
1637         }
1638
1639         /*
1640          * Checkpointed counters can lead to 'spurious' PMIs because the
1641          * rollback caused by the PMI will have cleared the overflow status
1642          * bit. Therefore always force probe these counters.
1643          */
1644         status |= cpuc->intel_cp_status;
1645
1646         for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
1647                 struct perf_event *event = cpuc->events[bit];
1648
1649                 handled++;
1650
1651                 if (!test_bit(bit, cpuc->active_mask))
1652                         continue;
1653
1654                 if (!intel_pmu_save_and_restart(event))
1655                         continue;
1656
1657                 perf_sample_data_init(&data, 0, event->hw.last_period);
1658
1659                 if (has_branch_stack(event))
1660                         data.br_stack = &cpuc->lbr_stack;
1661
1662                 if (perf_event_overflow(event, &data, regs))
1663                         x86_pmu_stop(event, 0);
1664         }
1665
1666         /*
1667          * Repeat if there is more work to be done:
1668          */
1669         status = intel_pmu_get_status();
1670         if (status)
1671                 goto again;
1672
1673 done:
1674         __intel_pmu_enable_all(0, true);
1675         /*
1676          * Only unmask the NMI after the overflow counters
1677          * have been reset. This avoids spurious NMIs on
1678          * Haswell CPUs.
1679          */
1680         if (x86_pmu.late_ack)
1681                 apic_write(APIC_LVTPC, APIC_DM_NMI);
1682         return handled;
1683 }
1684
1685 static struct event_constraint *
1686 intel_bts_constraints(struct perf_event *event)
1687 {
1688         struct hw_perf_event *hwc = &event->hw;
1689         unsigned int hw_event, bts_event;
1690
1691         if (event->attr.freq)
1692                 return NULL;
1693
1694         hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
1695         bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
1696
1697         if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
1698                 return &bts_constraint;
1699
1700         return NULL;
1701 }
1702
1703 static int intel_alt_er(int idx, u64 config)
1704 {
1705         int alt_idx = idx;
1706         if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
1707                 return idx;
1708
1709         if (idx == EXTRA_REG_RSP_0)
1710                 alt_idx = EXTRA_REG_RSP_1;
1711
1712         if (idx == EXTRA_REG_RSP_1)
1713                 alt_idx = EXTRA_REG_RSP_0;
1714
1715         if (config & ~x86_pmu.extra_regs[alt_idx].valid_mask)
1716                 return idx;
1717
1718         return alt_idx;
1719 }
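
/*
 * For example, when two events both map to EXTRA_REG_RSP_0 (MSR_OFFCORE_RSP_0)
 * with different configs, the second one can be steered to EXTRA_REG_RSP_1
 * here, provided its config fits the alternate register's valid_mask;
 * otherwise the original idx is returned and the caller falls back to the
 * empty constraint.
 */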
1720
1721 static void intel_fixup_er(struct perf_event *event, int idx)
1722 {
1723         event->hw.extra_reg.idx = idx;
1724
1725         if (idx == EXTRA_REG_RSP_0) {
1726                 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1727                 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
1728                 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
1729         } else if (idx == EXTRA_REG_RSP_1) {
1730                 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1731                 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
1732                 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
1733         }
1734 }
1735
1736 /*
1737  * manage allocation of shared extra msr for certain events
1738  *
1739  * sharing can be:
1740  * per-cpu: to be shared between the various events on a single PMU
1741  * per-core: per-cpu + shared by HT threads
1742  */
1743 static struct event_constraint *
1744 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
1745                                    struct perf_event *event,
1746                                    struct hw_perf_event_extra *reg)
1747 {
1748         struct event_constraint *c = &emptyconstraint;
1749         struct er_account *era;
1750         unsigned long flags;
1751         int idx = reg->idx;
1752
1753         /*
1754          * reg->alloc can be set due to existing state, so for fake cpuc we
1755          * need to ignore this, otherwise we might fail to allocate proper fake
1756          * state for this extra reg constraint. Also see the comment below.
1757          */
1758         if (reg->alloc && !cpuc->is_fake)
1759                 return NULL; /* call x86_get_event_constraint() */
1760
1761 again:
1762         era = &cpuc->shared_regs->regs[idx];
1763         /*
1764          * we use spin_lock_irqsave() to avoid lockdep issues when
1765          * passing a fake cpuc
1766          */
1767         raw_spin_lock_irqsave(&era->lock, flags);
1768
1769         if (!atomic_read(&era->ref) || era->config == reg->config) {
1770
1771                 /*
1772                  * If it's a fake cpuc -- as per validate_{group,event}() we
1773                  * shouldn't touch event state and we can avoid doing so
1774                  * since both will only call get_event_constraints() once
1775                  * on each event; this avoids the need for reg->alloc.
1776                  *
1777                  * Not doing the ER fixup will only result in era->reg being
1778                  * wrong, but since we won't actually try and program hardware
1779                  * this isn't a problem either.
1780                  */
1781                 if (!cpuc->is_fake) {
1782                         if (idx != reg->idx)
1783                                 intel_fixup_er(event, idx);
1784
1785                         /*
1786                          * x86_schedule_events() can call get_event_constraints()
1787                          * multiple times on events in the case of incremental
1788                          * scheduling. reg->alloc ensures we only do the ER
1789                          * allocation once.
1790                          */
1791                         reg->alloc = 1;
1792                 }
1793
1794                 /* lock in msr value */
1795                 era->config = reg->config;
1796                 era->reg = reg->reg;
1797
1798                 /* one more user */
1799                 atomic_inc(&era->ref);
1800
1801                 /*
1802                  * need to call x86_get_event_constraint()
1803                  * to check if associated event has constraints
1804                  */
1805                 c = NULL;
1806         } else {
1807                 idx = intel_alt_er(idx, reg->config);
1808                 if (idx != reg->idx) {
1809                         raw_spin_unlock_irqrestore(&era->lock, flags);
1810                         goto again;
1811                 }
1812         }
1813         raw_spin_unlock_irqrestore(&era->lock, flags);
1814
1815         return c;
1816 }
1817
1818 static void
1819 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
1820                                    struct hw_perf_event_extra *reg)
1821 {
1822         struct er_account *era;
1823
1824         /*
1825          * Only put the constraint if the extra reg was actually allocated. Also
1826          * takes care of events which do not use an extra shared reg.
1827          *
1828          * Also, if this is a fake cpuc we shouldn't touch any event state
1829          * (reg->alloc) and we don't care about leaving inconsistent cpuc state
1830          * either since it'll be thrown out.
1831          */
1832         if (!reg->alloc || cpuc->is_fake)
1833                 return;
1834
1835         era = &cpuc->shared_regs->regs[reg->idx];
1836
1837         /* one fewer user */
1838         atomic_dec(&era->ref);
1839
1840         /* allocate again next time */
1841         reg->alloc = 0;
1842 }
1843
1844 static struct event_constraint *
1845 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
1846                               struct perf_event *event)
1847 {
1848         struct event_constraint *c = NULL, *d;
1849         struct hw_perf_event_extra *xreg, *breg;
1850
1851         xreg = &event->hw.extra_reg;
1852         if (xreg->idx != EXTRA_REG_NONE) {
1853                 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
1854                 if (c == &emptyconstraint)
1855                         return c;
1856         }
1857         breg = &event->hw.branch_reg;
1858         if (breg->idx != EXTRA_REG_NONE) {
1859                 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
1860                 if (d == &emptyconstraint) {
1861                         __intel_shared_reg_put_constraints(cpuc, xreg);
1862                         c = d;
1863                 }
1864         }
1865         return c;
1866 }
1867
1868 struct event_constraint *
1869 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
1870                           struct perf_event *event)
1871 {
1872         struct event_constraint *c;
1873
1874         if (x86_pmu.event_constraints) {
1875                 for_each_event_constraint(c, x86_pmu.event_constraints) {
1876                         if ((event->hw.config & c->cmask) == c->code) {
1877                                 event->hw.flags |= c->flags;
1878                                 return c;
1879                         }
1880                 }
1881         }
1882
1883         return &unconstrained;
1884 }
1885
1886 static struct event_constraint *
1887 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
1888                             struct perf_event *event)
1889 {
1890         struct event_constraint *c;
1891
1892         c = intel_bts_constraints(event);
1893         if (c)
1894                 return c;
1895
1896         c = intel_shared_regs_constraints(cpuc, event);
1897         if (c)
1898                 return c;
1899
1900         c = intel_pebs_constraints(event);
1901         if (c)
1902                 return c;
1903
1904         return x86_get_event_constraints(cpuc, idx, event);
1905 }
1906
1907 static void
1908 intel_start_scheduling(struct cpu_hw_events *cpuc)
1909 {
1910         struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
1911         struct intel_excl_states *xl;
1912         int tid = cpuc->excl_thread_id;
1913
1914         /*
1915          * nothing needed if in group validation mode
1916          */
1917         if (cpuc->is_fake || !is_ht_workaround_enabled())
1918                 return;
1919
1920         /*
1921          * no exclusion needed
1922          */
1923         if (WARN_ON_ONCE(!excl_cntrs))
1924                 return;
1925
1926         xl = &excl_cntrs->states[tid];
1927
1928         xl->sched_started = true;
1929         /*
1930          * Lock the shared state until we are done scheduling, i.e. until
1931          * intel_stop_scheduling(); this makes scheduling appear as a
1932          * transaction.
1933          */
1934         raw_spin_lock(&excl_cntrs->lock);
1935 }
1936
1937 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
1938 {
1939         struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
1940         struct event_constraint *c = cpuc->event_constraint[idx];
1941         struct intel_excl_states *xl;
1942         int tid = cpuc->excl_thread_id;
1943
1944         if (cpuc->is_fake || !is_ht_workaround_enabled())
1945                 return;
1946
1947         if (WARN_ON_ONCE(!excl_cntrs))
1948                 return;
1949
1950         if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
1951                 return;
1952
1953         xl = &excl_cntrs->states[tid];
1954
1955         lockdep_assert_held(&excl_cntrs->lock);
1956
1957         if (c->flags & PERF_X86_EVENT_EXCL)
1958                 xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
1959         else
1960                 xl->state[cntr] = INTEL_EXCL_SHARED;
1961 }
1962
1963 static void
1964 intel_stop_scheduling(struct cpu_hw_events *cpuc)
1965 {
1966         struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
1967         struct intel_excl_states *xl;
1968         int tid = cpuc->excl_thread_id;
1969
1970         /*
1971          * nothing needed if in group validation mode
1972          */
1973         if (cpuc->is_fake || !is_ht_workaround_enabled())
1974                 return;
1975         /*
1976          * no exclusion needed
1977          */
1978         if (WARN_ON_ONCE(!excl_cntrs))
1979                 return;
1980
1981         xl = &excl_cntrs->states[tid];
1982
1983         xl->sched_started = false;
1984         /*
1985          * release shared state lock (acquired in intel_start_scheduling())
1986          */
1987         raw_spin_unlock(&excl_cntrs->lock);
1988 }
1989
1990 static struct event_constraint *
1991 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
1992                            int idx, struct event_constraint *c)
1993 {
1994         struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
1995         struct intel_excl_states *xlo;
1996         int tid = cpuc->excl_thread_id;
1997         int is_excl, i;
1998
1999         /*
2000          * validating a group does not require
2001          * enforcing cross-thread exclusion
2002          */
2003         if (cpuc->is_fake || !is_ht_workaround_enabled())
2004                 return c;
2005
2006         /*
2007          * no exclusion needed
2008          */
2009         if (WARN_ON_ONCE(!excl_cntrs))
2010                 return c;
2011
2012         /*
2013          * because we modify the constraint, we need
2014          * to make a copy. Static constraints come
2015          * from static const tables.
2016          *
2017          * only needed when constraint has not yet
2018          * been cloned (marked dynamic)
2019          */
2020         if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
2021                 struct event_constraint *cx;
2022
2023                 /*
2024                  * grab pre-allocated constraint entry
2025                  */
2026                 cx = &cpuc->constraint_list[idx];
2027
2028                 /*
2029                  * initialize dynamic constraint
2030                  * with static constraint
2031                  */
2032                 *cx = *c;
2033
2034                 /*
2035                  * mark constraint as dynamic, so we
2036                  * can free it later on
2037                  */
2038                 cx->flags |= PERF_X86_EVENT_DYNAMIC;
2039                 c = cx;
2040         }
2041
2042         /*
2043          * From here on, the constraint is dynamic.
2044          * Either it was just allocated above, or it
2045          * was allocated during an earlier invocation
2046          * of this function.
2047          */
2048
2049         /*
2050          * state of sibling HT
2051          */
2052         xlo = &excl_cntrs->states[tid ^ 1];
2053
2054         /*
2055          * event requires exclusive counter access
2056          * across HT threads
2057          */
2058         is_excl = c->flags & PERF_X86_EVENT_EXCL;
2059         if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
2060                 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
2061                 if (!cpuc->n_excl++)
2062                         WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
2063         }
2064
2065         /*
2066          * Modify static constraint with current dynamic
2067          * state of thread
2068          *
2069          * EXCLUSIVE: sibling counter measuring exclusive event
2070          * SHARED   : sibling counter measuring non-exclusive event
2071          * UNUSED   : sibling counter unused
2072          */
2073         for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
2074                 /*
2075                  * exclusive event in sibling counter
2076                  * our corresponding counter cannot be used
2077                  * regardless of our event
2078                  */
2079                 if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE)
2080                         __clear_bit(i, c->idxmsk);
2081                 /*
2082                  * if measuring an exclusive event, sibling
2083                  * measuring non-exclusive, then counter cannot
2084                  * be used
2085                  */
2086                 if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED)
2087                         __clear_bit(i, c->idxmsk);
2088         }
2089
2090         /*
2091          * recompute actual bit weight for scheduling algorithm
2092          */
2093         c->weight = hweight64(c->idxmsk64);
2094
2095         /*
2096          * if we return an empty mask, then switch
2097          * back to static empty constraint to avoid
2098          * the cost of freeing later on
2099          */
2100         if (c->weight == 0)
2101                 c = &emptyconstraint;
2102
2103         return c;
2104 }
2105
2106 static struct event_constraint *
2107 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2108                             struct perf_event *event)
2109 {
2110         struct event_constraint *c1 = cpuc->event_constraint[idx];
2111         struct event_constraint *c2;
2112
2113         /*
2114          * first time only
2115          * - static constraint: no change across incremental scheduling calls
2116          * - dynamic constraint: handled by intel_get_excl_constraints()
2117          */
2118         c2 = __intel_get_event_constraints(cpuc, idx, event);
2119         if (c1 && (c1->flags & PERF_X86_EVENT_DYNAMIC)) {
2120                 bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
2121                 c1->weight = c2->weight;
2122                 c2 = c1;
2123         }
2124
2125         if (cpuc->excl_cntrs)
2126                 return intel_get_excl_constraints(cpuc, event, idx, c2);
2127
2128         return c2;
2129 }
2130
2131 static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
2132                 struct perf_event *event)
2133 {
2134         struct hw_perf_event *hwc = &event->hw;
2135         struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2136         int tid = cpuc->excl_thread_id;
2137         struct intel_excl_states *xl;
2138
2139         /*
2140          * nothing needed if in group validation mode
2141          */
2142         if (cpuc->is_fake)
2143                 return;
2144
2145         if (WARN_ON_ONCE(!excl_cntrs))
2146                 return;
2147
2148         if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
2149                 hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
2150                 if (!--cpuc->n_excl)
2151                         WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
2152         }
2153
2154         /*
2155          * If event was actually assigned, then mark the counter state as
2156          * unused now.
2157          */
2158         if (hwc->idx >= 0) {
2159                 xl = &excl_cntrs->states[tid];
2160
2161                 /*
2162                  * put_constraint may be called from x86_schedule_events()
2163                  * which already holds the lock, so make the locking
2164                  * conditional here.
2165                  */
2166                 if (!xl->sched_started)
2167                         raw_spin_lock(&excl_cntrs->lock);
2168
2169                 xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
2170
2171                 if (!xl->sched_started)
2172                         raw_spin_unlock(&excl_cntrs->lock);
2173         }
2174 }
2175
2176 static void
2177 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
2178                                         struct perf_event *event)
2179 {
2180         struct hw_perf_event_extra *reg;
2181
2182         reg = &event->hw.extra_reg;
2183         if (reg->idx != EXTRA_REG_NONE)
2184                 __intel_shared_reg_put_constraints(cpuc, reg);
2185
2186         reg = &event->hw.branch_reg;
2187         if (reg->idx != EXTRA_REG_NONE)
2188                 __intel_shared_reg_put_constraints(cpuc, reg);
2189 }
2190
2191 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
2192                                         struct perf_event *event)
2193 {
2194         intel_put_shared_regs_event_constraints(cpuc, event);
2195
2196         /*
2197          * If the PMU has exclusive counter restrictions, then
2198          * all events are subject to them and must call the
2199          * put_excl_constraints() routine.
2200          */
2201         if (cpuc->excl_cntrs)
2202                 intel_put_excl_constraints(cpuc, event);
2203 }
2204
2205 static void intel_pebs_aliases_core2(struct perf_event *event)
2206 {
2207         if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
2208                 /*
2209                  * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
2210                  * (0x003c) so that we can use it with PEBS.
2211                  *
2212                  * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
2213                  * PEBS capable. However we can use INST_RETIRED.ANY_P
2214                  * (0x00c0), which is a PEBS capable event, to get the same
2215                  * count.
2216                  *
2217                  * INST_RETIRED.ANY_P counts the number of cycles that retire
2218                  * CNTMASK instructions. By setting CNTMASK to a value (16)
2219                  * larger than the maximum number of instructions that can be
2220                  * retired per cycle (4) and then inverting the condition, we
2221                  * count all cycles that retire 16 or fewer instructions, which
2222                  * is every cycle.
2223                  *
2224                  * Thereby we gain a PEBS capable cycle counter.
2225                  */
2226                 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
2227
2228                 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
2229                 event->hw.config = alt_config;
2230         }
2231 }
2232
2233 static void intel_pebs_aliases_snb(struct perf_event *event)
2234 {
2235         if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
2236                 /*
2237                  * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
2238                  * (0x003c) so that we can use it with PEBS.
2239                  *
2240                  * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
2241                  * PEBS capable. However we can use UOPS_RETIRED.ALL
2242                  * (0x01c2), which is a PEBS capable event, to get the same
2243                  * count.
2244                  *
2245                  * UOPS_RETIRED.ALL counts the number of cycles that retire
2246                  * CNTMASK micro-ops. By setting CNTMASK to a value (16)
2247                  * larger than the maximum number of micro-ops that can be
2248                  * retired per cycle (4) and then inverting the condition, we
2249                  * count all cycles that retire 16 or fewer micro-ops, which
2250                  * is every cycle.
2251                  *
2252                  * Thereby we gain a PEBS capable cycle counter.
2253                  */
2254                 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
2255
2256                 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
2257                 event->hw.config = alt_config;
2258         }
2259 }
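
/*
 * Given the format attributes defined below (event config:0-7, umask
 * config:8-15, inv config:23, cmask config:24-31), the SNB alias above is
 * equivalent to the raw encoding 0x108001c2, i.e. roughly:
 *
 *   perf stat -e cpu/event=0xc2,umask=0x1,inv,cmask=16/ ...
 *
 * (shown for illustration only; the alias is applied transparently in-kernel).
 */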
2260
2261 static unsigned long intel_pmu_free_running_flags(struct perf_event *event)
2262 {
2263         unsigned long flags = x86_pmu.free_running_flags;
2264
2265         if (event->attr.use_clockid)
2266                 flags &= ~PERF_SAMPLE_TIME;
2267         return flags;
2268 }
2269
2270 static int intel_pmu_hw_config(struct perf_event *event)
2271 {
2272         int ret = x86_pmu_hw_config(event);
2273
2274         if (ret)
2275                 return ret;
2276
2277         if (event->attr.precise_ip) {
2278                 if (!event->attr.freq) {
2279                         event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
2280                         if (!(event->attr.sample_type &
2281                               ~intel_pmu_free_running_flags(event)))
2282                                 event->hw.flags |= PERF_X86_EVENT_FREERUNNING;
2283                 }
2284                 if (x86_pmu.pebs_aliases)
2285                         x86_pmu.pebs_aliases(event);
2286         }
2287
2288         if (needs_branch_stack(event)) {
2289                 ret = intel_pmu_setup_lbr_filter(event);
2290                 if (ret)
2291                         return ret;
2292
2293                 /*
2294                  * BTS is set up earlier in this path, so don't account twice
2295                  */
2296                 if (!intel_pmu_has_bts(event)) {
2297                         /* disallow lbr if conflicting events are present */
2298                         if (x86_add_exclusive(x86_lbr_exclusive_lbr))
2299                                 return -EBUSY;
2300
2301                         event->destroy = hw_perf_lbr_event_destroy;
2302                 }
2303         }
2304
2305         if (event->attr.type != PERF_TYPE_RAW)
2306                 return 0;
2307
2308         if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
2309                 return 0;
2310
2311         if (x86_pmu.version < 3)
2312                 return -EINVAL;
2313
2314         if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
2315                 return -EACCES;
2316
2317         event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
2318
2319         return 0;
2320 }
2321
2322 struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
2323 {
2324         if (x86_pmu.guest_get_msrs)
2325                 return x86_pmu.guest_get_msrs(nr);
2326         *nr = 0;
2327         return NULL;
2328 }
2329 EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
2330
2331 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
2332 {
2333         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2334         struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
2335
2336         arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
2337         arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
2338         arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
2339         /*
2340          * If a PMU counter has PEBS enabled, it is not enough to disable the
2341          * counter on guest entry, since a PEBS memory write can overshoot the
2342          * guest entry and corrupt guest memory. Disabling PEBS solves the problem.
2343          */
2344         arr[1].msr = MSR_IA32_PEBS_ENABLE;
2345         arr[1].host = cpuc->pebs_enabled;
2346         arr[1].guest = 0;
2347
2348         *nr = 2;
2349         return arr;
2350 }
2351
2352 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
2353 {
2354         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2355         struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
2356         int idx;
2357
2358         for (idx = 0; idx < x86_pmu.num_counters; idx++)  {
2359                 struct perf_event *event = cpuc->events[idx];
2360
2361                 arr[idx].msr = x86_pmu_config_addr(idx);
2362                 arr[idx].host = arr[idx].guest = 0;
2363
2364                 if (!test_bit(idx, cpuc->active_mask))
2365                         continue;
2366
2367                 arr[idx].host = arr[idx].guest =
2368                         event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
2369
2370                 if (event->attr.exclude_host)
2371                         arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
2372                 else if (event->attr.exclude_guest)
2373                         arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
2374         }
2375
2376         *nr = x86_pmu.num_counters;
2377         return arr;
2378 }
2379
2380 static void core_pmu_enable_event(struct perf_event *event)
2381 {
2382         if (!event->attr.exclude_host)
2383                 x86_pmu_enable_event(event);
2384 }
2385
2386 static void core_pmu_enable_all(int added)
2387 {
2388         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2389         int idx;
2390
2391         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
2392                 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
2393
2394                 if (!test_bit(idx, cpuc->active_mask) ||
2395                                 cpuc->events[idx]->attr.exclude_host)
2396                         continue;
2397
2398                 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
2399         }
2400 }
2401
2402 static int hsw_hw_config(struct perf_event *event)
2403 {
2404         int ret = intel_pmu_hw_config(event);
2405
2406         if (ret)
2407                 return ret;
2408         if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
2409                 return 0;
2410         event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
2411
2412         /*
2413          * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
2414          * PEBS or in ANY thread mode. Since the results are nonsensical, forbid
2415          * this combination.
2416          */
2417         if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
2418              ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
2419               event->attr.precise_ip > 0))
2420                 return -EOPNOTSUPP;
2421
2422         if (event_is_checkpointed(event)) {
2423                 /*
2424                  * Sampling of checkpointed events can cause situations where
2425                  * the CPU constantly aborts because of an overflow, which is
2426                  * then checkpointed back and ignored. Forbid checkpointing
2427                  * for sampling.
2428                  *
2429                  * But still allow a long sampling period, so that perf stat
2430                  * from KVM works.
2431                  */
2432                 if (event->attr.sample_period > 0 &&
2433                     event->attr.sample_period < 0x7fffffff)
2434                         return -EOPNOTSUPP;
2435         }
2436         return 0;
2437 }
2438
2439 static struct event_constraint counter2_constraint =
2440                         EVENT_CONSTRAINT(0, 0x4, 0);
2441
2442 static struct event_constraint *
2443 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2444                           struct perf_event *event)
2445 {
2446         struct event_constraint *c;
2447
2448         c = intel_get_event_constraints(cpuc, idx, event);
2449
2450         /* Handle special quirk on in_tx_checkpointed only in counter 2 */
2451         if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
2452                 if (c->idxmsk64 & (1U << 2))
2453                         return &counter2_constraint;
2454                 return &emptyconstraint;
2455         }
2456
2457         return c;
2458 }
2459
2460 /*
2461  * Broadwell:
2462  *
2463  * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
2464  * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
2465  * the two to enforce a minimum period of 128 (the smallest value that has bits
2466  * 0-5 cleared and >= 100).
2467  *
2468  * Because of how the code in x86_perf_event_set_period() works, the truncation
2469  * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
2470  * to make up for the 'lost' events due to carrying the 'error' in period_left.
2471  *
2472  * Therefore the effective (average) period matches the requested period,
2473  * despite coarser hardware granularity.
2474  */
2475 static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
2476 {
2477         if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
2478                         X86_CONFIG(.event=0xc0, .umask=0x01)) {
2479                 if (left < 128)
2480                         left = 128;
2481                 left &= ~0x3fu;
2482         }
2483         return left;
2484 }
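
/*
 * Worked example: a requested INST_RETIRED.ALL period of 100 is raised to
 * 128; a period of 200 becomes 200 & ~0x3f == 192. The 8 truncated counts
 * are carried in period_left by x86_perf_event_set_period(), so a later
 * period is lengthened and the average period stays at the requested 200.
 */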
2485
2486 PMU_FORMAT_ATTR(event,  "config:0-7"    );
2487 PMU_FORMAT_ATTR(umask,  "config:8-15"   );
2488 PMU_FORMAT_ATTR(edge,   "config:18"     );
2489 PMU_FORMAT_ATTR(pc,     "config:19"     );
2490 PMU_FORMAT_ATTR(any,    "config:21"     ); /* v3 + */
2491 PMU_FORMAT_ATTR(inv,    "config:23"     );
2492 PMU_FORMAT_ATTR(cmask,  "config:24-31"  );
2493 PMU_FORMAT_ATTR(in_tx,  "config:32");
2494 PMU_FORMAT_ATTR(in_tx_cp, "config:33");
2495
2496 static struct attribute *intel_arch_formats_attr[] = {
2497         &format_attr_event.attr,
2498         &format_attr_umask.attr,
2499         &format_attr_edge.attr,
2500         &format_attr_pc.attr,
2501         &format_attr_inv.attr,
2502         &format_attr_cmask.attr,
2503         NULL,
2504 };
2505
2506 ssize_t intel_event_sysfs_show(char *page, u64 config)
2507 {
2508         u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
2509
2510         return x86_event_sysfs_show(page, config, event);
2511 }
2512
2513 struct intel_shared_regs *allocate_shared_regs(int cpu)
2514 {
2515         struct intel_shared_regs *regs;
2516         int i;
2517
2518         regs = kzalloc_node(sizeof(struct intel_shared_regs),
2519                             GFP_KERNEL, cpu_to_node(cpu));
2520         if (regs) {
2521                 /*
2522                  * initialize the locks to keep lockdep happy
2523                  */
2524                 for (i = 0; i < EXTRA_REG_MAX; i++)
2525                         raw_spin_lock_init(&regs->regs[i].lock);
2526
2527                 regs->core_id = -1;
2528         }
2529         return regs;
2530 }
2531
2532 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
2533 {
2534         struct intel_excl_cntrs *c;
2535
2536         c = kzalloc_node(sizeof(struct intel_excl_cntrs),
2537                          GFP_KERNEL, cpu_to_node(cpu));
2538         if (c) {
2539                 raw_spin_lock_init(&c->lock);
2540                 c->core_id = -1;
2541         }
2542         return c;
2543 }
2544
2545 static int intel_pmu_cpu_prepare(int cpu)
2546 {
2547         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2548
2549         if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
2550                 cpuc->shared_regs = allocate_shared_regs(cpu);
2551                 if (!cpuc->shared_regs)
2552                         return NOTIFY_BAD;
2553         }
2554
2555         if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
2556                 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
2557
2558                 cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
2559                 if (!cpuc->constraint_list)
2560                         return NOTIFY_BAD;
2561
2562                 cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
2563                 if (!cpuc->excl_cntrs) {
2564                         kfree(cpuc->constraint_list);
2565                         kfree(cpuc->shared_regs);
2566                         return NOTIFY_BAD;
2567                 }
2568                 cpuc->excl_thread_id = 0;
2569         }
2570
2571         return NOTIFY_OK;
2572 }
2573
2574 static void intel_pmu_cpu_starting(int cpu)
2575 {
2576         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2577         int core_id = topology_core_id(cpu);
2578         int i;
2579
2580         init_debug_store_on_cpu(cpu);
2581         /*
2582          * Deal with CPUs that don't clear their LBRs on power-up.
2583          */
2584         intel_pmu_lbr_reset();
2585
2586         cpuc->lbr_sel = NULL;
2587
2588         if (!cpuc->shared_regs)
2589                 return;
2590
2591         if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
2592                 void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
2593
2594                 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
2595                         struct intel_shared_regs *pc;
2596
2597                         pc = per_cpu(cpu_hw_events, i).shared_regs;
2598                         if (pc && pc->core_id == core_id) {
2599                                 *onln = cpuc->shared_regs;
2600                                 cpuc->shared_regs = pc;
2601                                 break;
2602                         }
2603                 }
2604                 cpuc->shared_regs->core_id = core_id;
2605                 cpuc->shared_regs->refcnt++;
2606         }
2607
2608         if (x86_pmu.lbr_sel_map)
2609                 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
2610
2611         if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
2612                 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
2613                         struct intel_excl_cntrs *c;
2614
2615                         c = per_cpu(cpu_hw_events, i).excl_cntrs;
2616                         if (c && c->core_id == core_id) {
2617                                 cpuc->kfree_on_online[X86_PERF_KFREE_EXCL] = cpuc->excl_cntrs;
2618                                 cpuc->excl_cntrs = c;
2619                                 cpuc->excl_thread_id = 1;
2620                                 break;
2621                         }
2622                 }
2623                 cpuc->excl_cntrs->core_id = core_id;
2624                 cpuc->excl_cntrs->refcnt++;
2625         }
2626 }
2627
2628 static void free_excl_cntrs(int cpu)
2629 {
2630         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2631         struct intel_excl_cntrs *c;
2632
2633         c = cpuc->excl_cntrs;
2634         if (c) {
2635                 if (c->core_id == -1 || --c->refcnt == 0)
2636                         kfree(c);
2637                 cpuc->excl_cntrs = NULL;
2638                 kfree(cpuc->constraint_list);
2639                 cpuc->constraint_list = NULL;
2640         }
2641 }
2642
2643 static void intel_pmu_cpu_dying(int cpu)
2644 {
2645         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2646         struct intel_shared_regs *pc;
2647
2648         pc = cpuc->shared_regs;
2649         if (pc) {
2650                 if (pc->core_id == -1 || --pc->refcnt == 0)
2651                         kfree(pc);
2652                 cpuc->shared_regs = NULL;
2653         }
2654
2655         free_excl_cntrs(cpu);
2656
2657         fini_debug_store_on_cpu(cpu);
2658 }
2659
2660 static void intel_pmu_sched_task(struct perf_event_context *ctx,
2661                                  bool sched_in)
2662 {
2663         if (x86_pmu.pebs_active)
2664                 intel_pmu_pebs_sched_task(ctx, sched_in);
2665         if (x86_pmu.lbr_nr)
2666                 intel_pmu_lbr_sched_task(ctx, sched_in);
2667 }
2668
2669 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
2670
2671 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
2672
2673 static struct attribute *intel_arch3_formats_attr[] = {
2674         &format_attr_event.attr,
2675         &format_attr_umask.attr,
2676         &format_attr_edge.attr,
2677         &format_attr_pc.attr,
2678         &format_attr_any.attr,
2679         &format_attr_inv.attr,
2680         &format_attr_cmask.attr,
2681         &format_attr_in_tx.attr,
2682         &format_attr_in_tx_cp.attr,
2683
2684         &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
2685         &format_attr_ldlat.attr, /* PEBS load latency */
2686         NULL,
2687 };
2688
2689 static __initconst const struct x86_pmu core_pmu = {
2690         .name                   = "core",
2691         .handle_irq             = x86_pmu_handle_irq,
2692         .disable_all            = x86_pmu_disable_all,
2693         .enable_all             = core_pmu_enable_all,
2694         .enable                 = core_pmu_enable_event,
2695         .disable                = x86_pmu_disable_event,
2696         .hw_config              = x86_pmu_hw_config,
2697         .schedule_events        = x86_schedule_events,
2698         .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
2699         .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
2700         .event_map              = intel_pmu_event_map,
2701         .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
2702         .apic                   = 1,
2703         .free_running_flags     = PEBS_FREERUNNING_FLAGS,
2704
2705         /*
2706          * Intel PMCs cannot be accessed sanely above 32-bit width,
2707          * so we install an artificial 1<<31 period regardless of
2708          * the generic event period:
2709          */
2710         .max_period             = (1ULL<<31) - 1,
2711         .get_event_constraints  = intel_get_event_constraints,
2712         .put_event_constraints  = intel_put_event_constraints,
2713         .event_constraints      = intel_core_event_constraints,
2714         .guest_get_msrs         = core_guest_get_msrs,
2715         .format_attrs           = intel_arch_formats_attr,
2716         .events_sysfs_show      = intel_event_sysfs_show,
2717
2718         /*
2719          * A virtual (or funny metal) CPU can define x86_pmu.extra_regs
2720          * together with PMU version 1 and thus be using core_pmu with
2721          * shared_regs. We need the following callbacks here to allocate
2722          * it properly.
2723          */
2724         .cpu_prepare            = intel_pmu_cpu_prepare,
2725         .cpu_starting           = intel_pmu_cpu_starting,
2726         .cpu_dying              = intel_pmu_cpu_dying,
2727 };
2728
2729 static __initconst const struct x86_pmu intel_pmu = {
2730         .name                   = "Intel",
2731         .handle_irq             = intel_pmu_handle_irq,
2732         .disable_all            = intel_pmu_disable_all,
2733         .enable_all             = intel_pmu_enable_all,
2734         .enable                 = intel_pmu_enable_event,
2735         .disable                = intel_pmu_disable_event,
2736         .hw_config              = intel_pmu_hw_config,
2737         .schedule_events        = x86_schedule_events,
2738         .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
2739         .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
2740         .event_map              = intel_pmu_event_map,
2741         .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
2742         .apic                   = 1,
2743         .free_running_flags     = PEBS_FREERUNNING_FLAGS,
2744         /*
2745          * Intel PMCs cannot be accessed sanely above 32 bit width,
2746          * so we install an artificial 1<<31 period regardless of
2747          * the generic event period:
2748          */
2749         .max_period             = (1ULL << 31) - 1,
2750         .get_event_constraints  = intel_get_event_constraints,
2751         .put_event_constraints  = intel_put_event_constraints,
2752         .pebs_aliases           = intel_pebs_aliases_core2,
2753
2754         .format_attrs           = intel_arch3_formats_attr,
2755         .events_sysfs_show      = intel_event_sysfs_show,
2756
2757         .cpu_prepare            = intel_pmu_cpu_prepare,
2758         .cpu_starting           = intel_pmu_cpu_starting,
2759         .cpu_dying              = intel_pmu_cpu_dying,
2760         .guest_get_msrs         = intel_guest_get_msrs,
2761         .sched_task             = intel_pmu_sched_task,
2762 };
2763
2764 static __init void intel_clovertown_quirk(void)
2765 {
2766         /*
2767          * PEBS is unreliable due to:
2768          *
2769          *   AJ67  - PEBS may experience CPL leaks
2770          *   AJ68  - PEBS PMI may be delayed by one event
2771          *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
2772          *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
2773          *
2774          * AJ67 could be worked around by restricting the OS/USR flags.
2775          * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
2776          *
2777          * AJ106 could possibly be worked around by not allowing LBR
2778          *       usage from PEBS, including the fixup.
2779          * AJ68  could possibly be worked around by always programming
2780          *       a pebs_event_reset[0] value and coping with the lost events.
2781          *
2782          * But taken together it might just make sense to not enable PEBS on
2783          * these chips.
2784          */
2785         pr_warn("PEBS disabled due to CPU errata\n");
2786         x86_pmu.pebs = 0;
2787         x86_pmu.pebs_constraints = NULL;
2788 }
2789
2790 static int intel_snb_pebs_broken(int cpu)
2791 {
2792         u32 rev = UINT_MAX; /* default to broken for unknown models */
2793
2794         switch (cpu_data(cpu).x86_model) {
2795         case 42: /* SNB */
2796                 rev = 0x28;
2797                 break;
2798
2799         case 45: /* SNB-EP */
2800                 switch (cpu_data(cpu).x86_mask) {
2801                 case 6: rev = 0x618; break;
2802                 case 7: rev = 0x70c; break;
2803                 }
2804         }
2805
2806         return (cpu_data(cpu).microcode < rev);
2807 }
2808
2809 static void intel_snb_check_microcode(void)
2810 {
2811         int pebs_broken = 0;
2812         int cpu;
2813
2814         get_online_cpus();
2815         for_each_online_cpu(cpu) {
2816                 if ((pebs_broken = intel_snb_pebs_broken(cpu)))
2817                         break;
2818         }
2819         put_online_cpus();
2820
2821         if (pebs_broken == x86_pmu.pebs_broken)
2822                 return;
2823
2824         /*
2825          * Serialized by the microcode lock.
2826          */
2827         if (x86_pmu.pebs_broken) {
2828                 pr_info("PEBS enabled due to microcode update\n");
2829                 x86_pmu.pebs_broken = 0;
2830         } else {
2831                 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
2832                 x86_pmu.pebs_broken = 1;
2833         }
2834 }
2835
2836 /*
2837  * Under certain circumstances, accessing certain MSRs may cause a #GP.
2838  * This function tests whether the input MSR can be safely accessed.
2839  */
static bool check_msr(unsigned long msr, u64 mask)
{
        u64 val_old, val_new, val_tmp;

        /*
         * Read the current value, change it and read it back to see if it
         * matches; this is needed to detect certain hardware emulators
         * (qemu/kvm) that don't trap on the MSR access and always return 0s.
         */
        if (rdmsrl_safe(msr, &val_old))
                return false;

        /*
         * Only change the bits which can be updated by wrmsrl.
         */
        val_tmp = val_old ^ mask;
        if (wrmsrl_safe(msr, val_tmp) ||
            rdmsrl_safe(msr, &val_new))
                return false;

        if (val_new != val_tmp)
                return false;

        /*
         * At this point the MSR is known to be safely accessible.
         * Restore the old value and return.
         */
        wrmsrl(msr, val_old);

        return true;
}
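
/*
 * For example (illustrative, mirroring the probes in intel_pmu_init()
 * below), the LBR top-of-stack MSR is checked with a mask that only
 * flips writable bits before the LBRs are trusted:
 *
 *      if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
 *              x86_pmu.lbr_nr = 0;
 */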

static __init void intel_sandybridge_quirk(void)
{
        x86_pmu.check_microcode = intel_snb_check_microcode;
        intel_snb_check_microcode();
}

static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
        { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
        { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
        { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
        { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
        { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
        { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
        { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
};
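
/*
 * The table above is indexed by bit position in CPUID.0AH:EBX, where a
 * set bit means the corresponding architectural event is *not* available.
 */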

static __init void intel_arch_events_quirk(void)
{
        int bit;

        /* disable events that cpuid reports as not present */
        for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
                intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
                pr_warn("CPUID marked event: \'%s\' unavailable\n",
                        intel_arch_events_map[bit].name);
        }
}

static __init void intel_nehalem_quirk(void)
{
        union cpuid10_ebx ebx;

        ebx.full = x86_pmu.events_maskl;
        if (ebx.split.no_branch_misses_retired) {
                /*
                 * Erratum AAJ80 detected, we work around it by using
                 * the BR_MISP_EXEC.ANY event. This will over-count
                 * branch-misses, but it's still much better than the
                 * architectural event which is often completely bogus:
                 */
                intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
                ebx.split.no_branch_misses_retired = 0;
                x86_pmu.events_maskl = ebx.full;
                pr_info("CPU erratum AAJ80 worked around\n");
        }
}

/*
 * Enable the software workaround for errata:
 * SNB: BJ122
 * IVB: BV98
 * HSW: HSD29
 *
 * It is only needed when HT is enabled. However, detecting whether HT is
 * enabled is difficult (it is model specific), so instead we enable the
 * workaround during early boot and verify whether it is really needed in
 * a later initcall phase, once valid topology information is available to
 * check whether HT is actually enabled.
 */
static __init void intel_ht_bug(void)
{
        x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;

        x86_pmu.start_scheduling = intel_start_scheduling;
        x86_pmu.commit_scheduling = intel_commit_scheduling;
        x86_pmu.stop_scheduling = intel_stop_scheduling;
}

EVENT_ATTR_STR(mem-loads,       mem_ld_hsw,     "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores,      mem_st_hsw,     "event=0xd0,umask=0x82");

/* Haswell special events */
EVENT_ATTR_STR(tx-start,        tx_start,       "event=0xc9,umask=0x1");
EVENT_ATTR_STR(tx-commit,       tx_commit,      "event=0xc9,umask=0x2");
EVENT_ATTR_STR(tx-abort,        tx_abort,       "event=0xc9,umask=0x4");
EVENT_ATTR_STR(tx-capacity,     tx_capacity,    "event=0x54,umask=0x2");
EVENT_ATTR_STR(tx-conflict,     tx_conflict,    "event=0x54,umask=0x1");
EVENT_ATTR_STR(el-start,        el_start,       "event=0xc8,umask=0x1");
EVENT_ATTR_STR(el-commit,       el_commit,      "event=0xc8,umask=0x2");
EVENT_ATTR_STR(el-abort,        el_abort,       "event=0xc8,umask=0x4");
EVENT_ATTR_STR(el-capacity,     el_capacity,    "event=0x54,umask=0x2");
EVENT_ATTR_STR(el-conflict,     el_conflict,    "event=0x54,umask=0x1");
EVENT_ATTR_STR(cycles-t,        cycles_t,       "event=0x3c,in_tx=1");
EVENT_ATTR_STR(cycles-ct,       cycles_ct,      "event=0x3c,in_tx=1,in_tx_cp=1");

static struct attribute *hsw_events_attrs[] = {
        EVENT_PTR(tx_start),
        EVENT_PTR(tx_commit),
        EVENT_PTR(tx_abort),
        EVENT_PTR(tx_capacity),
        EVENT_PTR(tx_conflict),
        EVENT_PTR(el_start),
        EVENT_PTR(el_commit),
        EVENT_PTR(el_abort),
        EVENT_PTR(el_capacity),
        EVENT_PTR(el_conflict),
        EVENT_PTR(cycles_t),
        EVENT_PTR(cycles_ct),
        EVENT_PTR(mem_ld_hsw),
        EVENT_PTR(mem_st_hsw),
        NULL
};

__init int intel_pmu_init(void)
{
        union cpuid10_edx edx;
        union cpuid10_eax eax;
        union cpuid10_ebx ebx;
        struct event_constraint *c;
        unsigned int unused;
        struct extra_reg *er;
        int version, i;

        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
                switch (boot_cpu_data.x86) {
                case 0x6:
                        return p6_pmu_init();
                case 0xb:
                        return knc_pmu_init();
                case 0xf:
                        return p4_pmu_init();
                }
                return -ENODEV;
        }

        /*
         * Check whether the Architectural PerfMon supports
         * the Branch Misses Retired hw_event.
         */
        cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
        if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
                return -ENODEV;

        version = eax.split.version_id;
        if (version < 2)
                x86_pmu = core_pmu;
        else
                x86_pmu = intel_pmu;

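        /*
         * Architectural perfmon v2 added the global control/status MSRs
         * (e.g. IA32_PERF_GLOBAL_CTRL) that the full intel_pmu relies on;
         * v1 parts fall back to the simpler core PMU above.
         */
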
        x86_pmu.version                 = version;
        x86_pmu.num_counters            = eax.split.num_counters;
        x86_pmu.cntval_bits             = eax.split.bit_width;
        x86_pmu.cntval_mask             = (1ULL << eax.split.bit_width) - 1;

        x86_pmu.events_maskl            = ebx.full;
        x86_pmu.events_mask_len         = eax.split.mask_length;

        x86_pmu.max_pebs_events         = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);

        /*
         * Quirk: v2 perfmon does not report fixed-purpose events, so
         * assume at least 3 events:
         */
        if (version > 1)
                x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

        if (boot_cpu_has(X86_FEATURE_PDCM)) {
                u64 capabilities;

                rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
                x86_pmu.intel_cap.capabilities = capabilities;
        }

        intel_ds_init();

        x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */

        /*
         * Install the hw-cache-events table:
         */
        switch (boot_cpu_data.x86_model) {
        case 14: /* 65nm Core "Yonah" */
                pr_cont("Core events, ");
                break;

        case 15: /* 65nm Core2 "Merom"          */
                x86_add_quirk(intel_clovertown_quirk);
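                /* fall through: Merom shares the Core2 setup below */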
        case 22: /* 65nm Core2 "Merom-L"        */
        case 23: /* 45nm Core2 "Penryn"         */
        case 29: /* 45nm Core2 "Dunnington" (MP) */
                memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));

                intel_pmu_lbr_init_core();

                x86_pmu.event_constraints = intel_core2_event_constraints;
                x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
                pr_cont("Core2 events, ");
                break;

        case 30: /* 45nm Nehalem    */
        case 26: /* 45nm Nehalem-EP */
        case 46: /* 45nm Nehalem-EX */
                memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
                       sizeof(hw_cache_extra_regs));

                intel_pmu_lbr_init_nhm();

                x86_pmu.event_constraints = intel_nehalem_event_constraints;
                x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
                x86_pmu.extra_regs = intel_nehalem_extra_regs;

                x86_pmu.cpu_events = nhm_events_attrs;

                /* UOPS_ISSUED.STALLED_CYCLES */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
                /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
                        X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
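                /*
                 * In the encodings above, .cmask=1 with .inv=1 inverts the
                 * threshold comparison: the counter counts cycles in which
                 * fewer than one uop was issued/executed, i.e. stall cycles.
                 */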

                x86_add_quirk(intel_nehalem_quirk);

                pr_cont("Nehalem events, ");
                break;

        case 28: /* 45nm Atom "Pineview"   */
        case 38: /* 45nm Atom "Lincroft"   */
        case 39: /* 32nm Atom "Penwell"    */
        case 53: /* 32nm Atom "Cloverview" */
        case 54: /* 32nm Atom "Cedarview"  */
                memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));

                intel_pmu_lbr_init_atom();

                x86_pmu.event_constraints = intel_gen_event_constraints;
                x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
                pr_cont("Atom events, ");
                break;

        case 55: /* 22nm Atom "Silvermont"                */
        case 76: /* 14nm Atom "Airmont"                   */
        case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
                memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
                       sizeof(hw_cache_extra_regs));

                intel_pmu_lbr_init_atom();

                x86_pmu.event_constraints = intel_slm_event_constraints;
                x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
                x86_pmu.extra_regs = intel_slm_extra_regs;
                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
                pr_cont("Silvermont events, ");
                break;

        case 37: /* 32nm Westmere    */
        case 44: /* 32nm Westmere-EP */
        case 47: /* 32nm Westmere-EX */
                memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
                       sizeof(hw_cache_extra_regs));

                intel_pmu_lbr_init_nhm();

                x86_pmu.event_constraints = intel_westmere_event_constraints;
                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
                x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
                x86_pmu.extra_regs = intel_westmere_extra_regs;
                x86_pmu.flags |= PMU_FL_HAS_RSP_1;

                x86_pmu.cpu_events = nhm_events_attrs;

                /* UOPS_ISSUED.STALLED_CYCLES */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
                /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
                        X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

                pr_cont("Westmere events, ");
                break;

        case 42: /* 32nm SandyBridge         */
        case 45: /* 32nm SandyBridge-E/EN/EP */
                x86_add_quirk(intel_sandybridge_quirk);
                x86_add_quirk(intel_ht_bug);
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
                       sizeof(hw_cache_extra_regs));

                intel_pmu_lbr_init_snb();

                x86_pmu.event_constraints = intel_snb_event_constraints;
                x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
                x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
                if (boot_cpu_data.x86_model == 45)
                        x86_pmu.extra_regs = intel_snbep_extra_regs;
                else
                        x86_pmu.extra_regs = intel_snb_extra_regs;

                /* all extra regs are per-cpu when HT is on */
                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

                x86_pmu.cpu_events = snb_events_attrs;

                /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
                /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
                        X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

                pr_cont("SandyBridge events, ");
                break;

        case 58: /* 22nm IvyBridge       */
        case 62: /* 22nm IvyBridge-EP/EX */
                x86_add_quirk(intel_ht_bug);
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                /* dTLB-load-misses on IVB differs from SNB */
                hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */

                memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
                       sizeof(hw_cache_extra_regs));

                intel_pmu_lbr_init_snb();

                x86_pmu.event_constraints = intel_ivb_event_constraints;
                x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
                x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
                if (boot_cpu_data.x86_model == 62)
                        x86_pmu.extra_regs = intel_snbep_extra_regs;
                else
                        x86_pmu.extra_regs = intel_snb_extra_regs;
                /* all extra regs are per-cpu when HT is on */
                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

                x86_pmu.cpu_events = snb_events_attrs;

                /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);

                pr_cont("IvyBridge events, ");
                break;

        case 60: /* 22nm Haswell Core */
        case 63: /* 22nm Haswell Server */
        case 69: /* 22nm Haswell ULT */
        case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
                x86_add_quirk(intel_ht_bug);
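                /* defer the APIC LVTPC ack to the end of the PMI handler */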
                x86_pmu.late_ack = true;
                memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

                intel_pmu_lbr_init_hsw();

                x86_pmu.event_constraints = intel_hsw_event_constraints;
                x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
                x86_pmu.extra_regs = intel_snbep_extra_regs;
                x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
                /* all extra regs are per-cpu when HT is on */
                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

                x86_pmu.hw_config = hsw_hw_config;
                x86_pmu.get_event_constraints = hsw_get_event_constraints;
                x86_pmu.cpu_events = hsw_events_attrs;
                x86_pmu.lbr_double_abort = true;
                pr_cont("Haswell events, ");
                break;

        case 61: /* 14nm Broadwell Core-M */
        case 86: /* 14nm Broadwell Xeon D */
        case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
        case 79: /* 14nm Broadwell Server */
                x86_pmu.late_ack = true;
                memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

                /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
                hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] =
                        HSW_DEMAND_READ | BDW_L3_MISS | HSW_SNOOP_DRAM;
                hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] =
                        HSW_DEMAND_WRITE | BDW_L3_MISS | HSW_SNOOP_DRAM;
                hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] =
                        HSW_DEMAND_READ | BDW_L3_MISS_LOCAL | HSW_SNOOP_DRAM;
                hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] =
                        HSW_DEMAND_WRITE | BDW_L3_MISS_LOCAL | HSW_SNOOP_DRAM;

                intel_pmu_lbr_init_hsw();

                x86_pmu.event_constraints = intel_bdw_event_constraints;
                x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
                x86_pmu.extra_regs = intel_snbep_extra_regs;
                x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
                /* all extra regs are per-cpu when HT is on */
                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

                x86_pmu.hw_config = hsw_hw_config;
                x86_pmu.get_event_constraints = hsw_get_event_constraints;
                x86_pmu.cpu_events = hsw_events_attrs;
                x86_pmu.limit_period = bdw_limit_period;
                pr_cont("Broadwell events, ");
                break;

        default:
                switch (x86_pmu.version) {
                case 1:
                        x86_pmu.event_constraints = intel_v1_event_constraints;
                        pr_cont("generic architected perfmon v1, ");
                        break;
                default:
                        /*
                         * default constraints for v2 and up
                         */
                        x86_pmu.event_constraints = intel_gen_event_constraints;
                        pr_cont("generic architected perfmon, ");
                        break;
                }
        }

        if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
                WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
                     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
                x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
        }
        x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;

        if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
                WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
                     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
                x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
        }

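        /*
         * Fixed counters sit above the generic ones in GLOBAL_CTRL:
         * bit (32 + n) enables fixed counter n (INTEL_PMC_IDX_FIXED == 32).
         */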
        x86_pmu.intel_ctrl |=
                ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;

        if (x86_pmu.event_constraints) {
                /*
                 * The REF_CYCLES event on fixed counter 2 works only on
                 * that counter, so do not extend its mask to the generic
                 * counters.
                 */
                for_each_event_constraint(c, x86_pmu.event_constraints) {
                        if (c->cmask == FIXED_EVENT_FLAGS
                            && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
                                c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
                        }
                        c->idxmsk64 &=
                                ~(~0UL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
                        c->weight = hweight64(c->idxmsk64);
                }
        }

        /*
         * Accessing the LBR MSRs may cause a #GP under certain
         * circumstances, e.g. KVM doesn't support the LBR MSRs.
         * Check all LBR MSRs here.
         * Disable LBR access if any LBR MSR cannot be accessed.
         */
        if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
                x86_pmu.lbr_nr = 0;
        for (i = 0; i < x86_pmu.lbr_nr; i++) {
                if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
                      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
                        x86_pmu.lbr_nr = 0;
        }

        /*
         * Accessing extra registers may cause a #GP under certain
         * circumstances, e.g. KVM doesn't support the offcore events.
         * Check all extra_regs here.
         */
        if (x86_pmu.extra_regs) {
                for (er = x86_pmu.extra_regs; er->msr; er++) {
                        er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
                        /* Disable LBR select mapping */
                        if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
                                x86_pmu.lbr_sel_map = NULL;
                }
        }

        /* Support full width counters using alternative MSR range */
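        /*
         * The PMCx alias MSRs (MSR_IA32_PMC0 and up) accept full-width
         * writes, unlike the legacy PERFCTRx MSRs, which sign-extend
         * from bit 31 on write.
         */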
        if (x86_pmu.intel_cap.full_width_write) {
                x86_pmu.max_period = x86_pmu.cntval_mask;
                x86_pmu.perfctr = MSR_IA32_PMC0;
                pr_cont("full-width counters, ");
        }

        return 0;
}

/*
 * HT bug: phase 2 init
 * Called once we have valid topology information, to check whether or not
 * HT is enabled. If HT is off, we disable the workaround.
 */
static __init int fixup_ht_bug(void)
{
        int cpu = smp_processor_id();
        int w, c;
        /*
         * The problem is not present on this CPU model; nothing to do.
         */
        if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
                return 0;

        w = cpumask_weight(topology_sibling_cpumask(cpu));
        if (w > 1) {
                pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
                return 0;
        }

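        /*
         * Cycle the NMI watchdog across the flag change so that its perf
         * events are released and re-created without the exclusion
         * constraints.
         */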
        watchdog_nmi_disable_all();

        x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);

        x86_pmu.start_scheduling = NULL;
        x86_pmu.commit_scheduling = NULL;
        x86_pmu.stop_scheduling = NULL;

        watchdog_nmi_enable_all();

        get_online_cpus();

        for_each_online_cpu(c) {
                free_excl_cntrs(c);
        }

        put_online_cpus();
        pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
        return 0;
}
subsys_initcall(fixup_ht_bug)