#include <linux/perf_event.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/apicdef.h>

#include "perf_event.h"

static __initconst const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses          */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
		[ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, local + remote */
		[ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, remote only    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
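
/*
 * Table entry semantics (as interpreted by the generic x86 perf code,
 * see set_ext_hw_attr()): 0 means no suitable event exists (-ENOENT),
 * -1 marks a combination that cannot be counted on this hardware
 * (-EINVAL), and anything else is the raw event-select code.
 */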

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]			= 0x0076,
  [PERF_COUNT_HW_INSTRUCTIONS]			= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]		= 0x0080,
  [PERF_COUNT_HW_CACHE_MISSES]			= 0x0081,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]		= 0x00c2,
  [PERF_COUNT_HW_BRANCH_MISSES]			= 0x00c3,
  [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0, /* "Decoder empty" event */
  [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x00d1, /* "Dispatch stalls" event */
};

static u64 amd_pmu_event_map(int hw_event)
{
	return amd_perfmon_event_map[hw_event];
}
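
/*
 * For illustration: the generic event indices above map straight to
 * AMD event-select codes, so a userspace request such as
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *
 * ends up, via amd_pmu_event_map(), programming event 0x076
 * (CPU clocks not halted) into a PERFCTL MSR.
 */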

static int amd_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.exclude_host && event->attr.exclude_guest)
		/*
		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
		 * and will count in both modes. We don't want to count in that
		 * case so we emulate no-counting by setting US = OS = 0.
		 */
		event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
				      ARCH_PERFMON_EVENTSEL_OS);
	else if (event->attr.exclude_host)
		event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY;
	else if (event->attr.exclude_guest)
		event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY;

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

	return 0;
}
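
/*
 * Example (illustrative): a host-side profiler that should not count
 * while a KVM guest runs sets attr.exclude_guest = 1; the code above
 * then sets the Host-Only (HO) bit in the event select. Excluding
 * both host and guest at once cannot be expressed in hardware, since
 * HO == GO == 1 means "count everywhere", hence the USR/OS trick.
 */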

/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}
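
/*
 * The 12-bit AMD64 event-select code is split across the event-select
 * MSR: bits [7:0] live in config[7:0] and bits [11:8] in config[35:32],
 * which is why amd_get_event_code() shifts the upper nibble down by 24.
 */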

static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
	return (hwc->config & 0xe0) == 0xe0;
}

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
	struct amd_nb *nb = cpuc->amd_nb;

	return nb && nb->nb_id != -1;
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	/*
	 * only care about NB events
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
		return;

	/*
	 * need to scan whole list because event may not have
	 * been assigned during scheduling
	 *
	 * no race condition possible because event can only
	 * be removed on one CPU at a time AND PMU is disabled
	 * when we come here
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (nb->owners[i] == event) {
			cmpxchg(nb->owners+i, event, NULL);
			break;
		}
	}
}

 /*
  * AMD64 NorthBridge events need special treatment because
  * counter access needs to be synchronized across all cores
  * of a package. Refer to BKDG section 3.12
  *
  * NB events are events measuring L3 cache, HyperTransport
  * traffic. They are identified by an event code whose low
  * byte is in the range 0xe0-0xff (see amd_is_nb_event()).
  * They measure events on the NorthBridge, which is shared
  * by all cores on a package. NB events are counted on a
  * shared set of counters. When a NB event is programmed
  * in a counter, the data actually comes from a shared
  * counter. Thus, access to those counters needs to be
  * synchronized.
  *
  * We implement the synchronization such that no two cores
  * can be measuring NB events using the same counters. Thus,
  * we maintain a per-NB allocation table. The available slot
  * is propagated using the event_constraint structure.
  *
  * We provide only one choice for each NB event based on
  * the fact that only NB events have restrictions. Consequently,
  * if a counter is available, there is a guarantee the NB event
  * will be assigned to it. If no slot is available, an empty
  * constraint is returned and scheduling will eventually fail
  * for this event.
  *
  * Note that all cores attached to the same NB compete for the
  * same counters to host NB events, which is why we use atomic
  * ops. Some multi-chip CPUs may have more than one NB.
  *
  * Given that resources are allocated (cmpxchg), they must be
  * eventually freed for others to use. This is accomplished by
  * calling amd_put_event_constraints().
  *
  * Non-NB events are not impacted by this restriction.
  */
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old = NULL;
	int max = x86_pmu.num_counters;
	int i, j, k = -1;

	/*
	 * if not NB event or no NB, then no constraints
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
		return &unconstrained;

	/*
	 * detect if already present, if so reuse
	 *
	 * cannot merge with actual allocation
	 * because of possible holes
	 *
	 * event can already be present yet not assigned (in hwc->idx)
	 * because of successive calls to x86_schedule_events() from
	 * hw_perf_group_sched_in() without hw_perf_enable()
	 */
	for (i = 0; i < max; i++) {
		/*
		 * keep track of first free slot
		 */
		if (k == -1 && !nb->owners[i])
			k = i;

		/* already present, reuse */
		if (nb->owners[i] == event)
			goto done;
	}
	/*
	 * not present, so grab a new slot
	 * starting either at:
	 */
	if (hwc->idx != -1) {
		/* previous assignment */
		i = hwc->idx;
	} else if (k != -1) {
		/* start from free slot found */
		i = k;
	} else {
		/*
		 * event not found, no slot found in
		 * first pass, try again from the
		 * beginning
		 */
		i = 0;
	}
	j = i;
	do {
		old = cmpxchg(nb->owners+i, NULL, event);
		if (!old)
			break;
		if (++i == max)
			i = 0;
	} while (i != j);
done:
	if (!old)
		return &nb->event_constraints[i];

	return &emptyconstraint;
}
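
/*
 * Worked example (illustrative): two cores on one node both schedule
 * NB event 0x1E0. The first core's cmpxchg() claims nb->owners[0] and
 * is handed nb->event_constraints[0], pinning its event to counter 0.
 * The second core finds owners[0] taken, claims owners[1] instead and
 * is pinned to counter 1, so the shared NB counter is never
 * double-programmed.
 */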

static struct amd_nb *amd_alloc_nb(int cpu)
{
	struct amd_nb *nb;
	int i;

	nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
			  cpu_to_node(cpu));
	if (!nb)
		return NULL;

	nb->nb_id = -1;

	/*
	 * initialize all possible NB constraints
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}

static int amd_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	WARN_ON_ONCE(cpuc->amd_nb);

	if (boot_cpu_data.x86_max_cores < 2)
		return NOTIFY_OK;

	cpuc->amd_nb = amd_alloc_nb(cpu);
	if (!cpuc->amd_nb)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct amd_nb *nb;
	int i, nb_id;

	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;

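	/*
	 * NB bookkeeping is skipped on family 15h: its northbridge
	 * counters are not handled here yet (see the amd_pmu_f15h
	 * definition below).
	 */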
	if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
		return;

	nb_id = amd_get_nb_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		if (nb->nb_id == nb_id) {
			cpuc->kfree_on_online = cpuc->amd_nb;
			cpuc->amd_nb = nb;
			break;
		}
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;
}

static void amd_pmu_cpu_dead(int cpu)
{
	struct cpu_hw_events *cpuhw;

	if (boot_cpu_data.x86_max_cores < 2)
		return;

	cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (cpuhw->amd_nb) {
		struct amd_nb *nb = cpuhw->amd_nb;

		if (nb->nb_id == -1 || --nb->refcnt == 0)
			kfree(nb);

		cpuhw->amd_nb = NULL;
	}
}

static __initconst const struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= AMD64_NUM_COUNTERS,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints,
	.put_event_constraints	= amd_put_event_constraints,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_starting		= amd_pmu_cpu_starting,
	.cpu_dead		= amd_pmu_cpu_dead,
};

/* AMD Family 15h */

#define AMD_EVENT_TYPE_MASK	0x000000F0ULL

#define AMD_EVENT_FP		0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS		0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC		0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU		0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE		0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS		0x000000C0ULL
#define AMD_EVENT_DE		0x000000D0ULL
#define AMD_EVENT_NB		0x000000E0ULL ... 0x000000F0ULL
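
/*
 * Note: the "..." in the definitions above relies on the GCC
 * case-range extension, so e.g. "case AMD_EVENT_FP:" in
 * amd_get_event_constraints_f15h() matches both the 0x000 and
 * 0x010 event types.
 */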

/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000	FP	PERF_CTL[5:3]
 * 0x010	FP	PERF_CTL[5:3]
 * 0x020	LS	PERF_CTL[5:0]
 * 0x030	LS	PERF_CTL[5:0]
 * 0x040	DC	PERF_CTL[5:0]
 * 0x050	DC	PERF_CTL[5:0]
 * 0x060	CU	PERF_CTL[2:0]
 * 0x070	CU	PERF_CTL[2:0]
 * 0x080	IC/DE	PERF_CTL[2:0]
 * 0x090	IC/DE	PERF_CTL[2:0]
 * 0x0A0	---
 * 0x0B0	---
 * 0x0C0	EX/LS	PERF_CTL[5:0]
 * 0x0D0	DE	PERF_CTL[2:0]
 * 0x0E0	NB	NB_PERF_CTL[3:0]
 * 0x0F0	NB	NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003	FP	PERF_CTL[3]
 * 0x004	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B	FP	PERF_CTL[3]
 * 0x00D	FP	PERF_CTL[3]
 * 0x023	DE	PERF_CTL[2:0]
 * 0x02D	LS	PERF_CTL[3]
 * 0x02E	LS	PERF_CTL[3,0]
 * 0x043	CU	PERF_CTL[2:0]
 * 0x045	CU	PERF_CTL[2:0]
 * 0x046	CU	PERF_CTL[2:0]
 * 0x054	CU	PERF_CTL[2:0]
 * 0x055	CU	PERF_CTL[2:0]
 * 0x08F	IC	PERF_CTL[0]
 * 0x187	DE	PERF_CTL[0]
 * 0x188	DE	PERF_CTL[0]
 * 0x0DB	EX	PERF_CTL[5:0]
 * 0x0DC	LS	PERF_CTL[5:0]
 * 0x0DD	LS	PERF_CTL[5:0]
 * 0x0DE	LS	PERF_CTL[5:0]
 * 0x0DF	LS	PERF_CTL[5:0]
 * 0x1D6	EX	PERF_CTL[5:0]
 * 0x1D8	EX	PERF_CTL[5:0]
 *
 * (*) depending on the umask all FPU counters may be used
 */

static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
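
/*
 * The counter masks above decode as: 0x01 = PMC0, 0x07 = PMC[2:0],
 * 0x08 = PMC3, 0x38 = PMC[5:3], 0x3F = PMC[5:0]. amd_f15_PMC30
 * (0x09 = PMC0 + PMC3) needs EVENT_CONSTRAINT_OVERLAP because its
 * counter set overlaps other constraints without being a subset or
 * superset of them, which the scheduler must handle specially.
 */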

static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int event_code = amd_get_event_code(hwc);

	switch (event_code & AMD_EVENT_TYPE_MASK) {
	case AMD_EVENT_FP:
		switch (event_code) {
		case 0x000:
			if (!(hwc->config & 0x0000F000ULL))
				break;
			if (!(hwc->config & 0x00000F00ULL))
				break;
			return &amd_f15_PMC3;
		case 0x004:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				break;
			return &amd_f15_PMC3;
		case 0x003:
		case 0x00B:
		case 0x00D:
			return &amd_f15_PMC3;
		}
		return &amd_f15_PMC53;
	case AMD_EVENT_LS:
	case AMD_EVENT_DC:
	case AMD_EVENT_EX_LS:
		switch (event_code) {
		case 0x023:
		case 0x043:
		case 0x045:
		case 0x046:
		case 0x054:
		case 0x055:
			return &amd_f15_PMC20;
		case 0x02D:
			return &amd_f15_PMC3;
		case 0x02E:
			return &amd_f15_PMC30;
		default:
			return &amd_f15_PMC50;
		}
	case AMD_EVENT_CU:
	case AMD_EVENT_IC_DE:
	case AMD_EVENT_DE:
		switch (event_code) {
		case 0x08F:
		case 0x187:
		case 0x188:
			return &amd_f15_PMC0;
		case 0x0DB ... 0x0DF:
		case 0x1D6:
		case 0x1D8:
			return &amd_f15_PMC50;
		default:
			return &amd_f15_PMC20;
		}
	case AMD_EVENT_NB:
		/* not yet implemented */
		return &emptyconstraint;
	default:
		return &emptyconstraint;
	}
}

static __initconst const struct x86_pmu amd_pmu_f15h = {
	.name			= "AMD Family 15h",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_F15H_PERF_CTL,
	.perfctr		= MSR_F15H_PERF_CTR,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= AMD64_NUM_COUNTERS_F15H,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints_f15h,
	/* northbridge counters not yet implemented: */
#if 0
	.put_event_constraints	= amd_put_event_constraints,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_dead		= amd_pmu_cpu_dead,
#endif
	.cpu_starting		= amd_pmu_cpu_starting,
};

__init int amd_pmu_init(void)
{
	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	/*
	 * If the core performance counter extensions exist, it must be
	 * family 15h, otherwise fail. See x86_pmu_addr_offset().
	 */
	switch (boot_cpu_data.x86) {
	case 0x15:
		if (!cpu_has_perfctr_core)
			return -ENODEV;
		x86_pmu = amd_pmu_f15h;
		break;
	default:
		if (cpu_has_perfctr_core)
			return -ENODEV;
		x86_pmu = amd_pmu;
		break;
	}

	/* Events are common for all AMDs */
	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
	       sizeof(hw_cache_event_ids));

	return 0;
}

void amd_pmu_enable_virt(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	cpuc->perf_ctr_virt_mask = 0;

	/* Reload all events */
	x86_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);

void amd_pmu_disable_virt(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * We only mask out the Host-only bit so that host-only counting works
	 * when SVM is disabled. If someone sets up a guest-only counter when
	 * SVM is disabled the Guest-only bit still gets set and the counter
	 * will not count anything.
	 */
	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;

	/* Reload all events */
	x86_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
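
/*
 * Both helpers above are exported for the host SVM code (kvm-amd),
 * which is expected to call amd_pmu_enable_virt() when hardware
 * virtualization is switched on and amd_pmu_disable_virt() when it
 * is switched off again.
 */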