#ifdef CONFIG_CPU_SUP_INTEL

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS		4

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

#define BTS_BUFFER_SIZE		(PAGE_SIZE << 4)
#define PEBS_BUFFER_SIZE	PAGE_SIZE

/*
 * pebs_record_32 for p4 and core not supported

struct pebs_record_32 {
	u32 flags, ip;
	u32 ax, bx, cx, dx;
	u32 si, di, bp, sp;
};

 */

struct pebs_record_core {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
};

struct pebs_record_nhm {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
};

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};

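/*
 * Program this CPU's DS_AREA MSR with the address of its debug store,
 * if one has been allocated.
 */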
static void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

static void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

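/*
 * Allocate a node-local PEBS buffer for @cpu and wire up the base,
 * index, maximum and interrupt threshold in the debug store.  The
 * threshold is one record, so every PEBS write raises an interrupt.
 */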
static int alloc_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh = 1; /* always use a single PEBS record */
	void *buffer;

	if (!x86_pmu.pebs)
		return 0;

	buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;

	ds->pebs_buffer_base = (u64)(unsigned long)buffer;
	ds->pebs_index = ds->pebs_buffer_base;
	ds->pebs_absolute_maximum = ds->pebs_buffer_base +
		max * x86_pmu.pebs_record_size;

	ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
		thresh * x86_pmu.pebs_record_size;

	return 0;
}

static void release_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.pebs)
		return;

	kfree((void *)(unsigned long)ds->pebs_buffer_base);
	ds->pebs_buffer_base = 0;
}

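/*
 * Allocate a node-local BTS buffer for @cpu.  The interrupt threshold
 * is placed a sixteenth of the buffer short of the absolute maximum so
 * there is still room left while the buffer is being drained.
 */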
static int alloc_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh;
	void *buffer;

	if (!x86_pmu.bts)
		return 0;

	buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
	thresh = max / 16;

	ds->bts_buffer_base = (u64)(unsigned long)buffer;
	ds->bts_index = ds->bts_buffer_base;
	ds->bts_absolute_maximum = ds->bts_buffer_base +
		max * BTS_RECORD_SIZE;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
		thresh * BTS_RECORD_SIZE;

	return 0;
}

static void release_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.bts)
		return;

	kfree((void *)(unsigned long)ds->bts_buffer_base);
	ds->bts_buffer_base = 0;
}

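/*
 * Allocate the debug store descriptor itself and hook it up to the
 * per-cpu state.
 */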
static int alloc_ds_buffer(int cpu)
{
	int node = cpu_to_node(cpu);
	struct debug_store *ds;

	ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!ds))
		return -ENOMEM;

	per_cpu(cpu_hw_events, cpu).ds = ds;

	return 0;
}

static void release_ds_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	per_cpu(cpu_hw_events, cpu).ds = NULL;
	kfree(ds);
}

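/*
 * Tear down all DS state: clear the DS_AREA MSR on every online CPU,
 * then free the BTS/PEBS buffers and the descriptors.
 */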
static void release_ds_buffers(void)
{
	int cpu;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	get_online_cpus();
	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		release_pebs_buffer(cpu);
		release_bts_buffer(cpu);
		release_ds_buffer(cpu);
	}
	put_online_cpus();
}

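/*
 * Allocate DS, BTS and PEBS buffers for all possible CPUs.  A facility
 * is disabled wholesale if any of its allocations fails; the DS_AREA
 * MSR is only programmed when at least one facility survives.
 */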
static void reserve_ds_buffers(void)
{
	int bts_err = 0, pebs_err = 0;
	int cpu;

	x86_pmu.bts_active = 0;
	x86_pmu.pebs_active = 0;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	if (!x86_pmu.bts)
		bts_err = 1;

	if (!x86_pmu.pebs)
		pebs_err = 1;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {
			bts_err = 1;
			pebs_err = 1;
		}

		if (!bts_err && alloc_bts_buffer(cpu))
			bts_err = 1;

		if (!pebs_err && alloc_pebs_buffer(cpu))
			pebs_err = 1;

		if (bts_err && pebs_err)
			break;
	}

	if (bts_err) {
		for_each_possible_cpu(cpu)
			release_bts_buffer(cpu);
	}

	if (pebs_err) {
		for_each_possible_cpu(cpu)
			release_pebs_buffer(cpu);
	}

	if (bts_err && pebs_err) {
		for_each_possible_cpu(cpu)
			release_ds_buffer(cpu);
	} else {
		if (x86_pmu.bts && !bts_err)
			x86_pmu.bts_active = 1;

		if (x86_pmu.pebs && !pebs_err)
			x86_pmu.pebs_active = 1;

		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();
}

/*
 * BTS
 */

static struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);

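/*
 * Enable branch tracing: set TR, BTS and BTINT in DEBUGCTL and suppress
 * tracing for the privilege levels the event does not count.
 */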
static void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= DEBUGCTLMSR_TR;
	debugctlmsr |= DEBUGCTLMSR_BTS;
	debugctlmsr |= DEBUGCTLMSR_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}

static void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}

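/*
 * Drain the BTS buffer: emit one sample per from/to branch record and
 * rewind the buffer index.  Returns non-zero when there was data to
 * drain.
 */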
static int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64	from;
		u64	to;
		u64	flags;
	};
	struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= at)
		return 0;

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;
	regs.ip     = 0;

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1))
		return 1;

	for (; at < top; at++) {
		data.ip		= at->from;
		data.addr	= at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
	return 1;
}

/*
 * PEBS
 */
static struct event_constraint intel_core2_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	INTEL_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_atom_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_pebs_event_constraints[] = {
	INTEL_EVENT_CONSTRAINT(0x0b, 0xf),    /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
	INTEL_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_westmere_pebs_event_constraints[] = {
	INTEL_EVENT_CONSTRAINT(0x0b, 0xf),    /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_pebs_events[] = {
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8),    /* MEM_TRANS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */
	INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */
	INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */
	INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
	EVENT_CONSTRAINT_END
};

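/*
 * Return the PEBS constraint matching @event, the empty constraint if
 * no counter may be used, or NULL when the event is not precise and
 * the normal constraints apply.
 */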
static struct event_constraint *
intel_pebs_constraints(struct perf_event *event)
{
	struct event_constraint *c;

	if (!event->attr.precise_ip)
		return NULL;

	if (x86_pmu.pebs_constraints) {
		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &emptyconstraint;
}

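/*
 * Mark the event's counter in pebs_enabled and drop the PMI bit from
 * its config; completion is signalled through the DS area instead.
 * Trap-like PEBS with precise_ip > 1 also needs the LBR for the
 * instruction rewind in intel_pmu_pebs_fixup_ip().
 */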
static void intel_pmu_pebs_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

	cpuc->pebs_enabled |= 1ULL << hwc->idx;
	WARN_ON_ONCE(cpuc->enabled);

	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		intel_pmu_lbr_enable(event);
}

static void intel_pmu_pebs_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
	if (cpuc->enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;

	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		intel_pmu_lbr_disable(event);
}

static void intel_pmu_pebs_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}

static void intel_pmu_pebs_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}

#include <asm/insn.h>

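/*
 * Distinguish kernel from user addresses: above PAGE_OFFSET on 32-bit,
 * negative when cast to long on 64-bit.
 */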
static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}

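/*
 * With a trap-like PEBS assist the record contains the address of the
 * instruction after the one that caused the event.  Start from the
 * last LBR target, decode forward through the basic block and rewind
 * regs->ip to the instruction preceding the sampled IP.  Returns 1
 * when the exact instruction was recovered.
 */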
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long from = cpuc->lbr_entries[0].from;
	unsigned long old_to, to = cpuc->lbr_entries[0].to;
	unsigned long ip = regs->ip;
	int is_64bit = 0;

	/*
	 * We don't need to fixup if the PEBS assist is fault like
	 */
	if (!x86_pmu.intel_cap.pebs_trap)
		return 1;

	/*
	 * No LBR entry, no basic block, no rewinding
	 */
	if (!cpuc->lbr_stack.nr || !from || !to)
		return 0;

	/*
	 * Basic blocks should never cross user/kernel boundaries
	 */
	if (kernel_ip(ip) != kernel_ip(to))
		return 0;

	/*
	 * unsigned math, either ip is before the start (impossible) or
	 * the basic block is larger than 1 page (sanity)
	 */
	if ((ip - to) > PAGE_SIZE)
		return 0;

	/*
	 * We sampled a branch insn, rewind using the LBR stack
	 */
	if (ip == to) {
		regs->ip = from;
		return 1;
	}

	do {
		struct insn insn;
		u8 buf[MAX_INSN_SIZE];
		void *kaddr;

		old_to = to;
		if (!kernel_ip(ip)) {
			int bytes, size = MAX_INSN_SIZE;

			bytes = copy_from_user_nmi(buf, (void __user *)to, size);
			if (bytes != size)
				return 0;

			kaddr = buf;
		} else
			kaddr = (void *)to;

#ifdef CONFIG_X86_64
		is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
#endif
		insn_init(&insn, kaddr, is_64bit);
		insn_get_length(&insn);
		to += insn.length;
	} while (to < ip);

	if (to == ip) {
		regs->ip = old_to;
		return 1;
	}

	/*
	 * Even though we decoded the basic block, the instruction stream
	 * never matched the given IP, either the TO or the IP got corrupted.
	 */
	return 0;
}

static int intel_pmu_save_and_restart(struct perf_event *event);

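/*
 * Convert a single PEBS record into a perf sample: rebuild pt_regs
 * from the interrupt regs and the record's IP/BP/SP, try the precise
 * IP fixup, and hand the sample to the generic overflow handler.
 */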
static void __intel_pmu_pebs_event(struct perf_event *event,
				   struct pt_regs *iregs, void *__pebs)
{
	/*
	 * We cast to pebs_record_core since that is a subset of
	 * both formats and we don't use the other fields in this
	 * routine.
	 */
	struct pebs_record_core *pebs = __pebs;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!intel_pmu_save_and_restart(event))
		return;

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;

	/*
	 * We use the interrupt regs as a base because the PEBS record
	 * does not contain a full regs set, specifically it seems to
	 * lack segment descriptors, which get used by things like
	 * user_mode().
	 *
	 * In the simple case fix up only the IP and BP,SP regs, for
	 * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
	 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
	 */
	regs = *iregs;
	regs.ip = pebs->ip;
	regs.bp = pebs->bp;
	regs.sp = pebs->sp;

	if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
		regs.flags |= PERF_EFLAGS_EXACT;
	else
		regs.flags &= ~PERF_EFLAGS_EXACT;

	if (perf_event_overflow(event, 1, &data, &regs))
		x86_pmu_stop(event, 0);
}

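/*
 * Drain the PEBS buffer for format-0 (Core) parts: only PMC0 can carry
 * a PEBS event, and with the threshold at one record at most a single
 * entry is expected.
 */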
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
	struct pebs_record_core *at, *top;
	int n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;

	/*
	 * Whatever else happens, drain the thing
	 */
	ds->pebs_index = ds->pebs_buffer_base;

	if (!test_bit(0, cpuc->active_mask))
		return;

	WARN_ON_ONCE(!event);

	if (!event->attr.precise_ip)
		return;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ON_ONCE(n > 1);
	at += n - 1;

	__intel_pmu_pebs_event(event, iregs, at);
}

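/*
 * Drain the PEBS buffer for format-1 (Nehalem and later) parts: each
 * record carries a status bitmask that is matched against the active
 * precise events, and every counter is credited with at most one
 * record per drain.
 */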
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct pebs_record_nhm *at, *top;
	struct perf_event *event = NULL;
	u64 status = 0;
	int bit, n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ON_ONCE(n > MAX_PEBS_EVENTS);

	for ( ; at < top; at++) {
		for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
			event = cpuc->events[bit];
			if (!test_bit(bit, cpuc->active_mask))
				continue;

			WARN_ON_ONCE(!event);

			if (!event->attr.precise_ip)
				continue;

			if (__test_and_set_bit(bit, (unsigned long *)&status))
				continue;

			break;
		}

		if (!event || bit >= MAX_PEBS_EVENTS)
			continue;

		__intel_pmu_pebs_event(event, iregs, at);
	}
}

/*
 * BTS, PEBS probe and setup
 */

static void intel_ds_init(void)
{
	/*
	 * No support for 32bit formats
	 */
	if (!boot_cpu_has(X86_FEATURE_DTES64))
		return;

	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
	if (x86_pmu.pebs) {
		char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
		int format = x86_pmu.intel_cap.pebs_format;

		switch (format) {
		case 0:
			printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
			break;

		case 1:
			printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		default:
			printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
			x86_pmu.pebs = 0;
		}
	}
}

#else /* CONFIG_CPU_SUP_INTEL */

static void reserve_ds_buffers(void)
{
}

static void release_ds_buffers(void)
{
}

#endif /* CONFIG_CPU_SUP_INTEL */