UPSTREAM: arm64: perf: Convert event enums to #defines
[firefly-linux-kernel-4.4.55.git] arch/arm64/kernel/perf_event.c
/*
 * PMU support
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <asm/irq_regs.h>

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

/*
 * ARMv8 PMUv3 Performance Events handling code.
 * Common event types.
 */

/* Required events. */
#define ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR                        0x00
#define ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL                    0x03
#define ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS                    0x04
#define ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED                  0x10
#define ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES                        0x11
#define ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED                      0x12

/* At least one of the following is required. */
#define ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED                      0x08
#define ARMV8_PMUV3_PERFCTR_OP_SPEC                             0x1B

/* Common architectural events. */
#define ARMV8_PMUV3_PERFCTR_MEM_READ                            0x06
#define ARMV8_PMUV3_PERFCTR_MEM_WRITE                           0x07
#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN                           0x09
#define ARMV8_PMUV3_PERFCTR_EXC_EXECUTED                        0x0A
#define ARMV8_PMUV3_PERFCTR_CID_WRITE                           0x0B
#define ARMV8_PMUV3_PERFCTR_PC_WRITE                            0x0C
#define ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH                       0x0D
#define ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN                      0x0E
#define ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS                0x0F
#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE                          0x1C

/* Common microarchitectural events. */
#define ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL                    0x01
#define ARMV8_PMUV3_PERFCTR_ITLB_REFILL                         0x02
#define ARMV8_PMUV3_PERFCTR_DTLB_REFILL                         0x05
#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS                          0x13
#define ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS                    0x14
#define ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB                        0x15
#define ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS                     0x16
#define ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL                     0x17
#define ARMV8_PMUV3_PERFCTR_L2_CACHE_WB                         0x18
#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS                          0x19
#define ARMV8_PMUV3_PERFCTR_MEM_ERROR                           0x1A
#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES                          0x1D

/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREFETCH_LINEFILL                     0xC2

/* ARMv8 Cortex-A57 specific event types. */
#define ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD                   0x40
#define ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST                   0x41
#define ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD                   0x42
#define ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST                   0x43
#define ARMV8_A57_PERFCTR_DTLB_REFILL_LD                        0x4C
#define ARMV8_A57_PERFCTR_DTLB_REFILL_ST                        0x4D

/* PMUv3 HW events mapping. */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};

/* ARM Cortex-A53 HW events mapping. */
static const unsigned armv8_a53_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV8_PMUV3_PERFCTR_PC_WRITE,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES]              = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
};

/* ARM Cortex-A57 HW events mapping. */
static const unsigned armv8_a57_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES]              = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
};

static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,

        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};

static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                              [PERF_COUNT_HW_CACHE_OP_MAX]
                                              [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREFETCH_LINEFILL,

        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,

        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV8_PMUV3_PERFCTR_ITLB_REFILL,

        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};

static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                              [PERF_COUNT_HW_CACHE_OP_MAX]
                                              [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST,

        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,

        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV8_A57_PERFCTR_DTLB_REFILL_LD,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV8_A57_PERFCTR_DTLB_REFILL_ST,

        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV8_PMUV3_PERFCTR_ITLB_REFILL,

        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Perf Events' indices
 */
#define ARMV8_IDX_CYCLE_COUNTER 0
#define ARMV8_IDX_COUNTER0      1
#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
        (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define ARMV8_MAX_COUNTERS      32
#define ARMV8_COUNTER_MASK      (ARMV8_MAX_COUNTERS - 1)

/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV8_IDX_TO_COUNTER(x) \
        (((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)
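/*
 * Note the off-by-one mapping: perf idx 1 (ARMV8_IDX_COUNTER0) drives
 * hardware event counter 0. For the cycle counter (idx 0) the macro
 * yields (0 - 1) & 0x1f == 31, which matches the architected cycle
 * counter bit (bit 31) in PMCNTENSET/PMCNTENCLR and PMINTENSET/
 * PMINTENCLR, so the BIT(counter) based helpers below also handle
 * ARMV8_IDX_CYCLE_COUNTER without a special case.
 */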

/*
 * Per-CPU PMCR: config reg
 */
#define ARMV8_PMCR_E            (1 << 0) /* Enable all counters */
#define ARMV8_PMCR_P            (1 << 1) /* Reset all counters */
#define ARMV8_PMCR_C            (1 << 2) /* Cycle counter reset */
#define ARMV8_PMCR_D            (1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV8_PMCR_X            (1 << 4) /* Export to ETM */
#define ARMV8_PMCR_DP           (1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV8_PMCR_N_SHIFT      11       /* Number of counters supported */
#define ARMV8_PMCR_N_MASK       0x1f
#define ARMV8_PMCR_MASK         0x3f     /* Mask for writable bits */
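/*
 * Only bits [5:0] (E, P, C, D, X, DP) are writable through the mask
 * above; the N field (bits [15:11]) is read-only and reports the number
 * of event counters implemented.
 */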

/*
 * PMOVSR: counters overflow flag status reg
 */
#define ARMV8_OVSR_MASK         0xffffffff      /* Mask for writable bits */
#define ARMV8_OVERFLOWED_MASK   ARMV8_OVSR_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define ARMV8_EVTYPE_MASK       0xc80003ff      /* Mask for writable bits */
#define ARMV8_EVTYPE_EVENT      0x3ff           /* Mask for EVENT bits */
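/*
 * ARMV8_EVTYPE_MASK is the OR of the filter bits defined below and the
 * event number field: (1 << 31) | (1 << 30) | (1 << 27) | 0x3ff
 * == 0xc80003ff.
 */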

/*
 * Event filters for PMUv3
 */
#define ARMV8_EXCLUDE_EL1       (1 << 31)
#define ARMV8_EXCLUDE_EL0       (1 << 30)
#define ARMV8_INCLUDE_EL2       (1 << 27)

static inline u32 armv8pmu_pmcr_read(void)
{
        u32 val;
        asm volatile("mrs %0, pmcr_el0" : "=r" (val));
        return val;
}

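/*
 * The isb() before the PMCR write ensures that instructions up to this
 * point have completed, so enabling or disabling the counters takes
 * effect at a well-defined point in the instruction stream.
 */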
static inline void armv8pmu_pmcr_write(u32 val)
{
        val &= ARMV8_PMCR_MASK;
        isb();
        asm volatile("msr pmcr_el0, %0" :: "r" (val));
}

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
        return pmovsr & ARMV8_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
        return idx >= ARMV8_IDX_CYCLE_COUNTER &&
                idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
        return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}

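/*
 * PMSELR_EL0 selects which event counter the PMXEVCNTR_EL0 and
 * PMXEVTYPER_EL0 accessors refer to; the isb() makes the new selection
 * visible to the counter accesses that follow.
 */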
static inline int armv8pmu_select_counter(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        asm volatile("msr pmselr_el0, %0" :: "r" (counter));
        isb();

        return idx;
}

static inline u32 armv8pmu_read_counter(struct perf_event *event)
{
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
        u32 value = 0;

        if (!armv8pmu_counter_valid(cpu_pmu, idx))
                pr_err("CPU%u reading wrong counter %d\n",
                        smp_processor_id(), idx);
        else if (idx == ARMV8_IDX_CYCLE_COUNTER)
                asm volatile("mrs %0, pmccntr_el0" : "=r" (value));
        else if (armv8pmu_select_counter(idx) == idx)
                asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));

        return value;
}

static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
{
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        if (!armv8pmu_counter_valid(cpu_pmu, idx))
                pr_err("CPU%u writing wrong counter %d\n",
                        smp_processor_id(), idx);
        else if (idx == ARMV8_IDX_CYCLE_COUNTER)
                asm volatile("msr pmccntr_el0, %0" :: "r" (value));
        else if (armv8pmu_select_counter(idx) == idx)
                asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
}

static inline void armv8pmu_write_evtype(int idx, u32 val)
{
        if (armv8pmu_select_counter(idx) == idx) {
                val &= ARMV8_EVTYPE_MASK;
                asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
        }
}

static inline int armv8pmu_enable_counter(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));
        return idx;
}

static inline int armv8pmu_disable_counter(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));
        return idx;
}

static inline int armv8pmu_enable_intens(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));
        return idx;
}

static inline int armv8pmu_disable_intens(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
        isb();
        /* Clear the overflow flag in case an interrupt is pending. */
        asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));
        isb();

        return idx;
}

static inline u32 armv8pmu_getreset_flags(void)
{
        u32 value;

        /* Read */
        asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));

        /* Write to clear flags */
        value &= ARMV8_OVSR_MASK;
        asm volatile("msr pmovsclr_el0, %0" :: "r" (value));

        return value;
}

static void armv8pmu_enable_event(struct perf_event *event)
{
        unsigned long flags;
        struct hw_perf_event *hwc = &event->hw;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        /*
         * Enable counter and interrupt, and set the counter to count
         * the event that we're interested in.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /*
         * Disable counter
         */
        armv8pmu_disable_counter(idx);

        /*
         * Set event (if destined for PMNx counters).
         */
        armv8pmu_write_evtype(idx, hwc->config_base);

        /*
         * Enable interrupt for this counter
         */
        armv8pmu_enable_intens(idx);

        /*
         * Enable counter
         */
        armv8pmu_enable_counter(idx);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_disable_event(struct perf_event *event)
{
        unsigned long flags;
        struct hw_perf_event *hwc = &event->hw;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        /*
         * Disable counter and interrupt
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /*
         * Disable counter
         */
        armv8pmu_disable_counter(idx);

        /*
         * Disable interrupt for this counter
         */
        armv8pmu_disable_intens(idx);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
{
        u32 pmovsr;
        struct perf_sample_data data;
        struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
        struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
        struct pt_regs *regs;
        int idx;

        /*
         * Get and reset the IRQ flags
         */
        pmovsr = armv8pmu_getreset_flags();

        /*
         * Did an overflow occur?
         */
        if (!armv8pmu_has_overflowed(pmovsr))
                return IRQ_NONE;

        /*
         * Handle the counter(s) overflow(s)
         */
        regs = get_irq_regs();

        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;

                /* Ignore if we don't have an event. */
                if (!event)
                        continue;

                /*
                 * We have a single interrupt for all counters. Check that
                 * each counter has overflowed before we process it.
                 */
                if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
                        continue;

                hwc = &event->hw;
                armpmu_event_update(event);
                perf_sample_data_init(&data, 0, hwc->last_period);
                if (!armpmu_event_set_period(event))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        cpu_pmu->disable(event);
        }

        /*
         * Handle the pending perf events.
         *
         * Note: this call *must* be run with interrupts disabled. For
         * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        irq_work_run();

        return IRQ_HANDLED;
}

static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
        unsigned long flags;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        /* Enable all counters */
        armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
        unsigned long flags;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        /* Disable all counters */
        armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
                                  struct perf_event *event)
{
        int idx;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        unsigned long evtype = hwc->config_base & ARMV8_EVTYPE_EVENT;

        /* Always place a cycle-counting event into the cycle counter. */
        if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
                if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
                        return -EAGAIN;

                return ARMV8_IDX_CYCLE_COUNTER;
        }

        /*
         * For anything other than a cycle-counting event, try to use
         * the event counters.
         */
        for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
                if (!test_and_set_bit(idx, cpuc->used_mask))
                        return idx;
        }

        /* The counters are all in use. */
        return -EAGAIN;
}

/*
 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
                                     struct perf_event_attr *attr)
{
        unsigned long config_base = 0;

        if (attr->exclude_idle)
                return -EPERM;
        if (attr->exclude_user)
                config_base |= ARMV8_EXCLUDE_EL0;
        if (attr->exclude_kernel)
                config_base |= ARMV8_EXCLUDE_EL1;
        if (!attr->exclude_hv)
                config_base |= ARMV8_INCLUDE_EL2;

        /*
         * Install the filter into config_base as this is used to
         * construct the event type.
         */
        event->config_base = config_base;

        return 0;
}
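/*
 * For example (assuming standard perf tool behaviour), "perf stat -e
 * cycles:u" sets attr.exclude_kernel, which becomes ARMV8_EXCLUDE_EL1
 * here; the core arm_pmu code then ORs the event number into
 * config_base, and armv8pmu_write_evtype() writes the combined value.
 */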

static void armv8pmu_reset(void *info)
{
        struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
        u32 idx, nb_cnt = cpu_pmu->num_events;

        /* The counter and interrupt enable registers are unknown at reset. */
        for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
                armv8pmu_disable_counter(idx);
                armv8pmu_disable_intens(idx);
        }

        /* Initialize & Reset PMNC: C and P bits. */
        armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
}

static int armv8_pmuv3_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv8_pmuv3_perf_map,
                                &armv8_pmuv3_perf_cache_map,
                                ARMV8_EVTYPE_EVENT);
}

static int armv8_a53_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv8_a53_perf_map,
                                &armv8_a53_perf_cache_map,
                                ARMV8_EVTYPE_EVENT);
}

static int armv8_a57_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv8_a57_perf_map,
                                &armv8_a57_perf_cache_map,
                                ARMV8_EVTYPE_EVENT);
}

static void armv8pmu_read_num_pmnc_events(void *info)
{
        int *nb_cnt = info;

        /* Read the number of CNTx counters supported from PMNC. */
        *nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;

        /* Add the CPU cycles counter. */
        *nb_cnt += 1;
}

static int armv8pmu_probe_num_events(struct arm_pmu *arm_pmu)
{
        return smp_call_function_any(&arm_pmu->supported_cpus,
                                    armv8pmu_read_num_pmnc_events,
                                    &arm_pmu->num_events, 1);
}

static void armv8_pmu_init(struct arm_pmu *cpu_pmu)
{
        cpu_pmu->handle_irq             = armv8pmu_handle_irq;
        cpu_pmu->enable                 = armv8pmu_enable_event;
        cpu_pmu->disable                = armv8pmu_disable_event;
        cpu_pmu->read_counter           = armv8pmu_read_counter;
        cpu_pmu->write_counter          = armv8pmu_write_counter;
        cpu_pmu->get_event_idx          = armv8pmu_get_event_idx;
        cpu_pmu->start                  = armv8pmu_start;
        cpu_pmu->stop                   = armv8pmu_stop;
        cpu_pmu->reset                  = armv8pmu_reset;
        cpu_pmu->max_period             = (1LLU << 32) - 1;    /* counters are 32 bits wide */
        cpu_pmu->set_event_filter       = armv8pmu_set_event_filter;
}

static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
{
        armv8_pmu_init(cpu_pmu);
        cpu_pmu->name                   = "armv8_pmuv3";
        cpu_pmu->map_event              = armv8_pmuv3_map_event;
        return armv8pmu_probe_num_events(cpu_pmu);
}

static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv8_pmu_init(cpu_pmu);
        cpu_pmu->name                   = "armv8_cortex_a53";
        cpu_pmu->map_event              = armv8_a53_map_event;
        return armv8pmu_probe_num_events(cpu_pmu);
}

static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv8_pmu_init(cpu_pmu);
        cpu_pmu->name                   = "armv8_cortex_a57";
        cpu_pmu->map_event              = armv8_a57_map_event;
        return armv8pmu_probe_num_events(cpu_pmu);
}

static const struct of_device_id armv8_pmu_of_device_ids[] = {
        {.compatible = "arm,armv8-pmuv3",       .data = armv8_pmuv3_init},
        {.compatible = "arm,cortex-a53-pmu",    .data = armv8_a53_pmu_init},
        {.compatible = "arm,cortex-a57-pmu",    .data = armv8_a57_pmu_init},
        {},
};
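/*
 * A hypothetical devicetree node matched by the table above (the
 * interrupt values are illustrative only):
 *
 *      pmu {
 *              compatible = "arm,cortex-a53-pmu";
 *              interrupts = <1 7 0xf04>;
 *      };
 */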

static int armv8_pmu_device_probe(struct platform_device *pdev)
{
        return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}

static struct platform_driver armv8_pmu_driver = {
        .driver         = {
                .name   = "armv8-pmu",
                .of_match_table = armv8_pmu_of_device_ids,
        },
        .probe          = armv8_pmu_device_probe,
};

static int __init register_armv8_pmu_driver(void)
{
        return platform_driver_register(&armv8_pmu_driver);
}
device_initcall(register_armv8_pmu_driver);