/*
 * PMU support
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <asm/irq_regs.h>

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
/*
 * ARMv8 PMUv3 Performance Events handling code.
 * Common event types.
 */
/* Required events. */
#define ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR		0x00
#define ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL		0x03
#define ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS		0x04
#define ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED		0x10
#define ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES		0x11
#define ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED		0x12

/* At least one of the following is required. */
#define ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED		0x08
#define ARMV8_PMUV3_PERFCTR_OP_SPEC			0x1B

/* Common architectural events. */
#define ARMV8_PMUV3_PERFCTR_MEM_READ			0x06
#define ARMV8_PMUV3_PERFCTR_MEM_WRITE			0x07
#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN			0x09
#define ARMV8_PMUV3_PERFCTR_EXC_EXECUTED		0x0A
#define ARMV8_PMUV3_PERFCTR_CID_WRITE			0x0B
#define ARMV8_PMUV3_PERFCTR_PC_WRITE			0x0C
#define ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH		0x0D
#define ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN		0x0E
#define ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS	0x0F
#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE			0x1C

/* Common microarchitectural events. */
#define ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL		0x01
#define ARMV8_PMUV3_PERFCTR_ITLB_REFILL			0x02
#define ARMV8_PMUV3_PERFCTR_DTLB_REFILL			0x05
#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS			0x13
#define ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS		0x14
#define ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB		0x15
#define ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS		0x16
#define ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL		0x17
#define ARMV8_PMUV3_PERFCTR_L2_CACHE_WB			0x18
#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS			0x19
#define ARMV8_PMUV3_PERFCTR_MEM_ERROR			0x1A
#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES			0x1D
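/*
 * Note (illustrative): encodings that have no entry in the event maps
 * below can still be counted as raw events, e.g. "perf stat -e r19"
 * for BUS_ACCESS; the raw config value is masked with
 * ARMV8_EVTYPE_EVENT by armpmu_map_event().
 */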
/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREFETCH_LINEFILL		0xC2

/* ARMv8 Cortex-A57 specific event types. */
#define ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD		0x40
#define ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST		0x41
#define ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD		0x42
#define ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST		0x43
#define ARMV8_A57_PERFCTR_DTLB_REFILL_LD		0x4c
#define ARMV8_A57_PERFCTR_DTLB_REFILL_ST		0x4d
/* PMUv3 HW events mapping. */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};
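/*
 * For example (illustrative): "perf stat -e cycles" requests
 * PERF_COUNT_HW_CPU_CYCLES, which the table above resolves to the
 * architectural CLOCK_CYCLES encoding (0x11). Generic events with no
 * entry stay marked unsupported via PERF_MAP_ALL_UNSUPPORTED and are
 * rejected at event open time.
 */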
/* ARM Cortex-A53 HW events mapping. */
static const unsigned armv8_a53_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV8_PMUV3_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
};
/* ARM Cortex-A57 HW events mapping. */
static const unsigned armv8_a57_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
};
static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};
static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREFETCH_LINEFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};
static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_DTLB_REFILL_LD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_DTLB_REFILL_ST,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Perf Events' indices
 */
#define	ARMV8_IDX_CYCLE_COUNTER	0
#define	ARMV8_IDX_COUNTER0	1
#define	ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
	(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define	ARMV8_MAX_COUNTERS	32
#define	ARMV8_COUNTER_MASK	(ARMV8_MAX_COUNTERS - 1)
/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define	ARMV8_IDX_TO_COUNTER(x)	\
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)
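/*
 * Worked example: perf index 1 (ARMV8_IDX_COUNTER0) maps to hardware
 * event counter 0, index 2 to counter 1, and so on. The cycle counter
 * (index 0) never goes through this macro; it is accessed directly
 * via PMCCNTR_EL0.
 */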
/*
 * Per-CPU PMCR: config reg
 */
#define ARMV8_PMCR_E		(1 << 0) /* Enable all counters */
#define ARMV8_PMCR_P		(1 << 1) /* Reset all counters */
#define ARMV8_PMCR_C		(1 << 2) /* Cycle counter reset */
#define ARMV8_PMCR_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV8_PMCR_X		(1 << 4) /* Export to ETM */
#define ARMV8_PMCR_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define	ARMV8_PMCR_N_SHIFT	11	 /* Number of counters supported */
#define	ARMV8_PMCR_N_MASK	0x1f
#define	ARMV8_PMCR_MASK		0x3f	 /* Mask for writable bits */
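/*
 * For example, the number of event counters implemented on this CPU is
 * read out of PMCR_EL0.N as
 * (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK,
 * which is exactly what armv8pmu_read_num_pmnc_events() does below.
 */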
/*
 * PMOVSR: counters overflow flag status reg
 */
#define	ARMV8_OVSR_MASK		0xffffffff	/* Mask for writable bits */
#define	ARMV8_OVERFLOWED_MASK	ARMV8_OVSR_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define	ARMV8_EVTYPE_MASK	0xc80003ff	/* Mask for writable bits */
#define	ARMV8_EVTYPE_EVENT	0x3ff		/* Mask for EVENT bits */
/*
 * Event filters for PMUv3
 */
#define	ARMV8_EXCLUDE_EL1	(1 << 31)
#define	ARMV8_EXCLUDE_EL0	(1 << 30)
#define	ARMV8_INCLUDE_EL2	(1 << 27)
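/*
 * Example (illustrative): an event opened with attr.exclude_kernel = 1
 * has ARMV8_EXCLUDE_EL1 or'd into its config_base by
 * armv8pmu_set_event_filter() below, so the value ultimately written
 * to PMXEVTYPER_EL0 is the event number with the filter bits on top,
 * e.g. 0x80000011 for CLOCK_CYCLES filtered out of EL1.
 */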
static inline u32 armv8pmu_pmcr_read(void)
{
	u32 val;
	asm volatile("mrs %0, pmcr_el0" : "=r" (val));
	return val;
}

static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMCR_MASK;
	isb();
	asm volatile("msr pmcr_el0, %0" :: "r" (val));
}

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
	return pmovsr & ARMV8_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
	return idx >= ARMV8_IDX_CYCLE_COUNTER &&
		idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}

static inline int armv8pmu_select_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmselr_el0, %0" :: "r" (counter));
	isb();

	return idx;
}
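/*
 * Accesses to PMXEVCNTR_EL0 and PMXEVTYPER_EL0 are indirected through
 * the counter selected in PMSELR_EL0, hence the select-then-access
 * pattern used by the read/write helpers below; the isb() above
 * ensures the selection has taken effect before the indirect access.
 */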
static inline u32 armv8pmu_read_counter(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u32 value = 0;

	if (!armv8pmu_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
		asm volatile("mrs %0, pmccntr_el0" : "=r" (value));
	else if (armv8pmu_select_counter(idx) == idx)
		asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));

	return value;
}

static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv8pmu_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
		asm volatile("msr pmccntr_el0, %0" :: "r" (value));
	else if (armv8pmu_select_counter(idx) == idx)
		asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
}

static inline void armv8pmu_write_evtype(int idx, u32 val)
{
	if (armv8pmu_select_counter(idx) == idx) {
		val &= ARMV8_EVTYPE_MASK;
		asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
	}
}
static inline int armv8pmu_enable_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));
	return idx;
}

static inline int armv8pmu_disable_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));
	return idx;
}

static inline int armv8pmu_enable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));
	return idx;
}

static inline int armv8pmu_disable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));
	isb();

	return idx;
}

static inline u32 armv8pmu_getreset_flags(void)
{
	u32 value;

	/* Read */
	asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));

	/* Write to clear flags */
	value &= ARMV8_OVSR_MASK;
	asm volatile("msr pmovsclr_el0, %0" :: "r" (value));

	return value;
}
static void armv8pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters).
	 */
	armv8pmu_write_evtype(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv8pmu_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv8pmu_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv8pmu_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmovsr = armv8pmu_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	int idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV8_EVTYPE_EVENT;

	/* Always place a cycle counter into the cycle counter. */
	if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
		if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV8_IDX_CYCLE_COUNTER;
	}

	/*
	 * For anything other than a cycle counter, try and use
	 * the events counters
	 */
	for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	/* The counters are all in use. */
	return -EAGAIN;
}
/*
 * Add an event filter to a given event. This will only work for PMUv3 PMUs.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;
	if (attr->exclude_user)
		config_base |= ARMV8_EXCLUDE_EL0;
	if (attr->exclude_kernel)
		config_base |= ARMV8_EXCLUDE_EL1;
	if (!attr->exclude_hv)
		config_base |= ARMV8_INCLUDE_EL2;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}
static void armv8pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv8pmu_disable_counter(idx);
		armv8pmu_disable_intens(idx);
	}

	/* Initialize & Reset PMNC: C and P bits. */
	armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
}
static int armv8_pmuv3_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv8_pmuv3_perf_map,
				&armv8_pmuv3_perf_cache_map,
				ARMV8_EVTYPE_EVENT);
}

static int armv8_a53_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv8_a53_perf_map,
				&armv8_a53_perf_cache_map,
				ARMV8_EVTYPE_EVENT);
}

static int armv8_a57_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv8_a57_perf_map,
				&armv8_a57_perf_cache_map,
				ARMV8_EVTYPE_EVENT);
}
static void armv8pmu_read_num_pmnc_events(void *info)
{
	int *nb_cnt = info;

	/* Read the nb of CNTx counters supported from PMNC */
	*nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;

	/* Add the CPU cycles counter */
	*nb_cnt += 1;
}

static int armv8pmu_probe_num_events(struct arm_pmu *arm_pmu)
{
	return smp_call_function_any(&arm_pmu->supported_cpus,
				    armv8pmu_read_num_pmnc_events,
				    &arm_pmu->num_events, 1);
}
static void armv8_pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->handle_irq		= armv8pmu_handle_irq;
	cpu_pmu->enable			= armv8pmu_enable_event;
	cpu_pmu->disable		= armv8pmu_disable_event;
	cpu_pmu->read_counter		= armv8pmu_read_counter;
	cpu_pmu->write_counter		= armv8pmu_write_counter;
	cpu_pmu->get_event_idx		= armv8pmu_get_event_idx;
	cpu_pmu->start			= armv8pmu_start;
	cpu_pmu->stop			= armv8pmu_stop;
	cpu_pmu->reset			= armv8pmu_reset;
	/* The event counters are 32-bit, so the maximum period is 2^32 - 1. */
	cpu_pmu->max_period		= (1LLU << 32) - 1;
	cpu_pmu->set_event_filter	= armv8pmu_set_event_filter;
}
static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
{
	armv8_pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv8_pmuv3";
	cpu_pmu->map_event	= armv8_pmuv3_map_event;
	return armv8pmu_probe_num_events(cpu_pmu);
}

static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv8_pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv8_cortex_a53";
	cpu_pmu->map_event	= armv8_a53_map_event;
	return armv8pmu_probe_num_events(cpu_pmu);
}

static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv8_pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv8_cortex_a57";
	cpu_pmu->map_event	= armv8_a57_map_event;
	return armv8pmu_probe_num_events(cpu_pmu);
}
static const struct of_device_id armv8_pmu_of_device_ids[] = {
	{.compatible = "arm,armv8-pmuv3",	.data = armv8_pmuv3_init},
	{.compatible = "arm,cortex-a53-pmu",	.data = armv8_a53_pmu_init},
	{.compatible = "arm,cortex-a57-pmu",	.data = armv8_a57_pmu_init},
	{},
};
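/*
 * Illustrative devicetree binding for the match table above (the node
 * name and interrupt specifier are examples, not taken from a real
 * board):
 *
 *	pmu {
 *		compatible = "arm,cortex-a53-pmu";
 *		interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
 *	};
 */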
static int armv8_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}

static struct platform_driver armv8_pmu_driver = {
	.driver		= {
		.name	= "armv8-pmu",
		.of_match_table = armv8_pmu_of_device_ids,
	},
	.probe		= armv8_pmu_device_probe,
};

static int __init register_armv8_pmu_driver(void)
{
	return platform_driver_register(&armv8_pmu_driver);
}
device_initcall(register_armv8_pmu_driver);