/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low level code inspired
 * by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 * a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 * a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 * counter and all 4 performance counters together can be reset separately.
 */

#ifdef CONFIG_CPU_V7

#include <asm/cp15.h>
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"

/*
 * Common ARMv7 event types
 *
 * Note: An implementation may not be able to count all of these events
 * but the encodings are considered to be `reserved' in the case that
 * they are not available.
 */
enum armv7_perf_types {
	ARMV7_PERFCTR_PMNC_SW_INCR			= 0x00,
	ARMV7_PERFCTR_L1_ICACHE_REFILL			= 0x01,
	ARMV7_PERFCTR_ITLB_REFILL			= 0x02,
	ARMV7_PERFCTR_L1_DCACHE_REFILL			= 0x03,
	ARMV7_PERFCTR_L1_DCACHE_ACCESS			= 0x04,
	ARMV7_PERFCTR_DTLB_REFILL			= 0x05,
	ARMV7_PERFCTR_MEM_READ				= 0x06,
	ARMV7_PERFCTR_MEM_WRITE				= 0x07,
	ARMV7_PERFCTR_INSTR_EXECUTED			= 0x08,
	ARMV7_PERFCTR_EXC_TAKEN				= 0x09,
	ARMV7_PERFCTR_EXC_EXECUTED			= 0x0A,
	ARMV7_PERFCTR_CID_WRITE				= 0x0B,

	/*
	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
	 * It counts:
	 *  - all (taken) branch instructions,
	 *  - instructions that explicitly write the PC,
	 *  - exception generating instructions.
	 */
	ARMV7_PERFCTR_PC_WRITE				= 0x0C,
	ARMV7_PERFCTR_PC_IMM_BRANCH			= 0x0D,
	ARMV7_PERFCTR_PC_PROC_RETURN			= 0x0E,
	ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS		= 0x0F,
	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED		= 0x10,
	ARMV7_PERFCTR_CLOCK_CYCLES			= 0x11,
	ARMV7_PERFCTR_PC_BRANCH_PRED			= 0x12,

	/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
	ARMV7_PERFCTR_MEM_ACCESS			= 0x13,
	ARMV7_PERFCTR_L1_ICACHE_ACCESS			= 0x14,
	ARMV7_PERFCTR_L1_DCACHE_WB			= 0x15,
	ARMV7_PERFCTR_L2_CACHE_ACCESS			= 0x16,
	ARMV7_PERFCTR_L2_CACHE_REFILL			= 0x17,
	ARMV7_PERFCTR_L2_CACHE_WB			= 0x18,
	ARMV7_PERFCTR_BUS_ACCESS			= 0x19,
	ARMV7_PERFCTR_MEM_ERROR				= 0x1A,
	ARMV7_PERFCTR_INSTR_SPEC			= 0x1B,
	ARMV7_PERFCTR_TTBR_WRITE			= 0x1C,
	ARMV7_PERFCTR_BUS_CYCLES			= 0x1D,

	ARMV7_PERFCTR_CPU_CYCLES			= 0xFF
};

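/*
 * Note (illustrative, not in the original source): 0xFF is a software
 * convention rather than a normal hardware event encoding;
 * armv7pmu_get_event_idx() below routes events with this type to the
 * dedicated cycle counter instead of programming a PMNx counter with it.
 */
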
/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
	ARMV7_A8_PERFCTR_L2_CACHE_ACCESS		= 0x43,
	ARMV7_A8_PERFCTR_L2_CACHE_REFILL		= 0x44,
	ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS		= 0x50,
	ARMV7_A8_PERFCTR_STALL_ISIDE			= 0x56,
};

/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
	ARMV7_A9_PERFCTR_INSTR_CORE_RENAME		= 0x68,
	ARMV7_A9_PERFCTR_STALL_ICACHE			= 0x60,
	ARMV7_A9_PERFCTR_STALL_DISPATCH			= 0x66,
};

/* ARMv7 Cortex-A5 specific event types */
enum armv7_a5_perf_types {
	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL		= 0xc2,
	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP		= 0xc3,
};

/* ARMv7 Cortex-A15 specific event types */
enum armv7_a15_perf_types {
	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ		= 0x40,
	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE	= 0x41,
	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ		= 0x42,
	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE	= 0x43,

	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ		= 0x4C,
	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE		= 0x4D,

	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ		= 0x50,
	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE		= 0x51,
	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ		= 0x52,
	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE		= 0x53,

	ARMV7_A15_PERFCTR_PC_WRITE_SPEC			= 0x76,
};

/* ARMv7 Cortex-A12 specific event types */
enum armv7_a12_perf_types {
	ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ		= 0x40,
	ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE	= 0x41,

	ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ		= 0x50,
	ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE		= 0x51,

	ARMV7_A12_PERFCTR_PC_WRITE_SPEC			= 0x76,

	ARMV7_A12_PERFCTR_PF_TLB_REFILL			= 0xe7,
};

/* ARMv7 Krait specific event types */
enum krait_perf_types {
	KRAIT_PMRESR0_GROUP0				= 0xcc,
	KRAIT_PMRESR1_GROUP0				= 0xd0,
	KRAIT_PMRESR2_GROUP0				= 0xd4,
	KRAIT_VPMRESR0_GROUP0				= 0xd8,

	KRAIT_PERFCTR_L1_ICACHE_ACCESS			= 0x10011,
	KRAIT_PERFCTR_L1_ICACHE_MISS			= 0x10010,

	KRAIT_PERFCTR_L1_ITLB_ACCESS			= 0x12222,
	KRAIT_PERFCTR_L1_DTLB_ACCESS			= 0x12210,
};

/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A8_PERFCTR_STALL_ISIDE,
};

static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A9_PERFCTR_STALL_ICACHE,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV7_A9_PERFCTR_STALL_DISPATCH,
};

static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A5 HW events mapping
 */
static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
	/*
	 * The prefetch counters don't differentiate between the I side and the
	 * D side.
	 */
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A15 HW events mapping
 */
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,

	/*
	 * Not all performance counters differentiate between read and write
	 * accesses/misses so we're not always strictly correct, but it's the
	 * best we can do. Writes and reads get combined in these cases.
	 */
	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A7 HW events mapping
 */
static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A12 HW events mapping
 */
static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	/*
	 * Not all performance counters differentiate between read and write
	 * accesses/misses so we're not always strictly correct, but it's the
	 * best we can do. Writes and reads get combined in these cases.
	 */
	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A12_PERFCTR_PF_TLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Krait HW events mapping
 */
static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= KRAIT_PERFCTR_L1_ICACHE_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,

	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Perf Events' indices
 */
#define	ARMV7_IDX_CYCLE_COUNTER	0
#define	ARMV7_IDX_COUNTER0	1
#define	ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define	ARMV7_MAX_COUNTERS	32
#define	ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)

/*
 * ARMv7 low level PMNC access
 */

/*
 * Perf Event to low level counters mapping
 */
#define	ARMV7_IDX_TO_COUNTER(x)	\
	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)

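/*
 * Worked example (illustrative, not from the original source): perf index 0
 * is the cycle counter and never goes through this macro; perf index 1
 * (ARMV7_IDX_COUNTER0) maps to hardware counter 0, since
 * ARMV7_IDX_TO_COUNTER(1) == ((1 - 1) & ARMV7_COUNTER_MASK) == 0.
 */
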
/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define	ARMV7_PMNC_N_MASK	0x1f
#define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */

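/*
 * Example (illustrative, not from the original source): the number of event
 * counters lives in PMNC[15:11], so a core reporting N = 4 has four PMNx
 * counters plus the cycle counter; armv7_read_num_pmnc_events() below
 * computes (pmnc >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK and adds one.
 */
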
/*
 * FLAG: counters overflow flag status reg
 */
#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define	ARMV7_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
#define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */

/*
 * Event filters for PMUv2
 */
#define	ARMV7_EXCLUDE_PL1	(1 << 31)
#define	ARMV7_EXCLUDE_USER	(1 << 30)
#define	ARMV7_INCLUDE_HYP	(1 << 27)

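/*
 * Example (illustrative, not from the original source): counting an event in
 * user mode only sets ARMV7_EXCLUDE_PL1, so for L1 d-cache accesses the
 * event type written by armv7_pmnc_write_evtsel() below would be
 * ARMV7_EXCLUDE_PL1 | ARMV7_PERFCTR_L1_DCACHE_ACCESS, i.e. 0x80000004,
 * which survives the ARMV7_EVTYPE_MASK (0xc80000ff) write mask intact.
 */
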
static inline u32 armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
	return val;
}

static inline void armv7_pmnc_write(u32 val)
{
	val &= ARMV7_PMNC_MASK;
	isb();
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}

static inline int armv7_pmnc_has_overflowed(u32 pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}

static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
	return idx >= ARMV7_IDX_CYCLE_COUNTER &&
		idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
}

static inline void armv7_pmnc_select_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
	isb();
}

static inline u32 armv7pmu_read_counter(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u32 value = 0;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	} else {
		armv7_pmnc_select_counter(idx);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
	}

	return value;
}

static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	} else {
		armv7_pmnc_select_counter(idx);
		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
	}
}

static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
	armv7_pmnc_select_counter(idx);
	val &= ARMV7_EVTYPE_MASK;
	asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
}

static inline void armv7_pmnc_enable_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_disable_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_enable_intens(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_disable_intens(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
	isb();
}

static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}

#ifdef DEBUG
static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
{
	u32 val;
	unsigned int cnt;

	pr_info("PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	pr_info("PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	pr_info("CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	pr_info("INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	pr_info("FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	pr_info("SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	pr_info("CCNT  =0x%08x\n", val);

	for (cnt = ARMV7_IDX_COUNTER0;
	     cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		pr_info("CNT[%d] count =0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		pr_info("CNT[%d] evtsel=0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
	}
}
#endif

static void armv7pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return;
	}

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We only need to set the event for the cycle counter if we
	 * have the ability to perform event filtering.
	 */
	if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/* Enable interrupt for this counter */
	armv7_pmnc_enable_intens(idx);

	/* Enable counter */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return;
	}

	/* Disable counter and interrupt */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/* Disable interrupt for this counter */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmnc;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/* Get and reset the IRQ flags */
	pmnc = armv7_pmnc_getreset_flags();

	/* Did an overflow occur? */
	if (!armv7_pmnc_has_overflowed(pmnc))
		return IRQ_NONE;

	/* Handle the counter(s) overflow(s) */
	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}

static void armv7pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	int idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;

	/* Always place a cycle counter into the cycle counter. */
	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV7_IDX_CYCLE_COUNTER;
	}

	/*
	 * For anything other than a cycle counter, try and use
	 * the events counters
	 */
	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	/* The counters are all in use. */
	return -EAGAIN;
}

/*
 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
 */
static int armv7pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;
	if (attr->exclude_user)
		config_base |= ARMV7_EXCLUDE_USER;
	if (attr->exclude_kernel)
		config_base |= ARMV7_EXCLUDE_PL1;
	if (!attr->exclude_hv)
		config_base |= ARMV7_INCLUDE_HYP;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}

static void armv7pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv7_pmnc_disable_counter(idx);
		armv7_pmnc_disable_intens(idx);
	}

	/* Initialize & Reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}

static int armv7_a8_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a8_perf_map,
				&armv7_a8_perf_cache_map, 0xFF);
}

static int armv7_a9_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a9_perf_map,
				&armv7_a9_perf_cache_map, 0xFF);
}

static int armv7_a5_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a5_perf_map,
				&armv7_a5_perf_cache_map, 0xFF);
}

static int armv7_a15_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a15_perf_map,
				&armv7_a15_perf_cache_map, 0xFF);
}

static int armv7_a7_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a7_perf_map,
				&armv7_a7_perf_cache_map, 0xFF);
}

static int armv7_a12_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a12_perf_map,
				&armv7_a12_perf_cache_map, 0xFF);
}

static int krait_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &krait_perf_map,
				&krait_perf_cache_map, 0xFFFFF);
}

static int krait_map_event_no_branch(struct perf_event *event)
{
	return armpmu_map_event(event, &krait_perf_map_no_branch,
				&krait_perf_cache_map, 0xFFFFF);
}

static void armv7pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
	cpu_pmu->enable		= armv7pmu_enable_event;
	cpu_pmu->disable	= armv7pmu_disable_event;
	cpu_pmu->read_counter	= armv7pmu_read_counter;
	cpu_pmu->write_counter	= armv7pmu_write_counter;
	cpu_pmu->get_event_idx	= armv7pmu_get_event_idx;
	cpu_pmu->start		= armv7pmu_start;
	cpu_pmu->stop		= armv7pmu_stop;
	cpu_pmu->reset		= armv7pmu_reset;
	cpu_pmu->max_period	= (1LLU << 32) - 1;
}

static u32 armv7_read_num_pmnc_events(void)
{
	u32 nb_cnt;

	/* Read the nb of CNTx counters supported from PMNC */
	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* Add the CPU cycles counter and return */
	return nb_cnt + 1;
}

static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a8";
	cpu_pmu->map_event	= armv7_a8_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	return 0;
}

static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a9";
	cpu_pmu->map_event	= armv7_a9_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	return 0;
}

static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a5";
	cpu_pmu->map_event	= armv7_a5_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	return 0;
}

static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a15";
	cpu_pmu->map_event	= armv7_a15_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	return 0;
}

static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a7";
	cpu_pmu->map_event	= armv7_a7_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	return 0;
}

static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a12";
	cpu_pmu->map_event	= armv7_a12_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	return 0;
}

static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7_a12_pmu_init(cpu_pmu);
	cpu_pmu->name = "armv7_cortex_a17";
	return 0;
}

/*
 * Krait Performance Monitor Region Event Selection Register (PMRESRn)
 *
 *            31   30     24     16     8      0
 *            +--------------------------------+
 *  PMRESR0   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
 *            +--------------------------------+
 *  PMRESR1   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
 *            +--------------------------------+
 *  PMRESR2   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
 *            +--------------------------------+
 *  VPMRESR0  | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
 *            +--------------------------------+
 *              EN | G=3  | G=2  | G=1  | G=0
 *
 *  Event Encoding:
 *
 *      hwc->config_base = 0xNRCCG
 *
 *      N  = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
 *      R  = region register
 *      CC = class of events the group G is choosing from
 *      G  = group or particular event
 *
 *  Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
 *
 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
 *  unit, etc.) while the event code (CC) corresponds to a particular class of
 *  events (interrupts for example). An event code is broken down into
 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
 *  example).
 */

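/*
 * Worked decode of the example above (illustrative, not part of the original
 * code): for hwc->config_base = 0x12021,
 *   N  = (0x12021 >> 16) & 0xf  = 1  -> Krait CPU event (PMRESRn)
 *   R  = (0x12021 >> 12) & 0xf  = 2  -> PMRESR2
 *   CC = (0x12021 >> 4)  & 0xff = 2  -> event code written into the group
 *   G  = 0x12021 & 0xf          = 1  -> byte lane 1 (bits 15:8) of PMRESR2
 * which is exactly how krait_evt_setup() below picks the register and the
 * 8-bit field to program.
 */
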
#define KRAIT_EVENT		(1 << 16)
#define VENUM_EVENT		(2 << 16)
#define KRAIT_EVENT_MASK	(KRAIT_EVENT | VENUM_EVENT)
#define PMRESRn_EN		BIT(31)

static u32 krait_read_pmresrn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
		break;
	case 1:
		asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
		break;
	case 2:
		asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
		break;
	default:
		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
	}

	return val;
}

static void krait_write_pmresrn(int n, u32 val)
{
	switch (n) {
	case 0:
		asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
		break;
	case 1:
		asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
		break;
	case 2:
		asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
		break;
	default:
		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
	}
}

static u32 krait_read_vpmresr0(void)
{
	u32 val;
	asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
	return val;
}

static void krait_write_vpmresr0(u32 val)
{
	asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
}

static void krait_pre_vpmresr0(u32 *venum_orig_val, u32 *fp_orig_val)
{
	u32 venum_new_val;
	u32 fp_new_val;

	BUG_ON(preemptible());
	/* CPACR Enable CP10 and CP11 access */
	*venum_orig_val = get_copro_access();
	venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
	set_copro_access(venum_new_val);

	/* Enable FPEXC */
	*fp_orig_val = fmrx(FPEXC);
	fp_new_val = *fp_orig_val | FPEXC_EN;
	fmxr(FPEXC, fp_new_val);
}

static void krait_post_vpmresr0(u32 venum_orig_val, u32 fp_orig_val)
{
	BUG_ON(preemptible());
	/* Restore FPEXC */
	fmxr(FPEXC, fp_orig_val);

	/* Restore CPACR */
	set_copro_access(venum_orig_val);
}

static u32 krait_get_pmresrn_event(unsigned int region)
{
	static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
					     KRAIT_PMRESR1_GROUP0,
					     KRAIT_PMRESR2_GROUP0 };
	return pmresrn_table[region];
}

static void krait_evt_setup(int idx, u32 config_base)
{
	u32 val;
	u32 mask;
	u32 vval, fval;
	unsigned int region;
	unsigned int group;
	unsigned int code;
	unsigned int group_shift;
	bool venum_event;

	venum_event = !!(config_base & VENUM_EVENT);
	region = (config_base >> 12) & 0xf;
	code   = (config_base >> 4) & 0xff;
	group  = (config_base >> 0) & 0xf;

	group_shift = group * 8;
	mask = 0xff << group_shift;

	/* Configure evtsel for the region and group */
	if (venum_event)
		val = KRAIT_VPMRESR0_GROUP0;
	else
		val = krait_get_pmresrn_event(region);
	val += group;
	/* Mix in mode-exclusion bits */
	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
	armv7_pmnc_write_evtsel(idx, val);

	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));

	if (venum_event) {
		krait_pre_vpmresr0(&vval, &fval);
		val = krait_read_vpmresr0();
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		krait_write_vpmresr0(val);
		krait_post_vpmresr0(vval, fval);
	} else {
		val = krait_read_pmresrn(region);
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		krait_write_pmresrn(region, val);
	}
}

static u32 krait_clear_pmresrn_group(u32 val, int group)
{
	u32 mask;
	int group_shift;

	group_shift = group * 8;
	mask = 0xff << group_shift;
	val &= ~mask;

	/* Don't clear enable bit if entire region isn't disabled */
	if (val & ~PMRESRn_EN)
		return val |= PMRESRn_EN;

	return 0;
}

static void krait_clearpmu(u32 config_base)
{
	u32 val;
	u32 vval, fval;
	unsigned int region;
	unsigned int group;
	bool venum_event;

	venum_event = !!(config_base & VENUM_EVENT);
	region = (config_base >> 12) & 0xf;
	group  = (config_base >> 0) & 0xf;

	if (venum_event) {
		krait_pre_vpmresr0(&vval, &fval);
		val = krait_read_vpmresr0();
		val = krait_clear_pmresrn_group(val, group);
		krait_write_vpmresr0(val);
		krait_post_vpmresr0(vval, fval);
	} else {
		val = krait_read_pmresrn(region);
		val = krait_clear_pmresrn_group(val, group);
		krait_write_pmresrn(region, val);
	}
}

static void krait_pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/* Disable counter and interrupt */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Clear pmresr code (if destined for PMNx counters)
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		krait_clearpmu(hwc->config_base);

	/* Disable interrupt for this counter */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void krait_pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We set the event for the cycle counter because we
	 * have the ability to perform event filtering.
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		krait_evt_setup(idx, hwc->config_base);
	else
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/* Enable interrupt for this counter */
	armv7_pmnc_enable_intens(idx);

	/* Enable counter */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void krait_pmu_reset(void *info)
{
	u32 vval, fval;

	armv7pmu_reset(info);

	/* Clear all pmresrs */
	krait_write_pmresrn(0, 0);
	krait_write_pmresrn(1, 0);
	krait_write_pmresrn(2, 0);

	krait_pre_vpmresr0(&vval, &fval);
	krait_write_vpmresr0(0);
	krait_post_vpmresr0(vval, fval);
}

static int krait_event_to_bit(struct perf_event *event, unsigned int region,
			      unsigned int group)
{
	int bit;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);

	if (hwc->config_base & VENUM_EVENT)
		bit = KRAIT_VPMRESR0_GROUP0;
	else
		bit = krait_get_pmresrn_event(region);
	bit -= krait_get_pmresrn_event(0);
	bit += group;
	/*
	 * Lower bits are reserved for use by the counters (see
	 * armv7pmu_get_event_idx() for more info)
	 */
	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;

	return bit;
}

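/*
 * Worked example (assumed figures, not from the original source): on a Krait
 * with five perf counters (the cycle counter plus four events), an event in
 * PMRESR1 group 2 gets
 *   bit = (KRAIT_PMRESR1_GROUP0 - KRAIT_PMRESR0_GROUP0) + 2 + 5
 *       = (0xd0 - 0xcc) + 2 + 5 = 11,
 * so the pmresr bookkeeping lands above the bits used by the counters
 * themselves in cpuc->used_mask.
 */
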
/*
 * We check for column exclusion constraints here.
 * Two events can't use the same group within a pmresr register.
 */
static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
				   struct perf_event *event)
{
	int idx;
	int bit = -1;
	unsigned int prefix;
	unsigned int region;
	unsigned int code;
	unsigned int group;
	bool krait_event;
	struct hw_perf_event *hwc = &event->hw;

	region = (hwc->config_base >> 12) & 0xf;
	code   = (hwc->config_base >> 4) & 0xff;
	group  = (hwc->config_base >> 0) & 0xf;
	krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);

	if (krait_event) {
		/* Ignore invalid events */
		if (group > 3 || region > 2)
			return -EINVAL;
		prefix = hwc->config_base & KRAIT_EVENT_MASK;
		if (prefix != KRAIT_EVENT && prefix != VENUM_EVENT)
			return -EINVAL;
		if (prefix == VENUM_EVENT && (code & 0xe0))
			return -EINVAL;

		bit = krait_event_to_bit(event, region, group);
		if (test_and_set_bit(bit, cpuc->used_mask))
			return -EAGAIN;
	}

	idx = armv7pmu_get_event_idx(cpuc, event);
	if (idx < 0 && bit >= 0)
		clear_bit(bit, cpuc->used_mask);

	return idx;
}

static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
				      struct perf_event *event)
{
	int bit;
	struct hw_perf_event *hwc = &event->hw;
	unsigned int region;
	unsigned int group;
	bool krait_event;

	region = (hwc->config_base >> 12) & 0xf;
	group  = (hwc->config_base >> 0) & 0xf;
	krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);

	if (krait_event) {
		bit = krait_event_to_bit(event, region, group);
		clear_bit(bit, cpuc->used_mask);
	}
}

static int krait_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_krait";
	/* Some early versions of Krait don't support PC write events */
	if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
				  "qcom,no-pc-write"))
		cpu_pmu->map_event = krait_map_event_no_branch;
	else
		cpu_pmu->map_event = krait_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	cpu_pmu->reset		= krait_pmu_reset;
	cpu_pmu->enable		= krait_pmu_enable_event;
	cpu_pmu->disable	= krait_pmu_disable_event;
	cpu_pmu->get_event_idx	= krait_pmu_get_event_idx;
	cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
	return 0;
}

#else
static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int krait_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}
#endif	/* CONFIG_CPU_V7 */