/*
 * Performance counter support for POWER8 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <asm/firmware.h>
/*
 * Some power8 event codes.
 */
#define PM_CYC				0x0001e
#define PM_GCT_NOSLOT_CYC		0x100f8
#define PM_CMPLU_STALL			0x4000a
#define PM_INST_CMPL			0x00002
#define PM_BRU_FIN			0x10068
#define PM_BR_MPRED_CMPL		0x400f6
/*
 * Raw event encoding for POWER8:
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *                             [      thresh_cmp     ]   [  thresh_ctl   ]
 *                                                                 |
 *                                 thresh start/stop OR FAB match -*
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   [   ] [  sample ]   [cache]   [ pmc ]   [unit ]   c     m   [    pmcxsel    ]
 *     |        |           |                          |     |
 *     |        |           *- L1/L2/L3 cache_sel      |     |
 *     |        *- sampling mode for marked events     *- combine
 *
 * Below uses IBM bit numbering.
 *
 * MMCR1[x:y] = unit    (PMCxUNIT)
 * MMCR1[x]   = combine (PMCxCOMB)
 *
 * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
 *	# PM_MRK_FAB_RSP_MATCH
 *	MMCR1[20:27] = thresh_ctl   (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
 * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
 *	# PM_MRK_FAB_RSP_MATCH_CYC
 *	MMCR1[20:27] = thresh_ctl   (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
 *
 * MMCRA[48:55] = thresh_ctl   (THRESH START/END)
 *
 * MMCRA[45:47] = thresh_sel
 *
 * MMCRA[22:24] = thresh_cmp[0:2]
 * MMCRA[25:31] = thresh_cmp[3:9]
 *
 * if unit == 6 or unit == 7
 *	MMCRC[53:55] = cache_sel[1:3]	(L2EVENT_SEL)
 * else if unit == 8 or unit == 9:
 *	if cache_sel[0] == 0: # L3 bank
 *		MMCRC[47:49] = cache_sel[1:3]	(L3EVENT_SEL0)
 *	else if cache_sel[0] == 1:
 *		MMCRC[50:51] = cache_sel[2:3]	(L3EVENT_SEL1)
 * else if cache_sel[1]: # L1 event
 *	MMCR1[16] = cache_sel[2]
 *	MMCR1[17] = cache_sel[3]
 *
 * MMCRA[63]    = 1		(SAMPLE_ENABLE)
 * MMCRA[57:59] = sample[0:2]	(RAND_SAMP_ELIG)
 * MMCRA[61:62] = sample[3:4]	(RAND_SAMP_MODE)
 */
#define EVENT_THR_CMP_SHIFT	40	/* Threshold CMP value */
#define EVENT_THR_CMP_MASK	0x3ff
#define EVENT_THR_CTL_SHIFT	32	/* Threshold control value (start/stop) */
#define EVENT_THR_CTL_MASK	0xffull
#define EVENT_THR_SEL_SHIFT	29	/* Threshold select value */
#define EVENT_THR_SEL_MASK	0x7
#define EVENT_THRESH_SHIFT	29	/* All threshold bits */
#define EVENT_THRESH_MASK	0x1fffffull
#define EVENT_SAMPLE_SHIFT	24	/* Sampling mode & eligibility */
#define EVENT_SAMPLE_MASK	0x1f
#define EVENT_CACHE_SEL_SHIFT	20	/* L2/L3 cache select */
#define EVENT_CACHE_SEL_MASK	0xf
#define EVENT_IS_L1		(4 << EVENT_CACHE_SEL_SHIFT)
#define EVENT_PMC_SHIFT		16	/* PMC number (1-based) */
#define EVENT_PMC_MASK		0xf
#define EVENT_UNIT_SHIFT	12	/* Unit */
#define EVENT_UNIT_MASK		0xf
#define EVENT_COMBINE_SHIFT	11	/* Combine bit */
#define EVENT_COMBINE_MASK	0x1
#define EVENT_MARKED_SHIFT	8	/* Marked bit */
#define EVENT_MARKED_MASK	0x1
#define EVENT_IS_MARKED		(EVENT_MARKED_MASK << EVENT_MARKED_SHIFT)
#define EVENT_PSEL_MASK		0xff	/* PMCxSEL value */
#define EVENT_VALID_MASK	\
	((EVENT_THRESH_MASK    << EVENT_THRESH_SHIFT)		|	\
	 (EVENT_SAMPLE_MASK    << EVENT_SAMPLE_SHIFT)		|	\
	 (EVENT_CACHE_SEL_MASK << EVENT_CACHE_SEL_SHIFT)	|	\
	 (EVENT_PMC_MASK       << EVENT_PMC_SHIFT)		|	\
	 (EVENT_UNIT_MASK      << EVENT_UNIT_SHIFT)		|	\
	 (EVENT_COMBINE_MASK   << EVENT_COMBINE_SHIFT)		|	\
	 (EVENT_MARKED_MASK    << EVENT_MARKED_SHIFT)		|	\
	  EVENT_PSEL_MASK)
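
/*
 * Illustrative sketch only (not part of the driver): shows how the field
 * macros above pick a raw event code apart. The helper name and the use of
 * pr_debug() are hypothetical, chosen just to demonstrate the encoding.
 */
static inline void power8_show_raw_event(u64 event)
{
	unsigned int pmc, unit, cache, psel;

	/* Events with bits outside the defined encoding are rejected */
	if (event & ~EVENT_VALID_MASK)
		return;

	pmc   = (event >> EVENT_PMC_SHIFT)       & EVENT_PMC_MASK;
	unit  = (event >> EVENT_UNIT_SHIFT)      & EVENT_UNIT_MASK;
	cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
	psel  = event & EVENT_PSEL_MASK;

	pr_debug("event %#llx: pmc=%u unit=%u cache_sel=%u pmcxsel=%#x marked=%d\n",
		 (unsigned long long)event, pmc, unit, cache, psel,
		 !!(event & EVENT_IS_MARKED));
}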
/* MMCRA IFM bits - POWER8 */
#define POWER8_MMCRA_IFM1		0x0000000040000000UL
#define POWER8_MMCRA_IFM2		0x0000000080000000UL
#define POWER8_MMCRA_IFM3		0x00000000C0000000UL

#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)
/*
 * Layout of constraint bits:
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   [   fab_match   ]         [       thresh_cmp      ] [   thresh_ctl   ] [   ]
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   [   ] [  sample ]   [   ]   [6] [5]   [4] [3]   [2] [1]
 *           L1 I/D qualifier -*    |       Count of events for each PMC.
 *                                  |         p1, p2, p3, p4, p5, p6.
 *        nc - number of counters -*
 *
 * The PMC fields P1..P6, and NC, are adder fields. As we accumulate constraints
 * we want the low bit of each field to be added to any existing value.
 *
 * Everything else is a value field.
 */
#define CNST_FAB_MATCH_VAL(v)	(((v) & EVENT_THR_CTL_MASK) << 56)
#define CNST_FAB_MATCH_MASK	CNST_FAB_MATCH_VAL(EVENT_THR_CTL_MASK)

/* We just throw all the threshold bits into the constraint */
#define CNST_THRESH_VAL(v)	(((v) & EVENT_THRESH_MASK) << 32)
#define CNST_THRESH_MASK	CNST_THRESH_VAL(EVENT_THRESH_MASK)

#define CNST_L1_QUAL_VAL(v)	(((v) & 3) << 22)
#define CNST_L1_QUAL_MASK	CNST_L1_QUAL_VAL(3)

#define CNST_SAMPLE_VAL(v)	(((v) & EVENT_SAMPLE_MASK) << 16)
#define CNST_SAMPLE_MASK	CNST_SAMPLE_VAL(EVENT_SAMPLE_MASK)
/*
 * For NC we are counting up to 4 events. This requires three bits, and we need
 * the fifth event to overflow and set the 4th bit. To achieve that we bias the
 * fields by 3 in test_adder.
 */
#define CNST_NC_SHIFT		12
#define CNST_NC_VAL		(1 << CNST_NC_SHIFT)
#define CNST_NC_MASK		(8 << CNST_NC_SHIFT)
#define POWER8_TEST_ADDER	(3 << CNST_NC_SHIFT)
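
/*
 * Illustrative sketch only: the arithmetic behind the bias described above.
 * The helper is hypothetical; the core perf code does the equivalent sum when
 * it adds test_adder and each event's constraint value. Four events leave the
 * CNST_NC_MASK bit clear, a fifth carries into it.
 */
static inline bool power8_nc_overflows(unsigned int n_events)
{
	unsigned long sum = POWER8_TEST_ADDER + n_events * CNST_NC_VAL;

	return (sum & CNST_NC_MASK) != 0;	/* false for 4 events, true for 5 */
}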
/*
 * For the per-PMC fields we have two bits. The low bit is added, so if two
 * events ask for the same PMC the sum will overflow, setting the high bit,
 * indicating an error. So our mask sets the high bit.
 */
#define CNST_PMC_SHIFT(pmc)	((pmc - 1) * 2)
#define CNST_PMC_VAL(pmc)	(1 << CNST_PMC_SHIFT(pmc))
#define CNST_PMC_MASK(pmc)	(2 << CNST_PMC_SHIFT(pmc))

/* Our add_fields is defined as: */
#define POWER8_ADD_FIELDS \
	CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \
	CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL
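
/*
 * Illustrative sketch only: how the per-PMC adder fields catch double-booking.
 * The helper is hypothetical. Two events that both demand PMC n each
 * contribute CNST_PMC_VAL(n); their sum lands exactly on CNST_PMC_MASK(n),
 * which the constraint check treats as a conflict.
 */
static inline bool power8_pmc_conflicts(unsigned int pmc)
{
	unsigned long sum = CNST_PMC_VAL(pmc) + CNST_PMC_VAL(pmc);

	return (sum & CNST_PMC_MASK(pmc)) != 0;	/* always true: double-booked */
}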
/* Bits in MMCR1 for POWER8 */
#define MMCR1_UNIT_SHIFT(pmc)		(60 - (4 * ((pmc) - 1)))
#define MMCR1_COMBINE_SHIFT(pmc)	(35 - ((pmc) - 1))
#define MMCR1_PMCSEL_SHIFT(pmc)		(24 - (((pmc) - 1)) * 8)
#define MMCR1_FAB_SHIFT			36
#define MMCR1_DC_QUAL_SHIFT		47
#define MMCR1_IC_QUAL_SHIFT		46

/* Bits in MMCRA for POWER8 */
#define MMCRA_SAMP_MODE_SHIFT		1
#define MMCRA_SAMP_ELIG_SHIFT		4
#define MMCRA_THR_CTL_SHIFT		8
#define MMCRA_THR_SEL_SHIFT		16
#define MMCRA_THR_CMP_SHIFT		32
#define MMCRA_SDAR_MODE_TLB		(1ull << 42)
static inline bool event_is_fab_match(u64 event)
{
	/* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */
	event &= 0xff0fe;

	/* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */
	return (event == 0x30056 || event == 0x4f052);
}
static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
{
	unsigned int unit, pmc, cache;
	unsigned long mask, value;

	mask = value = 0;

	if (event & ~EVENT_VALID_MASK)
		return -1;

	pmc   = (event >> EVENT_PMC_SHIFT)       & EVENT_PMC_MASK;
	unit  = (event >> EVENT_UNIT_SHIFT)      & EVENT_UNIT_MASK;
	cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;

	if (pmc) {
		mask  |= CNST_PMC_MASK(pmc);
		value |= CNST_PMC_VAL(pmc);

		if (pmc >= 5 && event != 0x500fa && event != 0x600f4)
			return -1;
	}

	if (pmc <= 4) {
		/*
		 * Add to number of counters in use. Note this includes events with
		 * a PMC of 0 - they still need a PMC, it's just assigned later.
		 * Don't count events on PMC 5 & 6, there is only one valid event
		 * on each of those counters, and they are handled above.
		 */
		mask  |= CNST_NC_MASK;
		value |= CNST_NC_VAL;
	}

	if (unit >= 6 && unit <= 9) {
		/*
		 * L2/L3 events contain a cache selector field, which is
		 * supposed to be programmed into MMCRC. However MMCRC is only
		 * HV writable, and there is no API for guest kernels to modify
		 * it. The solution is for the hypervisor to initialise the
		 * field to zeroes, and for us to only ever allow events that
		 * have a cache selector of zero.
		 */
		if (cache)
			return -1;

	} else if (event & EVENT_IS_L1) {
		mask  |= CNST_L1_QUAL_MASK;
		value |= CNST_L1_QUAL_VAL(cache);
	}

	if (event & EVENT_IS_MARKED) {
		mask  |= CNST_SAMPLE_MASK;
		value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
	}

	/*
	 * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
	 * the threshold control bits are used for the match value.
	 */
	if (event_is_fab_match(event)) {
		mask  |= CNST_FAB_MATCH_MASK;
		value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT);
	} else {
		/*
		 * Check the mantissa upper two bits are not zero, unless the
		 * exponent is also zero. See the THRESH_CMP_MANTISSA doc.
		 */
		unsigned int cmp, exp;

		cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
		exp = cmp >> 7;

		if (exp && (cmp & 0x60) == 0)
			return -1;

		mask  |= CNST_THRESH_MASK;
		value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
	}

	*maskp = mask;
	*valp = value;

	return 0;
}
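
/*
 * Illustrative sketch only: a hypothetical helper restating the thresh_cmp
 * rule enforced in power8_get_constraint(). thresh_cmp is 10 bits, exponent
 * in the top 3 and mantissa in the low 7; a non-zero exponent requires the
 * upper two mantissa bits (0x60) to be set.
 */
static inline bool power8_thresh_cmp_valid(unsigned int cmp)
{
	unsigned int exp = cmp >> 7;

	return !exp || (cmp & 0x60);	/* e.g. 0x07f is valid, 0x080 is not */
}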
static int power8_compute_mmcr(u64 event[], int n_ev,
			       unsigned int hwc[], unsigned long mmcr[])
{
	unsigned long mmcra, mmcr1, unit, combine, psel, cache, val;
	unsigned int pmc, pmc_inuse;
	int i;

	pmc_inuse = 0;

	/* First pass to count resource use */
	for (i = 0; i < n_ev; ++i) {
		pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		if (pmc)
			pmc_inuse |= 1 << pmc;
	}

	/* In continuous sampling mode, update SDAR on TLB miss */
	mmcra = MMCRA_SDAR_MODE_TLB;
	mmcr1 = 0;

	/* Second pass: assign PMCs, set all MMCR1 fields */
	for (i = 0; i < n_ev; ++i) {
		pmc	= (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		unit	= (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
		combine	= (event[i] >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK;
		psel	=  event[i] & EVENT_PSEL_MASK;

		if (!pmc) {
			for (pmc = 1; pmc <= 4; ++pmc) {
				if (!(pmc_inuse & (1 << pmc)))
					break;
			}

			pmc_inuse |= 1 << pmc;
		}

		mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
		mmcr1 |= combine << MMCR1_COMBINE_SHIFT(pmc);
		mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);

		if (event[i] & EVENT_IS_L1) {
			cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
			mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
			cache >>= 1;
			mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
		}

		if (event[i] & EVENT_IS_MARKED) {
			mmcra |= MMCRA_SAMPLE_ENABLE;

			val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
			mmcra |= (val & 3) << MMCRA_SAMP_MODE_SHIFT;
			mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT;
		}

		/*
		 * PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
		 * the threshold bits are used for the match value.
		 */
		if (event_is_fab_match(event[i])) {
			mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
				  EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
		} else {
			val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
			mmcra |= val << MMCRA_THR_CTL_SHIFT;
			val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
			mmcra |= val << MMCRA_THR_SEL_SHIFT;
			val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
			mmcra |= val << MMCRA_THR_CMP_SHIFT;
		}

		hwc[i] = pmc - 1;
	}

	/* Return MMCRx values */
	mmcr[0] = 0;

	/* pmc_inuse is 1-based */
	if (pmc_inuse & 2)
		mmcr[0] = MMCR0_PMC1CE;

	if (pmc_inuse & 0x7c)
		mmcr[0] |= MMCR0_PMCjCE;

	/* If we're not using PMC 5 or 6, freeze them */
	if (!(pmc_inuse & 0x60))
		mmcr[0] |= MMCR0_FC56;

	mmcr[1] = mmcr1;
	mmcr[2] = mmcra;

	return 0;
}
#define MAX_ALT	2

/* Table of alternatives, sorted by column 0 */
static const unsigned int event_alternatives[][MAX_ALT] = {
	{ 0x10134, 0x301e2 },		/* PM_MRK_ST_CMPL */
	{ 0x10138, 0x40138 },		/* PM_BR_MRK_2PATH */
	{ 0x18082, 0x3e05e },		/* PM_L3_CO_MEPF */
	{ 0x1d14e, 0x401e8 },		/* PM_MRK_DATA_FROM_L2MISS */
	{ 0x1e054, 0x4000a },		/* PM_CMPLU_STALL */
	{ 0x20036, 0x40036 },		/* PM_BR_2PATH */
	{ 0x200f2, 0x300f2 },		/* PM_INST_DISP */
	{ 0x200f4, 0x600f4 },		/* PM_RUN_CYC */
	{ 0x2013c, 0x3012e },		/* PM_MRK_FILT_MATCH */
	{ 0x3e054, 0x400f0 },		/* PM_LD_MISS_L1 */
	{ 0x400fa, 0x500fa },		/* PM_RUN_INST_CMPL */
};
/*
 * Scan the alternatives table for a match and return the
 * index into the alternatives table if found, else -1.
 */
static int find_alternative(u64 event)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
		if (event < event_alternatives[i][0])
			break;

		for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
			if (event == event_alternatives[i][j])
				return i;
	}

	return -1;
}
static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
	int i, j, num_alt = 0;
	u64 alt_event;

	alt[num_alt++] = event;

	i = find_alternative(event);
	if (i >= 0) {
		/* Filter out the original event, it's already in alt[0] */
		for (j = 0; j < MAX_ALT; ++j) {
			alt_event = event_alternatives[i][j];
			if (alt_event && alt_event != event)
				alt[num_alt++] = alt_event;
		}
	}

	if (flags & PPMU_ONLY_COUNT_RUN) {
		/*
		 * We're only counting in RUN state, so PM_CYC is equivalent to
		 * PM_RUN_CYC and PM_INST_CMPL to PM_RUN_INST_CMPL.
		 */
		j = num_alt;
		for (i = 0; i < num_alt; ++i) {
			switch (alt[i]) {
			case 0x1e:	/* PM_CYC */
				alt[j++] = 0x600f4;	/* PM_RUN_CYC */
				break;
			case 0x600f4:	/* PM_RUN_CYC */
				alt[j++] = 0x1e;	/* PM_CYC */
				break;
			case 0x2:	/* PM_PPC_CMPL */
				alt[j++] = 0x500fa;	/* PM_RUN_INST_CMPL */
				break;
			case 0x500fa:	/* PM_RUN_INST_CMPL */
				alt[j++] = 0x2;	/* PM_PPC_CMPL */
				break;
			}
		}
		num_alt = j;
	}

	return num_alt;
}
static void power8_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
	if (pmc <= 3)
		mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
}
PMU_FORMAT_ATTR(event,		"config:0-49");
PMU_FORMAT_ATTR(pmcxsel,	"config:0-7");
PMU_FORMAT_ATTR(mark,		"config:8");
PMU_FORMAT_ATTR(combine,	"config:11");
PMU_FORMAT_ATTR(unit,		"config:12-15");
PMU_FORMAT_ATTR(pmc,		"config:16-19");
PMU_FORMAT_ATTR(cache_sel,	"config:20-23");
PMU_FORMAT_ATTR(sample_mode,	"config:24-28");
PMU_FORMAT_ATTR(thresh_sel,	"config:29-31");
PMU_FORMAT_ATTR(thresh_stop,	"config:32-35");
PMU_FORMAT_ATTR(thresh_start,	"config:36-39");
PMU_FORMAT_ATTR(thresh_cmp,	"config:40-49");
static struct attribute *power8_pmu_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_pmcxsel.attr,
	&format_attr_mark.attr,
	&format_attr_combine.attr,
	&format_attr_unit.attr,
	&format_attr_pmc.attr,
	&format_attr_cache_sel.attr,
	&format_attr_sample_mode.attr,
	&format_attr_thresh_sel.attr,
	&format_attr_thresh_stop.attr,
	&format_attr_thresh_start.attr,
	&format_attr_thresh_cmp.attr,
	NULL,
};
struct attribute_group power8_pmu_format_group = {
	.name = "format",
	.attrs = power8_pmu_format_attr,
};
static const struct attribute_group *power8_pmu_attr_groups[] = {
	&power8_pmu_format_group,
	NULL,
};
static int power8_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] =			PM_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =	PM_GCT_NOSLOT_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =	PM_CMPLU_STALL,
	[PERF_COUNT_HW_INSTRUCTIONS] =			PM_INST_CMPL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =		PM_BRU_FIN,
	[PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
};
static u64 power8_bhrb_filter_map(u64 branch_sample_type)
{
	u64 pmu_bhrb_filter = 0;
	u64 br_privilege = branch_sample_type & ONLY_PLM;

	/* BHRB and regular PMU events share the same privilege state
	 * filter configuration. BHRB is always recorded along with a
	 * regular PMU event. So privilege state filter criteria for BHRB
	 * and the companion PMU events have to be the same. By default
	 * the "perf record" tool sets all privilege bits ON when no filter
	 * criteria is provided on the command line. So as long as all
	 * privilege bits are either ON or OFF, we are good to go.
	 */
	if ((br_privilege != 7) && (br_privilege != 0))
		return -1;

	/* No branch filter requested */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
		return pmu_bhrb_filter;

	/* Invalid branch filter options - HW does not support */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
		pmu_bhrb_filter |= POWER8_MMCRA_IFM1;
		return pmu_bhrb_filter;
	}

	/* Everything else is unsupported */
	return -1;
}
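
/*
 * Illustrative sketch only: a hypothetical caller showing the mapping above.
 * With all privilege bits set and PERF_SAMPLE_BRANCH_ANY_CALL requested, the
 * filter resolves to POWER8_MMCRA_IFM1; PERF_SAMPLE_BRANCH_ANY needs no IFM
 * bits; mixed privilege bits or unsupported filters return -1.
 */
static inline u64 power8_example_bhrb_filter(void)
{
	u64 type = PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_KERNEL |
		   PERF_SAMPLE_BRANCH_HV | PERF_SAMPLE_BRANCH_ANY_CALL;

	return power8_bhrb_filter_map(type);	/* == POWER8_MMCRA_IFM1 */
}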
static void power8_config_bhrb(u64 pmu_bhrb_filter)
{
	/* Enable BHRB filter in PMU */
	mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}
static struct power_pmu power8_pmu = {
	.max_alternatives	= MAX_ALT + 1,
	.add_fields		= POWER8_ADD_FIELDS,
	.test_adder		= POWER8_TEST_ADDER,
	.compute_mmcr		= power8_compute_mmcr,
	.config_bhrb		= power8_config_bhrb,
	.bhrb_filter_map	= power8_bhrb_filter_map,
	.get_constraint		= power8_get_constraint,
	.get_alternatives	= power8_get_alternatives,
	.disable_pmc		= power8_disable_pmc,
	.flags			= PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB,
	.n_generic		= ARRAY_SIZE(power8_generic_events),
	.generic_events		= power8_generic_events,
	.attr_groups		= power8_pmu_attr_groups,
};
static int __init init_power8_pmu(void)
{
	if (!cur_cpu_spec->oprofile_cpu_type ||
	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
		return -ENODEV;

	return register_power_pmu(&power8_pmu);
}
early_initcall(init_power8_pmu);