/*
 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
 * Added mmcra[slot] support:
 * Copyright (C) 2006-2007 Will Schmidt <willschm@us.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/rtas.h>
#include <asm/oprofile_impl.h>
#include <asm/reg.h>

#define dbg(args...)

static unsigned long reset_value[OP_MAX_COUNTER];

static int oprofile_running;
static int use_slot_nums;

/* mmcr values are set in power4_reg_setup, used in power4_cpu_setup */
static u32 mmcr0_val;
static u64 mmcr1_val;
static u64 mmcra_val;

static int power4_reg_setup(struct op_counter_config *ctr,
                            struct op_system_config *sys,
                            int num_ctrs)
{
        int i;

        /*
         * The performance counter event settings are given in the mmcr0,
         * mmcr1 and mmcra values passed from the user in the
         * op_system_config structure (sys variable).
         */
        mmcr0_val = sys->mmcr0;
        mmcr1_val = sys->mmcr1;
        mmcra_val = sys->mmcra;
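
        /*
         * A classic PMC raises its overflow condition when the counter's
         * most-significant bit becomes set, so seeding each counter with
         * 0x80000000 - count makes it overflow after exactly "count" events.
         */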
        for (i = 0; i < cur_cpu_spec->num_pmcs; ++i)
                reset_value[i] = 0x80000000UL - ctr[i].count;

        /* setup user and kernel profiling */
        if (sys->enable_kernel)
                mmcr0_val &= ~MMCR0_KERNEL_DISABLE;
        else
                mmcr0_val |= MMCR0_KERNEL_DISABLE;

        if (sys->enable_user)
                mmcr0_val &= ~MMCR0_PROBLEM_DISABLE;
        else
                mmcr0_val |= MMCR0_PROBLEM_DISABLE;

        if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p) ||
            __is_processor(PV_970) || __is_processor(PV_970FX) ||
            __is_processor(PV_970MP) || __is_processor(PV_970GX) ||
            __is_processor(PV_POWER5) || __is_processor(PV_POWER5p))
                use_slot_nums = 1;

        return 0;
}

extern void ppc_enable_pmcs(void);

/*
 * Older CPUs require the MMCRA sample bit to be always set, but newer
 * CPUs only want it set for some groups. Eventually we will remove all
 * knowledge of this bit in the kernel; oprofile userspace should be
 * setting it when required.
 *
 * In order to keep current installations working we force the bit for
 * those older CPUs. Once everyone has updated their oprofile userspace we
 * can remove this hack.
 */
static inline int mmcra_must_set_sample(void)
{
        if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p) ||
            __is_processor(PV_970) || __is_processor(PV_970FX) ||
            __is_processor(PV_970MP) || __is_processor(PV_970GX))
                return 1;

        return 0;
}

static int power4_cpu_setup(struct op_counter_config *ctr)
{
        unsigned int mmcr0 = mmcr0_val;
        unsigned long mmcra = mmcra_val;

        ppc_enable_pmcs();

        /* set the freeze bit */
        mmcr0 |= MMCR0_FC;
        mtspr(SPRN_MMCR0, mmcr0);
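
        /*
         * FCM1 freezes the counters while MSR[PMM] is set, PMXE enables the
         * performance monitor exception, FCECE freezes the counters when an
         * enabled condition or event occurs, and PMC1CE/PMCjCE enable the
         * overflow conditions for PMC1 and the remaining PMCs.
         */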
        mmcr0 |= MMCR0_FCM1|MMCR0_PMXE|MMCR0_FCECE;
        mmcr0 |= MMCR0_PMC1CE|MMCR0_PMCjCE;
        mtspr(SPRN_MMCR0, mmcr0);

        mtspr(SPRN_MMCR1, mmcr1_val);

        if (mmcra_must_set_sample())
                mmcra |= MMCRA_SAMPLE_ENABLE;
        mtspr(SPRN_MMCRA, mmcra);

        dbg("setup on cpu %d, mmcr0 %lx\n", smp_processor_id(), mfspr(SPRN_MMCR0));
        dbg("setup on cpu %d, mmcr1 %lx\n", smp_processor_id(), mfspr(SPRN_MMCR1));
        dbg("setup on cpu %d, mmcra %lx\n", smp_processor_id(), mfspr(SPRN_MMCRA));

        return 0;
}

static int power4_start(struct op_counter_config *ctr)
{
        int i;
        unsigned int mmcr0;

        /* set the PMM bit (see comment below) */
        mtmsrd(mfmsr() | MSR_PMM);

        for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
                if (ctr[i].enabled) {
                        classic_ctr_write(i, reset_value[i]);
                } else {
                        classic_ctr_write(i, 0);
                }
        }

        mmcr0 = mfspr(SPRN_MMCR0);

        /*
         * We must clear the PMAO bit on some (GQ) chips. Just do it
         * all the time
         */
        mmcr0 &= ~MMCR0_PMAO;

        /*
         * now clear the freeze bit, counting will not start until we
         * rfid from this exception, because only at that point will
         * the PMM bit be cleared
         */
        mmcr0 &= ~MMCR0_FC;
        mtspr(SPRN_MMCR0, mmcr0);

        oprofile_running = 1;

        dbg("start on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);

        return 0;
}

static void power4_stop(void)
{
        unsigned int mmcr0;

        /* freeze counters */
        mmcr0 = mfspr(SPRN_MMCR0);
        mmcr0 |= MMCR0_FC;
        mtspr(SPRN_MMCR0, mmcr0);

        oprofile_running = 0;

        dbg("stop on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);

        /* make sure the register writes have completed before returning */
        mb();
}

/* Fake functions used by get_pc to classify unattributable samples */
static void __used hypervisor_bucket(void)
{
}

static void __used rtas_bucket(void)
{
}

static void __used kernel_unknown_bucket(void)
{
}

/*
 * On GQ and newer, the MMCRA stores the HV and PR bits at the time
 * the SIAR was sampled. We use that to work out if the SIAR was sampled in
 * the hypervisor, our exception vectors or RTAS.
 * If the MMCRA_SAMPLE_ENABLE bit is set, we can use the MMCRA[slot] bits
 * to more accurately identify the address of the sampled instruction. The
 * mmcra[slot] bits represent the slot number of a sampled instruction
 * within an instruction group. The slot will contain a value between 1
 * and 5 if MMCRA_SAMPLE_ENABLE is set, otherwise 0.
 */
static unsigned long get_pc(struct pt_regs *regs)
{
        unsigned long pc = mfspr(SPRN_SIAR);
        unsigned long mmcra;
        unsigned long slot;

        /* Can't do much about it */
        if (!cur_cpu_spec->oprofile_mmcra_sihv)
                return pc;

        mmcra = mfspr(SPRN_MMCRA);
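
        /*
         * The SIAR points at the first instruction of the sampled dispatch
         * group; instructions are 4 bytes wide, so MMCRA[slot] locates the
         * sampled instruction within that group.
         */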
        if (use_slot_nums && (mmcra & MMCRA_SAMPLE_ENABLE)) {
                slot = ((mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT);
                if (slot > 1)
                        pc += 4 * (slot - 1);
        }
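
        /*
         * The "function descriptor madness" below: on 64-bit (ELFv1 ABI)
         * a function symbol names a descriptor, so dereferencing it yields
         * the real entry-point address of the bucket function.
         */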
        /* Were we in the hypervisor? */
        if (firmware_has_feature(FW_FEATURE_LPAR) &&
            (mmcra & cur_cpu_spec->oprofile_mmcra_sihv))
                /* function descriptor madness */
                return *((unsigned long *)hypervisor_bucket);

        /* We were in userspace, nothing to do */
        if (mmcra & cur_cpu_spec->oprofile_mmcra_sipr)
                return pc;

#ifdef CONFIG_PPC_RTAS
        /* Were we in RTAS? */
        if (pc >= rtas.base && pc < (rtas.base + rtas.size))
                /* function descriptor madness */
                return *((unsigned long *)rtas_bucket);
#endif
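
        /*
         * The exception vectors and the SLB real mode miss handler run with
         * translation off, so the SIAR holds a physical address here and
         * __va() converts it back into the kernel linear mapping.
         */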
        /* Were we in our exception vectors or SLB real mode miss handler? */
        if (pc < 0x1000000UL)
                return (unsigned long)__va(pc);

        /* Not sure where we were */
        if (!is_kernel_addr(pc))
                /* function descriptor madness */
                return *((unsigned long *)kernel_unknown_bucket);

        return pc;
}
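
/*
 * MMCRA[SIPR] records MSR[PR] at the time the SIAR was set, so when the CPU
 * provides it we prefer it over a plain address check for deciding whether
 * the sample was taken in the kernel.
 */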
static int get_kernel(unsigned long pc, unsigned long mmcra)
{
        int is_kernel;

        if (!cur_cpu_spec->oprofile_mmcra_sihv) {
                is_kernel = is_kernel_addr(pc);
        } else {
                is_kernel = ((mmcra & cur_cpu_spec->oprofile_mmcra_sipr) == 0);
        }

        return is_kernel;
}
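
/*
 * A counter has overflowed when its most-significant bit is set, i.e. when
 * its value reads as negative through a signed 32-bit view.
 */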
static bool pmc_overflow(unsigned long val)
{
        if ((int)val < 0)
                return true;

        /*
         * Events on POWER7 can roll back if a speculative event doesn't
         * eventually complete. Unfortunately in some rare cases they will
         * raise a performance monitor exception. We need to catch this to
         * ensure we reset the PMC. In all cases the PMC will be 256 or less
         * cycles from overflow.
         *
         * We only do this if the first pass fails to find any overflowing
         * PMCs because a user might set a period of less than 256 and we
         * don't want to mistakenly reset them.
         */
        if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
                return true;

        return false;
}

static void power4_handle_interrupt(struct pt_regs *regs,
                                    struct op_counter_config *ctr)
{
        unsigned long pc;
        int is_kernel;
        int val;
        int i;
        unsigned int mmcr0;
        unsigned long mmcra;

        mmcra = mfspr(SPRN_MMCRA);

        pc = get_pc(regs);
        is_kernel = get_kernel(pc, mmcra);

        /* set the PMM bit (see comment below) */
        mtmsrd(mfmsr() | MSR_PMM);

        for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
                val = classic_ctr_read(i);
                if (pmc_overflow(val)) {
                        if (oprofile_running && ctr[i].enabled) {
                                oprofile_add_ext_sample(pc, regs, i, is_kernel);
                                classic_ctr_write(i, reset_value[i]);
                        } else {
                                classic_ctr_write(i, 0);
                        }
                }
        }

        mmcr0 = mfspr(SPRN_MMCR0);
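
        /*
         * Taking a performance monitor exception clears MMCR0[PMXE], so the
         * bit must be set again here to re-arm the next interrupt.
         */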
        /* reset the perfmon trigger */
        mmcr0 |= MMCR0_PMXE;

        /*
         * We must clear the PMAO bit on some (GQ) chips. Just do it
         * all the time
         */
        mmcr0 &= ~MMCR0_PMAO;

        /* Clear the appropriate bits in the MMCRA */
        mmcra &= ~cur_cpu_spec->oprofile_mmcra_clear;
        mtspr(SPRN_MMCRA, mmcra);

        /*
         * now clear the freeze bit, counting will not start until we
         * rfid from this exception, because only at that point will
         * the PMM bit be cleared
         */
        mmcr0 &= ~MMCR0_FC;
        mtspr(SPRN_MMCR0, mmcr0);
}

struct op_powerpc_model op_model_power4 = {
        .reg_setup              = power4_reg_setup,
        .cpu_setup              = power4_cpu_setup,
        .start                  = power4_start,
        .stop                   = power4_stop,
        .handle_interrupt       = power4_handle_interrupt,
};