/*
 * Copyright (C) 2014 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/idle.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/pm-cps.h>
#include <asm/smp-cps.h>
#include <asm/uasm.h>

/*
 * cps_nc_entry_fn - type of a generated non-coherent state entry function
 * @online: the count of online coupled VPEs
 * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count
 *
 * The code entering & exiting non-coherent states is generated at runtime
 * using uasm, in order to ensure that the compiler cannot insert a stray
 * memory access at an unfortunate time and to allow the generation of optimal
 * core-specific code particularly for cache routines. If coupled_coherence
 * is non-zero and this is the entry function for the CPS_PM_NC_WAIT state,
 * returns the number of VPEs that were in the wait state at the point this
 * VPE left it. Returns garbage if coupled_coherence is zero or this is not
 * the entry function for CPS_PM_NC_WAIT.
 */
typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count);
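
/*
 * Worked example (illustrative, not part of the original file): with
 * coupled_coherence non-zero and two coupled VPEs online, a CPS_PM_NC_WAIT
 * entry function returning 2 means both VPEs were still in the wait state
 * when this VPE left it, i.e. this VPE was the first to leave and must
 * wake its sibling.
 */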

/*
 * The entry point of the generated non-coherent idle state entry/exit
 * functions. Actually per-core rather than per-CPU.
 */
static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT],
				  nc_asm_enter);

/* Bitmap indicating which states are supported by the system */
DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);

/*
 * Indicates the number of coupled VPEs ready to operate in a non-coherent
 * state. Actually per-core rather than per-CPU.
 */
static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);

/* Indicates online CPUs coupled with the current CPU */
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);

/*
 * Used to synchronize entry to deep idle states. Actually per-core rather
 * than per-CPU.
 */
static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);

/* Saved CPU state across the CPS_PM_POWER_GATED state */
DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);

/* A somewhat arbitrary number of labels & relocs for uasm */
static struct uasm_label labels[32] __initdata;
static struct uasm_reloc relocs[32] __initdata;

/* CPU dependent sync types */
static unsigned stype_intervention;
static unsigned stype_memory;
static unsigned stype_ordering;

enum mips_reg {
	zero, at, v0, v1, a0, a1, a2, a3,
	t0, t1, t2, t3, t4, t5, t6, t7,
	s0, s1, s2, s3, s4, s5, s6, s7,
	t8, t9, k0, k1, gp, sp, fp, ra,
};

bool cps_pm_support_state(enum cps_pm_state state)
{
	return test_bit(state, state_support);
}

static void coupled_barrier(atomic_t *a, unsigned online)
{
	/*
	 * This function is effectively the same as
	 * cpuidle_coupled_parallel_barrier, which can't be used here since
	 * there's no cpuidle device.
	 */

	if (!coupled_coherence)
		return;

	smp_mb__before_atomic();
	atomic_inc(a);

	while (atomic_read(a) < online)
		cpu_relax();

	if (atomic_inc_return(a) == online * 2) {
		atomic_set(a, 0);
		return;
	}

	while (atomic_read(a) > online)
		cpu_relax();
}
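
/*
 * Worked example (illustrative): with online == 2, each VPE increments the
 * counter to reach 2, spinning in the first loop until both arrive. Each
 * then increments it again; the VPE whose increment takes the counter to 4
 * (online * 2) resets it to 0 for reuse, while the other spins in the
 * second loop until that reset makes the counter drop to or below online.
 */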

int cps_pm_enter_state(enum cps_pm_state state)
{
	unsigned cpu = smp_processor_id();
	unsigned core = current_cpu_data.core;
	unsigned online, left;
	cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
	u32 *core_ready_count, *nc_core_ready_count;
	void *nc_addr;
	cps_nc_entry_fn entry;
	struct core_boot_config *core_cfg;
	struct vpe_boot_config *vpe_cfg;

	/* Check that there is an entry function for this state */
	entry = per_cpu(nc_asm_enter, core)[state];
	if (!entry)
		return -EINVAL;

	/* Calculate which coupled CPUs (VPEs) are online */
#ifdef CONFIG_MIPS_MT
	if (cpu_online(cpu)) {
		cpumask_and(coupled_mask, cpu_online_mask,
			    &cpu_sibling_map[cpu]);
		online = cpumask_weight(coupled_mask);
		cpumask_clear_cpu(cpu, coupled_mask);
	} else
#endif
	{
		cpumask_clear(coupled_mask);
		online = 1;
	}

	/* Setup the VPE to run mips_cps_pm_restore when started again */
	if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
		core_cfg = &mips_cps_core_bootcfg[core];
		vpe_cfg = &core_cfg->vpe_config[current_cpu_data.vpe_id];
		vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
		vpe_cfg->gp = (unsigned long)current_thread_info();
		vpe_cfg->sp = 0;
	}

	/* Indicate that this CPU might not be coherent */
	cpumask_clear_cpu(cpu, &cpu_coherent_mask);
	smp_mb__after_atomic();

	/* Create a non-coherent mapping of the core ready_count */
	core_ready_count = per_cpu(ready_count, core);
	nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
				   (unsigned long)core_ready_count);
	nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
	nc_core_ready_count = nc_addr;
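
	/*
	 * Note (assumption about kmap_noncoherent semantics): the mapping
	 * returned covers the whole page, so the offset of ready_count
	 * within its page is added back to address the counter itself.
	 * E.g. a core_ready_count of 0x8f001a20 with 4KiB pages yields an
	 * offset of 0xa20 from the mapping base.
	 */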

	/* Ensure ready_count is zero-initialised before the assembly runs */
	ACCESS_ONCE(*nc_core_ready_count) = 0;
	coupled_barrier(&per_cpu(pm_barrier, core), online);

	/* Run the generated entry code */
	left = entry(online, nc_core_ready_count);

	/* Remove the non-coherent mapping of ready_count */
	kunmap_noncoherent();

	/* Indicate that this CPU is definitely coherent */
	cpumask_set_cpu(cpu, &cpu_coherent_mask);

	/*
	 * If this VPE is the first to leave the non-coherent wait state then
	 * it needs to wake up any coupled VPEs still running their wait
	 * instruction so that they return to cpuidle, which can then complete
	 * coordination between the coupled VPEs & provide the governor with
	 * a chance to reflect on the length of time the VPEs were in the
	 * idle state.
	 */
	if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online))
		arch_send_call_function_ipi_mask(coupled_mask);

	return 0;
}
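
/*
 * Illustrative usage (a hedged sketch, not part of the original file): a
 * cpuidle driver backing these states might invoke this function as:
 *
 *	err = cps_pm_enter_state(CPS_PM_NC_WAIT);
 *
 * The call returns once this VPE is coherent again, or -EINVAL if no entry
 * function was generated for the requested state.
 */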

static void __init cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
					 struct uasm_reloc **pr,
					 const struct cache_desc *cache,
					 unsigned op, int lbl)
{
	unsigned cache_size = cache->ways << cache->waybit;
	unsigned i;
	const unsigned unroll_lines = 32;

	/* If the cache isn't present this function has it easy */
	if (cache->flags & MIPS_CACHE_NOT_PRESENT)
		return;

	/* Load base address */
	UASM_i_LA(pp, t0, (long)CKSEG0);

	/* Calculate end address */
	if (cache_size < 0x8000)
		uasm_i_addiu(pp, t1, t0, cache_size);
	else
		UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size));

	/* Start of cache op loop */
	uasm_build_label(pl, *pp, lbl);

	/* Generate the cache ops */
	for (i = 0; i < unroll_lines; i++)
		uasm_i_cache(pp, op, i * cache->linesz, t0);

	/* Update the base address */
	uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);

	/* Loop if we haven't reached the end address yet */
	uasm_il_bne(pp, pr, t0, t1, lbl);
	uasm_i_nop(pp);
}
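
/*
 * Illustrative sketch (hedged, not emitted verbatim): for a cache with
 * 32-byte lines, cps_gen_cache_routine above generates roughly:
 *
 *	la	t0, CKSEG0
 *	la	t1, CKSEG0 + cache_size
 * 1:	cache	op, 0x000(t0)
 *	cache	op, 0x020(t0)
 *	...				# 32 unrolled cache ops in total
 *	cache	op, 0x3e0(t0)
 *	addiu	t0, t0, 0x400		# advance by unroll_lines lines
 *	bne	t0, t1, 1b
 *	 nop				# branch delay slot
 */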

static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
				    struct uasm_reloc **pr,
				    const struct cpuinfo_mips *cpu_info,
				    int lbl)
{
	unsigned i, fsb_size = 8;
	unsigned num_loads = (fsb_size * 3) / 2;
	unsigned line_stride = 2;
	unsigned line_size = cpu_info->dcache.linesz;
	unsigned perf_counter, perf_event;
	unsigned revision = cpu_info->processor_id & PRID_REV_MASK;

	/*
	 * Determine whether this CPU requires an FSB flush, and if so which
	 * performance counter/event reflect stalls due to a full FSB.
	 */
	switch (__get_cpu_type(cpu_info->cputype)) {
	case CPU_INTERAPTIV:
		perf_counter = 1;
		perf_event = 51;
		break;

	case CPU_PROAPTIV:
		/* Newer proAptiv cores don't require this workaround */
		if (revision >= PRID_REV_ENCODE_332(1, 1, 0))
			return 0;

		/* On older ones it's unavailable */
		return -1;

	/* CPUs which do not require the workaround */
	case CPU_P5600:
		return 0;

	default:
		WARN_ONCE(1, "pm-cps: FSB flush unsupported for this CPU\n");
		return -1;
	}

	/*
	 * Ensure that the fill/store buffer (FSB) is not holding the results
	 * of a prefetch, since if it is then the CPC sequencer may become
	 * stuck in the D3 (ClrBus) state whilst entering a low power state.
	 */

	/* Preserve perf counter setup */
	uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
	uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */

	/* Setup perf counter to count FSB full pipeline stalls */
	uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf);
	uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */
	uasm_i_ehb(pp);
	uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */
	uasm_i_ehb(pp);
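
	/*
	 * Note (assumption about the PerfCtl register layout): the value
	 * (perf_event << 5) | 0xf places the event number in the Event
	 * field starting at bit 5 and sets the EXL, K, S & U bits so that
	 * stalls are counted in all processor modes.
	 */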

	/* Base address for loads */
	UASM_i_LA(pp, t0, (long)CKSEG0);

	/* Start of clear loop */
	uasm_build_label(pl, *pp, lbl);

	/* Perform some loads to fill the FSB */
	for (i = 0; i < num_loads; i++)
		uasm_i_lw(pp, zero, i * line_size * line_stride, t0);

	/*
	 * Invalidate the new D-cache entries so that the cache will need
	 * refilling (via the FSB) if the loop is executed again.
	 */
	for (i = 0; i < num_loads; i++) {
		uasm_i_cache(pp, Hit_Invalidate_D,
			     i * line_size * line_stride, t0);
		uasm_i_cache(pp, Hit_Writeback_Inv_SD,
			     i * line_size * line_stride, t0);
	}

	/* Completion barrier */
	uasm_i_sync(pp, stype_memory);
	uasm_i_ehb(pp);

	/* Check whether the pipeline stalled due to the FSB being full */
	uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */

	/* Loop if it didn't */
	uasm_il_beqz(pp, pr, t1, lbl);
	uasm_i_nop(pp);

	/* Restore the perf counter setup. The count may well now be wrong... */
	uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
	uasm_i_ehb(pp);
	uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
	uasm_i_ehb(pp);

	return 0;
}

static void __init cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
				       struct uasm_reloc **pr,
				       unsigned r_addr, int lbl)
{
	uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
	uasm_build_label(pl, *pp, lbl);
	uasm_i_ll(pp, t1, 0, r_addr);
	uasm_i_or(pp, t1, t1, t0);
	uasm_i_sc(pp, t1, 0, r_addr);
	uasm_il_beqz(pp, pr, t1, lbl);
	uasm_i_nop(pp);
}
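
/*
 * Illustrative sketch (hedged) of the sequence generated above:
 *
 *	lui	t0, 0x8000		# t0 = 0x80000000
 * 1:	ll	t1, 0(r_addr)
 *	or	t1, t1, t0		# set the top bit of ready_count
 *	sc	t1, 0(r_addr)
 *	beqz	t1, 1b			# retry if the store-conditional failed
 *	 nop				# branch delay slot
 */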

static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
{
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *buf, *p;
	const unsigned r_online = a0;
	const unsigned r_nc_count = a1;
	const unsigned r_pcohctl = t7;
	const unsigned max_instrs = 256;
	unsigned cpc_cmd;
	int err;
	enum {
		lbl_incready = 1,
		lbl_poll_cont,
		lbl_secondary_hang,
		lbl_disable_coherence,
		lbl_flush_fsb,
		lbl_invicache,
		lbl_flushdcache,
		lbl_hang,
		lbl_set_cont,
		lbl_secondary_cont,
		lbl_decready,
	};

	/* Allocate a buffer to hold the generated code */
	p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
	if (!buf)
		return NULL;

	/* Clear labels & relocs ready for (re)use */
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
		/*
		 * Save CPU state. Note the non-standard calling convention
		 * with the return address placed in v0 to avoid clobbering
		 * the ra register before it is saved.
		 */
		UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
		uasm_i_jalr(&p, v0, t0);
		uasm_i_nop(&p);
	}

	/*
	 * Load addresses of required CM & CPC registers. This is done early
	 * because they're needed in both the enable & disable coherence steps
	 * but in the coupled case the enable step will only run on one VPE.
	 */
	UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence());

	if (coupled_coherence) {
		/* Increment ready_count */
		uasm_i_sync(&p, stype_ordering);
		uasm_build_label(&l, p, lbl_incready);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, 1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_incready);
		uasm_i_addiu(&p, t1, t1, 1);
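
		/*
		 * Illustrative sketch (hedged) of the sequence generated
		 * above, with r_nc_count in a1:
		 *
		 *	sync	stype_ordering
		 * 1:	ll	t1, 0(a1)
		 *	addiu	t2, t1, 1
		 *	sc	t2, 0(a1)
		 *	beqz	t2, 1b		# retry if the SC failed
		 *	 addiu	t1, t1, 1	# delay slot: t1 = new count
		 */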

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);

		/*
		 * If this is the last VPE to become ready for non-coherence
		 * then it should branch below.
		 */
		uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
		uasm_i_nop(&p);

		if (state < CPS_PM_POWER_GATED) {
			/*
			 * Otherwise this is not the last VPE to become ready
			 * for non-coherence. It needs to wait until coherence
			 * has been disabled before proceeding, which it will do
			 * by polling for the top bit of ready_count being set.
			 */
			uasm_i_addiu(&p, t1, zero, -1);
			uasm_build_label(&l, p, lbl_poll_cont);
			uasm_i_lw(&p, t0, 0, r_nc_count);
			uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
			uasm_i_ehb(&p);
			uasm_i_yield(&p, zero, t1);
			uasm_il_b(&p, &r, lbl_poll_cont);
			uasm_i_nop(&p);
		} else {
			/*
			 * The core will lose power & this VPE will not continue
			 * so it can simply halt here.
			 */
			uasm_i_addiu(&p, t0, zero, TCHALT_H);
			uasm_i_mtc0(&p, t0, 2, 4); /* TCHalt */
			uasm_build_label(&l, p, lbl_secondary_hang);
			uasm_il_b(&p, &r, lbl_secondary_hang);
			uasm_i_nop(&p);
		}
	}

	/*
	 * This is the point of no return - this VPE will now proceed to
	 * disable coherence. At this point we *must* be sure that no other
	 * VPE within the core will interfere with the L1 dcache.
	 */
	uasm_build_label(&l, p, lbl_disable_coherence);

	/* Invalidate the L1 icache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
			      Index_Invalidate_I, lbl_invicache);

	/* Writeback & invalidate the L1 dcache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
			      Index_Writeback_Inv_D, lbl_flushdcache);

	/* Completion barrier */
	uasm_i_sync(&p, stype_memory);
	uasm_i_ehb(&p);

	/*
	 * Disable all but self interventions. The load from COHCTL is defined
	 * by the interAptiv & proAptiv SUMs as ensuring that the operation
	 * resulting from the preceding store is complete.
	 */
	uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
	uasm_i_sw(&p, t0, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	/* Sync to ensure previous interventions are complete */
	uasm_i_sync(&p, stype_intervention);
	uasm_i_ehb(&p);

	/* Disable coherence */
	uasm_i_sw(&p, zero, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	if (state >= CPS_PM_CLOCK_GATED) {
		err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
					lbl_flush_fsb);
		if (err)
			goto out_err;

		/* Determine the CPC command to issue */
		switch (state) {
		case CPS_PM_CLOCK_GATED:
			cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
			break;
		case CPS_PM_POWER_GATED:
			cpc_cmd = CPC_Cx_CMD_PWRDOWN;
			break;
		default:
			BUG();
			goto out_err;
		}

		/* Issue the CPC command */
		UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
		uasm_i_addiu(&p, t1, zero, cpc_cmd);
		uasm_i_sw(&p, t1, 0, t0);

		if (state == CPS_PM_POWER_GATED) {
			/* If anything goes wrong just hang */
			uasm_build_label(&l, p, lbl_hang);
			uasm_il_b(&p, &r, lbl_hang);
			uasm_i_nop(&p);

			/*
			 * There's no point generating more code, the core is
			 * powered down & if powered back up will run from the
			 * reset vector not from here.
			 */
			goto gen_done;
		}

		/* Completion barrier */
		uasm_i_sync(&p, stype_memory);
		uasm_i_ehb(&p);
	}

	if (state == CPS_PM_NC_WAIT) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		if (coupled_coherence)
			cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
					    lbl_set_cont);

		/*
		 * VPEs which did not disable coherence will continue
		 * executing, after coherence has been disabled, from this
		 * point.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Now perform our wait */
		uasm_i_wait(&p, 0);
	}

	/*
	 * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
	 * will run this. The first will actually re-enable coherence & the
	 * rest will just be performing a rather unusual nop.
	 */
	uasm_i_addiu(&p, t0, zero, CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK);
	uasm_i_sw(&p, t0, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	/* Completion barrier */
	uasm_i_sync(&p, stype_memory);
	uasm_i_ehb(&p);

	if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
		/* Decrement ready_count */
		uasm_build_label(&l, p, lbl_decready);
		uasm_i_sync(&p, stype_ordering);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, -1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_decready);
		uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);
	}
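
	/*
	 * Note (illustrative): t1 holds the pre-decrement ready_count, whose
	 * top bit was set via cps_gen_set_top_bit. The andi masks that bit
	 * off so that v0, the generated function's return value, is the
	 * number of VPEs which were still in the wait state when this one
	 * left it. E.g. with smp_num_siblings == 2, fls() gives 2 and the
	 * mask is 0x3.
	 */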

	if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);

		/*
		 * This core will be reliant upon another core sending a
		 * power-up command to the CPC in order to resume operation.
		 * Thus an arbitrary VPE can't trigger the core leaving the
		 * idle state and the one that disables coherence might as well
		 * be the one to re-enable it. The rest will continue from here
		 * after that has been done.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);
	}

	/* The core is coherent, time to return to C code */
	uasm_i_jr(&p, ra);
	uasm_i_nop(&p);

gen_done:
	/* Ensure the code didn't exceed the resources allocated for it */
	BUG_ON((p - buf) > max_instrs);
	BUG_ON((l - labels) > ARRAY_SIZE(labels));
	BUG_ON((r - relocs) > ARRAY_SIZE(relocs));

	/* Patch branch offsets */
	uasm_resolve_relocs(relocs, labels);

	/* Flush the icache */
	local_flush_icache_range((unsigned long)buf, (unsigned long)p);

	return buf;
out_err:
	kfree(buf);
	return NULL;
}

static int __init cps_gen_core_entries(unsigned cpu)
{
	enum cps_pm_state state;
	unsigned core = cpu_data[cpu].core;
	unsigned dlinesz = cpu_data[cpu].dcache.linesz;
	void *entry_fn, *core_rc;

	for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
		if (per_cpu(nc_asm_enter, core)[state])
			continue;
		if (!test_bit(state, state_support))
			continue;

		entry_fn = cps_gen_entry_code(cpu, state);
		if (!entry_fn) {
			pr_err("Failed to generate core %u state %u entry\n",
			       core, state);
			clear_bit(state, state_support);
		}

		per_cpu(nc_asm_enter, core)[state] = entry_fn;
	}

	if (!per_cpu(ready_count, core)) {
		core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
		if (!core_rc) {
			pr_err("Failed to allocate core %u ready_count\n", core);
			return -ENOMEM;
		}
		per_cpu(ready_count_alloc, core) = core_rc;

		/* Ensure ready_count is aligned to a cacheline boundary */
		core_rc += dlinesz - 1;
		core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
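		/*
		 * Worked example (illustrative): with dlinesz == 32 and
		 * kmalloc returning 0x8f001a08, adding 31 gives 0x8f001a27
		 * and masking gives the 32-byte aligned 0x8f001a20, which
		 * the dlinesz * 2 allocation guarantees still has a whole
		 * cacheline available within the buffer.
		 */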
		per_cpu(ready_count, core) = core_rc;
	}

	return 0;
}

static int __init cps_pm_init(void)
{
	unsigned cpu;
	int err;

	/* Detect appropriate sync types for the system */
	switch (current_cpu_data.cputype) {
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_M5150:
	case CPU_P5600:
		stype_intervention = 0x2;
		stype_memory = 0x3;
		stype_ordering = 0x10;
		break;

	default:
		pr_warn("Power management is using heavyweight sync 0\n");
	}
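
	/*
	 * Note (assumption): sync stype 0 is the architecturally required
	 * heavyweight full barrier; the values above select lighter-weight
	 * implementation-defined variants on these cores, covering
	 * interventions, memory completion & ordering respectively.
	 */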

	/* A CM is required for all non-coherent states */
	if (!mips_cm_present()) {
		pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
		return 0;
	}

	/*
	 * If interrupts were enabled whilst running a wait instruction on a
	 * non-coherent core then the VPE may end up processing interrupts
	 * whilst non-coherent. That would be bad.
	 */
	if (cpu_wait == r4k_wait_irqoff)
		set_bit(CPS_PM_NC_WAIT, state_support);
	else
		pr_warn("pm-cps: non-coherent wait unavailable\n");

	/* Detect whether a CPC is present */
	if (mips_cpc_present()) {
		/* Detect whether clock gating is implemented */
		if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK)
			set_bit(CPS_PM_CLOCK_GATED, state_support);
		else
			pr_warn("pm-cps: CPC does not support clock gating\n");

		/* Power gating is available with CPS SMP & any CPC */
		if (mips_cps_smp_in_use())
			set_bit(CPS_PM_POWER_GATED, state_support);
		else
			pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n");
	} else {
		pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
	}

	for_each_present_cpu(cpu) {
		err = cps_gen_core_entries(cpu);
		if (err)
			return err;
	}

	return 0;
}
arch_initcall(cps_pm_init);