2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
11 * Copyright (C) 2013 ARM Limited
13 * Author: Will Deacon <will.deacon@arm.com>
16 #define pr_fmt(fmt) "psci: " fmt
18 #include <linux/cpuidle.h>
19 #include <linux/init.h>
21 #include <linux/smp.h>
22 #include <linux/slab.h>
24 #include <asm/compiler.h>
25 #include <asm/cpu_ops.h>
26 #include <asm/errno.h>
28 #include <asm/smp_plat.h>
29 #include <asm/suspend.h>
/*
 * PSCI power_state "type" field values: standby (retention, CPU context
 * preserved) vs. power-down (context lost, resumes at an entry point).
 */
31 #define PSCI_POWER_STATE_TYPE_STANDBY 0
32 #define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
34 struct psci_power_state {
/*
 * Conduit-independent PSCI operations; populated by psci_init() for each
 * function ID found in the device tree (a NULL member means the firmware
 * does not advertise that call).
 */
struct psci_operations {
	int (*cpu_suspend)(struct psci_power_state state,
			   unsigned long entry_point);
	int (*cpu_off)(struct psci_power_state state);
	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
	int (*migrate)(unsigned long cpuid);
};
48 static struct psci_operations psci_ops;
50 static int (*invoke_psci_fn)(u64, u64, u64, u64);
60 static DEFINE_PER_CPU_READ_MOSTLY(struct psci_power_state *, psci_power_state);
62 static u32 psci_function_id[PSCI_FN_MAX];
64 #define PSCI_RET_SUCCESS 0
65 #define PSCI_RET_EOPNOTSUPP -1
66 #define PSCI_RET_EINVAL -2
67 #define PSCI_RET_EPERM -3
69 static int psci_to_linux_errno(int errno)
72 case PSCI_RET_SUCCESS:
74 case PSCI_RET_EOPNOTSUPP:
/*
 * Bit layout of the packed PSCI power_state argument:
 * id in bits [15:0], type in bit 16, affinity level in bits [25:24].
 */
85 #define PSCI_POWER_STATE_ID_MASK 0xffff
86 #define PSCI_POWER_STATE_ID_SHIFT 0
87 #define PSCI_POWER_STATE_TYPE_MASK 0x1
88 #define PSCI_POWER_STATE_TYPE_SHIFT 16
89 #define PSCI_POWER_STATE_AFFL_MASK 0x3
90 #define PSCI_POWER_STATE_AFFL_SHIFT 24
92 static u32 psci_power_state_pack(struct psci_power_state state)
94 return ((state.id & PSCI_POWER_STATE_ID_MASK)
95 << PSCI_POWER_STATE_ID_SHIFT) |
96 ((state.type & PSCI_POWER_STATE_TYPE_MASK)
97 << PSCI_POWER_STATE_TYPE_SHIFT) |
98 ((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK)
99 << PSCI_POWER_STATE_AFFL_SHIFT);
102 static void psci_power_state_unpack(u32 power_state,
103 struct psci_power_state *state)
105 state->id = (power_state >> PSCI_POWER_STATE_ID_SHIFT)
106 & PSCI_POWER_STATE_ID_MASK;
107 state->type = (power_state >> PSCI_POWER_STATE_TYPE_SHIFT)
108 & PSCI_POWER_STATE_TYPE_MASK;
109 state->affinity_level = (power_state >> PSCI_POWER_STATE_AFFL_SHIFT)
110 & PSCI_POWER_STATE_AFFL_MASK;
/*
 * Conduit trampolines: invoke_psci_fn points at one of the two functions
 * below depending on the DT "method" property (HVC vs. SMC).
 */
114 * The following two functions are invoked via the invoke_psci_fn pointer
115 * and will not be inlined, allowing us to piggyback on the AAPCS.
117 static noinline int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1,
/*
 * NOTE(review): the inline-asm body of the HVC trampoline (orig. lines
 * 118-131) is missing from this listing; only the input-operand line below
 * survives.  Presumably it issues "hvc #0" per the AAPCS — confirm against
 * the upstream source before relying on this.
 */
127 : "r" (arg0), "r" (arg1), "r" (arg2));
132 static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1,
/*
 * NOTE(review): likewise, the SMC trampoline body (orig. lines 133-146) is
 * missing; presumably it issues "smc #0" — confirm upstream.
 */
142 : "r" (arg0), "r" (arg1), "r" (arg2));
147 static int psci_cpu_suspend(struct psci_power_state state,
148 unsigned long entry_point)
153 fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
154 power_state = psci_power_state_pack(state);
155 err = invoke_psci_fn(fn, power_state, entry_point, 0);
156 return psci_to_linux_errno(err);
159 static int psci_cpu_off(struct psci_power_state state)
164 fn = psci_function_id[PSCI_FN_CPU_OFF];
165 power_state = psci_power_state_pack(state);
166 err = invoke_psci_fn(fn, power_state, 0, 0);
167 return psci_to_linux_errno(err);
170 static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
175 fn = psci_function_id[PSCI_FN_CPU_ON];
176 err = invoke_psci_fn(fn, cpuid, entry_point, 0);
177 return psci_to_linux_errno(err);
180 static int psci_migrate(unsigned long cpuid)
185 fn = psci_function_id[PSCI_FN_MIGRATE];
186 err = invoke_psci_fn(fn, cpuid, 0, 0);
187 return psci_to_linux_errno(err);
190 static const struct of_device_id psci_of_match[] __initconst = {
191 { .compatible = "arm,psci", },
/*
 * psci_dt_register_idle_states - bind DT-described cpuidle states to PSCI.
 * @drv: cpuidle driver whose states[0..state_count) are being registered.
 * @state_nodes: per-index DT node; a NULL entry falls back to standby/WFI.
 *
 * Allocates one psci_power_state array shared by every CPU in drv->cpumask,
 * publishes it via the per-cpu psci_power_state pointer, then fills each
 * entry from the node's "entry-method-param" property.
 * NOTE(review): this listing is missing many original lines (declarations,
 * braces, returns, error paths); the comments below annotate only what is
 * visible and must be re-checked against the complete source.
 */
195 int __init psci_dt_register_idle_states(struct cpuidle_driver *drv,
196 struct device_node *state_nodes[])
199 struct psci_power_state *psci_states;
200 const struct cpu_operations *cpu_ops_ptr;
/* Sanity check: every CPU covered by the driver must be using psci cpu_ops. */
205 * This is belt-and-braces: make sure that if the idle
206 * specified protocol is psci, the cpu_ops have been
207 * initialized to psci operations. Anything else is
208 * a recipe for mayhem.
210 for_each_cpu(cpu, drv->cpumask) {
211 cpu_ops_ptr = cpu_ops[cpu];
212 if (WARN_ON(!cpu_ops_ptr || strcmp(cpu_ops_ptr->name, "psci")))
/* One zero-initialized state array is shared by all CPUs of this driver. */
216 psci_states = kcalloc(drv->state_count, sizeof(*psci_states),
220 pr_warn("psci idle state allocation failed\n");
/* Publish the array through the per-cpu pointer; refuse double init. */
224 for_each_cpu(cpu, drv->cpumask) {
225 if (per_cpu(psci_power_state, cpu)) {
226 pr_warn("idle states already initialized on cpu %u\n",
230 per_cpu(psci_power_state, cpu) = psci_states;
/* Decode each state's packed PSCI power_state value from the DT. */
234 for (i = 0; i < drv->state_count; i++) {
235 u32 psci_power_state;
237 if (!state_nodes[i]) {
239 * An index with a missing node pointer falls back to
242 psci_states[i].type = PSCI_POWER_STATE_TYPE_STANDBY;
/* Missing "entry-method-param" also degrades the state to standby/WFI. */
246 if (of_property_read_u32(state_nodes[i], "entry-method-param",
247 &psci_power_state)) {
248 pr_warn(" * %s missing entry-method-param property\n",
249 state_nodes[i]->full_name);
251 * If entry-method-param property is missing, fall
252 * back to STANDBYWFI state
254 psci_states[i].type = PSCI_POWER_STATE_TYPE_STANDBY;
/* Valid property: unpack id/type/affinity_level into psci_states[i]. */
258 pr_debug("psci-power-state %#x index %u\n",
259 psci_power_state, i);
260 psci_power_state_unpack(psci_power_state, &psci_states[i]);
/*
 * psci_init - probe the PSCI firmware interface from the device tree.
 *
 * Locates the "arm,psci" node, selects the call conduit ("hvc" or "smc")
 * from the mandatory "method" property, then records each optional function
 * ID (cpu_suspend/cpu_off/cpu_on/migrate) and installs the corresponding
 * psci_ops callback only when the property is present.
 * NOTE(review): several original lines (local declarations, early returns,
 * of_node_put and the final label) are missing from this listing.
 */
266 void __init psci_init(void)
268 struct device_node *np;
272 np = of_find_matching_node(NULL, psci_of_match);
276 pr_info("probing function IDs from device-tree\n");
/* The conduit is mandatory; warn (and presumably bail) if it is absent. */
278 if (of_property_read_string(np, "method", &method)) {
279 pr_warning("missing \"method\" property\n");
283 if (!strcmp("hvc", method)) {
284 invoke_psci_fn = __invoke_psci_fn_hvc;
285 } else if (!strcmp("smc", method)) {
286 invoke_psci_fn = __invoke_psci_fn_smc;
288 pr_warning("invalid \"method\" property: %s\n", method);
/* Each function ID is optional; install the callback only when present. */
292 if (!of_property_read_u32(np, "cpu_suspend", &id)) {
293 psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
294 psci_ops.cpu_suspend = psci_cpu_suspend;
297 if (!of_property_read_u32(np, "cpu_off", &id)) {
298 psci_function_id[PSCI_FN_CPU_OFF] = id;
299 psci_ops.cpu_off = psci_cpu_off;
302 if (!of_property_read_u32(np, "cpu_on", &id)) {
303 psci_function_id[PSCI_FN_CPU_ON] = id;
304 psci_ops.cpu_on = psci_cpu_on;
307 if (!of_property_read_u32(np, "migrate", &id)) {
308 psci_function_id[PSCI_FN_MIGRATE] = id;
309 psci_ops.migrate = psci_migrate;
319 static int __init cpu_psci_cpu_init(struct device_node *dn, unsigned int cpu)
324 static int __init cpu_psci_cpu_prepare(unsigned int cpu)
326 if (!psci_ops.cpu_on) {
327 pr_err("no cpu_on method, not booting CPU%d\n", cpu);
334 static int cpu_psci_cpu_boot(unsigned int cpu)
336 int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
338 pr_err("failed to boot CPU%d (%d)\n", cpu, err);
343 #ifdef CONFIG_HOTPLUG_CPU
344 static int cpu_psci_cpu_disable(unsigned int cpu)
346 /* Fail early if we don't have CPU_OFF support */
347 if (!psci_ops.cpu_off)
352 static void cpu_psci_cpu_die(unsigned int cpu)
356 * There are no known implementations of PSCI actually using the
357 * power state field, pass a sensible default for now.
359 struct psci_power_state state = {
360 .type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
363 ret = psci_ops.cpu_off(state);
365 pr_crit("unable to power off CPU%u (%d)\n", cpu, ret);
#ifdef CONFIG_ARM64_CPU_SUSPEND
/*
 * cpu_ops suspend hook: enter the idle state at @index of this CPU's
 * registered state array, resuming at cpu_resume.
 * NOTE(review): the guard below is reconstructed — the listing dropped the
 * lines between the per-cpu load and the final call; confirm against the
 * complete source.
 */
static int cpu_psci_cpu_suspend(unsigned long index)
{
	struct psci_power_state *state = __get_cpu_var(psci_power_state);

	/* No idle states registered for this CPU (reconstructed guard). */
	if (!state)
		return -EOPNOTSUPP;

	return psci_ops.cpu_suspend(state[index], virt_to_phys(cpu_resume));
}
#endif
381 const struct cpu_operations cpu_psci_ops = {
383 .cpu_init = cpu_psci_cpu_init,
384 .cpu_prepare = cpu_psci_cpu_prepare,
385 .cpu_boot = cpu_psci_cpu_boot,
386 #ifdef CONFIG_HOTPLUG_CPU
387 .cpu_disable = cpu_psci_cpu_disable,
388 .cpu_die = cpu_psci_cpu_die,
390 #ifdef CONFIG_ARM64_CPU_SUSPEND
391 .cpu_suspend = cpu_psci_cpu_suspend,