/*
 * Versatile Express Serial Power Controller (SPC) support
 *
 * Copyright (C) 2013 ARM Ltd.
 *
 * Authors: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
 *          Achin Gupta           <achin.gupta@arm.com>
 *          Lorenzo Pieralisi     <lorenzo.pieralisi@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/vexpress.h>

#include <asm/cacheflush.h>
/* SCC (Serial Configuration Controller) register offsets */
#define SCC_CFGREG19		0x120
#define SCC_CFGREG20		0x124
#define A15_CONF		0x400
#define SYS_INFO		0x700

/* M3 microcontroller mailbox / power-control register offsets */
#define PERF_LVL_A15		0xB00
#define PERF_REQ_A15		0xB04
#define PERF_LVL_A7		0xB08
#define PERF_REQ_A7		0xB0c
#define SYS_CFGCTRL		0xB10
#define SYS_CFGCTRL_REQ		0xB14
#define PWC_STATUS		0xB18
#define PWC_FLAG		0xB1c
#define WAKE_INT_MASK		0xB24
#define WAKE_INT_RAW		0xB28
#define WAKE_INT_STAT		0xB2c
#define A15_PWRDN_EN		0xB30
#define A7_PWRDN_EN		0xB34
#define A7_PWRDNACK		0xB54
#define A15_BX_ADDR0		0xB68
#define SYS_CFG_WDATA		0xB70
#define SYS_CFG_RDATA		0xB74
#define A7_BX_ADDR0		0xB78

/* Global wake-up IRQ enables occupy bits [11:10] of WAKE_INT_MASK */
#define GBL_WAKEUP_INT_MSK	(0x3 << 10)

/*
 * PLL divider field masks (the matching *_SHIFT definitions are declared
 * alongside these masks elsewhere in this file).
 */
#define CLKF_MASK		0x1FFF
#define CLKR_MASK		0x3F
#define CLKOD_MASK		0xF

/* M3 config function/device used to retrieve operating points */
#define OPP_FUNCTION		6
#define OPP_BASE_DEVICE		0x300
#define OPP_A15_OFFSET		0x4
#define OPP_A7_OFFSET		0xc

/*
 * SYS_CFGCTRL command encoding.
 * Bit 31 must be built from an unsigned constant: (1 << 31) left-shifts
 * into the sign bit of a signed int, which is undefined behaviour in C.
 */
#define SYS_CFGCTRL_START	(1U << 31)
#define SYS_CFGCTRL_WRITE	(1 << 30)
#define SYS_CFGCTRL_FUNC(n)	(((n) & 0x3f) << 20)
#define SYS_CFGCTRL_DEVICE(n)	(((n) & 0xfff) << 0)

#define MAX_CLUSTERS		2

/*
 * PWC_STATUS response layout: each request type owns one nibble,
 * bit 0 of the nibble = complete, bit 1 = error.
 */
#define STAT_COMPLETE(type)	((1 << 0) << (type << 2))
#define STAT_ERR(type)		((1 << 1) << (type << 2))
#define RESPONSE_MASK(type)	(STAT_COMPLETE(type) | STAT_ERR(type))
89 struct vexpress_spc_drvdata {
90 void __iomem *baseaddr;
94 u32 freqs[MAX_CLUSTERS][MAX_OPPS];
95 int freqs_cnt[MAX_CLUSTERS];
103 struct vexpress_spc_func {
104 enum spc_func_type type;
109 static struct vexpress_spc_drvdata *info;
110 static u32 *vexpress_spc_config_data;
111 static struct vexpress_config_bridge *vexpress_spc_config_bridge;
112 static struct vexpress_config_func *opp_func, *perf_func;
114 static int vexpress_spc_load_result = -EAGAIN;
116 static bool vexpress_spc_initialized(void)
118 return vexpress_spc_load_result == 0;
122 * vexpress_spc_write_resume_reg() - set the jump address used for warm boot
124 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
125 * @cpu: mpidr[7:0] bitfield describing cpu affinity level
126 * @addr: physical resume address
128 void vexpress_spc_write_resume_reg(u32 cluster, u32 cpu, u32 addr)
130 void __iomem *baseaddr;
132 if (WARN_ON_ONCE(cluster >= MAX_CLUSTERS))
135 if (cluster != info->a15_clusid)
136 baseaddr = info->baseaddr + A7_BX_ADDR0 + (cpu << 2);
138 baseaddr = info->baseaddr + A15_BX_ADDR0 + (cpu << 2);
140 writel_relaxed(addr, baseaddr);
144 * vexpress_spc_get_nb_cpus() - get number of cpus in a cluster
146 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
148 * Return: number of cpus in the cluster
149 * -EINVAL if cluster number invalid
151 int vexpress_spc_get_nb_cpus(u32 cluster)
155 if (WARN_ON_ONCE(cluster >= MAX_CLUSTERS))
158 val = readl_relaxed(info->baseaddr + SYS_INFO);
159 val = (cluster != info->a15_clusid) ? (val >> 20) : (val >> 16);
162 EXPORT_SYMBOL_GPL(vexpress_spc_get_nb_cpus);
165 * vexpress_spc_get_performance - get current performance level of cluster
166 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
167 * @freq: pointer to the performance level to be assigned
169 * Return: 0 on success
172 int vexpress_spc_get_performance(u32 cluster, u32 *freq)
177 if (!vexpress_spc_initialized() || (cluster >= MAX_CLUSTERS))
180 perf_cfg_reg = cluster != info->a15_clusid ? PERF_LVL_A7 : PERF_LVL_A15;
181 ret = vexpress_config_read(perf_func, perf_cfg_reg, &perf);
184 *freq = info->freqs[cluster][perf];
188 EXPORT_SYMBOL_GPL(vexpress_spc_get_performance);
191 * vexpress_spc_get_perf_index - get performance level corresponding to
193 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
194 * @freq: frequency to be looked-up
196 * Return: perf level index on success
199 static int vexpress_spc_find_perf_index(u32 cluster, u32 freq)
203 for (idx = 0; idx < info->freqs_cnt[cluster]; idx++)
204 if (info->freqs[cluster][idx] == freq)
206 return (idx == info->freqs_cnt[cluster]) ? -EINVAL : idx;
210 * vexpress_spc_set_performance - set current performance level of cluster
212 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
213 * @freq: performance level to be programmed
215 * Returns: 0 on success
218 int vexpress_spc_set_performance(u32 cluster, u32 freq)
220 int ret, perf, offset;
222 if (!vexpress_spc_initialized() || (cluster >= MAX_CLUSTERS))
225 offset = (cluster != info->a15_clusid) ? PERF_LVL_A7 : PERF_LVL_A15;
227 perf = vexpress_spc_find_perf_index(cluster, freq);
229 if (perf < 0 || perf >= MAX_OPPS)
232 ret = vexpress_config_write(perf_func, offset, perf);
236 EXPORT_SYMBOL_GPL(vexpress_spc_set_performance);
238 static void vexpress_spc_set_wake_intr(u32 mask)
240 writel_relaxed(mask & VEXPRESS_SPC_WAKE_INTR_MASK,
241 info->baseaddr + WAKE_INT_MASK);
244 static inline void reg_bitmask(u32 *reg, u32 mask, bool set)
253 * vexpress_spc_set_global_wakeup_intr()
255 * Function to set/clear global wakeup IRQs. Not protected by locking since
256 * it might be used in code paths where normal cacheable locks are not
257 * working. Locking must be provided by the caller to ensure atomicity.
259 * @set: if true, global wake-up IRQs are set, if false they are cleared
261 void vexpress_spc_set_global_wakeup_intr(bool set)
263 u32 wake_int_mask_reg = 0;
265 wake_int_mask_reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK);
266 reg_bitmask(&wake_int_mask_reg, GBL_WAKEUP_INT_MSK, set);
267 vexpress_spc_set_wake_intr(wake_int_mask_reg);
271 * vexpress_spc_set_cpu_wakeup_irq()
273 * Function to set/clear per-CPU wake-up IRQs. Not protected by locking since
274 * it might be used in code paths where normal cacheable locks are not
275 * working. Locking must be provided by the caller to ensure atomicity.
277 * @cpu: mpidr[7:0] bitfield describing cpu affinity level
278 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
279 * @set: if true, wake-up IRQs are set, if false they are cleared
281 void vexpress_spc_set_cpu_wakeup_irq(u32 cpu, u32 cluster, bool set)
284 u32 wake_int_mask_reg = 0;
287 if (info->a15_clusid != cluster)
290 wake_int_mask_reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK);
291 reg_bitmask(&wake_int_mask_reg, mask, set);
292 vexpress_spc_set_wake_intr(wake_int_mask_reg);
296 * vexpress_spc_powerdown_enable()
298 * Function to enable/disable cluster powerdown. Not protected by locking
299 * since it might be used in code paths where normal cacheable locks are not
300 * working. Locking must be provided by the caller to ensure atomicity.
302 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
303 * @enable: if true enables powerdown, if false disables it
305 void vexpress_spc_powerdown_enable(u32 cluster, bool enable)
309 if (cluster >= MAX_CLUSTERS)
311 pwdrn_reg = cluster != info->a15_clusid ? A7_PWRDN_EN : A15_PWRDN_EN;
312 writel_relaxed(enable, info->baseaddr + pwdrn_reg);
315 irqreturn_t vexpress_spc_irq_handler(int irq, void *data)
318 u32 status = readl_relaxed(info->baseaddr + PWC_STATUS);
320 if (!(status & RESPONSE_MASK(info->cur_req_type)))
323 if ((status == STAT_COMPLETE(SYS_CFGCTRL_TYPE))
324 && vexpress_spc_config_data) {
325 *vexpress_spc_config_data =
326 readl_relaxed(info->baseaddr + SYS_CFG_RDATA);
327 vexpress_spc_config_data = NULL;
330 ret = STAT_COMPLETE(info->cur_req_type) ? 0 : -EIO;
331 info->cur_req_type = INVALID_TYPE;
332 vexpress_config_complete(vexpress_spc_config_bridge, ret);
337 * Based on the firmware documentation, this is always fixed to 20
338 * All the 4 OSC: A15 PLL0/1, A7 PLL0/1 must be programmed same
339 * values for both control and value registers.
340 * This function uses A15 PLL 0 registers to compute multiple factor
341 * F out = F in * (CLKF + 1) / ((CLKOD + 1) * (CLKR + 1))
343 static inline int __get_mult_factor(void)
345 int i_div, o_div, f_div;
348 tmp = readl(info->baseaddr + SCC_CFGREG19);
349 f_div = (tmp >> CLKF_SHIFT) & CLKF_MASK;
351 tmp = readl(info->baseaddr + SCC_CFGREG20);
352 o_div = (tmp >> CLKOD_SHIFT) & CLKOD_MASK;
353 i_div = (tmp >> CLKR_SHIFT) & CLKR_MASK;
355 return (f_div + 1) / ((o_div + 1) * (i_div + 1));
359 * vexpress_spc_populate_opps() - initialize opp tables from microcontroller
361 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
363 * Return: 0 on success
366 static int vexpress_spc_populate_opps(u32 cluster)
368 u32 data = 0, ret, i, offset;
369 int mult_fact = __get_mult_factor();
371 if (WARN_ON_ONCE(cluster >= MAX_CLUSTERS))
374 offset = cluster != info->a15_clusid ? OPP_A7_OFFSET : OPP_A15_OFFSET;
375 for (i = 0; i < MAX_OPPS; i++) {
376 ret = vexpress_config_read(opp_func, i + offset, &data);
378 info->freqs[cluster][i] = (data & 0xFFFFF) * mult_fact;
383 info->freqs_cnt[cluster] = i;
388 * vexpress_spc_get_freq_table() - Retrieve a pointer to the frequency
389 * table for a given cluster
391 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
392 * @fptr: pointer to be initialized
393 * Return: operating points count on success
394 * -EINVAL on pointer error
396 int vexpress_spc_get_freq_table(u32 cluster, u32 **fptr)
398 if (WARN_ON_ONCE(!fptr || cluster >= MAX_CLUSTERS))
400 *fptr = info->freqs[cluster];
401 return info->freqs_cnt[cluster];
403 EXPORT_SYMBOL_GPL(vexpress_spc_get_freq_table);
405 static void *vexpress_spc_func_get(struct device *dev,
406 struct device_node *node, const char *id)
408 struct vexpress_spc_func *spc_func;
412 spc_func = kzalloc(sizeof(*spc_func), GFP_KERNEL);
416 if (strcmp(id, "opp") == 0) {
417 spc_func->type = CONFIG_FUNC;
418 spc_func->function = OPP_FUNCTION;
419 spc_func->device = OPP_BASE_DEVICE;
420 } else if (strcmp(id, "perf") == 0) {
421 spc_func->type = PERF_FUNC;
424 err = of_property_read_u32_array(node,
425 "arm,vexpress-sysreg,func", func_device,
426 ARRAY_SIZE(func_device));
428 spc_func->type = CONFIG_FUNC;
429 spc_func->function = func_device[0];
430 spc_func->device = func_device[1];
438 pr_debug("func 0x%p = 0x%x, %d %d\n", spc_func,
446 static void vexpress_spc_func_put(void *func)
451 static int vexpress_spc_func_exec(void *func, int offset, bool write,
454 struct vexpress_spc_func *spc_func = func;
460 * Setting and retrieval of operating points is not part of
461 * DCC config interface. It was made to go through the same
462 * code path so that requests to the M3 can be serialized
463 * properly with config reads/writes through the common
464 * vexpress config interface
466 switch (spc_func->type) {
469 info->cur_req_type = (offset == PERF_LVL_A15) ?
470 A15_OPP_TYPE : A7_OPP_TYPE;
471 writel_relaxed(*data, info->baseaddr + offset);
472 return VEXPRESS_CONFIG_STATUS_WAIT;
474 *data = readl_relaxed(info->baseaddr + offset);
475 return VEXPRESS_CONFIG_STATUS_DONE;
478 info->cur_req_type = SYS_CFGCTRL_TYPE;
480 command = SYS_CFGCTRL_START;
481 command |= write ? SYS_CFGCTRL_WRITE : 0;
482 command |= SYS_CFGCTRL_FUNC(spc_func->function);
483 command |= SYS_CFGCTRL_DEVICE(spc_func->device + offset);
485 pr_debug("command %x\n", command);
488 vexpress_spc_config_data = data;
490 writel_relaxed(*data, info->baseaddr + SYS_CFG_WDATA);
491 writel_relaxed(command, info->baseaddr + SYS_CFGCTRL);
493 return VEXPRESS_CONFIG_STATUS_WAIT;
499 struct vexpress_config_bridge_info vexpress_spc_config_bridge_info = {
500 .name = "vexpress-spc",
501 .func_get = vexpress_spc_func_get,
502 .func_put = vexpress_spc_func_put,
503 .func_exec = vexpress_spc_func_exec,
506 static const struct of_device_id vexpress_spc_ids[] __initconst = {
507 { .compatible = "arm,vexpress-spc,v2p-ca15_a7" },
508 { .compatible = "arm,vexpress-spc" },
512 static int __init vexpress_spc_init(void)
515 struct device_node *node = of_find_matching_node(NULL,
521 info = kzalloc(sizeof(*info), GFP_KERNEL);
523 pr_err("%s: unable to allocate mem\n", __func__);
526 info->cur_req_type = INVALID_TYPE;
528 info->baseaddr = of_iomap(node, 0);
529 if (WARN_ON(!info->baseaddr)) {
534 info->irq = irq_of_parse_and_map(node, 0);
536 if (WARN_ON(!info->irq)) {
541 readl_relaxed(info->baseaddr + PWC_STATUS);
543 ret = request_irq(info->irq, vexpress_spc_irq_handler,
544 IRQF_DISABLED | IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
548 pr_err("IRQ %d request failed\n", info->irq);
553 info->a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
555 vexpress_spc_config_bridge = vexpress_config_bridge_register(
556 node, &vexpress_spc_config_bridge_info);
558 if (WARN_ON(!vexpress_spc_config_bridge)) {
563 opp_func = vexpress_config_func_get(vexpress_spc_config_bridge, "opp");
565 vexpress_config_func_get(vexpress_spc_config_bridge, "perf");
567 if (!opp_func || !perf_func) {
572 if (vexpress_spc_populate_opps(0) || vexpress_spc_populate_opps(1)) {
574 free_irq(info->irq, info);
575 pr_err("failed to build OPP table\n");
580 * Multi-cluster systems may need this data when non-coherent, during
581 * cluster power-up/power-down. Make sure it reaches main memory:
585 pr_info("vexpress-spc loaded at %p\n", info->baseaddr);
589 iounmap(info->baseaddr);
596 static bool __init __vexpress_spc_check_loaded(void);
598 * Pointer spc_check_loaded is swapped after init hence it is safe
599 * to initialize it to a function in the __init section
601 static bool (*spc_check_loaded)(void) __refdata = &__vexpress_spc_check_loaded;
603 static bool __init __vexpress_spc_check_loaded(void)
605 if (vexpress_spc_load_result == -EAGAIN)
606 vexpress_spc_load_result = vexpress_spc_init();
607 spc_check_loaded = &vexpress_spc_initialized;
608 return vexpress_spc_initialized();
612 * Function exported to manage early_initcall ordering.
613 * SPC code is needed very early in the boot process
614 * to bring CPUs out of reset and initialize power
615 * management back-end. After boot swap pointers to
616 * make the functionality check available to loadable
617 * modules, when early boot init functions have been
618 * already freed from kernel address space.
620 bool vexpress_spc_check_loaded(void)
622 return spc_check_loaded();
624 EXPORT_SYMBOL_GPL(vexpress_spc_check_loaded);
626 static int __init vexpress_spc_early_init(void)
628 __vexpress_spc_check_loaded();
629 return vexpress_spc_load_result;
631 early_initcall(vexpress_spc_early_init);
632 MODULE_LICENSE("GPL");
633 MODULE_DESCRIPTION("Serial Power Controller (SPC) support");