/*
 * Copyright 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2011 Linaro Ltd.
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
#include <linux/irqchip/arm-gic.h>
/* GPC register offsets and bit fields */
#define GPC_CNTR		0x000	/* control: GPU/VPU power up/down requests */
#define GPC_IMR1		0x008	/* first of IMR_NUM interrupt mask registers */
#define GPC_PGC_GPU_PDN		0x260	/* PU (GPU/VPU) power-gate control */
#define GPC_PGC_GPU_PUPSCR	0x264	/* PU power-up timing (SW / SW2ISO counts) */
#define GPC_PGC_GPU_PDNSCR	0x268	/* PU power-down timing (ISO / ISO2SW counts) */
#define GPC_PGC_CPU_PDN		0x2a0	/* ARM core power-gate control */
#define GPC_PGC_CPU_PUPSCR	0x2a4	/* ARM core power-up timing */
#define GPC_PGC_CPU_PDNSCR	0x2a8	/* ARM core power-down timing */
#define GPC_PGC_SW2ISO_SHIFT	0x8
#define GPC_PGC_SW_SHIFT	0x0

#define IMR_NUM			4	/* number of 32-bit IMR registers */
#define GPC_MAX_IRQS		(IMR_NUM * 32)

#define GPU_VPU_PUP_REQ		BIT(1)
#define GPU_VPU_PDN_REQ		BIT(0)

#define GPC_CLK_MAX		6	/* max reset clocks in the PU domain */
47 struct generic_pm_domain base;
48 struct regulator *reg;
49 struct clk *clk[GPC_CLK_MAX];
53 static void __iomem *gpc_base;
54 static u32 gpc_wake_irqs[IMR_NUM];
55 static u32 gpc_saved_imrs[IMR_NUM];
57 void imx_gpc_set_arm_power_up_timing(u32 sw2iso, u32 sw)
59 writel_relaxed((sw2iso << GPC_PGC_SW2ISO_SHIFT) |
60 (sw << GPC_PGC_SW_SHIFT), gpc_base + GPC_PGC_CPU_PUPSCR);
63 void imx_gpc_set_arm_power_down_timing(u32 sw2iso, u32 sw)
65 writel_relaxed((sw2iso << GPC_PGC_SW2ISO_SHIFT) |
66 (sw << GPC_PGC_SW_SHIFT), gpc_base + GPC_PGC_CPU_PDNSCR);
69 void imx_gpc_set_arm_power_in_lpm(bool power_off)
71 writel_relaxed(power_off, gpc_base + GPC_PGC_CPU_PDN);
74 void imx_gpc_pre_suspend(bool arm_power_off)
76 void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
79 /* Tell GPC to power off ARM core when suspend */
81 imx_gpc_set_arm_power_in_lpm(arm_power_off);
83 for (i = 0; i < IMR_NUM; i++) {
84 gpc_saved_imrs[i] = readl_relaxed(reg_imr1 + i * 4);
85 writel_relaxed(~gpc_wake_irqs[i], reg_imr1 + i * 4);
89 void imx_gpc_post_resume(void)
91 void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
94 /* Keep ARM core powered on for other low-power modes */
95 imx_gpc_set_arm_power_in_lpm(false);
97 for (i = 0; i < IMR_NUM; i++)
98 writel_relaxed(gpc_saved_imrs[i], reg_imr1 + i * 4);
101 static int imx_gpc_irq_set_wake(struct irq_data *d, unsigned int on)
103 unsigned int idx = d->hwirq / 32;
106 mask = 1 << d->hwirq % 32;
107 gpc_wake_irqs[idx] = on ? gpc_wake_irqs[idx] | mask :
108 gpc_wake_irqs[idx] & ~mask;
111 * Do *not* call into the parent, as the GIC doesn't have any
112 * wake-up facility...
117 void imx_gpc_mask_all(void)
119 void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
122 for (i = 0; i < IMR_NUM; i++) {
123 gpc_saved_imrs[i] = readl_relaxed(reg_imr1 + i * 4);
124 writel_relaxed(~0, reg_imr1 + i * 4);
129 void imx_gpc_restore_all(void)
131 void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
134 for (i = 0; i < IMR_NUM; i++)
135 writel_relaxed(gpc_saved_imrs[i], reg_imr1 + i * 4);
138 void imx_gpc_hwirq_unmask(unsigned int hwirq)
143 reg = gpc_base + GPC_IMR1 + hwirq / 32 * 4;
144 val = readl_relaxed(reg);
145 val &= ~(1 << hwirq % 32);
146 writel_relaxed(val, reg);
149 void imx_gpc_hwirq_mask(unsigned int hwirq)
154 reg = gpc_base + GPC_IMR1 + hwirq / 32 * 4;
155 val = readl_relaxed(reg);
156 val |= 1 << (hwirq % 32);
157 writel_relaxed(val, reg);
160 static void imx_gpc_irq_unmask(struct irq_data *d)
162 imx_gpc_hwirq_unmask(d->hwirq);
163 irq_chip_unmask_parent(d);
166 static void imx_gpc_irq_mask(struct irq_data *d)
168 imx_gpc_hwirq_mask(d->hwirq);
169 irq_chip_mask_parent(d);
172 static struct irq_chip imx_gpc_chip = {
174 .irq_eoi = irq_chip_eoi_parent,
175 .irq_mask = imx_gpc_irq_mask,
176 .irq_unmask = imx_gpc_irq_unmask,
177 .irq_retrigger = irq_chip_retrigger_hierarchy,
178 .irq_set_wake = imx_gpc_irq_set_wake,
181 static int imx_gpc_domain_xlate(struct irq_domain *domain,
182 struct device_node *controller,
184 unsigned int intsize,
185 unsigned long *out_hwirq,
186 unsigned int *out_type)
188 if (domain->of_node != controller)
189 return -EINVAL; /* Shouldn't happen, really... */
191 return -EINVAL; /* Not GIC compliant */
193 return -EINVAL; /* No PPI should point to this domain */
195 *out_hwirq = intspec[1];
196 *out_type = intspec[2];
200 static int imx_gpc_domain_alloc(struct irq_domain *domain,
202 unsigned int nr_irqs, void *data)
204 struct of_phandle_args *args = data;
205 struct of_phandle_args parent_args;
206 irq_hw_number_t hwirq;
209 if (args->args_count != 3)
210 return -EINVAL; /* Not GIC compliant */
211 if (args->args[0] != 0)
212 return -EINVAL; /* No PPI should point to this domain */
214 hwirq = args->args[1];
215 if (hwirq >= GPC_MAX_IRQS)
216 return -EINVAL; /* Can't deal with this */
218 for (i = 0; i < nr_irqs; i++)
219 irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i,
220 &imx_gpc_chip, NULL);
223 parent_args.np = domain->parent->of_node;
224 return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs, &parent_args);
227 static struct irq_domain_ops imx_gpc_domain_ops = {
228 .xlate = imx_gpc_domain_xlate,
229 .alloc = imx_gpc_domain_alloc,
230 .free = irq_domain_free_irqs_common,
233 static int __init imx_gpc_init(struct device_node *node,
234 struct device_node *parent)
236 struct irq_domain *parent_domain, *domain;
240 pr_err("%s: no parent, giving up\n", node->full_name);
244 parent_domain = irq_find_host(parent);
245 if (!parent_domain) {
246 pr_err("%s: unable to obtain parent domain\n", node->full_name);
250 gpc_base = of_iomap(node, 0);
251 if (WARN_ON(!gpc_base))
254 domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
255 node, &imx_gpc_domain_ops,
262 /* Initially mask all interrupts */
263 for (i = 0; i < IMR_NUM; i++)
264 writel_relaxed(~0, gpc_base + GPC_IMR1 + i * 4);
270 * We cannot use the IRQCHIP_DECLARE macro that lives in
271 * drivers/irqchip, so we're forced to roll our own. Not very nice.
273 OF_DECLARE_2(irqchip, imx_gpc, "fsl,imx6q-gpc", imx_gpc_init);
275 #ifdef CONFIG_PM_GENERIC_DOMAINS
277 static void _imx6q_pm_pu_power_off(struct generic_pm_domain *genpd)
282 /* Read ISO and ISO2SW power down delays */
283 val = readl_relaxed(gpc_base + GPC_PGC_GPU_PDNSCR);
285 iso2sw = (val >> 8) & 0x3f;
287 /* Gate off PU domain when GPU/VPU when powered down */
288 writel_relaxed(0x1, gpc_base + GPC_PGC_GPU_PDN);
290 /* Request GPC to power down GPU/VPU */
291 val = readl_relaxed(gpc_base + GPC_CNTR);
292 val |= GPU_VPU_PDN_REQ;
293 writel_relaxed(val, gpc_base + GPC_CNTR);
295 /* Wait ISO + ISO2SW IPG clock cycles */
296 ndelay((iso + iso2sw) * 1000 / 66);
299 static int imx6q_pm_pu_power_off(struct generic_pm_domain *genpd)
301 struct pu_domain *pu = container_of(genpd, struct pu_domain, base);
303 _imx6q_pm_pu_power_off(genpd);
306 regulator_disable(pu->reg);
311 static int imx6q_pm_pu_power_on(struct generic_pm_domain *genpd)
313 struct pu_domain *pu = container_of(genpd, struct pu_domain, base);
314 int i, ret, sw, sw2iso;
318 ret = regulator_enable(pu->reg);
319 if (pu->reg && ret) {
320 pr_err("%s: failed to enable regulator: %d\n", __func__, ret);
324 /* Enable reset clocks for all devices in the PU domain */
325 for (i = 0; i < pu->num_clks; i++)
326 clk_prepare_enable(pu->clk[i]);
328 /* Gate off PU domain when GPU/VPU when powered down */
329 writel_relaxed(0x1, gpc_base + GPC_PGC_GPU_PDN);
331 /* Read ISO and ISO2SW power down delays */
332 val = readl_relaxed(gpc_base + GPC_PGC_GPU_PUPSCR);
334 sw2iso = (val >> 8) & 0x3f;
336 /* Request GPC to power up GPU/VPU */
337 val = readl_relaxed(gpc_base + GPC_CNTR);
338 val |= GPU_VPU_PUP_REQ;
339 writel_relaxed(val, gpc_base + GPC_CNTR);
341 /* Wait ISO + ISO2SW IPG clock cycles */
342 ndelay((sw + sw2iso) * 1000 / 66);
344 /* Disable reset clocks for all devices in the PU domain */
345 for (i = 0; i < pu->num_clks; i++)
346 clk_disable_unprepare(pu->clk[i]);
351 static struct generic_pm_domain imx6q_arm_domain = {
355 static struct pu_domain imx6q_pu_domain = {
358 .power_off = imx6q_pm_pu_power_off,
359 .power_on = imx6q_pm_pu_power_on,
360 .power_off_latency_ns = 25000,
361 .power_on_latency_ns = 2000000,
365 static struct generic_pm_domain imx6sl_display_domain = {
369 static struct generic_pm_domain *imx_gpc_domains[] = {
371 &imx6q_pu_domain.base,
372 &imx6sl_display_domain,
375 static struct genpd_onecell_data imx_gpc_onecell_data = {
376 .domains = imx_gpc_domains,
377 .num_domains = ARRAY_SIZE(imx_gpc_domains),
380 static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
386 imx6q_pu_domain.reg = pu_reg;
389 clk = of_clk_get(dev->of_node, i);
392 if (i >= GPC_CLK_MAX) {
393 dev_err(dev, "more than %d clocks\n", GPC_CLK_MAX);
396 imx6q_pu_domain.clk[i] = clk;
398 imx6q_pu_domain.num_clks = i;
400 is_off = IS_ENABLED(CONFIG_PM);
402 _imx6q_pm_pu_power_off(&imx6q_pu_domain.base);
405 * Enable power if compiled without CONFIG_PM in case the
406 * bootloader disabled it.
408 imx6q_pm_pu_power_on(&imx6q_pu_domain.base);
411 pm_genpd_init(&imx6q_pu_domain.base, NULL, is_off);
412 return of_genpd_add_provider_onecell(dev->of_node,
413 &imx_gpc_onecell_data);
417 clk_put(imx6q_pu_domain.clk[i]);
422 static inline int imx_gpc_genpd_init(struct device *dev, struct regulator *reg)
426 #endif /* CONFIG_PM_GENERIC_DOMAINS */
428 static int imx_gpc_probe(struct platform_device *pdev)
430 struct regulator *pu_reg;
433 pu_reg = devm_regulator_get_optional(&pdev->dev, "pu");
434 if (PTR_ERR(pu_reg) == -ENODEV)
436 if (IS_ERR(pu_reg)) {
437 ret = PTR_ERR(pu_reg);
438 dev_err(&pdev->dev, "failed to get pu regulator: %d\n", ret);
442 return imx_gpc_genpd_init(&pdev->dev, pu_reg);
445 static const struct of_device_id imx_gpc_dt_ids[] = {
446 { .compatible = "fsl,imx6q-gpc" },
447 { .compatible = "fsl,imx6sl-gpc" },
451 static struct platform_driver imx_gpc_driver = {
454 .owner = THIS_MODULE,
455 .of_match_table = imx_gpc_dt_ids,
457 .probe = imx_gpc_probe,
460 static int __init imx_pgc_init(void)
462 return platform_driver_register(&imx_gpc_driver);
464 subsys_initcall(imx_pgc_init);