/*
 * Marvell Armada 370 and Armada XP SoC IRQ handling
 *
 * Copyright (C) 2012 Marvell
 *
 * Lior Amsalem <alior@marvell.com>
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 * Ben Dooks <ben.dooks@codethink.co.uk>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <asm/mach/arch.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/hardware/cache-l2x0.h>
/* Interrupt Controller Registers Map */

/* Per-CPU (banked) registers */
#define ARMADA_370_XP_INT_SET_MASK_OFFS		(0x48)
#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS	(0x4C)

/* Global registers */
#define ARMADA_370_XP_INT_CONTROL		(0x00)
#define ARMADA_370_XP_INT_SET_ENABLE_OFFS	(0x30)
#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS	(0x34)
/* One source-control (routing) register per interrupt; the macro
 * argument is parenthesized so SOURCE_CTL(a + b) expands correctly. */
#define ARMADA_370_XP_INT_SOURCE_CTL(irq)	(0x100 + (irq) * 4)

#define ARMADA_370_XP_CPU_INTACK_OFFS		(0x44)

/* IPI (software-triggered interrupt / doorbell) registers */
#define ARMADA_370_XP_SW_TRIG_INT_OFFS		(0x4)
#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS		(0xc)
#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS	(0x8)

#define ARMADA_370_XP_MAX_PER_CPU_IRQS		(28)

/* Hardware IRQ number of the banked Timer0 interrupt */
#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ	(5)

/* Number of IPI doorbells actually used */
#define ACTIVE_DOORBELLS			(8)
51 static DEFINE_RAW_SPINLOCK(irq_controller_lock);
53 static void __iomem *per_cpu_int_base;
54 static void __iomem *main_int_base;
55 static struct irq_domain *armada_370_xp_mpic_domain;
59 * For shared global interrupts, mask/unmask global enable bit
60 * For CPU interrtups, mask/unmask the calling CPU's bit
62 static void armada_370_xp_irq_mask(struct irq_data *d)
65 irq_hw_number_t hwirq = irqd_to_hwirq(d);
67 if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
68 writel(hwirq, main_int_base +
69 ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
71 writel(hwirq, per_cpu_int_base +
72 ARMADA_370_XP_INT_SET_MASK_OFFS);
74 writel(irqd_to_hwirq(d),
75 per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
79 static void armada_370_xp_irq_unmask(struct irq_data *d)
82 irq_hw_number_t hwirq = irqd_to_hwirq(d);
84 if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
85 writel(hwirq, main_int_base +
86 ARMADA_370_XP_INT_SET_ENABLE_OFFS);
88 writel(hwirq, per_cpu_int_base +
89 ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
91 writel(irqd_to_hwirq(d),
92 per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
#ifdef CONFIG_SMP
/*
 * Route a global interrupt to exactly one CPU by rewriting its
 * source-control register.
 *
 * Returns 0 on success, -EINVAL if @mask_val names more than one CPU.
 */
static int armada_xp_set_affinity(struct irq_data *d,
				  const struct cpumask *mask_val, bool force)
{
	unsigned long reg;
	unsigned long new_mask = 0;
	unsigned long online_mask = 0;
	unsigned long count = 0;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	int cpu;

	/* Convert the logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask_val) {
		new_mask |= 1 << cpu_logical_map(cpu);
		count++;
	}

	/*
	 * Forbid multicore interrupt affinity.
	 * This is required since the MPIC HW doesn't limit
	 * several CPUs from acknowledging the same interrupt.
	 */
	if (count > 1)
		return -EINVAL;

	for_each_cpu(cpu, cpu_online_mask)
		online_mask |= 1 << cpu_logical_map(cpu);

	raw_spin_lock(&irq_controller_lock);

	/* Drop routing to every online CPU, then install the new target. */
	reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
	reg = (reg & (~online_mask)) | new_mask;
	writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));

	raw_spin_unlock(&irq_controller_lock);

	return 0;
}
#endif
135 static struct irq_chip armada_370_xp_irq_chip = {
136 .name = "armada_370_xp_irq",
137 .irq_mask = armada_370_xp_irq_mask,
138 .irq_mask_ack = armada_370_xp_irq_mask,
139 .irq_unmask = armada_370_xp_irq_unmask,
141 .irq_set_affinity = armada_xp_set_affinity,
145 static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
146 unsigned int virq, irq_hw_number_t hw)
148 armada_370_xp_irq_mask(irq_get_irq_data(virq));
149 writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
150 irq_set_status_flags(virq, IRQ_LEVEL);
152 if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
153 irq_set_percpu_devid(virq);
154 irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
155 handle_percpu_devid_irq);
158 irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
161 set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
#ifdef CONFIG_SMP
/*
 * Raise IPI doorbell @irq on every CPU in @mask by writing the
 * software-triggered interrupt register (physical CPU map in bits
 * 15:8, doorbell number in bits 7:0).
 */
void armada_mpic_send_doorbell(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long map = 0;

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= 1 << cpu_logical_map(cpu);

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* submit softirq */
	writel((map << 8) | irq, main_int_base +
		ARMADA_370_XP_SW_TRIG_INT_OFFS);
}
#endif
#ifdef CONFIG_SMP
/*
 * Per-CPU MPIC setup, run on each CPU (including secondaries when
 * they come online): drain stale doorbells, enable the doorbells we
 * use, and unmask the IPI summary interrupt (hwirq 0).
 */
void armada_xp_mpic_smp_cpu_init(void)
{
	/* Clear pending IPIs */
	writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

	/* Enable first 8 IPIs */
	writel((1 << ACTIVE_DOORBELLS) - 1, per_cpu_int_base +
		ARMADA_370_XP_IN_DRBEL_MSK_OFFS);

	/* Unmask IPI interrupt */
	writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}
#endif /* CONFIG_SMP */
201 static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
202 .map = armada_370_xp_mpic_irq_map,
203 .xlate = irq_domain_xlate_onecell,
206 static int __init armada_370_xp_mpic_of_init(struct device_node *node,
207 struct device_node *parent)
211 main_int_base = of_iomap(node, 0);
212 per_cpu_int_base = of_iomap(node, 1);
214 BUG_ON(!main_int_base);
215 BUG_ON(!per_cpu_int_base);
217 control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
219 armada_370_xp_mpic_domain =
220 irq_domain_add_linear(node, (control >> 2) & 0x3ff,
221 &armada_370_xp_mpic_irq_ops, NULL);
223 if (!armada_370_xp_mpic_domain)
224 panic("Unable to add Armada_370_Xp MPIC irq domain (DT)\n");
226 irq_set_default_host(armada_370_xp_mpic_domain);
229 armada_xp_mpic_smp_cpu_init();
232 * Set the default affinity from all CPUs to the boot cpu.
233 * This is required since the MPIC doesn't limit several CPUs
234 * from acknowledging the same interrupt.
236 cpumask_clear(irq_default_affinity);
237 cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
244 asmlinkage void __exception_irq_entry armada_370_xp_handle_irq(struct pt_regs
250 irqstat = readl_relaxed(per_cpu_int_base +
251 ARMADA_370_XP_CPU_INTACK_OFFS);
252 irqnr = irqstat & 0x3FF;
258 irqnr = irq_find_mapping(armada_370_xp_mpic_domain,
260 handle_IRQ(irqnr, regs);
268 ipimask = readl_relaxed(per_cpu_int_base +
269 ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
272 writel(0x0, per_cpu_int_base +
273 ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
275 /* Handle all pending doorbells */
276 for (ipinr = 0; ipinr < ACTIVE_DOORBELLS; ipinr++) {
277 if (ipimask & (0x1 << ipinr))
278 handle_IPI(ipinr, regs);
287 static const struct of_device_id mpic_of_match[] __initconst = {
288 {.compatible = "marvell,mpic", .data = armada_370_xp_mpic_of_init},
292 void __init armada_370_xp_init_irq(void)
294 of_irq_init(mpic_of_match);
295 #ifdef CONFIG_CACHE_L2X0
296 l2x0_of_init(0, ~0UL);