/*
 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
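
/*
 * Informational sketch of the banking noted above: a banked distributor
 * register such as GIC_DIST_ENABLE_SET + 0 covers IRQs 0-31, and every CPU
 * that accesses it sees its own private copy, so for example writing
 * 1 << 29 there enables PPI 29 (commonly a per-cpu timer) only on the CPU
 * doing the write. IRQs 32 and up (SPIs) use single, shared instances.
 */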
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>
#include <trace/events/arm-ipi.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>

#include "irq-gic-common.h"
union gic_base {
	void __iomem *common_base;
	void __percpu * __iomem *percpu_base;
};

struct gic_chip_data {
	union gic_base dist_base;
	union gic_base cpu_base;
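	/*
	 * Saved SPI state, sized for the architectural maximum of 1020
	 * interrupt IDs: the distributor keeps 1 enable bit, 2 configuration
	 * bits and one 8-bit target byte per interrupt, i.e. 32, 16 and 4
	 * interrupts per 32-bit register respectively.
	 */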
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_conf;

	struct irq_domain *domain;
	unsigned int gic_irqs;
#ifdef CONFIG_GIC_NON_BANKED
	void __iomem *(*get_base)(union gic_base *);
#endif
};

static DEFINE_RAW_SPINLOCK(irq_controller_lock);
/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering. Let's use a mapping as returned
 * by the common code for that.
 */
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;

/*
 * Supported arch specific GIC irq extension.
 * Defaults to NULL.
 */
struct irq_chip gic_arch_extn = {
	.irq_retrigger	= NULL,
};

static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
#ifdef CONFIG_GIC_NON_BANKED
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
	return *__this_cpu_ptr(base->percpu_base);
}

static void __iomem *gic_get_common_base(union gic_base *base)
{
	return base->common_base;
}

static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
{
	return data->get_base(&data->dist_base);
}

static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
{
	return data->get_base(&data->cpu_base);
}

static inline void gic_set_base_accessor(struct gic_chip_data *data,
					 void __iomem *(*f)(union gic_base *))
{
	data->get_base = f;
}
#else
#define gic_data_dist_base(d)	((d)->dist_base.common_base)
#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
#define gic_set_base_accessor(d, f)
#endif
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_dist_base(gic_data);
}

static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_cpu_base(gic_data);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}
/*
 * Routines to acknowledge, disable and enable interrupts
 */
static void gic_mask_irq(struct irq_data *d)
{
	u32 mask = 1 << (gic_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
	if (gic_arch_extn.irq_mask)
		gic_arch_extn.irq_mask(d);
	raw_spin_unlock(&irq_controller_lock);
}
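
/*
 * Worked example of the enable/disable register math used above: there is
 * one enable bit per interrupt, 32 per register, so for hwirq 42 the word
 * index is 42 / 32 = 1 (byte offset 4) and the bit within that word is
 * 42 % 32 = 10. Writing 1 << 10 to ENABLE_CLEAR masks the interrupt;
 * writing it to ENABLE_SET unmasks it.
 */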
static void gic_unmask_irq(struct irq_data *d)
{
	u32 mask = 1 << (gic_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	if (gic_arch_extn.irq_unmask)
		gic_arch_extn.irq_unmask(d);
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
	raw_spin_unlock(&irq_controller_lock);
}

static void gic_eoi_irq(struct irq_data *d)
{
	if (gic_arch_extn.irq_eoi) {
		raw_spin_lock(&irq_controller_lock);
		gic_arch_extn.irq_eoi(d);
		raw_spin_unlock(&irq_controller_lock);
	}

	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;
	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);
	if (gic_arch_extn.irq_set_type)
		gic_arch_extn.irq_set_type(d, type);
	gic_configure_irq(gicirq, type, base, NULL);
	raw_spin_unlock(&irq_controller_lock);

	return 0;
}
static int gic_retrigger(struct irq_data *d)
{
	if (gic_arch_extn.irq_retrigger)
		return gic_arch_extn.irq_retrigger(d);

	/* the genirq layer expects 0 if we can't retrigger in hardware */
	return 0;
}

static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
	unsigned int shift = (gic_irq(d) % 4) * 8;
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	u32 val, mask, bit;

	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);
	mask = 0xff << shift;
	bit = gic_cpu_map[cpu] << shift;
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	raw_spin_unlock(&irq_controller_lock);

	return IRQ_SET_MASK_OK;
}
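
/*
 * Worked example of the GIC_DIST_TARGET arithmetic above: the distributor
 * keeps one target byte per interrupt, four per 32-bit register. For
 * hwirq 42, (42 & ~3) = 40 selects the word covering IRQs 40-43 and
 * (42 % 4) * 8 = 16 is the shift of IRQ 42's byte within it. The byte
 * written is gic_cpu_map[cpu], the 8-bit interface mask of the target CPU.
 */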
#ifdef CONFIG_PM
static int gic_set_wake(struct irq_data *d, unsigned int on)
{
	int ret = -ENXIO;

	if (gic_arch_extn.irq_set_wake)
		ret = gic_arch_extn.irq_set_wake(d, on);

	return ret;
}
#else
#define gic_set_wake	NULL
#endif
static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	struct gic_chip_data *gic = &gic_data[0];
	void __iomem *cpu_base = gic_data_cpu_base(gic);

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & GICC_IAR_INT_ID_MASK;

		if (likely(irqnr > 15 && irqnr < 1021)) {
			irqnr = irq_find_mapping(gic->domain, irqnr);
			handle_IRQ(irqnr, regs);
			continue;
		}
		if (irqnr < 16) {
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
			handle_IPI(irqnr, regs);
			continue;
		}
		break;
	} while (1);
}
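
/*
 * Informational: interrupt IDs read from GIC_CPU_INTACK fall into fixed
 * ranges: 0-15 are SGIs (used as IPIs and acknowledged before calling
 * handle_IPI), 16-31 are per-cpu PPIs, 32-1019 are SPIs, and 1020-1023 are
 * special values, 1023 meaning "no interrupt pending", which ends the loop.
 */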
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	raw_spin_lock(&irq_controller_lock);
	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
	raw_spin_unlock(&irq_controller_lock);

	gic_irq = (status & 0x3ff);
	if (gic_irq == 1023)
		goto out;

	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
	if (unlikely(gic_irq < 32 || gic_irq > 1020))
		handle_bad_irq(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}
static struct irq_chip gic_chip = {
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger		= gic_retrigger,
	.irq_set_affinity	= gic_set_affinity,
	.irq_set_wake		= gic_set_wake,
};
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	if (gic_nr >= MAX_GIC_NR)
		BUG();
	if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
		BUG();
	irq_set_chained_handler(irq, gic_handle_cascade_irq);
}
static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
	void __iomem *base = gic_data_dist_base(gic);
	u32 mask, i;

	for (i = mask = 0; i < 32; i += 4) {
		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
		mask |= mask >> 16;
		mask |= mask >> 8;
		if (mask)
			break;
	}

	if (!mask)
		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

	return mask;
}
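
/*
 * Informational: the GIC_DIST_TARGET bytes for interrupts 0-31 are
 * read-only and banked per CPU; each byte reads back as the interface mask
 * of the CPU performing the access, which is why folding one of those words
 * down to 8 bits yields this CPU's interface bit for gic_cpu_map[].
 */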
static void __init gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	void __iomem *base = gic_data_dist_base(gic);

	writel_relaxed(0, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	cpumask = gic_get_cpumask(gic);
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
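
	/*
	 * Informational: gic_get_cpumask() returns an 8-bit interface mask
	 * (e.g. 0x01 for interface 0); replicating it into all four bytes
	 * (0x01 -> 0x01010101) lets each 32-bit write above set the target
	 * byte of four consecutive SPIs at once, since GIC_DIST_TARGET holds
	 * one target byte per interrupt (hence the i * 4 / 4 byte offset).
	 */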
	gic_dist_config(base, gic_irqs, NULL);

	writel_relaxed(1, base + GIC_DIST_CTRL);
}

static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic_data_dist_base(gic);
	void __iomem *base = gic_data_cpu_base(gic);
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;

	/*
	 * Get what the GIC says our CPU mask is.
	 */
	BUG_ON(cpu >= NR_GIC_CPU_IF);
	cpu_mask = gic_get_cpumask(gic);
	gic_cpu_map[cpu] = cpu_mask;

	/*
	 * Clear our mask from the other map entries in case they're
	 * still undefined.
	 */
	for (i = 0; i < NR_GIC_CPU_IF; i++)
		if (i != cpu)
			gic_cpu_map[i] &= ~cpu_mask;

	gic_cpu_config(dist_base, NULL);

	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
	writel_relaxed(1, base + GIC_CPU_CTRL);
}
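
/*
 * Informational: 0xf0 in GIC_CPU_PRIMASK (GICC_PMR) lets the CPU interface
 * signal any interrupt whose priority value is numerically lower than 0xf0
 * (lower value means higher priority); the common GIC setup code programs a
 * default priority of 0xa0 (see the 0xa0a0a0a0 writes in the restore paths
 * below), so all interrupts pass the mask. Writing 1 to GIC_CPU_CTRL then
 * enables delivery to this CPU.
 */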
void gic_cpu_if_down(void)
{
	void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
	writel_relaxed(0, cpu_base + GIC_CPU_CTRL);
}

#ifdef CONFIG_CPU_PM
/*
 * Saves the GIC distributor registers during suspend or idle. Must be called
 * with interrupts disabled but before powering down the GIC. After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
static void gic_dist_save(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic_data[gic_nr].saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic_data[gic_nr].saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic_data[gic_nr].saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
}
/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle. Must be called before enabling interrupts. If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
static void gic_dist_restore(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	writel_relaxed(0, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
			dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(0xa0a0a0a0,
			dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
			dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
			dist_base + GIC_DIST_ENABLE_SET + i * 4);

	writel_relaxed(1, dist_base + GIC_DIST_CTRL);
}
static void gic_cpu_save(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}
static void gic_cpu_restore(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
	writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
}
static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	int i;

	for (i = 0; i < MAX_GIC_NR; i++) {
#ifdef CONFIG_GIC_NON_BANKED
		/* Skip over unused GICs */
		if (!gic_data[i].get_base)
			continue;
#endif
		case CPU_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_ENTER:
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:

static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};
static void __init gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_enable);

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_conf);

	if (gic == &gic_data[0])
		cpu_pm_register_notifier(&gic_notifier_block);
}
#else
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long flags, map = 0;

	raw_spin_lock_irqsave(&irq_controller_lock, flags);

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask) {
		trace_arm_ipi_send(irq, cpu);
		map |= gic_cpu_map[cpu];
	}

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before they observe us issuing the IPI.
	 */
	dmb(ishst);

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);

	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
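
/*
 * Informational: GIC_DIST_SOFTINT (GICD_SGIR) takes the SGI number in bits
 * [3:0] and a CPU interface target list in bits [23:16], which is why the
 * physical map is shifted left by 16 and OR-ed with the IPI number. For
 * example, (0x3 << 16) | 1 raises SGI 1 on CPU interfaces 0 and 1.
 */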
#ifdef CONFIG_BL_SWITCHER
/*
 * gic_send_sgi - send a SGI directly to given CPU interface number
 *
 * cpu_id: the ID for the destination CPU interface
 * irq: the IPI number to send a SGI for
 */
void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
{
	BUG_ON(cpu_id >= NR_GIC_CPU_IF);
	cpu_id = 1 << cpu_id;
	/* this always happens on GIC0 */
	writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}
/*
 * gic_get_cpu_id - get the CPU interface ID for the specified CPU
 *
 * @cpu: the logical CPU number to get the GIC ID for.
 *
 * Return the CPU interface ID for the given logical CPU number,
 * or -1 if the CPU number is too large or the interface ID is
 * unknown (more than one bit set).
 */
int gic_get_cpu_id(unsigned int cpu)
{
	unsigned int cpu_bit;

	if (cpu >= NR_GIC_CPU_IF)
		return -1;
	cpu_bit = gic_cpu_map[cpu];
	if (cpu_bit & (cpu_bit - 1))
		return -1;
	return __ffs(cpu_bit);
}
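
/*
 * Informational: gic_cpu_map[] entries are one-hot interface masks, so
 * cpu_bit & (cpu_bit - 1) is non-zero exactly when more than one bit is
 * set (the mapping is still ambiguous), and __ffs() converts the remaining
 * single bit back into an interface number (e.g. 0x04 -> 2).
 */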
/*
 * gic_migrate_target - migrate IRQs to another CPU interface
 *
 * @new_cpu_id: the CPU target ID to migrate IRQs to
 *
 * Migrate all peripheral interrupts with a target matching the current CPU
 * to the interface corresponding to @new_cpu_id. The CPU interface mapping
 * is also updated. Targets to other CPU interfaces are unchanged.
 * This must be called with IRQs locally disabled.
 */
void gic_migrate_target(unsigned int new_cpu_id)
{
	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
	void __iomem *dist_base;
	int i, ror_val, cpu = smp_processor_id();
	u32 val, cur_target_mask, active_mask;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	gic_irqs = gic_data[gic_nr].gic_irqs;

	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
	cur_target_mask = 0x01010101 << cur_cpu_id;
	ror_val = (cur_cpu_id - new_cpu_id) & 31;
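
	/*
	 * Informational: 0x01010101 << cur_cpu_id selects the current
	 * interface's bit in each of the four target bytes of a
	 * GIC_DIST_TARGET word. Rotating the matched bits right by
	 * (cur_cpu_id - new_cpu_id) mod 32 then moves each of them to the
	 * new interface's bit position within its own byte.
	 */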

	raw_spin_lock(&irq_controller_lock);

	/* Update the target interface for this logical CPU */
	gic_cpu_map[cpu] = 1 << new_cpu_id;

	/*
	 * Find all the peripheral interrupts targeting the current
	 * CPU interface and migrate them to the new CPU interface.
	 * We skip DIST_TARGET 0 to 7 as they are read-only.
	 */
	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
		active_mask = val & cur_target_mask;
		if (active_mask) {
			val &= ~active_mask;
			val |= ror32(active_mask, ror_val);
			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i * 4);
		}
	}

	raw_spin_unlock(&irq_controller_lock);

	/*
	 * Now let's migrate and clear any potential SGIs that might be
	 * pending for us (cur_cpu_id). Since GIC_DIST_SGI_PENDING_SET
	 * is a banked register, we can only forward the SGI using
	 * GIC_DIST_SOFTINT. The original SGI source is lost but Linux
	 * doesn't use that information anyway.
	 *
	 * For the same reason we do not adjust SGI source information
	 * for previously sent SGIs by us to other CPUs either.
	 */
	for (i = 0; i < 16; i += 4) {
		int j;
		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
		if (!val)
			continue;
		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
		for (j = i; j < i + 4; j++) {
			if (val & (0xff << (j - i) * 8))
				writel_relaxed((1 << (new_cpu_id + 16)) | j,
						dist_base + GIC_DIST_SOFTINT);
		}
	}
}
/*
 * gic_get_sgir_physaddr - get the physical address for the SGI register
 *
 * Return the physical address of the SGI register to be used
 * by some early assembly code when the kernel is not yet available.
 */
static unsigned long gic_dist_physaddr;

unsigned long gic_get_sgir_physaddr(void)
{
	if (!gic_dist_physaddr)
		return 0;
	return gic_dist_physaddr + GIC_DIST_SOFTINT;
}

void __init gic_init_physaddr(struct device_node *node)
{
	struct resource res;

	if (of_address_to_resource(node, 0, &res) == 0) {
		gic_dist_physaddr = res.start;
		pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
	}
}

#else
#define gic_init_physaddr(node)  do { } while (0)
#endif
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_set_chip_and_handler(irq, &gic_chip,
					 handle_percpu_devid_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
	} else {
		irq_set_chip_and_handler(irq, &gic_chip,
					 handle_fasteoi_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);

		gic_routable_irq_domain_ops->map(d, irq, hw);
	}
	irq_set_chip_data(irq, d->host_data);
	return 0;
}
static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
{
	gic_routable_irq_domain_ops->unmap(d, irq);
}
static int gic_irq_domain_xlate(struct irq_domain *d,
				struct device_node *controller,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	unsigned long ret = 0;

	if (d->of_node != controller)
		return -EINVAL;

	/* Get the interrupt number and add 16 to skip over SGIs */
	*out_hwirq = intspec[1] + 16;

	/* For SPIs, we need to add 16 more to get the GIC irq ID number */
	if (!intspec[0]) {
		ret = gic_routable_irq_domain_ops->xlate(d, controller, intspec,
							 intsize, out_hwirq, out_type);
		if (IS_ERR_VALUE(ret))
			return ret;
	}

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return ret;
}
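
/*
 * Informational: with the standard three-cell GIC specifier, cell 0 selects
 * SPI (0) or PPI (1), cell 1 is the interrupt number within that space and
 * cell 2 holds the trigger flags. "interrupts = <0 34 4>" therefore
 * describes SPI 34, level-high, which this translation turns into
 * hwirq 34 + 32 = 66.
 */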
static int __cpuinit gic_secondary_init(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		gic_cpu_init(&gic_data[0]);
	return NOTIFY_OK;
}

/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block __cpuinitdata gic_cpu_notifier = {
	.notifier_call = gic_secondary_init,
};
static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.unmap = gic_irq_domain_unmap,
	.xlate = gic_irq_domain_xlate,
};

/* Default functions for routable irq domain */
static int gic_routable_irq_domain_map(struct irq_domain *d, unsigned int irq,
				       irq_hw_number_t hw)
{
	return 0;
}

static void gic_routable_irq_domain_unmap(struct irq_domain *d,
					  unsigned int irq)
{
}

static int gic_routable_irq_domain_xlate(struct irq_domain *d,
					 struct device_node *controller,
					 const u32 *intspec, unsigned int intsize,
					 unsigned long *out_hwirq,
					 unsigned int *out_type)
{
	*out_hwirq += 16;
	return 0;
}

const struct irq_domain_ops gic_default_routable_irq_domain_ops = {
	.map = gic_routable_irq_domain_map,
	.unmap = gic_routable_irq_domain_unmap,
	.xlate = gic_routable_irq_domain_xlate,
};

const struct irq_domain_ops *gic_routable_irq_domain_ops =
		&gic_default_routable_irq_domain_ops;
void __init gic_init_bases(unsigned int gic_nr, int irq_start,
			   void __iomem *dist_base, void __iomem *cpu_base,
			   u32 percpu_offset, struct device_node *node)
{
	irq_hw_number_t hwirq_base;
	struct gic_chip_data *gic;
	int gic_irqs, irq_base, i;
	int nr_routable_irqs;

	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic = &gic_data[gic_nr];
#ifdef CONFIG_GIC_NON_BANKED
	if (percpu_offset) { /* Franken-GIC without banked registers... */
		unsigned int cpu;

		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
		if (WARN_ON(!gic->dist_base.percpu_base ||
			    !gic->cpu_base.percpu_base)) {
			free_percpu(gic->dist_base.percpu_base);
			free_percpu(gic->cpu_base.percpu_base);
			return;
		}
		for_each_possible_cpu(cpu) {
			u32 mpidr = cpu_logical_map(cpu);
			u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			unsigned long offset = percpu_offset * core_id;
			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
		}

		gic_set_base_accessor(gic, gic_get_percpu_base);
	} else
#endif
	{			/* Normal, sane GIC... */
		WARN(percpu_offset,
		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
		     percpu_offset);
		gic->dist_base.common_base = dist_base;
		gic->cpu_base.common_base = cpu_base;
		gic_set_base_accessor(gic, gic_get_common_base);
	}

	/*
	 * Initialize the CPU interface map to all CPUs.
	 * It will be refined as each CPU probes its ID.
	 */
	for (i = 0; i < NR_GIC_CPU_IF; i++)
		gic_cpu_map[i] = 0xff;

	/*
	 * For primary GICs, skip over SGIs.
	 * For secondary GICs, skip over PPIs, too.
	 */
	if (gic_nr == 0 && (irq_start & 31) > 0) {
		hwirq_base = 16;
		if (irq_start != -1)
			irq_start = (irq_start & ~31) + 16;
	} else {
		hwirq_base = 32;
	}

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
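
	/*
	 * Informational: the low five bits of GIC_DIST_CTR (GICD_TYPER)
	 * encode ITLinesNumber, and the distributor implements
	 * 32 * (ITLinesNumber + 1) interrupt IDs; a value of 4, for example,
	 * means 160 IDs, and 1020 is the architectural maximum usable here.
	 */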
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic->gic_irqs = gic_irqs;

	gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */

	if (of_property_read_u32(node, "arm,routable-irqs",
				 &nr_routable_irqs)) {
		irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
					   numa_node_id());
		if (IS_ERR_VALUE(irq_base)) {
			WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
			     irq_start);
			irq_base = irq_start;
		}

		gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
					hwirq_base, &gic_irq_domain_ops, gic);
	} else {
		gic->domain = irq_domain_add_linear(node, nr_routable_irqs,
						    &gic_irq_domain_ops, gic);
	}

	if (WARN_ON(!gic->domain))
		return;

	set_smp_cross_call(gic_raise_softirq);
	register_cpu_notifier(&gic_cpu_notifier);
	set_handle_irq(gic_handle_irq);

	gic_chip.flags |= gic_arch_extn.flags;
static int gic_cnt __initdata;

static int __init
gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *cpu_base;
	void __iomem *dist_base;
	u32 percpu_offset;
	int irq;

	dist_base = of_iomap(node, 0);
	WARN(!dist_base, "unable to map gic dist registers\n");

	cpu_base = of_iomap(node, 1);
	WARN(!cpu_base, "unable to map gic cpu registers\n");

	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
		percpu_offset = 0;

	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
	gic_init_physaddr(node);

	irq = irq_of_parse_and_map(node, 0);
	gic_cascade_irq(gic_cnt, irq);

IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
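
/*
 * Informational: a minimal device-tree node matched by the compatibles
 * above could look like the sketch below; the unit address and register
 * ranges are illustrative only and depend on the SoC.
 *
 *	intc: interrupt-controller@2c001000 {
 *		compatible = "arm,cortex-a15-gic";
 *		interrupt-controller;
 *		#interrupt-cells = <3>;
 *		reg = <0x2c001000 0x1000>,	// distributor
 *		      <0x2c002000 0x100>;	// CPU interface
 *	};
 */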