/*
 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

struct redist_region {
	void __iomem		*redist_base;
	phys_addr_t		phys_base;
};

struct gic_chip_data {
	void __iomem		*dist_base;
	struct redist_region	*redist_regions;
	struct rdists		rdists;
	struct irq_domain	*domain;
	u64			redist_stride;
	u32			nr_redist_regions;
	unsigned int		irq_nr;
};

static struct gic_chip_data gic_data __read_mostly;
static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;

#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE	0xf0
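
/*
 * Each CPU owns one redistributor, laid out as two consecutive 64K frames:
 * RD_base (control and physical LPI registers) immediately followed by
 * SGI_base (SGI/PPI configuration), which is why gic_data_rdist_sgi_base()
 * is simply RD_base + SZ_64K.
 */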

static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

static inline int gic_irq_in_rdist(struct irq_data *d)
{
	return gic_irq(d) < 32;
}

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	if (gic_irq_in_rdist(d))	/* SGI+PPI -> SGI_base for this CPU */
		return gic_data_rdist_sgi_base();

	if (d->hwirq <= 1023)		/* SPI -> dist_base */
		return gic_data.dist_base;

	return NULL;
}
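
/*
 * The distributor and redistributor control registers expose a "register
 * write pending" (RWP) bit while the effect of certain configuration writes
 * is still propagating; the helpers below poll that bit so callers know
 * their change has taken effect before continuing.
 */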

static void gic_do_wait_for_rwp(void __iomem *base)
{
	u32 count = 1000000;	/* 1s! */

	while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
		if (!--count) {
			pr_err_ratelimited("RWP timeout, gone fishing\n");
			return;
		}
		cpu_relax();
		udelay(1);
	}
}

/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data.dist_base);
}

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data_rdist_rd_base());
}

/* Low level accessors */
static u64 __maybe_unused gic_read_iar(void)
{
	u64 irqstat;

	asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
	return irqstat;
}

static void __maybe_unused gic_write_pmr(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
}

static void __maybe_unused gic_write_ctlr(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
	isb();
}

static void __maybe_unused gic_write_grpen1(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
	isb();
}

static void __maybe_unused gic_write_sgi1r(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
}
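
/*
 * The EOI paths below also rely on accessors for ICC_EOIR1_EL1 and
 * ICC_DIR_EL1 that are not part of the excerpt above. A minimal sketch in
 * the same mrs_s/msr_s style is given here; the exact definitions in the
 * full driver may differ slightly.
 */
static void gic_write_eoir(u64 irq)
{
	/* Signal end of interrupt (priority drop, plus deactivation in EOImode 0) */
	asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
	isb();
}

static void __maybe_unused gic_write_dir(u64 irq)
{
	/* Explicitly deactivate an interrupt when running in EOImode 1 */
	asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" (irq));
	isb();
}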

static void gic_enable_sre(void)
{
	u64 val;

	asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
	val |= ICC_SRE_EL1_SRE;
	asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val));
	isb();

	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
	if (!(val & ICC_SRE_EL1_SRE))
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
}
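
/*
 * GICR_WAKER handshake: to wake a redistributor we clear ProcessorSleep and
 * wait for ChildrenAsleep to clear; to quiesce it we do the opposite. A
 * redistributor that ignores writes to ProcessorSleep simply has no
 * power-management support, which is why the !enable path bails out early.
 */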

static void gic_enable_redist(bool enable)
{
	void __iomem *rbase;
	u32 count = 1000000;	/* 1s! */
	u32 val;

	rbase = gic_data_rdist_rd_base();

	val = readl_relaxed(rbase + GICR_WAKER);
	if (enable)
		/* Wake up this CPU redistributor */
		val &= ~GICR_WAKER_ProcessorSleep;
	else
		val |= GICR_WAKER_ProcessorSleep;
	writel_relaxed(val, rbase + GICR_WAKER);

	if (!enable) {		/* Check that GICR_WAKER is writeable */
		val = readl_relaxed(rbase + GICR_WAKER);
		if (!(val & GICR_WAKER_ProcessorSleep))
			return;	/* No PM support in this redistributor */
	}

	while (count--) {
		val = readl_relaxed(rbase + GICR_WAKER);
		if (enable ^ (val & GICR_WAKER_ChildrenAsleep))
			break;
		cpu_relax();
		udelay(1);
	}

	if (!count)
		pr_err_ratelimited("redistributor failed to %s...\n",
				   enable ? "wakeup" : "sleep");
}

/*
 * Routines to disable, enable, EOI and route interrupts
 */
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	void __iomem *base;

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
}

static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	void (*rwp_wait)(void);
	void __iomem *base;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
	rwp_wait();
}

static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ICENABLER);
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ISENABLER);
}

static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GICD_ISPENDR : GICD_ICPENDR;
		break;
	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
		break;
	case IRQCHIP_STATE_MASKED:
		reg = val ? GICD_ICENABLER : GICD_ISENABLER;
		break;
	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GICD_ISPENDR);
		break;
	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GICD_ISACTIVER);
		break;
	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GICD_ISENABLER);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
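
/*
 * Two EOI flavours: with ICC_CTLR_EL1.EOImode == 0 a write to ICC_EOIR1_EL1
 * both drops the running priority and deactivates the interrupt, so
 * gic_eoi_irq() is all that is needed. With EOImode == 1 (the split
 * EOI/Deactivate mode used when supports_deactivate is true), EOIR only
 * drops priority and the interrupt stays active until ICC_DIR_EL1 is
 * written, which is what gic_eoimode1_eoi_irq() does.
 */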

static void gic_eoi_irq(struct irq_data *d)
{
	gic_write_eoir(gic_irq(d));
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	/*
	 * No need to deactivate an LPI.
	 */
	if (gic_irq(d) >= 8192)
		return;
	gic_write_dir(gic_irq(d));
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = gic_irq(d);
	void (*rwp_wait)(void);
	void __iomem *base;

	/* Interrupt configuration for SGIs can't be changed */
	if (irq < 16)
		return -EINVAL;

	/* SPIs have restrictions on the supported types */
	if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
	    type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	return gic_configure_irq(irq, type, base, rwp_wait);
}
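
/*
 * GICD_IROUTER expects the target CPU as an affinity value: Aff3 in bits
 * [39:32] and Aff2/Aff1/Aff0 in bits [23:16], [15:8] and [7:0], which is
 * exactly the packing performed below from the CPU's MPIDR.
 */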

static u64 gic_mpidr_to_affinity(u64 mpidr)
{
	u64 aff;

	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	return aff;
}
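
/*
 * Main interrupt entry point: keep reading ICC_IAR1_EL1 until it returns the
 * spurious INTID (1023). SGIs (0-15) are routed to the IPI handler; anything
 * else in the PPI/SPI range or the LPI range (8192 and up) goes through the
 * IRQ domain.
 */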

static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u64 irqnr;

	do {
		irqnr = gic_read_iar();

		if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
			int err;

			if (static_key_true(&supports_deactivate))
				gic_write_eoir(irqnr);

			err = handle_domain_irq(gic_data.domain, irqnr, regs);
			if (err) {
				WARN_ONCE(true, "Unexpected interrupt received!\n");
				if (static_key_true(&supports_deactivate)) {
					if (irqnr < 8192)
						gic_write_dir(irqnr);
				} else {
					gic_write_eoir(irqnr);
				}
			}
			continue;
		}
		if (irqnr < 16) {
			gic_write_eoir(irqnr);
			if (static_key_true(&supports_deactivate))
				gic_write_dir(irqnr);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
#else
			WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
			continue;
		}
	} while (irqnr != ICC_IAR1_EL1_SPURIOUS);
}

static void __init gic_dist_init(void)
{
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);

	/* Enable distributor with ARE, Group1 */
	writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
		       base + GICD_CTLR);

	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
	 */
	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
	for (i = 32; i < gic_data.irq_nr; i++)
		writeq_relaxed(affinity, base + GICD_IROUTER + i * 8);
}
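
/*
 * Find this CPU's redistributor: walk every GICR region described by the
 * device tree, stepping from frame to frame (using redistributor-stride
 * when provided), until a frame whose GICR_TYPER affinity field (bits
 * [63:32]) matches this CPU's MPIDR is found.
 */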

static int gic_populate_rdist(void)
{
	u64 mpidr = cpu_logical_map(smp_processor_id());
	u64 typer;
	u32 aff;
	int i;

	/*
	 * Convert affinity to a 32bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	for (i = 0; i < gic_data.nr_redist_regions; i++) {
		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
		u32 reg;

		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
		if (reg != GIC_PIDR2_ARCH_GICv3 &&
		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
			pr_warn("No redistributor present @%p\n", ptr);
			break;
		}

		do {
			typer = readq_relaxed(ptr + GICR_TYPER);
			if ((typer >> 32) == aff) {
				u64 offset = ptr - gic_data.redist_regions[i].redist_base;
				gic_data_rdist_rd_base() = ptr;
				gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
				pr_info("CPU%d: found redistributor %llx region %d:%pa\n",
					smp_processor_id(),
					(unsigned long long)mpidr,
					i, &gic_data_rdist()->phys_base);
				return 0;
			}

			if (gic_data.redist_stride) {
				ptr += gic_data.redist_stride;
			} else {
				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
				if (typer & GICR_TYPER_VLPIS)
					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
			}
		} while (!(typer & GICR_TYPER_LAST));
	}

	/* We couldn't even deal with ourselves... */
	WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n",
	     smp_processor_id(), (unsigned long long)mpidr);
	return -ENODEV;
}

static void gic_cpu_sys_reg_init(void)
{
	/* Enable system registers */
	gic_enable_sre();

	/* Set priority mask register */
	gic_write_pmr(DEFAULT_PMR_VALUE);

	if (static_key_true(&supports_deactivate)) {
		/* EOI drops priority only (mode 1) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
	} else {
		/* EOI deactivates interrupt too (mode 0) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
	}

	/* ... and let's hit the road... */
	gic_write_grpen1(1);
}

static int gic_dist_supports_lpis(void)
{
	return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS);
}

static void gic_cpu_init(void)
{
	void __iomem *rbase;

	/* Register ourselves with the rest of the world */
	if (gic_populate_rdist())
		return;

	gic_enable_redist(true);

	rbase = gic_data_rdist_sgi_base();

	gic_cpu_config(rbase, gic_redist_wait_for_rwp);

	/* Give LPIs a spin */
	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
		its_cpu_init();

	/* initialise system registers */
	gic_cpu_sys_reg_init();
}

#ifdef CONFIG_SMP
static int gic_secondary_init(struct notifier_block *nfb,
			      unsigned long action, void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		gic_cpu_init();
	return NOTIFY_OK;
}

/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block gic_cpu_notifier = {
	.notifier_call = gic_secondary_init,
	.priority = 100,
};

static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
				   u64 cluster_id)
{
	int cpu = *base_cpu;
	u64 mpidr = cpu_logical_map(cpu);
	u16 tlist = 0;

	while (cpu < nr_cpu_ids) {
		/*
		 * If we ever get a cluster of more than 16 CPUs, just
		 * scream and skip that CPU.
		 */
		if (WARN_ON((mpidr & 0xff) >= 16))
			goto out;

		tlist |= 1 << (mpidr & 0xf);

		cpu = cpumask_next(cpu, mask);
		if (cpu >= nr_cpu_ids)
			goto out;

		mpidr = cpu_logical_map(cpu);

		if (cluster_id != (mpidr & ~0xffUL)) {
			cpu--;
			goto out;
		}
	}
out:
	*base_cpu = cpu;
	return tlist;
}

#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
	(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
		<< ICC_SGI1R_AFFINITY_## level ##_SHIFT)
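
/*
 * ICC_SGI1R_EL1 addresses one cluster at a time: it carries the SGI number,
 * the cluster's Aff3..Aff1 coordinates, and a 16-bit target list of Aff0
 * values within that cluster (hence the 16-CPUs-per-cluster limit above).
 */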

static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
	u64 val;

	val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)	|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 2)	|
	       irq << ICC_SGI1R_SGI_ID_SHIFT		|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

	pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
	gic_write_sgi1r(val);
}

static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;

	if (WARN_ON(irq >= 16))
		return;

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	smp_wmb();

	for_each_cpu(cpu, mask) {
		u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
		u16 tlist;

		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
		gic_send_sgi(cluster_id, tlist, irq);
	}

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}

static void gic_smp_init(void)
{
	set_smp_cross_call(gic_raise_softirq);
	register_cpu_notifier(&gic_cpu_notifier);
}

static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	void __iomem *reg;
	int enabled;
	u64 val;

	if (gic_irq_in_rdist(d))
		return -EINVAL;

	/* If interrupt was enabled, disable it first */
	enabled = gic_peek_irq(d, GICD_ISENABLER);
	if (enabled)
		gic_mask_irq(d);

	reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

	writeq_relaxed(val, reg);

	/*
	 * If the interrupt was enabled, enable it again. Otherwise,
	 * just wait for the distributor to have digested our changes.
	 */
	if (enabled)
		gic_unmask_irq(d);
	else
		gic_dist_wait_for_rwp();

	return IRQ_SET_MASK_OK;
}
#else
#define gic_set_affinity	NULL
#define gic_smp_init()		do { } while(0)
#endif

#ifdef CONFIG_CPU_PM
static int gic_cpu_pm_notifier(struct notifier_block *self,
			       unsigned long cmd, void *v)
{
	if (cmd == CPU_PM_EXIT) {
		gic_enable_redist(true);
		gic_cpu_sys_reg_init();
	} else if (cmd == CPU_PM_ENTER) {
		gic_write_grpen1(0);
		gic_enable_redist(false);
	}
	return NOTIFY_OK;
}

static struct notifier_block gic_cpu_pm_notifier_block = {
	.notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}

#else
static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
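
/*
 * Two irq_chip flavours are provided: gic_chip for plain EOImode 0 operation
 * and gic_eoimode1_chip for split EOI/Deactivate. gic_irq_domain_map() picks
 * one or the other based on the supports_deactivate static key.
 */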

static struct irq_chip gic_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.flags			= IRQCHIP_SET_TYPE_MASKED,
};

static struct irq_chip gic_eoimode1_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_eoimode1_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoimode1_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.flags			= IRQCHIP_SET_TYPE_MASKED,
};

#define GIC_ID_NR	(1U << gic_data.rdists.id_bits)
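
/*
 * hwirq numbering: 0-15 are SGIs (never mapped here), 16-31 are PPIs,
 * 32 up to gic_data.irq_nr are SPIs, and 8192 up to GIC_ID_NR are LPIs.
 */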

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct irq_chip *chip = &gic_chip;

	if (static_key_true(&supports_deactivate))
		chip = &gic_eoimode1_chip;

	/* SGIs are private to the core kernel */
	if (hw < 16)
		return -EPERM;
	/* Nothing here */
	if (hw >= gic_data.irq_nr && hw < 8192)
		return -EPERM;

	/* PPIs */
	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
	}
	/* SPIs */
	if (hw >= 32 && hw < gic_data.irq_nr) {
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}
	/* LPIs */
	if (hw >= 8192 && hw < GIC_ID_NR) {
		if (!gic_dist_supports_lpis())
			return -EPERM;
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		set_irq_flags(irq, IRQF_VALID);
	}

	return 0;
}
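
/*
 * Device tree interrupts are specified with three cells: the first selects
 * SPI (0) or PPI (1) (GIC_IRQ_TYPE_LPI is a special value used internally
 * for LPIs), the second is the interrupt number within that class (offset
 * by 32 for SPIs and 16 for PPIs to form the hwirq), and the third holds
 * the trigger type flags.
 */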

static int gic_irq_domain_xlate(struct irq_domain *d,
				struct device_node *controller,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;
	if (intsize < 3)
		return -EINVAL;

	switch (intspec[0]) {
	case 0:			/* SPI */
		*out_hwirq = intspec[1] + 32;
		break;
	case 1:			/* PPI */
		*out_hwirq = intspec[1] + 16;
		break;
	case GIC_IRQ_TYPE_LPI:	/* LPI */
		*out_hwirq = intspec[1];
		break;
	default:
		return -EINVAL;
	}

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
	return 0;
}

static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct of_phandle_args *irq_data = arg;

	ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args,
				   irq_data->args_count, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++)
		gic_irq_domain_map(domain, virq + i, hwirq + i);

	return 0;
}

static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
};
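
/*
 * Probe path: map the distributor and every redistributor region described
 * in the device tree, size the interrupt space from GICD_TYPER, create the
 * IRQ domain, then bring up the distributor and this CPU's interface.
 */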

static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *dist_base;
	struct redist_region *rdist_regs;
	u64 redist_stride;
	u32 nr_redist_regions;
	u32 typer;
	u32 reg;
	int gic_irqs;
	int err;
	int i;

	dist_base = of_iomap(node, 0);
	if (!dist_base) {
		pr_err("%s: unable to map gic dist registers\n",
		       node->full_name);
		return -ENXIO;
	}

	reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) {
		pr_err("%s: no distributor detected, giving up\n",
		       node->full_name);
		err = -ENODEV;
		goto out_unmap_dist;
	}

	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
		nr_redist_regions = 1;

	rdist_regs = kzalloc(sizeof(*rdist_regs) * nr_redist_regions, GFP_KERNEL);
	if (!rdist_regs) {
		err = -ENOMEM;
		goto out_unmap_dist;
	}

	for (i = 0; i < nr_redist_regions; i++) {
		struct resource res;
		int ret;

		ret = of_address_to_resource(node, 1 + i, &res);
		rdist_regs[i].redist_base = of_iomap(node, 1 + i);
		if (ret || !rdist_regs[i].redist_base) {
			pr_err("%s: couldn't map region %d\n",
			       node->full_name, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}
		rdist_regs[i].phys_base = res.start;
	}

	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
		redist_stride = 0;

	if (!is_hyp_mode_available())
		static_key_slow_dec(&supports_deactivate);

	if (static_key_true(&supports_deactivate))
		pr_info("GIC: Using split EOI/Deactivate mode\n");

	gic_data.dist_base = dist_base;
	gic_data.redist_regions = rdist_regs;
	gic_data.nr_redist_regions = nr_redist_regions;
	gic_data.redist_stride = redist_stride;

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
	 */
	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
	gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
	gic_irqs = GICD_TYPER_IRQS(typer);
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic_data.irq_nr = gic_irqs;

	gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
					      &gic_data);
	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));

	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
		err = -ENOMEM;
		goto out_free;
	}

	set_handle_irq(gic_handle_irq);

	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
		its_init(node, &gic_data.rdists, gic_data.domain);

	gic_smp_init();
	gic_dist_init();
	gic_cpu_init();
	gic_cpu_pm_init();

	return 0;

out_free:
	if (gic_data.domain)
		irq_domain_remove(gic_data.domain);
	free_percpu(gic_data.rdists.rdist);
out_unmap_rdist:
	for (i = 0; i < nr_redist_regions; i++)
		if (rdist_regs[i].redist_base)
			iounmap(rdist_regs[i].redist_base);
	kfree(rdist_regs);
out_unmap_dist:
	iounmap(dist_base);
	return err;
}

IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
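
/*
 * For reference, an illustrative (not board-specific) device tree node that
 * this driver would bind against; addresses and sizes are placeholders:
 *
 *	gic: interrupt-controller@2f000000 {
 *		compatible = "arm,gic-v3";
 *		#interrupt-cells = <3>;
 *		interrupt-controller;
 *		#redistributor-regions = <1>;
 *		reg = <0x0 0x2f000000 0x0 0x10000>,	// GICD
 *		      <0x0 0x2f100000 0x0 0x200000>;	// GICR
 *		interrupts = <1 9 4>;
 *	};
 */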