/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Common Codes for EXYNOS
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12 #include <linux/kernel.h>
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
16 #include <linux/device.h>
17 #include <linux/gpio.h>
18 #include <linux/sched.h>
19 #include <linux/serial_core.h>
21 #include <linux/of_fdt.h>
22 #include <linux/of_irq.h>
23 #include <linux/export.h>
24 #include <linux/irqdomain.h>
25 #include <linux/of_address.h>
27 #include <asm/proc-fns.h>
28 #include <asm/exception.h>
29 #include <asm/hardware/cache-l2x0.h>
30 #include <asm/hardware/gic.h>
31 #include <asm/mach/map.h>
32 #include <asm/mach/irq.h>
33 #include <asm/cacheflush.h>
35 #include <mach/regs-irq.h>
36 #include <mach/regs-pmu.h>
37 #include <mach/regs-gpio.h>
41 #include <plat/clock.h>
42 #include <plat/devs.h>
44 #include <plat/sdhci.h>
45 #include <plat/gpio-cfg.h>
46 #include <plat/adc-core.h>
47 #include <plat/fb-core.h>
48 #include <plat/fimc-core.h>
49 #include <plat/iic-core.h>
50 #include <plat/tv-core.h>
51 #include <plat/spi-core.h>
52 #include <plat/regs-serial.h>
/* PL310 (L2C-310) auxiliary-control value/mask handed to l2x0_init() below. */
#define L2_AUX_VAL 0x7C470001
#define L2_AUX_MASK 0xC200ffff

/* SoC names exposed via the cpu_ids[] table. */
static const char name_exynos4210[] = "EXYNOS4210";
static const char name_exynos4212[] = "EXYNOS4212";
static const char name_exynos4412[] = "EXYNOS4412";
static const char name_exynos5250[] = "EXYNOS5250";
static const char name_exynos5440[] = "EXYNOS5440";

/* Forward declarations for the per-SoC hooks wired into cpu_ids[]. */
static void exynos4_map_io(void);
static void exynos5_map_io(void);
static void exynos5440_map_io(void);
static void exynos4_init_clocks(int xtal);
static void exynos5_init_clocks(int xtal);
static void exynos_init_uarts(struct s3c2410_uartcfg *cfg, int no);
static int exynos_init(void);
72 static struct cpu_table cpu_ids[] __initdata = {
74 .idcode = EXYNOS4210_CPU_ID,
75 .idmask = EXYNOS4_CPU_MASK,
76 .map_io = exynos4_map_io,
77 .init_clocks = exynos4_init_clocks,
78 .init_uarts = exynos_init_uarts,
80 .name = name_exynos4210,
82 .idcode = EXYNOS4212_CPU_ID,
83 .idmask = EXYNOS4_CPU_MASK,
84 .map_io = exynos4_map_io,
85 .init_clocks = exynos4_init_clocks,
86 .init_uarts = exynos_init_uarts,
88 .name = name_exynos4212,
90 .idcode = EXYNOS4412_CPU_ID,
91 .idmask = EXYNOS4_CPU_MASK,
92 .map_io = exynos4_map_io,
93 .init_clocks = exynos4_init_clocks,
94 .init_uarts = exynos_init_uarts,
96 .name = name_exynos4412,
98 .idcode = EXYNOS5250_SOC_ID,
99 .idmask = EXYNOS5_SOC_MASK,
100 .map_io = exynos5_map_io,
101 .init_clocks = exynos5_init_clocks,
102 .init_uarts = exynos_init_uarts,
104 .name = name_exynos5250,
106 .idcode = EXYNOS5440_SOC_ID,
107 .idmask = EXYNOS5_SOC_MASK,
108 .map_io = exynos5440_map_io,
110 .name = name_exynos5440,
114 /* Initial IO mappings */
116 static struct map_desc exynos_iodesc[] __initdata = {
118 .virtual = (unsigned long)S5P_VA_CHIPID,
119 .pfn = __phys_to_pfn(EXYNOS_PA_CHIPID),
#ifdef CONFIG_ARCH_EXYNOS5
/* EXYNOS5440 puts its chip-ID block at a different physical address. */
static struct map_desc exynos5440_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_CHIPID,
		.pfn		= __phys_to_pfn(EXYNOS5440_PA_CHIPID),
		.length		= SZ_4K,	/* restored per mainline — verify */
		.type		= MT_DEVICE,
	},
};
#endif
136 static struct map_desc exynos4_iodesc[] __initdata = {
138 .virtual = (unsigned long)S3C_VA_SYS,
139 .pfn = __phys_to_pfn(EXYNOS4_PA_SYSCON),
143 .virtual = (unsigned long)S3C_VA_TIMER,
144 .pfn = __phys_to_pfn(EXYNOS4_PA_TIMER),
148 .virtual = (unsigned long)S3C_VA_WATCHDOG,
149 .pfn = __phys_to_pfn(EXYNOS4_PA_WATCHDOG),
153 .virtual = (unsigned long)S5P_VA_SROMC,
154 .pfn = __phys_to_pfn(EXYNOS4_PA_SROMC),
158 .virtual = (unsigned long)S5P_VA_SYSTIMER,
159 .pfn = __phys_to_pfn(EXYNOS4_PA_SYSTIMER),
163 .virtual = (unsigned long)S5P_VA_PMU,
164 .pfn = __phys_to_pfn(EXYNOS4_PA_PMU),
168 .virtual = (unsigned long)S5P_VA_COMBINER_BASE,
169 .pfn = __phys_to_pfn(EXYNOS4_PA_COMBINER),
173 .virtual = (unsigned long)S5P_VA_GIC_CPU,
174 .pfn = __phys_to_pfn(EXYNOS4_PA_GIC_CPU),
178 .virtual = (unsigned long)S5P_VA_GIC_DIST,
179 .pfn = __phys_to_pfn(EXYNOS4_PA_GIC_DIST),
183 .virtual = (unsigned long)S3C_VA_UART,
184 .pfn = __phys_to_pfn(EXYNOS4_PA_UART),
188 .virtual = (unsigned long)S5P_VA_CMU,
189 .pfn = __phys_to_pfn(EXYNOS4_PA_CMU),
193 .virtual = (unsigned long)S5P_VA_COREPERI_BASE,
194 .pfn = __phys_to_pfn(EXYNOS4_PA_COREPERI),
198 .virtual = (unsigned long)S5P_VA_L2CC,
199 .pfn = __phys_to_pfn(EXYNOS4_PA_L2CC),
203 .virtual = (unsigned long)S5P_VA_DMC0,
204 .pfn = __phys_to_pfn(EXYNOS4_PA_DMC0),
208 .virtual = (unsigned long)S5P_VA_DMC1,
209 .pfn = __phys_to_pfn(EXYNOS4_PA_DMC1),
213 .virtual = (unsigned long)S3C_VA_USB_HSPHY,
214 .pfn = __phys_to_pfn(EXYNOS4_PA_HSPHY),
220 static struct map_desc exynos4_iodesc0[] __initdata = {
222 .virtual = (unsigned long)S5P_VA_SYSRAM,
223 .pfn = __phys_to_pfn(EXYNOS4_PA_SYSRAM0),
229 static struct map_desc exynos4_iodesc1[] __initdata = {
231 .virtual = (unsigned long)S5P_VA_SYSRAM,
232 .pfn = __phys_to_pfn(EXYNOS4_PA_SYSRAM1),
238 static struct map_desc exynos5_iodesc[] __initdata = {
240 .virtual = (unsigned long)S3C_VA_SYS,
241 .pfn = __phys_to_pfn(EXYNOS5_PA_SYSCON),
245 .virtual = (unsigned long)S3C_VA_TIMER,
246 .pfn = __phys_to_pfn(EXYNOS5_PA_TIMER),
250 .virtual = (unsigned long)S3C_VA_WATCHDOG,
251 .pfn = __phys_to_pfn(EXYNOS5_PA_WATCHDOG),
255 .virtual = (unsigned long)S5P_VA_SROMC,
256 .pfn = __phys_to_pfn(EXYNOS5_PA_SROMC),
260 .virtual = (unsigned long)S5P_VA_SYSTIMER,
261 .pfn = __phys_to_pfn(EXYNOS5_PA_SYSTIMER),
265 .virtual = (unsigned long)S5P_VA_SYSRAM,
266 .pfn = __phys_to_pfn(EXYNOS5_PA_SYSRAM),
270 .virtual = (unsigned long)S5P_VA_CMU,
271 .pfn = __phys_to_pfn(EXYNOS5_PA_CMU),
272 .length = 144 * SZ_1K,
275 .virtual = (unsigned long)S5P_VA_PMU,
276 .pfn = __phys_to_pfn(EXYNOS5_PA_PMU),
280 .virtual = (unsigned long)S5P_VA_COMBINER_BASE,
281 .pfn = __phys_to_pfn(EXYNOS5_PA_COMBINER),
285 .virtual = (unsigned long)S3C_VA_UART,
286 .pfn = __phys_to_pfn(EXYNOS5_PA_UART),
290 .virtual = (unsigned long)S5P_VA_GIC_CPU,
291 .pfn = __phys_to_pfn(EXYNOS5_PA_GIC_CPU),
295 .virtual = (unsigned long)S5P_VA_GIC_DIST,
296 .pfn = __phys_to_pfn(EXYNOS5_PA_GIC_DIST),
302 static struct map_desc exynos5440_iodesc0[] __initdata = {
304 .virtual = (unsigned long)S3C_VA_UART,
305 .pfn = __phys_to_pfn(EXYNOS5440_PA_UART0),
311 void exynos4_restart(char mode, const char *cmd)
313 __raw_writel(0x1, S5P_SWRESET);
316 void exynos5_restart(char mode, const char *cmd)
321 if (of_machine_is_compatible("samsung,exynos5250")) {
323 addr = EXYNOS_SWRESET;
324 } else if (of_machine_is_compatible("samsung,exynos5440")) {
325 val = (0x10 << 20) | (0x1 << 16);
326 addr = EXYNOS5440_SWRESET;
328 pr_err("%s: cannot support non-DT\n", __func__);
332 __raw_writel(val, addr);
335 void __init exynos_init_late(void)
337 if (of_machine_is_compatible("samsung,exynos5440"))
338 /* to be supported later */
341 exynos_pm_late_initcall();
347 * register the standard cpu IO areas
350 void __init exynos_init_io(struct map_desc *mach_desc, int size)
352 struct map_desc *iodesc = exynos_iodesc;
353 int iodesc_sz = ARRAY_SIZE(exynos_iodesc);
354 #if defined(CONFIG_OF) && defined(CONFIG_ARCH_EXYNOS5)
355 unsigned long root = of_get_flat_dt_root();
357 /* initialize the io descriptors we need for initialization */
358 if (of_flat_dt_is_compatible(root, "samsung,exynos5440")) {
359 iodesc = exynos5440_iodesc;
360 iodesc_sz = ARRAY_SIZE(exynos5440_iodesc);
364 iotable_init(iodesc, iodesc_sz);
367 iotable_init(mach_desc, size);
369 /* detect cpu id and rev. */
370 s5p_init_cpu(S5P_VA_CHIPID);
372 s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
375 static void __init exynos4_map_io(void)
377 iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));
379 if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0)
380 iotable_init(exynos4_iodesc0, ARRAY_SIZE(exynos4_iodesc0));
382 iotable_init(exynos4_iodesc1, ARRAY_SIZE(exynos4_iodesc1));
384 /* initialize device information early */
385 exynos4_default_sdhci0();
386 exynos4_default_sdhci1();
387 exynos4_default_sdhci2();
388 exynos4_default_sdhci3();
390 s3c_adc_setname("samsung-adc-v3");
392 s3c_fimc_setname(0, "exynos4-fimc");
393 s3c_fimc_setname(1, "exynos4-fimc");
394 s3c_fimc_setname(2, "exynos4-fimc");
395 s3c_fimc_setname(3, "exynos4-fimc");
397 s3c_sdhci_setname(0, "exynos4-sdhci");
398 s3c_sdhci_setname(1, "exynos4-sdhci");
399 s3c_sdhci_setname(2, "exynos4-sdhci");
400 s3c_sdhci_setname(3, "exynos4-sdhci");
402 /* The I2C bus controllers are directly compatible with s3c2440 */
403 s3c_i2c0_setname("s3c2440-i2c");
404 s3c_i2c1_setname("s3c2440-i2c");
405 s3c_i2c2_setname("s3c2440-i2c");
407 s5p_fb_setname(0, "exynos4-fb");
408 s5p_hdmi_setname("exynos4-hdmi");
410 s3c64xx_spi_setname("exynos4210-spi");
413 static void __init exynos5_map_io(void)
415 iotable_init(exynos5_iodesc, ARRAY_SIZE(exynos5_iodesc));
417 s3c_device_i2c0.resource[0].start = EXYNOS5_PA_IIC(0);
418 s3c_device_i2c0.resource[0].end = EXYNOS5_PA_IIC(0) + SZ_4K - 1;
419 s3c_device_i2c0.resource[1].start = EXYNOS5_IRQ_IIC;
420 s3c_device_i2c0.resource[1].end = EXYNOS5_IRQ_IIC;
422 s3c_sdhci_setname(0, "exynos4-sdhci");
423 s3c_sdhci_setname(1, "exynos4-sdhci");
424 s3c_sdhci_setname(2, "exynos4-sdhci");
425 s3c_sdhci_setname(3, "exynos4-sdhci");
427 /* The I2C bus controllers are directly compatible with s3c2440 */
428 s3c_i2c0_setname("s3c2440-i2c");
429 s3c_i2c1_setname("s3c2440-i2c");
430 s3c_i2c2_setname("s3c2440-i2c");
432 s3c64xx_spi_setname("exynos4210-spi");
435 static void __init exynos4_init_clocks(int xtal)
437 printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
439 s3c24xx_register_baseclocks(xtal);
440 s5p_register_clocks(xtal);
442 if (soc_is_exynos4210())
443 exynos4210_register_clocks();
444 else if (soc_is_exynos4212() || soc_is_exynos4412())
445 exynos4212_register_clocks();
447 exynos4_register_clocks();
448 exynos4_setup_clocks();
451 static void __init exynos5440_map_io(void)
453 iotable_init(exynos5440_iodesc0, ARRAY_SIZE(exynos5440_iodesc0));
456 static void __init exynos5_init_clocks(int xtal)
458 printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
460 s3c24xx_register_baseclocks(xtal);
461 s5p_register_clocks(xtal);
463 exynos5_register_clocks();
464 exynos5_setup_clocks();
467 #define COMBINER_ENABLE_SET 0x0
468 #define COMBINER_ENABLE_CLEAR 0x4
469 #define COMBINER_INT_STATUS 0xC
471 static DEFINE_SPINLOCK(irq_controller_lock);
473 struct combiner_chip_data {
474 unsigned int irq_offset;
475 unsigned int irq_mask;
479 static struct irq_domain *combiner_irq_domain;
480 static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];
482 static inline void __iomem *combiner_base(struct irq_data *data)
484 struct combiner_chip_data *combiner_data =
485 irq_data_get_irq_chip_data(data);
487 return combiner_data->base;
490 static void combiner_mask_irq(struct irq_data *data)
492 u32 mask = 1 << (data->hwirq % 32);
494 __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
497 static void combiner_unmask_irq(struct irq_data *data)
499 u32 mask = 1 << (data->hwirq % 32);
501 __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
504 static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
506 struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
507 struct irq_chip *chip = irq_get_chip(irq);
508 unsigned int cascade_irq, combiner_irq;
509 unsigned long status;
511 chained_irq_enter(chip, desc);
513 spin_lock(&irq_controller_lock);
514 status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
515 spin_unlock(&irq_controller_lock);
516 status &= chip_data->irq_mask;
521 combiner_irq = __ffs(status);
523 cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
524 if (unlikely(cascade_irq >= NR_IRQS))
525 do_bad_IRQ(cascade_irq, desc);
527 generic_handle_irq(cascade_irq);
530 chained_irq_exit(chip, desc);
533 static struct irq_chip combiner_chip = {
535 .irq_mask = combiner_mask_irq,
536 .irq_unmask = combiner_unmask_irq,
539 static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
543 if (soc_is_exynos5250())
544 max_nr = EXYNOS5_MAX_COMBINER_NR;
546 max_nr = EXYNOS4_MAX_COMBINER_NR;
548 if (combiner_nr >= max_nr)
550 if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
552 irq_set_chained_handler(irq, combiner_handle_cascade_irq);
555 static void __init combiner_init_one(unsigned int combiner_nr,
558 combiner_data[combiner_nr].base = base;
559 combiner_data[combiner_nr].irq_offset = irq_find_mapping(
560 combiner_irq_domain, combiner_nr * MAX_IRQ_IN_COMBINER);
561 combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);
563 /* Disable all interrupts */
564 __raw_writel(combiner_data[combiner_nr].irq_mask,
565 base + COMBINER_ENABLE_CLEAR);
569 static int combiner_irq_domain_xlate(struct irq_domain *d,
570 struct device_node *controller,
571 const u32 *intspec, unsigned int intsize,
572 unsigned long *out_hwirq,
573 unsigned int *out_type)
575 if (d->of_node != controller)
581 *out_hwirq = intspec[0] * MAX_IRQ_IN_COMBINER + intspec[1];
587 static int combiner_irq_domain_xlate(struct irq_domain *d,
588 struct device_node *controller,
589 const u32 *intspec, unsigned int intsize,
590 unsigned long *out_hwirq,
591 unsigned int *out_type)
597 static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
600 irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
601 irq_set_chip_data(irq, &combiner_data[hw >> 3]);
602 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
607 static struct irq_domain_ops combiner_irq_domain_ops = {
608 .xlate = combiner_irq_domain_xlate,
609 .map = combiner_irq_domain_map,
612 static void __init combiner_init(void __iomem *combiner_base,
613 struct device_node *np)
615 int i, irq, irq_base;
616 unsigned int max_nr, nr_irq;
619 if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
620 pr_warning("%s: number of combiners not specified, "
621 "setting default as %d.\n",
622 __func__, EXYNOS4_MAX_COMBINER_NR);
623 max_nr = EXYNOS4_MAX_COMBINER_NR;
626 max_nr = soc_is_exynos5250() ? EXYNOS5_MAX_COMBINER_NR :
627 EXYNOS4_MAX_COMBINER_NR;
629 nr_irq = max_nr * MAX_IRQ_IN_COMBINER;
631 irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
632 if (IS_ERR_VALUE(irq_base)) {
633 irq_base = COMBINER_IRQ(0, 0);
634 pr_warning("%s: irq desc alloc failed. Continuing with %d as linux irq base\n", __func__, irq_base);
637 combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
638 &combiner_irq_domain_ops, &combiner_data);
639 if (WARN_ON(!combiner_irq_domain)) {
640 pr_warning("%s: irq domain init failed\n", __func__);
644 for (i = 0; i < max_nr; i++) {
645 combiner_init_one(i, combiner_base + (i >> 2) * 0x10);
649 irq = irq_of_parse_and_map(np, i);
651 combiner_cascade_irq(i, irq);
656 int __init combiner_of_init(struct device_node *np, struct device_node *parent)
658 void __iomem *combiner_base;
660 combiner_base = of_iomap(np, 0);
661 if (!combiner_base) {
662 pr_err("%s: failed to map combiner registers\n", __func__);
666 combiner_init(combiner_base, np);
671 static const struct of_device_id exynos_dt_irq_match[] = {
672 { .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
673 { .compatible = "arm,cortex-a15-gic", .data = gic_of_init, },
674 { .compatible = "samsung,exynos4210-combiner",
675 .data = combiner_of_init, },
680 void __init exynos4_init_irq(void)
682 unsigned int gic_bank_offset;
684 gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;
686 if (!of_have_populated_dt())
687 gic_init_bases(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU, gic_bank_offset, NULL);
690 of_irq_init(exynos_dt_irq_match);
693 if (!of_have_populated_dt())
694 combiner_init(S5P_VA_COMBINER_BASE, NULL);
697 * The parameters of s5p_init_irq() are for VIC init.
698 * Theses parameters should be NULL and 0 because EXYNOS4
699 * uses GIC instead of VIC.
701 s5p_init_irq(NULL, 0);
704 void __init exynos5_init_irq(void)
707 of_irq_init(exynos_dt_irq_match);
710 * The parameters of s5p_init_irq() are for VIC init.
711 * Theses parameters should be NULL and 0 because EXYNOS4
712 * uses GIC instead of VIC.
714 s5p_init_irq(NULL, 0);
717 struct bus_type exynos_subsys = {
718 .name = "exynos-core",
719 .dev_name = "exynos-core",
722 static struct device exynos4_dev = {
723 .bus = &exynos_subsys,
726 static int __init exynos_core_init(void)
728 return subsys_system_register(&exynos_subsys, NULL);
730 core_initcall(exynos_core_init);
#ifdef CONFIG_CACHE_L2X0
/*
 * Bring up the PL310 L2 cache on EXYNOS4 (EXYNOS5 has integrated L2 and is
 * skipped).  Try DT init first; on non-DT boots program latencies, prefetch
 * and power control directly, save the register values for resume, and
 * clean them to DRAM so the resume path can restore them with caches off.
 *
 * NOTE(review): braces, locals and return statements were missing from the
 * truncated source; restored per mainline — verify.
 */
static int __init exynos4_l2x0_cache_init(void)
{
	int ret;

	if (soc_is_exynos5250() || soc_is_exynos5440())
		return 0;

	ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK);
	if (!ret) {
		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);
		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		return 0;
	}

	/* non-DT path: only program the controller if it is not yet enabled */
	if (!(__raw_readl(S5P_VA_L2CC + L2X0_CTRL) & 0x1)) {
		l2x0_saved_regs.phy_base = EXYNOS4_PA_L2CC;
		/* TAG, Data Latency Control: 2 cycles */
		l2x0_saved_regs.tag_latency = 0x110;

		if (soc_is_exynos4212() || soc_is_exynos4412())
			l2x0_saved_regs.data_latency = 0x120;
		else
			l2x0_saved_regs.data_latency = 0x110;

		l2x0_saved_regs.prefetch_ctrl = 0x30000007;
		l2x0_saved_regs.pwr_ctrl =
			(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN);

		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);

		__raw_writel(l2x0_saved_regs.tag_latency,
				S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);
		__raw_writel(l2x0_saved_regs.data_latency,
				S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);

		/* L2X0 Prefetch Control */
		__raw_writel(l2x0_saved_regs.prefetch_ctrl,
				S5P_VA_L2CC + L2X0_PREFETCH_CTRL);

		/* L2X0 Power Control */
		__raw_writel(l2x0_saved_regs.pwr_ctrl,
				S5P_VA_L2CC + L2X0_POWER_CTRL);

		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		clean_dcache_area(&l2x0_saved_regs, sizeof(struct l2x0_regs));
	}

	l2x0_init(S5P_VA_L2CC, L2_AUX_VAL, L2_AUX_MASK);
	return 0;
}
early_initcall(exynos4_l2x0_cache_init);
#endif
786 static int __init exynos_init(void)
788 printk(KERN_INFO "EXYNOS: Initializing architecture\n");
790 return device_register(&exynos4_dev);
793 /* uart registration process */
795 static void __init exynos_init_uarts(struct s3c2410_uartcfg *cfg, int no)
797 struct s3c2410_uartcfg *tcfg = cfg;
800 for (ucnt = 0; ucnt < no; ucnt++, tcfg++)
801 tcfg->has_fracval = 1;
803 if (soc_is_exynos5250())
804 s3c24xx_init_uartdevs("exynos4210-uart", exynos5_uart_resources, cfg, no);
806 s3c24xx_init_uartdevs("exynos4210-uart", exynos4_uart_resources, cfg, no);
/* Base of the ioremapped external-interrupt (EINT) register block. */
static void __iomem *exynos_eint_base;

/* Protects read-modify-write of the shared EINT mask/control registers. */
static DEFINE_SPINLOCK(eint_lock);

/* Linux irq numbers for EINT0-15, handed to the chained handler below. */
static unsigned int eint0_15_data[16];
815 static inline int exynos4_irq_to_gpio(unsigned int irq)
817 if (irq < IRQ_EINT(0))
822 return EXYNOS4_GPX0(irq);
826 return EXYNOS4_GPX1(irq);
830 return EXYNOS4_GPX2(irq);
834 return EXYNOS4_GPX3(irq);
839 static inline int exynos5_irq_to_gpio(unsigned int irq)
841 if (irq < IRQ_EINT(0))
846 return EXYNOS5_GPX0(irq);
850 return EXYNOS5_GPX1(irq);
854 return EXYNOS5_GPX2(irq);
858 return EXYNOS5_GPX3(irq);
863 static unsigned int exynos4_eint0_15_src_int[16] = {
882 static unsigned int exynos5_eint0_15_src_int[16] = {
900 static inline void exynos_irq_eint_mask(struct irq_data *data)
904 spin_lock(&eint_lock);
905 mask = __raw_readl(EINT_MASK(exynos_eint_base, data->irq));
906 mask |= EINT_OFFSET_BIT(data->irq);
907 __raw_writel(mask, EINT_MASK(exynos_eint_base, data->irq));
908 spin_unlock(&eint_lock);
911 static void exynos_irq_eint_unmask(struct irq_data *data)
915 spin_lock(&eint_lock);
916 mask = __raw_readl(EINT_MASK(exynos_eint_base, data->irq));
917 mask &= ~(EINT_OFFSET_BIT(data->irq));
918 __raw_writel(mask, EINT_MASK(exynos_eint_base, data->irq));
919 spin_unlock(&eint_lock);
922 static inline void exynos_irq_eint_ack(struct irq_data *data)
924 __raw_writel(EINT_OFFSET_BIT(data->irq),
925 EINT_PEND(exynos_eint_base, data->irq));
928 static void exynos_irq_eint_maskack(struct irq_data *data)
930 exynos_irq_eint_mask(data);
931 exynos_irq_eint_ack(data);
934 static int exynos_irq_eint_set_type(struct irq_data *data, unsigned int type)
936 int offs = EINT_OFFSET(data->irq);
942 case IRQ_TYPE_EDGE_RISING:
943 newvalue = S5P_IRQ_TYPE_EDGE_RISING;
946 case IRQ_TYPE_EDGE_FALLING:
947 newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
950 case IRQ_TYPE_EDGE_BOTH:
951 newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
954 case IRQ_TYPE_LEVEL_LOW:
955 newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
958 case IRQ_TYPE_LEVEL_HIGH:
959 newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
963 printk(KERN_ERR "No such irq type %d", type);
967 shift = (offs & 0x7) * 4;
970 spin_lock(&eint_lock);
971 ctrl = __raw_readl(EINT_CON(exynos_eint_base, data->irq));
973 ctrl |= newvalue << shift;
974 __raw_writel(ctrl, EINT_CON(exynos_eint_base, data->irq));
975 spin_unlock(&eint_lock);
977 if (soc_is_exynos5250())
978 s3c_gpio_cfgpin(exynos5_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf));
980 s3c_gpio_cfgpin(exynos4_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf));
985 static struct irq_chip exynos_irq_eint = {
986 .name = "exynos-eint",
987 .irq_mask = exynos_irq_eint_mask,
988 .irq_unmask = exynos_irq_eint_unmask,
989 .irq_mask_ack = exynos_irq_eint_maskack,
990 .irq_ack = exynos_irq_eint_ack,
991 .irq_set_type = exynos_irq_eint_set_type,
993 .irq_set_wake = s3c_irqext_wake,
998 * exynos4_irq_demux_eint
1000 * This function demuxes the IRQ from from EINTs 16 to 31.
1001 * It is designed to be inlined into the specific handler
1002 * s5p_irq_demux_eintX_Y.
1004 * Each EINT pend/mask registers handle eight of them.
1006 static inline void exynos_irq_demux_eint(unsigned int start)
1010 u32 status = __raw_readl(EINT_PEND(exynos_eint_base, start));
1011 u32 mask = __raw_readl(EINT_MASK(exynos_eint_base, start));
1017 irq = fls(status) - 1;
1018 generic_handle_irq(irq + start);
1019 status &= ~(1 << irq);
1023 static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
1025 struct irq_chip *chip = irq_get_chip(irq);
1026 chained_irq_enter(chip, desc);
1027 exynos_irq_demux_eint(IRQ_EINT(16));
1028 exynos_irq_demux_eint(IRQ_EINT(24));
1029 chained_irq_exit(chip, desc);
1032 static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
1034 u32 *irq_data = irq_get_handler_data(irq);
1035 struct irq_chip *chip = irq_get_chip(irq);
1037 chained_irq_enter(chip, desc);
1038 chip->irq_mask(&desc->irq_data);
1041 chip->irq_ack(&desc->irq_data);
1043 generic_handle_irq(*irq_data);
1045 chip->irq_unmask(&desc->irq_data);
1046 chained_irq_exit(chip, desc);
1049 static int __init exynos_init_irq_eint(void)
1053 #ifdef CONFIG_PINCTRL_SAMSUNG
1055 * The Samsung pinctrl driver provides an integrated gpio/pinmux/pinconf
1056 * functionality along with support for external gpio and wakeup
1057 * interrupts. If the samsung pinctrl driver is enabled and includes
1058 * the wakeup interrupt support, then the setting up external wakeup
1059 * interrupts here can be skipped. This check here is temporary to
1060 * allow exynos4 platforms that do not use Samsung pinctrl driver to
1061 * co-exist with platforms that do. When all of the Samsung Exynos4
1062 * platforms switch over to using the pinctrl driver, the wakeup
1063 * interrupt support code here can be completely removed.
1065 struct device_node *pctrl_np, *wkup_np;
1066 const char *pctrl_compat = "samsung,pinctrl-exynos4210";
1067 const char *wkup_compat = "samsung,exynos4210-wakeup-eint";
1069 for_each_compatible_node(pctrl_np, NULL, pctrl_compat) {
1070 if (of_device_is_available(pctrl_np)) {
1071 wkup_np = of_find_compatible_node(pctrl_np, NULL,
1078 if (soc_is_exynos5440())
1081 if (soc_is_exynos5250())
1082 exynos_eint_base = ioremap(EXYNOS5_PA_GPIO1, SZ_4K);
1084 exynos_eint_base = ioremap(EXYNOS4_PA_GPIO2, SZ_4K);
1086 if (exynos_eint_base == NULL) {
1087 pr_err("unable to ioremap for EINT base address\n");
1091 for (irq = 0 ; irq <= 31 ; irq++) {
1092 irq_set_chip_and_handler(IRQ_EINT(irq), &exynos_irq_eint,
1094 set_irq_flags(IRQ_EINT(irq), IRQF_VALID);
1097 irq_set_chained_handler(EXYNOS_IRQ_EINT16_31, exynos_irq_demux_eint16_31);
1099 for (irq = 0 ; irq <= 15 ; irq++) {
1100 eint0_15_data[irq] = IRQ_EINT(irq);
1102 if (soc_is_exynos5250()) {
1103 irq_set_handler_data(exynos5_eint0_15_src_int[irq],
1104 &eint0_15_data[irq]);
1105 irq_set_chained_handler(exynos5_eint0_15_src_int[irq],
1106 exynos_irq_eint0_15);
1108 irq_set_handler_data(exynos4_eint0_15_src_int[irq],
1109 &eint0_15_data[irq]);
1110 irq_set_chained_handler(exynos4_eint0_15_src_int[irq],
1111 exynos_irq_eint0_15);
1117 arch_initcall(exynos_init_irq_eint);