drivers/irqchip/exynos-combiner.c
/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com
 *
 * Combiner irqchip for EXYNOS
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/mach/irq.h>

#include <plat/cpu.h>

#include "irqchip.h"

#define COMBINER_ENABLE_SET     0x0
#define COMBINER_ENABLE_CLEAR   0x4
#define COMBINER_INT_STATUS     0xC

static DEFINE_SPINLOCK(irq_controller_lock);

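/*
 * Per-combiner-group state: the Linux irq number of the group's first
 * interrupt, the per-group mask within the shared 32-bit registers, the
 * MMIO base of the group's register bank, and the parent interrupt the
 * group cascades from.
 */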
struct combiner_chip_data {
        unsigned int irq_offset;
        unsigned int irq_mask;
        void __iomem *base;
        unsigned int parent_irq;
};

static struct irq_domain *combiner_irq_domain;
static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];

static inline void __iomem *combiner_base(struct irq_data *data)
{
        struct combiner_chip_data *combiner_data =
                irq_data_get_irq_chip_data(data);

        return combiner_data->base;
}

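/*
 * Mask/unmask a single combiner input by writing its bit to the
 * ENABLE_CLEAR or ENABLE_SET register of the group's register bank.
 */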
static void combiner_mask_irq(struct irq_data *data)
{
        u32 mask = 1 << (data->hwirq % 32);

        __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}

static void combiner_unmask_irq(struct irq_data *data)
{
        u32 mask = 1 << (data->hwirq % 32);

        __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}

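/*
 * Chained handler for the parent interrupt: read the pending status,
 * restrict it to this group's byte, translate the first pending bit to
 * its Linux irq number and dispatch it via generic_handle_irq().
 */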
static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
        struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
        struct irq_chip *chip = irq_get_chip(irq);
        unsigned int cascade_irq, combiner_irq;
        unsigned long status;

        chained_irq_enter(chip, desc);

        spin_lock(&irq_controller_lock);
        status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
        spin_unlock(&irq_controller_lock);
        status &= chip_data->irq_mask;

        if (status == 0)
                goto out;

        combiner_irq = __ffs(status);

        cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
        if (unlikely(cascade_irq >= NR_IRQS))
                do_bad_IRQ(cascade_irq, desc);
        else
                generic_handle_irq(cascade_irq);

 out:
        chained_irq_exit(chip, desc);
}

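/*
 * The combiner has no affinity control of its own; forward affinity
 * requests to the parent interrupt's chip.
 */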
#ifdef CONFIG_SMP
static int combiner_set_affinity(struct irq_data *d,
                                 const struct cpumask *mask_val, bool force)
{
        struct combiner_chip_data *chip_data = irq_data_get_irq_chip_data(d);
        struct irq_chip *chip = irq_get_chip(chip_data->parent_irq);
        struct irq_data *data = irq_get_irq_data(chip_data->parent_irq);

        if (chip && chip->irq_set_affinity)
                return chip->irq_set_affinity(data, mask_val, force);
        else
                return -EINVAL;
}
#endif

static struct irq_chip combiner_chip = {
        .name                   = "COMBINER",
        .irq_mask               = combiner_mask_irq,
        .irq_unmask             = combiner_unmask_irq,
#ifdef CONFIG_SMP
        .irq_set_affinity       = combiner_set_affinity,
#endif
};

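/* The number of combiner groups depends on the SoC variant. */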
static unsigned int max_combiner_nr(void)
{
        if (soc_is_exynos5250())
                return EXYNOS5_MAX_COMBINER_NR;
        else if (soc_is_exynos4412())
                return EXYNOS4412_MAX_COMBINER_NR;
        else if (soc_is_exynos4212())
                return EXYNOS4212_MAX_COMBINER_NR;
        else
                return EXYNOS4210_MAX_COMBINER_NR;
}

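/* Install the chained handler for one combiner group on its parent irq. */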
static void __init combiner_cascade_irq(unsigned int combiner_nr,
                                        unsigned int irq)
{
        if (combiner_nr >= max_combiner_nr())
                BUG();
        if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
                BUG();
        irq_set_chained_handler(irq, combiner_handle_cascade_irq);
}

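/*
 * Initialize one combiner group: record its register bank, the Linux irq
 * of its first input, the byte mask it occupies within the shared 32-bit
 * registers (four groups share each bank), and its parent irq, then mask
 * all of its inputs.
 */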
static void __init combiner_init_one(unsigned int combiner_nr,
                                     void __iomem *base, unsigned int irq)
{
        combiner_data[combiner_nr].base = base;
        combiner_data[combiner_nr].irq_offset = irq_find_mapping(
                combiner_irq_domain, combiner_nr * MAX_IRQ_IN_COMBINER);
        combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);
        combiner_data[combiner_nr].parent_irq = irq;

        /* Disable all interrupts */
        __raw_writel(combiner_data[combiner_nr].irq_mask,
                     base + COMBINER_ENABLE_CLEAR);
}

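/*
 * Translate a two-cell DT interrupt specifier (combiner group, interrupt
 * within the group) into a linear hwirq number.
 */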
#ifdef CONFIG_OF
static int combiner_irq_domain_xlate(struct irq_domain *d,
                                     struct device_node *controller,
                                     const u32 *intspec, unsigned int intsize,
                                     unsigned long *out_hwirq,
                                     unsigned int *out_type)
{
        if (d->of_node != controller)
                return -EINVAL;

        if (intsize < 2)
                return -EINVAL;

        *out_hwirq = intspec[0] * MAX_IRQ_IN_COMBINER + intspec[1];
        *out_type = 0;

        return 0;
}
#else
static int combiner_irq_domain_xlate(struct irq_domain *d,
                                     struct device_node *controller,
                                     const u32 *intspec, unsigned int intsize,
                                     unsigned long *out_hwirq,
                                     unsigned int *out_type)
{
        return -EINVAL;
}
#endif

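/*
 * Set up a newly mapped interrupt: level flow handler, combiner irq_chip
 * and the chip data of the group it belongs to (eight interrupts per group).
 */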
static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
                                   irq_hw_number_t hw)
{
        irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
        irq_set_chip_data(irq, &combiner_data[hw >> 3]);
        set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);

        return 0;
}

static struct irq_domain_ops combiner_irq_domain_ops = {
        .xlate  = combiner_irq_domain_xlate,
        .map    = combiner_irq_domain_map,
};

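/*
 * On Exynos4x12 the extra combiner groups 16-19 are not wired to
 * consecutive SPIs; return the parent interrupt for each of them explicitly.
 */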
static unsigned int exynos4x12_combiner_extra_irq(int group)
{
        switch (group) {
        case 16:
                return IRQ_SPI(107);
        case 17:
                return IRQ_SPI(108);
        case 18:
                return IRQ_SPI(48);
        case 19:
                return IRQ_SPI(42);
        default:
                return 0;
        }
}

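/*
 * Allocate irq descriptors, create the legacy irq domain and set up every
 * combiner group: four groups share each 0x10-byte register bank, and each
 * group cascades from its own parent interrupt, taken from IRQ_SPI(i), the
 * Exynos4x12 exception table, or the device tree.
 */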
void __init combiner_init(void __iomem *combiner_base,
                          struct device_node *np)
{
        int i, irq, irq_base;
        unsigned int max_nr, nr_irq;

        max_nr = max_combiner_nr();

        if (np) {
                if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
                        pr_info("%s: number of combiners not specified, setting default as %d.\n",
                                __func__, max_nr);
                }
        }

        nr_irq = max_nr * MAX_IRQ_IN_COMBINER;

        irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
        if (IS_ERR_VALUE(irq_base)) {
                irq_base = COMBINER_IRQ(0, 0);
                pr_warning("%s: irq desc alloc failed. Continuing with %d as linux irq base\n", __func__, irq_base);
        }

        combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
                                &combiner_irq_domain_ops, &combiner_data);
        if (WARN_ON(!combiner_irq_domain)) {
                pr_warning("%s: irq domain init failed\n", __func__);
                return;
        }

        for (i = 0; i < max_nr; i++) {
                if (i < EXYNOS4210_MAX_COMBINER_NR || soc_is_exynos5250())
                        irq = IRQ_SPI(i);
                else
                        irq = exynos4x12_combiner_extra_irq(i);
#ifdef CONFIG_OF
                if (np)
                        irq = irq_of_parse_and_map(np, i);
#endif
                combiner_init_one(i, combiner_base + (i >> 2) * 0x10, irq);
                combiner_cascade_irq(i, irq);
        }
}

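/* Device tree entry point: map the combiner registers and initialize. */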
#ifdef CONFIG_OF
static int __init combiner_of_init(struct device_node *np,
                                   struct device_node *parent)
{
        void __iomem *combiner_base;

        combiner_base = of_iomap(np, 0);
        if (!combiner_base) {
                pr_err("%s: failed to map combiner registers\n", __func__);
                return -ENXIO;
        }

        combiner_init(combiner_base, np);

        return 0;
}
IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner",
                combiner_of_init);
#endif