d4325c70cf6197b4dcf283c0586ceccb1214d008
[firefly-linux-kernel-4.4.55.git] / drivers / sh / intc.c
1 /*
2  * Shared interrupt handling code for IPR and INTC2 types of IRQs.
3  *
4  * Copyright (C) 2007, 2008 Magnus Damm
5  * Copyright (C) 2009, 2010 Paul Mundt
6  *
7  * Based on intc2.c and ipr.c
8  *
9  * Copyright (C) 1999  Niibe Yutaka & Takeshi Yaegashi
10  * Copyright (C) 2000  Kazumoto Kojima
11  * Copyright (C) 2001  David J. Mckay (david.mckay@st.com)
12  * Copyright (C) 2003  Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
13  * Copyright (C) 2005, 2006  Paul Mundt
14  *
15  * This file is subject to the terms and conditions of the GNU General Public
16  * License.  See the file "COPYING" in the main directory of this archive
17  * for more details.
18  */
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21 #include <linux/init.h>
22 #include <linux/irq.h>
23 #include <linux/module.h>
24 #include <linux/io.h>
25 #include <linux/slab.h>
26 #include <linux/interrupt.h>
27 #include <linux/sh_intc.h>
28 #include <linux/sysdev.h>
29 #include <linux/list.h>
30 #include <linux/topology.h>
31 #include <linux/bitmap.h>
32 #include <linux/cpumask.h>
33 #include <linux/spinlock.h>
34 #include <linux/debugfs.h>
35 #include <linux/seq_file.h>
36 #include <linux/radix-tree.h>
37 #include <linux/mutex.h>
38 #include <linux/rcupdate.h>
39 #include <asm/sizes.h>
40
/*
 * Pack an access-function index, mode, enable/disable register indices,
 * field width and bit shift into a single opaque 32-bit handle, plus
 * the matching accessors to unpack each field again.
 *
 * Layout: [31:24] addr_d  [23:16] addr_e  [15:13] mode
 *         [12:9]  fn      [8:5]   width   [4:0]   shift
 *
 * Every macro argument is fully parenthesized so expression arguments
 * expand safely (the old form left addr_d and h bare).
 */
#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
	((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
	 ((addr_e) << 16) | ((addr_d) << 24))

#define _INTC_SHIFT(h) ((h) & 0x1f)
#define _INTC_WIDTH(h) (((h) >> 5) & 0xf)
#define _INTC_FN(h) (((h) >> 9) & 0xf)
#define _INTC_MODE(h) (((h) >> 13) & 0x7)
#define _INTC_ADDR_E(h) (((h) >> 16) & 0xff)
#define _INTC_ADDR_D(h) (((h) >> 24) & 0xff)
51
/* per-IRQ binding of a vector to a packed register-field handle */
struct intc_handle_int {
	unsigned int irq;
	unsigned long handle;	/* packed via _INTC_MK() */
};
56
/* one ioremapped register window, used for phys -> virt translation */
struct intc_window {
	phys_addr_t phys;	/* physical base of the window */
	void __iomem *virt;	/* corresponding mapped virtual base */
	unsigned long size;	/* window length in bytes */
};
62
/* reverse map from an IRQ vector to its enum id and owning controller */
struct intc_map_entry {
	intc_enum enum_id;
	struct intc_desc_int *desc;
};
67
/* subgroup member: parent IRQ plus the enum/handle of the sub-source */
struct intc_subgroup_entry {
	unsigned int pirq;	/* parent IRQ the subgroup hangs off */
	intc_enum enum_id;
	unsigned long handle;
};
73
/* runtime state for one registered interrupt controller */
struct intc_desc_int {
	struct list_head list;		/* link on global intc_list */
	struct sys_device sysdev;	/* sysdev hookup (suspend/resume) */
	struct radix_tree_root tree;
	pm_message_t state;
	spinlock_t lock;
	unsigned int index;		/* controller number */
	unsigned long *reg;		/* virtual register addresses */
#ifdef CONFIG_SMP
	unsigned long *smp;		/* per-register SMP stride/copies, see INTC_REG()/SMP_NR() */
#endif
	unsigned int nr_reg;
	struct intc_handle_int *prio;	/* priority handles */
	unsigned int nr_prio;
	struct intc_handle_int *sense;	/* sense-select handles */
	unsigned int nr_sense;
	struct intc_window *window;	/* ioremapped register windows */
	unsigned int nr_windows;
	struct irq_chip chip;		/* embedded irq_chip for this controller */
};
94
/* all registered controllers, and a running count of them */
static LIST_HEAD(intc_list);
static unsigned int nr_intc_controllers;
97
98 /*
99  * The intc_irq_map provides a global map of bound IRQ vectors for a
100  * given platform. Allocation of IRQs are either static through the CPU
101  * vector map, or dynamic in the case of board mux vectors or MSI.
102  *
103  * As this is a central point for all IRQ controllers on the system,
104  * each of the available sources are mapped out here. This combined with
105  * sparseirq makes it quite trivial to keep the vector map tightly packed
106  * when dynamically creating IRQs, as well as tying in to otherwise
107  * unused irq_desc positions in the sparse array.
108  */
static DECLARE_BITMAP(intc_irq_map, NR_IRQS);		/* bound vectors */
static struct intc_map_entry intc_irq_xlate[NR_IRQS];	/* irq -> enum/desc */
/* NOTE(review): presumably vector_lock guards intc_irq_map and xlate_lock
 * guards intc_irq_xlate — the users are outside this chunk, confirm there. */
static DEFINE_SPINLOCK(vector_lock);
static DEFINE_SPINLOCK(xlate_lock);
113
#ifdef CONFIG_SMP
/*
 * With SMP, the low byte of d->smp[x] is the per-CPU register stride
 * and the remaining upper bits the number of per-CPU copies (0 -> 1).
 */
#define IS_SMP(x) x.smp
#define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c))
#define SMP_NR(d, x) ((d->smp[(x)] >> 8) ? (d->smp[(x)] >> 8) : 1)
#else
/* UP: a single register copy and no stride */
#define IS_SMP(x) 0
#define INTC_REG(d, x, c) (d->reg[(x)])
#define SMP_NR(d, x) 1
#endif
123
static unsigned int intc_prio_level[NR_IRQS];	/* for now */
static unsigned int default_prio_level = 2;	/* 2 - 16 */
static unsigned long ack_handle[NR_IRQS];	/* per-IRQ ack register handle */
#ifdef CONFIG_INTC_BALANCING
static unsigned long dist_handle[NR_IRQS];	/* per-IRQ auto-distribution handle */
#endif
130
/* singly-linked list of virtual IRQs hanging off one parent vector */
struct intc_virq_list {
	unsigned int irq;
	struct intc_virq_list *next;
};

/* iterate over every entry of an intc_virq_list */
#define for_each_virq(entry, head) \
	for (entry = head; entry; entry = entry->next)
138
139 static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
140 {
141         struct irq_chip *chip = get_irq_chip(irq);
142
143         return container_of(chip, struct intc_desc_int, chip);
144 }
145
/*
 * Flow handler for redirected vectors: re-dispatch to the target IRQ
 * number that was stashed in this vector's irq data.
 */
static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
{
	unsigned int target = (unsigned int)get_irq_data(irq);

	generic_handle_irq(target);
}
150
/* Make a newly created IRQ requestable, but keep it out of autoprobing. */
static inline void activate_irq(int irq)
{
#ifdef CONFIG_ARM
	/* ARM requires an extra step to clear IRQ_NOREQUEST, which it
	 * sets on behalf of every irq_chip.  Also sets IRQ_NOPROBE.
	 */
	set_irq_flags(irq, IRQF_VALID);
#else
	/* same effect on other architectures */
	set_irq_noprobe(irq);
#endif
}
163
164 static unsigned long intc_phys_to_virt(struct intc_desc_int *d,
165                                        unsigned long address)
166 {
167         struct intc_window *window;
168         int k;
169
170         /* scan through physical windows and convert address */
171         for (k = 0; k < d->nr_windows; k++) {
172                 window = d->window + k;
173
174                 if (address < window->phys)
175                         continue;
176
177                 if (address >= (window->phys + window->size))
178                         continue;
179
180                 address -= window->phys;
181                 address += (unsigned long)window->virt;
182
183                 return address;
184         }
185
186         /* no windows defined, register must be 1:1 mapped virt:phys */
187         return address;
188 }
189
190 static unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address)
191 {
192         unsigned int k;
193
194         address = intc_phys_to_virt(d, address);
195
196         for (k = 0; k < d->nr_reg; k++) {
197                 if (d->reg[k] == address)
198                         return k;
199         }
200
201         BUG();
202         return 0;
203 }
204
/*
 * Replace the bit field described by the handle (width/shift) inside
 * value with field_value and return the resulting register image.
 */
static inline unsigned int set_field(unsigned int value,
				     unsigned int field_value,
				     unsigned int handle)
{
	unsigned int shift = _INTC_SHIFT(handle);
	unsigned int mask = ((1 << _INTC_WIDTH(handle)) - 1) << shift;

	return (value & ~mask) | (field_value << shift);
}
216
/* Extract the bit field described by the handle from a register image. */
static inline unsigned long get_field(unsigned int value, unsigned int handle)
{
	unsigned int shift = _INTC_SHIFT(handle);

	return (value >> shift) & ((1 << _INTC_WIDTH(handle)) - 1);
}
225
/*
 * Raw accessor family, dispatched through intc_reg_fns[]. The "test"
 * variants read a register and extract the handle's field; the third
 * argument is unused but keeps the signature uniform with the
 * write/modify variants.
 */
static unsigned long test_8(unsigned long addr, unsigned long h,
			    unsigned long ignore)
{
	return get_field(__raw_readb(addr), h);
}

static unsigned long test_16(unsigned long addr, unsigned long h,
			     unsigned long ignore)
{
	return get_field(__raw_readw(addr), h);
}

static unsigned long test_32(unsigned long addr, unsigned long h,
			     unsigned long ignore)
{
	return get_field(__raw_readl(addr), h);
}
243
/*
 * The "write" variants overwrite the whole register with only the
 * handle's field set to data, then read back to flush posted writes.
 */
static unsigned long write_8(unsigned long addr, unsigned long h,
			     unsigned long data)
{
	__raw_writeb(set_field(0, data, h), addr);
	(void)__raw_readb(addr);	/* Defeat write posting */
	return 0;
}

static unsigned long write_16(unsigned long addr, unsigned long h,
			      unsigned long data)
{
	__raw_writew(set_field(0, data, h), addr);
	(void)__raw_readw(addr);	/* Defeat write posting */
	return 0;
}

static unsigned long write_32(unsigned long addr, unsigned long h,
			      unsigned long data)
{
	__raw_writel(set_field(0, data, h), addr);
	(void)__raw_readl(addr);	/* Defeat write posting */
	return 0;
}
267
/*
 * The "modify" variants perform a read-modify-write of only the
 * handle's field, with local interrupts disabled so the sequence is
 * atomic with respect to this CPU.
 */
static unsigned long modify_8(unsigned long addr, unsigned long h,
			      unsigned long data)
{
	unsigned long flags;
	local_irq_save(flags);
	__raw_writeb(set_field(__raw_readb(addr), data, h), addr);
	(void)__raw_readb(addr);	/* Defeat write posting */
	local_irq_restore(flags);
	return 0;
}

static unsigned long modify_16(unsigned long addr, unsigned long h,
			       unsigned long data)
{
	unsigned long flags;
	local_irq_save(flags);
	__raw_writew(set_field(__raw_readw(addr), data, h), addr);
	(void)__raw_readw(addr);	/* Defeat write posting */
	local_irq_restore(flags);
	return 0;
}

static unsigned long modify_32(unsigned long addr, unsigned long h,
			       unsigned long data)
{
	unsigned long flags;
	local_irq_save(flags);
	__raw_writel(set_field(__raw_readl(addr), data, h), addr);
	(void)__raw_readl(addr);	/* Defeat write posting */
	local_irq_restore(flags);
	return 0;
}
300
/*
 * Base indices into intc_reg_fns[]; (reg_width >> 3) - 1 is added to a
 * base to pick the 8/16/32-bit variant of each operation.
 */
enum {
	REG_FN_ERR = 0,
	REG_FN_TEST_BASE = 1,
	REG_FN_WRITE_BASE = 5,
	REG_FN_MODIFY_BASE = 9
};
307
/*
 * Register access dispatch table, indexed by base + (width_bytes - 1);
 * the "+ 2" slots (would-be 24-bit) are intentionally left empty.
 */
static unsigned long (*intc_reg_fns[])(unsigned long addr,
				       unsigned long h,
				       unsigned long data) = {
	[REG_FN_TEST_BASE + 0] = test_8,
	[REG_FN_TEST_BASE + 1] = test_16,
	[REG_FN_TEST_BASE + 3] = test_32,
	[REG_FN_WRITE_BASE + 0] = write_8,
	[REG_FN_WRITE_BASE + 1] = write_16,
	[REG_FN_WRITE_BASE + 3] = write_32,
	[REG_FN_MODIFY_BASE + 0] = modify_8,
	[REG_FN_MODIFY_BASE + 1] = modify_16,
	[REG_FN_MODIFY_BASE + 3] = modify_32,
};
321
/* how a mask/priority register encodes enabling vs. disabling a source */
enum {	MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */
	MODE_MASK_REG,       /* Bit(s) set -> interrupt disabled */
	MODE_DUAL_REG,       /* Two registers, set bit to enable / disable */
	MODE_PRIO_REG,       /* Priority value written to enable interrupt */
	MODE_PCLR_REG,       /* Above plus all bits set to disable interrupt */
};
328
/* Write all field bits set (enable for MODE_ENABLE/DUAL, disable for MASK). */
static unsigned long intc_mode_field(unsigned long addr,
				     unsigned long handle,
				     unsigned long (*fn)(unsigned long,
						unsigned long,
						unsigned long),
				     unsigned int irq)
{
	return fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1));
}

/* Write zero to the field (the opposite action of intc_mode_field). */
static unsigned long intc_mode_zero(unsigned long addr,
				    unsigned long handle,
				    unsigned long (*fn)(unsigned long,
					       unsigned long,
					       unsigned long),
				    unsigned int irq)
{
	return fn(addr, handle, 0);
}

/* Write the IRQ's current priority level into the field. */
static unsigned long intc_mode_prio(unsigned long addr,
				    unsigned long handle,
				    unsigned long (*fn)(unsigned long,
					       unsigned long,
					       unsigned long),
				    unsigned int irq)
{
	return fn(addr, handle, intc_prio_level[irq]);
}
358
/* per-mode action used to enable a source, indexed by _INTC_MODE() */
static unsigned long (*intc_enable_fns[])(unsigned long addr,
					  unsigned long handle,
					  unsigned long (*fn)(unsigned long,
						    unsigned long,
						    unsigned long),
					  unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_field,
	[MODE_MASK_REG] = intc_mode_zero,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_prio,
	[MODE_PCLR_REG] = intc_mode_prio,
};
371
/* per-mode action used to disable a source, indexed by _INTC_MODE() */
static unsigned long (*intc_disable_fns[])(unsigned long addr,
				  unsigned long handle,
				  unsigned long (*fn)(unsigned long,
					     unsigned long,
					     unsigned long),
				  unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_zero,
	[MODE_MASK_REG] = intc_mode_field,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_zero,
	[MODE_PCLR_REG] = intc_mode_field,
};
384
/* Optional hardware auto-distribution ("balancing") of IRQs across CPUs. */
#ifdef CONFIG_INTC_BALANCING
/* Turn on hardware distribution for an IRQ, if it has a dist handle. */
static inline void intc_balancing_enable(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = dist_handle[irq];
	unsigned long addr;

	if (irq_balancing_disabled(irq) || !handle)
		return;

	addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
	intc_reg_fns[_INTC_FN(handle)](addr, handle, 1);
}

/* Turn off hardware distribution for an IRQ, if it has a dist handle. */
static inline void intc_balancing_disable(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = dist_handle[irq];
	unsigned long addr;

	if (irq_balancing_disabled(irq) || !handle)
		return;

	addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
	intc_reg_fns[_INTC_FN(handle)](addr, handle, 0);
}

/*
 * Build the distribution-register handle for enum_id by scanning the
 * mask registers that carry an associated dist_reg. Returns 0 when no
 * distribution option exists for this source.
 */
static unsigned int intc_dist_data(struct intc_desc *desc,
				   struct intc_desc_int *d,
				   intc_enum enum_id)
{
	struct intc_mask_reg *mr = desc->hw.mask_regs;
	unsigned int i, j, fn, mode;
	unsigned long reg_e, reg_d;

	for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) {
		mr = desc->hw.mask_regs + i;

		/*
		 * Skip this entry if there's no auto-distribution
		 * register associated with it.
		 */
		if (!mr->dist_reg)
			continue;

		for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
			if (mr->enum_ids[j] != enum_id)
				continue;

			fn = REG_FN_MODIFY_BASE;
			mode = MODE_ENABLE_REG;
			reg_e = mr->dist_reg;
			reg_d = mr->dist_reg;

			/* select the access width variant */
			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - j);
		}
	}

	/*
	 * It's possible we've gotten here with no distribution options
	 * available for the IRQ in question, so we just skip over those.
	 */
	return 0;
}
#else
/* Balancing support compiled out: these collapse to no-ops. */
static inline void intc_balancing_enable(unsigned int irq)
{
}

static inline void intc_balancing_disable(unsigned int irq)
{
}
#endif
463
/*
 * Enable the source described by handle on every per-CPU register copy
 * whose CPU is in the IRQ's affinity mask, then re-enable balancing.
 */
static inline void _intc_enable(unsigned int irq, unsigned long handle)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long addr;
	unsigned int cpu;

	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
#ifdef CONFIG_SMP
		/* only touch register copies for CPUs in the affinity mask */
		if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
			continue;
#endif
		addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
		intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
						    [_INTC_FN(handle)], irq);
	}

	intc_balancing_enable(irq);
}
482
/* irq_chip unmask hook: enable using the handle stored as chip data. */
static void intc_enable(unsigned int irq)
{
	unsigned long handle = (unsigned long)get_irq_chip_data(irq);

	_intc_enable(irq, handle);
}
487
/*
 * irq_chip mask hook: stop hardware balancing first, then disable the
 * source on every affected per-CPU register copy.
 */
static void intc_disable(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = (unsigned long)get_irq_chip_data(irq);
	unsigned long addr;
	unsigned int cpu;

	intc_balancing_disable(irq);

	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
#ifdef CONFIG_SMP
		/* only touch register copies for CPUs in the affinity mask */
		if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
			continue;
#endif
		addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
		intc_disable_fns[_INTC_MODE(handle)](addr, handle,intc_reg_fns\
						     [_INTC_FN(handle)], irq);
	}
}
507
/*
 * Enable actions that never consult intc_prio_level[]; used by
 * intc_enable_disable() where no meaningful IRQ number is available.
 */
static unsigned long
(*intc_enable_noprio_fns[])(unsigned long addr,
			    unsigned long handle,
			    unsigned long (*fn)(unsigned long,
					unsigned long,
					unsigned long),
			    unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_field,
	[MODE_MASK_REG] = intc_mode_zero,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_field,
	[MODE_PCLR_REG] = intc_mode_field,
};
521
/*
 * Enable or disable a handle on all per-CPU register copies without
 * using per-IRQ priority data (irq argument passed as 0); the enable
 * path therefore goes through the noprio table.
 */
static void intc_enable_disable(struct intc_desc_int *d,
				unsigned long handle, int do_enable)
{
	unsigned long addr;
	unsigned int cpu;
	unsigned long (*fn)(unsigned long, unsigned long,
		   unsigned long (*)(unsigned long, unsigned long,
				     unsigned long),
		   unsigned int);

	if (do_enable) {
		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
			addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
			fn = intc_enable_noprio_fns[_INTC_MODE(handle)];
			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
		}
	} else {
		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
			addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
			fn = intc_disable_fns[_INTC_MODE(handle)];
			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
		}
	}
}
546
/* irq_chip set_wake hook: always succeeds; hardware is set up later. */
static int intc_set_wake(unsigned int irq, unsigned int on)
{
	return 0; /* allow wakeup, but setup hardware in intc_suspend() */
}
551
#ifdef CONFIG_SMP
/*
 * This is held with the irq desc lock held, so we don't require any
 * additional locking here at the intc desc level. The affinity mask is
 * later tested in the enable/disable paths.
 */
static int intc_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	/* reject masks that contain no online CPU */
	if (!cpumask_intersects(cpumask, cpu_online_mask))
		return -1;

	cpumask_copy(irq_to_desc(irq)->affinity, cpumask);

	return 0;
}
#endif
568
/*
 * irq_chip mask_ack hook: mask the source, then clear its pending bit
 * in the ack register (if one was registered for this IRQ).
 */
static void intc_mask_ack(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = ack_handle[irq];
	unsigned long addr;

	intc_disable(irq);

	/* read register and write zero only to the associated bit */
	if (handle) {
		addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
		switch (_INTC_FN(handle)) {
		case REG_FN_MODIFY_BASE + 0:	/* 8bit */
			__raw_readb(addr);
			__raw_writeb(0xff ^ set_field(0, 1, handle), addr);
			break;
		case REG_FN_MODIFY_BASE + 1:	/* 16bit */
			__raw_readw(addr);
			__raw_writew(0xffff ^ set_field(0, 1, handle), addr);
			break;
		case REG_FN_MODIFY_BASE + 3:	/* 32bit */
			__raw_readl(addr);
			__raw_writel(0xffffffff ^ set_field(0, 1, handle), addr);
			break;
		default:
			BUG();
			break;
		}
	}
}
599
600 static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
601                                              unsigned int nr_hp,
602                                              unsigned int irq)
603 {
604         int i;
605
606         /*
607          * this doesn't scale well, but...
608          *
609          * this function should only be used for cerain uncommon
610          * operations such as intc_set_priority() and intc_set_sense()
611          * and in those rare cases performance doesn't matter that much.
612          * keeping the memory footprint low is more important.
613          *
614          * one rather simple way to speed this up and still keep the
615          * memory footprint down is to make sure the array is sorted
616          * and then perform a bisect to lookup the irq.
617          */
618         for (i = 0; i < nr_hp; i++) {
619                 if ((hp + i)->irq != irq)
620                         continue;
621
622                 return hp + i;
623         }
624
625         return NULL;
626 }
627
/*
 * Set the hardware priority level of an IRQ. Fails with -EINVAL when
 * the IRQ carries no priority data or prio is out of range (must be
 * greater than 1 and fit within the register field width).
 */
int intc_set_priority(unsigned int irq, unsigned int prio)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	struct intc_handle_int *ihp;

	if (!intc_prio_level[irq] || prio <= 1)
		return -EINVAL;

	ihp = intc_find_irq(d->prio, d->nr_prio, irq);
	if (ihp) {
		if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
			return -EINVAL;

		intc_prio_level[irq] = prio;

		/*
		 * only set secondary masking method directly
		 * primary masking method is using intc_prio_level[irq]
		 * priority level will be set during next enable()
		 */
		if (_INTC_FN(ihp->handle) != REG_FN_ERR)
			_intc_enable(irq, ihp->handle);
	}
	return 0;
}
653
654 #define VALID(x) (x | 0x80)
655
656 static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
657         [IRQ_TYPE_EDGE_FALLING] = VALID(0),
658         [IRQ_TYPE_EDGE_RISING] = VALID(1),
659         [IRQ_TYPE_LEVEL_LOW] = VALID(2),
660         /* SH7706, SH7707 and SH7709 do not support high level triggered */
661 #if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
662     !defined(CONFIG_CPU_SUBTYPE_SH7707) && \
663     !defined(CONFIG_CPU_SUBTYPE_SH7709)
664         [IRQ_TYPE_LEVEL_HIGH] = VALID(3),
665 #endif
666 };
667
/*
 * irq_chip set_type hook: program the sense-select register for the
 * requested trigger type. Unsupported types return -EINVAL; IRQs with
 * no sense register are silently accepted.
 */
static int intc_set_sense(unsigned int irq, unsigned int type)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
	struct intc_handle_int *ihp;
	unsigned long addr;

	if (!value)
		return -EINVAL;

	ihp = intc_find_irq(d->sense, d->nr_sense, irq);
	if (ihp) {
		addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
		intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
	}
	return 0;
}
685
/*
 * Find the group that enum_id is a member of and return the group's
 * own enum id, or 0 when the id belongs to no group.
 */
static intc_enum __init intc_grp_id(struct intc_desc *desc,
				    intc_enum enum_id)
{
	struct intc_group *g = desc->hw.groups;
	unsigned int i, j;

	for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) {
		g = desc->hw.groups + i;

		/* member list is zero-terminated */
		for (j = 0; g->enum_ids[j]; j++) {
			if (g->enum_ids[j] != enum_id)
				continue;

			return g->enum_id;
		}
	}

	return 0;
}
705
/*
 * Find the next mask register field matching enum_id, resuming the
 * scan from *reg_idx/*fld_idx so callers can iterate over multiple
 * matches. Returns a packed _INTC_MK() handle, or 0 when no further
 * match exists; the indices are left pointing at the match so the
 * caller can advance past it.
 */
static unsigned int __init _intc_mask_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
{
	struct intc_mask_reg *mr = desc->hw.mask_regs;
	unsigned int fn, mode;
	unsigned long reg_e, reg_d;

	while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
		mr = desc->hw.mask_regs + *reg_idx;

		for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
			if (mr->enum_ids[*fld_idx] != enum_id)
				continue;

			if (mr->set_reg && mr->clr_reg) {
				/* separate set/clear registers: plain writes */
				fn = REG_FN_WRITE_BASE;
				mode = MODE_DUAL_REG;
				reg_e = mr->clr_reg;
				reg_d = mr->set_reg;
			} else {
				/* single register: read-modify-write */
				fn = REG_FN_MODIFY_BASE;
				if (mr->set_reg) {
					mode = MODE_ENABLE_REG;
					reg_e = mr->set_reg;
					reg_d = mr->set_reg;
				} else {
					mode = MODE_MASK_REG;
					reg_e = mr->clr_reg;
					reg_d = mr->clr_reg;
				}
			}

			/* select the access width variant */
			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - *fld_idx);
		}

		*fld_idx = 0;
		(*reg_idx)++;
	}

	return 0;
}
755
756 static unsigned int __init intc_mask_data(struct intc_desc *desc,
757                                           struct intc_desc_int *d,
758                                           intc_enum enum_id, int do_grps)
759 {
760         unsigned int i = 0;
761         unsigned int j = 0;
762         unsigned int ret;
763
764         ret = _intc_mask_data(desc, d, enum_id, &i, &j);
765         if (ret)
766                 return ret;
767
768         if (do_grps)
769                 return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0);
770
771         return 0;
772 }
773
/*
 * Find the next priority register field matching enum_id, resuming
 * the scan from *reg_idx/*fld_idx like _intc_mask_data(). Returns a
 * packed _INTC_MK() handle, or 0 when no further match exists.
 */
static unsigned int __init _intc_prio_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
{
	struct intc_prio_reg *pr = desc->hw.prio_regs;
	unsigned int fn, n, mode, bit;
	unsigned long reg_e, reg_d;

	while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) {
		pr = desc->hw.prio_regs + *reg_idx;

		for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) {
			if (pr->enum_ids[*fld_idx] != enum_id)
				continue;

			if (pr->set_reg && pr->clr_reg) {
				/* set/clear pair: write prio, clear all bits */
				fn = REG_FN_WRITE_BASE;
				mode = MODE_PCLR_REG;
				reg_e = pr->set_reg;
				reg_d = pr->clr_reg;
			} else {
				/* single register: priority field RMW */
				fn = REG_FN_MODIFY_BASE;
				if (!pr->set_reg)
					BUG();
				reg_e = pr->set_reg;
				reg_d = pr->set_reg;
			}

			/* select the access width variant */
			fn += (pr->reg_width >> 3) - 1;
			n = *fld_idx + 1;

			/* fields must not run off the end of the register */
			BUG_ON(n * pr->field_width > pr->reg_width);

			bit = pr->reg_width - (n * pr->field_width);

			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					pr->field_width, bit);
		}

		*fld_idx = 0;
		(*reg_idx)++;
	}

	return 0;
}
824
825 static unsigned int __init intc_prio_data(struct intc_desc *desc,
826                                           struct intc_desc_int *d,
827                                           intc_enum enum_id, int do_grps)
828 {
829         unsigned int i = 0;
830         unsigned int j = 0;
831         unsigned int ret;
832
833         ret = _intc_prio_data(desc, d, enum_id, &i, &j);
834         if (ret)
835                 return ret;
836
837         if (do_grps)
838                 return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0);
839
840         return 0;
841 }
842
/*
 * Enable or disable every mask bit and priority field associated with
 * enum_id, iterating all matches via the resumable _intc_*_data()
 * helpers.
 */
static void __init intc_enable_disable_enum(struct intc_desc *desc,
					    struct intc_desc_int *d,
					    intc_enum enum_id, int enable)
{
	unsigned int i, j, data;

	/* go through and enable/disable all mask bits */
	i = j = 0;
	do {
		data = _intc_mask_data(desc, d, enum_id, &i, &j);
		if (data)
			intc_enable_disable(d, data, enable);
		j++;	/* step past the last match so the scan resumes */
	} while (data);

	/* go through and enable/disable all priority fields */
	i = j = 0;
	do {
		data = _intc_prio_data(desc, d, enum_id, &i, &j);
		if (data)
			intc_enable_disable(d, data, enable);

		j++;	/* step past the last match so the scan resumes */
	} while (data);
}
868
869 static unsigned int __init intc_ack_data(struct intc_desc *desc,
870                                           struct intc_desc_int *d,
871                                           intc_enum enum_id)
872 {
873         struct intc_mask_reg *mr = desc->hw.ack_regs;
874         unsigned int i, j, fn, mode;
875         unsigned long reg_e, reg_d;
876
877         for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) {
878                 mr = desc->hw.ack_regs + i;
879
880                 for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
881                         if (mr->enum_ids[j] != enum_id)
882                                 continue;
883
884                         fn = REG_FN_MODIFY_BASE;
885                         mode = MODE_ENABLE_REG;
886                         reg_e = mr->set_reg;
887                         reg_d = mr->set_reg;
888
889                         fn += (mr->reg_width >> 3) - 1;
890                         return _INTC_MK(fn, mode,
891                                         intc_get_reg(d, reg_e),
892                                         intc_get_reg(d, reg_d),
893                                         1,
894                                         (mr->reg_width - 1) - j);
895                 }
896         }
897
898         return 0;
899 }
900
901 static unsigned int __init intc_sense_data(struct intc_desc *desc,
902                                            struct intc_desc_int *d,
903                                            intc_enum enum_id)
904 {
905         struct intc_sense_reg *sr = desc->hw.sense_regs;
906         unsigned int i, j, fn, bit;
907
908         for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) {
909                 sr = desc->hw.sense_regs + i;
910
911                 for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
912                         if (sr->enum_ids[j] != enum_id)
913                                 continue;
914
915                         fn = REG_FN_MODIFY_BASE;
916                         fn += (sr->reg_width >> 3) - 1;
917
918                         BUG_ON((j + 1) * sr->field_width > sr->reg_width);
919
920                         bit = sr->reg_width - ((j + 1) * sr->field_width);
921
922                         return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
923                                         0, sr->field_width, bit);
924                 }
925         }
926
927         return 0;
928 }
929
930 #define INTC_TAG_VIRQ_NEEDS_ALLOC       0
931
932 int intc_irq_lookup(const char *chipname, intc_enum enum_id)
933 {
934         struct intc_map_entry *ptr;
935         struct intc_desc_int *d;
936         int irq = -1;
937
938         list_for_each_entry(d, &intc_list, list) {
939                 int tagged;
940
941                 if (strcmp(d->chip.name, chipname) != 0)
942                         continue;
943
944                 /*
945                  * Catch early lookups for subgroup VIRQs that have not
946                  * yet been allocated an IRQ. This already includes a
947                  * fast-path out if the tree is untagged, so there is no
948                  * need to explicitly test the root tree.
949                  */
950                 tagged = radix_tree_tag_get(&d->tree, enum_id,
951                                             INTC_TAG_VIRQ_NEEDS_ALLOC);
952                 if (unlikely(tagged))
953                         break;
954
955                 ptr = radix_tree_lookup(&d->tree, enum_id);
956                 if (ptr) {
957                         irq = ptr - intc_irq_xlate;
958                         break;
959                 }
960         }
961
962         return irq;
963 }
964 EXPORT_SYMBOL_GPL(intc_irq_lookup);
965
/*
 * Link @virq into the per-parent list of virtual IRQs hanging off
 * @irq's descriptor, reusing desc->handler_data as the list head.
 * Returns 0 on success or when the mapping already exists, -ENOMEM on
 * allocation failure.
 */
static int add_virq_to_pirq(unsigned int irq, unsigned int virq)
{
	struct intc_virq_list **last, *entry;
	struct irq_desc *desc = irq_to_desc(irq);

	/* scan for duplicates */
	last = (struct intc_virq_list **)&desc->handler_data;
	for_each_virq(entry, desc->handler_data) {
		if (entry->irq == virq)
			return 0;
		last = &entry->next;
	}

	/* GFP_ATOMIC: caller (intc_subgroup_map) holds d->lock */
	entry = kzalloc(sizeof(struct intc_virq_list), GFP_ATOMIC);
	if (!entry) {
		pr_err("can't allocate VIRQ mapping for %d\n", virq);
		return -ENOMEM;
	}

	entry->irq = virq;

	/* append at the tail: last points at the terminating next pointer */
	*last = entry;

	return 0;
}
991
/*
 * Chained handler for a parent IRQ that demuxes to virtual IRQs:
 * mask+ack the parent, dispatch every VIRQ whose subgroup status bit
 * reads non-zero, then unmask the parent again.
 */
static void intc_virq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct intc_virq_list *entry, *vlist = get_irq_data(irq);
	struct intc_desc_int *d = get_intc_desc(irq);

	desc->chip->mask_ack(irq);

	for_each_virq(entry, vlist) {
		unsigned long addr, handle;

		/* each VIRQ carries its subgroup test handle as irq data */
		handle = (unsigned long)get_irq_data(entry->irq);
		addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);

		/* only dispatch if this subgroup's bit is actually set */
		if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
			generic_handle_irq(entry->irq);
	}

	desc->chip->unmask(irq);
}
1011
1012 static unsigned long __init intc_subgroup_data(struct intc_subgroup *subgroup,
1013                                                struct intc_desc_int *d,
1014                                                unsigned int index)
1015 {
1016         unsigned int fn = REG_FN_TEST_BASE + (subgroup->reg_width >> 3) - 1;
1017
1018         return _INTC_MK(fn, MODE_ENABLE_REG, intc_get_reg(d, subgroup->reg),
1019                         0, 1, (subgroup->reg_width - 1) - index);
1020 }
1021
1022 static void __init intc_subgroup_init_one(struct intc_desc *desc,
1023                                           struct intc_desc_int *d,
1024                                           struct intc_subgroup *subgroup)
1025 {
1026         struct intc_map_entry *mapped;
1027         unsigned int pirq;
1028         unsigned long flags;
1029         int i;
1030
1031         mapped = radix_tree_lookup(&d->tree, subgroup->parent_id);
1032         if (!mapped) {
1033                 WARN_ON(1);
1034                 return;
1035         }
1036
1037         pirq = mapped - intc_irq_xlate;
1038
1039         spin_lock_irqsave(&d->lock, flags);
1040
1041         for (i = 0; i < ARRAY_SIZE(subgroup->enum_ids); i++) {
1042                 struct intc_subgroup_entry *entry;
1043                 int err;
1044
1045                 if (!subgroup->enum_ids[i])
1046                         continue;
1047
1048                 entry = kmalloc(sizeof(*entry), GFP_NOWAIT);
1049                 if (!entry)
1050                         break;
1051
1052                 entry->pirq = pirq;
1053                 entry->enum_id = subgroup->enum_ids[i];
1054                 entry->handle = intc_subgroup_data(subgroup, d, i);
1055
1056                 err = radix_tree_insert(&d->tree, entry->enum_id, entry);
1057                 if (unlikely(err < 0))
1058                         break;
1059
1060                 radix_tree_tag_set(&d->tree, entry->enum_id,
1061                                    INTC_TAG_VIRQ_NEEDS_ALLOC);
1062         }
1063
1064         spin_unlock_irqrestore(&d->lock, flags);
1065 }
1066
1067 static void __init intc_subgroup_init(struct intc_desc *desc,
1068                                       struct intc_desc_int *d)
1069 {
1070         int i;
1071
1072         if (!desc->hw.subgroups)
1073                 return;
1074
1075         for (i = 0; i < desc->hw.nr_subgroups; i++)
1076                 intc_subgroup_init_one(desc, d, desc->hw.subgroups + i);
1077 }
1078
/*
 * Allocate and wire up a virtual IRQ for every subgroup entry that was
 * tagged INTC_TAG_VIRQ_NEEDS_ALLOC by intc_subgroup_init_one(), then
 * replace the placeholder in the radix tree with the final xlate entry.
 */
static void __init intc_subgroup_map(struct intc_desc_int *d)
{
	struct intc_subgroup_entry *entries[32];
	unsigned long flags;
	unsigned int nr_found;
	int i;

	spin_lock_irqsave(&d->lock, flags);

restart:
	/* fetch slots (not values) so they can be replaced in place below */
	nr_found = radix_tree_gang_lookup_tag_slot(&d->tree,
			(void ***)entries, 0, ARRAY_SIZE(entries),
			INTC_TAG_VIRQ_NEEDS_ALLOC);

	for (i = 0; i < nr_found; i++) {
		struct intc_subgroup_entry *entry;
		int irq;

		entry = radix_tree_deref_slot((void **)entries[i]);
		if (unlikely(!entry))
			continue;
		if (unlikely(entry == RADIX_TREE_RETRY))
			goto restart;	/* tree changed under us, rescan */

		irq = create_irq();
		if (unlikely(irq < 0)) {
			pr_err("no more free IRQs, bailing..\n");
			break;
		}

		pr_info("Setting up a chained VIRQ from %d -> %d\n",
			irq, entry->pirq);

		/* record the new VIRQ in the global xlate table */
		spin_lock(&xlate_lock);
		intc_irq_xlate[irq].desc = d;
		intc_irq_xlate[irq].enum_id = entry->enum_id;
		spin_unlock(&xlate_lock);

		/* the VIRQ inherits the parent's chip and chip data */
		set_irq_chip_and_handler_name(irq, get_irq_chip(entry->pirq),
					      handle_simple_irq, "virq");
		set_irq_chip_data(irq, get_irq_chip_data(entry->pirq));

		/* stash the subgroup test handle for intc_virq_handler() */
		set_irq_data(irq, (void *)entry->handle);

		set_irq_chained_handler(entry->pirq, intc_virq_handler);
		add_virq_to_pirq(entry->pirq, irq);

		/*
		 * Swap the placeholder for the final xlate entry and drop
		 * the needs-alloc tag.
		 * NOTE(review): the old subgroup entry itself is never
		 * kfree()d after being replaced -- possible leak, confirm.
		 */
		radix_tree_tag_clear(&d->tree, entry->enum_id,
				     INTC_TAG_VIRQ_NEEDS_ALLOC);
		radix_tree_replace_slot((void **)entries[i],
					&intc_irq_xlate[irq]);
	}

	spin_unlock_irqrestore(&d->lock, flags);
}
1134
1135 void __init intc_finalize(void)
1136 {
1137         struct intc_desc_int *d;
1138
1139         list_for_each_entry(d, &intc_list, list)
1140                 if (radix_tree_tagged(&d->tree, INTC_TAG_VIRQ_NEEDS_ALLOC))
1141                         intc_subgroup_map(d);
1142 }
1143
/*
 * Bind a single vector (enum_id -> irq) to controller @d: record it in
 * the global IRQ bitmap and the xlate radix tree, pick the best mask
 * and priority handles, hook up the irq_chip, and leave the IRQ masked.
 */
static void __init intc_register_irq(struct intc_desc *desc,
				     struct intc_desc_int *d,
				     intc_enum enum_id,
				     unsigned int irq)
{
	struct intc_handle_int *hp;
	unsigned int data[2], primary;
	unsigned long flags;

	/*
	 * Register the IRQ position with the global IRQ map, then insert
	 * it in to the radix tree.
	 */
	set_bit(irq, intc_irq_map);

	spin_lock_irqsave(&xlate_lock, flags);
	radix_tree_insert(&d->tree, enum_id, &intc_irq_xlate[irq]);
	spin_unlock_irqrestore(&xlate_lock, flags);

	/*
	 * Prefer single interrupt source bitmap over other combinations:
	 *
	 * 1. bitmap, single interrupt source
	 * 2. priority, single interrupt source
	 * 3. bitmap, multiple interrupt sources (groups)
	 * 4. priority, multiple interrupt sources (groups)
	 */
	data[0] = intc_mask_data(desc, d, enum_id, 0);
	data[1] = intc_prio_data(desc, d, enum_id, 0);

	primary = 0;
	if (!data[0] && data[1])
		primary = 1;

	if (!data[0] && !data[1])
		pr_warning("missing unique irq mask for irq %d (vect 0x%04x)\n",
			   irq, irq2evt(irq));

	/* fall back to group-based (multi-source) handles if needed */
	data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1);
	data[1] = data[1] ? data[1] : intc_prio_data(desc, d, enum_id, 1);

	if (!data[primary])
		primary ^= 1;

	BUG_ON(!data[primary]); /* must have primary masking method */

	disable_irq_nosync(irq);
	set_irq_chip_and_handler_name(irq, &d->chip,
				      handle_level_irq, "level");
	/* the chip callbacks recover the masking handle from chip data */
	set_irq_chip_data(irq, (void *)data[primary]);

	/*
	 * set priority level
	 * - this needs to be at least 2 for 5-bit priorities on 7780
	 */
	intc_prio_level[irq] = default_prio_level;

	/* enable secondary masking method if present */
	if (data[!primary])
		_intc_enable(irq, data[!primary]);

	/* add irq to d->prio list if priority is available */
	if (data[1]) {
		hp = d->prio + d->nr_prio;
		hp->irq = irq;
		hp->handle = data[1];

		if (primary) {
			/*
			 * only secondary priority should access registers, so
			 * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
			 */
			hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
			hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
		}
		d->nr_prio++;
	}

	/* add irq to d->sense list if sense is available */
	data[0] = intc_sense_data(desc, d, enum_id);
	if (data[0]) {
		(d->sense + d->nr_sense)->irq = irq;
		(d->sense + d->nr_sense)->handle = data[0];
		d->nr_sense++;
	}

	/* irq should be disabled by default */
	d->chip.mask(irq);

	if (desc->hw.ack_regs)
		ack_handle[irq] = intc_ack_data(desc, d, enum_id);

#ifdef CONFIG_INTC_BALANCING
	if (desc->hw.mask_regs)
		dist_handle[irq] = intc_dist_data(desc, d, enum_id);
#endif

	activate_irq(irq);
}
1243
1244 static unsigned int __init save_reg(struct intc_desc_int *d,
1245                                     unsigned int cnt,
1246                                     unsigned long value,
1247                                     unsigned int smp)
1248 {
1249         if (value) {
1250                 value = intc_phys_to_virt(d, value);
1251
1252                 d->reg[cnt] = value;
1253 #ifdef CONFIG_SMP
1254                 d->smp[cnt] = smp;
1255 #endif
1256                 return 1;
1257         }
1258
1259         return 0;
1260 }
1261
/*
 * Register an INTC controller described by @desc: map its register
 * windows, cache all register addresses, populate the irq_chip, and
 * register every vector, including redirects for multi-evt vectors and
 * subgroup placeholders. Returns 0 on success, -ENOMEM on any
 * allocation failure (with previously acquired resources released).
 */
int __init register_intc_controller(struct intc_desc *desc)
{
	unsigned int i, k, smp;
	struct intc_hw_desc *hw = &desc->hw;
	struct intc_desc_int *d;
	struct resource *res;

	pr_info("Registered controller '%s' with %u IRQs\n",
		desc->name, hw->nr_vectors);

	d = kzalloc(sizeof(*d), GFP_NOWAIT);
	if (!d)
		goto err0;

	INIT_LIST_HEAD(&d->list);
	list_add_tail(&d->list, &intc_list);

	spin_lock_init(&d->lock);

	d->index = nr_intc_controllers;

	/* ioremap each memory resource window for register access */
	if (desc->num_resources) {
		d->nr_windows = desc->num_resources;
		d->window = kzalloc(d->nr_windows * sizeof(*d->window),
				    GFP_NOWAIT);
		if (!d->window)
			goto err1;

		for (k = 0; k < d->nr_windows; k++) {
			res = desc->resource + k;
			WARN_ON(resource_type(res) != IORESOURCE_MEM);
			d->window[k].phys = res->start;
			d->window[k].size = resource_size(res);
			d->window[k].virt = ioremap_nocache(res->start,
							 resource_size(res));
			if (!d->window[k].virt)
				goto err2;
		}
	}

	/* count registers to cache; mask/prio regs have set+clr pairs */
	d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
#ifdef CONFIG_INTC_BALANCING
	if (d->nr_reg)
		d->nr_reg += hw->nr_mask_regs;
#endif
	d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
	d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
	d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;
	d->nr_reg += hw->subgroups ? hw->nr_subgroups : 0;

	d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
	if (!d->reg)
		goto err2;

#ifdef CONFIG_SMP
	d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
	if (!d->smp)
		goto err3;
#endif
	/* k indexes d->reg[]; save_reg() returns 1 per register saved */
	k = 0;

	if (hw->mask_regs) {
		for (i = 0; i < hw->nr_mask_regs; i++) {
			smp = IS_SMP(hw->mask_regs[i]);
			k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
			k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
#ifdef CONFIG_INTC_BALANCING
			k += save_reg(d, k, hw->mask_regs[i].dist_reg, 0);
#endif
		}
	}

	if (hw->prio_regs) {
		d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
				  GFP_NOWAIT);
		if (!d->prio)
			goto err4;

		for (i = 0; i < hw->nr_prio_regs; i++) {
			smp = IS_SMP(hw->prio_regs[i]);
			k += save_reg(d, k, hw->prio_regs[i].set_reg, smp);
			k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp);
		}
	}

	if (hw->sense_regs) {
		d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
				   GFP_NOWAIT);
		if (!d->sense)
			goto err5;

		for (i = 0; i < hw->nr_sense_regs; i++)
			k += save_reg(d, k, hw->sense_regs[i].reg, 0);
	}

	if (hw->subgroups)
		for (i = 0; i < hw->nr_subgroups; i++)
			if (hw->subgroups[i].reg)
				k+= save_reg(d, k, hw->subgroups[i].reg, 0);

	d->chip.name = desc->name;
	d->chip.mask = intc_disable;
	d->chip.unmask = intc_enable;
	d->chip.mask_ack = intc_disable;
	d->chip.enable = intc_enable;
	d->chip.disable = intc_disable;
	d->chip.shutdown = intc_disable;
	d->chip.set_type = intc_set_sense;
	d->chip.set_wake = intc_set_wake;
#ifdef CONFIG_SMP
	d->chip.set_affinity = intc_set_affinity;
#endif

	if (hw->ack_regs) {
		for (i = 0; i < hw->nr_ack_regs; i++)
			k += save_reg(d, k, hw->ack_regs[i].set_reg, 0);

		/* only controllers with ack registers get a real mask_ack */
		d->chip.mask_ack = intc_mask_ack;
	}

	/* disable bits matching force_disable before registering irqs */
	if (desc->force_disable)
		intc_enable_disable_enum(desc, d, desc->force_disable, 0);

	/* disable bits matching force_enable before registering irqs */
	if (desc->force_enable)
		intc_enable_disable_enum(desc, d, desc->force_enable, 0);

	BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */

	/* register the vectors one by one */
	for (i = 0; i < hw->nr_vectors; i++) {
		struct intc_vect *vect = hw->vectors + i;
		unsigned int irq = evt2irq(vect->vect);
		unsigned long flags;
		struct irq_desc *irq_desc;

		if (!vect->enum_id)
			continue;

		irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
		if (unlikely(!irq_desc)) {
			pr_err("can't get irq_desc for %d\n", irq);
			continue;
		}

		spin_lock_irqsave(&xlate_lock, flags);
		intc_irq_xlate[irq].enum_id = vect->enum_id;
		intc_irq_xlate[irq].desc = d;
		spin_unlock_irqrestore(&xlate_lock, flags);

		intc_register_irq(desc, d, vect->enum_id, irq);

		/* look for later vectors sharing the same enum_id */
		for (k = i + 1; k < hw->nr_vectors; k++) {
			struct intc_vect *vect2 = hw->vectors + k;
			unsigned int irq2 = evt2irq(vect2->vect);

			if (vect->enum_id != vect2->enum_id)
				continue;

			/*
			 * In the case of multi-evt handling and sparse
			 * IRQ support, each vector still needs to have
			 * its own backing irq_desc.
			 */
			irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
			if (unlikely(!irq_desc)) {
				pr_err("can't get irq_desc for %d\n", irq2);
				continue;
			}

			/* clear enum_id so the vector isn't registered twice */
			vect2->enum_id = 0;

			/* redirect this interrupts to the first one */
			set_irq_chip(irq2, &dummy_irq_chip);
			set_irq_chained_handler(irq2, intc_redirect_irq);
			set_irq_data(irq2, (void *)irq);
		}
	}

	intc_subgroup_init(desc, d);

	/* enable bits matching force_enable after registering irqs */
	if (desc->force_enable)
		intc_enable_disable_enum(desc, d, desc->force_enable, 1);

	nr_intc_controllers++;

	return 0;
err5:
	kfree(d->prio);
err4:
#ifdef CONFIG_SMP
	kfree(d->smp);
err3:
#endif
	kfree(d->reg);
err2:
	/* d->window[] is zero-filled, so unmapped slots are skipped */
	for (k = 0; k < d->nr_windows; k++)
		if (d->window[k].virt)
			iounmap(d->window[k].virt);

	kfree(d->window);
err1:
	kfree(d);
err0:
	pr_err("unable to allocate INTC memory\n");

	return -ENOMEM;
}
1472
1473 #ifdef CONFIG_INTC_USERIMASK
1474 static void __iomem *uimask;
1475
1476 int register_intc_userimask(unsigned long addr)
1477 {
1478         if (unlikely(uimask))
1479                 return -EBUSY;
1480
1481         uimask = ioremap_nocache(addr, SZ_4K);
1482         if (unlikely(!uimask))
1483                 return -ENOMEM;
1484
1485         pr_info("userimask support registered for levels 0 -> %d\n",
1486                 default_prio_level - 1);
1487
1488         return 0;
1489 }
1490
1491 static ssize_t
1492 show_intc_userimask(struct sysdev_class *cls,
1493                     struct sysdev_class_attribute *attr, char *buf)
1494 {
1495         return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
1496 }
1497
/*
 * Write handler: accept a decimal priority level below
 * default_prio_level and program it into the USERIMASK register.
 * NOTE(review): 0xa5 in the top byte looks like a hardware unlock key
 * -- confirm against the CPU manual.
 */
static ssize_t
store_intc_userimask(struct sysdev_class *cls,
		     struct sysdev_class_attribute *attr,
		     const char *buf, size_t count)
{
	unsigned long level;

	level = simple_strtoul(buf, NULL, 10);

	/*
	 * Minimal acceptable IRQ levels are in the 2 - 16 range, but
	 * these are chomped so as to not interfere with normal IRQs.
	 *
	 * Level 1 is a special case on some CPUs in that it's not
	 * directly settable, but given that USERIMASK cuts off below a
	 * certain level, we don't care about this limitation here.
	 * Level 0 on the other hand equates to user masking disabled.
	 *
	 * We use default_prio_level as a cut off so that only special
	 * case opt-in IRQs can be mangled.
	 */
	if (level >= default_prio_level)
		return -EINVAL;

	__raw_writel(0xa5 << 24 | level << 4, uimask);

	return count;
}
1526
/* root-only read/write "userimask" class attribute */
static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
			 show_intc_userimask, store_intc_userimask);
1529 #endif
1530
1531 #ifdef CONFIG_INTC_MAPPING_DEBUG
1532 static int intc_irq_xlate_debug(struct seq_file *m, void *priv)
1533 {
1534         int i;
1535
1536         seq_printf(m, "%-5s  %-7s  %-15s\n", "irq", "enum", "chip name");
1537
1538         for (i = 1; i < nr_irqs; i++) {
1539                 struct intc_desc_int *desc = intc_irq_xlate[i].desc;
1540
1541                 if (!desc)
1542                         continue;
1543
1544                 seq_printf(m, "%5d  ", i);
1545                 seq_printf(m, "0x%05x  ", intc_irq_xlate[i].enum_id);
1546                 seq_printf(m, "%-15s\n", desc->chip.name);
1547         }
1548
1549         return 0;
1550 }
1551
/* seq_file boilerplate: bind the debugfs file to intc_irq_xlate_debug() */
static int intc_irq_xlate_open(struct inode *inode, struct file *file)
{
	return single_open(file, intc_irq_xlate_debug, inode->i_private);
}
1556
/* file_operations for the "intc_irq_xlate" debugfs entry */
static const struct file_operations intc_irq_xlate_fops = {
	.open = intc_irq_xlate_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1563
1564 static int __init intc_irq_xlate_init(void)
1565 {
1566         /*
1567          * XXX.. use arch_debugfs_dir here when all of the intc users are
1568          * converted.
1569          */
1570         if (debugfs_create_file("intc_irq_xlate", S_IRUGO, NULL, NULL,
1571                                 &intc_irq_xlate_fops) == NULL)
1572                 return -ENOMEM;
1573
1574         return 0;
1575 }
1576 fs_initcall(intc_irq_xlate_init);
1577 #endif
1578
1579 static ssize_t
1580 show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
1581 {
1582         struct intc_desc_int *d;
1583
1584         d = container_of(dev, struct intc_desc_int, sysdev);
1585
1586         return sprintf(buf, "%s\n", d->chip.name);
1587 }
1588
1589 static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);
1590
/*
 * sysdev suspend hook, also reused for resume via PMSG_ON:
 *
 *  PM_EVENT_ON:      when coming back from a freeze, re-apply the
 *                    recorded enabled/disabled state of every IRQ owned
 *                    by this controller.
 *  PM_EVENT_FREEZE:  nothing to do.
 *  PM_EVENT_SUSPEND: enable only the wakeup-capable IRQs.
 */
static int intc_suspend(struct sys_device *dev, pm_message_t state)
{
	struct intc_desc_int *d;
	struct irq_desc *desc;
	int irq;

	/* get intc controller associated with this sysdev */
	d = container_of(dev, struct intc_desc_int, sysdev);

	switch (state.event) {
	case PM_EVENT_ON:
		if (d->state.event != PM_EVENT_FREEZE)
			break;
		for_each_irq_desc(irq, desc) {
			/* redirected multi-evt vectors have no own state */
			if (desc->handle_irq == intc_redirect_irq)
				continue;
			/* skip IRQs belonging to other controllers */
			if (desc->chip != &d->chip)
				continue;
			if (desc->status & IRQ_DISABLED)
				intc_disable(irq);
			else
				intc_enable(irq);
		}
		break;
	case PM_EVENT_FREEZE:
		/* nothing has to be done */
		break;
	case PM_EVENT_SUSPEND:
		/* enable wakeup irqs belonging to this intc controller */
		for_each_irq_desc(irq, desc) {
			if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip))
				intc_enable(irq);
		}
		break;
	}
	/* remember the last PM event for the PM_EVENT_ON transition above */
	d->state = state;

	return 0;
}
1630
/* resume is modelled as a PMSG_ON transition through intc_suspend() */
static int intc_resume(struct sys_device *dev)
{
	return intc_suspend(dev, PMSG_ON);
}
1635
/* sysdev class providing the suspend/resume hooks above */
static struct sysdev_class intc_sysdev_class = {
	.name = "intc",
	.suspend = intc_suspend,
	.resume = intc_resume,
};
1641
1642 /* register this intc as sysdev to allow suspend/resume */
1643 static int __init register_intc_sysdevs(void)
1644 {
1645         struct intc_desc_int *d;
1646         int error;
1647
1648         error = sysdev_class_register(&intc_sysdev_class);
1649 #ifdef CONFIG_INTC_USERIMASK
1650         if (!error && uimask)
1651                 error = sysdev_class_create_file(&intc_sysdev_class,
1652                                                  &attr_userimask);
1653 #endif
1654         if (!error) {
1655                 list_for_each_entry(d, &intc_list, list) {
1656                         d->sysdev.id = d->index;
1657                         d->sysdev.cls = &intc_sysdev_class;
1658                         error = sysdev_register(&d->sysdev);
1659                         if (error == 0)
1660                                 error = sysdev_create_file(&d->sysdev,
1661                                                            &attr_name);
1662                         if (error)
1663                                 break;
1664                 }
1665         }
1666
1667         if (error)
1668                 pr_err("sysdev registration error\n");
1669
1670         return error;
1671 }
1672 device_initcall(register_intc_sysdevs);
1673
1674 /*
1675  * Dynamic IRQ allocation and deallocation
1676  */
/*
 * Allocate a new IRQ number, preferring @irq_want if it is still free,
 * otherwise taking the first unused slot in the global bitmap.
 * Returns the IRQ number, or 0 on failure (map exhausted or no
 * irq_desc available).
 */
unsigned int create_irq_nr(unsigned int irq_want, int node)
{
	unsigned int irq = 0, new;
	unsigned long flags;
	struct irq_desc *desc;

	spin_lock_irqsave(&vector_lock, flags);

	/*
	 * First try the wanted IRQ
	 */
	if (test_and_set_bit(irq_want, intc_irq_map) == 0) {
		new = irq_want;
	} else {
		/* .. then fall back to scanning. */
		new = find_first_zero_bit(intc_irq_map, nr_irqs);
		if (unlikely(new == nr_irqs))
			goto out_unlock;

		__set_bit(new, intc_irq_map);
	}

	desc = irq_to_desc_alloc_node(new, node);
	if (unlikely(!desc)) {
		/* NOTE(review): the bit set above is not cleared here --
		 * the slot stays reserved on this failure path; confirm */
		pr_err("can't get irq_desc for %d\n", new);
		goto out_unlock;
	}

	desc = move_irq_desc(desc, node);
	irq = new;

out_unlock:
	spin_unlock_irqrestore(&vector_lock, flags);

	/* finish setup outside of vector_lock */
	if (irq > 0) {
		dynamic_irq_init(irq);
		activate_irq(irq);
	}

	return irq;
}
1718
1719 int create_irq(void)
1720 {
1721         int nid = cpu_to_node(smp_processor_id());
1722         int irq;
1723
1724         irq = create_irq_nr(NR_IRQS_LEGACY, nid);
1725         if (irq == 0)
1726                 irq = -1;
1727
1728         return irq;
1729 }
1730
1731 void destroy_irq(unsigned int irq)
1732 {
1733         unsigned long flags;
1734
1735         dynamic_irq_cleanup(irq);
1736
1737         spin_lock_irqsave(&vector_lock, flags);
1738         __clear_bit(irq, intc_irq_map);
1739         spin_unlock_irqrestore(&vector_lock, flags);
1740 }
1741
1742 int reserve_irq_vector(unsigned int irq)
1743 {
1744         unsigned long flags;
1745         int ret = 0;
1746
1747         spin_lock_irqsave(&vector_lock, flags);
1748         if (test_and_set_bit(irq, intc_irq_map))
1749                 ret = -EBUSY;
1750         spin_unlock_irqrestore(&vector_lock, flags);
1751
1752         return ret;
1753 }
1754
1755 void reserve_intc_vectors(struct intc_vect *vectors, unsigned int nr_vecs)
1756 {
1757         unsigned long flags;
1758         int i;
1759
1760         spin_lock_irqsave(&vector_lock, flags);
1761         for (i = 0; i < nr_vecs; i++)
1762                 __set_bit(evt2irq(vectors[i].vect), intc_irq_map);
1763         spin_unlock_irqrestore(&vector_lock, flags);
1764 }
1765
1766 void reserve_irq_legacy(void)
1767 {
1768         unsigned long flags;
1769         int i, j;
1770
1771         spin_lock_irqsave(&vector_lock, flags);
1772         j = find_first_bit(intc_irq_map, nr_irqs);
1773         for (i = 0; i < j; i++)
1774                 __set_bit(i, intc_irq_map);
1775         spin_unlock_irqrestore(&vector_lock, flags);
1776 }