 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * Copyright (C) 2004-2012 Cavium, Inc.

#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-ciu2-defs.h>
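/*
 * Each CPU caches its CIU_INTX_EN0/EN1 enable masks in the *_en_mirror
 * per-cpu variables so the read-modify-write CIU registers can be updated
 * from the cached value; the per-cpu spinlock serializes those updates.
 * octeon_irq_ciu_to_irq[line][bit] records the Linux irq assigned to each
 * CIU source so the low-level dispatch code can translate a pending bit
 * back into an irq number.
 */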
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);

static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];

union octeon_ciu_chip_data {
	unsigned long gpio_line:6;

struct octeon_core_chip_data {
	struct mutex core_irq_mutex;

#define MIPS_CORE_IRQ_LINES 8

static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];

static void octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
				       struct irq_chip *chip,
				       irq_flow_handler_t handler)
	union octeon_ciu_chip_data cd;

	irq_set_chip_and_handler(irq, chip, handler);
	cd.s.gpio_line = gpio_line;

	irq_set_chip_data(irq, cd.p);
	octeon_irq_ciu_to_irq[line][bit] = irq;
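/*
 * CIU hwirq numbers encode the source as (line << 6) | bit, so one irq
 * domain covers both 64-bit CIU lines; the xlat/map callbacks below split
 * a hwirq back into its line and bit.
 */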
static void octeon_irq_force_ciu_mapping(struct irq_domain *domain,
					 int irq, int line, int bit)
	irq_domain_associate(domain, irq, line << 6 | bit);

static int octeon_coreid_for_cpu(int cpu)
	return cpu_logical_map(cpu);
	return cvmx_get_core_num();

static int octeon_cpu_for_coreid(int coreid)
	return cpu_number_map(coreid);
	return smp_processor_id();

static void octeon_irq_core_ack(struct irq_data *data)
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int bit = cd->bit;
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	clear_c0_cause(0x100 << bit);

static void octeon_irq_core_eoi(struct irq_data *data)
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	set_c0_status(0x100 << cd->bit);

static void octeon_irq_core_set_enable_local(void *arg)
	struct irq_data *data = arg;
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int mask = 0x100 << cd->bit;

	 * Interrupts are already disabled, so these are atomic.
	clear_c0_status(mask);

static void octeon_irq_core_disable(struct irq_data *data)
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	cd->desired_en = false;

static void octeon_irq_core_enable(struct irq_data *data)
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	cd->desired_en = true;

static void octeon_irq_core_bus_lock(struct irq_data *data)
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	mutex_lock(&cd->core_irq_mutex);

static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (cd->desired_en != cd->current_en) {
		on_each_cpu(octeon_irq_core_set_enable_local, data, 1);

		cd->current_en = cd->desired_en;

	mutex_unlock(&cd->core_irq_mutex);
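/*
 * The per-core irq_chip defers its work: .irq_enable/.irq_disable only
 * record the desired state under core_irq_mutex, and .irq_bus_sync_unlock
 * pushes any change to every core with on_each_cpu() so the CP0 Status
 * interrupt mask stays consistent across CPUs.
 */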
static struct irq_chip octeon_irq_chip_core = {
	.irq_enable = octeon_irq_core_enable,
	.irq_disable = octeon_irq_core_disable,
	.irq_ack = octeon_irq_core_ack,
	.irq_eoi = octeon_irq_core_eoi,
	.irq_bus_lock = octeon_irq_core_bus_lock,
	.irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,

	.irq_cpu_online = octeon_irq_core_eoi,
	.irq_cpu_offline = octeon_irq_core_ack,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,

static void __init octeon_irq_init_core(void)
	struct octeon_core_chip_data *cd;

	for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
		cd = &octeon_irq_core_chip_data[i];
		cd->current_en = false;
		cd->desired_en = false;

		mutex_init(&cd->core_irq_mutex);

		irq = OCTEON_IRQ_SW0 + i;
		irq_set_chip_data(irq, cd);
		irq_set_chip_and_handler(irq, &octeon_irq_chip_core,

static int next_cpu_for_irq(struct irq_data *data)
	int weight = cpumask_weight(data->affinity);

		cpu = smp_processor_id();
			cpu = cpumask_next(cpu, data->affinity);
			if (cpu >= nr_cpu_ids) {
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
	} else if (weight == 1) {
		cpu = cpumask_first(data->affinity);
		cpu = smp_processor_id();

	return smp_processor_id();
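/*
 * Enable the irq on the CPU chosen by next_cpu_for_irq(), so interrupts
 * with a multi-CPU affinity are spread round-robin across the cores in
 * the mask.  The per-cpu enable mirror is updated under the per-cpu
 * spinlock and then written out to the CIU enable register.
 */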
static void octeon_irq_ciu_enable(struct irq_data *data)
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	union octeon_ciu_chip_data cd;
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	cd.p = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd.s.line == 0) {
		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		__set_bit(cd.s.bit, pen);
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
		__set_bit(cd.s.bit, pen);
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	raw_spin_unlock_irqrestore(lock, flags);

static void octeon_irq_ciu_enable_local(struct irq_data *data)
	union octeon_ciu_chip_data cd;
	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

	cd.p = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd.s.line == 0) {
		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
		__set_bit(cd.s.bit, pen);
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
		__set_bit(cd.s.bit, pen);
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	raw_spin_unlock_irqrestore(lock, flags);

static void octeon_irq_ciu_disable_local(struct irq_data *data)
	union octeon_ciu_chip_data cd;
	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

	cd.p = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd.s.line == 0) {
		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
		__clear_bit(cd.s.bit, pen);
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
		__clear_bit(cd.s.bit, pen);
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	raw_spin_unlock_irqrestore(lock, flags);

static void octeon_irq_ciu_disable_all(struct irq_data *data)
	union octeon_ciu_chip_data cd;
	raw_spinlock_t *lock;

	cd.p = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__clear_bit(cd.s.bit, pen);
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);

static void octeon_irq_ciu_enable_all(struct irq_data *data)
	union octeon_ciu_chip_data cd;
	raw_spinlock_t *lock;

	cd.p = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__set_bit(cd.s.bit, pen);
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
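/*
 * On CIUs that provide the EN*_W1S/EN*_W1C registers, individual enable
 * bits can be set or cleared atomically, so the mirrors are updated with
 * set_bit()/clear_bit() and no per-cpu spinlock is needed.
 */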
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.

static void octeon_irq_ciu_enable_v2(struct irq_data *data)
	int cpu = next_cpu_for_irq(data);
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	 * Called under the desc lock, so these should never get out
	if (cd.s.line == 0) {
		int index = octeon_coreid_for_cpu(cpu) * 2;
		set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);

 * Enable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.

static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		int index = cvmx_get_core_num() * 2;
		set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		int index = cvmx_get_core_num() * 2 + 1;
		set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);

static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		int index = cvmx_get_core_num() * 2;
		clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		int index = cvmx_get_core_num() * 2 + 1;
		clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);

 * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.

static void octeon_irq_ciu_ack(struct irq_data *data)
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		int index = cvmx_get_core_num() * 2;
		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
		cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);

 * Disable the irq on all cores for chips that have the EN*_W1{S,C}

static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);

 * Enable the irq on all cores for chips that have the EN*_W1{S,C}

static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
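/*
 * Program the GPIO pin configuration for the requested trigger type:
 * int_type selects edge (1) vs. level (0) triggering, and rx_xor inverts
 * the input so falling-edge and level-low triggers can be generated.
 */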
static void octeon_irq_gpio_setup(struct irq_data *data)
	union cvmx_gpio_bit_cfgx cfg;
	union octeon_ciu_chip_data cd;
	u32 t = irqd_get_trigger_type(data);

	cd.p = irq_data_get_irq_chip_data(data);

	cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
	cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;

	/* 140 ns glitch filter */

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), cfg.u64);

static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable_v2(data);

static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable(data);

static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
	irqd_set_trigger_type(data, t);
	octeon_irq_gpio_setup(data);

	return IRQ_SET_MASK_OK;

static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);

	octeon_irq_ciu_disable_all_v2(data);

static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);

	octeon_irq_ciu_disable_all(data);

static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.gpio_line);

	cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);

static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc)
	if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH)
		handle_edge_irq(irq, desc);
		handle_level_irq(irq, desc);
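/*
 * When a CPU goes offline, move any CIU irq that is currently targeted at
 * it: drop the CPU from a multi-CPU affinity mask, or retarget the irq to
 * the lowest numbered online CPU.
 */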
static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
	int cpu = smp_processor_id();
	cpumask_t new_affinity;

	if (!cpumask_test_cpu(cpu, data->affinity))

	if (cpumask_weight(data->affinity) > 1) {
		 * The affinity spans multiple CPUs, so just remove this
		 * CPU from the affinity set.
		cpumask_copy(&new_affinity, data->affinity);
		cpumask_clear_cpu(cpu, &new_affinity);
		/* Otherwise, put it on the lowest numbered online CPU. */
		cpumask_clear(&new_affinity);
		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
	irq_set_affinity_locked(data, &new_affinity, false);

static int octeon_irq_ciu_set_affinity(struct irq_data *data,
				       const struct cpumask *dest, bool force)
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	union octeon_ciu_chip_data cd;
	raw_spinlock_t *lock;

	cd.p = irq_data_get_irq_chip_data(data);

	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	if (cpumask_weight(dest) != 1)

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		raw_spin_lock_irqsave(lock, flags);

			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			__set_bit(cd.s.bit, pen);
			__clear_bit(cd.s.bit, pen);

		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);

 * Set affinity for the irq for chips that have the EN*_W1{S,C}

static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
					  const struct cpumask *dest,
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd.s.bit;

	if (cd.s.line == 0) {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2;
			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				set_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
				clear_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				set_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
				clear_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);

 * Newer octeon chips have support for lockless CIU operation.

static struct irq_chip octeon_irq_chip_ciu_v2 = {
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,

	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,

static struct irq_chip octeon_irq_chip_ciu = {
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,

	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
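/*
 * The mailbox chips mask the interrupt on the local core in .irq_ack and
 * re-enable it in .irq_eoi, so a mailbox irq stays masked on the core
 * that is handling it.
 */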
/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
	.irq_enable = octeon_irq_ciu_enable_all_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_disable_local_v2,
	.irq_eoi = octeon_irq_ciu_enable_local_v2,

	.irq_cpu_online = octeon_irq_ciu_enable_local_v2,
	.irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,

static struct irq_chip octeon_irq_chip_ciu_mbox = {
	.irq_enable = octeon_irq_ciu_enable_all,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_disable_local,
	.irq_eoi = octeon_irq_ciu_enable_local,

	.irq_cpu_online = octeon_irq_ciu_enable_local,
	.irq_cpu_offline = octeon_irq_ciu_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,

static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
	.irq_enable = octeon_irq_ciu_enable_gpio_v2,
	.irq_disable = octeon_irq_ciu_disable_gpio_v2,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,

	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,

	.flags = IRQCHIP_SET_TYPE_MASKED,

static struct irq_chip octeon_irq_chip_ciu_gpio = {
	.irq_enable = octeon_irq_ciu_enable_gpio,
	.irq_disable = octeon_irq_ciu_disable_gpio,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,

	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,

	.flags = IRQCHIP_SET_TYPE_MASKED,

 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.

static void octeon_irq_ciu_wd_enable(struct irq_data *data)
	int coreid = data->irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	int cpu = octeon_cpu_for_coreid(coreid);
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	raw_spin_lock_irqsave(lock, flags);
	pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
	__set_bit(coreid, pen);
	 * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	raw_spin_unlock_irqrestore(lock, flags);

 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.

static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	int cpu = octeon_cpu_for_coreid(coreid);

	set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);

static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
	.irq_enable = octeon_irq_ciu1_wd_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_local_v2,

static struct irq_chip octeon_irq_chip_ciu_wd = {
	.irq_enable = octeon_irq_ciu_wd_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable_local,

static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
		case 48 ... 49:	/* GMX DRP */
		case 50:	/* IPD_DRP */
		case 52 ... 55:	/* Timers */
struct octeon_irq_gpio_domain_data {
	unsigned int base_hwirq;

static int octeon_irq_gpio_xlat(struct irq_domain *d,
				struct device_node *node,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
	unsigned int trigger;

	if (d->of_node != node)

	trigger = intspec[1];

		type = IRQ_TYPE_EDGE_RISING;
		type = IRQ_TYPE_EDGE_FALLING;
		type = IRQ_TYPE_LEVEL_HIGH;
		type = IRQ_TYPE_LEVEL_LOW;
		pr_err("Error: (%s) Invalid irq trigger specification: %x\n",
		type = IRQ_TYPE_LEVEL_LOW;

static int octeon_irq_ciu_xlat(struct irq_domain *d,
			       struct device_node *node,
			       unsigned int intsize,
			       unsigned long *out_hwirq,
			       unsigned int *out_type)
	unsigned int ciu, bit;

	if (ciu > 1 || bit > 63)

	*out_hwirq = (ciu << 6) | bit;

static struct irq_chip *octeon_irq_ciu_chip;
static struct irq_chip *octeon_irq_gpio_chip;

static bool octeon_irq_virq_in_range(unsigned int virq)
	/* We cannot let it overflow the mapping array. */
	if (virq < (1ul << 8 * sizeof(octeon_irq_ciu_to_irq[0][0])))

	WARN_ONCE(true, "virq out of range %u.\n", virq);

static int octeon_irq_ciu_map(struct irq_domain *d,
			      unsigned int virq, irq_hw_number_t hw)
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;

	if (!octeon_irq_virq_in_range(virq))

	/* Don't map irq if it is reserved for GPIO. */
	if (line == 0 && bit >= 16 && bit < 32)

	if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)

	if (octeon_irq_ciu_is_edge(line, bit))
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   octeon_irq_ciu_chip,
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   octeon_irq_ciu_chip,

static int octeon_irq_gpio_map_common(struct irq_domain *d,
				      unsigned int virq, irq_hw_number_t hw,
				      int line_limit, struct irq_chip *chip)
	struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
	unsigned int line, bit;

	if (!octeon_irq_virq_in_range(virq))

	line = (hw + gpiod->base_hwirq) >> 6;
	bit = (hw + gpiod->base_hwirq) & 63;
	if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0)

	octeon_irq_set_ciu_mapping(virq, line, bit, hw,
				   chip, octeon_irq_handle_gpio);

static int octeon_irq_gpio_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
	return octeon_irq_gpio_map_common(d, virq, hw, 1, octeon_irq_gpio_chip);

static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
	.map = octeon_irq_ciu_map,
	.xlate = octeon_irq_ciu_xlat,

static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
	.map = octeon_irq_gpio_map,
	.xlate = octeon_irq_gpio_xlat,
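/*
 * Low-level IP2/IP3 dispatch: read the CIU summary register, mask it with
 * this CPU's enable mirror, pick the highest pending bit and hand the
 * corresponding Linux irq from octeon_irq_ciu_to_irq[] to do_IRQ();
 * anything left over is reported as a spurious interrupt.
 */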
static void octeon_irq_ip2_ciu(void)
	const unsigned long core_id = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));

	ciu_sum &= __this_cpu_read(octeon_irq_ciu0_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[0][bit];
			spurious_interrupt();
		spurious_interrupt();

static void octeon_irq_ip3_ciu(void)
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);

	ciu_sum &= __this_cpu_read(octeon_irq_ciu1_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[1][bit];
			spurious_interrupt();
		spurious_interrupt();

static bool octeon_irq_use_ip4;

static void octeon_irq_local_enable_ip4(void *arg)
	set_c0_status(STATUSF_IP4);

static void octeon_irq_ip4_mask(void)
	clear_c0_status(STATUSF_IP4);
	spurious_interrupt();

static void (*octeon_irq_ip2)(void);
static void (*octeon_irq_ip3)(void);
static void (*octeon_irq_ip4)(void);

void (*octeon_irq_setup_secondary)(void);

void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
	octeon_irq_use_ip4 = true;
	on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);

static void octeon_irq_percpu_enable(void)

static void octeon_irq_init_ciu_percpu(void)
	int coreid = cvmx_get_core_num();

	__this_cpu_write(octeon_irq_ciu0_en_mirror, 0);
	__this_cpu_write(octeon_irq_ciu1_en_mirror, 0);

	raw_spin_lock_init(this_cpu_ptr(&octeon_irq_ciu_spinlock));
	 * Disable All CIU Interrupts. The ones we need will be
	 * enabled later. Read the SUM register so we know the write
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
	cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));

static void octeon_irq_init_ciu2_percpu(void)
	int coreid = cvmx_get_core_num();
	u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid);

	 * Disable All CIU2 Interrupts. The ones we need will be
	 * enabled later. Read the SUM register so we know the write
	 * There are 9 registers and 3 IPX levels with strides 0x1000
	 * and 0x200 respectively. Use loops to clear them.
	for (regx = 0; regx <= 0x8000; regx += 0x1000) {
		for (ipx = 0; ipx <= 0x400; ipx += 0x200)
			cvmx_write_csr(base + regx + ipx, 0);

	cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));

static void octeon_irq_setup_secondary_ciu(void)
	octeon_irq_init_ciu_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);

static void octeon_irq_setup_secondary_ciu2(void)
	octeon_irq_init_ciu2_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
		clear_c0_status(STATUSF_IP4);
static void __init octeon_irq_init_ciu(void)
	struct irq_chip *chip;
	struct irq_chip *chip_mbox;
	struct irq_chip *chip_wd;
	struct device_node *gpio_node;
	struct device_node *ciu_node;
	struct irq_domain *ciu_domain = NULL;

	octeon_irq_init_ciu_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;

	octeon_irq_ip2 = octeon_irq_ip2_ciu;
	octeon_irq_ip3 = octeon_irq_ip3_ciu;
	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		chip = &octeon_irq_chip_ciu_v2;
		chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
		chip_wd = &octeon_irq_chip_ciu_wd_v2;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
		chip = &octeon_irq_chip_ciu;
		chip_mbox = &octeon_irq_chip_ciu_mbox;
		chip_wd = &octeon_irq_chip_ciu_wd;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
	octeon_irq_ciu_chip = chip;
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	octeon_irq_init_core();

	gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
		struct octeon_irq_gpio_domain_data *gpiod;

		gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
			/* gpio domain host_data is the base hwirq number. */
			gpiod->base_hwirq = 16;
			irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
			of_node_put(gpio_node);
			pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
		pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");

	ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu");
		ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL);
		irq_set_default_host(ciu_domain);
		of_node_put(ciu_node);
		panic("Cannot find device node for cavium,octeon-3860-ciu.");

	for (i = 0; i < 16; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);

	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);

	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);

	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);

	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);

	for (i = 0; i < 16; i++)
		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, handle_level_irq);

	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.

static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
	cvmx_write_csr(en_addr, mask);

static void octeon_irq_ciu2_enable(struct irq_data *data)
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
	cvmx_write_csr(en_addr, mask);

static void octeon_irq_ciu2_enable_local(struct irq_data *data)
	int coreid = cvmx_get_core_num();
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
	cvmx_write_csr(en_addr, mask);

static void octeon_irq_ciu2_disable_local(struct irq_data *data)
	int coreid = cvmx_get_core_num();
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + (0x1000ull * cd.s.line);
	cvmx_write_csr(en_addr, mask);
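/*
 * A CIU2 source is acknowledged by writing its bit back to the per-line
 * RAW register for this core.
 */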
static void octeon_irq_ciu2_ack(struct irq_data *data)
	int coreid = cvmx_get_core_num();
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd.s.line);
	cvmx_write_csr(en_addr, mask);

static void octeon_irq_ciu2_disable_all(struct irq_data *data)
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
		cvmx_write_csr(en_addr, mask);

static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);

static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);

static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data)
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid);
	cvmx_write_csr(en_addr, mask);

static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data)
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid);
	cvmx_write_csr(en_addr, mask);

static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
					const struct cpumask *dest, bool force)
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd.s.bit;

	for_each_online_cpu(cpu) {
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
		cvmx_write_csr(en_addr, mask);

static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu2_enable(data);

static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
	union octeon_ciu_chip_data cd;
	cd.p = irq_data_get_irq_chip_data(data);

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);

	octeon_irq_ciu2_disable_all(data);

static struct irq_chip octeon_irq_chip_ciu2 = {
	.irq_enable = octeon_irq_ciu2_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_ack = octeon_irq_ciu2_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,

	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,

static struct irq_chip octeon_irq_chip_ciu2_mbox = {
	.irq_enable = octeon_irq_ciu2_mbox_enable_all,
	.irq_disable = octeon_irq_ciu2_mbox_disable_all,
	.irq_ack = octeon_irq_ciu2_mbox_disable_local,
	.irq_eoi = octeon_irq_ciu2_mbox_enable_local,

	.irq_cpu_online = octeon_irq_ciu2_mbox_enable_local,
	.irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,

static struct irq_chip octeon_irq_chip_ciu2_wd = {
	.irq_enable = octeon_irq_ciu2_wd_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable_local,

static struct irq_chip octeon_irq_chip_ciu2_gpio = {
	.irq_enable = octeon_irq_ciu2_enable_gpio,
	.irq_disable = octeon_irq_ciu2_disable_gpio,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,

	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,

	.flags = IRQCHIP_SET_TYPE_MASKED,
static int octeon_irq_ciu2_xlat(struct irq_domain *d,
				struct device_node *node,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
	unsigned int ciu, bit;

	*out_hwirq = (ciu << 6) | bit;

static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
	if (line == 3) /* MIO */
		case 2:	 /* IPD_DRP */
		case 8 ... 11: /* Timers */
	else if (line == 6) /* PKT */
		case 52 ... 53: /* ILK_DRP */
		case 8 ... 12:	/* GMX_DRP */

static int octeon_irq_ciu2_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;

	if (!octeon_irq_virq_in_range(virq))

	 * Don't map irq if it is reserved for GPIO.
	 * (Line 7 holds the GPIO lines.)
	if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0)

	if (octeon_irq_ciu2_is_edge(line, bit))
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   &octeon_irq_chip_ciu2,
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   &octeon_irq_chip_ciu2,

static int octeon_irq_ciu2_gpio_map(struct irq_domain *d,
				    unsigned int virq, irq_hw_number_t hw)
	return octeon_irq_gpio_map_common(d, virq, hw, 7, &octeon_irq_chip_ciu2_gpio);

static struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
	.map = octeon_irq_ciu2_map,
	.xlate = octeon_irq_ciu2_xlat,

static struct irq_domain_ops octeon_irq_domain_ciu2_gpio_ops = {
	.map = octeon_irq_ciu2_gpio_map,
	.xlate = octeon_irq_gpio_xlat,
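/*
 * CIU2 dispatch is two-level: the IP2 summary register identifies the
 * pending line, then that line's SRC register identifies the bit, which
 * is translated to a Linux irq via octeon_irq_ciu_to_irq[].
 */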
static void octeon_irq_ciu2(void)
	u64 src_reg, src, sum;
	const unsigned long core_id = cvmx_get_core_num();

	sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful;

	line = fls64(sum) - 1;
	src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line);
	src = cvmx_read_csr(src_reg);

	bit = fls64(src) - 1;
	irq = octeon_irq_ciu_to_irq[line][bit];

	spurious_interrupt();

	/*
	 * CN68XX pass 1.x has an erratum: accessing the ACK registers
	 * can stop interrupts from propagating.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id));

static void octeon_irq_ciu2_mbox(void)
	const unsigned long core_id = cvmx_get_core_num();
	u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60;

	line = fls64(sum) - 1;

	do_IRQ(OCTEON_IRQ_MBOX0 + line);

	spurious_interrupt();

	/*
	 * CN68XX pass 1.x has an erratum: accessing the ACK registers
	 * can stop interrupts from propagating.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id));

static void __init octeon_irq_init_ciu2(void)
	struct device_node *gpio_node;
	struct device_node *ciu_node;
	struct irq_domain *ciu_domain = NULL;

	octeon_irq_init_ciu2_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;

	octeon_irq_ip2 = octeon_irq_ciu2;
	octeon_irq_ip3 = octeon_irq_ciu2_mbox;
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	octeon_irq_init_core();

	gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
		struct octeon_irq_gpio_domain_data *gpiod;

		gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
			/* gpio domain host_data is the base hwirq number. */
			gpiod->base_hwirq = 7 << 6;
			irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_ciu2_gpio_ops, gpiod);
			of_node_put(gpio_node);
			pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
		pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");

	ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-6880-ciu2");
		ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
		irq_set_default_host(ciu_domain);
		of_node_put(ciu_node);
		panic("Cannot find device node for cavium,octeon-6880-ciu2.");
	for (i = 0; i < 64; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);

	for (i = 0; i < 32; i++)
		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
					   &octeon_irq_chip_ciu2_wd, handle_level_irq);

	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);

	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);

	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);

	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);

	irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX2, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX3, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);

void __init arch_init_irq(void)
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);

	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		octeon_irq_init_ciu2();
		octeon_irq_init_ciu();

asmlinkage void plat_irq_dispatch(void)
	unsigned long cop0_cause;
	unsigned long cop0_status;

	cop0_cause = read_c0_cause();
	cop0_status = read_c0_status();
	cop0_cause &= cop0_status;
	cop0_cause &= ST0_IM;
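	/*
	 * The IP0-IP7 pending bits occupy bits 8-15 of Cause, so for the
	 * remaining lines the highest pending bit is converted to an irq
	 * number as MIPS_CPU_IRQ_BASE + (fls(cop0_cause) - 9).
	 */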
	if (unlikely(cop0_cause & STATUSF_IP2))
	else if (unlikely(cop0_cause & STATUSF_IP3))
	else if (unlikely(cop0_cause & STATUSF_IP4))
	else if (likely(cop0_cause))
		do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);

#ifdef CONFIG_HOTPLUG_CPU

void octeon_fixup_irqs(void)

#endif /* CONFIG_HOTPLUG_CPU */