/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>

#include "internals.h"
/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;
/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:       the interrupt number
 * @desc:      description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
        print_irq_desc(irq, desc);
        kstat_incr_irqs_this_cpu(irq, desc);
        ack_bad_irq(irq);
}
/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template', that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);
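
/*
 * Illustrative sketch, not part of this file: because the generic layer
 * wires every interrupt source to its controller, a driver only works
 * with an irq number and a handler. The mydev_isr() handler, "mydev"
 * name and dev pointer below are hypothetical:
 *
 *      static irqreturn_t mydev_isr(int irq, void *dev_id)
 *      {
 *              return IRQ_HANDLED;
 *      }
 *
 *      if (request_irq(irq, mydev_isr, IRQF_SHARED, "mydev", dev))
 *              goto err;
 *      ...
 *      free_irq(irq, dev);
 */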
#ifdef CONFIG_SPARSE_IRQ
static struct irq_desc irq_desc_init = {
        .irq        = -1,
        .status     = IRQ_DISABLED,
        .chip       = &no_irq_chip,
        .handle_irq = handle_bad_irq,
        .depth      = 1,
        .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
#ifdef CONFIG_SMP
        .affinity   = CPU_MASK_ALL
#endif
};
void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
{
        void *ptr;
        int node;

        node = cpu_to_node(cpu);
        ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);

        /*
         * Don't overwrite if we can not get a new one:
         * init_copy_kstat_irqs() could still use the old one.
         */
        if (ptr) {
                printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n",
                       cpu, node);
                desc->kstat_irqs = ptr;
        }
}
static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
{
        memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

        spin_lock_init(&desc->lock);
        desc->irq = irq;
#ifdef CONFIG_SMP
        desc->cpu = cpu;
#endif
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        init_kstat_irqs(desc, cpu, nr_cpu_ids);
        if (!desc->kstat_irqs) {
                printk(KERN_ERR "can not alloc kstat_irqs\n");
                BUG_ON(1);
        }
        arch_init_chip_data(desc, cpu);
}
/*
 * Protect the sparse_irqs:
 */
DEFINE_SPINLOCK(sparse_irq_lock);

struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS_LEGACY-1] = {
                .irq        = -1,
                .status     = IRQ_DISABLED,
                .chip       = &no_irq_chip,
                .handle_irq = handle_bad_irq,
                .depth      = 1,
                .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
#ifdef CONFIG_SMP
                .affinity   = CPU_MASK_ALL
#endif
        }
};

/* FIXME: use bootmem alloc ... */
static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
int __init early_irq_init(void)
{
        struct irq_desc *desc;
        int legacy_count;
        int i;

        desc = irq_desc_legacy;
        legacy_count = ARRAY_SIZE(irq_desc_legacy);

        for (i = 0; i < legacy_count; i++) {
                desc[i].irq = i;
                desc[i].kstat_irqs = kstat_irqs_legacy[i];
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);

                irq_desc_ptrs[i] = desc + i;
        }

        for (i = legacy_count; i < NR_IRQS; i++)
                irq_desc_ptrs[i] = NULL;

        return arch_early_irq_init();
}
struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
}
struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc;
        unsigned long flags;
        int node;

        if (irq >= NR_IRQS) {
                printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
                       irq, NR_IRQS);
                WARN_ON(1);
                return NULL;
        }

        desc = irq_desc_ptrs[irq];
        if (desc)
                return desc;

        spin_lock_irqsave(&sparse_irq_lock, flags);

        /* We have to check it to avoid races with another CPU */
        desc = irq_desc_ptrs[irq];

        if (desc)
                goto out_unlock;

        node = cpu_to_node(cpu);
        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
        printk(KERN_DEBUG "  alloc irq_desc for %d on cpu %d node %d\n",
               irq, cpu, node);
        if (!desc) {
                printk(KERN_ERR "can not alloc irq_desc\n");
                BUG_ON(1);
        }
        init_one_irq_desc(irq, desc, cpu);

        irq_desc_ptrs[irq] = desc;

out_unlock:
        spin_unlock_irqrestore(&sparse_irq_lock, flags);

        return desc;
}
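
/*
 * Note the check-lock-recheck pattern above: the first irq_desc_ptrs[]
 * lookup is a lockless fast path; after taking sparse_irq_lock the
 * pointer is read again, since another CPU may have installed a
 * descriptor for the same irq in the meantime.
 */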
#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .status = IRQ_DISABLED,
                .chip = &no_irq_chip,
                .handle_irq = handle_bad_irq,
                .depth = 1,
                .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
#ifdef CONFIG_SMP
                .affinity = CPU_MASK_ALL
#endif
        }
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
int __init early_irq_init(void)
{
        struct irq_desc *desc;
        int count;
        int i;

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++) {
                desc[i].irq = i;
                desc[i].kstat_irqs = kstat_irqs_all[i];
        }

        return arch_early_irq_init();
}
struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
        return irq_to_desc(irq);
}
#endif /* !CONFIG_SPARSE_IRQ */
void clear_kstat_irqs(struct irq_desc *desc)
{
        memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
}
/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this itself.
 */
static void ack_bad(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        print_irq_desc(irq, desc);
        ack_bad_irq(irq);
}
/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
        return 0;
}
/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
        .name           = "none",
        .startup        = noop_ret,
        .shutdown       = noop,
        .enable         = noop,
        .disable        = noop,
        .ack            = ack_bad,
        .end            = noop,
};

/*
 * Generic dummy implementation which can be used for
 * real dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
        .name           = "dummy",
        .startup        = noop_ret,
        .shutdown       = noop,
        .enable         = noop,
        .disable        = noop,
        .ack            = noop,
        .mask           = noop,
        .unmask         = noop,
        .end            = noop,
};
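
/*
 * For contrast, a sketch (not from this file) of how a real controller
 * driver fills in such a 'controller-template' with callbacks that touch
 * hardware. The my_chip_* functions and MY_CHIP_MASK_REG register are
 * hypothetical:
 *
 *      static void my_chip_mask(unsigned int irq)
 *      {
 *              writel(1 << irq, MY_CHIP_MASK_REG);
 *      }
 *
 *      static struct irq_chip my_chip = {
 *              .name   = "MYCHIP",
 *              .ack    = my_chip_ack,
 *              .mask   = my_chip_mask,
 *              .unmask = my_chip_unmask,
 *      };
 */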
/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
        return IRQ_NONE;
}
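
/*
 * no_action() is used where an irq line must be requested and kept
 * enabled although nothing has to be done when it fires; the classic
 * user is the i8259 cascade irqaction, which names no_action as its
 * handler.
 */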
/**
 * handle_IRQ_event - irq action chain handler
 * @irq:	the interrupt number
 * @action:	the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
        irqreturn_t ret, retval = IRQ_NONE;
        unsigned int status = 0;

        if (!(action->flags & IRQF_DISABLED))
                local_irq_enable_in_hardirq();

        do {
                ret = action->handler(irq, action->dev_id);
                if (ret == IRQ_HANDLED)
                        status |= action->flags;
                retval |= ret;
                action = action->next;
        } while (action);

        if (status & IRQF_SAMPLE_RANDOM)
                add_interrupt_randomness(irq);
        local_irq_disable();

        return retval;
}
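
/*
 * Example of such an action chain, not part of this file: two drivers
 * sharing one line with IRQF_SHARED make desc->action a two-entry list.
 * The device that did not raise the interrupt returns IRQ_NONE, the
 * other IRQ_HANDLED, and the OR-ed retval tells the spurious-irq logic
 * the event was handled. The a_isr/b_isr handlers are hypothetical:
 *
 *      request_irq(irq, a_isr, IRQF_SHARED, "a", a_dev);
 *      request_irq(irq, b_isr, IRQF_SHARED, "b", b_dev);
 */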
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
/**
 * __do_IRQ - original all in one highlevel IRQ handler
 * @irq:	the interrupt number
 *
 * __do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        unsigned int status;

        kstat_incr_irqs_this_cpu(irq, desc);
        if (CHECK_IRQ_PER_CPU(desc->status)) {
                irqreturn_t action_ret;

                /*
                 * No locking required for CPU-local interrupts:
                 */
                if (desc->chip->ack) {
                        desc->chip->ack(irq);
                        /* get new one */
                        desc = irq_remap_to_desc(irq, desc);
                }
                if (likely(!(desc->status & IRQ_DISABLED))) {
                        action_ret = handle_IRQ_event(irq, desc->action);
                        if (!noirqdebug)
                                note_interrupt(irq, desc, action_ret);
                }
                desc->chip->end(irq);
                return 1;
        }
        spin_lock(&desc->lock);
        if (desc->chip->ack) {
                desc->chip->ack(irq);
                desc = irq_remap_to_desc(irq, desc);
        }
        /*
         * REPLAY is when Linux resends an IRQ that was dropped earlier
         * WAITING is used by probe to mark irqs that are being tested
         */
        status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
        status |= IRQ_PENDING; /* we _want_ to handle it */

        /*
         * If the IRQ is disabled for whatever reason, we cannot
         * use the action we have.
         */
        action = NULL;
        if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
                action = desc->action;
                status &= ~IRQ_PENDING; /* we commit to handling */
                status |= IRQ_INPROGRESS; /* we are handling it */
        }
        desc->status = status;
        /*
         * If there is no IRQ handler or it was disabled, exit early.
         * Since we set PENDING, if another processor is handling
         * a different instance of this same irq, the other processor
         * will take care of it.
         */
        if (unlikely(!action))
                goto out;
        /*
         * Edge triggered interrupts need to remember
         * pending events.
         * This applies to any hw interrupts that allow a second
         * instance of the same irq to arrive while we are in do_IRQ
         * or in the handler. But the code here only handles the _second_
         * instance of the irq, not the third or fourth. So it is mostly
         * useful for irq hardware that does not mask cleanly in an
         * SMP environment.
         */
        for (;;) {
                irqreturn_t action_ret;

                spin_unlock(&desc->lock);

                action_ret = handle_IRQ_event(irq, action);
                if (!noirqdebug)
                        note_interrupt(irq, desc, action_ret);

                spin_lock(&desc->lock);
                if (likely(!(desc->status & IRQ_PENDING)))
                        break;
                desc->status &= ~IRQ_PENDING;
        }
        desc->status &= ~IRQ_INPROGRESS;
out:
        /*
         * The ->end() handler has to deal with interrupts which got
         * disabled while the handler was running.
         */
        desc->chip->end(irq);
        spin_unlock(&desc->lock);

        return 1;
}
#endif
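
/*
 * Illustrative timeline for the IRQ_PENDING replay above: CPU0 is in
 * handle_IRQ_event() with IRQ_INPROGRESS set when the same edge irq
 * fires again. CPU1 enters __do_IRQ(), sets IRQ_PENDING and leaves
 * through the !action path. Once CPU0 retakes desc->lock it sees
 * IRQ_PENDING, clears it and loops, so the second edge is not lost.
 */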
void early_init_irq_lock_class(void)
{
        struct irq_desc *desc;
        int i;

        for_each_irq_desc(i, desc) {
                lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        }
}
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc ? desc->kstat_irqs[cpu] : 0;
}
EXPORT_SYMBOL(kstat_irqs_cpu);
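
/*
 * These per-cpu counters are what show_interrupts() sums to produce
 * the per-cpu columns of /proc/interrupts, roughly:
 *
 *      for_each_online_cpu(j)
 *              seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 */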