#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour.  When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010
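
/*
 * Example (illustrative sketch, not part of the original header): a driver
 * requesting a falling-edge interrupt.  "button_isr", "button_dev" and the
 * irq number are hypothetical; with no IRQF_TRIGGER_* flag the line would
 * keep whatever trigger the firmware configured.
 *
 *	static irqreturn_t button_isr(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(irq, button_isr, IRQF_TRIGGER_FALLING,
 *			  "button", button_dev);
 */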
/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler.
 *                 DEPRECATED. This flag is a NOOP and is scheduled to be removed
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                resume time.
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
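
/*
 * Example (illustrative sketch, not part of the original header): a
 * level-triggered device handled by a thread.  IRQF_ONESHOT keeps the line
 * masked until the thread function has run.  The "mydev_*" symbols are
 * hypothetical.
 *
 *	static irqreturn_t mydev_quick_check(int irq, void *dev_id)
 *	{
 *		if (!mydev_irq_pending(dev_id))
 *			return IRQ_NONE;
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t mydev_thread_fn(int irq, void *dev_id)
 *	{
 *		(sleepable work, e.g. I2C transfers, goes here)
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(irq, mydev_quick_check, mydev_thread_fn,
 *				   IRQF_ONESHOT, "mydev", mydev);
 */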
/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);
/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;
extern irqreturn_t no_action(int cpl, void *dev_id);
#ifdef CONFIG_GENERIC_HARDIRQS
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id);
#else

extern int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);
/*
 * Special function to avoid ifdeffery in kernel/irq/devres.c which
 * gets magically built by GENERIC_HARDIRQS=n architectures (sparc,
 * m68k). I really love these $@%#!* obvious Makefile references:
 * ../../../kernel/irq/devres.o
 */
static inline int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev)
{
	return request_irq(irq, handler, flags, name, dev);
}

static inline int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id)
{
	return request_irq(irq, handler, flags, name, dev_id);
}

static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id)
{
	return request_irq(irq, handler, 0, devname, percpu_dev_id);
}
#endif
extern void free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);
struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
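
/*
 * Example (illustrative sketch, not part of the original header): a managed
 * request; the IRQ is freed automatically when the device is unbound, so no
 * explicit free_irq() in the error/remove paths.  "mydev_isr" and "mydev"
 * are hypothetical; dev_name() comes from linux/device.h.
 *
 *	err = devm_request_irq(&pdev->dev, irq, mydev_isr, IRQF_SHARED,
 *			       dev_name(&pdev->dev), mydev);
 *	if (err)
 *		return err;
 */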
/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif
extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
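
/*
 * Example (illustrative sketch, not part of the original header): a per-cpu
 * interrupt such as a local timer.  The line is requested once but must be
 * enabled on each CPU that uses it.  "evt_handler" and "percpu_evt" are
 * hypothetical; IRQ_TYPE_NONE comes from linux/irq.h.
 *
 *	err = request_percpu_irq(irq, evt_handler, "local_timer", percpu_evt);
 *
 *	(then, on each CPU, e.g. from hotplug callbacks:)
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 *	...
 *	disable_percpu_irq(irq);
 */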
/* The following three functions are for the core kernel use only. */
#ifdef CONFIG_GENERIC_HARDIRQS
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif
#else
static inline void suspend_device_irqs(void) { };
static inline void resume_device_irqs(void) { };
static inline int check_wakeup_irqs(void) { return 0; }
#endif
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

extern cpumask_var_t irq_default_affinity;

/* Internal implementation. Use the helpers below */
extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
			      bool force);

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
static inline int
irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);
}

/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low level cpu hotplug code, where we need to make per
 * cpu interrupts affine before the cpu becomes online.
 */
static inline int
irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, true);
}
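
/*
 * Example (illustrative sketch, not part of the original header): pinning an
 * interrupt, e.g. a NIC queue, to one CPU.  cpumask_of() comes from
 * linux/cpumask.h; the irq number is hypothetical.
 *
 *	if (irq_can_set_affinity(irq))
 *		err = irq_set_affinity(irq, cpumask_of(2));
 */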
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:		Interrupt to which notification applies
 * @kref:		Reference count, for internal use
 * @work:		Work item, for internal use
 * @notify:		Function to be called on change.  This will be
 *			called in process context.
 * @release:		Function to be called on release.  This will be
 *			called in process context.  Once registered, the
 *			structure must only be freed when this function is
 *			called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
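
/*
 * Example (illustrative sketch, not part of the original header): following
 * affinity changes, e.g. to requeue per-cpu resources behind the IRQ.  The
 * "my_*" callbacks are hypothetical; the notifier must stay allocated until
 * @release has been called.
 *
 *	static void my_notify(struct irq_affinity_notify *n,
 *			      const cpumask_t *mask)
 *	{
 *		(called in process context on every affinity change)
 *	}
 *
 *	static void my_release(struct kref *ref)
 *	{
 *		struct irq_affinity_notify *n =
 *			container_of(ref, struct irq_affinity_notify, kref);
 *		(safe to free the containing object from here on)
 *	}
 *
 *	notify->notify = my_notify;
 *	notify->release = my_release;
 *	err = irq_set_affinity_notifier(irq, notify);
 */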
#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq)  { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}
#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
#ifdef CONFIG_GENERIC_HARDIRQS
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that know that
 * a particular irq context is disabled, and which is the only
 * irq-context user of a lock, so that it's safe to take the
 * lock in the irq-disabled section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}
/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}
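
/*
 * Example (illustrative sketch, not part of the original header): arming an
 * IRQ as a system wakeup source from a driver's suspend/resume callbacks.
 * "mydev" is hypothetical; device_may_wakeup() comes from linux/pm_wakeup.h.
 *
 *	static int mydev_suspend(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(mydev->irq);
 *		return 0;
 *	}
 *
 *	static int mydev_resume(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(mydev->irq);
 *		return 0;
 *	}
 */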
#else /* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures, if they want to support the lock
 * validator need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
#ifndef CONFIG_LOCKDEP
# define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
						disable_irq_nosync(irq)
# define disable_irq_lockdep(irq)		disable_irq(irq)
# define enable_irq_lockdep(irq)		enable_irq(irq)
# define enable_irq_lockdep_irqrestore(irq, flags) \
						enable_irq(irq)
#endif

static inline int enable_irq_wake(unsigned int irq)
{
	return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
	return 0;
}
#endif /* CONFIG_GENERIC_HARDIRQS */
#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads	(0)
#endif

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif
/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while(0)
#endif
/* PLEASE do not allocate new softirqs unless you really need
   _really_ high frequency threaded job scheduling. For almost all
   purposes tasklets are more than enough. F.e. all serial device
   BHs et al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern char *softirq_to_name[NR_SOFTIRQS];
/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
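
/*
 * Example (illustrative sketch, not part of the original header): how a
 * softirq vector is wired up, mirroring what kernel/softirq.c does for the
 * existing vectors.  "MY_SOFTIRQ" and "my_softirq_action" are hypothetical
 * (adding a vector means editing the enum above); per the comment there,
 * new softirqs are discouraged.
 *
 *	static void my_softirq_action(struct softirq_action *a)
 *	{
 *		(bounded amount of deferred work, runs with irqs enabled)
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);	(once, at init)
 *	raise_softirq(MY_SOFTIRQ);			(to trigger it)
 */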
/* This is the worklist that queues up per-cpu softirq work.
 *
 * send_remote_softirq() adds work to these lists, and
 * the softirq handler itself dequeues from them.  The queues
 * are protected by disabling local cpu interrupts and they must
 * only be accessed by the local cpu that they are for.
 */
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}
/* Try to send a softirq to a remote cpu.  If this cannot be done, the
 * work will be queued to the local cpu.
 */
extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);

/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
 * and compute the current cpu, passed in as 'this_cpu'.
 */
extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
				  int this_cpu, int softirq);
/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a given
   tasklet runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution is still not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets. If the client needs some inter-tasklet
     synchronization, it must provide it with spinlocks.
 */
struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
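
/*
 * Example (illustrative sketch, not part of the original header): a
 * statically declared tasklet scheduled from a hard interrupt handler.
 * The "mydev*" symbols are hypothetical; the unsigned long argument
 * carries the driver's cookie.
 *
 *	static void mydev_tasklet_fn(unsigned long data)
 *	{
 *		struct mydev *md = (struct mydev *)data;
 *		(bottom-half work, runs in softirq context)
 *	}
 *
 *	static DECLARE_TASKLET(mydev_tasklet, mydev_tasklet_fn,
 *			       (unsigned long)&mydev);
 *
 *	static irqreturn_t mydev_isr(int irq, void *dev_id)
 *	{
 *		(ack the hardware)
 *		tasklet_schedule(&mydev_tasklet);
 *		return IRQ_HANDLED;
 *	}
 */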
enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};
#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
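
/*
 * Example (illustrative sketch, not part of the original header): dynamic
 * initialisation for a tasklet embedded in a per-device structure, with the
 * matching teardown.  "md" and "mydev_tx_fn" are hypothetical.
 *
 *	tasklet_init(&md->tx_tasklet, mydev_tx_fn, (unsigned long)md);
 *	...
 *	tasklet_kill(&md->tx_tasklet);	(before freeing md)
 */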
struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);
static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}
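
/*
 * Example (illustrative sketch, not part of the original header): work
 * triggered with hrtimer precision but executed via the embedded tasklet in
 * softirq context.  "md" and "my_timer_fn" are hypothetical; NSEC_PER_MSEC
 * comes from linux/time.h.
 *
 *	static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
 *	{
 *		(softirq-safe work)
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&md->ttimer, my_timer_fn,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&md->ttimer, ktime_set(0, NSEC_PER_MSEC),
 *			      HRTIMER_MODE_REL);
 */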
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */
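
/*
 * Example (illustrative sketch, not part of the original header) of the
 * sequence above; "mydev_quiesce", "mydev_trigger" and "mydev_ack" are
 * hypothetical device helpers, and mdelay() comes from linux/delay.h.
 *
 *	unsigned long irqs;
 *	int irq;
 *
 *	mydev_quiesce(dev);		(steps 1-2: mask device, irqs on)
 *	irqs = probe_irq_on();		(step 3)
 *	mydev_trigger(dev);		(step 4)
 *	mdelay(20);			(step 5)
 *	irq = probe_irq_off(irqs);	(step 6: 0 = none, <0 = multiple)
 *	mydev_ack(dev);			(step 7)
 */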
#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif
#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif
struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

#endif