genirq: Add flag to force mask in disable_irq[_nosync]()
author Thomas Gleixner <tglx@linutronix.de>
Fri, 9 Oct 2015 21:28:58 +0000 (23:28 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
Sun, 11 Oct 2015 09:33:42 +0000 (11:33 +0200)
If an irq chip does not implement the irq_disable callback, then we
use a lazy approach for disabling the interrupt. That means that the
interrupt is marked disabled, but the interrupt line is not
immediately masked in the interrupt chip. It only becomes masked if
the interrupt is raised while it's marked disabled. We use this to avoid
possibly expensive mask/unmask operations in the common case.
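
In rough pseudocode, the lazy scheme looks like this (a sketch of the
idea only, not the exact kernel code):

    /* Lazy disable: no irq_disable callback, the line stays unmasked */
    void irq_disable(struct irq_desc *desc)
    {
            irq_state_set_disabled(desc);   /* only mark it disabled */
    }

    /* Flow handler, when the line fires while marked disabled */
    if (irqd_irq_disabled(&desc->irq_data)) {
            mask_irq(desc);                 /* mask it now ... */
            desc->istate |= IRQS_PENDING;   /* ... and note it for resend */
    }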

Unfortunately there are devices which do not allow the interrupt to be
disabled easily at the device level. They are forced to use
disable_irq_nosync(). This can result in taking each interrupt twice.

Instead of enforcing the non-lazy mode on all interrupts of an irq
chip, provide a settings flag which the driver can set for that
particular interrupt line.
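
For example, a driver would set the flag once before requesting the
line (a minimal sketch; the irq number, handler and "mydev" name are
made up for illustration):

    /* Force immediate masking in disable_irq[_nosync]() for this line */
    irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);

    ret = request_irq(irq, my_handler, 0, "mydev", dev);
    if (ret)
            return ret;

    /* Now masks the line in the irq chip right away */
    disable_irq_nosync(irq);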

Reported-and-tested-by: Duc Dang <dhdang@apm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Jason Cooper <jason@lakedaemon.net>
Link: http://lkml.kernel.org/r/alpine.DEB.2.11.1510092348370.6097@nanos
include/linux/irq.h
kernel/irq/chip.c
kernel/irq/manage.c
kernel/irq/settings.h

diff --git a/include/linux/irq.h b/include/linux/irq.h
index ba72b60b57b1f38e753918ff9a9dcbacf8daadf5..3c1c96786248cb02d2d9792cce455892323ef275 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -72,6 +72,7 @@ enum irqchip_irq_state;
  * IRQ_IS_POLLED               - Always polled by another interrupt. Exclude
  *                               it from the spurious interrupt detection
  *                               mechanism and from core side polling.
+ * IRQ_DISABLE_UNLAZY          - Disable lazy irq disable
  */
 enum {
        IRQ_TYPE_NONE           = 0x00000000,
@@ -97,13 +98,14 @@ enum {
        IRQ_NOTHREAD            = (1 << 16),
        IRQ_PER_CPU_DEVID       = (1 << 17),
        IRQ_IS_POLLED           = (1 << 18),
+       IRQ_DISABLE_UNLAZY      = (1 << 19),
 };
 
 #define IRQF_MODIFY_MASK       \
        (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
         IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
         IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
-        IRQ_IS_POLLED)
+        IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)
 
 #define IRQ_NO_BALANCING_MASK  (IRQ_PER_CPU | IRQ_NO_BALANCING)
 
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 4aa00d325b8c8974e36b797ddde0a49848b5794d..15206453b12aab09cf96dd09cc3fa2da92fd9c14 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -241,6 +241,13 @@ void irq_enable(struct irq_desc *desc)
  * disabled. If an interrupt happens, then the interrupt flow
  * handler masks the line at the hardware level and marks it
  * pending.
+ *
+ * If the interrupt chip does not implement the irq_disable callback,
+ * a driver can disable the lazy approach for a particular irq line by
+ * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
+ * be used for devices which cannot disable the interrupt at the
+ * device level under certain circumstances and have to use
+ * disable_irq[_nosync] instead.
  */
 void irq_disable(struct irq_desc *desc)
 {
@@ -248,6 +255,8 @@ void irq_disable(struct irq_desc *desc)
        if (desc->irq_data.chip->irq_disable) {
                desc->irq_data.chip->irq_disable(&desc->irq_data);
                irq_state_set_masked(desc);
+       } else if (irq_settings_disable_unlazy(desc)) {
+               mask_irq(desc);
        }
 }
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 312f9cb1280567ff0efc1523069483ea333d63c3..a71175ff98d58f7c274786cadd5b6b520678c665 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1463,6 +1463,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 
        /* If this was the last handler, shut down the IRQ line: */
        if (!desc->action) {
+               irq_settings_clr_disable_unlazy(desc);
                irq_shutdown(desc);
                irq_release_resources(desc);
        }
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index 3320b84cc60f79ac09501d6f4d55acc01b0cebe6..320579d8909100d1ebb181510dd8ce4433ab5456 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -15,6 +15,7 @@ enum {
        _IRQ_NESTED_THREAD      = IRQ_NESTED_THREAD,
        _IRQ_PER_CPU_DEVID      = IRQ_PER_CPU_DEVID,
        _IRQ_IS_POLLED          = IRQ_IS_POLLED,
+       _IRQ_DISABLE_UNLAZY     = IRQ_DISABLE_UNLAZY,
        _IRQF_MODIFY_MASK       = IRQF_MODIFY_MASK,
 };
 
@@ -28,6 +29,7 @@ enum {
 #define IRQ_NESTED_THREAD      GOT_YOU_MORON
 #define IRQ_PER_CPU_DEVID      GOT_YOU_MORON
 #define IRQ_IS_POLLED          GOT_YOU_MORON
+#define IRQ_DISABLE_UNLAZY     GOT_YOU_MORON
 #undef IRQF_MODIFY_MASK
 #define IRQF_MODIFY_MASK       GOT_YOU_MORON
 
@@ -154,3 +156,13 @@ static inline bool irq_settings_is_polled(struct irq_desc *desc)
 {
        return desc->status_use_accessors & _IRQ_IS_POLLED;
 }
+
+static inline bool irq_settings_disable_unlazy(struct irq_desc *desc)
+{
+       return desc->status_use_accessors & _IRQ_DISABLE_UNLAZY;
+}
+
+static inline void irq_settings_clr_disable_unlazy(struct irq_desc *desc)
+{
+       desc->status_use_accessors &= ~_IRQ_DISABLE_UNLAZY;
+}
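
On teardown the flag is deliberately dropped again: when free_irq()
removes the last handler, __free_irq() calls
irq_settings_clr_disable_unlazy() before shutting the line down, so a
later request_irq() starts out with the default lazy behaviour.
Continuing the hypothetical example from above:

    /* Last handler goes away: IRQ_DISABLE_UNLAZY is cleared here */
    free_irq(irq, dev);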