arm64: rockchip_defconfig: enable CC_STACKPROTECTOR_STRONG
[firefly-linux-kernel-4.4.55.git] / kernel / irq / manage.c
index fa17855ca65a235bd5c1f9a7daa042b57722b924..6ead200370da0cfe15b9c6732ec5ec4e3a7523a9 100644 (file)
@@ -32,24 +32,10 @@ static int __init setup_forced_irqthreads(char *arg)
 early_param("threadirqs", setup_forced_irqthreads);
 #endif
 
-/**
- *     synchronize_irq - wait for pending IRQ handlers (on other CPUs)
- *     @irq: interrupt number to wait for
- *
- *     This function waits for any pending IRQ handlers for this interrupt
- *     to complete before returning. If you use this function while
- *     holding a resource the IRQ handler may need you will deadlock.
- *
- *     This function may be called - with care - from IRQ context.
- */
-void synchronize_irq(unsigned int irq)
+static void __synchronize_hardirq(struct irq_desc *desc)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
        bool inprogress;
 
-       if (!desc)
-               return;
-
        do {
                unsigned long flags;
 
@@ -67,18 +53,76 @@ void synchronize_irq(unsigned int irq)
 
                /* Oops, that failed? */
        } while (inprogress);
+}
 
-       /*
-        * We made sure that no hardirq handler is running. Now verify
-        * that no threaded handlers are active.
-        */
-       wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
+/**
+ *     synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
+ *     @irq: interrupt number to wait for
+ *
+ *     This function waits for any pending hard IRQ handlers for this
+ *     interrupt to complete before returning. If you use this
+ *     function while holding a resource the IRQ handler may need, you
+ *     will deadlock. It does not take associated threaded handlers
+ *     into account.
+ *
+ *     Do not use this for shutdown scenarios where you must be sure
+ *     that all parts (hardirq and threaded handler) have completed.
+ *
+ *     Returns: false if a threaded handler is active.
+ *
+ *     This function may be called - with care - from IRQ context.
+ */
+bool synchronize_hardirq(unsigned int irq)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       if (desc) {
+               __synchronize_hardirq(desc);
+               return !atomic_read(&desc->threads_active);
+       }
+
+       return true;
+}
+EXPORT_SYMBOL(synchronize_hardirq);
+
+/**
+ *     synchronize_irq - wait for pending IRQ handlers (on other CPUs)
+ *     @irq: interrupt number to wait for
+ *
+ *     This function waits for any pending IRQ handlers for this interrupt
+ *     to complete before returning. If you use this function while
+ *     holding a resource the IRQ handler may need, you will deadlock.
+ *
+ *     This function may be called - with care - from IRQ context.
+ */
+void synchronize_irq(unsigned int irq)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       if (desc) {
+               __synchronize_hardirq(desc);
+               /*
+                * We made sure that no hardirq handler is
+                * running. Now verify that no threaded handlers are
+                * active.
+                */
+               wait_event(desc->wait_for_threads,
+                          !atomic_read(&desc->threads_active));
+       }
 }
 EXPORT_SYMBOL(synchronize_irq);
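
As a usage sketch (not part of this patch; the foo_* device and helpers are hypothetical), the split lets a driver that only has to quiesce the hard-IRQ part avoid waiting on its own threaded handler, while a real shutdown still uses synchronize_irq():

static void foo_quiesce(struct foo_dev *foo)
{
	foo_mask_device_irqs(foo);	/* hypothetical HW masking */
	/*
	 * Wait only for a hard-IRQ handler running on another CPU.
	 * Safe even if the threaded handler might block on a lock
	 * that the caller holds.
	 */
	synchronize_hardirq(foo->irq);
}

static void foo_shutdown(struct foo_dev *foo)
{
	foo_mask_device_irqs(foo);
	/* Full quiescence: hardirq part and threaded handler. */
	synchronize_irq(foo->irq);
}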
 
 #ifdef CONFIG_SMP
 cpumask_var_t irq_default_affinity;
 
+static int __irq_can_set_affinity(struct irq_desc *desc)
+{
+       if (!desc || !irqd_can_balance(&desc->irq_data) ||
+           !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
+               return 0;
+       return 1;
+}
+
 /**
  *     irq_can_set_affinity - Check if the affinity of a given irq can be set
  *     @irq:           Interrupt to check
@@ -86,13 +130,7 @@ cpumask_var_t irq_default_affinity;
  */
 int irq_can_set_affinity(unsigned int irq)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
-
-       if (!desc || !irqd_can_balance(&desc->irq_data) ||
-           !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
-               return 0;
-
-       return 1;
+       return __irq_can_set_affinity(irq_to_desc(irq));
 }
 
 /**
@@ -150,10 +188,11 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
        struct irq_chip *chip = irq_data_get_irq_chip(data);
        int ret;
 
-       ret = chip->irq_set_affinity(data, mask, false);
+       ret = chip->irq_set_affinity(data, mask, force);
        switch (ret) {
        case IRQ_SET_MASK_OK:
-               cpumask_copy(data->affinity, mask);
+       case IRQ_SET_MASK_OK_DONE:
+               cpumask_copy(desc->irq_common_data.affinity, mask);
        case IRQ_SET_MASK_OK_NOCOPY:
                irq_set_thread_affinity(desc);
                ret = 0;
@@ -162,7 +201,8 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
        return ret;
 }
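
IRQ_SET_MASK_OK_DONE is treated like IRQ_SET_MASK_OK by the core (the affinity mask is still copied above), but it tells stacked irqchips that all descendant chips may skip their own affinity handling. A minimal sketch of a parent chip in a hierarchy, with hypothetical names:

static int parent_ic_set_affinity(struct irq_data *d,
				  const struct cpumask *mask, bool force)
{
	unsigned int cpu = cpumask_first_and(mask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	parent_ic_route(d->hwirq, cpu);	/* hypothetical register write */

	/*
	 * Routing is fully programmed here; chips stacked on top of
	 * this one have nothing left to do.
	 */
	return IRQ_SET_MASK_OK_DONE;
}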
 
-int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
+int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
+                           bool force)
 {
        struct irq_chip *chip = irq_data_get_irq_chip(data);
        struct irq_desc *desc = irq_data_to_desc(data);
@@ -172,7 +212,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
                return -EINVAL;
 
        if (irq_can_move_pcntxt(data)) {
-               ret = irq_do_set_affinity(data, mask, false);
+               ret = irq_do_set_affinity(data, mask, force);
        } else {
                irqd_set_move_pending(data);
                irq_copy_pending(desc, mask);
@@ -187,13 +227,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
        return ret;
 }
 
-/**
- *     irq_set_affinity - Set the irq affinity of a given irq
- *     @irq:           Interrupt to set affinity
- *     @mask:          cpumask
- *
- */
-int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
+int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
 {
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
@@ -203,7 +237,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
                return -EINVAL;
 
        raw_spin_lock_irqsave(&desc->lock, flags);
-       ret =  __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
+       ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
 }
@@ -217,6 +251,9 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
                return -EINVAL;
        desc->affinity_hint = m;
        irq_put_desc_unlock(desc, flags);
+       /* set the initial affinity to prevent every interrupt being on CPU0 */
+       if (m)
+               __irq_set_affinity(irq, m, false);
        return 0;
 }
 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
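
With the hunk above, publishing a hint also applies it immediately, so MSI-X vectors no longer all start out on CPU0. A sketch of per-queue setup under that assumption (the driver layout and foo_queue_isr are invented; error unwinding is omitted):

static int foo_setup_queue_irqs(struct foo_dev *foo)
{
	int i, err;

	for (i = 0; i < foo->nr_queues; i++) {
		err = request_irq(foo->msix[i].vector, foo_queue_isr, 0,
				  "foo-queue", &foo->queues[i]);
		if (err)
			return err;
		/* Hint (and, with this change, also set) the affinity. */
		irq_set_affinity_hint(foo->msix[i].vector,
				      get_cpu_mask(i % num_online_cpus()));
	}
	return 0;
}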
@@ -236,7 +273,7 @@ static void irq_affinity_notify(struct work_struct *work)
        if (irq_move_pending(&desc->irq_data))
                irq_get_pending(cpumask, desc);
        else
-               cpumask_copy(cpumask, desc->irq_data.affinity);
+               cpumask_copy(cpumask, desc->irq_common_data.affinity);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
 
        notify->notify(notify, cpumask);
@@ -293,14 +330,13 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
 /*
  * Generic version of the affinity autoselector.
  */
-static int
-setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
+static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
 {
        struct cpumask *set = irq_default_affinity;
-       int node = desc->irq_data.node;
+       int node = irq_desc_get_node(desc);
 
        /* Excludes PER_CPU and NO_BALANCE interrupts */
-       if (!irq_can_set_affinity(irq))
+       if (!__irq_can_set_affinity(desc))
                return 0;
 
        /*
@@ -308,9 +344,9 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
         * one of the targets is online.
         */
        if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
-               if (cpumask_intersects(desc->irq_data.affinity,
+               if (cpumask_intersects(desc->irq_common_data.affinity,
                                       cpu_online_mask))
-                       set = desc->irq_data.affinity;
+                       set = desc->irq_common_data.affinity;
                else
                        irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
        }
@@ -327,10 +363,10 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
        return 0;
 }
 #else
-static inline int
-setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
+/* Wrapper for ALPHA specific affinity selector magic */
+static inline int setup_affinity(struct irq_desc *d, struct cpumask *mask)
 {
-       return irq_select_affinity(irq);
+       return irq_select_affinity(irq_desc_get_irq(d));
 }
 #endif
 
@@ -344,27 +380,52 @@ int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
        int ret;
 
        raw_spin_lock_irqsave(&desc->lock, flags);
-       ret = setup_affinity(irq, desc, mask);
+       ret = setup_affinity(desc, mask);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
 }
 
 #else
 static inline int
-setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
+setup_affinity(struct irq_desc *desc, struct cpumask *mask)
 {
        return 0;
 }
 #endif
 
-void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
+/**
+ *     irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
+ *     @irq: interrupt number to set affinity
+ *     @vcpu_info: vCPU specific data
+ *
+ *     This function uses the vCPU specific data to set the vCPU
+ *     affinity for an irq. The vCPU specific data is passed from
+ *     outside, such as KVM. One example code path is as below:
+ *     KVM -> IOMMU -> irq_set_vcpu_affinity().
+ */
+int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
 {
-       if (suspend) {
-               if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
-                       return;
-               desc->istate |= IRQS_SUSPENDED;
-       }
+       unsigned long flags;
+       struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
+       struct irq_data *data;
+       struct irq_chip *chip;
+       int ret = -ENOSYS;
 
+       if (!desc)
+               return -EINVAL;
+
+       data = irq_desc_get_irq_data(desc);
+       chip = irq_data_get_irq_chip(data);
+       if (chip && chip->irq_set_vcpu_affinity)
+               ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
+       irq_put_desc_unlock(desc, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
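
The call only succeeds when the irqchip implements the new ->irq_set_vcpu_affinity hook; otherwise -ENOSYS comes back. A sketch of both sides, with an entirely invented vcpu_info layout:

struct foo_vcpu_info {		/* hypothetical posting descriptor */
	u64	vpid;
	u64	doorbell_gpa;
};

static int fooc_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
	struct foo_vcpu_info *vi = vcpu_info;

	if (!vi)	/* NULL: revert to normal host delivery */
		return fooc_unpost(d->hwirq);

	/* Post the interrupt directly to the guest vCPU. */
	return fooc_post_to_vcpu(d->hwirq, vi->vpid, vi->doorbell_gpa);
}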
+
+void __disable_irq(struct irq_desc *desc)
+{
        if (!desc->depth++)
                irq_disable(desc);
 }
@@ -376,7 +437,7 @@ static int __disable_irq_nosync(unsigned int irq)
 
        if (!desc)
                return -EINVAL;
-       __disable_irq(desc, irq, false);
+       __disable_irq(desc);
        irq_put_desc_busunlock(desc, flags);
        return 0;
 }
@@ -417,24 +478,39 @@ void disable_irq(unsigned int irq)
 }
 EXPORT_SYMBOL(disable_irq);
 
-void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
+/**
+ *     disable_hardirq - disables an irq and waits for hardirq completion
+ *     @irq: Interrupt to disable
+ *
+ *     Disable the selected interrupt line.  Enables and Disables are
+ *     nested.
+ *     This function waits for any pending hard IRQ handlers for this
+ *     interrupt to complete before returning. If you use this function while
+ *     holding a resource the hard IRQ handler may need, you will deadlock.
+ *
+ *     When used to optimistically disable an interrupt from atomic context,
+ *     the return value must be checked.
+ *
+ *     Returns: false if a threaded handler is active.
+ *
+ *     This function may be called - with care - from IRQ context.
+ */
+bool disable_hardirq(unsigned int irq)
 {
-       if (resume) {
-               if (!(desc->istate & IRQS_SUSPENDED)) {
-                       if (!desc->action)
-                               return;
-                       if (!(desc->action->flags & IRQF_FORCE_RESUME))
-                               return;
-                       /* Pretend that it got disabled ! */
-                       desc->depth++;
-               }
-               desc->istate &= ~IRQS_SUSPENDED;
-       }
+       if (!__disable_irq_nosync(irq))
+               return synchronize_hardirq(irq);
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(disable_hardirq);
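
The kerneldoc's optimistic case looks roughly like this sketch (the foo_* names are hypothetical): disable from atomic context, and only touch shared state if no threaded handler was still running:

static void foo_poll(struct foo_dev *foo)	/* may run in atomic context */
{
	if (disable_hardirq(foo->irq)) {
		/* Hardirq quiesced, no threaded handler active. */
		foo_process_ring(foo);
		enable_irq(foo->irq);
	} else {
		/* A threaded handler is still running; back off. */
		enable_irq(foo->irq);
		foo_schedule_retry(foo);
	}
}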
 
+void __enable_irq(struct irq_desc *desc)
+{
        switch (desc->depth) {
        case 0:
  err_out:
-               WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
+               WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
+                    irq_desc_get_irq(desc));
                break;
        case 1: {
                if (desc->istate & IRQS_SUSPENDED)
@@ -442,7 +518,7 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
                /* Prevent probing on this irq: */
                irq_settings_set_noprobe(desc);
                irq_enable(desc);
-               check_irq_resend(desc, irq);
+               check_irq_resend(desc);
                /* fall-through */
        }
        default:
@@ -472,7 +548,7 @@ void enable_irq(unsigned int irq)
                 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
                goto out;
 
-       __enable_irq(desc, irq, false);
+       __enable_irq(desc);
 out:
        irq_put_desc_busunlock(desc, flags);
 }
@@ -555,16 +631,15 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
                return 0;
 
        if (irq_settings_can_request(desc)) {
-               if (desc->action)
-                       if (irqflags & desc->action->flags & IRQF_SHARED)
-                               canrequest =1;
+               if (!desc->action ||
+                   irqflags & desc->action->flags & IRQF_SHARED)
+                       canrequest = 1;
        }
        irq_put_desc_unlock(desc, flags);
        return canrequest;
 }
 
-int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
-                     unsigned long flags)
+int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
 {
        struct irq_chip *chip = desc->irq_data.chip;
        int ret, unmask = 0;
@@ -574,7 +649,8 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                 * IRQF_TRIGGER_* but the PIC does not support multiple
                 * flow-types?
                 */
-               pr_debug("No set_type function for IRQ %d (%s)\n", irq,
+               pr_debug("No set_type function for IRQ %d (%s)\n",
+                        irq_desc_get_irq(desc),
                         chip ? (chip->name ? : "unknown") : "unknown");
                return 0;
        }
@@ -593,6 +669,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 
        switch (ret) {
        case IRQ_SET_MASK_OK:
+       case IRQ_SET_MASK_OK_DONE:
                irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
                irqd_set(&desc->irq_data, flags);
 
@@ -610,7 +687,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                break;
        default:
                pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
-                      flags, irq, chip->irq_set_type);
+                      flags, irq_desc_get_irq(desc), chip->irq_set_type);
        }
        if (unmask)
                unmask_irq(desc);
@@ -653,6 +730,12 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
        return IRQ_NONE;
 }
 
+static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
+{
+       WARN(1, "Secondary action handler called for irq %d\n", irq);
+       return IRQ_NONE;
+}
+
 static int irq_wait_for_interrupt(struct irqaction *action)
 {
        set_current_state(TASK_INTERRUPTIBLE);
@@ -679,7 +762,8 @@ static int irq_wait_for_interrupt(struct irqaction *action)
 static void irq_finalize_oneshot(struct irq_desc *desc,
                                 struct irqaction *action)
 {
-       if (!(desc->istate & IRQS_ONESHOT))
+       if (!(desc->istate & IRQS_ONESHOT) ||
+           action->handler == irq_forced_secondary_handler)
                return;
 again:
        chip_bus_lock(desc);
@@ -718,7 +802,7 @@ again:
 
        if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
            irqd_irq_masked(&desc->irq_data))
-               unmask_irq(desc);
+               unmask_threaded_irq(desc);
 
 out_unlock:
        raw_spin_unlock_irq(&desc->lock);
@@ -727,7 +811,7 @@ out_unlock:
 
 #ifdef CONFIG_SMP
 /*
- * Check whether we need to chasnge the affinity of the interrupt thread.
+ * Check whether we need to change the affinity of the interrupt thread.
  */
 static void
 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
@@ -752,8 +836,8 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
         * This code is triggered unconditionally. Check the affinity
         * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
         */
-       if (desc->irq_data.affinity)
-               cpumask_copy(mask, desc->irq_data.affinity);
+       if (desc->irq_common_data.affinity)
+               cpumask_copy(mask, desc->irq_common_data.affinity);
        else
                valid = false;
        raw_spin_unlock_irq(&desc->lock);
@@ -786,7 +870,7 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
 }
 
 /*
- * Interrupts explicitely requested as threaded interupts want to be
+ * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
  * complete.
  */
@@ -802,8 +886,7 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
 
 static void wake_threads_waitq(struct irq_desc *desc)
 {
-       if (atomic_dec_and_test(&desc->threads_active) &&
-           waitqueue_active(&desc->wait_for_threads))
+       if (atomic_dec_and_test(&desc->threads_active))
                wake_up(&desc->wait_for_threads);
 }
 
@@ -834,15 +917,24 @@ static void irq_thread_dtor(struct callback_head *unused)
        irq_finalize_oneshot(desc, action);
 }
 
+static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
+{
+       struct irqaction *secondary = action->secondary;
+
+       if (WARN_ON_ONCE(!secondary))
+               return;
+
+       raw_spin_lock_irq(&desc->lock);
+       __irq_wake_thread(desc, secondary);
+       raw_spin_unlock_irq(&desc->lock);
+}
+
 /*
  * Interrupt handler thread
  */
 static int irq_thread(void *data)
 {
        struct callback_head on_exit_work;
-       static const struct sched_param param = {
-               .sched_priority = MAX_USER_RT_PRIO/2,
-       };
        struct irqaction *action = data;
        struct irq_desc *desc = irq_to_desc(action->irq);
        irqreturn_t (*handler_fn)(struct irq_desc *desc,
@@ -854,8 +946,6 @@ static int irq_thread(void *data)
        else
                handler_fn = irq_thread_fn;
 
-       sched_setscheduler(current, SCHED_FIFO, &param);
-
        init_task_work(&on_exit_work, irq_thread_dtor);
        task_work_add(current, &on_exit_work, false);
 
@@ -867,8 +957,10 @@ static int irq_thread(void *data)
                irq_thread_check_affinity(desc, action);
 
                action_ret = handler_fn(desc, action);
-               if (!noirqdebug)
-                       note_interrupt(action->irq, desc, action_ret);
+               if (action_ret == IRQ_HANDLED)
+                       atomic_inc(&desc->threads_handled);
+               if (action_ret == IRQ_WAKE_THREAD)
+                       irq_wake_secondary(desc, action);
 
                wake_threads_waitq(desc);
        }
@@ -886,20 +978,122 @@ static int irq_thread(void *data)
        return 0;
 }
 
-static void irq_setup_forced_threading(struct irqaction *new)
+/**
+ *     irq_wake_thread - wake the irq thread for the action identified by dev_id
+ *     @irq:           Interrupt line
+ *     @dev_id:        Device identity for which the thread should be woken
+ *
+ */
+void irq_wake_thread(unsigned int irq, void *dev_id)
 {
-       if (!force_irqthreads)
+       struct irq_desc *desc = irq_to_desc(irq);
+       struct irqaction *action;
+       unsigned long flags;
+
+       if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
                return;
+
+       raw_spin_lock_irqsave(&desc->lock, flags);
+       for (action = desc->action; action; action = action->next) {
+               if (action->dev_id == dev_id) {
+                       if (action->thread)
+                               __irq_wake_thread(desc, action);
+                       break;
+               }
+       }
+       raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+EXPORT_SYMBOL_GPL(irq_wake_thread);
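
This gives drivers a way to kick their own threaded handler from another context, for example from a watchdog that spots stuck hardware. A sketch with invented helpers (timer callback signature as of this kernel generation):

static void foo_watchdog(unsigned long data)
{
	struct foo_dev *foo = (struct foo_dev *)data;

	if (foo_hw_stuck(foo))			/* hypothetical check */
		irq_wake_thread(foo->irq, foo);	/* dev_id must match the
						 * one used at request time */
}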
+
+static int irq_setup_forced_threading(struct irqaction *new)
+{
+       if (!force_irqthreads)
+               return 0;
        if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
-               return;
+               return 0;
 
        new->flags |= IRQF_ONESHOT;
 
-       if (!new->thread_fn) {
-               set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
-               new->thread_fn = new->handler;
-               new->handler = irq_default_primary_handler;
+       /*
+        * Handle the case where we have a real primary handler and a
+        * thread handler. We force thread them as well by creating a
+        * secondary action.
+        */
+       if (new->handler != irq_default_primary_handler && new->thread_fn) {
+               /* Allocate the secondary action */
+               new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+               if (!new->secondary)
+                       return -ENOMEM;
+               new->secondary->handler = irq_forced_secondary_handler;
+               new->secondary->thread_fn = new->thread_fn;
+               new->secondary->dev_id = new->dev_id;
+               new->secondary->irq = new->irq;
+               new->secondary->name = new->name;
        }
+       /* Deal with the primary handler */
+       set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
+       new->thread_fn = new->handler;
+       new->handler = irq_default_primary_handler;
+       return 0;
+}
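
The effect, sketched with invented handlers: a request that supplies both a real primary handler and a thread function is now fully threaded under the "threadirqs" option, with the primary handler moved into the main irq thread and the thread function into the new secondary one.

static irqreturn_t foo_quick(int irq, void *dev_id)
{
	foo_ack_and_mask(dev_id);	/* hypothetical, non-sleeping */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_slow(int irq, void *dev_id)
{
	foo_process(dev_id);		/* may sleep */
	return IRQ_HANDLED;
}

static int foo_request(struct foo_dev *foo)
{
	/*
	 * With "threadirqs", foo_quick runs in "irq/N-foo" and
	 * foo_slow in the lower-priority "irq/N-s-foo" thread.
	 */
	return request_threaded_irq(foo->irq, foo_quick, foo_slow, 0,
				    "foo", foo);
}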
+
+static int irq_request_resources(struct irq_desc *desc)
+{
+       struct irq_data *d = &desc->irq_data;
+       struct irq_chip *c = d->chip;
+
+       return c->irq_request_resources ? c->irq_request_resources(d) : 0;
+}
+
+static void irq_release_resources(struct irq_desc *desc)
+{
+       struct irq_data *d = &desc->irq_data;
+       struct irq_chip *c = d->chip;
+
+       if (c->irq_release_resources)
+               c->irq_release_resources(d);
+}
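
A typical user of these hooks is a GPIO-based irqchip that must claim the pin before it can be used as an interrupt; a sketch along those lines (the chip wiring is simplified and the names invented):

static int foo_gpio_irq_request_resources(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	/* Fails if the line is already in use as a plain GPIO output. */
	return gpiochip_lock_as_irq(gc, irqd_to_hwirq(d));
}

static void foo_gpio_irq_release_resources(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	gpiochip_unlock_as_irq(gc, irqd_to_hwirq(d));
}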
+
+static int
+setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
+{
+       struct task_struct *t;
+       struct sched_param param = {
+               .sched_priority = MAX_USER_RT_PRIO/2,
+       };
+
+       if (!secondary) {
+               t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
+                                  new->name);
+       } else {
+               t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
+                                  new->name);
+               param.sched_priority -= 1;
+       }
+
+       if (IS_ERR(t))
+               return PTR_ERR(t);
+
+       sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
+
+       /*
+        * We keep the reference to the task struct even if
+        * the thread dies to avoid that the interrupt code
+        * references an already freed task_struct.
+        */
+       get_task_struct(t);
+       new->thread = t;
+       /*
+        * Tell the thread to set its affinity. This is
+        * important for shared interrupt handlers as we do
+        * not invoke setup_affinity() for the secondary
+        * handlers as everything is already set up. Even for
+        * interrupts marked with IRQF_NO_BALANCE this is
+        * correct as we want the thread to move to the cpu(s)
+        * on which the requesting code placed the interrupt.
+        */
+       set_bit(IRQTF_AFFINITY, &new->thread_flags);
+       return 0;
 }
 
 /*
@@ -922,6 +1116,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
        if (!try_module_get(desc->owner))
                return -ENODEV;
 
+       new->irq = irq;
+
        /*
         * Check whether the interrupt nests into another interrupt
         * thread.
@@ -939,8 +1135,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                 */
                new->handler = irq_nested_primary_handler;
        } else {
-               if (irq_settings_can_thread(desc))
-                       irq_setup_forced_threading(new);
+               if (irq_settings_can_thread(desc)) {
+                       ret = irq_setup_forced_threading(new);
+                       if (ret)
+                               goto out_mput;
+               }
        }
 
        /*
@@ -949,31 +1148,14 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
         * thread.
         */
        if (new->thread_fn && !nested) {
-               struct task_struct *t;
-
-               t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
-                                  new->name);
-               if (IS_ERR(t)) {
-                       ret = PTR_ERR(t);
+               ret = setup_irq_thread(new, irq, false);
+               if (ret)
                        goto out_mput;
+               if (new->secondary) {
+                       ret = setup_irq_thread(new->secondary, irq, true);
+                       if (ret)
+                               goto out_thread;
                }
-               /*
-                * We keep the reference to the task struct even if
-                * the thread dies to avoid that the interrupt code
-                * references an already freed task_struct.
-                */
-               get_task_struct(t);
-               new->thread = t;
-               /*
-                * Tell the thread to set its affinity. This is
-                * important for shared interrupt handlers as we do
-                * not invoke setup_affinity() for the secondary
-                * handlers as everything is already set up. Even for
-                * interrupts marked with IRQF_NO_BALANCE this is
-                * correct as we want the thread to move to the cpu(s)
-                * on which the requesting code placed the interrupt.
-                */
-               set_bit(IRQTF_AFFINITY, &new->thread_flags);
        }
 
        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
@@ -1091,12 +1273,19 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
        }
 
        if (!shared) {
+               ret = irq_request_resources(desc);
+               if (ret) {
+                       pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
+                              new->name, irq, desc->irq_data.chip->name);
+                       goto out_mask;
+               }
+
                init_waitqueue_head(&desc->wait_for_threads);
 
                /* Setup the type (level, edge polarity) if configured: */
                if (new->flags & IRQF_TRIGGER_MASK) {
-                       ret = __irq_set_trigger(desc, irq,
-                                       new->flags & IRQF_TRIGGER_MASK);
+                       ret = __irq_set_trigger(desc,
+                                               new->flags & IRQF_TRIGGER_MASK);
 
                        if (ret)
                                goto out_mask;
@@ -1127,7 +1316,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                }
 
                /* Set default affinity mask once everything is setup */
-               setup_affinity(irq, desc, mask);
+               setup_affinity(desc, mask);
 
        } else if (new->flags & IRQF_TRIGGER_MASK) {
                unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
@@ -1139,9 +1328,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                                   irq, nmsk, omsk);
        }
 
-       new->irq = irq;
        *old_ptr = new;
 
+       irq_pm_install_action(desc, new);
+
        /* Reset broken irq detection when installing new handler */
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
@@ -1152,7 +1342,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
         */
        if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
                desc->istate &= ~IRQS_SPURIOUS_DISABLED;
-               __enable_irq(desc, irq, false);
+               __enable_irq(desc);
        }
 
        raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -1163,6 +1353,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
         */
        if (new->thread)
                wake_up_process(new->thread);
+       if (new->secondary)
+               wake_up_process(new->secondary->thread);
 
        register_irq_proc(irq, desc);
        new->dir = NULL;
@@ -1193,6 +1385,13 @@ out_thread:
                kthread_stop(t);
                put_task_struct(t);
        }
+       if (new->secondary && new->secondary->thread) {
+               struct task_struct *t = new->secondary->thread;
+
+               new->secondary->thread = NULL;
+               kthread_stop(t);
+               put_task_struct(t);
+       }
 out_mput:
        module_put(desc->owner);
        return ret;
@@ -1235,6 +1434,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
        if (!desc)
                return NULL;
 
+       chip_bus_lock(desc);
        raw_spin_lock_irqsave(&desc->lock, flags);
 
        /*
@@ -1248,7 +1448,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
                if (!action) {
                        WARN(1, "Trying to free already-free IRQ %d\n", irq);
                        raw_spin_unlock_irqrestore(&desc->lock, flags);
-
+                       chip_bus_sync_unlock(desc);
                        return NULL;
                }
 
@@ -1260,9 +1460,14 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
        /* Found it - now remove it from the list of entries: */
        *action_ptr = action->next;
 
+       irq_pm_remove_action(desc, action);
+
        /* If this was the last handler, shut down the IRQ line: */
-       if (!desc->action)
+       if (!desc->action) {
+               irq_settings_clr_disable_unlazy(desc);
                irq_shutdown(desc);
+               irq_release_resources(desc);
+       }
 
 #ifdef CONFIG_SMP
        /* make sure affinity_hint is cleaned up */
@@ -1271,6 +1476,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 #endif
 
        raw_spin_unlock_irqrestore(&desc->lock, flags);
+       chip_bus_sync_unlock(desc);
 
        unregister_handler_proc(irq, action);
 
@@ -1296,9 +1502,14 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
        if (action->thread) {
                kthread_stop(action->thread);
                put_task_struct(action->thread);
+               if (action->secondary && action->secondary->thread) {
+                       kthread_stop(action->secondary->thread);
+                       put_task_struct(action->secondary->thread);
+               }
        }
 
        module_put(desc->owner);
+       kfree(action->secondary);
        return action;
 }
 
@@ -1344,9 +1555,7 @@ void free_irq(unsigned int irq, void *dev_id)
                desc->affinity_notify = NULL;
 #endif
 
-       chip_bus_lock(desc);
        kfree(__free_irq(irq, dev_id));
-       chip_bus_sync_unlock(desc);
 }
 EXPORT_SYMBOL(free_irq);
 
@@ -1405,8 +1614,13 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
         * otherwise we'll have trouble later trying to figure out
         * which interrupt is which (messes up the interrupt freeing
         * logic etc).
+        *
+        * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
+        * it cannot be set along with IRQF_NO_SUSPEND.
         */
-       if ((irqflags & IRQF_SHARED) && !dev_id)
+       if (((irqflags & IRQF_SHARED) && !dev_id) ||
+           (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
+           ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
                return -EINVAL;
 
        desc = irq_to_desc(irq);
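
In short, IRQF_COND_SUSPEND is only accepted on shared lines and never together with IRQF_NO_SUSPEND. A sketch of what now passes and what fails (foo_isr is hypothetical):

	/* Valid: shared line whose handler copes with being called
	 * while suspending (paired with an IRQF_NO_SUSPEND user). */
	err = request_irq(irq, foo_isr,
			  IRQF_SHARED | IRQF_COND_SUSPEND, "foo", foo);

	/* Both rejected with -EINVAL by the check above:
	 *   request_irq(irq, foo_isr, IRQF_COND_SUSPEND, "foo", foo);
	 *   request_irq(irq, foo_isr,
	 *		 IRQF_NO_SUSPEND | IRQF_COND_SUSPEND, "foo", foo);
	 */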
@@ -1437,8 +1651,10 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
        retval = __setup_irq(irq, desc, action);
        chip_bus_sync_unlock(desc);
 
-       if (retval)
+       if (retval) {
+               kfree(action->secondary);
                kfree(action);
+       }
 
 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
        if (!retval && (irqflags & IRQF_SHARED)) {
@@ -1513,7 +1729,7 @@ void enable_percpu_irq(unsigned int irq, unsigned int type)
        if (type != IRQ_TYPE_NONE) {
                int ret;
 
-               ret = __irq_set_trigger(desc, irq, type);
+               ret = __irq_set_trigger(desc, type);
 
                if (ret) {
                        WARN(1, "failed to set type for IRQ%d\n", irq);
@@ -1622,6 +1838,7 @@ void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
        kfree(__free_percpu_irq(irq, dev_id));
        chip_bus_sync_unlock(desc);
 }
+EXPORT_SYMBOL_GPL(free_percpu_irq);
 
 /**
  *     setup_percpu_irq - setup a per-cpu interrupt
@@ -1651,9 +1868,10 @@ int setup_percpu_irq(unsigned int irq, struct irqaction *act)
  *     @devname: An ascii name for the claiming device
  *     @dev_id: A percpu cookie passed back to the handler function
  *
- *     This call allocates interrupt resources, but doesn't
- *     automatically enable the interrupt. It has to be done on each
- *     CPU using enable_percpu_irq().
+ *     This call allocates interrupt resources and enables the
+ *     interrupt on the local CPU. If the interrupt is supposed to be
+ *     enabled on other CPUs, it has to be done on each CPU using
+ *     enable_percpu_irq().
  *
  *     Dev_id must be globally unique. It is a per-cpu variable, and
  *     the handler gets called with the interrupted CPU's instance of
@@ -1692,3 +1910,97 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler,
 
        return retval;
 }
+EXPORT_SYMBOL_GPL(request_percpu_irq);
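
Per the updated kerneldoc, only the requesting CPU ends up enabled; a sketch of bringing the interrupt up everywhere else (foo_irq and the handler are invented; CPU hotplug handling is omitted, and re-enabling the local CPU is assumed harmless here):

static DEFINE_PER_CPU(int, foo_evt);
static unsigned int foo_irq;		/* hypothetical, e.g. from DT */

static irqreturn_t foo_pcpu_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;		/* dev_id: this CPU's foo_evt */
}

static void foo_enable_on_cpu(void *info)
{
	enable_percpu_irq(foo_irq, IRQ_TYPE_NONE);
}

static int foo_init(void)
{
	int err = request_percpu_irq(foo_irq, foo_pcpu_isr, "foo-evt",
				     &foo_evt);
	if (err)
		return err;
	on_each_cpu(foo_enable_on_cpu, NULL, 1);
	return 0;
}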
+
+/**
+ *     irq_get_irqchip_state - returns the irqchip state of an interrupt.
+ *     @irq: Interrupt line that is forwarded to a VM
+ *     @which: One of IRQCHIP_STATE_* the caller wants to know about
+ *     @state: a pointer to a boolean where the state is to be stored
+ *
+ *     This call snapshots the internal irqchip state of an
+ *     interrupt, returning into @state the bit corresponding to
+ *     state @which.
+ *
+ *     This function should be called with preemption disabled if the
+ *     interrupt controller has per-cpu registers.
+ */
+int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
+                         bool *state)
+{
+       struct irq_desc *desc;
+       struct irq_data *data;
+       struct irq_chip *chip;
+       unsigned long flags;
+       int err = -EINVAL;
+
+       desc = irq_get_desc_buslock(irq, &flags, 0);
+       if (!desc)
+               return err;
+
+       data = irq_desc_get_irq_data(desc);
+
+       do {
+               chip = irq_data_get_irq_chip(data);
+               if (chip->irq_get_irqchip_state)
+                       break;
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+               data = data->parent_data;
+#else
+               data = NULL;
+#endif
+       } while (data);
+
+       if (data)
+               err = chip->irq_get_irqchip_state(data, which, state);
+
+       irq_put_desc_busunlock(desc, flags);
+       return err;
+}
+EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
+
+/**
+ *     irq_set_irqchip_state - set the state of a forwarded interrupt.
+ *     @irq: Interrupt line that is forwarded to a VM
+ *     @which: State to be restored (one of IRQCHIP_STATE_*)
+ *     @val: Value corresponding to @which
+ *
+ *     This call sets the internal irqchip state of an interrupt,
+ *     depending on the value of @which.
+ *
+ *     This function should be called with preemption disabled if the
+ *     interrupt controller has per-cpu registers.
+ */
+int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
+                         bool val)
+{
+       struct irq_desc *desc;
+       struct irq_data *data;
+       struct irq_chip *chip;
+       unsigned long flags;
+       int err = -EINVAL;
+
+       desc = irq_get_desc_buslock(irq, &flags, 0);
+       if (!desc)
+               return err;
+
+       data = irq_desc_get_irq_data(desc);
+
+       do {
+               chip = irq_data_get_irq_chip(data);
+               if (chip->irq_set_irqchip_state)
+                       break;
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+               data = data->parent_data;
+#else
+               data = NULL;
+#endif
+       } while (data);
+
+       if (data)
+               err = chip->irq_set_irqchip_state(data, which, val);
+
+       irq_put_desc_busunlock(desc, flags);
+       return err;
+}
+EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
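
Taken together, the two calls let a hypervisor-style user save and restore the hardware pending state of a line it forwards to a guest. A sketch with invented wrappers:

static int vm_irq_save(unsigned int host_irq, bool *was_pending)
{
	/* Snapshot the pending bit before handing the line to the VM. */
	return irq_get_irqchip_state(host_irq, IRQCHIP_STATE_PENDING,
				     was_pending);
}

static int vm_irq_restore(unsigned int host_irq, bool was_pending)
{
	/* Re-inject the saved state when taking the line back. */
	return irq_set_irqchip_state(host_irq, IRQCHIP_STATE_PENDING,
				     was_pending);
}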