Merge branch 'x86/asm' into x86/apic, to resolve a conflict
[firefly-linux-kernel-4.4.55.git] / arch / x86 / kernel / apic / vector.c
index d0e5ea0fb9475ccb421cd1f344c150b8fa405739..2766747e1a3b316d65be99b0d3f6feb4a021a533 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/compiler.h>
-#include <linux/irqdomain.h>
 #include <linux/slab.h>
+#include <asm/irqdomain.h>
 #include <asm/hw_irq.h>
 #include <asm/apic.h>
 #include <asm/i8259.h>
 #include <asm/desc.h>
 #include <asm/irq_remapping.h>
 
+struct apic_chip_data {
+       struct irq_cfg          cfg;
+       cpumask_var_t           domain;
+       cpumask_var_t           old_domain;
+       u8                      move_in_progress : 1;
+};
+
 struct irq_domain *x86_vector_domain;
 static DEFINE_RAW_SPINLOCK(vector_lock);
+static cpumask_var_t vector_cpumask;
 static struct irq_chip lapic_controller;
 #ifdef CONFIG_X86_IO_APIC
-static struct irq_cfg *legacy_irq_cfgs[NR_IRQS_LEGACY];
+static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
 #endif
 
 void lock_vector_lock(void)
@@ -41,12 +49,7 @@ void unlock_vector_lock(void)
        raw_spin_unlock(&vector_lock);
 }
 
-struct irq_cfg *irq_cfg(unsigned int irq)
-{
-       return irqd_cfg(irq_get_irq_data(irq));
-}
-
-struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
+static struct apic_chip_data *apic_chip_data(struct irq_data *irq_data)
 {
        if (!irq_data)
                return NULL;
@@ -57,60 +60,48 @@ struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
        return irq_data->chip_data;
 }
 
-static struct irq_cfg *alloc_irq_cfg(int node)
+struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
 {
-       struct irq_cfg *cfg;
+       struct apic_chip_data *data = apic_chip_data(irq_data);
 
-       cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
-       if (!cfg)
-               return NULL;
-       if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
-               goto out_cfg;
-       if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
-               goto out_domain;
-#ifdef CONFIG_X86_IO_APIC
-       INIT_LIST_HEAD(&cfg->irq_2_pin);
-#endif
-       return cfg;
-out_domain:
-       free_cpumask_var(cfg->domain);
-out_cfg:
-       kfree(cfg);
-       return NULL;
+       return data ? &data->cfg : NULL;
 }
 
-struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
+struct irq_cfg *irq_cfg(unsigned int irq)
 {
-       int res = irq_alloc_desc_at(at, node);
-       struct irq_cfg *cfg;
+       return irqd_cfg(irq_get_irq_data(irq));
+}
 
-       if (res < 0) {
-               if (res != -EEXIST)
-                       return NULL;
-               cfg = irq_cfg(at);
-               if (cfg)
-                       return cfg;
-       }
+static struct apic_chip_data *alloc_apic_chip_data(int node)
+{
+       struct apic_chip_data *data;
 
-       cfg = alloc_irq_cfg(node);
-       if (cfg)
-               irq_set_chip_data(at, cfg);
-       else
-               irq_free_desc(at);
-       return cfg;
+       data = kzalloc_node(sizeof(*data), GFP_KERNEL, node);
+       if (!data)
+               return NULL;
+       if (!zalloc_cpumask_var_node(&data->domain, GFP_KERNEL, node))
+               goto out_data;
+       if (!zalloc_cpumask_var_node(&data->old_domain, GFP_KERNEL, node))
+               goto out_domain;
+       return data;
+out_domain:
+       free_cpumask_var(data->domain);
+out_data:
+       kfree(data);
+       return NULL;
 }
 
-static void free_irq_cfg(struct irq_cfg *cfg)
+static void free_apic_chip_data(struct apic_chip_data *data)
 {
-       if (cfg) {
-               free_cpumask_var(cfg->domain);
-               free_cpumask_var(cfg->old_domain);
-               kfree(cfg);
+       if (data) {
+               free_cpumask_var(data->domain);
+               free_cpumask_var(data->old_domain);
+               kfree(data);
        }
 }
 
-static int
-__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
+static int __assign_irq_vector(int irq, struct apic_chip_data *d,
+                              const struct cpumask *mask)
 {
        /*
         * NOTE! The local APIC isn't very good at handling
@@ -126,36 +117,33 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
        static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
        static int current_offset = VECTOR_OFFSET_START % 16;
        int cpu, err;
-       cpumask_var_t tmp_mask;
 
-       if (cfg->move_in_progress)
+       if (d->move_in_progress)
                return -EBUSY;
 
-       if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
-               return -ENOMEM;
-
        /* Only try and allocate irqs on cpus that are present */
        err = -ENOSPC;
-       cpumask_clear(cfg->old_domain);
+       cpumask_clear(d->old_domain);
        cpu = cpumask_first_and(mask, cpu_online_mask);
        while (cpu < nr_cpu_ids) {
                int new_cpu, vector, offset;
 
-               apic->vector_allocation_domain(cpu, tmp_mask, mask);
+               apic->vector_allocation_domain(cpu, vector_cpumask, mask);
 
-               if (cpumask_subset(tmp_mask, cfg->domain)) {
+               if (cpumask_subset(vector_cpumask, d->domain)) {
                        err = 0;
-                       if (cpumask_equal(tmp_mask, cfg->domain))
+                       if (cpumask_equal(vector_cpumask, d->domain))
                                break;
                        /*
                         * New cpumask using the vector is a proper subset of
                         * the current in use mask. So cleanup the vector
                         * allocation for the members that are not used anymore.
                         */
-                       cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
-                       cfg->move_in_progress =
-                          cpumask_intersects(cfg->old_domain, cpu_online_mask);
-                       cpumask_and(cfg->domain, cfg->domain, tmp_mask);
+                       cpumask_andnot(d->old_domain, d->domain,
+                                      vector_cpumask);
+                       d->move_in_progress =
+                          cpumask_intersects(d->old_domain, cpu_online_mask);
+                       cpumask_and(d->domain, d->domain, vector_cpumask);
                        break;
                }
 
@@ -169,16 +157,18 @@ next:
                }
 
                if (unlikely(current_vector == vector)) {
-                       cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
-                       cpumask_andnot(tmp_mask, mask, cfg->old_domain);
-                       cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
+                       cpumask_or(d->old_domain, d->old_domain,
+                                  vector_cpumask);
+                       cpumask_andnot(vector_cpumask, mask, d->old_domain);
+                       cpu = cpumask_first_and(vector_cpumask,
+                                               cpu_online_mask);
                        continue;
                }
 
                if (test_bit(vector, used_vectors))
                        goto next;
 
-               for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
+               for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
                        if (per_cpu(vector_irq, new_cpu)[vector] >
                            VECTOR_UNDEFINED)
                                goto next;
@@ -186,61 +176,61 @@ next:
                /* Found one! */
                current_vector = vector;
                current_offset = offset;
-               if (cfg->vector) {
-                       cpumask_copy(cfg->old_domain, cfg->domain);
-                       cfg->move_in_progress =
-                          cpumask_intersects(cfg->old_domain, cpu_online_mask);
+               if (d->cfg.vector) {
+                       cpumask_copy(d->old_domain, d->domain);
+                       d->move_in_progress =
+                          cpumask_intersects(d->old_domain, cpu_online_mask);
                }
-               for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
+               for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
                        per_cpu(vector_irq, new_cpu)[vector] = irq;
-               cfg->vector = vector;
-               cpumask_copy(cfg->domain, tmp_mask);
+               d->cfg.vector = vector;
+               cpumask_copy(d->domain, vector_cpumask);
                err = 0;
                break;
        }
-       free_cpumask_var(tmp_mask);
 
        if (!err) {
                /* cache destination APIC IDs into cfg->dest_apicid */
-               err = apic->cpu_mask_to_apicid_and(mask, cfg->domain,
-                                                  &cfg->dest_apicid);
+               err = apic->cpu_mask_to_apicid_and(mask, d->domain,
+                                                  &d->cfg.dest_apicid);
        }
 
        return err;
 }
 
-int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
+static int assign_irq_vector(int irq, struct apic_chip_data *data,
+                            const struct cpumask *mask)
 {
        int err;
        unsigned long flags;
 
        raw_spin_lock_irqsave(&vector_lock, flags);
-       err = __assign_irq_vector(irq, cfg, mask);
+       err = __assign_irq_vector(irq, data, mask);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return err;
 }
 
-void clear_irq_vector(int irq, struct irq_cfg *cfg)
+static void clear_irq_vector(int irq, struct apic_chip_data *data)
 {
        int cpu, vector;
        unsigned long flags;
 
        raw_spin_lock_irqsave(&vector_lock, flags);
-       BUG_ON(!cfg->vector);
+       BUG_ON(!data->cfg.vector);
 
-       vector = cfg->vector;
-       for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
+       vector = data->cfg.vector;
+       for_each_cpu_and(cpu, data->domain, cpu_online_mask)
                per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
 
-       cfg->vector = 0;
-       cpumask_clear(cfg->domain);
+       data->cfg.vector = 0;
+       cpumask_clear(data->domain);
 
-       if (likely(!cfg->move_in_progress)) {
+       if (likely(!data->move_in_progress)) {
                raw_spin_unlock_irqrestore(&vector_lock, flags);
                return;
        }
 
-       for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
+       for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
                for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
                     vector++) {
                        if (per_cpu(vector_irq, cpu)[vector] != irq)
@@ -249,7 +239,7 @@ void clear_irq_vector(int irq, struct irq_cfg *cfg)
                        break;
                }
        }
-       cfg->move_in_progress = 0;
+       data->move_in_progress = 0;
        raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
@@ -283,12 +273,11 @@ static void x86_vector_free_irqs(struct irq_domain *domain,
        for (i = 0; i < nr_irqs; i++) {
                irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
                if (irq_data && irq_data->chip_data) {
-                       free_remapped_irq(virq);
                        clear_irq_vector(virq + i, irq_data->chip_data);
-                       free_irq_cfg(irq_data->chip_data);
+                       free_apic_chip_data(irq_data->chip_data);
 #ifdef CONFIG_X86_IO_APIC
                        if (virq + i < nr_legacy_irqs())
-                               legacy_irq_cfgs[virq + i] = NULL;
+                               legacy_irq_data[virq + i] = NULL;
 #endif
                        irq_domain_reset_irq_data(irq_data);
                }
@@ -299,9 +288,9 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
                                 unsigned int nr_irqs, void *arg)
 {
        struct irq_alloc_info *info = arg;
+       struct apic_chip_data *data;
        const struct cpumask *mask;
        struct irq_data *irq_data;
-       struct irq_cfg *cfg;
        int i, err;
 
        if (disable_apic)
@@ -316,20 +305,20 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
                irq_data = irq_domain_get_irq_data(domain, virq + i);
                BUG_ON(!irq_data);
 #ifdef CONFIG_X86_IO_APIC
-               if (virq + i < nr_legacy_irqs() && legacy_irq_cfgs[virq + i])
-                       cfg = legacy_irq_cfgs[virq + i];
+               if (virq + i < nr_legacy_irqs() && legacy_irq_data[virq + i])
+                       data = legacy_irq_data[virq + i];
                else
 #endif
-                       cfg = alloc_irq_cfg(irq_data->node);
-               if (!cfg) {
+                       data = alloc_apic_chip_data(irq_data->node);
+               if (!data) {
                        err = -ENOMEM;
                        goto error;
                }
 
                irq_data->chip = &lapic_controller;
-               irq_data->chip_data = cfg;
+               irq_data->chip_data = data;
                irq_data->hwirq = virq + i;
-               err = assign_irq_vector(virq, cfg, mask);
+               err = assign_irq_vector(virq + i, data, mask);
                if (err)
                        goto error;
        }
@@ -341,9 +330,9 @@ error:
        return err;
 }
 
-static struct irq_domain_ops x86_vector_domain_ops = {
-       .alloc = x86_vector_alloc_irqs,
-       .free = x86_vector_free_irqs,
+static const struct irq_domain_ops x86_vector_domain_ops = {
+       .alloc  = x86_vector_alloc_irqs,
+       .free   = x86_vector_free_irqs,
 };
 
 int __init arch_probe_nr_irqs(void)
@@ -373,22 +362,19 @@ int __init arch_probe_nr_irqs(void)
 static void init_legacy_irqs(void)
 {
        int i, node = cpu_to_node(0);
-       struct irq_cfg *cfg;
+       struct apic_chip_data *data;
 
        /*
         * For legacy IRQ's, start with assigning irq0 to irq15 to
-        * IRQ0_VECTOR to IRQ15_VECTOR for all cpu's.
+        * ISA_IRQ_VECTOR(i) for all cpu's.
         */
        for (i = 0; i < nr_legacy_irqs(); i++) {
-               cfg = legacy_irq_cfgs[i] = alloc_irq_cfg(node);
-               BUG_ON(!cfg);
-               /*
-                * For legacy IRQ's, start with assigning irq0 to irq15 to
-                * IRQ0_VECTOR to IRQ15_VECTOR for all cpu's.
-                */
-               cfg->vector = IRQ0_VECTOR + i;
-               cpumask_setall(cfg->domain);
-               irq_set_chip_data(i, cfg);
+               data = legacy_irq_data[i] = alloc_apic_chip_data(node);
+               BUG_ON(!data);
+
+               data->cfg.vector = ISA_IRQ_VECTOR(i);
+               cpumask_setall(data->domain);
+               irq_set_chip_data(i, data);
        }
 }
 #else
@@ -407,6 +393,8 @@ int __init arch_early_irq_init(void)
        arch_init_msi_domain(x86_vector_domain);
        arch_init_htirq_domain(x86_vector_domain);
 
+       BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
+
        return arch_early_ioapic_init();
 }
 
@@ -414,7 +402,7 @@ static void __setup_vector_irq(int cpu)
 {
        /* Initialize vector_irq on a new cpu */
        int irq, vector;
-       struct irq_cfg *cfg;
+       struct apic_chip_data *data;
 
        /*
         * vector_lock will make sure that we don't run into irq vector
@@ -424,13 +412,13 @@ static void __setup_vector_irq(int cpu)
        raw_spin_lock(&vector_lock);
        /* Mark the inuse vectors */
        for_each_active_irq(irq) {
-               cfg = irq_cfg(irq);
-               if (!cfg)
+               data = apic_chip_data(irq_get_irq_data(irq));
+               if (!data)
                        continue;
 
-               if (!cpumask_test_cpu(cpu, cfg->domain))
+               if (!cpumask_test_cpu(cpu, data->domain))
                        continue;
-               vector = cfg->vector;
+               vector = data->cfg.vector;
                per_cpu(vector_irq, cpu)[vector] = irq;
        }
        /* Mark the free vectors */
@@ -439,8 +427,8 @@ static void __setup_vector_irq(int cpu)
                if (irq <= VECTOR_UNDEFINED)
                        continue;
 
-               cfg = irq_cfg(irq);
-               if (!cpumask_test_cpu(cpu, cfg->domain))
+               data = apic_chip_data(irq_get_irq_data(irq));
+               if (!cpumask_test_cpu(cpu, data->domain))
                        per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
        }
        raw_spin_unlock(&vector_lock);
@@ -461,20 +449,20 @@ void setup_vector_irq(int cpu)
         * legacy vector to irq mapping:
         */
        for (irq = 0; irq < nr_legacy_irqs(); irq++)
-               per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;
+               per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq;
 
        __setup_vector_irq(cpu);
 }
 
-int apic_retrigger_irq(struct irq_data *data)
+static int apic_retrigger_irq(struct irq_data *irq_data)
 {
-       struct irq_cfg *cfg = irqd_cfg(data);
+       struct apic_chip_data *data = apic_chip_data(irq_data);
        unsigned long flags;
        int cpu;
 
        raw_spin_lock_irqsave(&vector_lock, flags);
-       cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
-       apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
+       cpu = cpumask_first_and(data->domain, cpu_online_mask);
+       apic->send_IPI_mask(cpumask_of(cpu), data->cfg.vector);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
 
        return 1;
@@ -487,44 +475,10 @@ void apic_ack_edge(struct irq_data *data)
        ack_APIC_irq();
 }
 
-/*
- * Either sets data->affinity to a valid value, and returns
- * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
- * leaves data->affinity untouched.
- */
-int apic_set_affinity(struct irq_data *data, const struct cpumask *mask,
-                     unsigned int *dest_id)
+static int apic_set_affinity(struct irq_data *irq_data,
+                            const struct cpumask *dest, bool force)
 {
-       struct irq_cfg *cfg = irqd_cfg(data);
-       unsigned int irq = data->irq;
-       int err;
-
-       if (!config_enabled(CONFIG_SMP))
-               return -EPERM;
-
-       if (!cpumask_intersects(mask, cpu_online_mask))
-               return -EINVAL;
-
-       err = assign_irq_vector(irq, cfg, mask);
-       if (err)
-               return err;
-
-       err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
-       if (err) {
-               if (assign_irq_vector(irq, cfg, data->affinity))
-                       pr_err("Failed to recover vector for irq %d\n", irq);
-               return err;
-       }
-
-       cpumask_copy(data->affinity, mask);
-
-       return 0;
-}
-
-static int vector_set_affinity(struct irq_data *irq_data,
-                              const struct cpumask *dest, bool force)
-{
-       struct irq_cfg *cfg = irq_data->chip_data;
+       struct apic_chip_data *data = irq_data->chip_data;
        int err, irq = irq_data->irq;
 
        if (!config_enabled(CONFIG_SMP))
@@ -533,11 +487,11 @@ static int vector_set_affinity(struct irq_data *irq_data,
        if (!cpumask_intersects(dest, cpu_online_mask))
                return -EINVAL;
 
-       err = assign_irq_vector(irq, cfg, dest);
+       err = assign_irq_vector(irq, data, dest);
        if (err) {
                struct irq_data *top = irq_get_irq_data(irq);
 
-               if (assign_irq_vector(irq, cfg, top->affinity))
+               if (assign_irq_vector(irq, data, top->affinity))
                        pr_err("Failed to recover vector for irq %d\n", irq);
                return err;
        }
@@ -547,27 +501,36 @@ static int vector_set_affinity(struct irq_data *irq_data,
 
 static struct irq_chip lapic_controller = {
        .irq_ack                = apic_ack_edge,
-       .irq_set_affinity       = vector_set_affinity,
+       .irq_set_affinity       = apic_set_affinity,
        .irq_retrigger          = apic_retrigger_irq,
 };
 
 #ifdef CONFIG_SMP
-void send_cleanup_vector(struct irq_cfg *cfg)
+static void __send_cleanup_vector(struct apic_chip_data *data)
 {
        cpumask_var_t cleanup_mask;
 
        if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
                unsigned int i;
 
-               for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
+               for_each_cpu_and(i, data->old_domain, cpu_online_mask)
                        apic->send_IPI_mask(cpumask_of(i),
                                            IRQ_MOVE_CLEANUP_VECTOR);
        } else {
-               cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
+               cpumask_and(cleanup_mask, data->old_domain, cpu_online_mask);
                apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
                free_cpumask_var(cleanup_mask);
        }
-       cfg->move_in_progress = 0;
+       data->move_in_progress = 0;
+}
+
+void send_cleanup_vector(struct irq_cfg *cfg)
+{
+       struct apic_chip_data *data;
+
+       data = container_of(cfg, struct apic_chip_data, cfg);
+       if (data->move_in_progress)
+               __send_cleanup_vector(data);
 }
 
 asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
@@ -583,7 +546,7 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
                int irq;
                unsigned int irr;
                struct irq_desc *desc;
-               struct irq_cfg *cfg;
+               struct apic_chip_data *data;
 
                irq = __this_cpu_read(vector_irq[vector]);
 
@@ -594,8 +557,8 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
                if (!desc)
                        continue;
 
-               cfg = irq_cfg(irq);
-               if (!cfg)
+               data = apic_chip_data(&desc->irq_data);
+               if (!data)
                        continue;
 
                raw_spin_lock(&desc->lock);
@@ -604,10 +567,11 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
                 * Check if the irq migration is in progress. If so, we
                 * haven't received the cleanup request yet for this irq.
                 */
-               if (cfg->move_in_progress)
+               if (data->move_in_progress)
                        goto unlock;
 
-               if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
+               if (vector == data->cfg.vector &&
+                   cpumask_test_cpu(me, data->domain))
                        goto unlock;
 
                irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
@@ -633,14 +597,15 @@ unlock:
 static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
 {
        unsigned me;
+       struct apic_chip_data *data;
 
-       if (likely(!cfg->move_in_progress))
+       data = container_of(cfg, struct apic_chip_data, cfg);
+       if (likely(!data->move_in_progress))
                return;
 
        me = smp_processor_id();
-
-       if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
-               send_cleanup_vector(cfg);
+       if (vector == data->cfg.vector && cpumask_test_cpu(me, data->domain))
+               __send_cleanup_vector(data);
 }
 
 void irq_complete_move(struct irq_cfg *cfg)
@@ -652,10 +617,8 @@ void irq_force_complete_move(int irq)
 {
        struct irq_cfg *cfg = irq_cfg(irq);
 
-       if (!cfg)
-               return;
-
-       __irq_complete_move(cfg, cfg->vector);
+       if (cfg)
+               __irq_complete_move(cfg, cfg->vector);
 }
 #endif