x86: Remove move_cleanup_count from irq_cfg
author    Suresh Siddha <suresh.b.siddha@intel.com>
          Mon, 26 Oct 2009 22:24:33 +0000 (14:24 -0800)
committer Ingo Molnar <mingo@elte.hu>
          Mon, 2 Nov 2009 14:56:35 +0000 (15:56 +0100)
move_cleanup_count in each irq's irq_cfg keeps track of the
total number of cpus that still need to free the vectors
associated with an irq that has been migrated to a new
destination. As long as this move_cleanup_count is non-zero
(i.e., as long as we haven't freed the vector allocations on
the old destinations) we prevented any further migration of
the irq.

This cleanup count is unnecessary: it is enough to disallow
further migration until the cleanup vector has been sent to
the previous irq destination, and irq_cfg's move_in_progress
already tracks exactly that. All we need to ensure is that the
vector eventually gets freed at the old destination; we don't
need to wait until it has been freed.
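
In short, the assignment gate shrinks from two conditions to
one. Below is a minimal stand-alone C model of that change
(irq_cfg_model and the assign_gate_* helpers are hypothetical
names invented for this sketch; the real fields live in
struct irq_cfg, as in the diff below):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of the two irq_cfg fields the gate looks at. */
    struct irq_cfg_model {
            bool     move_in_progress;
            unsigned move_cleanup_count;    /* removed by this patch */
    };

    /* Old gate: blocked while a move was in flight AND while any
     * old-destination cpu still had to free its stale vector. */
    static int assign_gate_old(const struct irq_cfg_model *cfg)
    {
            return (cfg->move_in_progress || cfg->move_cleanup_count) ? -1 : 0;
    }

    /* New gate: only an in-flight move blocks; stale vectors get
     * freed lazily by the cleanup-vector IPI handler. */
    static int assign_gate_new(const struct irq_cfg_model *cfg)
    {
            return cfg->move_in_progress ? -1 : 0;
    }

    int main(void)
    {
            /* Move already completed, but two old-destination cpus
             * have not yet processed their cleanup IPIs. */
            struct irq_cfg_model cfg = { false, 2 };

            printf("old: %d  new: %d\n",
                   assign_gate_old(&cfg), assign_gate_new(&cfg));
            return 0;
    }

Compiled and run, this prints "old: -1  new: 0": with the
counter gone, the irq can be migrated again as soon as the
cleanup IPIs have been sent, without waiting for every
old-destination cpu to process them.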

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Acked-by: Gary Hade <garyhade@us.ibm.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
LKML-Reference: <20091026230001.752968906@sbs-t61.sc.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/include/asm/hw_irq.h
arch/x86/kernel/apic/io_apic.c

diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 1984ce9a13d28a49ec4243438e20b70412f96d8e..6e124269fd4bea3d52e28a1c977f22ec79fce07e 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -94,7 +94,6 @@ struct irq_cfg {
        struct irq_pin_list     *irq_2_pin;
        cpumask_var_t           domain;
        cpumask_var_t           old_domain;
-       unsigned                move_cleanup_count;
        u8                      vector;
        u8                      move_in_progress : 1;
 };
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index ce16b65cfdcc38a6d0a7ba5d6397f7a129f7e698..e9e5b02c3af255a5a631e2f9d023c79bf5755348 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1161,7 +1161,7 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
        int cpu, err;
        cpumask_var_t tmp_mask;
 
-       if ((cfg->move_in_progress) || cfg->move_cleanup_count)
+       if (cfg->move_in_progress)
                return -EBUSY;
 
        if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
@@ -2234,14 +2234,10 @@ void send_cleanup_vector(struct irq_cfg *cfg)
 
        if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
                unsigned int i;
-               cfg->move_cleanup_count = 0;
-               for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
-                       cfg->move_cleanup_count++;
                for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
                        apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
        } else {
                cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
-               cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
                apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
                free_cpumask_var(cleanup_mask);
        }
@@ -2430,8 +2426,6 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 
                cfg = irq_cfg(irq);
                spin_lock(&desc->lock);
-               if (!cfg->move_cleanup_count)
-                       goto unlock;
 
                if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
                        goto unlock;
@@ -2449,7 +2443,6 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
                        goto unlock;
                }
                __get_cpu_var(vector_irq)[vector] = -1;
-               cfg->move_cleanup_count--;
 unlock:
                spin_unlock(&desc->lock);
        }