]> bbs.cooldavid.org Git - net-next-2.6.git/blobdiff - net/ipv4/netfilter/ip_tables.c
netfilter: {ip,ip6,arp}_tables: don't block bottom half more than necessary
[net-next-2.6.git] / net / ipv4 / netfilter / ip_tables.c
index 3c584a6765b07b49d755b83eb2dfce436c08b101..c439721b165a6369acd1bd2ea1d64d4b0580b1bb 100644 (file)
@@ -884,7 +884,7 @@ get_counters(const struct xt_table_info *t,
        struct ipt_entry *iter;
        unsigned int cpu;
        unsigned int i;
-       unsigned int curcpu;
+       unsigned int curcpu = get_cpu();
 
        /* Instead of clearing (by a previous call to memset())
         * the counters and using adds, we set the counters
@@ -894,14 +894,16 @@ get_counters(const struct xt_table_info *t,
         * if new softirq were to run and call ipt_do_table
         */
        local_bh_disable();
-       curcpu = smp_processor_id();
-
        i = 0;
        xt_entry_foreach(iter, t->entries[curcpu], t->size) {
                SET_COUNTER(counters[i], iter->counters.bcnt,
                            iter->counters.pcnt);
                ++i;
        }
+       local_bh_enable();
+       /* While processing counters from the other cpus, we can leave
+        * bottom halves enabled, since preemption is still disabled.
+        */
 
        for_each_possible_cpu(cpu) {
                if (cpu == curcpu)
@@ -915,7 +917,7 @@ get_counters(const struct xt_table_info *t,
                }
                xt_info_wrunlock(cpu);
        }
-       local_bh_enable();
+       put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)