bbs.cooldavid.org Git - net-next-2.6.git/blobdiff - net/ipv4/netfilter/arp_tables.c
netfilter: {ip,ip6,arp}_tables: don't block bottom half more than necessary
[net-next-2.6.git] / net / ipv4 / netfilter / arp_tables.c
index c868dd53e432ea79da5f99512059c23fbb3fc103..6bccba31d13208d03f042002f5808c3396c1f37f 100644 (file)
@@ -710,7 +710,7 @@ static void get_counters(const struct xt_table_info *t,
        struct arpt_entry *iter;
        unsigned int cpu;
        unsigned int i;
-       unsigned int curcpu;
+       unsigned int curcpu = get_cpu();
 
        /* Instead of clearing (by a previous call to memset())
         * the counters and using adds, we set the counters
@@ -720,14 +720,16 @@ static void get_counters(const struct xt_table_info *t,
         * if new softirq were to run and call ipt_do_table
         */
        local_bh_disable();
-       curcpu = smp_processor_id();
-
        i = 0;
        xt_entry_foreach(iter, t->entries[curcpu], t->size) {
                SET_COUNTER(counters[i], iter->counters.bcnt,
                            iter->counters.pcnt);
                ++i;
        }
+       local_bh_enable();
+       /* Processing counters from other cpus, we can let bottom half enabled,
+        * (preemption is disabled)
+        */
 
        for_each_possible_cpu(cpu) {
                if (cpu == curcpu)
@@ -741,7 +743,7 @@ static void get_counters(const struct xt_table_info *t,
                }
                xt_info_wrunlock(cpu);
        }
-       local_bh_enable();
+       put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)