We currently disable BH for the whole duration of get_counters().
On machines with a lot of CPUs and large tables, this might be too long.
We can instead disable preemption for the whole function, and disable BH only
while fetching the counters for the current CPU.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
struct arpt_entry *iter;
unsigned int cpu;
unsigned int i;
struct arpt_entry *iter;
unsigned int cpu;
unsigned int i;
+ unsigned int curcpu = get_cpu();
/* Instead of clearing (by a previous call to memset())
* the counters and using adds, we set the counters
/* Instead of clearing (by a previous call to memset())
* the counters and using adds, we set the counters
* if new softirq were to run and call ipt_do_table
*/
local_bh_disable();
* if new softirq were to run and call ipt_do_table
*/
local_bh_disable();
- curcpu = smp_processor_id();
-
i = 0;
xt_entry_foreach(iter, t->entries[curcpu], t->size) {
SET_COUNTER(counters[i], iter->counters.bcnt,
iter->counters.pcnt);
++i;
}
i = 0;
xt_entry_foreach(iter, t->entries[curcpu], t->size) {
SET_COUNTER(counters[i], iter->counters.bcnt,
iter->counters.pcnt);
++i;
}
+ local_bh_enable();
+ /* Processing counters from other cpus, we can let bottom half enabled,
+ * (preemption is disabled)
+ */
for_each_possible_cpu(cpu) {
if (cpu == curcpu)
for_each_possible_cpu(cpu) {
if (cpu == curcpu)
}
xt_info_wrunlock(cpu);
}
}
xt_info_wrunlock(cpu);
}
}
static struct xt_counters *alloc_counters(const struct xt_table *table)
}
static struct xt_counters *alloc_counters(const struct xt_table *table)
struct ipt_entry *iter;
unsigned int cpu;
unsigned int i;
struct ipt_entry *iter;
unsigned int cpu;
unsigned int i;
+ unsigned int curcpu = get_cpu();
/* Instead of clearing (by a previous call to memset())
* the counters and using adds, we set the counters
/* Instead of clearing (by a previous call to memset())
* the counters and using adds, we set the counters
* if new softirq were to run and call ipt_do_table
*/
local_bh_disable();
* if new softirq were to run and call ipt_do_table
*/
local_bh_disable();
- curcpu = smp_processor_id();
-
i = 0;
xt_entry_foreach(iter, t->entries[curcpu], t->size) {
SET_COUNTER(counters[i], iter->counters.bcnt,
iter->counters.pcnt);
++i;
}
i = 0;
xt_entry_foreach(iter, t->entries[curcpu], t->size) {
SET_COUNTER(counters[i], iter->counters.bcnt,
iter->counters.pcnt);
++i;
}
+ local_bh_enable();
+ /* Processing counters from other cpus, we can let bottom half enabled,
+ * (preemption is disabled)
+ */
for_each_possible_cpu(cpu) {
if (cpu == curcpu)
for_each_possible_cpu(cpu) {
if (cpu == curcpu)
}
xt_info_wrunlock(cpu);
}
}
xt_info_wrunlock(cpu);
}
}
static struct xt_counters *alloc_counters(const struct xt_table *table)
}
static struct xt_counters *alloc_counters(const struct xt_table *table)
struct ip6t_entry *iter;
unsigned int cpu;
unsigned int i;
struct ip6t_entry *iter;
unsigned int cpu;
unsigned int i;
+ unsigned int curcpu = get_cpu();
/* Instead of clearing (by a previous call to memset())
* the counters and using adds, we set the counters
/* Instead of clearing (by a previous call to memset())
* the counters and using adds, we set the counters
* if new softirq were to run and call ipt_do_table
*/
local_bh_disable();
* if new softirq were to run and call ipt_do_table
*/
local_bh_disable();
- curcpu = smp_processor_id();
-
i = 0;
xt_entry_foreach(iter, t->entries[curcpu], t->size) {
SET_COUNTER(counters[i], iter->counters.bcnt,
iter->counters.pcnt);
++i;
}
i = 0;
xt_entry_foreach(iter, t->entries[curcpu], t->size) {
SET_COUNTER(counters[i], iter->counters.bcnt,
iter->counters.pcnt);
++i;
}
+ local_bh_enable();
+ /* Processing counters from other cpus, we can let bottom half enabled,
+ * (preemption is disabled)
+ */
for_each_possible_cpu(cpu) {
if (cpu == curcpu)
for_each_possible_cpu(cpu) {
if (cpu == curcpu)
}
xt_info_wrunlock(cpu);
}
}
xt_info_wrunlock(cpu);
}
}
static struct xt_counters *alloc_counters(const struct xt_table *table)
}
static struct xt_counters *alloc_counters(const struct xt_table *table)