sched: Reduce update_group_power() calls
author    Peter Zijlstra <a.p.zijlstra@chello.nl>
          Fri, 9 Jul 2010 13:15:43 +0000 (15:15 +0200)
committer Ingo Molnar <mingo@elte.hu>
          Sat, 17 Jul 2010 10:05:14 +0000 (12:05 +0200)
Currently we update cpu_power too often: update_group_power() only
updates the local group's cpu_power, but it gets called for all groups.

Furthermore, CPU_NEWLY_IDLE invocations will result in all CPUs
calling it, even though a slow update of cpu_power is sufficient.

Therefore move the update under 'idle != CPU_NEWLY_IDLE &&
local_group' to reduce superfluous invocations.

Reported-by: Venkatesh Pallipadi <venki@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Suresh Siddha <suresh.b.siddha@intel.com>
LKML-Reference: <1278612989.1900.176.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched_fair.c

index e44a591531a13dca3aae7b75af4570079cc97818..c9ac097609537e90031d8ba9cd8f491f2fcc3535 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2425,14 +2425,14 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
         * domains. In the newly idle case, we will allow all the cpu's
         * to do the newly idle load balance.
         */
-       if (idle != CPU_NEWLY_IDLE && local_group &&
-           balance_cpu != this_cpu) {
-               *balance = 0;
-               return;
+       if (idle != CPU_NEWLY_IDLE && local_group) {
+               if (balance_cpu != this_cpu) {
+                       *balance = 0;
+                       return;
+               }
+               update_group_power(sd, this_cpu);
        }
 
-       update_group_power(sd, this_cpu);
-
        /* Adjust by relative CPU power of the group */
        sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
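
To isolate the control-flow change, here is a minimal standalone C sketch
of the gating before and after the patch. The enum, the
update_group_power_stub() helper, and the old_path()/new_path() wrappers
are simplified stand-ins invented for illustration, not the kernel's
actual definitions; only the if-nesting mirrors the hunk above.

#include <stdbool.h>

enum cpu_idle_type { CPU_NOT_IDLE, CPU_IDLE, CPU_NEWLY_IDLE };

/* Hypothetical stand-in for update_group_power(sd, cpu). */
static void update_group_power_stub(int this_cpu)
{
	(void)this_cpu;		/* real code recomputes cpu_power here */
}

/* Before: the update ran unconditionally, for every group. */
static void old_path(enum cpu_idle_type idle, bool local_group,
		     int balance_cpu, int this_cpu, int *balance)
{
	if (idle != CPU_NEWLY_IDLE && local_group &&
	    balance_cpu != this_cpu) {
		*balance = 0;
		return;
	}
	update_group_power_stub(this_cpu);	/* all groups paid this */
}

/* After: only the local group's designated balance cpu recomputes
 * cpu_power, and never during CPU_NEWLY_IDLE balancing. */
static void new_path(enum cpu_idle_type idle, bool local_group,
		     int balance_cpu, int this_cpu, int *balance)
{
	if (idle != CPU_NEWLY_IDLE && local_group) {
		if (balance_cpu != this_cpu) {
			*balance = 0;
			return;
		}
		update_group_power_stub(this_cpu);	/* local group only */
	}
}

One subtlety the sketch makes visible: in the old code even a
CPU_NEWLY_IDLE pass fell through to the update, whereas the new code
skips it entirely there, which is exactly the "slow update of cpu_power
is sufficient" argument from the changelog.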