author     Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-06-27 13:41:29 +0200
committer  Ingo Molnar <mingo@elte.hu>	2008-06-27 14:31:41 +0200
commit     42a3ac7d5cee89849448b41b86faeb86f98e92f6 (patch)
tree       a601a02fd090b6da93b81ef9857133b8de7b5079
parent     408ed066b11cf9ee4536573b4269ee3613bd735e (diff)
download   linux-42a3ac7d5cee89849448b41b86faeb86f98e92f6.tar.bz2
sched: fix load scaling in group balancing
Doing the load balance will change cfs_rq->load.weight (that's the whole point),
but since that weight is part of the scale factor, we end up scaling the moved
load back with a different amount than we scaled the request down with.

A weight that has become smaller inflates moved_load, which makes the balancer
stop too soon.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
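To make the failure mode described above concrete, here is a minimal standalone sketch (userspace C with made-up numbers, not kernel code) that redoes the two scalings performed in load_balance_fair(): once scaling the moved load back up with the weight as it looks after tasks have been moved, and once with a snapshot of the weight taken before the move, as the patch does.

#include <stdio.h>

int main(void)
{
	/* made-up example values, only to show the arithmetic */
	long rem_load_move = 1024;	/* hierarchical load we still want to move */
	long h_load        = 2048;	/* busiest_cfs_rq->h_load, unchanged by the move */
	long weight_before = 4096;	/* busiest_cfs_rq->load.weight before moving tasks */
	long weight_after  = 1024;	/* ...and after: moving tasks shrank it */

	/* scale the request down to group-local units (weight still pre-move here) */
	long rem_load = rem_load_move * weight_before / (h_load + 1);
	long moved    = rem_load;	/* assume the whole request was moved */

	/* buggy: scale back up with the *post-move* weight */
	long moved_buggy = moved * h_load / (weight_after + 1);
	/* fixed: scale back up with the snapshotted pre-move weight */
	long moved_fixed = moved * h_load / (weight_before + 1);

	printf("buggy moved_load = %ld, fixed moved_load = %ld\n",
	       moved_buggy, moved_fixed);
	return 0;
}

With these made-up numbers the buggy path reports roughly four times as much moved load as was actually moved, so rem_load_move goes negative after a single group and the loop bails out early, while the snapshotted path reports essentially the amount that was requested.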
-rw-r--r-- | kernel/sched_fair.c | 10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 865cb53a7ccf..734e4c556fcb 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1444,6 +1444,8 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 
 	list_for_each_entry(tg, &task_groups, list) {
 		struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
+		unsigned long busiest_h_load = busiest_cfs_rq->h_load;
+		unsigned long busiest_weight = busiest_cfs_rq->load.weight;
 		long rem_load, moved_load;
 
 		/*
@@ -1452,8 +1454,8 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		if (!busiest_cfs_rq->task_weight)
 			continue;
 
-		rem_load = rem_load_move * busiest_cfs_rq->load.weight;
-		rem_load /= busiest_cfs_rq->h_load + 1;
+		rem_load = rem_load_move * busiest_weight;
+		rem_load /= busiest_h_load + 1;
 
 		moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
 				rem_load, sd, idle, all_pinned, this_best_prio,
@@ -1462,8 +1464,8 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		if (!moved_load)
 			continue;
 
-		moved_load *= busiest_cfs_rq->h_load;
-		moved_load /= busiest_cfs_rq->load.weight + 1;
+		moved_load *= busiest_h_load;
+		moved_load /= busiest_weight + 1;
 
 		rem_load_move -= moved_load;
 		if (rem_load_move < 0)
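The two conversions in the loop can also be read as a matched pair. The helpers below are only an illustrative restatement of the arithmetic in the hunks above; the names to_group_load() and to_hier_load() are made up and do not exist in the kernel. The point of the patch is simply that both directions use the same snapshot of load.weight and h_load, so the round trip stays consistent even though moving tasks changes cfs_rq->load.weight underneath the loop.

/*
 * Illustrative helpers only (not kernel code): convert between hierarchical
 * load and group-local load the way load_balance_fair() does. Both take the
 * same snapshotted weight/h_load, matching the fixed code above.
 */
static long to_group_load(long hier_load, unsigned long weight,
			  unsigned long h_load)
{
	/* hierarchical -> group-local, as in the rem_load computation */
	return hier_load * (long)weight / ((long)h_load + 1);
}

static long to_hier_load(long group_load, unsigned long weight,
			 unsigned long h_load)
{
	/* group-local -> hierarchical, as in the moved_load computation */
	return group_load * (long)h_load / ((long)weight + 1);
}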