author    Siddha, Suresh B <suresh.b.siddha@intel.com>	2005-09-10 00:26:21 -0700
committer Linus Torvalds <torvalds@g5.osdl.org>	2005-09-10 10:06:24 -0700
commit    0c117f1b4d14380baeed9c883f765ee023da8761 (patch)
tree      8bd81914e49493bdae4b04db307a48dcfc0b6316 /kernel/sched.c
parent    fa3b6ddc3f4a8eadba52234134cdb59c28b5332d (diff)
download  linux-0c117f1b4d14380baeed9c883f765ee023da8761.tar.bz2
[PATCH] sched: allow the load to grow upto its cpu_power
Don't pull tasks from a group if that would cause the group's total load
to drop below its total cpu_power (i.e. cause the group to start going
idle).

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
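To see what the new clamp does, here is a minimal, self-contained C sketch of the arithmetic (illustrative only: the value 128 for SCHED_LOAD_SCALE and the min_ul() helper are stand-ins for the kernel's scale constant and min() macro, not the kernel code itself):

#include <stdio.h>

#define SCHED_LOAD_SCALE 128UL	/* assumed scale value for this sketch */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long max_load = 3 * SCHED_LOAD_SCALE / 2;	/* busiest group at 1.5 units */
	unsigned long avg_load = SCHED_LOAD_SCALE / 4;		/* domain average at 0.25 units */

	/*
	 * Without the clamp the balancer would try to move
	 * max_load - avg_load (1.25 units here), pulling the busiest
	 * group below its cpu_power and leaving it partly idle.
	 */
	unsigned long naive_pull = max_load - avg_load;

	/* With the clamp, never pull the group below SCHED_LOAD_SCALE. */
	unsigned long max_pull = min_ul(max_load - avg_load,
					max_load - SCHED_LOAD_SCALE);

	printf("naive pull: %lu, clamped pull: %lu\n", naive_pull, max_pull);
	return 0;
}

Here naive_pull is 160 while max_pull is 64: the pull is capped at the 0.5 units the group can give up before dropping below its cpu_power.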
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 1dc29dec38a9..dbd4490afec1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1910,6 +1910,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
{
struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
unsigned long max_load, avg_load, total_load, this_load, total_pwr;
+ unsigned long max_pull;
int load_idx;
max_load = this_load = total_load = total_pwr = 0;
@@ -1959,7 +1960,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
group = group->next;
} while (group != sd->groups);
- if (!busiest || this_load >= max_load)
+ if (!busiest || this_load >= max_load || max_load <= SCHED_LOAD_SCALE)
goto out_balanced;
avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
@@ -1979,8 +1980,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* by pulling tasks to us. Be careful of negative numbers as they'll
* appear as very large values with unsigned longs.
*/
+
+ /* Don't want to pull so many tasks that a group would go idle */
+ max_pull = min(max_load - avg_load, max_load - SCHED_LOAD_SCALE);
+
/* How much load to actually move to equalise the imbalance */
- *imbalance = min((max_load - avg_load) * busiest->cpu_power,
+ *imbalance = min(max_pull * busiest->cpu_power,
(avg_load - this_load) * this->cpu_power)
/ SCHED_LOAD_SCALE;
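In other words, the imbalance is now capped from both sides: by max_pull, the most the busiest group can shed without dipping below its cpu_power, and by avg_load - this_load, the most this group can absorb before overshooting the domain average. For example, with max_load = 2*SCHED_LOAD_SCALE, avg_load = 1.5*SCHED_LOAD_SCALE, this_load = 0.5*SCHED_LOAD_SCALE, and both cpu_power values equal to SCHED_LOAD_SCALE, max_pull = min(0.5, 1.0) = 0.5 units, so half a unit of load is moved rather than the full gap to the average. The new max_load <= SCHED_LOAD_SCALE check (which jumps to out_balanced) serves the same purpose earlier: if the busiest group is already at or below its cpu_power, any pull would start idling it, so balancing is skipped outright.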