summaryrefslogtreecommitdiffstats
path: root/kernel/sched_fair.c
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2008-03-14 22:16:08 +0100
committerIngo Molnar <mingo@elte.hu>2008-03-15 03:02:50 +0100
commite22ecef1d2658ba54ed7d3fdb5d60829fb434c23 (patch)
tree49069e160107578ec8212d969b599e03d7a62fae /kernel/sched_fair.c
parentaa2ac25229cd4d0280f6174c42712744ad61b140 (diff)
downloadlinux-e22ecef1d2658ba54ed7d3fdb5d60829fb434c23.tar.bz2
sched: fix fair sleepers
Fair sleepers need to scale their latency target down by runqueue weight. Otherwise busy systems will gain an ever larger sleep bonus.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 31c4a2988b64..31aa1b9fa762 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -528,8 +528,10 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
if (!initial) {
/* sleeps upto a single latency don't count. */
- if (sched_feat(NEW_FAIR_SLEEPERS))
- vruntime -= sysctl_sched_latency;
+ if (sched_feat(NEW_FAIR_SLEEPERS)) {
+ vruntime -= calc_delta_fair(sysctl_sched_latency,
+ &cfs_rq->load);
+ }
/* ensure we never gain time by being placed backwards. */
vruntime = max_vruntime(se->vruntime, vruntime);