author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2007-10-15 17:00:14 +0200
committer  Ingo Molnar <mingo@elte.hu>                2007-10-15 17:00:14 +0200
commit     ce6c131131df442f0d49d064129ecc52d9fe8ca9
tree       14b93a32144e7270dd821901ff247f506270a3a3 /kernel
parent     e62dd02ed0af35631c6ca473e50758c9594773cf
download   linux-ce6c131131df442f0d49d064129ecc52d9fe8ca9.tar.bz2
sched: disable forced preemption by default
Implement a feature bit to disable forced preemption. This way
it can be checked whether a workload is overscheduling or not.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
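
The mechanism behind the message is a single feature bit tested through the
sched_feat() macro. The following standalone C sketch is hypothetical,
userspace-only code that mirrors the kernel names for readability; it shows
how the power-of-two flags, the *1/*0 default mask, and the macro fit
together, and how clearing one bit turns a feature off at run time.

/*
 * Userspace sketch (not kernel code) of the feature-bit pattern the
 * patch extends: each feature is a power-of-two flag, the default mask
 * is built by multiplying each flag by 0 or 1, and a macro tests one bit.
 */
#include <stdio.h>

enum {
	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 1,
	SCHED_FEAT_START_DEBIT		= 2,
	SCHED_FEAT_TREE_AVG		= 4,
	SCHED_FEAT_APPROX_AVG		= 8,
	SCHED_FEAT_WAKEUP_PREEMPT	= 16,
};

/* Default mask: the *1 / *0 factors make it easy to flip a default. */
static unsigned int sysctl_sched_features =
		SCHED_FEAT_NEW_FAIR_SLEEPERS	*1 |
		SCHED_FEAT_START_DEBIT		*1 |
		SCHED_FEAT_TREE_AVG		*0 |
		SCHED_FEAT_APPROX_AVG		*0 |
		SCHED_FEAT_WAKEUP_PREEMPT	*1;

#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)

int main(void)
{
	printf("WAKEUP_PREEMPT enabled: %s\n",
	       sched_feat(WAKEUP_PREEMPT) ? "yes" : "no");

	/* Clearing the bit disables the feature for all later checks. */
	sysctl_sched_features &= ~SCHED_FEAT_WAKEUP_PREEMPT;
	printf("WAKEUP_PREEMPT enabled: %s\n",
	       sched_feat(WAKEUP_PREEMPT) ? "yes" : "no");
	return 0;
}
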
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c       |  4
-rw-r--r--  kernel/sched_fair.c  | 24
2 files changed, 16 insertions(+), 12 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index b7dff36c7c8c..0bd8f2c0fb40 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -444,13 +444,15 @@ enum {
 	SCHED_FEAT_START_DEBIT		= 2,
 	SCHED_FEAT_TREE_AVG		= 4,
 	SCHED_FEAT_APPROX_AVG		= 8,
+	SCHED_FEAT_WAKEUP_PREEMPT	= 16,
 };
 
 const_debug unsigned int sysctl_sched_features =
 		SCHED_FEAT_NEW_FAIR_SLEEPERS	*1 |
 		SCHED_FEAT_START_DEBIT		*1 |
 		SCHED_FEAT_TREE_AVG		*0 |
-		SCHED_FEAT_APPROX_AVG		*0;
+		SCHED_FEAT_APPROX_AVG		*0 |
+		SCHED_FEAT_WAKEUP_PREEMPT	*1;
 
 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 3ac096e74faf..3843ec71aad5 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -626,7 +626,7 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 	 */
 	update_curr(cfs_rq);
 
-	if (cfs_rq->nr_running > 1)
+	if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
 		check_preempt_tick(cfs_rq, curr);
 }
 
@@ -828,18 +828,20 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 		return;
 	}
 
-	while (!is_same_group(se, pse)) {
-		se = parent_entity(se);
-		pse = parent_entity(pse);
-	}
+	if (sched_feat(WAKEUP_PREEMPT)) {
+		while (!is_same_group(se, pse)) {
+			se = parent_entity(se);
+			pse = parent_entity(pse);
+		}
 
-	delta = se->vruntime - pse->vruntime;
-	gran = sysctl_sched_wakeup_granularity;
-	if (unlikely(se->load.weight != NICE_0_LOAD))
-		gran = calc_delta_fair(gran, &se->load);
+		delta = se->vruntime - pse->vruntime;
+		gran = sysctl_sched_wakeup_granularity;
+		if (unlikely(se->load.weight != NICE_0_LOAD))
+			gran = calc_delta_fair(gran, &se->load);
 
-	if (delta > gran)
-		resched_task(curr);
+		if (delta > gran)
+			resched_task(curr);
+	}
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
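
For the sched_fair.c part of the diff, the check that is now gated behind
sched_feat(WAKEUP_PREEMPT) compares the running task's vruntime lead over the
waking task against the wakeup granularity, scaled when the running task's
weight differs from NICE_0_LOAD. The sketch below is an illustrative
userspace reduction of that decision, not kernel code: should_preempt(),
scale_gran(), and the 10 ms granularity value are assumptions made for the
example.

/*
 * Illustrative reduction of the wakeup-preemption decision: the waking
 * task preempts only when the running task's vruntime lead exceeds the
 * wakeup granularity, scaled for non-default weights.
 */
#include <stdio.h>

#define NICE_0_LOAD	1024ULL

/* Illustrative value, in nanoseconds. */
static unsigned long long sysctl_sched_wakeup_granularity = 10000000ULL;

/* Simplified stand-in for calc_delta_fair(): scale by NICE_0_LOAD / weight. */
static unsigned long long scale_gran(unsigned long long gran, unsigned long weight)
{
	return gran * NICE_0_LOAD / weight;
}

/* Return 1 if the running task should be preempted by the waking one. */
static int should_preempt(long long curr_vruntime, long long waking_vruntime,
			  unsigned long curr_weight)
{
	long long delta = curr_vruntime - waking_vruntime;
	unsigned long long gran = sysctl_sched_wakeup_granularity;

	if (curr_weight != NICE_0_LOAD)
		gran = scale_gran(gran, curr_weight);

	return delta > (long long)gran;
}

int main(void)
{
	/* Waking task trails by 15 ms of virtual time: preempt (prints 1). */
	printf("%d\n", should_preempt(115000000LL, 100000000LL, NICE_0_LOAD));
	/* Only 5 ms behind: the running task keeps the CPU (prints 0). */
	printf("%d\n", should_preempt(105000000LL, 100000000LL, NICE_0_LOAD));
	return 0;
}

With the bit cleared, check_preempt_wakeup() skips this comparison entirely,
so wakeups never force a reschedule and preemption comes only from the tick
path (entity_tick() now calls check_preempt_tick() unconditionally when the
feature is off). That is what allows a workload to be compared with and
without forced preemption.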