| author | Ingo Molnar <mingo@elte.hu> | 2007-09-05 14:32:49 +0200 |
| --- | --- | --- |
| committer | Ingo Molnar <mingo@elte.hu> | 2007-09-05 14:32:49 +0200 |
| commit | a206c07213cf6372289f189c3774c4c3255a7ae1 | (patch) |
| tree | a13c33021cc7e7b656305cf20f96205c5f5745a9 | /kernel |
| parent | a0dc72601d48b171b4870dfdd0824901a2b2b1a9 | (diff) |
| download | linux-a206c07213cf6372289f189c3774c4c3255a7ae1.tar.bz2 | |
sched: debug: fix cfs_rq->wait_runtime accounting
the cfs_rq->wait_runtime debug/statistics counter was not maintained
properly - fix this.
this also removes some code:
text data bss dec hex filename
13420 228 1204 14852 3a04 sched.o.before
13404 228 1204 14836 39f4 sched.o.after
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
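
For readers skimming the diff below: cfs_rq->wait_runtime is a per-runqueue debug/statistics aggregate that is meant to equal the sum of se->wait_runtime over the entities currently queued on that cfs_rq. Before this patch the aggregate was touched from several unrelated paths (set_load_weight(), __enqueue_sleeper(), dequeue_entity(), task_new_fair()) and could drift from that sum; the patch moves the bookkeeping into __enqueue_entity()/__dequeue_entity() so it happens exactly once per enqueue and once per dequeue. The sketch below is a minimal, self-contained model of that invariant; the struct and function names are made up for illustration and are not the kernel's.

```c
/*
 * Toy model of the accounting invariant the patch restores:
 * the aggregate mirrors the sum of wait_runtime over queued entities.
 * Illustrative names only; this is not kernel code.
 */
#include <assert.h>
#include <stdio.h>

struct se_model {
	long wait_runtime;
	int  on_rq;
};

struct cfs_rq_model {
	long wait_runtime;		/* aggregate over queued entities */
	unsigned int nr_running;
};

/* The aggregate is adjusted in exactly one place on the way in ... */
static void model_enqueue(struct cfs_rq_model *cfs_rq, struct se_model *se)
{
	cfs_rq->nr_running++;
	se->on_rq = 1;
	cfs_rq->wait_runtime += se->wait_runtime;
}

/* ... and in exactly one place on the way out. */
static void model_dequeue(struct cfs_rq_model *cfs_rq, struct se_model *se)
{
	cfs_rq->nr_running--;
	se->on_rq = 0;
	cfs_rq->wait_runtime -= se->wait_runtime;
}

int main(void)
{
	struct cfs_rq_model rq = { 0, 0 };
	struct se_model a = { -500, 0 };	/* e.g. a START_DEBIT-style seed */
	struct se_model b = { 1200, 0 };

	model_enqueue(&rq, &a);
	model_enqueue(&rq, &b);
	assert(rq.wait_runtime == a.wait_runtime + b.wait_runtime);

	model_dequeue(&rq, &a);
	assert(rq.wait_runtime == b.wait_runtime);

	printf("aggregate stays consistent: %ld\n", rq.wait_runtime);
	return 0;
}
```

With the aggregate owned by the enqueue/dequeue paths, code that seeds or clears se->wait_runtime while the task is off the runqueue (set_load_weight(), the START_DEBIT branch of task_new_fair()) no longer needs its own compensating update, which is exactly the code the diff removes.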
Diffstat (limited to 'kernel')

| -rw-r--r-- | kernel/sched.c | 1 |
| -rw-r--r-- | kernel/sched_fair.c | 10 |

2 files changed, 5 insertions, 6 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index c8759ec6d8a9..97986f1f0be8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -858,7 +858,6 @@ static void dec_nr_running(struct task_struct *p, struct rq *rq)
 
 static void set_load_weight(struct task_struct *p)
 {
-	task_rq(p)->cfs.wait_runtime -= p->se.wait_runtime;
 	p->se.wait_runtime = 0;
 
 	if (task_has_rt_policy(p)) {
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 810b52d994e0..bac2aff8273c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -194,6 +194,8 @@ __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	update_load_add(&cfs_rq->load, se->load.weight);
 	cfs_rq->nr_running++;
 	se->on_rq = 1;
+
+	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
 }
 
 static inline void
@@ -205,6 +207,8 @@ __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	update_load_sub(&cfs_rq->load, se->load.weight);
 	cfs_rq->nr_running--;
 	se->on_rq = 0;
+
+	schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
 }
 
 static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
@@ -574,7 +578,6 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 	prev_runtime = se->wait_runtime;
 	__add_wait_runtime(cfs_rq, se, delta_fair);
-	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
 	delta_fair = se->wait_runtime - prev_runtime;
 
 	/*
@@ -662,7 +665,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 			if (tsk->state & TASK_UNINTERRUPTIBLE)
 				se->block_start = rq_of(cfs_rq)->clock;
 		}
-		cfs_rq->wait_runtime -= se->wait_runtime;
 #endif
 	}
 	__dequeue_entity(cfs_rq, se);
@@ -1121,10 +1123,8 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 	 * The statistical average of wait_runtime is about
 	 * -granularity/2, so initialize the task with that:
	 */
-	if (sysctl_sched_features & SCHED_FEAT_START_DEBIT) {
+	if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
 		se->wait_runtime = -(sched_granularity(cfs_rq) / 2);
-		schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
-	}
 
 	__enqueue_entity(cfs_rq, se);
 }
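
A note on the helper the new hunks rely on: schedstat_add(cfs_rq, wait_runtime, delta) accumulates into a statistics field when schedstats are compiled in and compiles away otherwise, so the enqueue/dequeue paths only pay for this bookkeeping in a CONFIG_SCHEDSTATS build. The sketch below shows the usual shape of such a macro; it is an assumption about the pattern, not a verbatim copy of the scheduler's stats header.

```c
/*
 * Sketch of the schedstat_add() pattern (assumed shape, not a verbatim
 * copy of the kernel header): accumulate when CONFIG_SCHEDSTATS is set,
 * otherwise expand to nothing.
 */
#ifdef CONFIG_SCHEDSTATS
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
#else
# define schedstat_add(rq, field, amt)	do { } while (0)
#endif
```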