From 213c8af67f21c1dc0d50940b159d9521c95f3c89 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Tue, 28 Aug 2007 12:53:24 +0200
Subject: sched: small schedstat fix

small schedstat fix: the cfs_rq->wait_runtime 'sum of all runtimes'
statistics counters missed newly forked tasks and thus had a constant
negative skew. Fix this.

Signed-off-by: Ingo Molnar
Signed-off-by: Peter Zijlstra
Signed-off-by: Mike Galbraith
---
 kernel/sched_fair.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0c718857176f..75f025da6f7c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1121,8 +1121,10 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 	 * The statistical average of wait_runtime is about
 	 * -granularity/2, so initialize the task with that:
 	 */
-	if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
+	if (sysctl_sched_features & SCHED_FEAT_START_DEBIT) {
 		p->se.wait_runtime = -(sched_granularity(cfs_rq) / 2);
+		schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
+	}
 
 	__enqueue_entity(cfs_rq, se);
 }
--
cgit v1.2.3
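
For readers without the surrounding source at hand, here is a minimal stand-alone C sketch of the bookkeeping this one-liner restores. The struct layouts, the simplified schedstat_add() macro, and the helper name task_new_fair_sketch() are illustrative assumptions, not the kernel's actual definitions; the point is only that every adjustment to a per-entity wait_runtime (here the START_DEBIT of -granularity/2 given to a newly forked task) must also be folded into the per-cfs_rq aggregate, which the pre-patch code forgot to do.

#include <stdio.h>

/* Illustrative stand-ins for the kernel structures (assumption, not the
 * real 2.6.23 layout): only the fields needed for this example. */
struct cfs_rq {
	long wait_runtime;	/* aggregate of all entities' wait_runtime */
};

struct sched_entity {
	long wait_runtime;
};

/* Simplified take on the kernel's schedstat_add(): fold a delta into a
 * per-runqueue statistics field. */
#define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)

/* Mimics the fixed task_new_fair() path: a new entity starts with a debit
 * of -granularity/2, and the aggregate is updated by the same amount. */
static void task_new_fair_sketch(struct cfs_rq *cfs_rq,
				 struct sched_entity *se,
				 long granularity)
{
	se->wait_runtime = -(granularity / 2);
	/* The one-line fix: without this, cfs_rq->wait_runtime drifts
	 * further negative by granularity/2 on every fork. */
	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
}

int main(void)
{
	struct cfs_rq rq = { .wait_runtime = 0 };
	struct sched_entity se;

	task_new_fair_sketch(&rq, &se, 10000000L);	/* 10 ms in ns */
	printf("se.wait_runtime=%ld cfs_rq.wait_runtime=%ld\n",
	       se.wait_runtime, rq.wait_runtime);
	return 0;
}

In the real kernel the statistics macro is expected to compile away when CONFIG_SCHEDSTATS is not set, so the added call would cost nothing in that configuration; treat that as a recollection of the era's code rather than a guarantee.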