From 73a2bcb0edb9ffb0b007b3546b430e2c6e415eee Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 29 Oct 2007 21:18:11 +0100
Subject: sched: keep utime/stime monotonic

keep utime/stime monotonic.

cpustats use utime/stime as a ratio against sum_exec_runtime; as a
consequence it can happen - when the ratio changes faster than time
accumulates - that either can appear to go backwards.

Signed-off-by: Peter Zijlstra
Signed-off-by: Ingo Molnar
---
 fs/proc/array.c       | 3 ++-
 include/linux/sched.h | 1 +
 kernel/fork.c         | 1 +
 3 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/fs/proc/array.c b/fs/proc/array.c
index 63c95afb561f..d80baaabf835 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -358,7 +358,8 @@ static cputime_t task_utime(struct task_struct *p)
 	}
 	utime = (clock_t)temp;

-	return clock_t_to_cputime(utime);
+	p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
+	return p->prev_utime;
 }

 static cputime_t task_stime(struct task_struct *p)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3c07d595979f..b0b1fe6e0b17 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1009,6 +1009,7 @@ struct task_struct {
 	unsigned int rt_priority;
 	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
+	cputime_t prev_utime;
 	unsigned long nvcsw, nivcsw; /* context switch counts */
 	struct timespec start_time;		/* monotonic time */
 	struct timespec real_start_time;	/* boot based time */
diff --git a/kernel/fork.c b/kernel/fork.c
index ddafdfac9456..a65bfc47177c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1056,6 +1056,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->gtime = cputime_zero;
 	p->utimescaled = cputime_zero;
 	p->stimescaled = cputime_zero;
+	p->prev_utime = cputime_zero;

 #ifdef CONFIG_TASK_XACCT
 	p->rchar = 0;	/* I/O counter: bytes read */
--
cgit v1.2.3
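The change amounts to a monotonic clamp: task_utime() now never returns less than the value it last handed out, because a sample derived from the utime/stime-to-sum_exec_runtime ratio can regress between two reads. The stand-alone C sketch below illustrates that idea outside the kernel; it is not the kernel code itself, and the names sampled_utime() and report_utime() are made up for the example.

	/*
	 * User-space sketch of the monotonic-clamp technique used by the patch.
	 * A value recomputed from a ratio may move backwards between samples,
	 * so the reported value is clamped against the last value reported,
	 * analogous to p->prev_utime in task_utime().
	 */
	#include <stdio.h>

	static unsigned long prev_utime;	/* analogous to p->prev_utime */

	/* Pretend this recomputes utime from a ratio; it may regress. */
	static unsigned long sampled_utime(int step)
	{
		static const unsigned long samples[] = { 100, 140, 130, 180 };
		return samples[step % 4];
	}

	/* Never report less than what was reported before. */
	static unsigned long report_utime(int step)
	{
		unsigned long cur = sampled_utime(step);

		if (cur > prev_utime)
			prev_utime = cur;
		return prev_utime;
	}

	int main(void)
	{
		for (int i = 0; i < 4; i++)
			printf("sample %d: reported utime %lu\n", i, report_utime(i));
		return 0;	/* prints 100, 140, 140, 180 - monotonic */
	}

The trade-off of the clamp is that a momentarily over-estimated sample is latched until the real value catches up, which is acceptable here since the exported counters are only expected to be monotonic, not exact at every read.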