author     Frederic Weisbecker <fweisbec@gmail.com>  2017-01-31 04:09:23 +0100
committer  Ingo Molnar <mingo@kernel.org>  2017-02-01 09:13:49 +0100
commit     5613fda9a503cd6137b120298902a34a1386b2c1 (patch)
tree       5431dd50508a6cfa052a3b9966e58842b430804d /kernel/sched/cputime.c
parent     a1cecf2ba78e0a6de00ff99df34b662728535aa5 (diff)
sched/cputime: Convert task/group cputime to nsecs
Now that most cputime readers use the transition API, which returns the task cputime in the old-style cputime_t, we can safely store the cputime in nsecs. This will eventually make cputime statistics less opaque and more granular. Back-and-forth conversions between cputime_t and nsecs to cope with cputime_t's random granularity won't be needed anymore.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Wanpeng Li <wanpeng.li@hotmail.com>
Link: http://lkml.kernel.org/r/1485832191-26889-8-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/cputime.c')
-rw-r--r--  kernel/sched/cputime.c  35
1 file changed, 17 insertions(+), 18 deletions(-)
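
To make the commit message concrete: with a jiffies-based cputime_t, one cputime unit is a full tick (10 ms at HZ=100), which is the "random granularity" this conversion removes. Below is a minimal user-space sketch of the accumulation the diff introduces (p->utime += cputime_to_nsecs(cputime)); the HZ value and the stand-alone reimplementation of cputime_to_nsecs() are illustrative assumptions, not the kernel's code.

/*
 * Illustrative sketch only, not kernel code: shows why storing task
 * times in nsecs (u64) is more granular than cputime_t on a
 * jiffies-based configuration. HZ=100 is an assumed value.
 */
#include <stdint.h>
#include <stdio.h>

#define HZ            100ULL
#define NSEC_PER_SEC  1000000000ULL

typedef uint64_t cputime_t;     /* one unit == one jiffy here */

static uint64_t cputime_to_nsecs(cputime_t ct)
{
        return ct * (NSEC_PER_SEC / HZ);        /* 1 jiffy = 10 ms at HZ=100 */
}

int main(void)
{
        cputime_t tick = 1;                     /* one accounted tick */
        uint64_t utime_ns = 0;

        utime_ns += cputime_to_nsecs(tick);     /* mirrors p->utime += ... */
        printf("utime = %llu ns\n", (unsigned long long)utime_ns);
        return 0;
}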
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 8bcd98e2b821..0bdef50d88bc 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -134,7 +134,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
int index;
/* Add user time to process. */
- p->utime += cputime;
+ p->utime += cputime_to_nsecs(cputime);
account_group_user_time(p, cputime);
index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
@@ -156,7 +156,7 @@ void account_guest_time(struct task_struct *p, cputime_t cputime)
u64 *cpustat = kcpustat_this_cpu->cpustat;
/* Add guest time to process. */
- p->utime += cputime;
+ p->utime += cputime_to_nsecs(cputime);
account_group_user_time(p, cputime);
p->gtime += cputime_to_nsecs(cputime);
@@ -180,7 +180,7 @@ void account_system_index_time(struct task_struct *p,
cputime_t cputime, enum cpu_usage_stat index)
{
/* Add system time to process. */
- p->stime += cputime;
+ p->stime += cputime_to_nsecs(cputime);
account_group_system_time(p, cputime);
/* Add system time to cpustat. */
@@ -315,7 +315,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t)
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
struct signal_struct *sig = tsk->signal;
- cputime_t utime, stime;
+ u64 utime, stime;
struct task_struct *t;
unsigned int seq, nextseq;
unsigned long flags;
@@ -465,14 +465,14 @@ void vtime_account_irq_enter(struct task_struct *tsk)
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
-void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
*ut = p->utime;
*st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);
-void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
struct task_cputime cputime;
@@ -543,7 +543,7 @@ void account_idle_ticks(unsigned long ticks)
* Perform (stime * rtime) / total, but avoid multiplication overflow by
* loosing precision when the numbers are big.
*/
-static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
+static u64 scale_stime(u64 stime, u64 rtime, u64 total)
{
u64 scaled;
@@ -580,7 +580,7 @@ drop_precision:
* followed by a 64/32->64 divide.
*/
scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
- return (__force cputime_t) scaled;
+ return scaled;
}
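
The hunk above only changes scale_stime()'s return type, but the surrounding function is the interesting part: it computes stime * rtime / total while avoiding a 128-bit multiply by shedding low-order bits until a 32x32->64 multiply and a 64/32 divide are safe. The following user-space sketch mirrors that precision-dropping loop; it is an approximation of the kernel function, with plain division standing in for div_u64() and a GCC/Clang __int128 comparison added purely for demonstration.

/*
 * User-space sketch of the idea behind scale_stime(), not the kernel's
 * exact code: drop low-order bits until stime, rtime, and total all
 * fit the 32x32->64 multiply and 64/32 divide.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t scale_approx(uint64_t stime, uint64_t rtime, uint64_t total)
{
        for (;;) {
                if (stime > rtime) {            /* keep rtime the bigger */
                        uint64_t t = stime;
                        stime = rtime;
                        rtime = t;
                }
                if (total >> 32)                /* total must fit 32 bits */
                        goto drop;
                if (!(rtime >> 32))             /* both now fit 32 bits */
                        break;
                if (stime >> 31)                /* cannot rebalance further */
                        goto drop;
                stime <<= 1;                    /* rebalance instead of */
                rtime >>= 1;                    /* dropping precision */
                continue;
drop:
                rtime >>= 1;                    /* drop one bit from the */
                total >>= 1;                    /* numerator and divisor */
        }
        return (uint64_t)(uint32_t)stime * (uint32_t)rtime / (uint32_t)total;
}

int main(void)
{
        uint64_t s = 123456789ULL, r = 987654321012ULL, t = 1111111110801ULL;
        unsigned __int128 exact = (unsigned __int128)s * r / t;

        printf("approx=%llu exact=%llu\n",
               (unsigned long long)scale_approx(s, r, t),
               (unsigned long long)exact);
        return 0;
}

The two printed values agree up to the bits the loop deliberately drops, which is the trade-off the kernel comment describes.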
/*
@@ -605,14 +605,14 @@ drop_precision:
*/
static void cputime_adjust(struct task_cputime *curr,
struct prev_cputime *prev,
- cputime_t *ut, cputime_t *st)
+ u64 *ut, u64 *st)
{
- cputime_t rtime, stime, utime;
+ u64 rtime, stime, utime;
unsigned long flags;
/* Serialize concurrent callers such that we can honour our guarantees */
raw_spin_lock_irqsave(&prev->lock, flags);
- rtime = nsecs_to_cputime(curr->sum_exec_runtime);
+ rtime = curr->sum_exec_runtime;
/*
* This is possible under two circumstances:
@@ -643,8 +643,7 @@ static void cputime_adjust(struct task_cputime *curr,
goto update;
}
- stime = scale_stime((__force u64)stime, (__force u64)rtime,
- (__force u64)(stime + utime));
+ stime = scale_stime(stime, rtime, stime + utime);
update:
/*
@@ -677,7 +676,7 @@ out:
raw_spin_unlock_irqrestore(&prev->lock, flags);
}
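
With the conversions above, cputime_adjust() now works directly in nanoseconds: rtime comes from the scheduler's precise sum_exec_runtime (already in nsecs, hence the dropped nsecs_to_cputime() call), and the tick-sampled stime/utime pair is rescaled so it sums to rtime. With illustrative numbers: sampled stime = 2s and utime = 6s (total = 8s) against a precise rtime = 9s gives stime = scale_stime(2, 9, 8) = 9 * 2/8 = 2.25s and utime = rtime - stime = 6.75s, preserving the sampled ratio while matching the precise runtime.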
-void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
struct task_cputime cputime = {
.sum_exec_runtime = p->se.sum_exec_runtime,
@@ -688,7 +687,7 @@ void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);
-void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
struct task_cputime cputime;
@@ -849,9 +848,9 @@ u64 task_gtime(struct task_struct *t)
* add up the pending nohz execution time since the last
* cputime snapshot.
*/
-void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
+void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
{
- cputime_t delta;
+ u64 delta;
unsigned int seq;
if (!vtime_accounting_enabled()) {
@@ -870,7 +869,7 @@ void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
if (t->vtime_snap_whence == VTIME_INACTIVE || is_idle_task(t))
continue;
- delta = vtime_delta(t);
+ delta = cputime_to_nsecs(vtime_delta(t));
/*
* Task runs either in user or kernel space, add pending nohz time to