author		Ingo Molnar <mingo@elte.hu>	2007-07-09 18:51:59 +0200
committer	Ingo Molnar <mingo@elte.hu>	2007-07-09 18:51:59 +0200
commit		1b9f19c2125dd1021b9327111dc40b14b557ee12 (patch)
tree		139bfbf6f43fa957821f95ebc9c5f1d43b177271 /kernel
parent		bb29ab26863c022743143f27956cc0ca362f258c (diff)
sched: turn on the use of unstable events
make use of sched-clock-unstable events.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	21
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6150cd70f448..2d23450e7614 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -68,13 +68,6 @@ unsigned long long __attribute__((weak)) sched_clock(void)
}
/*
- * CPU frequency is/was unstable - start new by setting prev_clock_raw:
- */
-void sched_clock_unstable_event(void)
-{
-}
-
-/*
* Convert user-nice values [ -20 ... 0 ... 19 ]
* to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
* and back.
@@ -630,6 +623,20 @@ static inline struct rq *this_rq_lock(void)
}
/*
+ * CPU frequency is/was unstable - start new by setting prev_clock_raw:
+ */
+void sched_clock_unstable_event(void)
+{
+ unsigned long flags;
+ struct rq *rq;
+
+ rq = task_rq_lock(current, &flags);
+ rq->prev_clock_raw = sched_clock();
+ rq->clock_unstable_events++;
+ task_rq_unlock(rq, &flags);
+}
+
+/*
* resched_task - mark a task 'to be rescheduled now'.
*
* On UP this means the setting of the need_resched flag, on SMP it
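
The hook added above only matters once something calls it on a CPU frequency change, and the actual call sites for sched_clock_unstable_event() live elsewhere in the tree, not in this patch. Purely as an illustration, a caller could be wired up as a cpufreq transition notifier along the following lines; the notifier and function names here are hypothetical and are not taken from this commit.

/*
 * Hypothetical caller sketch -- not part of this patch. It shows how a
 * cpufreq transition notifier could tell the scheduler that sched_clock()
 * just changed rate, so the per-rq raw clock base gets restarted.
 */
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/notifier.h>

/* Defined in kernel/sched.c by the patch above. */
extern void sched_clock_unstable_event(void);

static int freq_transition_notifier(struct notifier_block *nb,
				    unsigned long event, void *data)
{
	/* Once the new frequency is in effect, reset the raw clock base. */
	if (event == CPUFREQ_POSTCHANGE)
		sched_clock_unstable_event();
	return NOTIFY_OK;
}

static struct notifier_block freq_transition_nb = {
	.notifier_call	= freq_transition_notifier,
};

static int __init sched_clock_freq_notifier_init(void)
{
	return cpufreq_register_notifier(&freq_transition_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
late_initcall(sched_clock_freq_notifier_init);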