author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-11-27 15:44:43 +0100
committer  Ingo Molnar <mingo@elte.hu>                2009-12-09 10:03:04 +0100
commit     ab19cb23313733c55e0517607844b86720b35f5f (patch)
tree       0d6780ab6d1e1e97b624e978ed3418e5786646d4 /kernel/sched.c
parent     5afcdab706d6002cb02b567ba46e650215e694e8 (diff)
download   linux-ab19cb23313733c55e0517607844b86720b35f5f.tar.bz2
sched: Clean up ttwu() rq locking
Since set_task_cpu() doesn't rely on rq->clock anymore we can simplify
the mess in ttwu().

Optimize things a bit by not fiddling with the IRQ state there.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  13
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 1f9c6d99f15d..c92670f8e097 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2371,17 +2371,14 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 	p->state = TASK_WAKING;
-	task_rq_unlock(rq, &flags);
+	__task_rq_unlock(rq);
 
 	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-	if (cpu != orig_cpu) {
-		local_irq_save(flags);
-		rq = cpu_rq(cpu);
-		update_rq_clock(rq);
+	if (cpu != orig_cpu)
 		set_task_cpu(p, cpu);
-		local_irq_restore(flags);
-	}
-	rq = task_rq_lock(p, &flags);
+
+	rq = __task_rq_lock(p);
+	update_rq_clock(rq);
 
 	WARN_ON(p->state != TASK_WAKING);
 	cpu = task_cpu(p);
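
For context, a condensed sketch of the wakeup path as it reads after this patch (paraphrased from the hunk above, not copied from the full kernel source): interrupts were already disabled by the task_rq_lock() taken earlier in try_to_wake_up() and stay disabled across this window, so the lighter __task_rq_unlock()/__task_rq_lock() helpers, which do not save or restore IRQ flags, are sufficient, and a single update_rq_clock() after re-locking replaces the extra clock update the old migration branch performed.

	/*
	 * Post-patch flow (sketch only): rq->lock is dropped without
	 * touching the IRQ flags, the task may be moved to a new CPU,
	 * and the (possibly different) runqueue lock is re-taken before
	 * the single clock update.
	 */
	p->state = TASK_WAKING;
	__task_rq_unlock(rq);			/* drop rq->lock, IRQs stay off */

	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
	if (cpu != orig_cpu)
		set_task_cpu(p, cpu);		/* no rq->clock update needed here anymore */

	rq = __task_rq_lock(p);			/* lock the task's current runqueue */
	update_rq_clock(rq);			/* one clock update, under the new lock */

	WARN_ON(p->state != TASK_WAKING);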