author	Ingo Molnar <mingo@elte.hu>	2007-10-15 17:00:14 +0200
committer	Ingo Molnar <mingo@elte.hu>	2007-10-15 17:00:14 +0200
commit	00bf7bfc2eaf775b634774e9ec435d720b6ecee7 (patch)
tree	1f816516361276a56b9dee6b39e90361dc3468be /kernel
parent	0702e3ebc1e42576a04d29f8adacf13be825b800 (diff)
download	linux-00bf7bfc2eaf775b634774e9ec435d720b6ecee7.tar.bz2
sched: fix: move the CPU check into ->task_new_fair()
Noticed by Peter Zijlstra: move the CPU check into ->task_new_fair(); this way we can call place_entity() and get the child's ->vruntime right at initial wakeup time. (Without this there can be large latencies.)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c		5
-rw-r--r--	kernel/sched_fair.c	3
2 files changed, 3 insertions(+), 5 deletions(-)
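To illustrate the resulting control flow, here is a minimal userspace C sketch, not kernel code: it models wake_up_new_task() taking the generic activate path only when the class has no ->task_new hook, while the same-CPU check and the child-runs-first vruntime swap now sit together inside task_new_fair(), right after the child's vruntime has been placed. All types and helpers below (struct task, the mock smp_processor_id(), the printf placeholders) are simplified stand-ins for illustration, not the real kernel API.

/* Simplified, userspace-only model of the post-patch logic. */
#include <stdio.h>
#include <stdbool.h>

struct task { int cpu; unsigned long vruntime; bool has_task_new; };

static int smp_processor_id(void) { return 0; }	/* mock: the parent runs on CPU 0 */
static int sysctl_sched_child_runs_first = 1;

/* ->task_new_fair(): the same-CPU check now lives here, next to the placement */
static void task_new_fair(struct task *parent, struct task *child)
{
	int this_cpu = smp_processor_id();

	/* at this point place_entity() would already have set child->vruntime */
	if (sysctl_sched_child_runs_first && this_cpu == child->cpu &&
			parent->vruntime < child->vruntime) {
		unsigned long tmp = parent->vruntime;	/* swap so the child runs first */
		parent->vruntime = child->vruntime;
		child->vruntime = tmp;
	}
}

/* wake_up_new_task(): no per-CPU special case left at this level */
static void wake_up_new_task(struct task *parent, struct task *child)
{
	if (!child->has_task_new)
		printf("generic activate path\n");
	else
		task_new_fair(parent, child);
}

int main(void)
{
	struct task parent = { .cpu = 0, .vruntime = 100, .has_task_new = true };
	struct task child  = { .cpu = 0, .vruntime = 150, .has_task_new = true };

	wake_up_new_task(&parent, &child);
	printf("parent vruntime %lu, child vruntime %lu\n",
	       parent.vruntime, child.vruntime);
	return 0;
}

In the cross-CPU fork case this model simply skips the swap, which mirrors why moving the check makes it safe to always run the placement for the child at initial wakeup time.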
diff --git a/kernel/sched.c b/kernel/sched.c
index f2b8db4d6802..b41ef663b993 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1660,17 +1660,14 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
 	unsigned long flags;
 	struct rq *rq;
-	int this_cpu;
 
 	rq = task_rq_lock(p, &flags);
 	BUG_ON(p->state != TASK_RUNNING);
-	this_cpu = smp_processor_id(); /* parent's CPU */
 	update_rq_clock(rq);
 
 	p->prio = effective_prio(p);
 
-	if (task_cpu(p) != this_cpu || !p->sched_class->task_new ||
-							!current->se.on_rq) {
+	if (!p->sched_class->task_new || !current->se.on_rq || !rq->cfs.curr) {
 		activate_task(rq, p, 0);
 	} else {
 		/*
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a9dfb7746c5c..f5f491762e35 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1007,13 +1007,14 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 {
 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
 	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
+	int this_cpu = smp_processor_id();
 
 	sched_info_queued(p);
 
 	update_curr(cfs_rq);
 	place_entity(cfs_rq, se, 1);
 
-	if (sysctl_sched_child_runs_first &&
+	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
 			curr->vruntime < se->vruntime) {
 		/*
 		 * Upon rescheduling, sched_class::put_prev_task() will place