author | Ingo Molnar <mingo@elte.hu> | 2010-12-08 20:15:26 +0100 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2010-12-08 20:15:29 +0100 |
commit | 8e9255e6a2141e050d51bc4d96dbef494a87d653 (patch) | |
tree | f190b142830153eaab05555a93c4f71a144ba3d4 /kernel/sched_fair.c | |
parent | 5091faa449ee0b7d73bc296a93bca9540fc51d0a (diff) | |
parent | 6313e3c21743cc88bb5bd8aa72948ee1e83937b6 (diff) | |
download | linux-8e9255e6a2141e050d51bc4d96dbef494a87d653.tar.bz2 | |
Merge branch 'linus' into sched/core
Merge reason: we want to queue up a dependent cleanup
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r-- | kernel/sched_fair.c | 8 |
1 file changed, 3 insertions(+), 5 deletions(-)
```diff
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index fdbdb5084c49..c88671718bc9 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1903,10 +1903,6 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
 	set_task_cpu(p, this_cpu);
 	activate_task(this_rq, p, 0);
 	check_preempt_curr(this_rq, p, 0);
-
-	/* re-arm NEWIDLE balancing when moving tasks */
-	src_rq->avg_idle = this_rq->avg_idle = 2*sysctl_sched_migration_cost;
-	this_rq->idle_stamp = 0;
 }
 
 /*
@@ -3408,8 +3404,10 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 		interval = msecs_to_jiffies(sd->balance_interval);
 		if (time_after(next_balance, sd->last_balance + interval))
 			next_balance = sd->last_balance + interval;
-		if (pulled_task)
+		if (pulled_task) {
+			this_rq->idle_stamp = 0;
 			break;
+		}
 	}
 
 	raw_spin_lock(&this_rq->lock);
```
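The net effect of the two hunks is that `idle_stamp` is no longer reset unconditionally from `pull_task()` on every migration; instead `idle_balance()` clears it only once it has actually pulled a task. Below is a minimal user-space sketch of that control flow, not kernel code: `struct rq` is reduced to a single `idle_stamp` field and `try_pull_from_domain()` is an invented stand-in for one `load_balance()` pass over a scheduling domain.

```c
/*
 * Hypothetical stand-alone sketch (not kernel code): struct rq is reduced
 * to a single idle_stamp field and try_pull_from_domain() is an invented
 * stand-in for one load_balance() pass over a scheduling domain.
 */
#include <stdio.h>

struct rq {
	unsigned long long idle_stamp;	/* time the CPU went idle; 0 = not idle */
};

/* Invented helper: pretend only domain 1 has a runnable task to steal. */
static int try_pull_from_domain(int domain, struct rq *this_rq)
{
	(void)this_rq;
	return domain == 1;
}

static void idle_balance(struct rq *this_rq)
{
	int pulled_task = 0;

	for (int domain = 0; domain < 3; domain++) {
		pulled_task = try_pull_from_domain(domain, this_rq);
		if (pulled_task) {
			/* Clear the stamp only once work was actually found. */
			this_rq->idle_stamp = 0;
			break;
		}
	}

	printf("pulled=%d idle_stamp=%llu\n", pulled_task, this_rq->idle_stamp);
}

int main(void)
{
	struct rq rq = { .idle_stamp = 12345ULL };

	idle_balance(&rq);	/* prints: pulled=1 idle_stamp=0 */
	return 0;
}
```

The shape matters because a balance pass that pulls nothing leaves `idle_stamp` untouched, so the CPU still counts as idle for later NEWIDLE decisions; only a successful pull marks it busy again.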