author    Ingo Molnar <mingo@kernel.org>  2012-09-04 14:31:00 +0200
committer Ingo Molnar <mingo@kernel.org>  2012-09-04 14:31:00 +0200
commit    59f979455d7209171ab10a72c8df5c2512976cb4 (patch)
tree      c4c7fc48bd79bf8acbe848a1b979fa9e8ab4ac6a /kernel/sched/core.c
parent    b9bb50db9126c4ccad78af2dfb77277ca17c9b64 (diff)
parent    9450d57eab5cad36774c297da123062744472588 (diff)
Merge branch 'sched/urgent' into sched/core
Merge in the current fixes branch; we are going to apply dependent patches.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/core.c')
 kernel/sched/core.c | 34 ++++++++++------------------------
 1 file changed, 10 insertions(+), 24 deletions(-)
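In short: the merge brings two sched/urgent fixes into kernel/sched/core.c. CPU-hotplug load accounting is consolidated, with migrate_nr_uninterruptible() and calc_global_load_remove() replaced by a single calc_load_migrate() that folds the dying CPU's nr_active delta into calc_load_tasks; and migrate_tasks() no longer calls unthrottle_offline_cfs_rqs().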
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 78d9c965433a..ae66229238a0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4784,27 +4784,17 @@ void idle_task_exit(void)
 }
 
 /*
- * While a dead CPU has no uninterruptible tasks queued at this point,
- * it might still have a nonzero ->nr_uninterruptible counter, because
- * for performance reasons the counter is not stricly tracking tasks to
- * their home CPUs. So we just add the counter to another CPU's counter,
- * to keep the global sum constant after CPU-down:
- */
-static void migrate_nr_uninterruptible(struct rq *rq_src)
-{
-	struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
-
-	rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
-	rq_src->nr_uninterruptible = 0;
-}
-
-/*
- * remove the tasks which were accounted by rq from calc_load_tasks.
+ * Since this CPU is going 'away' for a while, fold any nr_active delta
+ * we might have. Assumes we're called after migrate_tasks() so that the
+ * nr_active count is stable.
+ *
+ * Also see the comment "Global load-average calculations".
  */
-static void calc_global_load_remove(struct rq *rq)
+static void calc_load_migrate(struct rq *rq)
 {
-	atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
-	rq->calc_load_active = 0;
+	long delta = calc_load_fold_active(rq);
+	if (delta)
+		atomic_long_add(delta, &calc_load_tasks);
 }
 
 /*
@@ -4832,9 +4822,6 @@ static void migrate_tasks(unsigned int dead_cpu)
 	 */
 	rq->stop = NULL;
 
-	/* Ensure any throttled groups are reachable by pick_next_task */
-	unthrottle_offline_cfs_rqs(rq);
-
 	for ( ; ; ) {
 		/*
 		 * There's this thread running, bail when that's the only
@@ -5098,8 +5085,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		BUG_ON(rq->nr_running != 1); /* the migration thread */
 		raw_spin_unlock_irqrestore(&rq->lock, flags);
 
-		migrate_nr_uninterruptible(rq);
-		calc_global_load_remove(rq);
+		calc_load_migrate(rq);
 		break;
 #endif
 	}
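
For context (this helper is not part of the hunks above): the new calc_load_migrate() relies on calc_load_fold_active(), defined elsewhere in kernel/sched/core.c. A minimal sketch of the idea, assuming the shape that helper had around this kernel version: each runqueue caches the active count it last published in rq->calc_load_active, so a fold reports only the delta against the global calc_load_tasks sum.

/* Sketch only -- not part of this diff. */
static long calc_load_fold_active(struct rq *this_rq)
{
	long nr_active, delta = 0;

	/* Tasks that count toward the load average: runnable + uninterruptible. */
	nr_active = this_rq->nr_running;
	nr_active += (long)this_rq->nr_uninterruptible;

	/* Publish only the change since the last fold. */
	if (nr_active != this_rq->calc_load_active) {
		delta = nr_active - this_rq->calc_load_active;
		this_rq->calc_load_active = nr_active;
	}

	return delta;
}

This is why the hotplug path can simply atomic_long_add() the returned delta: whatever the dying CPU had not yet reported is pushed into calc_load_tasks exactly once, after migrate_tasks() has emptied the runqueue, which also subsumes the old migrate_nr_uninterruptible() trick of dumping the counter onto another CPU.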