author     Peter Zijlstra <peterz@infradead.org>  2020-09-25 16:42:31 +0200
committer  Peter Zijlstra <peterz@infradead.org>  2020-11-10 18:38:59 +0100
commit     120455c514f7321981c907a01c543b05aff3f254 (patch)
tree       fbc0bfe58c8e457bea81218250965b0c70abffe7 /kernel/sched/core.c
parent     1cf12e08bc4d50a76b80c42a3109c53d8794a0c9 (diff)
sched: Fix hotplug vs CPU bandwidth control
Since we now migrate tasks away before DYING, we should also move
bandwidth unthrottle; otherwise we can gain tasks from unthrottle after
we expect all tasks to be gone already.

Also, it looks like the RT balancers don't respect cpu_active() and
instead rely in part on rq->online; complete this. This too requires we
do set_rq_offline() earlier, to match the cpu_active() semantics. (The
bigger patch is to convert RT to cpu_active() entirely.)

Since set_rq_online() is called from sched_cpu_activate(), place
set_rq_offline() in sched_cpu_deactivate().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Reviewed-by: Daniel Bristot de Oliveira <bristot@redhat.com>
Link: https://lkml.kernel.org/r/20201023102346.639538965@infradead.org
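To make the ordering concrete, here is a minimal userspace sketch, not
kernel code: every toy_* name is a hypothetical stand-in for the kernel
structures and functions named above. It models why set_rq_offline()
must happen at deactivate time: once rq->online is cleared there, a
later bandwidth unthrottle can no longer queue tasks, so the dying step
sees the runqueue empty.

	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct toy_rq {
		bool active;   /* models cpu_active(cpu) */
		bool online;   /* models rq->online */
		int  nr_tasks; /* models rq->nr_running, sans the stopper */
	};

	/* Models sched_cpu_activate(): rq->online follows cpu_active(). */
	static void toy_activate(struct toy_rq *rq)
	{
		rq->active = true;
		rq->online = true;             /* set_rq_online() */
	}

	/* Models sched_cpu_deactivate() after this patch: the rq goes
	 * offline here, before the DYING step, matching cpu_active(). */
	static void toy_deactivate(struct toy_rq *rq)
	{
		rq->active = false;
		rq->online = false;            /* set_rq_offline(), moved up */
	}

	/* Models bandwidth unthrottle: only an online rq may gain tasks. */
	static void toy_unthrottle(struct toy_rq *rq)
	{
		if (rq->online)
			rq->nr_tasks++;
	}

	/* Models sched_cpu_dying(): no tasks may have been gained by now;
	 * the kernel asserts this via BUG_ON(rq->nr_running != 1). */
	static void toy_dying(struct toy_rq *rq)
	{
		assert(rq->nr_tasks == 0);
	}

	int main(void)
	{
		struct toy_rq rq = { false, false, 0 };

		toy_activate(&rq);
		toy_deactivate(&rq);
		toy_unthrottle(&rq); /* no-op: rq went offline at deactivate */
		toy_dying(&rq);
		puts("no tasks gained after deactivate");
		return 0;
	}

The design point is symmetry: set_rq_online() runs from
sched_cpu_activate(), so its inverse belongs in sched_cpu_deactivate(),
keeping rq->online aligned with cpu_active().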
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c  14
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6c89806c834b..dcb88a06ef14 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6977,6 +6977,8 @@ int sched_cpu_activate(unsigned int cpu)
 
 int sched_cpu_deactivate(unsigned int cpu)
 {
+	struct rq *rq = cpu_rq(cpu);
+	struct rq_flags rf;
 	int ret;
 
 	set_cpu_active(cpu, false);
@@ -6991,6 +6993,14 @@ int sched_cpu_deactivate(unsigned int cpu)
 
 	balance_push_set(cpu, true);
 
+	rq_lock_irqsave(rq, &rf);
+	if (rq->rd) {
+		update_rq_clock(rq);
+		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+		set_rq_offline(rq);
+	}
+	rq_unlock_irqrestore(rq, &rf);
+
 #ifdef CONFIG_SCHED_SMT
 	/*
 	 * When going down, decrement the number of cores with SMT present.
@@ -7072,10 +7082,6 @@ int sched_cpu_dying(unsigned int cpu)
 	sched_tick_stop(cpu);
 
 	rq_lock_irqsave(rq, &rf);
-	if (rq->rd) {
-		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-		set_rq_offline(rq);
-	}
 	BUG_ON(rq->nr_running != 1);
 	rq_unlock_irqrestore(rq, &rf);