author      Peter Zijlstra <peterz@infradead.org>    2021-01-20 15:05:41 +0100
committer   Peter Zijlstra <peterz@infradead.org>    2021-01-22 15:09:43 +0100
commit      975707f227b07a8212060f94447171d15d7a681b (patch)
tree        b9703358195945550846d0e59259178cf5274d83 /kernel
parent      640f17c82460e9724fd256f0a1f5d99e7ff0bda4 (diff)
sched: Prepare to use balance_push in ttwu()
In preparation of using the balance_push state in ttwu() we need it to
provide a reliable and consistent state. The immediate problem is that
rq->balance_callback gets cleared every schedule() and then re-set in
the balance_push_callback() itself. This is not a reliable signal, so
add a variable that stays set during the entire time.

Also move setting it before the synchronize_rcu() in
sched_cpu_deactivate(), such that we get guaranteed visibility to
ttwu(), which is a preempt-disable region.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Tested-by: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lkml.kernel.org/r/20210121103506.966069627@infradead.org
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched/core.c    11
-rw-r--r--   kernel/sched/sched.h    1
2 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8da0fd7f3cca..16946b55e8e7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7320,6 +7320,7 @@ static void balance_push_set(int cpu, bool on)
 	struct rq_flags rf;
 
 	rq_lock_irqsave(rq, &rf);
+	rq->balance_push = on;
 	if (on) {
 		WARN_ON_ONCE(rq->balance_callback);
 		rq->balance_callback = &balance_push_callback;
@@ -7489,17 +7490,17 @@ int sched_cpu_deactivate(unsigned int cpu)
 	int ret;
 
 	set_cpu_active(cpu, false);
+	balance_push_set(cpu, true);
+
 	/*
-	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
-	 * users of this state to go away such that all new such users will
-	 * observe it.
+	 * We've cleared cpu_active_mask / set balance_push, wait for all
+	 * preempt-disabled and RCU users of this state to go away such that
+	 * all new such users will observe it.
 	 *
 	 * Do sync before park smpboot threads to take care the rcu boost case.
 	 */
 	synchronize_rcu();
 
-	balance_push_set(cpu, true);
-
 	rq_lock_irqsave(rq, &rf);
 	if (rq->rd) {
 		update_rq_clock(rq);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 12ada79d40f3..bb09988451a0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -975,6 +975,7 @@ struct rq {
 	unsigned long		cpu_capacity_orig;
 
 	struct callback_head	*balance_callback;
+	unsigned char		balance_push;
 
 	unsigned char		nohz_idle_balance;
 	unsigned char		idle_balance;