author		Tejun Heo <tj@kernel.org>	2017-06-17 08:10:08 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-07-28 13:49:53 -0700
commit		955dbdf4ce87fd9be4bc8378e26b8c2eb8b3d184 (patch)
tree		93318ed623afbe3cb7433ec5b89d248e6f1ed143 /kernel/sched
parent		4e3274705324c695f56b3e1c194cd99b76eeea0f (diff)
download	linux-955dbdf4ce87fd9be4bc8378e26b8c2eb8b3d184.tar.bz2
sched: Allow migrating kthreads into online but inactive CPUs
Per-cpu workqueues have been tripping CPU affinity sanity checks while a CPU is being offlined. A per-cpu kworker ends up running on a CPU which isn't its target CPU while the CPU is online but inactive.

While the scheduler allows kthreads to wake up on an online but inactive CPU, it doesn't allow a running kthread to be migrated to such a CPU, which leads to an odd situation where setting affinity on a sleeping kthread and on a running kthread leads to different results.

Each mem-reclaim workqueue has one rescuer which guarantees forward progress, and the rescuer needs to bind itself to the CPU which needs help in making forward progress; however, due to the above issue, while set_cpus_allowed_ptr() succeeds, the rescuer doesn't end up on the correct CPU if the CPU is in the process of going offline, tripping the sanity check and executing the work item on the wrong CPU.

This patch updates __migrate_task() so that kthreads can be migrated into an inactive but online CPU.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Reported-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
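For illustration, a minimal sketch of the destination-CPU rule this patch introduces. The helper name dest_cpu_allowed() is hypothetical; in the actual patch the check sits inline in __migrate_task(), as the diff below shows.

/*
 * Hypothetical helper mirroring the patch's check: a kthread may be
 * migrated to any online CPU, even one that is online but inactive
 * (i.e. mid-offline); a user task still needs an active CPU.
 */
static bool dest_cpu_allowed(struct task_struct *p, int dest_cpu)
{
	if (p->flags & PF_KTHREAD)
		return cpu_online(dest_cpu);
	return cpu_active(dest_cpu);
}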
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 17c667b427b4..bfee6ea7db49 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -951,8 +951,13 @@ struct migration_arg {
 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
 				 struct task_struct *p, int dest_cpu)
 {
-	if (unlikely(!cpu_active(dest_cpu)))
-		return rq;
+	if (p->flags & PF_KTHREAD) {
+		if (unlikely(!cpu_online(dest_cpu)))
+			return rq;
+	} else {
+		if (unlikely(!cpu_active(dest_cpu)))
+			return rq;
+	}
 	/* Affinity changed (again). */
 	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
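The practical effect, sketched below under stated assumptions (rescuer_task and target_cpu are hypothetical names; the real rescuer binding lives in kernel/workqueue.c): a rescuer can now reliably pin itself to a CPU that is in the process of going offline.

/*
 * Hypothetical caller illustrating the behavior the commit message
 * describes: a mem-reclaim rescuer (a kthread, so PF_KTHREAD is set)
 * binds itself to the CPU needing help.  With this patch the
 * migration takes effect even while target_cpu is online but
 * inactive, so the work item runs on the intended CPU.
 */
set_cpus_allowed_ptr(rescuer_task, cpumask_of(target_cpu));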