author     Tejun Heo <tj@kernel.org>  2017-12-04 14:41:11 -0800
committer  Tejun Heo <tj@kernel.org>  2017-12-04 14:41:11 -0800
commit     11db855c3d06e82f432cb1bafd73296586d5ceec (patch)
tree       0b3f09d6e7da0c4bf5fdcae5b617ce046e6b8aa8 /kernel
parent     52cf373c37a684f8fc279d541307fad39d206376 (diff)
download   linux-11db855c3d06e82f432cb1bafd73296586d5ceec.tar.bz2
Revert "cpuset: Make cpuset hotplug synchronous"
This reverts commit 1599a185f0e6113be185b9fb809c621c73865829.

This and the previous commit led to another circular locking scenario, and the scenario fixed by the reverted commit no longer exists after e8b3f8db7aad ("workqueue/hotplug: simplify workqueue_offline_cpu()"), which removed work item flushing from the hotplug path. Revert it for now.

Signed-off-by: Tejun Heo <tj@kernel.org>
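The pattern this revert restores — the CPU-hotplug callback merely schedules a work item, and the resume path later flushes it — can be summarized in a minimal kernel-style C sketch. This is illustrative only; the my_* names below are hypothetical stand-ins, not kernel symbols:

#include <linux/workqueue.h>

static void my_hotplug_workfn(struct work_struct *work);
static DECLARE_WORK(my_hotplug_work, my_hotplug_workfn);

/*
 * Runs later in workqueue context, outside the cpu hotplug lock,
 * so subsystem locks can be taken without inverting the lock order.
 */
static void my_hotplug_workfn(struct work_struct *work)
{
	/* take subsystem locks and rebuild state here */
}

/* Hotplug side: called with cpu_hotplug_lock held, so only punt. */
void my_update_active_cpus(void)
{
	schedule_work(&my_hotplug_work);
}

/* Resume side: wait for the punted work once the lock is dropped. */
void my_wait_for_hotplug(void)
{
	flush_work(&my_hotplug_work);
}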
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup/cpuset.c | 41 +++++++++++++++++++++--------------------
-rw-r--r--  kernel/power/process.c |  2 ++
-rw-r--r--  kernel/sched/core.c    |  1 +
3 files changed, 24 insertions(+), 20 deletions(-)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 227bc25d951d..cab5fd1ee767 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -2277,8 +2277,15 @@ retry:
 	mutex_unlock(&cpuset_mutex);
 }
 
+static bool force_rebuild;
+
+void cpuset_force_rebuild(void)
+{
+	force_rebuild = true;
+}
+
 /**
- * cpuset_hotplug - handle CPU/memory hotunplug for a cpuset
+ * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
  *
  * This function is called after either CPU or memory configuration has
  * changed and updates cpuset accordingly.  The top_cpuset is always
@@ -2293,7 +2300,7 @@ retry:
 * Note that CPU offlining during suspend is ignored.  We don't modify
 * cpusets across suspend/resume cycles at all.
 */
-static void cpuset_hotplug(bool use_cpu_hp_lock)
+static void cpuset_hotplug_workfn(struct work_struct *work)
 {
 	static cpumask_t new_cpus;
 	static nodemask_t new_mems;
@@ -2351,31 +2358,25 @@ static void cpuset_hotplug(bool use_cpu_hp_lock)
 	}
 
 	/* rebuild sched domains if cpus_allowed has changed */
-	if (cpus_updated) {
-		if (use_cpu_hp_lock)
-			rebuild_sched_domains();
-		else {
-			/* Acquiring cpu_hotplug_lock is not required.
-			 * When cpuset_hotplug() is called in hotplug path,
-			 * cpu_hotplug_lock is held by the hotplug context
-			 * which is waiting for cpuhp_thread_fun to indicate
-			 * completion of callback.
-			 */
-			mutex_lock(&cpuset_mutex);
-			rebuild_sched_domains_cpuslocked();
-			mutex_unlock(&cpuset_mutex);
-		}
+	if (cpus_updated || force_rebuild) {
+		force_rebuild = false;
+		rebuild_sched_domains();
 	}
 }
 
-static void cpuset_hotplug_workfn(struct work_struct *work)
+void cpuset_update_active_cpus(void)
 {
-	cpuset_hotplug(true);
+	/*
+	 * We're inside cpu hotplug critical region which usually nests
+	 * inside cgroup synchronization.  Bounce actual hotplug processing
+	 * to a work item to avoid reverse locking order.
+	 */
+	schedule_work(&cpuset_hotplug_work);
 }
 
-void cpuset_update_active_cpus(void)
+void cpuset_wait_for_hotplug(void)
 {
-	cpuset_hotplug(false);
+	flush_work(&cpuset_hotplug_work);
 }
 
 /*
diff --git a/kernel/power/process.c b/kernel/power/process.c
index c326d7235c5f..7381d49a44db 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -204,6 +204,8 @@ void thaw_processes(void)
 	__usermodehelper_set_disable_depth(UMH_FREEZING);
 	thaw_workqueues();
 
+	cpuset_wait_for_hotplug();
+
 	read_lock(&tasklist_lock);
 	for_each_process_thread(g, p) {
 		/* No other threads should have PF_SUSPEND_TASK set */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 88b3450b29ab..75554f366fd3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5624,6 +5624,7 @@ static void cpuset_cpu_active(void)
 		 * restore the original sched domains by considering the
 		 * cpuset configurations.
 		 */
+		cpuset_force_rebuild();
 	}
 	cpuset_update_active_cpus();
 }
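
Taken together, the restored interfaces pair up across the three files touched above. A minimal sketch of that call order, assuming the declarations in include/linux/cpuset.h; the wrapper function is illustrative only and does not exist in the kernel:

#include <linux/cpuset.h>

/* Illustrative only: mirrors the call order this revert restores. */
static void resume_flow_example(void)
{
	/*
	 * Hotplug side (cf. cpuset_cpu_active() above): request an
	 * unconditional sched domain rebuild, then kick the async
	 * cpuset work item.
	 */
	cpuset_force_rebuild();
	cpuset_update_active_cpus();

	/*
	 * Resume side (cf. thaw_processes() above): wait for that
	 * work to finish before thawed tasks start running again.
	 */
	cpuset_wait_for_hotplug();
}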