author		Lai Jiangshan <laijs@linux.alibaba.com>	2021-12-07 15:35:40 +0800
committer	Tejun Heo <tj@kernel.org>	2021-12-09 12:20:24 -1000
commit		b4ac9384ac057c5bf035fbe82fc162fa2f7b15a9 (patch)
tree		5dcefbd3496a2e983bf7bd6a6d74b66b843c4b72 /kernel/workqueue.c
parent		11b45b0bf402b53c94c86737a440363fc36f03cd (diff)
download	linux-b4ac9384ac057c5bf035fbe82fc162fa2f7b15a9.tar.bz2
workqueue: Remove schedule() in unbind_workers()
Commit 6d25be5782e4 ("sched/core, workqueues: Distangle worker accounting from rq lock") changed the schedule callbacks for workqueue and moved the schedule callback from the wakeup code to the end of schedule(), in the worker's own process context.

This means that wq_worker_running() is guaranteed to see the %WORKER_UNBOUND flag once the worker is scheduled again, because unbind_workers() runs on the same CPU that all of the pool's workers are bound to. The explicit schedule() call in unbind_workers() is therefore no longer needed and can be removed.

Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
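To make the same-CPU argument concrete, here is a minimal userspace model (an illustrative sketch only; worker_flags, pin_to_cpu0() and the other names are hypothetical, not from the kernel tree). Both threads are pinned to CPU 0, so the flag store by the "unbind" path is ordered before any load the "worker" performs after it is next scheduled on that CPU:

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

/* Stands in for worker->flags. volatile only keeps the compiler from
 * caching the load in this demo; the kernel's argument relies on
 * same-CPU scheduling, not on volatile or memory barriers. */
static volatile int worker_flags;
#define WORKER_UNBOUND	0x1

static void pin_to_cpu0(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);
	sched_setaffinity(0, sizeof(set), &set);	/* pid 0: calling thread */
}

static void *worker_thread(void *arg)
{
	(void)arg;
	pin_to_cpu0();
	/* Models wq_worker_running(): runs in the worker's own context
	 * right after it is scheduled in. Sharing CPU 0 with the
	 * "unbind" path means it cannot run concurrently with the
	 * store below, so it observes the flag once it runs again. */
	while (!(worker_flags & WORKER_UNBOUND))
		sched_yield();		/* give the "unbind" thread the CPU */
	puts("worker: saw WORKER_UNBOUND after being scheduled in");
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pin_to_cpu0();
	pthread_create(&worker, NULL, worker_thread, NULL);
	worker_flags |= WORKER_UNBOUND;	/* models unbind_workers() */
	/* No schedule() or barrier is needed in this model: the worker
	 * can only examine the flag after it next runs on CPU 0, which
	 * is after this store in that CPU's program order. */
	pthread_join(worker, NULL);
	return 0;
}

Built with "gcc demo.c -pthread", this prints the message once the worker runs after the store. The point mirrors the commit message: a single CPU's program order supplies the ordering that the removed schedule() call used to provide back when the sched callbacks could be invoked from other CPUs.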
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	11
1 file changed, 3 insertions(+), 8 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ae58c6ace23f..499a264183ef 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4979,6 +4979,9 @@ static void unbind_workers(int cpu)
 	 * We've blocked all attach/detach operations. Make all workers
 	 * unbound and set DISASSOCIATED. Before this, all workers
 	 * must be on the cpu. After this, they may become diasporas.
+	 * And the preemption-disabled sections in their sched callbacks
+	 * are guaranteed to see WORKER_UNBOUND since the code here
+	 * is on the same cpu.
 	 */
 	for_each_pool_worker(worker, pool)
 		worker->flags |= WORKER_UNBOUND;
@@ -4995,14 +4998,6 @@ static void unbind_workers(int cpu)
 	mutex_unlock(&wq_pool_attach_mutex);
 
 	/*
-	 * Call schedule() so that we cross rq->lock and thus can
-	 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
-	 * This is necessary as scheduler callbacks may be invoked
-	 * from other cpus.
-	 */
-	schedule();
-
-	/*
 	 * Sched callbacks are disabled now. Zap nr_running.
 	 * After this, nr_running stays zero and need_more_worker()
 	 * and keep_working() are always true as long as the
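For reference, this is how the touched region of unbind_workers() reads once the patch is applied, assembled purely from the hunks above (elided context and the truncated trailing comment are marked with ...):

static void unbind_workers(int cpu)
{
	...
	/*
	 * We've blocked all attach/detach operations. Make all workers
	 * unbound and set DISASSOCIATED. Before this, all workers
	 * must be on the cpu. After this, they may become diasporas.
	 * And the preemption-disabled sections in their sched callbacks
	 * are guaranteed to see WORKER_UNBOUND since the code here
	 * is on the same cpu.
	 */
	for_each_pool_worker(worker, pool)
		worker->flags |= WORKER_UNBOUND;
	...
	mutex_unlock(&wq_pool_attach_mutex);

	/*
	 * Sched callbacks are disabled now. Zap nr_running.
	 * After this, nr_running stays zero and need_more_worker()
	 * and keep_working() are always true as long as the
	 * ...
	 */
	...
}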