author    Peter Zijlstra <peterz@infradead.org>  2021-01-12 11:26:49 +0100
committer Peter Zijlstra <peterz@infradead.org>  2021-01-22 15:09:42 +0100
commit    5c25b5ff89f004c30b04759dc34ace8585a4085f (patch)
tree      a2132a7d24a2e50ef4d3703f3c00dbb55ce7a44e /kernel/workqueue.c
parent    ac687e6e8c26181a33270efd1a2e2241377924b0 (diff)
download  linux-5c25b5ff89f004c30b04759dc34ace8585a4085f.tar.bz2
workqueue: Tag bound workers with KTHREAD_IS_PER_CPU
Mark the per-cpu workqueue workers as KTHREAD_IS_PER_CPU.

Workqueues have unfortunate semantics in that per-cpu workers are not
flushed and parked by default during hotplug; however, a subset does a
manual flush on hotplug and relies hard on those workers for
correctness.

Therefore play silly games..

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Tested-by: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lkml.kernel.org/r/20210121103506.693465814@infradead.org
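In short, the patch pairs kthread_set_per_cpu() calls with the existing
attach/detach and unbind/rebind paths: tag a worker with its pool's CPU
when it binds to an associated pool, and clear the tag (cpu == -1)
before its affinity is widened. A minimal sketch of that pairing,
assuming kthread_set_per_cpu() behaves as introduced by the parent
commit in this series; tag_worker()/untag_worker() are hypothetical
names used here to condense the hunks below, not kernel functions:

	/* Hypothetical condensation of the attach/detach hunks in
	 * worker_attach_to_pool() / worker_detach_from_pool() below. */
	static void tag_worker(struct worker *worker, struct worker_pool *pool)
	{
		if (pool->flags & POOL_DISASSOCIATED)
			worker->flags |= WORKER_UNBOUND;	/* CPU went away */
		else
			kthread_set_per_cpu(worker->task, pool->cpu);
	}

	static void untag_worker(struct worker *worker)
	{
		/* cpu == -1 clears KTHREAD_IS_PER_CPU again */
		kthread_set_per_cpu(worker->task, -1);
	}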
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1646331546eb..cce34336fbd7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1861,6 +1861,8 @@ static void worker_attach_to_pool(struct worker *worker,
 	 */
 	if (pool->flags & POOL_DISASSOCIATED)
 		worker->flags |= WORKER_UNBOUND;
+	else
+		kthread_set_per_cpu(worker->task, pool->cpu);
 
 	list_add_tail(&worker->node, &pool->workers);
 	worker->pool = pool;
@@ -1883,6 +1885,7 @@ static void worker_detach_from_pool(struct worker *worker)
 
 	mutex_lock(&wq_pool_attach_mutex);
+	kthread_set_per_cpu(worker->task, -1);
 	list_del(&worker->node);
 	worker->pool = NULL;
@@ -4919,8 +4922,10 @@ static void unbind_workers(int cpu)
 
 		raw_spin_unlock_irq(&pool->lock);
 
-		for_each_pool_worker(worker, pool)
+		for_each_pool_worker(worker, pool) {
+			kthread_set_per_cpu(worker->task, -1);
 			WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
+		}
 
 		mutex_unlock(&wq_pool_attach_mutex);
@@ -4972,9 +4977,11 @@ static void rebind_workers(struct worker_pool *pool)
 	 * of all workers first and then clear UNBOUND.  As we're called
 	 * from CPU_ONLINE, the following shouldn't fail.
 	 */
-	for_each_pool_worker(worker, pool)
+	for_each_pool_worker(worker, pool) {
+		kthread_set_per_cpu(worker->task, pool->cpu);
 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
 						  pool->attrs->cpumask) < 0);
+	}
 
 	raw_spin_lock_irq(&pool->lock);
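For context, an assumption about the rest of this series rather than
anything shown in this diff: the scheduler's hotplug path consults the
tag via kthread_is_per_cpu() (also from the parent commit) to let
tagged workers keep running on an outgoing CPU until the workqueue
callbacks above have flushed and untagged them. A hedged sketch of that
consumer side; is_exempt_from_push() is a hypothetical name:

	/* Hypothetical consumer-side check: a kthread tagged via
	 * kthread_set_per_cpu() is not pushed off a dying CPU, which is
	 * why unbind_workers() clears the tag before widening the
	 * worker's affinity to cpu_possible_mask. */
	static bool is_exempt_from_push(struct task_struct *p)
	{
		return (p->flags & PF_KTHREAD) && kthread_is_per_cpu(p);
	}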