author		Daniel Jordan <daniel.m.jordan@oracle.com>	2019-09-05 21:40:27 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2019-09-13 21:15:40 +1000
commit		45d153c08bc73c8ced640dc20d8f2b749a6cb0d0 (patch)
tree		52e892c2a0ed227a9da6a081fd7b842bc69fa5b4 /kernel/padata.c
parent		cc491d8e6486c56e07e60d9992cd56f63dc9fd6c (diff)
padata: use separate workqueues for parallel and serial work
padata currently uses one per-CPU workqueue per instance for all work.
Prepare for running parallel jobs on an unbound workqueue by introducing
dedicated workqueues for parallel and serial work.
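
For reference, a minimal sketch of the resulting instance state, assuming the
field names used in the diff below.  The real struct padata_instance lives in
include/linux/padata.h, which is outside this diffstat, so treat this as an
illustration rather than the exact upstream definition:

	/*
	 * Illustrative sketch only: the point is that the single pinst->wq
	 * becomes two dedicated workqueues after this patch.
	 */
	struct workqueue_struct;

	struct padata_instance_sketch {
		struct workqueue_struct *parallel_wq;	/* runs parallel jobs */
		struct workqueue_struct *serial_wq;	/* reorder + serial callbacks */
		/* cpumasks, parallel_data, kobject, ... unchanged */
	};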
Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Acked-by: Steffen Klassert <steffen.klassert@secunet.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: linux-crypto@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'kernel/padata.c')
-rw-r--r--	kernel/padata.c	28
1 file changed, 18 insertions(+), 10 deletions(-)
diff --git a/kernel/padata.c b/kernel/padata.c
index 8a362923c488..669f5d53d357 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -152,7 +152,7 @@ int padata_do_parallel(struct padata_instance *pinst,
 	list_add_tail(&padata->list, &queue->parallel.list);
 	spin_unlock(&queue->parallel.lock);
 
-	queue_work_on(target_cpu, pinst->wq, &queue->work);
+	queue_work_on(target_cpu, pinst->parallel_wq, &queue->work);
 
 out:
 	rcu_read_unlock_bh();
@@ -261,7 +261,7 @@ static void padata_reorder(struct parallel_data *pd)
 		list_add_tail(&padata->list, &squeue->serial.list);
 		spin_unlock(&squeue->serial.lock);
 
-		queue_work_on(cb_cpu, pinst->wq, &squeue->work);
+		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
 	}
 
 	spin_unlock_bh(&pd->lock);
@@ -278,7 +278,7 @@ static void padata_reorder(struct parallel_data *pd)
 
 	next_queue = per_cpu_ptr(pd->pqueue, pd->cpu);
 	if (!list_empty(&next_queue->reorder.list))
-		queue_work(pinst->wq, &pd->reorder_work);
+		queue_work(pinst->serial_wq, &pd->reorder_work);
 }
 
 static void invoke_padata_reorder(struct work_struct *work)
@@ -818,7 +818,8 @@ static void __padata_free(struct padata_instance *pinst)
 	padata_free_pd(pinst->pd);
 	free_cpumask_var(pinst->cpumask.pcpu);
 	free_cpumask_var(pinst->cpumask.cbcpu);
-	destroy_workqueue(pinst->wq);
+	destroy_workqueue(pinst->serial_wq);
+	destroy_workqueue(pinst->parallel_wq);
 	kfree(pinst);
 }
 
@@ -967,18 +968,23 @@ static struct padata_instance *padata_alloc(const char *name,
 	if (!pinst)
 		goto err;
 
-	pinst->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
-				    1, name);
-	if (!pinst->wq)
+	pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_MEM_RECLAIM |
+					     WQ_CPU_INTENSIVE, 1, name);
+	if (!pinst->parallel_wq)
 		goto err_free_inst;
 
 	get_online_cpus();
 
-	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
+	pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
+					   WQ_CPU_INTENSIVE, 1, name);
+	if (!pinst->serial_wq)
 		goto err_put_cpus;
+
+	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
+		goto err_free_serial_wq;
 	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
 		free_cpumask_var(pinst->cpumask.pcpu);
-		goto err_put_cpus;
+		goto err_free_serial_wq;
 	}
 	if (!padata_validate_cpumask(pinst, pcpumask) ||
 	    !padata_validate_cpumask(pinst, cbcpumask))
@@ -1010,9 +1016,11 @@ static struct padata_instance *padata_alloc(const char *name,
 err_free_masks:
 	free_cpumask_var(pinst->cpumask.pcpu);
 	free_cpumask_var(pinst->cpumask.cbcpu);
+err_free_serial_wq:
+	destroy_workqueue(pinst->serial_wq);
 err_put_cpus:
 	put_online_cpus();
-	destroy_workqueue(pinst->wq);
+	destroy_workqueue(pinst->parallel_wq);
 err_free_inst:
 	kfree(pinst);
 err:
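
As a usage illustration (not part of this patch), a rough sketch of how a
padata user's job now crosses the two workqueues.  struct padata_priv and
padata_do_serial() are the existing padata API; my_parallel()/my_serial() are
hypothetical callback names for a user such as pcrypt:

	#include <linux/padata.h>

	/*
	 * Hypothetical padata user.  The ->parallel() callback runs from
	 * pinst->parallel_wq once padata_do_parallel() has queued the job;
	 * the ->serial() callback runs from pinst->serial_wq, in submission
	 * order, after padata_do_serial() is called.
	 */
	static void my_serial(struct padata_priv *padata)
	{
		/* runs on the callback CPU via pinst->serial_wq */
	}

	static void my_parallel(struct padata_priv *padata)
	{
		/* CPU-intensive work runs here via pinst->parallel_wq */
		padata_do_serial(padata);	/* request in-order completion */
	}

	static struct padata_priv my_job = {
		.parallel = my_parallel,
		.serial   = my_serial,
	};

Both workqueues keep the WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE flags and the
max_active of 1 that the single queue had; per the commit message, splitting
them is what prepares the parallel side to move to an unbound workqueue later.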