author     Lai Jiangshan <laijs@cn.fujitsu.com>    2013-02-07 13:14:20 -0800
committer  Tejun Heo <tj@kernel.org>               2013-02-07 13:17:51 -0800
commit     8594fade39d3ad02ef856b8c53b5d7cc538a55f5
tree       7f14598186e3fbc5feb91b1c25905b51d106a104 /kernel/workqueue.c
parent     54d5b7d079dffa74597715a892473b474babd5b5
download   linux-8594fade39d3ad02ef856b8c53b5d7cc538a55f5.tar.bz2
workqueue: pick cwq instead of pool in __queue_work()
Currently, __queue_work() chooses the pool to queue a work item to and
then determines cwq from the target wq and the chosen pool. This is a
bit backwards in that we can determine cwq first and simply use
cwq->pool. This way, we can skip get_std_worker_pool() in the queueing
path, which would otherwise be a hurdle when implementing custom worker pools.
Update __queue_work() such that it chooses the target cwq and then uses
cwq->pool instead of the other way around. While at it, add the missing
{} in an if statement.
This patch doesn't introduce any functional changes.
tj: The original patch had two get_cwq() calls - the first to
determine the pool by doing get_cwq(cpu, wq)->pool and the second
to determine the matching cwq from get_cwq(pool->cpu, wq).
Updated the function such that it chooses cwq instead of pool and
removed the second call. Rewrote the description.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
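For readers less familiar with the workqueue internals, here is a minimal
userspace sketch of the cwq-first shape described above. The structs, the
get_cwq() helper and the pthread locking are simplified stand-ins invented
for illustration, not the kernel's definitions; only the ordering (pick the
cwq, then lock through cwq->pool) mirrors the patched __queue_work().

/*
 * Toy userspace model of the cwq-first queueing order; all names below
 * are simplified stand-ins for the kernel structures, not kernel code.
 */
#include <stdio.h>
#include <pthread.h>

struct worker_pool {
	int cpu;
	pthread_mutex_t lock;
};

struct cpu_workqueue_struct {
	struct worker_pool *pool;		/* pool this cwq feeds work to */
};

struct workqueue_struct {
	struct cpu_workqueue_struct cwqs[2];	/* one cwq per "CPU" */
};

struct work_struct {
	struct worker_pool *last_pool;		/* pool the work last ran on */
};

/* stand-in for the per-cpu cwq lookup */
static struct cpu_workqueue_struct *get_cwq(int cpu, struct workqueue_struct *wq)
{
	return &wq->cwqs[cpu];
}

/* mirrors the patched flow: choose the cwq, then take cwq->pool->lock */
static void toy_queue_work(int cpu, struct workqueue_struct *wq,
			   struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

	/*
	 * The real __queue_work() may re-pick cwq from the pool the work
	 * last ran on to preserve non-reentrancy; omitted here.
	 */
	pthread_mutex_lock(&cwq->pool->lock);
	printf("queued on cpu %d via cwq->pool\n", cwq->pool->cpu);
	work->last_pool = cwq->pool;
	pthread_mutex_unlock(&cwq->pool->lock);
}

int main(void)
{
	struct worker_pool pools[2] = {
		{ .cpu = 0, .lock = PTHREAD_MUTEX_INITIALIZER },
		{ .cpu = 1, .lock = PTHREAD_MUTEX_INITIALIZER },
	};
	struct workqueue_struct wq = {
		.cwqs = { { .pool = &pools[0] }, { .pool = &pools[1] } },
	};
	struct work_struct work = { .last_pool = NULL };

	toy_queue_work(0, &wq, &work);
	toy_queue_work(1, &wq, &work);
	return 0;
}

Compile with cc -pthread; the only point of the sketch is that once cwq is
known, no separate pool variable is needed, since the pool is always reached
as cwq->pool.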
Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 29
 1 file changed, 13 insertions(+), 16 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1801c37b28c4..d6fdce12ca7e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1193,8 +1193,6 @@ static bool is_chained_work(struct workqueue_struct *wq)
 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 			 struct work_struct *work)
 {
-	bool highpri = wq->flags & WQ_HIGHPRI;
-	struct worker_pool *pool;
 	struct cpu_workqueue_struct *cwq;
 	struct list_head *worklist;
 	unsigned int work_flags;
@@ -1215,7 +1213,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 	    WARN_ON_ONCE(!is_chained_work(wq)))
 		return;
 
-	/* determine pool to use */
+	/* determine the cwq to use */
 	if (!(wq->flags & WQ_UNBOUND)) {
 		struct worker_pool *last_pool;
 
@@ -1228,37 +1226,36 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 		 * work needs to be queued on that cpu to guarantee
 		 * non-reentrancy.
 		 */
-		pool = get_std_worker_pool(cpu, highpri);
+		cwq = get_cwq(cpu, wq);
 		last_pool = get_work_pool(work);
 
-		if (last_pool && last_pool != pool) {
+		if (last_pool && last_pool != cwq->pool) {
 			struct worker *worker;
 
 			spin_lock(&last_pool->lock);
 
 			worker = find_worker_executing_work(last_pool, work);
 
-			if (worker && worker->current_cwq->wq == wq)
-				pool = last_pool;
-			else {
+			if (worker && worker->current_cwq->wq == wq) {
+				cwq = get_cwq(last_pool->cpu, wq);
+			} else {
 				/* meh... not running there, queue here */
 				spin_unlock(&last_pool->lock);
-				spin_lock(&pool->lock);
+				spin_lock(&cwq->pool->lock);
 			}
 		} else {
-			spin_lock(&pool->lock);
+			spin_lock(&cwq->pool->lock);
 		}
 	} else {
-		pool = get_std_worker_pool(WORK_CPU_UNBOUND, highpri);
-		spin_lock(&pool->lock);
+		cwq = get_cwq(WORK_CPU_UNBOUND, wq);
+		spin_lock(&cwq->pool->lock);
 	}
 
-	/* pool determined, get cwq and queue */
-	cwq = get_cwq(pool->cpu, wq);
+	/* cwq determined, queue */
 	trace_workqueue_queue_work(req_cpu, cwq, work);
 
 	if (WARN_ON(!list_empty(&work->entry))) {
-		spin_unlock(&pool->lock);
+		spin_unlock(&cwq->pool->lock);
 		return;
 	}
 
@@ -1276,7 +1273,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
 	insert_work(cwq, work, worklist, work_flags);
 
-	spin_unlock(&pool->lock);
+	spin_unlock(&cwq->pool->lock);
 }
 
 /**