author	Ming Lei <ming.lei@redhat.com>	2018-04-08 17:48:09 +0800
committer	Jens Axboe <axboe@kernel.dk>	2018-04-10 08:38:46 -0600
commit	476f8c98a9bccccbb97866974ffc80879adf2bbb (patch)
tree	2c8163291812b7d1b54790b5ccb57f4f91cb44bf /block/blk-mq.c
parent	bffa9909a6b48d8ca3398dec601bc9162a4020c4 (diff)
download	linux-476f8c98a9bccccbb97866974ffc80879adf2bbb.tar.bz2
blk-mq: avoid writing intermediate result to hctx->next_cpu
This patch figures out the final selected CPU first and writes it to hctx->next_cpu only once, so other dispatch paths can no longer observe an intermediate next_cpu value.

Cc: Stefan Haberland <sth@linux.vnet.ibm.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
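The pattern behind the fix, shown as a minimal sketch rather than the kernel code (struct ctx and pick_next_cpu below are made-up names for illustration): compute the new value in a local variable, let every intermediate step operate on that local, and publish the result to the shared field with a single store at the end, so lockless readers only ever observe the old value or the final one.

/* Illustrative sketch, not blk-mq code: shared field is published exactly once. */
struct ctx {
	int next_cpu;	/* read locklessly by other dispatch paths */
	int batch;	/* how long to keep using the current CPU */
};

static int pick_next_cpu(struct ctx *c, int nr_cpus)
{
	int next_cpu = c->next_cpu;	/* private working copy */

	if (--c->batch <= 0) {
		/* intermediate updates touch only the local variable */
		next_cpu = (next_cpu + 1) % nr_cpus;
		c->batch = 8;
	}

	c->next_cpu = next_cpu;		/* single store: readers see old or final value */
	return next_cpu;
}

Before this patch, blk_mq_hctx_next_cpu() assigned to hctx->next_cpu in the middle of the selection logic, so another dispatch path could read a value that was about to be overwritten; with the patch, the field is written exactly once, after the final CPU has been chosen.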
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	17
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f489ec920807..db178c577068 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1344,26 +1344,24 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 {
 	bool tried = false;
+	int next_cpu = hctx->next_cpu;
 
 	if (hctx->queue->nr_hw_queues == 1)
 		return WORK_CPU_UNBOUND;
 
 	if (--hctx->next_cpu_batch <= 0) {
-		int next_cpu;
 select_cpu:
-		next_cpu = cpumask_next_and(hctx->next_cpu, hctx->cpumask,
+		next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
 				cpu_online_mask);
 		if (next_cpu >= nr_cpu_ids)
-			next_cpu = cpumask_first_and(hctx->cpumask,cpu_online_mask);
+			next_cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
 
 		/*
 		 * No online CPU is found, so have to make sure hctx->next_cpu
 		 * is set correctly for not breaking workqueue.
 		 */
 		if (next_cpu >= nr_cpu_ids)
-			hctx->next_cpu = cpumask_first(hctx->cpumask);
-		else
-			hctx->next_cpu = next_cpu;
+			next_cpu = cpumask_first(hctx->cpumask);
 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
 	}
 
@@ -1371,7 +1369,7 @@ select_cpu:
 	 * Do unbound schedule if we can't find a online CPU for this hctx,
 	 * and it should only happen in the path of handling CPU DEAD.
 	 */
-	if (!cpu_online(hctx->next_cpu)) {
+	if (!cpu_online(next_cpu)) {
 		if (!tried) {
 			tried = true;
 			goto select_cpu;
@@ -1381,10 +1379,13 @@ select_cpu:
 		 * Make sure to re-select CPU next time once after CPUs
 		 * in hctx->cpumask become online again.
 		 */
+		hctx->next_cpu = next_cpu;
 		hctx->next_cpu_batch = 1;
 		return WORK_CPU_UNBOUND;
 	}
-	return hctx->next_cpu;
+
+	hctx->next_cpu = next_cpu;
+	return next_cpu;
 }
 
 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
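For reference, this is how blk_mq_hctx_next_cpu() reads after the patch, reconstructed from the hunks above; the handful of unchanged lines that fall between hunks (closing braces, blank lines, comment openers) are filled in from the surrounding context.

/* blk_mq_hctx_next_cpu() as left by this commit, assembled from the diff above. */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	bool tried = false;
	int next_cpu = hctx->next_cpu;

	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
select_cpu:
		next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
				cpu_online_mask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);

		/*
		 * No online CPU is found, so have to make sure hctx->next_cpu
		 * is set correctly for not breaking workqueue.
		 */
		if (next_cpu >= nr_cpu_ids)
			next_cpu = cpumask_first(hctx->cpumask);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	/*
	 * Do unbound schedule if we can't find a online CPU for this hctx,
	 * and it should only happen in the path of handling CPU DEAD.
	 */
	if (!cpu_online(next_cpu)) {
		if (!tried) {
			tried = true;
			goto select_cpu;
		}

		/*
		 * Make sure to re-select CPU next time once after CPUs
		 * in hctx->cpumask become online again.
		 */
		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = 1;
		return WORK_CPU_UNBOUND;
	}

	hctx->next_cpu = next_cpu;
	return next_cpu;
}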