author    Jens Axboe <axboe@fb.com>    2014-04-04 21:34:48 -0600
committer Jens Axboe <axboe@fb.com>    2014-04-07 08:17:18 -0600
commit    bccb5f7c8bdfe460d95f986c6edf2e75d8052897 (patch)
tree      7c745e10cab2c6cd91dbe2d8664601dda6d89a68
parent    60b0ea120c80cba1cf5fe5ae82a35b1179263de3 (diff)
download  linux-bccb5f7c8bdfe460d95f986c6edf2e75d8052897.tar.bz2
blk-mq: fix potential stall during CPU unplug with IO pending
When a CPU is unplugged, we move the blk_mq_ctx request entries to the current queue. The current code forgets to remap the blk_mq_hw_ctx before marking the software context pending, which breaks if old-cpu and new-cpu don't map to the same hardware queue.

Additionally, if we mark entries as pending in the new hardware queue, then we must also schedule that queue for running. Otherwise requests could sit there until someone else queues IO for that hardware queue.

Signed-off-by: Jens Axboe <axboe@fb.com>
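For readers without the tree handy, here is a sketch of what blk_mq_hctx_notify() looks like with this patch applied. The changed lines are taken directly from the diff below; the elided context (the splice out of the dying CPU's ctx, the per-request remap of rq->mq_ctx) is reconstructed from the blk-mq code of that era and should be treated as an approximation, not an authoritative copy of the source.

    /* Sketch of blk_mq_hctx_notify() after this patch; elided context
     * lines are reconstructed, not verbatim. */
    static void blk_mq_hctx_notify(void *data, unsigned long action,
                                   unsigned int cpu)
    {
            struct blk_mq_hw_ctx *hctx = data;
            struct request_queue *q = hctx->queue;
            struct blk_mq_ctx *ctx;
            LIST_HEAD(tmp);

            if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
                    return;

            /*
             * Move ctx entries to new CPU, if this one is going away.
             */
            ctx = __blk_mq_get_ctx(q, cpu);

            spin_lock(&ctx->lock);
            if (!list_empty(&ctx->rq_list)) {
                    /* Drain the dying CPU's software context (reconstructed) */
                    list_splice_init(&ctx->rq_list, &tmp);
            }
            spin_unlock(&ctx->lock);

            if (list_empty(&tmp))
                    return;

            ctx = blk_mq_get_ctx(q);
            spin_lock(&ctx->lock);

            while (!list_empty(&tmp)) {
                    struct request *rq;

                    /* Retarget each request at the new ctx (reconstructed) */
                    rq = list_first_entry(&tmp, struct request, queuelist);
                    rq->mq_ctx = ctx;
                    list_move_tail(&rq->queuelist, &ctx->rq_list);
            }

            /* Remap: old-cpu and new-cpu may hash to different hw queues */
            hctx = q->mq_ops->map_queue(q, ctx->cpu);
            blk_mq_hctx_mark_pending(hctx, ctx);

            spin_unlock(&ctx->lock);
            blk_mq_put_ctx(ctx);

            /* Kick the (possibly different) hardware queue, asynchronously,
             * so the moved requests are not left waiting for unrelated IO */
            blk_mq_run_hw_queue(hctx, true);
    }

The key ordering is visible at the bottom: the map_queue() call must precede blk_mq_hctx_mark_pending(), and the run must use the remapped hctx.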
-rw-r--r-- block/blk-mq.c | 8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b1bcc619d0ea..1d2a9bdbee57 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -956,6 +956,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
unsigned int cpu)
{
struct blk_mq_hw_ctx *hctx = data;
+ struct request_queue *q = hctx->queue;
struct blk_mq_ctx *ctx;
LIST_HEAD(tmp);
@@ -965,7 +966,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
/*
* Move ctx entries to new CPU, if this one is going away.
*/
- ctx = __blk_mq_get_ctx(hctx->queue, cpu);
+ ctx = __blk_mq_get_ctx(q, cpu);
spin_lock(&ctx->lock);
if (!list_empty(&ctx->rq_list)) {
@@ -977,7 +978,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
if (list_empty(&tmp))
return;
- ctx = blk_mq_get_ctx(hctx->queue);
+ ctx = blk_mq_get_ctx(q);
spin_lock(&ctx->lock);
while (!list_empty(&tmp)) {
@@ -988,10 +989,13 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
list_move_tail(&rq->queuelist, &ctx->rq_list);
}
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
blk_mq_hctx_mark_pending(hctx, ctx);
spin_unlock(&ctx->lock);
blk_mq_put_ctx(ctx);
+
+ blk_mq_run_hw_queue(hctx, true);
}
static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,