author     Christoph Hellwig <hch@infradead.org>   2014-02-10 03:24:38 -0800
committer  Jens Axboe <axboe@fb.com>               2014-02-10 09:27:31 -0700
commit     30a91cb4ef385fe1b260df204ef314d86fff2850 (patch)
tree       97ecc925cb7868f456d5dd3219980be5dc01d900 /block/blk-mq.c
parent     c4540a7d8c1e595560e53acedf88901daf15a2b5 (diff)
download   linux-30a91cb4ef385fe1b260df204ef314d86fff2850.tar.bz2
blk-mq: rework I/O completions
Rework I/O completions to work more like the old code path. blk_mq_end_io now stays out of the business of deferring completions to other CPUs and of calling blk_mark_rq_complete. The latter is very important to allow completing requests that have timed out and thus are already marked complete; the former allows using the IPI callout even for driver-specific completions instead of having to reimplement them.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
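To illustrate the split this patch introduces, here is a rough, hypothetical sketch of the driver-facing flow; it is not part of the patch. Every "my_"-prefixed name is invented for illustration, and only blk_mq_complete_request(), blk_mq_end_io(), rq->errors and the ->complete hook come from the diff below.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/interrupt.h>

/* hypothetical driver helpers, assumed to exist elsewhere in the driver */
struct request *my_driver_pop_completed(void *data);
int my_driver_status(struct request *rq);

/*
 * Hard-IRQ context: hand the finished request to the blk-mq core.
 * blk_mq_complete_request() marks the request complete (so a racing
 * timeout cannot complete it a second time) and, when ipi_redirect is
 * set, bounces the remaining work to the submitting CPU via IPI.
 */
static irqreturn_t my_driver_irq(int irq, void *data)
{
	struct request *rq = my_driver_pop_completed(data);	/* hypothetical */

	rq->errors = my_driver_status(rq);			/* hypothetical */
	blk_mq_complete_request(rq);
	return IRQ_HANDLED;
}

/*
 * Registered as ops->complete and therefore run as the queue's
 * softirq_done_fn, possibly on the CPU that submitted the request.
 * The actual end-of-I/O work still goes through blk_mq_end_io().
 */
static void my_driver_complete(struct request *rq)
{
	blk_mq_end_io(rq, rq->errors);
}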
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--   block/blk-mq.c   52
1 file changed, 31 insertions(+), 21 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index cee96234bf58..14c8f35946e1 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -326,7 +326,7 @@ static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
bio_endio(bio, error);
}
-void blk_mq_complete_request(struct request *rq, int error)
+void blk_mq_end_io(struct request *rq, int error)
{
struct bio *bio = rq->bio;
unsigned int bytes = 0;
@@ -351,46 +351,53 @@ void blk_mq_complete_request(struct request *rq, int error)
else
blk_mq_free_request(rq);
}
+EXPORT_SYMBOL(blk_mq_end_io);
-void __blk_mq_end_io(struct request *rq, int error)
-{
- if (!blk_mark_rq_complete(rq))
- blk_mq_complete_request(rq, error);
-}
-
-static void blk_mq_end_io_remote(void *data)
+static void __blk_mq_complete_request_remote(void *data)
{
struct request *rq = data;
- __blk_mq_end_io(rq, rq->errors);
+ rq->q->softirq_done_fn(rq);
}
-/*
- * End IO on this request on a multiqueue enabled driver. We'll either do
- * it directly inline, or punt to a local IPI handler on the matching
- * remote CPU.
- */
-void blk_mq_end_io(struct request *rq, int error)
+void __blk_mq_complete_request(struct request *rq)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
int cpu;
- if (!ctx->ipi_redirect)
- return __blk_mq_end_io(rq, error);
+ if (!ctx->ipi_redirect) {
+ rq->q->softirq_done_fn(rq);
+ return;
+ }
cpu = get_cpu();
if (cpu != ctx->cpu && cpu_online(ctx->cpu)) {
- rq->errors = error;
- rq->csd.func = blk_mq_end_io_remote;
+ rq->csd.func = __blk_mq_complete_request_remote;
rq->csd.info = rq;
rq->csd.flags = 0;
__smp_call_function_single(ctx->cpu, &rq->csd, 0);
} else {
- __blk_mq_end_io(rq, error);
+ rq->q->softirq_done_fn(rq);
}
put_cpu();
}
-EXPORT_SYMBOL(blk_mq_end_io);
+
+/**
+ * blk_mq_complete_request - end I/O on a request
+ * @rq: the request being processed
+ *
+ * Description:
+ * Ends all I/O on a request. It does not handle partial completions.
+ * The actual completion happens out-of-order, through an IPI handler.
+ **/
+void blk_mq_complete_request(struct request *rq)
+{
+ if (unlikely(blk_should_fake_timeout(rq->q)))
+ return;
+ if (!blk_mark_rq_complete(rq))
+ __blk_mq_complete_request(rq);
+}
+EXPORT_SYMBOL(blk_mq_complete_request);
static void blk_mq_start_request(struct request *rq)
{
@@ -1399,6 +1406,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
if (reg->timeout)
blk_queue_rq_timeout(q, reg->timeout);
+ if (reg->ops->complete)
+ blk_queue_softirq_done(q, reg->ops->complete);
+
blk_mq_init_flush(q);
blk_mq_init_cpu_queues(q, reg->nr_hw_queues);
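The last hunk above is what makes the ->complete hook take effect: if the driver supplies one, blk_mq_init_queue() installs it as the queue's softirq_done_fn. A hypothetical registration could look like the sketch below; .ops, .nr_hw_queues, .timeout, ->complete and blk_mq_init_queue() appear in the diff, while the .queue_depth value, the .map_queue choice and all "my_"-prefixed names are assumptions for illustration only.

static struct blk_mq_ops my_driver_mq_ops = {
	.queue_rq	= my_driver_queue_rq,	/* hypothetical submit hook */
	.map_queue	= blk_mq_map_queue,	/* assumed default CPU-to-queue mapping */
	.complete	= my_driver_complete,	/* installed as q->softirq_done_fn */
};

static struct blk_mq_reg my_driver_mq_reg = {
	.ops		= &my_driver_mq_ops,
	.nr_hw_queues	= 1,
	.queue_depth	= 64,			/* assumption */
	.timeout	= 30 * HZ,
};

/* somewhere in the driver's probe path; my_driver_data is hypothetical */
struct request_queue *q = blk_mq_init_queue(&my_driver_mq_reg, my_driver_data);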