author    | Christoph Hellwig <hch@lst.de> | 2017-06-16 18:15:26 +0200
committer | Jens Axboe <axboe@kernel.dk>   | 2017-06-18 10:08:55 -0600
commit    | 5bbf4e5a8e3a780874b2ed77bd1bd57850f3f6da
tree      | 06835c621f167a8cc563d57a6d6e29d127078967 /block/blk-mq.c
parent    | 44e8c2bff80bb384a608406009948f90a78bf8a3
download  | linux-5bbf4e5a8e3a780874b2ed77bd1bd57850f3f6da.tar.bz2
blk-mq-sched: unify request prepare methods
This patch makes sure we always allocate requests in the core blk-mq
code and use a common prepare_request method to initialize them for
both mq I/O schedulers. For Kyber, an additional limit_depth method is
added that is called before allocating the request.
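For illustration only, here is a minimal sketch of how an mq I/O scheduler
might wire up the two hooks after this change. The scheduler name "mysched",
the depth value, and the include list are hypothetical; the hook signatures
are taken from the callers visible in the diff below, and the limit_depth
body mimics what Kyber does with data->shallow_depth.

/*
 * Hypothetical scheduler sketch; meant as in-tree code under block/,
 * like bfq and kyber.
 */
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/module.h>

#include "blk-mq.h"	/* private block/ header: struct blk_mq_alloc_data */

/* Called before the core allocates the request; may shrink the tag depth. */
static void mysched_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
	if (!op_is_sync(op))
		data->shallow_depth = 16;	/* illustrative async depth */
}

/* Called after allocation; initializes per-request state and cannot fail. */
static void mysched_prepare_request(struct request *rq, struct bio *bio)
{
	rq->elv.priv[0] = NULL;
	rq->elv.priv[1] = NULL;
}

static struct elevator_type mysched = {
	.ops.mq = {
		.limit_depth		= mysched_limit_depth,
		.prepare_request	= mysched_prepare_request,
		/* .insert_requests, .dispatch_request, .finish_request, ... */
	},
	.uses_mq	= true,
	.elevator_name	= "mysched",
	.elevator_owner	= THIS_MODULE,
};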
Also, because none of the initializations can really fail, the new method
does not return an error - instead the bfq finish method is hardened
to deal with the no-IOC case.
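The hardening amounts to the finish hook bailing out early when no
io_context was ever attached, since prepare_request can no longer report a
failure. A hedged sketch of such a finish_request hook (hypothetical
scheduler, not bfq's actual code):

/*
 * Sketch of a finish_request hook that tolerates the no-IOC case:
 * because prepare_request returns void, a request may reach here
 * without an io_context/icq ever having been assigned.
 */
static void mysched_finish_request(struct request *rq)
{
	struct io_cq *icq = rq->elv.icq;

	if (!icq)
		return;	/* no IOC was set up for this request; nothing to undo */

	/* normal per-request teardown using icq and rq->elv.priv[] goes here */
}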
Last but not least, this removes the abuse of RQF_QUEUED by the blk-mq
scheduling code, as RQF_ELVPRIV is all that is needed now.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r-- | block/blk-mq.c | 22
1 file changed, 6 insertions(+), 16 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2f380ab7a603..81d05c19d4b3 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -298,16 +298,11 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 		 * Flush requests are special and go directly to the
 		 * dispatch list.
 		 */
-		if (!op_is_flush(op) && e->type->ops.mq.get_request) {
-			rq = e->type->ops.mq.get_request(q, op, data);
-			if (rq)
-				rq->rq_flags |= RQF_QUEUED;
-			goto allocated;
-		}
+		if (!op_is_flush(op) && e->type->ops.mq.limit_depth)
+			e->type->ops.mq.limit_depth(op, data);
 	}
 
 	rq = __blk_mq_alloc_request(data, op);
-allocated:
 	if (!rq) {
 		blk_queue_exit(q);
 		return NULL;
@@ -315,17 +310,12 @@ allocated:
 
 	if (!op_is_flush(op)) {
 		rq->elv.icq = NULL;
-		if (e && e->type->ops.mq.get_rq_priv) {
+		if (e && e->type->ops.mq.prepare_request) {
 			if (e->type->icq_cache && rq_ioc(bio))
 				blk_mq_sched_assign_ioc(rq, bio);
 
-			if (e->type->ops.mq.get_rq_priv(q, rq, bio)) {
-				if (rq->elv.icq)
-					put_io_context(rq->elv.icq->ioc);
-				rq->elv.icq = NULL;
-			} else {
-				rq->rq_flags |= RQF_ELVPRIV;
-			}
+			e->type->ops.mq.prepare_request(rq, bio);
+			rq->rq_flags |= RQF_ELVPRIV;
 		}
 	}
 	data->hctx->queued++;
@@ -413,7 +403,7 @@ void blk_mq_free_request(struct request *rq)
 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 	const int sched_tag = rq->internal_tag;
 
-	if (rq->rq_flags & (RQF_ELVPRIV | RQF_QUEUED)) {
+	if (rq->rq_flags & RQF_ELVPRIV) {
 		if (e && e->type->ops.mq.finish_request)
 			e->type->ops.mq.finish_request(rq);
 		if (rq->elv.icq) {