author    | Omar Sandoval <osandov@fb.com> | 2017-02-02 15:42:39 -0800
committer | Jens Axboe <axboe@fb.com>      | 2017-02-02 16:57:56 -0700
commit    | 0cacba6cf8252438f8166bd3fa1c3370dd28a769 (patch)
tree      | c2b1dc91a0dadcc07be4e2fded65622d176c2720 /block
parent    | e17354961bb50931ec7b33f59c0713dcf98ac7d2 (diff)
blk-mq-sched: bypass the scheduler for flushes entirely
There's a weird inconsistency: flushes are mostly hidden from the
scheduler, yet each scheduler still has to be aware of them in its
->insert_requests() hook. Instead of having every scheduler call
blk_mq_sched_bypass_insert(), do it once in the common framework.
Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
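
To make the mechanism concrete, here is a minimal standalone C sketch of the decision the common framework now makes on every insert (the struct and function names below are simplified stand-ins, not the kernel's actual types): a request that already carries a driver tag, which in practice means a flush, goes straight to the hardware context's dispatch list and the scheduler never sees it; anything else is marked as sorted and handed to the scheduler's insert hook. In the real code this is blk_mq_sched_bypass_insert() called from blk_mq_sched_insert_request(), as the diff below shows.

/*
 * Standalone sketch (not kernel code) of the bypass decision this commit
 * centralizes. The types below are simplified stand-ins for the real
 * struct request / blk_mq_hw_ctx machinery.
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_request {
	int tag;        /* -1 means no driver tag assigned yet */
	bool sorted;    /* models the RQF_SORTED flag */
};

/*
 * Models blk_mq_sched_bypass_insert(): returns true if the request was sent
 * straight to the dispatch list and the scheduler should be skipped.
 */
static bool bypass_insert(struct mock_request *rq)
{
	if (rq->tag == -1) {
		rq->sorted = true;   /* the scheduler will own this request */
		return false;
	}
	/* Already has a driver tag (e.g. a flush): dispatch it directly. */
	printf("request tag=%d -> hctx dispatch list (scheduler bypassed)\n",
	       rq->tag);
	return true;
}

/*
 * Models the common insert path after this commit: the framework, not each
 * individual scheduler, performs the bypass check.
 */
static void insert_request(struct mock_request *rq)
{
	if (bypass_insert(rq))
		return;
	printf("request (no tag) -> scheduler ->insert_requests()\n");
}

int main(void)
{
	struct mock_request flush  = { .tag = 7,  .sorted = false };
	struct mock_request normal = { .tag = -1, .sorted = false };

	insert_request(&flush);   /* bypasses the scheduler */
	insert_request(&normal);  /* handed to the scheduler */
	return 0;
}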
Diffstat (limited to 'block')
 block/blk-mq-sched.c | 25
 block/blk-mq-sched.h |  1
 block/mq-deadline.c  |  3
 3 files changed, 23 insertions, 6 deletions
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 114814ec3d49..3ec52f494094 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -289,7 +289,8 @@ void blk_mq_sched_request_inserted(struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);
 
-bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq)
+static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
+                                       struct request *rq)
 {
         if (rq->tag == -1) {
                 rq->rq_flags |= RQF_SORTED;
@@ -305,7 +306,6 @@ bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq)
         spin_unlock(&hctx->lock);
         return true;
 }
-EXPORT_SYMBOL_GPL(blk_mq_sched_bypass_insert);
 
 static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
 {
@@ -363,6 +363,9 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                 return;
         }
 
+        if (e && blk_mq_sched_bypass_insert(hctx, rq))
+                goto run;
+
         if (e && e->type->ops.mq.insert_requests) {
                 LIST_HEAD(list);
 
@@ -374,6 +377,7 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                 spin_unlock(&ctx->lock);
         }
 
+run:
         if (run_queue)
                 blk_mq_run_hw_queue(hctx, async);
 }
@@ -385,6 +389,23 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
         struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
         struct elevator_queue *e = hctx->queue->elevator;
 
+        if (e) {
+                struct request *rq, *next;
+
+                /*
+                 * We bypass requests that already have a driver tag assigned,
+                 * which should only be flushes. Flushes are only ever inserted
+                 * as single requests, so we shouldn't ever hit the
+                 * WARN_ON_ONCE() below (but let's handle it just in case).
+                 */
+                list_for_each_entry_safe(rq, next, list, queuelist) {
+                        if (WARN_ON_ONCE(rq->tag != -1)) {
+                                list_del_init(&rq->queuelist);
+                                blk_mq_sched_bypass_insert(hctx, rq);
+                        }
+                }
+        }
+
         if (e && e->type->ops.mq.insert_requests)
                 e->type->ops.mq.insert_requests(hctx, list, false);
         else
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 9478aaeb48c5..add5f090a8cd 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -15,7 +15,6 @@ struct request *blk_mq_sched_get_request(struct request_queue *q, struct bio *bi
 void blk_mq_sched_put_request(struct request *rq);
 
 void blk_mq_sched_request_inserted(struct request *rq);
-bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq);
 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio);
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 49583536698c..8f91f21e8663 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -395,9 +395,6 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 
         blk_mq_sched_request_inserted(rq);
 
-        if (blk_mq_sched_bypass_insert(hctx, rq))
-                return;
-
         if (at_head || blk_rq_is_passthrough(rq)) {
                 if (at_head)
                         list_add(&rq->queuelist, &dd->dispatch);