author	Bart Van Assche <bvanassche@acm.org>	2019-07-01 08:47:29 -0700
committer	Jens Axboe <axboe@kernel.dk>	2019-07-02 21:03:27 -0600
commit	c05f42206f4de12b6807270fc669b45472f1bdb7 (patch)
tree	82aaf5161958336f8ef635f9f7a8daaeddad7f64 /block/blk-mq-sched.c
parent	417232880c8a646739dbf4666a231505a1917fcb (diff)
download	linux-c05f42206f4de12b6807270fc669b45472f1bdb7.tar.bz2
blk-mq: remove blk_mq_put_ctx()
No code that occurs between blk_mq_get_ctx() and blk_mq_put_ctx() depends on preemption being disabled for its correctness. Since removing the CPU preemption calls does not measurably affect performance, simplify the blk-mq code by removing the blk_mq_put_ctx() function and also by not disabling preemption in blk_mq_get_ctx().

Cc: Hannes Reinecke <hare@suse.com>
Cc: Omar Sandoval <osandov@fb.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
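The diffstat below covers only block/blk-mq-sched.c. The companion change to the helpers in block/blk-mq.h (not part of this diffstat) amounts to roughly the following sketch; the exact hunk is not reproduced here:

static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	/*
	 * Sketch only: before this patch the helper passed get_cpu() here,
	 * which disabled preemption until blk_mq_put_ctx() called put_cpu().
	 */
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}

/* blk_mq_put_ctx(), whose body was just put_cpu(), is removed entirely. */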
Diffstat (limited to 'block/blk-mq-sched.c')
-rw-r--r--	block/blk-mq-sched.c	5
1 file changed, 1 insertion, 4 deletions
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 956a7aa9a637..c9d183d6c499 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -330,10 +330,8 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 	bool ret = false;
 	enum hctx_type type;
 
-	if (e && e->type->ops.bio_merge) {
-		blk_mq_put_ctx(ctx);
+	if (e && e->type->ops.bio_merge)
 		return e->type->ops.bio_merge(hctx, bio, nr_segs);
-	}
 
 	type = hctx->type;
 	if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
@@ -344,7 +342,6 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 		spin_unlock(&ctx->lock);
 	}
 
-	blk_mq_put_ctx(ctx);
 	return ret;
 }
 
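With both hunks applied, the merge path reads roughly as follows. The surrounding declarations are reproduced from the pre-existing function for context and are not shown in the hunks above:

bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);	/* no longer disables preemption */
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	bool ret = false;
	enum hctx_type type;

	/* Scheduler-provided merge: return directly, no blk_mq_put_ctx() needed. */
	if (e && e->type->ops.bio_merge)
		return e->type->ops.bio_merge(hctx, bio, nr_segs);

	type = hctx->type;
	if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
	    !list_empty_careful(&ctx->rq_lists[type])) {
		/* default per sw-queue merge */
		spin_lock(&ctx->lock);
		ret = blk_mq_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs);
		spin_unlock(&ctx->lock);
	}

	/* The trailing blk_mq_put_ctx(ctx) is gone; just return the result. */
	return ret;
}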