author | Ming Lei <ming.lei@redhat.com> | 2022-06-16 09:44:00 +0800
committer | Jens Axboe <axboe@kernel.dk> | 2022-06-16 14:45:15 -0600
commit | 4d337cebcb1c27d9b48c48b9a98e939d4552d584
tree | 145de66b189943b28e6b089d4345459a7d22d2c6 /block/blk-mq.c
parent | 5fd7a84a09e640016fe106dd3e992f5210e23dc7
download | linux-4d337cebcb1c27d9b48c48b9a98e939d4552d584.tar.bz2
blk-mq: avoid touching q->elevator without any protection
q->elevator is referenced in blk_mq_has_sqsched() without any protection:
no q_usage_counter is held and no queue SRCU or RCU read lock is taken,
so a use-after-free can be triggered.
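For reference, this is the helper being removed (it also appears in the diff at the bottom of this page), with annotations added here, not present in the original code, marking the unprotected accesses:

  static bool blk_mq_has_sqsched(struct request_queue *q)
  {
          /* q->elevator is loaded with no q_usage_counter, SRCU or RCU read lock held */
          struct elevator_queue *e = q->elevator;

          /*
           * An elevator switch can free *e any time after the load above, so
           * the e->type dereferences below may touch freed memory.
           */
          if (e && e->type->ops.dispatch_request &&
              !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE))
                  return true;
          return false;
  }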
Fix the issue by adding a queue flag that records whether the elevator
uses single-queue style dispatch. With that flag in place, the
ELEVATOR_F_MQ_AWARE elevator feature flag is no longer needed.
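The new flag itself is defined outside block/blk-mq.c, so it does not show up in the diffstat below. A minimal sketch of what that side of the change presumably looks like follows; the flag name QUEUE_FLAG_SQ_SCHED, its bit value and the example ->init_sched() hook are assumptions, only blk_queue_sq_sched() is taken from the diff:

  /* Sketch, not the actual hunk: a request_queue flag replaces the elevator check. */
  #define QUEUE_FLAG_SQ_SCHED     30      /* assumed bit: elevator uses single-queue dispatch */

  #define blk_queue_sq_sched(q)   test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)

  /*
   * A single-queue style elevator (e.g. bfq or mq-deadline) would set the flag
   * from its ->init_sched() while it is being attached, and clear it again when
   * the elevator is torn down (hypothetical helper name):
   */
  static int example_init_sched(struct request_queue *q, struct elevator_type *e)
  {
          blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
          /* ... normal scheduler setup ... */
          return 0;
  }

With this, blk_mq_run_hw_queues() and blk_mq_delay_run_hw_queues() only test a bit in q->queue_flags and never dereference q->elevator, so a racing elevator switch can no longer leave them reading freed memory.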
Cc: Jan Kara <jack@suse.cz>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20220616014401.817001-3-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r-- | block/blk-mq.c | 18
1 file changed, 2 insertions(+), 16 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c13d03b2e17c..4cdb08a70912 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2143,20 +2143,6 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 EXPORT_SYMBOL(blk_mq_run_hw_queue);
 
 /*
- * Is the request queue handled by an IO scheduler that does not respect
- * hardware queues when dispatching?
- */
-static bool blk_mq_has_sqsched(struct request_queue *q)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (e && e->type->ops.dispatch_request &&
-	    !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE))
-		return true;
-	return false;
-}
-
-/*
  * Return prefered queue to dispatch from (if any) for non-mq aware IO
  * scheduler.
  */
@@ -2188,7 +2174,7 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async)
 	unsigned long i;
 
 	sq_hctx = NULL;
-	if (blk_mq_has_sqsched(q))
+	if (blk_queue_sq_sched(q))
 		sq_hctx = blk_mq_get_sq_hctx(q);
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (blk_mq_hctx_stopped(hctx))
@@ -2216,7 +2202,7 @@ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
 	unsigned long i;
 
 	sq_hctx = NULL;
-	if (blk_mq_has_sqsched(q))
+	if (blk_queue_sq_sched(q))
 		sq_hctx = blk_mq_get_sq_hctx(q);
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (blk_mq_hctx_stopped(hctx))