author     Jens Axboe <axboe@kernel.dk>    2021-12-02 12:43:46 -0700
committer  Jens Axboe <axboe@kernel.dk>    2021-12-02 19:39:01 -0700
commit     a08ed9aae8a3d2321ef378d6581cc87a3fb75b44 (patch)
tree       df0fe69900a59fa4f9e3098c19b7e82f010d3e7f /block
parent     373b5416b4b03ebda5d8f0605b81eff0dc76ebcf (diff)
download   linux-a08ed9aae8a3d2321ef378d6581cc87a3fb75b44.tar.bz2
block: fix double bio queue when merging in cached request path
When we attempt to merge off the cached request path, we return NULL
if successful. This makes the caller believe that it should allocate
a new request, and hence we end up with the bio both merged and
associated with a new request. This, predictably, leads to all sorts
of crashes.

Pass in a pointer to the bio pointer, and clear it for the merge case.
Then the caller knows that the bio is already queued, and no new
request needs to be allocated.

Fixes: 5b13bc8a3fd5 ("blk-mq: cleanup request allocation")
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
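The calling convention the fix adopts is worth spelling out. Below is a
minimal userspace sketch of the same pattern, with hypothetical names
throughout (get_cached(), submit(), the toy structs) -- it is not the
kernel code, just an illustration: the lookup helper takes a pointer to
the bio pointer, returns a request on success, and clears *bio when the
bio was consumed by a merge, so a NULL return no longer always means
"allocate a new request".

/*
 * Userspace sketch only; get_cached() stands in for
 * blk_mq_get_cached_request(). It returns a request on success and
 * clears *bio when the bio was consumed by a merge, letting the
 * caller tell "allocate a new request" apart from "already queued".
 */
#include <stdio.h>

struct request { int id; };
struct bio { int mergeable; };

static struct request cached = { .id = 1 };

static struct request *get_cached(struct bio **bio)
{
        if ((*bio)->mergeable) {
                *bio = NULL;    /* bio merged and queued: tell the caller */
                return NULL;    /* ...so it must not allocate a request */
        }
        return &cached;         /* cached request fits, hand it out */
}

static void submit(struct bio *bio)
{
        struct request *rq = get_cached(&bio);

        if (!rq) {
                if (!bio)       /* merged: bio is already queued, stop */
                        return;
                printf("allocating a new request for the bio\n");
                return;
        }
        printf("using cached request %d\n", rq->id);
}

int main(void)
{
        struct bio a = { .mergeable = 0 }, b = { .mergeable = 1 };

        submit(&a);     /* uses the cached request */
        submit(&b);     /* merges; returns without allocating */
        return 0;
}

The design point is that the single NULL return now carries two
distinct meanings, disambiguated through the out-parameter rather than
a second return value.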
Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq.c  |  20 ++++++++++++--------
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ca33cb755c5f..fc4520e992b1 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2731,7 +2731,7 @@ queue_exit:
 }
 
 static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
-		struct blk_plug *plug, struct bio *bio, unsigned int nsegs)
+		struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
 {
 	struct request *rq;
 
@@ -2741,19 +2741,21 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
 	if (!rq || rq->q != q)
 		return NULL;
 
-	if (unlikely(!submit_bio_checks(bio)))
+	if (unlikely(!submit_bio_checks(*bio)))
 		return NULL;
-	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+	if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
+		*bio = NULL;
 		return NULL;
-	if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
+	}
+	if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
 		return NULL;
-	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
+	if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
 		return NULL;
 
-	rq->cmd_flags = bio->bi_opf;
+	rq->cmd_flags = (*bio)->bi_opf;
 	plug->cached_rq = rq_list_next(rq);
 	INIT_LIST_HEAD(&rq->queuelist);
-	rq_qos_throttle(q, bio);
+	rq_qos_throttle(q, *bio);
 	return rq;
 }
 
@@ -2789,8 +2791,10 @@ void blk_mq_submit_bio(struct bio *bio)
 	if (!bio_integrity_prep(bio))
 		return;
 
-	rq = blk_mq_get_cached_request(q, plug, bio, nr_segs);
+	rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
 	if (!rq) {
+		if (!bio)
+			return;
 		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
 		if (unlikely(!rq))
 			return;