author		Jens Axboe <axboe@kernel.dk>	2021-11-03 05:49:07 -0600
committer	Jens Axboe <axboe@kernel.dk>	2021-11-04 12:50:46 -0600
commit		c5fc7b93173661336b9cc4e32fd5082a95e12b94 (patch)
tree		81f272f19dd6cc5c3be7b6f82a9b1c2951a4e372
parent		3b87c6ea671a18fb77709240d658f4201904f8e4 (diff)
download	linux-c5fc7b93173661336b9cc4e32fd5082a95e12b94.tar.bz2
block: have plug stored requests hold references to the queue
Requests that were stored in the cache deliberately didn't hold an enter
reference to the queue; instead, we grabbed one every time we pulled a
request out of there. That made for awkward logic on freeing the remainder
of the cached list, if needed, where we had to artificially raise the queue
usage count before each free.

Grab references up front for cached plug requests. That's safer, and also
more efficient.

Fixes: 47c122e35d7e ("block: pre-allocate requests if plug is started and is a batch")
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
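In short, the queue reference moves from free time to allocation time. A
condensed before/after sketch of the two paths touched in the diff below (a
simplified illustration, not verbatim kernel code; the identifiers are taken
from the hunks that follow):

/* Before: cached plug requests held no q_usage_counter reference, so
 * freeing leftovers had to take one artificially just so
 * blk_mq_free_request() could drop it again. */
while ((rq = rq_list_pop(&plug->cached_rq)) != NULL) {
	percpu_ref_get(&rq->q->q_usage_counter);
	blk_mq_free_request(rq);
}

/* After: the batch allocator grabs one reference per cached request up
 * front (the caller already holds one, per the comment in the diff),
 * so the leftover-free path becomes a plain loop. */
percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);

/* ... later, when flushing leftovers: */
while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
	blk_mq_free_request(rq);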
-rw-r--r--	block/blk-core.c	8
-rw-r--r--	block/blk-mq.c	7
2 files changed, 11 insertions(+), 4 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index fd389a16013c..35a87c06276e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1643,7 +1643,13 @@ void blk_flush_plug(struct blk_plug *plug, bool from_schedule)
flush_plug_callbacks(plug, from_schedule);
if (!rq_list_empty(plug->mq_list))
blk_mq_flush_plug_list(plug, from_schedule);
- if (unlikely(!from_schedule && plug->cached_rq))
+ /*
+ * Unconditionally flush out cached requests, even if the unplug
+ * event came from schedule. Since we now hold references to the
+ * queue for cached requests, we don't want a blocked task holding
+ * up a queue freeze/quiesce event.
+ */
+ if (unlikely(!rq_list_empty(plug->cached_rq)))
blk_mq_free_plug_rqs(plug);
}
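For context on the freeze/quiesce concern raised in the new comment: freezing
a queue kills percpu mode on q_usage_counter and then waits for every
outstanding reference to drain. A rough sketch of that sequence, paraphrasing
the freeze path in block/blk-mq.c (simplified; locking and memory-barrier
details elided):

/* If a task blocked in schedule() still held cached-request references
 * in its plug, this wait could stall until that task runs again, which
 * is why the cache is now flushed unconditionally above. */
static void freeze_sketch(struct request_queue *q)
{
	blk_freeze_queue_start(q);	/* switch the ref to atomic mode, begin draining */
	blk_mq_freeze_queue_wait(q);	/* sleep until q_usage_counter reaches zero */
}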
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c68aa0a332e1..5498454c2164 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -410,7 +410,10 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
tag_mask &= ~(1UL << i);
rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
rq_list_add(data->cached_rq, rq);
+ nr++;
}
+ /* caller already holds a reference, add for remainder */
+ percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
data->nr_tags -= nr;
return rq_list_pop(data->cached_rq);
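percpu_ref_get_many() bumps the refcount by an arbitrary amount in one
operation, so a single call covers the whole cached batch; it is nr - 1
because, as the comment notes, the caller already holds one reference for
the request being returned. A minimal illustration (hypothetical helper,
not kernel code):

/* Equivalent to calling percpu_ref_get() nr times, but done as one
 * percpu (or one atomic, if the ref is in atomic mode) update. */
static void grab_batch_refs(struct percpu_ref *ref, unsigned long nr)
{
	percpu_ref_get_many(ref, nr);
}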
@@ -630,10 +633,8 @@ void blk_mq_free_plug_rqs(struct blk_plug *plug)
{
struct request *rq;
- while ((rq = rq_list_pop(&plug->cached_rq)) != NULL) {
- percpu_ref_get(&rq->q->q_usage_counter);
+ while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
blk_mq_free_request(rq);
- }
}
static void req_bio_endio(struct request *rq, struct bio *bio,
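The deleted percpu_ref_get() existed only to balance the reference that
blk_mq_free_request() drops on its way out; with cached requests now holding
a real reference each, the plain loop balances on its own. Roughly,
paraphrasing the tail of the free path in block/blk-mq.c (a simplified
sketch, with tag and state teardown elided):

/* Each free releases one queue usage reference. */
static void free_tail_sketch(struct request *rq)
{
	struct request_queue *q = rq->q;

	/* ... tag release and request teardown elided ... */
	blk_queue_exit(q);	/* percpu_ref_put(&q->q_usage_counter) */
}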