path: root/block/blk-mq-tag.c
author    Christoph Hellwig <hch@lst.de>  2014-09-13 16:40:11 -0700
committer Jens Axboe <axboe@fb.com>       2014-09-22 12:00:07 -0600
commit    81481eb423c295c5480a3fab9bb961cf286c91e7 (patch)
tree      3d4f0eaed9f784ba5beb41aa9fec3e76b867c1e8 /block/blk-mq-tag.c
parent    c8a446ad695ada43a885ec12b38411dbd190a11b (diff)
download  linux-81481eb423c295c5480a3fab9bb961cf286c91e7.tar.bz2
blk-mq: fix and simplify tag iteration for the timeout handler
Don't do a kmalloc from the timer to handle timeouts; chances are we could be under heavy load or similar and would then just miss out on the timeouts. Fortunately it is very easy to just iterate over all in-use tags, and doing this properly actually cleans up the blk_mq_tag_busy_iter API as well, and prepares us for the next patch by passing a reserved argument to the iterator.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
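To make the new iterator contract concrete, here is a minimal sketch of a callback matching the four-argument shape visible in the fn(hctx, rq, data, reserved) call in the diff below. The count_ctx and count_busy names are invented for illustration and are not part of this patch; the includes are the usual homes of the blk-mq and request types.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical private state threaded through the iterator. */
struct count_ctx {
	unsigned int busy;	/* all in-flight requests seen */
	unsigned int resv;	/* those that came from the reserved tag pool */
};

/*
 * Callback shape used by the reworked iterator: it hands over the hardware
 * context, the request, the caller's cookie, and a flag saying whether the
 * tag came from the reserved set.
 */
static void count_busy(struct blk_mq_hw_ctx *hctx, struct request *rq,
		       void *priv, bool reserved)
{
	struct count_ctx *ctx = priv;

	ctx->busy++;
	if (reserved)
		ctx->resv++;
}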
Diffstat (limited to 'block/blk-mq-tag.c')
-rw-r--r--	block/blk-mq-tag.c	44
1 file changed, 18 insertions, 26 deletions
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index c1b92426c95e..b08788086414 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -392,45 +392,37 @@ void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
		__blk_mq_put_reserved_tag(tags, tag);
}

-static void bt_for_each_free(struct blk_mq_bitmap_tags *bt,
-			     unsigned long *free_map, unsigned int off)
+static void bt_for_each(struct blk_mq_hw_ctx *hctx,
+		struct blk_mq_bitmap_tags *bt, unsigned int off,
+		busy_iter_fn *fn, void *data, bool reserved)
{
-	int i;
+	struct request *rq;
+	int bit, i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
-		int bit = 0;
-
-		do {
-			bit = find_next_zero_bit(&bm->word, bm->depth, bit);
-			if (bit >= bm->depth)
-				break;
-			__set_bit(bit + off, free_map);
-			bit++;
-		} while (1);
+		for (bit = find_first_bit(&bm->word, bm->depth);
+		     bit < bm->depth;
+		     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
+			rq = blk_mq_tag_to_rq(hctx->tags, off + bit);
+			if (rq->q == hctx->queue)
+				fn(hctx, rq, data, reserved);
+		}

		off += (1 << bt->bits_per_word);
	}
}

-void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
-		void (*fn)(void *, unsigned long *), void *data)
+void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
+		void *priv)
{
-	unsigned long *tag_map;
-	size_t map_size;
-
-	map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG;
-	tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC);
-	if (!tag_map)
-		return;
+	struct blk_mq_tags *tags = hctx->tags;

-	bt_for_each_free(&tags->bitmap_tags, tag_map, tags->nr_reserved_tags);
	if (tags->nr_reserved_tags)
-		bt_for_each_free(&tags->breserved_tags, tag_map, 0);
-
-	fn(data, tag_map);
-	kfree(tag_map);
+		bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
+	bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
+			false);
}
EXPORT_SYMBOL(blk_mq_tag_busy_iter);
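Not part of this diff (which is limited to block/blk-mq-tag.c), but as a hedged sketch of how a caller might drive the reworked iterator: the example_* names below are hypothetical, and in-tree callers would pick up the blk_mq_tag_busy_iter() declaration from the block layer's private headers rather than a public include.

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical callback: log every in-flight request found in one hctx. */
static void example_show_rq(struct blk_mq_hw_ctx *hctx, struct request *rq,
			    void *priv, bool reserved)
{
	pr_info("hwq %u: tag %d busy%s\n", hctx->queue_num, rq->tag,
		reserved ? " (reserved)" : "");
}

/* Hypothetical caller: scan each hardware queue of a request queue. */
static void example_show_busy(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	/* No allocation needed any more; each call walks only hctx's tags. */
	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_tag_busy_iter(hctx, example_show_rq, NULL);
}

This mirrors how the timeout path can now visit every in-use tag directly instead of building a temporary bitmap with kzalloc(GFP_ATOMIC) under timer context.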