author     Jens Axboe <axboe@fb.com>   2014-12-15 08:30:26 -0700
committer  Jens Axboe <axboe@fb.com>   2014-12-15 08:30:26 -0700
commit     35d37c66356eed46700e0d5db87211844d43a241 (patch)
tree       6bfdbddaf04501ac9edcf6c526e1e54dd00444b8 /block/blk-mq-tag.c
parent     67e2c3883828b39548cee2091b36656787775d95 (diff)
Revert "blk-mq: Micro-optimize bt_get()"
This reverts commit 52f7eb945f2ba62b324bb9ae16d945326a961dcf.

The optimization is only really safe for a single queue; otherwise 'bs' and 'bt' can indeed change, and if we don't do a finish_wait() for each loop iteration, we'll potentially change the wait structure and corrupt the task wait list.

Reported-by: Jan Kara <jack@suse.cz>
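For illustration, here is a minimal sketch of the tag-wait loop as it looks with the revert applied. It is not the full bt_get(): the real function does more work between iterations (e.g. remapping hctx, as visible in the second hunk below), and the 'struct bt_wait_state' type name and the local declarations are assumptions taken from the diff context.

	struct bt_wait_state *bs;	/* wait-queue state for this hctx (type name assumed) */
	DEFINE_WAIT(wait);
	int tag;

	bs = bt_wait_ptr(bt, hctx);
	do {
		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(hctx, bt, last_tag);
		if (tag != -1)
			break;

		io_schedule();

		/*
		 * After sleeping, the request may map to a different hctx
		 * (see the second hunk), so 'bt' and the wait queue behind
		 * 'bs' can change.  The wait entry is still linked on the
		 * old bs->wait at this point; finish_wait() must unlink it
		 * before prepare_to_wait() queues it on the new wait queue,
		 * otherwise the same entry ends up on two lists and the
		 * task wait list is corrupted.
		 */
		finish_wait(&bs->wait, &wait);
		bs = bt_wait_ptr(bt, hctx);
	} while (1);

	finish_wait(&bs->wait, &wait);

The reverted optimization had kept a single bt_wait_ptr() call at the top of each iteration and dropped the per-iteration finish_wait(), which is only safe when 'bs' cannot change between iterations, i.e. with a single queue.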
Diffstat (limited to 'block/blk-mq-tag.c')
-rw-r--r--  block/blk-mq-tag.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index e3d4e4043b49..32e8dbb9ad1c 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -248,8 +248,8 @@ static int bt_get(struct blk_mq_alloc_data *data,
 	if (!(data->gfp & __GFP_WAIT))
 		return -1;
 
+	bs = bt_wait_ptr(bt, hctx);
 	do {
-		bs = bt_wait_ptr(bt, hctx);
 		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);
 
 		tag = __bt_get(hctx, bt, last_tag);
@@ -285,6 +285,8 @@ static int bt_get(struct blk_mq_alloc_data *data,
 			hctx = data->hctx;
 			bt = &hctx->tags->bitmap_tags;
 		}
+		finish_wait(&bs->wait, &wait);
+		bs = bt_wait_ptr(bt, hctx);
 	} while (1);
 
 	finish_wait(&bs->wait, &wait);