author    Tahsin Erdogan <tahsin@google.com>    2017-01-31 22:36:50 -0800
committer Jens Axboe <axboe@fb.com>             2017-02-01 15:31:22 -0700
commit    bbfc3c5d6c7882dc65c1230e781644e35c29839f (patch)
tree      d7ccecb7ef61bac4a6d5f93c703ad2df65c77a2a /block/blk-core.c
parent    5fad1b64aed8bd63ca7da2ba92107ba9ecd9a2c8 (diff)
download  linux-bbfc3c5d6c7882dc65c1230e781644e35c29839f.tar.bz2
block: queue lock must be acquired when iterating over rls
blk_set_queue_dying() does not acquire the queue lock before it calls
blk_queue_for_each_rl(). This allows a racing blkg_destroy() to remove
blkg->q_node from the linked list and have blk_queue_for_each_rl() loop
infinitely over the removed blkg->q_node list node.

Signed-off-by: Tahsin Erdogan <tahsin@google.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
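For illustration only, here is a minimal sketch of the locked iteration pattern the
patch introduces. The helper name and the include lines are assumptions made for a
self-contained example, not upstream code; the diff below shows the actual change.
blk_queue_for_each_rl() follows the per-queue blkg list through blkg->q_node, and
blkg_destroy() unlinks that node while holding q->queue_lock, so the wake-up loop
must hold the same lock:

/*
 * Sketch only: the fixed pattern pulled out of blk_set_queue_dying() into a
 * standalone helper for readability. wake_all_request_lists() is a
 * hypothetical name, not a function in the kernel tree.
 */
#include <linux/blkdev.h>
#include <linux/blk-cgroup.h>	/* blk_queue_for_each_rl() */

static void wake_all_request_lists(struct request_queue *q)
{
	struct request_list *rl;

	/*
	 * The rl iteration follows blkg->q_node links. A concurrent
	 * blkg_destroy() removes blkg->q_node under q->queue_lock, so
	 * walking the list without the lock can loop forever over a
	 * node that has already been unlinked (the bug fixed here).
	 */
	spin_lock_irq(q->queue_lock);
	blk_queue_for_each_rl(rl, q) {
		if (rl->rq_pool) {
			/* wake tasks sleeping on request allocation */
			wake_up(&rl->wait[BLK_RW_SYNC]);
			wake_up(&rl->wait[BLK_RW_ASYNC]);
		}
	}
	spin_unlock_irq(q->queue_lock);
}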
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	2
1 file changed, 2 insertions, 0 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 02833ce5664e..b2df55a65250 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -527,12 +527,14 @@ void blk_set_queue_dying(struct request_queue *q)
 	else {
 		struct request_list *rl;
+		spin_lock_irq(q->queue_lock);
 		blk_queue_for_each_rl(rl, q) {
 			if (rl->rq_pool) {
 				wake_up(&rl->wait[BLK_RW_SYNC]);
 				wake_up(&rl->wait[BLK_RW_ASYNC]);
 			}
 		}
+		spin_unlock_irq(q->queue_lock);
 	}
 }
 EXPORT_SYMBOL_GPL(blk_set_queue_dying);