summaryrefslogtreecommitdiffstats
path: root/block/blk-cgroup.c
diff options
context:
space:
mode:
authorJens Axboe <axboe@fb.com>2017-01-18 15:37:27 -0700
committerJens Axboe <axboe@fb.com>2017-01-18 15:37:27 -0700
commit38dbb7dd4db184da4d2673f4bb963f7006465c37 (patch)
tree7d34184c97b07d1aed08731fff17dad9efbbcdf1 /block/blk-cgroup.c
parent6c0ca7ae292adea09b8bdd33a524bb9326c3e989 (diff)
downloadlinux-38dbb7dd4db184da4d2673f4bb963f7006465c37.tar.bz2
blk-cgroup: don't quiesce the queue on policy activate/deactivate
There's no potential harm in quiescing the queue, but it also doesn't buy us anything. And we can't run the queue async for policy deactivate, since we could be in the path of tearing the queue down. If we schedule an async run of the queue at that time, we're racing with queue teardown after we've already torn most of it down. Reported-by: Omar Sandoval <osandov@fb.com> Fixes: 4d199c6f1c84 ("blk-cgroup: ensure that we clear the stop bit on quiesced queues") Tested-by: Omar Sandoval <osandov@fb.com> Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--block/blk-cgroup.c20
1 file changed, 8 insertions, 12 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index efb97ec37eee..fb59a3edc778 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1223,10 +1223,9 @@ int blkcg_activate_policy(struct request_queue *q,
if (blkcg_policy_enabled(q, pol))
return 0;
- if (q->mq_ops) {
+ if (q->mq_ops)
blk_mq_freeze_queue(q);
- blk_mq_quiesce_queue(q);
- } else
+ else
blk_queue_bypass_start(q);
pd_prealloc:
if (!pd_prealloc) {
@@ -1265,10 +1264,9 @@ pd_prealloc:
spin_unlock_irq(q->queue_lock);
out_bypass_end:
- if (q->mq_ops) {
+ if (q->mq_ops)
blk_mq_unfreeze_queue(q);
- blk_mq_start_stopped_hw_queues(q, true);
- } else
+ else
blk_queue_bypass_end(q);
if (pd_prealloc)
pol->pd_free_fn(pd_prealloc);
@@ -1292,10 +1290,9 @@ void blkcg_deactivate_policy(struct request_queue *q,
if (!blkcg_policy_enabled(q, pol))
return;
- if (q->mq_ops) {
+ if (q->mq_ops)
blk_mq_freeze_queue(q);
- blk_mq_quiesce_queue(q);
- } else
+ else
blk_queue_bypass_start(q);
spin_lock_irq(q->queue_lock);
@@ -1318,10 +1315,9 @@ void blkcg_deactivate_policy(struct request_queue *q,
spin_unlock_irq(q->queue_lock);
- if (q->mq_ops) {
+ if (q->mq_ops)
blk_mq_unfreeze_queue(q);
- blk_mq_start_stopped_hw_queues(q, true);
- } else
+ else
blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);