Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--  block/blk-throttle.c  106
1 file changed, 62 insertions(+), 44 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a19f58c6fc3a..4553245d9317 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -10,6 +10,7 @@
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
+#include "blk.h"
/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;
@@ -302,16 +303,16 @@ throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
return tg;
}
-/*
- * This function returns with queue lock unlocked in case of error, like
- * request queue is no more
- */
static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
{
struct throtl_grp *tg = NULL, *__tg = NULL;
struct blkio_cgroup *blkcg;
struct request_queue *q = td->queue;
+ /* no throttling for dead queue */
+ if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+ return NULL;
+
rcu_read_lock();
blkcg = task_blkio_cgroup(current);
tg = throtl_find_tg(td, blkcg);
@@ -323,32 +324,22 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
/*
* Need to allocate a group. Allocation of group also needs allocation
* of per cpu stats which in-turn takes a mutex() and can block. Hence
- * we need to drop rcu lock and queue_lock before we call alloc
- *
- * Take the request queue reference to make sure queue does not
- * go away once we return from allocation.
+ * we need to drop rcu lock and queue_lock before we call alloc.
*/
- blk_get_queue(q);
rcu_read_unlock();
spin_unlock_irq(q->queue_lock);
tg = throtl_alloc_tg(td);
- /*
- * We might have slept in group allocation. Make sure queue is not
- * dead
- */
- if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
- blk_put_queue(q);
- if (tg)
- kfree(tg);
-
- return ERR_PTR(-ENODEV);
- }
- blk_put_queue(q);
/* Group allocated and queue is still alive. take the lock */
spin_lock_irq(q->queue_lock);
+ /* Make sure @q is still alive */
+ if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+ kfree(tg);
+ return NULL;
+ }
+
/*
* Initialize the new group. After sleeping, read the blkcg again.
*/
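
A minimal, illustrative sketch of the caller contract this hunk establishes: throtl_get_tg() is now entered and left with q->queue_lock held, and a NULL return covers both a dead queue and a failed allocation, so callers no longer have to decode ERR_PTR(-ENODEV) or cope with the lock having been dropped. The snippet is not part of the patch; the real caller, blk_throtl_bio(), is updated further down in this diff.

	spin_lock_irq(q->queue_lock);		/* lock is held across the call */
	tg = throtl_get_tg(td);
	if (unlikely(!tg)) {
		/* queue died or allocation failed; queue_lock is still held */
		spin_unlock_irq(q->queue_lock);
		return false;			/* bio was not throttled */
	}
	/* ... queue or dispatch the bio with q->queue_lock still held ... */
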
@@ -1014,11 +1005,6 @@ static void throtl_release_tgs(struct throtl_data *td)
}
}
-static void throtl_td_free(struct throtl_data *td)
-{
- kfree(td);
-}
-
/*
* Blk cgroup controller notification saying that blkio_group object is being
* delinked as associated cgroup object is going away. That also means that
@@ -1123,17 +1109,17 @@ static struct blkio_policy_type blkio_policy_throtl = {
.plid = BLKIO_POLICY_THROTL,
};
-int blk_throtl_bio(struct request_queue *q, struct bio **biop)
+bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
struct throtl_data *td = q->td;
struct throtl_grp *tg;
- struct bio *bio = *biop;
bool rw = bio_data_dir(bio), update_disptime = true;
struct blkio_cgroup *blkcg;
+ bool throttled = false;
if (bio->bi_rw & REQ_THROTTLED) {
bio->bi_rw &= ~REQ_THROTTLED;
- return 0;
+ goto out;
}
/*
@@ -1152,7 +1138,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
rw, rw_is_sync(bio->bi_rw));
rcu_read_unlock();
- return 0;
+ goto out;
}
}
rcu_read_unlock();
@@ -1161,18 +1147,10 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
* Either group has not been allocated yet or it is not an unlimited
* IO group
*/
-
spin_lock_irq(q->queue_lock);
tg = throtl_get_tg(td);
-
- if (IS_ERR(tg)) {
- if (PTR_ERR(tg) == -ENODEV) {
- /*
- * Queue is gone. No queue lock held here.
- */
- return -ENODEV;
- }
- }
+ if (unlikely(!tg))
+ goto out_unlock;
if (tg->nr_queued[rw]) {
/*
@@ -1200,7 +1178,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
* So keep on trimming slice even if bio is not queued.
*/
throtl_trim_slice(td, tg, rw);
- goto out;
+ goto out_unlock;
}
queue_bio:
@@ -1212,16 +1190,52 @@ queue_bio:
tg->nr_queued[READ], tg->nr_queued[WRITE]);
throtl_add_bio_tg(q->td, tg, bio);
- *biop = NULL;
+ throttled = true;
if (update_disptime) {
tg_update_disptime(td, tg);
throtl_schedule_next_dispatch(td);
}
+out_unlock:
+ spin_unlock_irq(q->queue_lock);
out:
+ return throttled;
+}
+
+/**
+ * blk_throtl_drain - drain throttled bios
+ * @q: request_queue to drain throttled bios for
+ *
+ * Dispatch all currently throttled bios on @q through ->make_request_fn().
+ */
+void blk_throtl_drain(struct request_queue *q)
+ __releases(q->queue_lock) __acquires(q->queue_lock)
+{
+ struct throtl_data *td = q->td;
+ struct throtl_rb_root *st = &td->tg_service_tree;
+ struct throtl_grp *tg;
+ struct bio_list bl;
+ struct bio *bio;
+
+ WARN_ON_ONCE(!queue_is_locked(q));
+
+ bio_list_init(&bl);
+
+ while ((tg = throtl_rb_first(st))) {
+ throtl_dequeue_tg(td, tg);
+
+ while ((bio = bio_list_peek(&tg->bio_lists[READ])))
+ tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
+ while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
+ tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
+ }
spin_unlock_irq(q->queue_lock);
- return 0;
+
+ while ((bio = bio_list_pop(&bl)))
+ generic_make_request(bio);
+
+ spin_lock_irq(q->queue_lock);
}
int blk_throtl_init(struct request_queue *q)
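
A hedged sketch of how the reworked interfaces are meant to be consumed; the real call sites live in the block core (the make_request path for blk_throtl_bio() and the queue drain path for blk_throtl_drain()) and are outside this diff, so both wrapper functions below are hypothetical.

	/* hypothetical submission-path caller of the new bool API */
	static void submit_path_example(struct request_queue *q, struct bio *bio)
	{
		/*
		 * blk_throtl_bio() now returns true when it has queued @bio
		 * for later dispatch, so the caller simply stops instead of
		 * checking whether *biop was cleared to NULL.
		 */
		if (blk_throtl_bio(q, bio))
			return;		/* throttled, dispatched later */

		/* ... pass @bio on to the elevator / driver as usual ... */
	}

	/* hypothetical teardown-path caller; queue_lock must be held */
	static void drain_path_example(struct request_queue *q)
	{
		spin_lock_irq(q->queue_lock);
		/*
		 * Resubmit every currently throttled bio via
		 * generic_make_request(); the helper drops and reacquires
		 * q->queue_lock around the resubmission, as annotated by
		 * __releases/__acquires above.
		 */
		blk_throtl_drain(q);
		spin_unlock_irq(q->queue_lock);
	}
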
@@ -1296,7 +1310,11 @@ void blk_throtl_exit(struct request_queue *q)
* it.
*/
throtl_shutdown_wq(q);
- throtl_td_free(td);
+}
+
+void blk_throtl_release(struct request_queue *q)
+{
+ kfree(q->td);
}
static int __init throtl_init(void)
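
The final hunk splits freeing of q->td out of blk_throtl_exit(). A short, hypothetical sketch of the intended lifetime (the actual call sites sit in blk-core.c / blk-sysfs.c, outside this diff): throttling is shut down while the queue is being cleaned up, but the throtl_data itself now stays allocated until the queue's last reference is dropped.

	/* hypothetical cleanup-time caller */
	static void queue_cleanup_example(struct request_queue *q)
	{
		blk_throtl_exit(q);	/* stop throttling; q->td stays allocated */
	}

	/* hypothetical release-time caller, after the last queue reference */
	static void queue_release_example(struct request_queue *q)
	{
		blk_throtl_release(q);	/* finally free q->td */
	}
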