summary refs log tree commit diff stats
path: root/block/blk-throttle.c
diff options
context:
space:
mode:
authorVivek Goyal <vgoyal@redhat.com>2010-10-01 14:49:48 +0200
committerJens Axboe <jaxboe@fusionio.com>2010-10-01 14:49:48 +0200
commit02977e4af7ed3b478c505e50491ffdf3e1314cf4 (patch)
tree5adb947a5c8567cbbff79459e9feaccf354fd81f /block/blk-throttle.c
parent61014e96e6ed55b8db0af31574eec2a75d4e8755 (diff)
downloadlinux-02977e4af7ed3b478c505e50491ffdf3e1314cf4.tar.bz2
blkio: Add root group to td->tg_list
o Currently all the dynamically allocated groups, except the root group, are added to td->tg_list. This was not a problem so far, but in the next patch I will traverse td->tg_list to process any updates of limits on the groups. If the root group is not in tg_list, then the root group's updates are not processed. o It is better to add the root group to tg_list as well, instead of doing special processing for it during limit updates. Signed-off-by: Vivek Goyal <vgoyal@redhat.com> Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--block/blk-throttle.c17
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index af53f37c1b13..bc2936b80add 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -87,7 +87,7 @@ struct throtl_data
unsigned int nr_queued[2];
/*
- * number of total undestroyed groups (excluding root group)
+ * number of total undestroyed groups
*/
unsigned int nr_undestroyed_grps;
@@ -940,7 +940,17 @@ int blk_throtl_init(struct request_queue *q)
/* Practically unlimited BW */
tg->bps[0] = tg->bps[1] = -1;
tg->iops[0] = tg->iops[1] = -1;
- atomic_set(&tg->ref, 1);
+
+ /*
+ * Set root group reference to 2. One reference will be dropped when
+ * all groups on tg_list are being deleted during queue exit. Other
+ * reference will remain there as we don't want to delete this group
+ * as it is statically allocated and gets destroyed when throtl_data
+ * goes away.
+ */
+ atomic_set(&tg->ref, 2);
+ hlist_add_head(&tg->tg_node, &td->tg_list);
+ td->nr_undestroyed_grps++;
INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
@@ -966,10 +976,9 @@ void blk_throtl_exit(struct request_queue *q)
spin_lock_irq(q->queue_lock);
throtl_release_tgs(td);
- blkiocg_del_blkio_group(&td->root_tg.blkg);
/* If there are other groups */
- if (td->nr_undestroyed_grps >= 1)
+ if (td->nr_undestroyed_grps > 0)
wait = true;
spin_unlock_irq(q->queue_lock);