author	Tejun Heo <tj@kernel.org>	2015-08-18 14:55:03 -0700
committer	Jens Axboe <axboe@fb.com>	2015-08-18 15:49:16 -0700
commit	322731ed0dd2d8a7f11307e0444257f48580a0de (patch)
tree	536ae950d92b337f3896cb37acc8d3bf5c05ac14 /block/cfq-iosched.c
parent	2da8de0bb799bf2bdfa893e5a1e294eb6bafba62 (diff)
download	linux-322731ed0dd2d8a7f11307e0444257f48580a0de.tar.bz2
cfq-iosched: move cfq_group determination from cfq_find_alloc_queue() to cfq_get_queue()
This is necessary for making async cfq_queues per-cfq_group instead of per-cfq_data. While this change makes cfq_get_queue() perform RCU locking and look up the cfq_group even when it reuses an async queue, the extra overhead is extremely unlikely to be noticeable given that this is already sitting behind the cic->cfqq[] cache and the overall cost of cfq operation.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Arianna Avanzini <avanzini.arianna@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
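For reference, here is the post-patch shape of cfq_get_queue(), pieced together from the hunks below. This is a simplified outline rather than a compilable excerpt: the ioprio handling and async-queue caching elided with "..." are untouched by this patch.

static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
	      struct bio *bio)
{
	struct cfq_queue *cfqq;
	struct cfq_group *cfqg;

	rcu_read_lock();
	/* the blkcg -> cfq_group lookup now happens up front, once per call */
	cfqg = cfq_lookup_create_cfqg(cfqd, bio_blkcg(bio));
	if (!cfqg) {
		cfqq = &cfqd->oom_cfqq;
		goto out;
	}

	/* ... unchanged: reuse a cached async cfqq if one exists ... */

	/* the cfq_group is now passed in instead of being looked up inside */
	cfqq = cfq_find_alloc_queue(cfqd, cfqg, is_sync, cic, bio);

	/* ... unchanged: stash a newly allocated async cfqq ... */
out:
	cfqq->ref++;
	rcu_read_unlock();	/* the RCU section now spans the whole lookup */
	return cfqq;
}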
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c	28
1 file changed, 12 insertions(+), 16 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 146b03d64b7e..f3ea8a198def 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3641,21 +3641,10 @@ static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) {
 #endif	/* CONFIG_CFQ_GROUP_IOSCHED */

 static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
-		     struct bio *bio)
+cfq_find_alloc_queue(struct cfq_data *cfqd, struct cfq_group *cfqg, bool is_sync,
+		     struct cfq_io_cq *cic, struct bio *bio)
 {
-	struct blkcg *blkcg;
 	struct cfq_queue *cfqq;
-	struct cfq_group *cfqg;
-
-	rcu_read_lock();
-
-	blkcg = bio_blkcg(bio);
-	cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
-	if (!cfqg) {
-		cfqq = &cfqd->oom_cfqq;
-		goto out;
-	}

 	cfqq = cic_to_cfqq(cic, is_sync);

@@ -3675,8 +3664,6 @@ cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
 		} else
 			cfqq = &cfqd->oom_cfqq;
 	}
-out:
-	rcu_read_unlock();
 	return cfqq;
 }

@@ -3706,6 +3693,14 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
 	int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
 	struct cfq_queue **async_cfqq;
 	struct cfq_queue *cfqq;
+	struct cfq_group *cfqg;
+
+	rcu_read_lock();
+	cfqg = cfq_lookup_create_cfqg(cfqd, bio_blkcg(bio));
+	if (!cfqg) {
+		cfqq = &cfqd->oom_cfqq;
+		goto out;
+	}

 	if (!is_sync) {
 		if (!ioprio_valid(cic->ioprio)) {
@@ -3719,7 +3714,7 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
 			goto out;
 	}

-	cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio);
+	cfqq = cfq_find_alloc_queue(cfqd, cfqg, is_sync, cic, bio);

 	/*
 	 * pin the queue now that it's allocated, scheduler exit will prune it
@@ -3730,6 +3725,7 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
 	}
 out:
 	cfqq->ref++;
+	rcu_read_unlock();
 	return cfqq;
 }