author		Tejun Heo <tj@kernel.org>	2015-08-18 14:55:01 -0700
committer	Jens Axboe <axboe@fb.com>	2015-08-18 15:49:16 -0700
commit		d93a11f1cd890d4ea72f7cef75fac56801b099b3 (patch)
tree		b555a4ea359e72e0138590c72c11ece42c83eecb /block
parent		563180a44b7d7978f44e9776eedfbbc550c2398d (diff)
download	linux-d93a11f1cd890d4ea72f7cef75fac56801b099b3.tar.bz2
blkcg, cfq-iosched: use GFP_NOWAIT instead of GFP_ATOMIC for non-critical allocations
blkcg performs several allocations to track IOs per cgroup and enforce resource control. Most of these allocations are performed lazily on demand in the IO path and thus can't involve the reclaim path. Currently, these allocations use GFP_ATOMIC; however, blkcg can gracefully deal with occasional failures of these allocations by punting IOs to the root cgroup, so there's no reason to reach into the emergency reserve.

This patch replaces GFP_ATOMIC with GFP_NOWAIT for the following allocations:

* bdi_writeback_congested and blkcg_gq allocations in blkg_create().
* radix tree node allocations for blkcg->blkg_tree.
* cfq_queue allocation on ioprio changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Suggested-and-Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Suggested-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
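The substitution itself is mechanical; the point of the commit message is that a GFP_NOWAIT allocation is allowed to fail under memory pressure and the caller must degrade gracefully rather than lean on the emergency reserve. A minimal sketch of that pattern, in kernel style, is shown below; the io_track structure and io_track_get() helper are hypothetical and only illustrate the idea, they are not part of this patch.

    #include <linux/types.h>
    #include <linux/slab.h>

    /* Hypothetical per-cgroup tracking object, for illustration only. */
    struct io_track {
            u64 nr_ios;
    };

    static struct io_track *io_track_get(void)
    {
            struct io_track *t;

            /*
             * GFP_NOWAIT: don't sleep, don't enter direct reclaim, and
             * don't touch the emergency reserves that GFP_ATOMIC may use.
             */
            t = kzalloc(sizeof(*t), GFP_NOWAIT);
            if (!t)
                    return NULL;    /* caller punts the IO to the root cgroup */

            return t;
    }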
Diffstat (limited to 'block')
-rw-r--r--	block/blk-cgroup.c	8
-rw-r--r--	block/cfq-iosched.c	2
2 files changed, 5 insertions, 5 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index d6283b3f5db5..1db904f95502 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -188,7 +188,7 @@ EXPORT_SYMBOL_GPL(blkg_lookup);
/*
* If @new_blkg is %NULL, this function tries to allocate a new one as
- * necessary using %GFP_ATOMIC. @new_blkg is always consumed on return.
+ * necessary using %GFP_NOWAIT. @new_blkg is always consumed on return.
*/
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
struct request_queue *q,
@@ -208,7 +208,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
}
wb_congested = wb_congested_get_create(&q->backing_dev_info,
- blkcg->css.id, GFP_ATOMIC);
+ blkcg->css.id, GFP_NOWAIT);
if (!wb_congested) {
ret = -ENOMEM;
goto err_put_css;
@@ -216,7 +216,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
/* allocate */
if (!new_blkg) {
- new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
+ new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT);
if (unlikely(!new_blkg)) {
ret = -ENOMEM;
goto err_put_congested;
@@ -882,7 +882,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
done:
spin_lock_init(&blkcg->lock);
- INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
+ INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
INIT_LIST_HEAD(&blkcg->cgwb_list);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index bf6f49cca311..5f119292a254 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3575,7 +3575,7 @@ static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
cfqq = cic_to_cfqq(cic, false);
if (cfqq) {
cfq_put_queue(cfqq);
- cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio, GFP_ATOMIC);
+ cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio, GFP_NOWAIT);
cic_set_cfqq(cic, cfqq, false);
}