commit     ca47e8c72ae141587cabf3dab693f6754d8c416b
author     Josef Bacik <jbacik@fb.com>    2018-07-03 11:15:03 -0400
committer  Jens Axboe <axboe@kernel.dk>   2018-07-09 09:07:54 -0600
tree       92d23a5608cdfa17635fe78da7b979704e61eed5
parent     b351f0c76c3eb94c9ccfb68d0b23899a35e47f27
mm: skip readahead if the cgroup is congested
We noticed in testing that we'd get pretty bad latency stalls under heavy pressure because readahead would try to do its thing while the cgroup was under severe pressure. If we're under this much pressure, we want to do as little IO as possible so that a throttled cgroup can still make progress on real work, so just skip readahead if our group is under pressure.

Signed-off-by: Josef Bacik <jbacik@fb.com>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
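For illustration, here is a minimal userspace sketch of the pattern this patch applies: gate purely speculative work behind a pressure check and bail out early. io_pressure_high and optional_prefetch are hypothetical stand-ins for blk_cgroup_congested() and the readahead paths, not kernel APIs.

    #include <stdbool.h>
    #include <stdio.h>

    static bool io_pressure_high;   /* stand-in for blk_cgroup_congested() */

    /* Speculative work: skip it entirely under pressure. Correctness does
     * not depend on it -- the data is simply read on demand later. */
    static void optional_prefetch(const char *what)
    {
            if (io_pressure_high)
                    return;
            printf("prefetching %s\n", what);
    }

    int main(void)
    {
            optional_prefetch("pages 0-31");    /* issued */
            io_pressure_high = true;
            optional_prefetch("pages 32-63");   /* skipped under pressure */
            return 0;
    }

The design point is that readahead is speculative: dropping it never affects correctness, so a throttled cgroup just trades prefetch IO now for on-demand reads later.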
Diffstat (limited to 'mm/readahead.c')
 mm/readahead.c | 7 +++++++
 1 file changed, 7 insertions(+)
diff --git a/mm/readahead.c b/mm/readahead.c
index e273f0de3376..9f62b7151100 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -19,6 +19,7 @@
 #include <linux/syscalls.h>
 #include <linux/file.h>
 #include <linux/mm_inline.h>
+#include <linux/blk-cgroup.h>
 
 #include "internal.h"
@@ -505,6 +506,9 @@ void page_cache_sync_readahead(struct address_space *mapping,
 	if (!ra->ra_pages)
 		return;
 
+	if (blk_cgroup_congested())
+		return;
+
 	/* be dumb */
 	if (filp && (filp->f_mode & FMODE_RANDOM)) {
 		force_page_cache_readahead(mapping, filp, offset, req_size);
@@ -555,6 +559,9 @@ page_cache_async_readahead(struct address_space *mapping,
 	if (inode_read_congested(mapping->host))
 		return;
 
+	if (blk_cgroup_congested())
+		return;
+
 	/* do read-ahead */
 	ondemand_readahead(mapping, ra, filp, true, offset, req_size);
 }
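Note: blk_cgroup_congested() reports whether the current task's block cgroup is congested/throttled; it comes from the generic blk-cgroup throttling work merged in the same series, hence the new blk-cgroup.h include. Both call sites check it before any IO is issued, so a congested group pays no readahead cost at all and simply faults pages in on demand.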