author		Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>	2009-01-07 18:08:30 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-08 08:31:09 -0800
commit		42e9abb628def2c335a4ecf130bb6c88d916d885 (patch)
tree		e626dcc9d8fff5834c419e39dce57bed765aa09a /mm
parent		7f4d454dee2e0bdd21bafd413d1c53e443a26540 (diff)
download	linux-42e9abb628def2c335a4ecf130bb6c88d916d885.tar.bz2
memcg: change try_to_free_pages to hierarchical_reclaim
mem_cgroup_hierarchical_reclaim() works properly even when !use_hierarchy now (by memcg-hierarchy-avoid-unnecessary-reclaim.patch), so it should be used instead of try_to_free_mem_cgroup_pages() in many cases. The only exception is force_empty, where the group has no children.

Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	12
1 file changed, 4 insertions, 8 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 861037070f66..a7ecf23150c5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1430,8 +1430,7 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
 	rcu_read_unlock();
 	do {
-		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true,
-							get_swappiness(mem));
+		progress = mem_cgroup_hierarchical_reclaim(mem, gfp_mask, true);
 		progress += mem_cgroup_check_under_limit(mem);
 	} while (!progress && --retry);
@@ -1475,10 +1474,8 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
 		if (!ret)
 			break;
-		progress = try_to_free_mem_cgroup_pages(memcg,
-				GFP_KERNEL,
-				false,
-				get_swappiness(memcg));
+		progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
+							   false);
 		if (!progress)
 			retry_count--;
 	}
@@ -1519,8 +1516,7 @@ int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
 			break;
 		oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
-		try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true,
-					     get_swappiness(memcg));
+		mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true);
 		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
 		if (curusage >= oldusage)
 			retry_count--;
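
For context, the reason the swappiness argument disappears from every call site above is that the hierarchical wrapper looks it up per reclaim target itself. The sketch below is a condensed illustration of that shape, not the verbatim mm/memcontrol.c body: the hierarchy walk is simplified, and the victim-selection helper mem_cgroup_select_victim() is assumed from the surrounding memcg hierarchy series.

/*
 * Condensed, illustrative sketch -- not the verbatim kernel code.
 * It shows why callers in this patch stop passing a swappiness value:
 * the wrapper fetches it per victim group via get_swappiness().
 */
static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
					   gfp_t gfp_mask, bool noswap)
{
	struct mem_cgroup *victim;
	int loop, ret = 0;

	/* Bounded walk over root_mem and, with use_hierarchy, its children. */
	for (loop = 0; loop < 2; loop++) {
		victim = mem_cgroup_select_victim(root_mem); /* assumed helper */
		ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, noswap,
						   get_swappiness(victim));
		css_put(&victim->css);	/* drop the reference from selection */
		if (mem_cgroup_check_under_limit(root_mem))
			return 1;	/* back under the limit: report progress */
	}
	return ret;
}

With that shape, callers such as mem_cgroup_shrink_usage() and the two resize paths only need to say whether swap may be reclaimed (the noswap flag), which is exactly the simplification the hunks above perform.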