author     Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>	2010-05-26 14:42:38 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>	2010-05-27 09:12:43 -0700
commit     90254a65833b67502d14736410b3857a15535c67 (patch)
tree       e03ec501b4a585c0d112469f36100ec41a715345 /mm/memcontrol.c
parent     3c11ecf448eff8f12922c498b8274ce98587eb74 (diff)
download   linux-90254a65833b67502d14736410b3857a15535c67.tar.bz2
memcg: clean up move charge
This patch cleans up the move charge code by:

- defining functions that handle the pte for each type, making
  is_target_pte_for_mc() cleaner.
- defining a function that checks the MOVE_CHARGE_TYPE_ANON bit, instead of
  testing the bit directly at each call site.

Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
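Below is a minimal, self-contained sketch (plain userspace C, compilable on its own) of the pattern the patch applies: a small predicate helper wrapping the bit test, plus one handler per pte kind dispatched from a single classification function. All of the types and names in it (move_ctx, fake_pte, classify(), handle_present(), handle_swap()) are illustrative stand-ins, not kernel code; the real logic is in the diff below.

	/*
	 * Sketch of the cleanup pattern: a bit-test predicate helper plus one
	 * handler per pte kind, dispatched from a single entry point.
	 */
	#include <stdio.h>
	#include <stdbool.h>

	#define MOVE_CHARGE_TYPE_ANON	0	/* illustrative bit position */

	struct move_ctx {
		unsigned long move_flags;	/* stand-in for move_charge_at_immigrate */
	};

	static struct move_ctx mc = { .move_flags = 1UL << MOVE_CHARGE_TYPE_ANON };

	/* Predicate helper: replaces open-coded bit tests at each call site. */
	static bool move_anon(void)
	{
		return mc.move_flags & (1UL << MOVE_CHARGE_TYPE_ANON);
	}

	enum pte_kind { PTE_NONE, PTE_PRESENT, PTE_SWAP };

	struct fake_pte {
		enum pte_kind kind;
		bool anon;		/* pretend the page/entry is anonymous */
		int mapcount;		/* pretend sharing count */
	};

	/* One small handler per pte kind keeps the dispatcher readable. */
	static const char *handle_present(const struct fake_pte *pte)
	{
		if (!pte->anon || !move_anon())
			return NULL;	/* only anon pages are movable in this sketch */
		if (pte->mapcount > 1)
			return NULL;	/* don't move shared pages */
		return "present page";
	}

	static const char *handle_swap(const struct fake_pte *pte)
	{
		if (!pte->anon || !move_anon())
			return NULL;
		return "swap entry";
	}

	/* Dispatcher mirrors the shape of is_target_pte_for_mc() after the cleanup. */
	static const char *classify(const struct fake_pte *pte)
	{
		if (pte->kind == PTE_PRESENT)
			return handle_present(pte);
		if (pte->kind == PTE_SWAP)
			return handle_swap(pte);
		return NULL;
	}

	int main(void)
	{
		struct fake_pte ptes[] = {
			{ PTE_PRESENT, true, 1 },
			{ PTE_PRESENT, true, 3 },
			{ PTE_SWAP, true, 0 },
			{ PTE_NONE, false, 0 },
		};

		for (unsigned i = 0; i < sizeof(ptes) / sizeof(ptes[0]); i++) {
			const char *what = classify(&ptes[i]);
			printf("pte %u: %s\n", i, what ? what : "not a move target");
		}
		return 0;
	}

The payoff of the split is visible in the diff that follows: is_target_pte_for_mc() shrinks to a short dispatcher, and the anon-move policy check lives in one place (move_anon()) instead of being open-coded.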
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	96
1 file changed, 59 insertions, 37 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 53eb30ebdb49..e5277e8a42a8 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -266,6 +266,12 @@ static struct move_charge_struct {
.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};
+static bool move_anon(void)
+{
+ return test_bit(MOVE_CHARGE_TYPE_ANON,
+ &mc.to->move_charge_at_immigrate);
+}
+
/*
* Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
* limit reclaim to prevent infinite loops, if they ever occur.
@@ -4162,50 +4168,66 @@ enum mc_target_type {
MC_TARGET_SWAP,
};
-static int is_target_pte_for_mc(struct vm_area_struct *vma,
- unsigned long addr, pte_t ptent, union mc_target *target)
+static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
+ unsigned long addr, pte_t ptent)
{
- struct page *page = NULL;
- struct page_cgroup *pc;
- int ret = 0;
- swp_entry_t ent = { .val = 0 };
- int usage_count = 0;
- bool move_anon = test_bit(MOVE_CHARGE_TYPE_ANON,
- &mc.to->move_charge_at_immigrate);
+ struct page *page = vm_normal_page(vma, addr, ptent);
- if (!pte_present(ptent)) {
- /* TODO: handle swap of shmes/tmpfs */
- if (pte_none(ptent) || pte_file(ptent))
- return 0;
- else if (is_swap_pte(ptent)) {
- ent = pte_to_swp_entry(ptent);
- if (!move_anon || non_swap_entry(ent))
- return 0;
- usage_count = mem_cgroup_count_swap_user(ent, &page);
- }
- } else {
- page = vm_normal_page(vma, addr, ptent);
- if (!page || !page_mapped(page))
- return 0;
+ if (!page || !page_mapped(page))
+ return NULL;
+ if (PageAnon(page)) {
+ /* we don't move shared anon */
+ if (!move_anon() || page_mapcount(page) > 2)
+ return NULL;
+ } else
/*
* TODO: We don't move charges of file(including shmem/tmpfs)
* pages for now.
*/
- if (!move_anon || !PageAnon(page))
- return 0;
- if (!get_page_unless_zero(page))
- return 0;
- usage_count = page_mapcount(page);
- }
- if (usage_count > 1) {
- /*
- * TODO: We don't move charges of shared(used by multiple
- * processes) pages for now.
- */
+ return NULL;
+ if (!get_page_unless_zero(page))
+ return NULL;
+
+ return page;
+}
+
+static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
+ unsigned long addr, pte_t ptent, swp_entry_t *entry)
+{
+ int usage_count;
+ struct page *page = NULL;
+ swp_entry_t ent = pte_to_swp_entry(ptent);
+
+ if (!move_anon() || non_swap_entry(ent))
+ return NULL;
+ usage_count = mem_cgroup_count_swap_user(ent, &page);
+ if (usage_count > 1) { /* we don't move shared anon */
if (page)
put_page(page);
- return 0;
+ return NULL;
}
+ if (do_swap_account)
+ entry->val = ent.val;
+
+ return page;
+}
+
+static int is_target_pte_for_mc(struct vm_area_struct *vma,
+ unsigned long addr, pte_t ptent, union mc_target *target)
+{
+ struct page *page = NULL;
+ struct page_cgroup *pc;
+ int ret = 0;
+ swp_entry_t ent = { .val = 0 };
+
+ if (pte_present(ptent))
+ page = mc_handle_present_pte(vma, addr, ptent);
+ else if (is_swap_pte(ptent))
+ page = mc_handle_swap_pte(vma, addr, ptent, &ent);
+ /* TODO: handle swap of shmes/tmpfs */
+
+ if (!page && !ent.val)
+ return 0;
if (page) {
pc = lookup_page_cgroup(page);
/*
@@ -4221,8 +4243,8 @@ static int is_target_pte_for_mc(struct vm_area_struct *vma,
if (!ret || !target)
put_page(page);
}
- /* throught */
- if (ent.val && do_swap_account && !ret &&
+ /* There is a swap entry and a page doesn't exist or isn't charged */
+ if (ent.val && !ret &&
css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
ret = MC_TARGET_SWAP;
if (target)