author    Johannes Weiner <hannes@cmpxchg.org>  2016-03-15 14:57:19 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-03-15 16:55:16 -0700
commit    6a93ca8fde3cfce0f00f02281139a377c83e8d8c (patch)
tree      63846308d4ab27360afdb31330cb8dbf4251cbfe /mm/migrate.c
parent    23047a96d7cfcfca1a6d026ecaec526ea4803e9e (diff)
mm: migrate: do not touch page->mem_cgroup of live pages
Changing a page's memcg association complicates dealing with the page, so we want to limit this as much as possible. Page migration, for example, does not have to do that: just like page cache replacement, it can forcibly charge the replacement page up front, and then uncharge the old page when it gets freed. Temporarily overcharging the cgroup by a single page is not an issue in practice, and charging is so cheap nowadays that this is much preferable to the headache of messing with live pages.

The only place that still changes the page->mem_cgroup binding of live pages is when pages move along with a task to another cgroup. But that path isolates the page from the LRU, takes the page lock, and the move lock (lock_page_memcg()). That means page->mem_cgroup is always stable in callers that have the page isolated from the LRU or locked. Lighter unlocked paths, like writeback accounting, can use lock_page_memcg().

[akpm@linux-foundation.org: fix build]
[vdavydov@virtuozzo.com: fix lockdep splat]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Greg Thelen <gthelen@google.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
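Editor's note: the stability rule above reduces to a small locking pattern. The sketch below is illustrative only and not part of this patch; account_page_event() is a hypothetical caller, and the page-taking lock_page_memcg()/unlock_page_memcg() signatures are assumed from the form this API settled into in mainline.

/*
 * Illustrative sketch only (not from this patch): an unlocked path
 * that needs a stable page->mem_cgroup brackets the access with
 * lock_page_memcg()/unlock_page_memcg(), which holds off concurrent
 * task-migration moves of the page's memcg binding.
 */
static void account_page_event(struct page *page)       /* hypothetical */
{
        lock_page_memcg(page);
        if (page->mem_cgroup) {
                /* ... update per-memcg statistics for this page ... */
        }
        unlock_page_memcg(page);
}

Callers that already hold the page lock or have the page isolated from the LRU can read page->mem_cgroup directly, since those are exactly the locks the move path takes.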
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--  mm/migrate.c  15
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 432ecd0172cd..848327d4a7ed 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -326,12 +326,13 @@ int migrate_page_move_mapping(struct address_space *mapping,
 			return -EAGAIN;
 
 		/* No turning back from here */
-		set_page_memcg(newpage, page_memcg(page));
 		newpage->index = page->index;
 		newpage->mapping = page->mapping;
 		if (PageSwapBacked(page))
 			SetPageSwapBacked(newpage);
 
+		mem_cgroup_migrate(page, newpage);
+
 		return MIGRATEPAGE_SUCCESS;
 	}
@@ -373,7 +374,6 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 * Now we know that no one else is looking at the page:
 	 * no turning back from here.
 	 */
-	set_page_memcg(newpage, page_memcg(page));
 	newpage->index = page->index;
 	newpage->mapping = page->mapping;
 	if (PageSwapBacked(page))
@@ -428,6 +428,8 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	}
 	local_irq_enable();
 
+	mem_cgroup_migrate(page, newpage);
+
 	return MIGRATEPAGE_SUCCESS;
 }
@@ -458,9 +460,9 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 		return -EAGAIN;
 	}
 
-	set_page_memcg(newpage, page_memcg(page));
 	newpage->index = page->index;
 	newpage->mapping = page->mapping;
+
 	get_page(newpage);
 
 	radix_tree_replace_slot(pslot, newpage);
@@ -468,6 +470,9 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 	page_unfreeze_refs(page, expected_count - 1);
 
 	spin_unlock_irq(&mapping->tree_lock);
+
+	mem_cgroup_migrate(page, newpage);
+
 	return MIGRATEPAGE_SUCCESS;
 }
@@ -775,7 +780,6 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 	 * page is freed; but stats require that PageAnon be left as PageAnon.
 	 */
 	if (rc == MIGRATEPAGE_SUCCESS) {
-		set_page_memcg(page, NULL);
 		if (!PageAnon(page))
 			page->mapping = NULL;
 	}
@@ -1842,8 +1846,7 @@ fail_putback:
 	}
 
 	mlock_migrate_page(new_page, page);
-	set_page_memcg(new_page, page_memcg(page));
-	set_page_memcg(page, NULL);
+	mem_cgroup_migrate(page, new_page);
 	page_remove_rmap(page, true);
 	set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);