From b904dcfed6967e9cfc8a54778498f6d289420309 Mon Sep 17 00:00:00 2001
From: KOSAKI Motohiro
Date: Mon, 21 Sep 2009 17:01:28 -0700
Subject: mm: clean up page_remove_rmap()

page_remove_rmap() has multiple PageAnon() tests and it has deep nesting.
Clean this up.

Signed-off-by: KOSAKI Motohiro
Cc: KAMEZAWA Hiroyuki
Acked-by: Mel Gorman
Reviewed-by: Wu Fengguang
Cc: Rik van Riel
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/rmap.c | 57 ++++++++++++++++++++++++++++++---------------------------
 1 file changed, 30 insertions(+), 27 deletions(-)

(limited to 'mm/rmap.c')

diff --git a/mm/rmap.c b/mm/rmap.c
index 0895b5c7cbff..1406e67f9613 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -739,34 +739,37 @@ void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long
  */
 void page_remove_rmap(struct page *page)
 {
-	if (atomic_add_negative(-1, &page->_mapcount)) {
-		/*
-		 * Now that the last pte has gone, s390 must transfer dirty
-		 * flag from storage key to struct page. We can usually skip
-		 * this if the page is anon, so about to be freed; but perhaps
-		 * not if it's in swapcache - there might be another pte slot
-		 * containing the swap entry, but page not yet written to swap.
-		 */
-		if ((!PageAnon(page) || PageSwapCache(page)) &&
-		    page_test_dirty(page)) {
-			page_clear_dirty(page);
-			set_page_dirty(page);
-		}
-		if (PageAnon(page))
-			mem_cgroup_uncharge_page(page);
-		__dec_zone_page_state(page,
-			PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
-		mem_cgroup_update_mapped_file_stat(page, -1);
-		/*
-		 * It would be tidy to reset the PageAnon mapping here,
-		 * but that might overwrite a racing page_add_anon_rmap
-		 * which increments mapcount after us but sets mapping
-		 * before us: so leave the reset to free_hot_cold_page,
-		 * and remember that it's only reliable while mapped.
-		 * Leaving it set also helps swapoff to reinstate ptes
-		 * faster for those pages still in swapcache.
-		 */
+	/* page still mapped by someone else? */
+	if (!atomic_add_negative(-1, &page->_mapcount))
+		return;
+
+	/*
+	 * Now that the last pte has gone, s390 must transfer dirty
+	 * flag from storage key to struct page. We can usually skip
+	 * this if the page is anon, so about to be freed; but perhaps
+	 * not if it's in swapcache - there might be another pte slot
+	 * containing the swap entry, but page not yet written to swap.
+	 */
+	if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) {
+		page_clear_dirty(page);
+		set_page_dirty(page);
 	}
+	if (PageAnon(page)) {
+		mem_cgroup_uncharge_page(page);
+		__dec_zone_page_state(page, NR_ANON_PAGES);
+	} else {
+		__dec_zone_page_state(page, NR_FILE_MAPPED);
+	}
+	mem_cgroup_update_mapped_file_stat(page, -1);
+	/*
+	 * It would be tidy to reset the PageAnon mapping here,
+	 * but that might overwrite a racing page_add_anon_rmap
+	 * which increments mapcount after us but sets mapping
+	 * before us: so leave the reset to free_hot_cold_page,
+	 * and remember that it's only reliable while mapped.
+	 * Leaving it set also helps swapoff to reinstate ptes
+	 * faster for those pages still in swapcache.
+	 */
 }

 /*
--
cgit v1.2.3
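
For readers skimming the patch, the technique is the ordinary guard-clause
(early-return) refactoring: bail out as soon as the "still mapped" case is
known, so the rest of the function sits at one indentation level, and split
the single PageAnon() ternary into an if/else so the test runs once. Below is
a minimal standalone C sketch of the same shape applied to a hypothetical
release_buffer() helper; the struct and function names are invented for the
illustration and are not part of the kernel or of this patch.

	/* Illustration only: hypothetical code, not from mm/rmap.c. */
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct buffer {
		int refcount;
		bool dirty;
		void *data;
	};

	/* Nested style, analogous to the old page_remove_rmap(). */
	static void release_buffer_nested(struct buffer *buf)
	{
		if (--buf->refcount == 0) {
			if (buf->dirty)
				printf("flushing buffer before free\n");
			free(buf->data);
			buf->data = NULL;
		}
	}

	/* Guard-clause style, analogous to the new page_remove_rmap(). */
	static void release_buffer(struct buffer *buf)
	{
		/* buffer still referenced by someone else? */
		if (--buf->refcount != 0)
			return;

		if (buf->dirty)
			printf("flushing buffer before free\n");
		free(buf->data);
		buf->data = NULL;
	}

	int main(void)
	{
		struct buffer a = { .refcount = 1, .dirty = true,  .data = malloc(16) };
		struct buffer b = { .refcount = 2, .dirty = false, .data = malloc(16) };

		release_buffer_nested(&a);	/* last reference: flushes and frees */
		release_buffer(&b);		/* still referenced: returns early */
		free(b.data);			/* clean up the example's remaining buffer */
		return 0;
	}

Both variants behave identically; the second simply keeps the uncommon
"nothing to do" case out of the way, which is what lets the patch body read
straight down instead of inside one large if block.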