-rw-r--r--  include/linux/rmap.h |  2 ++
-rw-r--r--  mm/memory.c          |  6 ++++--
-rw-r--r--  mm/rmap.c            | 42 ++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 48 insertions(+), 2 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index bd3504d11b15..8d29b7c38368 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -196,6 +196,8 @@ void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
 		unsigned long address);
 void page_add_file_rmap(struct page *, struct vm_area_struct *,
 		bool compound);
+void page_zap_file_rmap(struct page *);
+void page_zap_anon_rmap(struct page *);
 void page_remove_rmap(struct page *, struct vm_area_struct *,
 		bool compound);
diff --git a/mm/memory.c b/mm/memory.c
index f88c351aecd4..ba1d08a908a4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1451,9 +1451,11 @@ again:
 			if (pte_young(ptent) &&
 			    likely(!(vma->vm_flags & VM_SEQ_READ)))
 				mark_page_accessed(page);
-		}
+			page_zap_file_rmap(page);
+		} else
+			page_zap_anon_rmap(page);
+		munlock_vma_page(page, vma, false);
 		rss[mm_counter(page)]--;
-		page_remove_rmap(page, vma, false);
 		if (unlikely(page_mapcount(page) < 0))
 			print_bad_pte(vma, addr, ptent, page);
 		if (unlikely(__tlb_remove_page(tlb, page))) {
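In the zap_pte_range() hunk above, the single page_remove_rmap() call is replaced by a PageAnon()-based split plus an explicit munlock_vma_page() call. A minimal sketch of that before/after equivalence for the pte-mapped (compound == false) case; the wrapper names here are invented for illustration, and munlock_vma_page() is declared in mm/internal.h rather than a public header:

#include <linux/mm.h>
#include <linux/rmap.h>
#include "internal.h"			/* munlock_vma_page() */

/* Before this patch: one call, which also did the munlock internally. */
static void zap_rmap_before(struct page *page, struct vm_area_struct *vma)
{
	page_remove_rmap(page, vma, false);
}

/* After this patch: the caller picks the variant and munlocks itself. */
static void zap_rmap_after(struct page *page, struct vm_area_struct *vma)
{
	if (!PageAnon(page))
		page_zap_file_rmap(page);	/* '!PageAnon(page)' already known */
	else
		page_zap_anon_rmap(page);
	munlock_vma_page(page, vma, false);	/* now the caller's job */
}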
diff --git a/mm/rmap.c b/mm/rmap.c
index 2ec925e5fa6a..71a5365f23f3 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1413,6 +1413,48 @@ static void page_remove_anon_compound_rmap(struct page *page)
 }
 
 /**
+ * page_zap_file_rmap - take down non-anon pte mapping from a page
+ * @page: page to remove mapping from
+ *
+ * This is the simplified form of page_remove_rmap(), with:
+ * - we've already checked for '!PageAnon(page)'
+ * - 'compound' is always false
+ * - the caller does 'munlock_vma_page(page, vma, compound)' separately
+ * which allows for a much simpler calling convention.
+ *
+ * The caller holds the pte lock.
+ */
+void page_zap_file_rmap(struct page *page)
+{
+	lock_page_memcg(page);
+	page_remove_file_rmap(page, false);
+	unlock_page_memcg(page);
+}
+
+/**
+ * page_zap_anon_rmap - take down anon pte mapping from a page
+ * @page: page to remove mapping from
+ *
+ * This is the simplified form of page_remove_rmap(), with:
+ * - we've already checked for 'PageAnon(page)'
+ * - 'compound' is always false
+ * - the caller does 'munlock_vma_page(page, vma, compound)' separately
+ * which allows for a much simpler calling convention.
+ *
+ * The caller holds the pte lock.
+ */
+void page_zap_anon_rmap(struct page *page)
+{
+	/* page still mapped by someone else? */
+	if (!atomic_add_negative(-1, &page->_mapcount))
+		return;
+
+	lock_page_memcg(page);
+	__dec_lruvec_page_state(page, NR_ANON_MAPPED);
+	unlock_page_memcg(page);
+}
+
+/**
  * page_remove_rmap - take down pte mapping from a page
  * @page: page to remove mapping from
  * @vma: the vm area from which the mapping is removed
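The one subtle step in page_zap_anon_rmap() above is the mapcount handling: the kernel keeps page->_mapcount at -1 for an unmapped page, so atomic_add_negative(-1, ...) returns true exactly once, on the last unmap, and only that caller decrements NR_ANON_MAPPED under the memcg lock. A small userspace model of this convention (all names invented for illustration):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int mapcount = -1;	/* -1 == page has no mappings */

static void map_one(void)
{
	atomic_fetch_add(&mapcount, 1);	/* first map moves -1 -> 0 */
}

/* Models atomic_add_negative(-1, &page->_mapcount). */
static bool unmap_one_was_last(void)
{
	return atomic_fetch_sub(&mapcount, 1) - 1 < 0;
}

int main(void)
{
	map_one();
	map_one();	/* two ptes map the page: mapcount == 1 */
	printf("last unmap? %d\n", unmap_one_was_last());	/* 0: still mapped */
	printf("last unmap? %d\n", unmap_one_was_last());	/* 1: drop NR_ANON_MAPPED */
	return 0;
}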