author     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>   2017-02-24 14:59:19 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>       2017-02-24 17:46:56 -0800
commit     595cd8f256d24face93b2722927ec9c980419c26
tree       4884ad51a34aa931eef71932ed7546ab390f51ef /mm
parent     288bc54949fc2625a4fd811a188fb200cc498946
mm/ksm: handle protnone saved writes when making page write protect
Without this, KSM will consider the page write-protected, but a NUMA fault can
later mark the page writable. This can result in memory corruption.
Link: http://lkml.kernel.org/r/1487498625-10891-3-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--   mm/ksm.c   9
1 file changed, 7 insertions(+), 2 deletions(-)
@@ -880,7 +880,8 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 	if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
 		goto out_unlock;

-	if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte)) {
+	if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
+	    (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte))) {
 		pte_t entry;

 		swapped = PageSwapCache(page);
@@ -905,7 +906,11 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 		}
 		if (pte_dirty(entry))
 			set_page_dirty(page);
-		entry = pte_mkclean(pte_wrprotect(entry));
+
+		if (pte_protnone(entry))
+			entry = pte_mkclean(pte_clear_savedwrite(entry));
+		else
+			entry = pte_mkclean(pte_wrprotect(entry));
 		set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
 	}
 	*orig_pte = *pvmw.pte;
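
For readers unfamiliar with the saved-write bit, the stand-alone C sketch below models the race the commit message describes. It is not kernel code: struct toy_pte and the helpers numa_fault_restore() and ksm_wrprotect() are illustrative assumptions that only mirror the roles of pte_savedwrite()/pte_clear_savedwrite() and the write_protect_page() check shown in the diff above.

```c
/*
 * Toy model of the KSM vs. NUMA-fault race. The bit layout and helper
 * names are illustrative assumptions, not the kernel's definitions.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_pte {
	bool write;      /* hardware-visible write permission */
	bool dirty;
	bool protnone;   /* autonuma PROT_NONE protection */
	bool savedwrite; /* write permission saved across protnone */
};

/* NUMA hinting fault: drop protnone and restore write from savedwrite. */
static void numa_fault_restore(struct toy_pte *pte)
{
	pte->protnone = false;
	if (pte->savedwrite)
		pte->write = true;
}

/* KSM-style write protection, with and without the fix above. */
static void ksm_wrprotect(struct toy_pte *pte, bool patched)
{
	if (pte->write || pte->dirty ||
	    (patched && pte->protnone && pte->savedwrite)) {
		if (patched && pte->protnone)
			pte->savedwrite = false;	/* models pte_clear_savedwrite() */
		else
			pte->write = false;		/* models pte_wrprotect() */
		pte->dirty = false;			/* models pte_mkclean() */
	}
	/* Otherwise the pte already looks write-protected and KSM merges. */
}

int main(void)
{
	/* Writable page that autonuma has temporarily made PROT_NONE. */
	struct toy_pte pte = { .protnone = true, .savedwrite = true };

	ksm_wrprotect(&pte, false);	/* unpatched: pte looks read-only, nothing cleared */
	numa_fault_restore(&pte);
	printf("unpatched: writable after NUMA fault = %d\n", pte.write); /* 1 */

	pte = (struct toy_pte){ .protnone = true, .savedwrite = true };
	ksm_wrprotect(&pte, true);	/* patched: saved write is cleared */
	numa_fault_restore(&pte);
	printf("patched:   writable after NUMA fault = %d\n", pte.write); /* 0 */

	return 0;
}
```

In the unpatched run the protnone PTE looks read-only to KSM, its saved-write bit survives the merge, and the next NUMA hinting fault hands write permission back to a now-shared page. With the fix, KSM clears the saved-write bit instead, so the page stays read-only after the fault.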