author    Johannes Weiner <hannes@cmpxchg.org>    2011-01-13 15:47:04 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2011-01-13 17:32:44 -0800
commit    cd7548ab360c462118568eebb8c6da3bc303b02e (patch)
tree      44d145e6139c8753659ee773ae35810dc3d92d0b
parent    b36f5b0710e9e3b92484de32920fddcb17278664 (diff)
download  linux-cd7548ab360c462118568eebb8c6da3bc303b02e.tar.bz2
thp: mprotect: transparent huge page support
Natively handle huge pmds when changing page tables on behalf of mprotect().

I left out update_mmu_cache() because we do not need it on x86 anyway but more importantly the interface works on ptes, not pmds.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/huge_mm.h    2
-rw-r--r--  mm/huge_memory.c          27
-rw-r--r--  mm/mprotect.c              8
3 files changed, 36 insertions(+), 1 deletion(-)
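
For context, the operation this patch optimizes is an ordinary mprotect() call over a THP-backed anonymous mapping. The userspace sketch below is not part of the commit; it assumes the common x86-64 pmd-level huge page size of 2 MB (HPAGE_PMD_SIZE) and merely demonstrates the call pattern that now reaches the new huge-pmd path:

/*
 * Illustrative only, not from the commit: map anonymous memory,
 * ask for huge pages, fault the region in, then mprotect() it.
 * HUGE_SZ assumes x86-64's 2 MB pmd-level huge page size.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>

#define HUGE_SZ (2UL * 1024 * 1024)

int main(void)
{
	/* Over-allocate so a 2 MB-aligned start address can be carved out. */
	size_t len = 2 * HUGE_SZ;
	char *raw = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (raw == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	char *huge = (char *)(((uintptr_t)raw + HUGE_SZ - 1) & ~(HUGE_SZ - 1));

	if (madvise(huge, HUGE_SZ, MADV_HUGEPAGE))
		perror("madvise");	/* THP may be disabled; continue anyway */

	memset(huge, 0xab, HUGE_SZ);	/* fault in (ideally) one huge page */

	/*
	 * Exactly one pmd-sized, pmd-aligned range: with this patch the
	 * kernel can change the single huge pmd in place instead of first
	 * splitting it into 512 regular ptes.
	 */
	if (mprotect(huge, HUGE_SZ, PROT_READ)) {
		perror("mprotect");
		return 1;
	}
	printf("mprotect() over the huge mapping succeeded\n");
	munmap(raw, len);
	return 0;
}

Before this patch, mprotect() would always split the huge page into regular ptes before changing protections; with it, a pmd-sized, pmd-aligned range is updated as a single entry.
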
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 25125fb6acf7..c590b08c6fa6 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -22,6 +22,8 @@ extern int zap_huge_pmd(struct mmu_gather *tlb,
 extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			unsigned long addr, unsigned long end,
 			unsigned char *vec);
+extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+			unsigned long addr, pgprot_t newprot);
 
 enum transparent_hugepage_flag {
 	TRANSPARENT_HUGEPAGE_FLAG,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 37e89a32a0b1..7b55fe0e998b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -948,6 +948,33 @@ int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	return ret;
 }
 
+int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+		unsigned long addr, pgprot_t newprot)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	int ret = 0;
+
+	spin_lock(&mm->page_table_lock);
+	if (likely(pmd_trans_huge(*pmd))) {
+		if (unlikely(pmd_trans_splitting(*pmd))) {
+			spin_unlock(&mm->page_table_lock);
+			wait_split_huge_page(vma->anon_vma, pmd);
+		} else {
+			pmd_t entry;
+
+			entry = pmdp_get_and_clear(mm, addr, pmd);
+			entry = pmd_modify(entry, newprot);
+			set_pmd_at(mm, addr, pmd, entry);
+			spin_unlock(&vma->vm_mm->page_table_lock);
+			flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
+			ret = 1;
+		}
+	} else
+		spin_unlock(&vma->vm_mm->page_table_lock);
+
+	return ret;
+}
+
 pmd_t *page_check_address_pmd(struct page *page,
 			      struct mm_struct *mm,
 			      unsigned long address,
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 9402080d0d4a..5a688a2756be 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -88,7 +88,13 @@ static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		split_huge_page_pmd(vma->vm_mm, pmd);
+		if (pmd_trans_huge(*pmd)) {
+			if (next - addr != HPAGE_PMD_SIZE)
+				split_huge_page_pmd(vma->vm_mm, pmd);
+			else if (change_huge_pmd(vma, pmd, addr, newprot))
+				continue;
+			/* fall through */
+		}
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
 		change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
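
The next - addr != HPAGE_PMD_SIZE check above decides between the old and new behavior: a protection change covering only part of a huge page still forces a split, while one spanning the whole aligned pmd is handled in place by change_huge_pmd(). A hedged userspace sketch of the two cases, reusing the hypothetical huge pointer from the earlier demo (the two functions are alternatives on a fresh THP mapping, since splitting is one-way):

/*
 * Sketch only: 'huge' is assumed to be a 2 MB-aligned, THP-backed
 * address as set up in the earlier demo.
 */
#include <sys/mman.h>

void partial_range(char *huge)
{
	/*
	 * Covers less than a huge page: in change_pmd_range(),
	 * next - addr != HPAGE_PMD_SIZE, so split_huge_page_pmd()
	 * runs and the pte loop performs the protection change.
	 */
	mprotect(huge, 4096, PROT_READ);
}

void full_range(char *huge)
{
	/*
	 * Covers exactly one aligned huge page: change_huge_pmd()
	 * rewrites the pmd in place, returns 1, and 'continue'
	 * skips the pte loop entirely.
	 */
	mprotect(huge, 2UL * 1024 * 1024, PROT_READ);
}
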