author     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2017-03-09 17:24:08 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>	2017-03-09 11:48:48 -0800
commit     90eceff1a375f6ffa78caf8654e787c0a8a591ef
tree       a3fa46c32dc6b45dc99559eafa937ff4c4d2f1d7
parent     c2febafc67734a62196c1b9dfba926412d4077ba
download   linux-90eceff1a375f6ffa78caf8654e787c0a8a591ef.tar.bz2
mm: introduce __p4d_alloc()
For full 5-level paging we need a helper to allocate a p4d page table.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
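
In the 5-level layout the new p4d level sits between pgd and pud, so a fault-time page-table walk has to be able to allocate it before descending to the lower levels. Below is a minimal sketch of such a walk, loosely modelled on __handle_mm_fault(); the function name alloc_levels_sketch is made up for illustration, while pgd_offset()/p4d_alloc()/pud_alloc()/pmd_alloc() are the existing kernel helpers:

	static int alloc_levels_sketch(struct mm_struct *mm, unsigned long address)
	{
		pgd_t *pgd = pgd_offset(mm, address);	/* top level always exists */
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;

		p4d = p4d_alloc(mm, pgd, address);	/* may fall back to __p4d_alloc() */
		if (!p4d)
			return -ENOMEM;
		pud = pud_alloc(mm, p4d, address);	/* lower levels hang off the p4d */
		if (!pud)
			return -ENOMEM;
		pmd = pmd_alloc(mm, pud, address);
		if (!pmd)
			return -ENOMEM;
		return 0;				/* pte level is handled later */
	}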
 mm/memory.c | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)
diff --git a/mm/memory.c b/mm/memory.c
index 7f1c2163b3ce..235ba51b2fbf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3906,6 +3906,29 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(handle_mm_fault);
 
+#ifndef __PAGETABLE_P4D_FOLDED
+/*
+ * Allocate p4d page table.
+ * We've already handled the fast-path in-line.
+ */
+int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+	p4d_t *new = p4d_alloc_one(mm, address);
+	if (!new)
+		return -ENOMEM;
+
+	smp_wmb(); /* See comment in __pte_alloc */
+
+	spin_lock(&mm->page_table_lock);
+	if (pgd_present(*pgd))		/* Another has populated it */
+		p4d_free(mm, new);
+	else
+		pgd_populate(mm, pgd, new);
+	spin_unlock(&mm->page_table_lock);
+	return 0;
+}
+#endif /* __PAGETABLE_P4D_FOLDED */
+
 #ifndef __PAGETABLE_PUD_FOLDED
 /*
  * Allocate page upper directory.
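
The "fast-path in-line" that the new comment mentions is the p4d_alloc() wrapper in include/linux/mm.h: a walker only drops into __p4d_alloc() when the pgd entry is still empty, so the common case takes no lock at all. A hedged sketch of that wrapper (it belongs to the wider 5-level series rather than this hunk, so treat it as illustrative):

	static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
				       unsigned long address)
	{
		/* Common case: pgd already points at a p4d table; no lock, no allocation. */
		return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
			NULL : p4d_offset(pgd, address);
	}

This split is why __p4d_alloc() can take mm->page_table_lock unconditionally and simply p4d_free() the fresh table if another thread raced in and populated the entry first; the smp_wmb() orders the new table's initialisation before the pgd_populate() that makes it visible to other walkers.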