author		Yu Zhao <yuzhao@google.com>		2019-03-11 18:57:49 -0600
committer	Will Deacon <will.deacon@arm.com>	2019-04-09 11:21:50 +0100
commit		54c8d9119ec85de48fd0707946a5e57df9ad7acf (patch)
tree		bb63c0e6365016782e0d0646a6ad9e004ed88fc5
parent		14b94d07572619af896c6d2d83b1196c4041fe19 (diff)
arm64: mm: enable per pmd page table lock
Switch from a per-mm_struct to a per-pmd page table lock by enabling
ARCH_ENABLE_SPLIT_PMD_PTLOCK. This provides better granularity for
large systems.

I'm not sure if there is contention on mm->page_table_lock. Given the
option comes at no cost (apart from initializing more spin locks), why
not enable it now.

We only do so when the pmd is not folded, so we don't mistakenly call
pgtable_pmd_page_ctor() on a pud or p4d in pgd_pgtable_alloc().

Signed-off-by: Yu Zhao <yuzhao@google.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
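For context, once an architecture selects ARCH_ENABLE_SPLIT_PMD_PTLOCK, the
generic mm code picks the PMD-level page table lock roughly as follows. This
is a simplified sketch of the include/linux/mm.h helpers, not part of this
patch, and the exact definitions vary by kernel version:

/* Simplified sketch of the generic PMD lock selection (include/linux/mm.h). */
#if USE_SPLIT_PMD_PTLOCKS
static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	/* One spinlock per PMD page table page, kept in its struct page. */
	return ptlock_ptr(pmd_to_page(pmd));
}
#else
static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	/* Fallback: a single lock shared by the whole address space. */
	return &mm->page_table_lock;
}
#endif

static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lockptr(mm, pmd);

	spin_lock(ptl);
	return ptl;
}

With the per-mm fallback, every PMD update in a process serializes on
mm->page_table_lock; with the split lock, updates to different PMD pages can
proceed in parallel, which is the granularity improvement the commit message
refers to.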
-rw-r--r--	arch/arm64/Kconfig			 3
-rw-r--r--	arch/arm64/include/asm/pgalloc.h	12
-rw-r--r--	arch/arm64/include/asm/tlb.h		 5
3 files changed, 18 insertions, 2 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7e34b9eba5de..555af5035592 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -889,6 +889,9 @@ config ARCH_WANT_HUGE_PMD_SHARE
 config ARCH_HAS_CACHE_LINE_SIZE
 	def_bool y
 
+config ARCH_ENABLE_SPLIT_PMD_PTLOCK
+	def_bool y if PGTABLE_LEVELS > 2
+
 config SECCOMP
 	bool "Enable seccomp to safely compute untrusted bytecode"
 	---help---
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 52fa47c73bf0..dabba4b2c61f 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -33,12 +33,22 @@
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return (pmd_t *)__get_free_page(PGALLOC_GFP);
+	struct page *page;
+
+	page = alloc_page(PGALLOC_GFP);
+	if (!page)
+		return NULL;
+	if (!pgtable_pmd_page_ctor(page)) {
+		__free_page(page);
+		return NULL;
+	}
+	return page_address(page);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
 {
 	BUG_ON((unsigned long)pmdp & (PAGE_SIZE-1));
+	pgtable_pmd_page_dtor(virt_to_page(pmdp));
 	free_page((unsigned long)pmdp);
 }
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 106fdc951b6e..4e3becfed387 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -62,7 +62,10 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 				  unsigned long addr)
 {
-	tlb_remove_table(tlb, virt_to_page(pmdp));
+	struct page *page = virt_to_page(pmdp);
+
+	pgtable_pmd_page_dtor(page);
+	tlb_remove_table(tlb, page);
 }
 #endif
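For reference, the pgtable_pmd_page_ctor()/pgtable_pmd_page_dtor() calls added
above pair the allocation and freeing of a PMD page with the setup and teardown
of its split lock. A rough sketch of the generic helpers, simplified from
include/linux/mm.h (exact fields and config dependencies vary by kernel
version):

static inline bool pgtable_pmd_page_ctor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	page->pmd_huge_pte = NULL;	/* THP deposit list starts empty */
#endif
	return ptlock_init(page);	/* set up the per-page spinlock */
}

static inline void pgtable_pmd_page_dtor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
#endif
	ptlock_free(page);		/* release the per-page spinlock */
}

This is also why the option is only enabled when the pmd is not folded: as the
commit message notes, pgd_pgtable_alloc() is shared with the pud/p4d levels,
which must not receive a PMD-style ctor call.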