author	Mike Rapoport <rppt@linux.ibm.com>	2019-11-05 16:33:20 +0200
committer	Max Filippov <jcmvbkbc@gmail.com>	2019-11-26 11:33:39 -0800
commit	f5ee2567921dec4f489c16d4fe22c3a7222d0ce6 (patch)
tree	54a6a21fad933163b9b486439d852c33b9438a1a /arch/xtensa/mm
parent	f0d1eab8c2e1f9240cf4ae4753d7947c65e60bd7 (diff)
xtensa: get rid of __ARCH_USE_5LEVEL_HACK
xtensa has 2-level page tables and already uses pgtable-nopmd for page
table folding. Add walks of p4d level where appropriate and drop usage
of __ARCH_USE_5LEVEL_HACK.

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Message-Id: <1572964400-16542-3-git-send-email-rppt@kernel.org>
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
[fix up arch/xtensa/include/asm/fixmap.h and arch/xtensa/mm/tlb.c]
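As a reading aid (not part of the patch), the walk pattern the conversion
produces can be sketched on its own. walk_to_pte() below is a hypothetical
helper name; its shape mirrors get_pte_for_vaddr() in arch/xtensa/mm/tlb.c
after this change. On a 2-level architecture such as xtensa the p4d/pud/pmd
levels are folded away by the generic pgtable-nop*d headers, so the extra
lookups compile down to pointer casts.

#include <linux/mm.h>	/* mm_struct, pgd/p4d/pud/pmd/pte helpers */

/*
 * Minimal sketch of a full page-table walk with the p4d level included;
 * hypothetical helper, not taken from this patch.
 */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset(mm, vaddr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none_or_clear_bad(pgd))
		return NULL;
	p4d = p4d_offset(pgd, vaddr);	/* level added by this conversion */
	if (p4d_none_or_clear_bad(p4d))
		return NULL;
	pud = pud_offset(p4d, vaddr);	/* now takes the p4d, not the pgd */
	if (pud_none_or_clear_bad(pud))
		return NULL;
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none_or_clear_bad(pmd))
		return NULL;
	return pte_offset_kernel(pmd, vaddr);
}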
Diffstat (limited to 'arch/xtensa/mm')
-rw-r--r--	arch/xtensa/mm/fault.c	10
-rw-r--r--	arch/xtensa/mm/kasan_init.c	6
-rw-r--r--	arch/xtensa/mm/mmu.c	3
-rw-r--r--	arch/xtensa/mm/tlb.c	6
4 files changed, 19 insertions, 6 deletions
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 68a041402025..bee30a77cd70 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -197,6 +197,7 @@ vmalloc_fault:
struct mm_struct *act_mm = current->active_mm;
int index = pgd_index(address);
pgd_t *pgd, *pgd_k;
+ p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
@@ -212,8 +213,13 @@ vmalloc_fault:
pgd_val(*pgd) = pgd_val(*pgd_k);
- pud = pud_offset(pgd, address);
- pud_k = pud_offset(pgd_k, address);
+ p4d = p4d_offset(pgd, address);
+ p4d_k = p4d_offset(pgd_k, address);
+ if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
+ goto bad_page_fault;
+
+ pud = pud_offset(p4d, address);
+ pud_k = pud_offset(p4d_k, address);
if (!pud_present(*pud) || !pud_present(*pud_k))
goto bad_page_fault;
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
index ace98bdf5191..9c957791bb33 100644
--- a/arch/xtensa/mm/kasan_init.c
+++ b/arch/xtensa/mm/kasan_init.c
@@ -20,7 +20,8 @@ void __init kasan_early_init(void)
{
unsigned long vaddr = KASAN_SHADOW_START;
pgd_t *pgd = pgd_offset_k(vaddr);
- pud_t *pud = pud_offset(pgd, vaddr);
+ p4d_t *p4d = p4d_offset(pgd, vaddr);
+ pud_t *pud = pud_offset(p4d, vaddr);
pmd_t *pmd = pmd_offset(pud, vaddr);
int i;
@@ -43,7 +44,8 @@ static void __init populate(void *start, void *end)
unsigned long i, j;
unsigned long vaddr = (unsigned long)start;
pgd_t *pgd = pgd_offset_k(vaddr);
- pud_t *pud = pud_offset(pgd, vaddr);
+ p4d_t *p4d = p4d_offset(pgd, vaddr);
+ pud_t *pud = pud_offset(p4d, vaddr);
pmd_t *pmd = pmd_offset(pud, vaddr);
pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 018dda2c6a91..37e478a27877 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -22,7 +22,8 @@
static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
{
pgd_t *pgd = pgd_offset_k(vaddr);
- pud_t *pud = pud_offset(pgd, vaddr);
+ p4d_t *p4d = p4d_offset(pgd, vaddr);
+ pud_t *pud = pud_offset(p4d, vaddr);
pmd_t *pmd = pmd_offset(pud, vaddr);
pte_t *pte;
unsigned long i;
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index 164a2ca07a1f..ec8220973252 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -169,6 +169,7 @@ static unsigned get_pte_for_vaddr(unsigned vaddr)
struct task_struct *task = get_current();
struct mm_struct *mm = task->mm;
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -178,7 +179,10 @@ static unsigned get_pte_for_vaddr(unsigned vaddr)
pgd = pgd_offset(mm, vaddr);
if (pgd_none_or_clear_bad(pgd))
return 0;
- pud = pud_offset(pgd, vaddr);
+ p4d = p4d_offset(pgd, vaddr);
+ if (p4d_none_or_clear_bad(p4d))
+ return 0;
+ pud = pud_offset(p4d, vaddr);
if (pud_none_or_clear_bad(pud))
return 0;
pmd = pmd_offset(pud, vaddr);
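For context on why the p4d checks added above cost nothing on xtensa: with
the generic folding used when an architecture has no real p4d/pud levels,
the p4d "entry" is simply the pgd entry and the p4d predicates are
compile-time constants. The following is a paraphrased illustration of that
folding (in the spirit of the asm-generic pgtable-nop4d.h/pgtable-nopud.h
headers, not a verbatim copy):

/* Illustrative sketch of generic p4d folding on a 2-level architecture */
typedef struct { pgd_t pgd; } p4d_t;

static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	return (p4d_t *)pgd;		/* the p4d level collapses onto the pgd */
}

static inline int p4d_none(p4d_t p4d)    { return 0; }	/* never empty    */
static inline int p4d_bad(p4d_t p4d)     { return 0; }	/* never bad      */
static inline int p4d_present(p4d_t p4d) { return 1; }	/* always present */

/*
 * Hence p4d_none_or_clear_bad() always returns 0 and the !p4d_present()
 * tests added in do_page_fault() are optimized away by the compiler.
 */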