author		Mike Rapoport <rppt@linux.ibm.com>		2019-12-04 16:53:44 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-12-04 19:44:14 -0800
commit		a73c948952ccd663b47f6cf59ac0fcae5bf0512c (patch)
tree		d5cd6ea2e2ad6ff0e0a03566f9969f18c21b2a2e /arch
parent		b27d8517365eeac907de1cc1e91053ccf18179c3 (diff)
download	linux-a73c948952ccd663b47f6cf59ac0fcae5bf0512c.tar.bz2
alpha: use pgtable-nopud instead of 4level-fixup
Patch series "mm: remove __ARCH_HAS_4LEVEL_HACK", v13.

These patches convert several architectures to use page table folding and
remove __ARCH_HAS_4LEVEL_HACK along with include/asm-generic/4level-fixup.h.

For the nommu configurations the folding is already implemented by the
generic code so the only change was to use the appropriate header file.

As for the rest, the changes are mostly about mechanical replacement of pgd
accessors with pud/pmd ones and the addition of higher levels to page table
traversals.

With Vineet's patches from the "elide extraneous generated code for folded
p4d/pud/pmd" series [1] there is a small shrink of the kernel size of about
-0.01% for the defconfig builds.

This patch (of 13):

It is not likely alpha will have 5-level page tables. Replace usage of
include/asm-generic/4level-fixup.h and implied __ARCH_HAS_4LEVEL_HACK with
include/asm-generic/pgtable-nopud.h and adjust page table manipulation
macros and functions accordingly.

Link: http://lkml.kernel.org/r/1572938135-31886-2-git-send-email-rppt@kernel.org
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Helge Deller <deller@gmx.de>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Peter Rosin <peda@axentia.se>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rolf Eike Beer <eike-kernel@sf-tec.de>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Sam Creasey <sammy@sammy.net>
Cc: Vincent Chen <deanbo422@gmail.com>
Cc: Vineet Gupta <Vineet.Gupta1@synopsys.com>
Cc: Anatoly Pugachev <matorola@gmail.com>
Cc: Russell King <rmk+kernel@armlinux.org.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
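To make the folding mentioned above concrete, here is a minimal, self-contained user-space sketch (toy types and constants, not the real kernel definitions) of what the <asm-generic/pgtable-nopud.h> style of folding amounts to: p4d_offset() and pud_offset() simply hand back the entry they were given, so a page-table walk written for five levels compiles and runs unchanged on a three-level architecture such as alpha.

    /*
     * Toy model of page-table level folding -- standalone C, not kernel code.
     * The type names mirror the kernel's, but everything here is illustrative.
     */
    #include <stdio.h>
    #include <stdint.h>

    typedef struct { uint64_t val; } pgd_t;
    typedef struct { pgd_t pgd; } p4d_t;   /* folded into the pgd entry */
    typedef struct { p4d_t p4d; } pud_t;   /* folded into the p4d entry */

    /* Folded levels consult no extra table: the "offset" is the entry itself. */
    static inline p4d_t *p4d_offset(pgd_t *pgdp, uint64_t addr)
    {
            (void)addr;
            return (p4d_t *)pgdp;
    }

    static inline pud_t *pud_offset(p4d_t *p4dp, uint64_t addr)
    {
            (void)addr;
            return (pud_t *)p4dp;
    }

    int main(void)
    {
            pgd_t pgd = { .val = 0x2000 };          /* pretend pmd-table pointer */
            uint64_t addr = 0xfffffc0000000000ULL;  /* sample kernel address */

            p4d_t *p4d = p4d_offset(&pgd, addr);
            pud_t *pud = pud_offset(p4d, addr);

            /* All three name the same entry because p4d and pud are folded. */
            printf("pgd=%p p4d=%p pud=%p\n", (void *)&pgd, (void *)p4d, (void *)pud);
            return 0;
    }

Because the folded helpers cost nothing, callers can be converted mechanically: traversals gain p4d_offset()/pud_offset() steps, and the architecture only has to provide the pud_* accessors that were previously spelled pgd_*.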
Diffstat (limited to 'arch')
-rw-r--r--	arch/alpha/include/asm/mmzone.h		 1
-rw-r--r--	arch/alpha/include/asm/pgalloc.h	 4
-rw-r--r--	arch/alpha/include/asm/pgtable.h	24
-rw-r--r--	arch/alpha/mm/init.c			12
4 files changed, 22 insertions, 19 deletions
diff --git a/arch/alpha/include/asm/mmzone.h b/arch/alpha/include/asm/mmzone.h
index 889b5d3ad825..7ee144f484f1 100644
--- a/arch/alpha/include/asm/mmzone.h
+++ b/arch/alpha/include/asm/mmzone.h
@@ -73,7 +73,6 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> 32))
-#define pgd_page(pgd) (pfn_to_page(pgd_val(pgd) >> 32))
#define pte_pfn(pte) (pte_val(pte) >> 32)
#define mk_pte(page, pgprot) \
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index eb91f1e85629..a1a29f60934c 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -27,9 +27,9 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
}
static inline void
-pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
- pgd_set(pgd, pmd);
+ pud_set(pud, pmd);
}
extern pgd_t *pgd_alloc(struct mm_struct *mm);
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 065b57f408c3..299791ce14b6 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -2,7 +2,7 @@
#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H
-#include <asm-generic/4level-fixup.h>
+#include <asm-generic/pgtable-nopud.h>
/*
* This file contains the functions and defines necessary to modify and use
@@ -226,8 +226,8 @@ extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
-extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
-{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
+extern inline void pud_set(pud_t * pudp, pmd_t * pmdp)
+{ pud_val(*pudp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
extern inline unsigned long
@@ -238,11 +238,11 @@ pmd_page_vaddr(pmd_t pmd)
#ifndef CONFIG_DISCONTIGMEM
#define pmd_page(pmd) (mem_map + ((pmd_val(pmd) & _PFN_MASK) >> 32))
-#define pgd_page(pgd) (mem_map + ((pgd_val(pgd) & _PFN_MASK) >> 32))
+#define pud_page(pud) (mem_map + ((pud_val(pud) & _PFN_MASK) >> 32))
#endif
-extern inline unsigned long pgd_page_vaddr(pgd_t pgd)
-{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
+extern inline unsigned long pud_page_vaddr(pud_t pgd)
+{ return PAGE_OFFSET + ((pud_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
extern inline int pte_none(pte_t pte) { return !pte_val(pte); }
extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_VALID; }
@@ -256,10 +256,10 @@ extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) & ~_PFN_MASK) != _P
extern inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _PAGE_VALID; }
extern inline void pmd_clear(pmd_t * pmdp) { pmd_val(*pmdp) = 0; }
-extern inline int pgd_none(pgd_t pgd) { return !pgd_val(pgd); }
-extern inline int pgd_bad(pgd_t pgd) { return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE; }
-extern inline int pgd_present(pgd_t pgd) { return pgd_val(pgd) & _PAGE_VALID; }
-extern inline void pgd_clear(pgd_t * pgdp) { pgd_val(*pgdp) = 0; }
+extern inline int pud_none(pud_t pud) { return !pud_val(pud); }
+extern inline int pud_bad(pud_t pud) { return (pud_val(pud) & ~_PFN_MASK) != _PAGE_TABLE; }
+extern inline int pud_present(pud_t pud) { return pud_val(pud) & _PAGE_VALID; }
+extern inline void pud_clear(pud_t * pudp) { pud_val(*pudp) = 0; }
/*
* The following only work if pte_present() is true.
@@ -301,9 +301,9 @@ extern inline pte_t pte_mkspecial(pte_t pte) { return pte; }
*/
/* Find an entry in the second-level page table.. */
-extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
+extern inline pmd_t * pmd_offset(pud_t * dir, unsigned long address)
{
- pmd_t *ret = (pmd_t *) pgd_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
+ pmd_t *ret = (pmd_t *) pud_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
smp_read_barrier_depends(); /* see above */
return ret;
}
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index e2cbec3789e8..12e218d3792a 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -146,6 +146,8 @@ callback_init(void * kernel_end)
{
struct crb_struct * crb;
pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
pmd_t *pmd;
void *two_pages;
@@ -184,8 +186,10 @@ callback_init(void * kernel_end)
memset(two_pages, 0, 2*PAGE_SIZE);
pgd = pgd_offset_k(VMALLOC_START);
- pgd_set(pgd, (pmd_t *)two_pages);
- pmd = pmd_offset(pgd, VMALLOC_START);
+ p4d = p4d_offset(pgd, VMALLOC_START);
+ pud = pud_offset(p4d, VMALLOC_START);
+ pud_set(pud, (pmd_t *)two_pages);
+ pmd = pmd_offset(pud, VMALLOC_START);
pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));
if (alpha_using_srm) {
@@ -214,9 +218,9 @@ callback_init(void * kernel_end)
/* Newer consoles (especially on larger
systems) may require more pages of
PTEs. Grab additional pages as needed. */
- if (pmd != pmd_offset(pgd, vaddr)) {
+ if (pmd != pmd_offset(pud, vaddr)) {
memset(kernel_end, 0, PAGE_SIZE);
- pmd = pmd_offset(pgd, vaddr);
+ pmd = pmd_offset(pud, vaddr);
pmd_set(pmd, (pte_t *)kernel_end);
kernel_end += PAGE_SIZE;
}
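As a side note on the pmd_offset() hunk in pgtable.h above, the index arithmetic can be checked in isolation. The following standalone snippet uses alpha-like constants (8 KB pages, 1024 eight-byte entries per table) declared locally; it is an illustration, not kernel code.

    #include <stdio.h>

    #define PAGE_SHIFT      13                              /* alpha: 8 KB pages */
    #define PTRS_PER_PAGE   (1ULL << (PAGE_SHIFT - 3))      /* 1024 entries/table */
    #define PMD_SHIFT       (PAGE_SHIFT + (PAGE_SHIFT - 3)) /* 23 */

    int main(void)
    {
            unsigned long long vaddr = 0xfffffc0000800000ULL;  /* sample address */
            unsigned long long idx = (vaddr >> PMD_SHIFT) & (PTRS_PER_PAGE - 1);

            /* Same expression as pmd_offset(): bits [23,33) select the pmd slot. */
            printf("0x%llx -> pmd index %llu\n", vaddr, idx);
            return 0;
    }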