author     Christophe Leroy <christophe.leroy@c-s.fr>  2019-04-26 15:58:06 +0000
committer  Michael Ellerman <mpe@ellerman.id.au>       2019-05-03 01:20:25 +1000
commit     4a6d8cf90017019f3b2829b38157cd1a74c64856 (patch)
tree       601bd4e8b111fb3ff44701ed11701a6468f6e727
parent     627f06c6f51e6af6ca3f7d1e82154b59583abc15 (diff)
powerpc/mm: don't use pte_alloc_kernel() until slab is available on PPC32
In the same way as PPC64, implement early allocation functions and
avoid calling pte_alloc_kernel() before slab is available.

Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r--  arch/powerpc/mm/pgtable_32.c  34
1 file changed, 28 insertions(+), 6 deletions(-)
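
For readers outside the kernel tree, the pattern the patch applies can be modelled in isolation: route an allocation through the regular allocator once it is up, and fall back to the boot-time memblock allocator before that, with the check made at the call site rather than buried inside the allocation helper. Below is a minimal standalone C sketch of that idea; slab_up, stub_memblock_alloc() and stub_slab_alloc() are invented stand-ins for slab_is_available(), memblock_alloc() and the slab-backed path, not kernel APIs.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Stand-ins for kernel state and allocators (assumptions for this sketch). */
static bool slab_up;	/* models slab_is_available() flipping during boot */

static void *stub_memblock_alloc(size_t size, size_t align)
{
	/* The real memblock_alloc() carves memory out of the boot memory
	 * map; aligned_alloc() merely stands in for it here. */
	return aligned_alloc(align, size);
}

static void *stub_slab_alloc(size_t size)
{
	return malloc(size);	/* models the normal slab-backed allocation */
}

/* The shape of the change: the caller picks the allocator based on boot
 * progress, instead of the allocation helper hiding an early fallback. */
static void *alloc_pte_table(size_t size)
{
	if (slab_up)
		return stub_slab_alloc(size);
	return stub_memblock_alloc(size, size);	/* early boot path */
}

int main(void)
{
	void *early = alloc_pte_table(4096);	/* before slab: memblock path */
	slab_up = true;
	void *late = alloc_pte_table(4096);	/* after slab: normal path */
	printf("early=%p late=%p\n", early, late);
	free(late);
	free(early);
	return 0;
}

Hoisting the slab_is_available() test into the caller is also what lets the patch drop __ref from pte_alloc_one_kernel() and move it to map_kernel_page(): only the one function that can actually run before slab is up needs permission to reference __init code.
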
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index c9cdbb84d31f..e54b612cbc98 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -43,11 +43,8 @@ EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */
 
 extern char etext[], _stext[], _sinittext[], _einittext[];
 
-__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 {
-	if (!slab_is_available())
-		return memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
-
 	return (pte_t *)pte_fragment_alloc(mm, 1);
 }
 
@@ -205,7 +202,29 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
-int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
+static void __init *early_alloc_pgtable(unsigned long size)
+{
+	void *ptr = memblock_alloc(size, size);
+
+	if (!ptr)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, size, size);
+
+	return ptr;
+}
+
+static pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
+{
+	if (pmd_none(*pmdp)) {
+		pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);
+
+		pmd_populate_kernel(&init_mm, pmdp, ptep);
+	}
+	return pte_offset_kernel(pmdp, va);
+}
+
+
+int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
 {
 	pmd_t *pd;
 	pte_t *pg;
@@ -214,7 +233,10 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
 	/* Use upper 10 bits of VA to index the first level map */
 	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
 	/* Use middle 10 bits of VA to index the second-level map */
-	pg = pte_alloc_kernel(pd, va);
+	if (likely(slab_is_available()))
+		pg = pte_alloc_kernel(pd, va);
+	else
+		pg = early_pte_alloc_kernel(pd, va);
 	if (pg != 0) {
 		err = 0;
 		/* The PTE should never be already set nor present in the