author	Christophe Leroy <christophe.leroy@c-s.fr>	2020-03-06 16:49:49 +0000
committer	Michael Ellerman <mpe@ellerman.id.au>	2020-03-13 21:10:37 +1100
commit	af3d0a68698c7e5df8b72267086b23422a3954bb (patch)
tree	7914ec670d4f9ffc35a8243ab1ecceb8d085fee4 /arch
parent	59bee45b9712c759ea4d3dcc4eff1752f3a66558 (diff)
powerpc/kasan: Fix shadow memory protection with CONFIG_KASAN_VMALLOC
With CONFIG_KASAN_VMALLOC, new page tables are created when the shadow
memory for the vmalloc area is unmapped. If some parts of a newly created
page table still have entries pointing to the zero page shadow memory,
those entries are wrongly marked RW.

With CONFIG_KASAN_VMALLOC, almost the entire kernel address space is
managed by KASAN. To keep things simple, just create the KASAN page
tables for the entire kernel space at kasan_init(). That doesn't use much
more space, and it is already done that way on hash platforms anyway.

Fixes: 3d4247fcc938 ("powerpc/32: Add support of KASAN_VMALLOC")
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/ef5248fc1f496c6b0dfdb59380f24968f25f75c5.1583513368.git.christophe.leroy@c-s.fr
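For context: KASAN tracks every kernel address through a shadow byte at a
fixed linear offset, which is why CONFIG_KASAN_VMALLOC pulls almost the
whole kernel address space under KASAN's shadow page tables. A minimal
sketch of that translation, mirroring kasan_mem_to_shadow() from
include/linux/kasan.h (the helper name here is illustrative only, not
part of this patch):

#include <linux/kasan.h>	/* KASAN_SHADOW_OFFSET, KASAN_SHADOW_SCALE_SHIFT */

/* Each shadow byte covers 2^KASAN_SHADOW_SCALE_SHIFT (8) bytes, so the
 * shadow of the kernel space is one contiguous region 1/8th its size. */
static inline void *shadow_of(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}

Because that shadow region is contiguous, building its page tables once
in kasan_init() also covers any vmalloc mapping created later, so no page
table has to be created while shadow memory is being remapped.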
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/mm/kasan/kasan_init_32.c	9
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
index db5664dde5ff..d2bed3fcb719 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -120,12 +120,6 @@ static void __init kasan_unmap_early_shadow_vmalloc(void)
 	unsigned long k_cur;
 	phys_addr_t pa = __pa(kasan_early_shadow_page);
 
-	if (!early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
-		int ret = kasan_init_shadow_page_tables(k_start, k_end);
-
-		if (ret)
-			panic("kasan: kasan_init_shadow_page_tables() failed");
-	}
 	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
 		pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
 		pte_t *ptep = pte_offset_kernel(pmd, k_cur);
@@ -143,7 +137,8 @@ void __init kasan_mmu_init(void)
 	int ret;
 	struct memblock_region *reg;
 
-	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
+	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ||
+	    IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
 		ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
 		if (ret)
 			panic("kasan: kasan_init_shadow_page_tables() failed");
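The new condition relies on IS_ENABLED(), which resolves the Kconfig
check at compile time. A standalone sketch of that pattern follows; the
helper below is hypothetical, not part of this patch:

#include <linux/kconfig.h>	/* IS_ENABLED() */

/* IS_ENABLED(CONFIG_KASAN_VMALLOC) expands to 1 when the option is
 * built in, 0 otherwise, letting the compiler drop the dead branch. */
static bool want_full_shadow_tables(bool has_hash_mmu)
{
	return has_hash_mmu || IS_ENABLED(CONFIG_KASAN_VMALLOC);
}

With CONFIG_KASAN_VMALLOC=y this is always true, which is why
kasan_unmap_early_shadow_vmalloc() no longer needs its own call to
kasan_init_shadow_page_tables().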