From 8a4102a0cf07cc76a18f373f6b49485258cc6af4 Mon Sep 17 00:00:00 2001
From: Jisheng Zhang
Date: Sun, 16 May 2021 17:00:38 +0800
Subject: riscv: mm: Fix W+X mappings at boot

When the kernel mapping was moved to the last 2GB of the address space,
(__va(PFN_PHYS(max_low_pfn))) became much smaller than the .data section
start address, so the last set_memory_nx() in protect_kernel_text_data()
fails and the .data section is still mapped W+X. This results in the W+X
mapping warning below at boot. Fix it by passing the correct .data
section page count to set_memory_nx().

[ 0.396516] ------------[ cut here ]------------
[ 0.396889] riscv/mm: Found insecure W+X mapping at address (____ptrval____)/0xffffffff80c00000
[ 0.398347] WARNING: CPU: 0 PID: 1 at arch/riscv/mm/ptdump.c:258 note_page+0x244/0x24a
[ 0.398964] Modules linked in:
[ 0.399459] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.13.0-rc1+ #14
[ 0.400003] Hardware name: riscv-virtio,qemu (DT)
[ 0.400591] epc : note_page+0x244/0x24a
[ 0.401368] ra : note_page+0x244/0x24a
[ 0.401772] epc : ffffffff80007c86 ra : ffffffff80007c86 sp : ffffffe000e7bc30
[ 0.402304] gp : ffffffff80caae88 tp : ffffffe000e70000 t0 : ffffffff80cb80cf
[ 0.402800] t1 : ffffffff80cb80c0 t2 : 0000000000000000 s0 : ffffffe000e7bc80
[ 0.403310] s1 : ffffffe000e7bde8 a0 : 0000000000000053 a1 : ffffffff80c83ff0
[ 0.403805] a2 : 0000000000000010 a3 : 0000000000000000 a4 : 6c7e7a5137233100
[ 0.404298] a5 : 6c7e7a5137233100 a6 : 0000000000000030 a7 : ffffffffffffffff
[ 0.404849] s2 : ffffffff80e00000 s3 : 0000000040000000 s4 : 0000000000000000
[ 0.405393] s5 : 0000000000000000 s6 : 0000000000000003 s7 : ffffffe000e7bd48
[ 0.405935] s8 : ffffffff81000000 s9 : ffffffffc0000000 s10: ffffffe000e7bd48
[ 0.406476] s11: 0000000000001000 t3 : 0000000000000072 t4 : ffffffffffffffff
[ 0.407016] t5 : 0000000000000002 t6 : ffffffe000e7b978
[ 0.407435] status: 0000000000000120 badaddr: 0000000000000000 cause: 0000000000000003
[ 0.408052] Call Trace:
[ 0.408343] [] note_page+0x244/0x24a
[ 0.408855] [] ptdump_hole+0x14/0x1e
[ 0.409263] [] walk_pgd_range+0x2a0/0x376
[ 0.409690] [] walk_page_range_novma+0x4e/0x6e
[ 0.410146] [] ptdump_walk_pgd+0x48/0x78
[ 0.410570] [] ptdump_check_wx+0xb4/0xf8
[ 0.410990] [] mark_rodata_ro+0x26/0x2e
[ 0.411407] [] kernel_init+0x44/0x108
[ 0.411814] [] ret_from_exception+0x0/0xc
[ 0.412309] ---[ end trace 7ec3459f2547ea83 ]---
[ 0.413141] Checked W+X mappings: failed, 512 W+X pages found

Fixes: 2bfc6cd81bd17e43 ("riscv: Move kernel mapping outside of linear mapping")
Signed-off-by: Jisheng Zhang
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/mm/init.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

(limited to 'arch/riscv/mm')

diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 4faf8bd157ea..4c4c92ce0bb8 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -746,14 +746,18 @@ void __init protect_kernel_text_data(void)
 	unsigned long init_data_start = (unsigned long)__init_data_begin;
 	unsigned long rodata_start = (unsigned long)__start_rodata;
 	unsigned long data_start = (unsigned long)_data;
-	unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
+#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
+	unsigned long end_va = kernel_virt_addr + load_sz;
+#else
+	unsigned long end_va = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
+#endif
 
 	set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
 	set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
 	set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
 	/* rodata section is marked readonly in mark_rodata_ro */
 	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-	set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
+	set_memory_nx(data_start, (end_va - data_start) >> PAGE_SHIFT);
 }
 
 void mark_rodata_ro(void)
-- 
cgit v1.2.3
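To see the arithmetic being fixed, here is a minimal standalone C sketch (not kernel code; the addresses are invented to mirror the report above): once .data sits in the last 2GB, the old subtraction wraps around and set_memory_nx() is handed a bogus page count.

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4KB pages, as on RISC-V */

int main(void)
{
	/* hypothetical .data start inside the kernel mapping (last 2GB) */
	unsigned long data_start = 0xffffffff80c00000UL;
	/* hypothetical __va(PFN_PHYS(max_low_pfn)): top of the much lower linear mapping */
	unsigned long max_low = 0xffffffe040000000UL;
	/* hypothetical kernel_virt_addr + load_sz: real end of the kernel image */
	unsigned long end_va = 0xffffffff81000000UL;

	/* old computation wraps: set_memory_nx() gets a garbage range and fails */
	printf("old numpages: %#lx\n", (max_low - data_start) >> PAGE_SHIFT);
	/* fixed computation covers exactly .data up to the end of the image: 1024 pages here */
	printf("new numpages: %lu\n", (end_va - data_start) >> PAGE_SHIFT);
	return 0;
}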
From e5c35fa0401971701dcd7675f471b664698244dd Mon Sep 17 00:00:00 2001
From: Alexandre Ghiti
Date: Thu, 24 Jun 2021 14:00:41 +0200
Subject: riscv: Map the kernel with correct permissions the first time

For 64-bit kernels, we map the entire kernel with write and execute
permissions and afterwards remove writability from text and executability
from data.

For 32-bit kernels, the kernel mapping resides in the linear mapping, so
we map the entire linear mapping as writable and executable and afterwards
remove those properties for unused memory and for the kernel mapping as
described above.

Change this behavior to directly map the kernel with the correct
permissions and avoid going through the whole mapping afterwards to fix
the permissions.

At the same time, this fixes an issue introduced by commit 2bfc6cd81bd1
("riscv: Move kernel mapping outside of linear mapping") as reported
here: https://github.com/starfive-tech/linux/issues/17.

Signed-off-by: Alexandre Ghiti
Reviewed-by: Anup Patel
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/include/asm/page.h       |  13 ++++-
 arch/riscv/include/asm/sections.h   |  17 ++++++
 arch/riscv/include/asm/set_memory.h |   8 ---
 arch/riscv/kernel/setup.c           |  12 +---
 arch/riscv/mm/init.c                | 113 ++++++++++++++++--------------------
 5 files changed, 82 insertions(+), 81 deletions(-)

(limited to 'arch/riscv/mm')

diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 6e004d8fda4d..349e4f9874cc 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -95,6 +95,7 @@ extern unsigned long va_kernel_pa_offset;
 #endif
 extern unsigned long va_kernel_xip_pa_offset;
 extern unsigned long pfn_base;
+extern uintptr_t load_sz;
 #define ARCH_PFN_OFFSET		(pfn_base)
 #else
 #define va_pa_offset		0
@@ -108,6 +109,11 @@ extern unsigned long pfn_base;
 extern unsigned long kernel_virt_addr;
 
 #ifdef CONFIG_64BIT
+#define is_kernel_mapping(x)	\
+	((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+#define is_linear_mapping(x)	\
+	((x) >= PAGE_OFFSET && (x) < kernel_virt_addr)
+
 #define linear_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + va_pa_offset))
 #define kernel_mapping_pa_to_va(y)	({					\
 	unsigned long _y = y;							\
@@ -127,10 +133,15 @@ extern unsigned long kernel_virt_addr;
 
 #define __va_to_pa_nodebug(x)	({					\
 	unsigned long _x = x;						\
-	(_x < kernel_virt_addr) ?					\
+	is_linear_mapping(_x) ?						\
 		linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x);	\
 	})
 #else
+#define is_kernel_mapping(x)	\
+	((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+#define is_linear_mapping(x)	\
+	((x) >= PAGE_OFFSET)
+
 #define __pa_to_va_nodebug(x)  ((void *)((unsigned long) (x) + va_pa_offset))
 #define __va_to_pa_nodebug(x)  ((unsigned long)(x) - va_pa_offset)
 #endif /* CONFIG_64BIT */
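The predicates added above encode the 64-bit split between the linear mapping and the kernel mapping, and __va_to_pa_nodebug() now dispatches on is_linear_mapping() rather than a bare compare against kernel_virt_addr. Below is a small userspace model of the same logic; PAGE_OFFSET, kernel_virt_addr and load_sz are illustrative sv39-style values, not taken from the patch.

#include <stdio.h>
#include <stdbool.h>

#define PAGE_OFFSET	0xffffffe000000000UL	/* start of the linear mapping */
static unsigned long kernel_virt_addr = 0xffffffff80000000UL; /* last 2GB */
static unsigned long load_sz = 0x1000000UL;	/* size of the kernel image */

static bool is_kernel_mapping(unsigned long x)
{
	return x >= kernel_virt_addr && x < kernel_virt_addr + load_sz;
}

static bool is_linear_mapping(unsigned long x)
{
	return x >= PAGE_OFFSET && x < kernel_virt_addr;
}

int main(void)
{
	unsigned long samples[] = {
		PAGE_OFFSET + 0x200000,		/* some RAM in the linear map */
		kernel_virt_addr + 0xc00000,	/* .data inside the kernel map */
		kernel_virt_addr + load_sz,	/* first byte past the image: neither */
	};

	for (unsigned long i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%#lx: kernel=%d linear=%d\n", samples[i],
		       is_kernel_mapping(samples[i]),
		       is_linear_mapping(samples[i]));
	return 0;
}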
diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h
index 8a303fb1ee3b..32336e8a17cb 100644
--- a/arch/riscv/include/asm/sections.h
+++ b/arch/riscv/include/asm/sections.h
@@ -6,6 +6,7 @@
 #define __ASM_SECTIONS_H
 
 #include <asm-generic/sections.h>
+#include <linux/mm.h>
 
 extern char _start[];
 extern char _start_kernel[];
@@ -13,4 +14,20 @@ extern char __init_data_begin[], __init_data_end[];
 extern char __init_text_begin[], __init_text_end[];
 extern char __alt_start[], __alt_end[];
 
+static inline bool is_va_kernel_text(uintptr_t va)
+{
+	uintptr_t start = (uintptr_t)_start;
+	uintptr_t end = (uintptr_t)__init_data_begin;
+
+	return va >= start && va < end;
+}
+
+static inline bool is_va_kernel_lm_alias_text(uintptr_t va)
+{
+	uintptr_t start = (uintptr_t)lm_alias(_start);
+	uintptr_t end = (uintptr_t)lm_alias(__init_data_begin);
+
+	return va >= start && va < end;
+}
+
 #endif /* __ASM_SECTIONS_H */

diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index 135f726c4c1d..a2c14d4b3993 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -16,7 +16,6 @@ int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
 int set_memory_rw_nx(unsigned long addr, int numpages);
-void protect_kernel_text_data(void);
 static __always_inline int set_kernel_memory(char *startp, char *endp,
 					     int (*set_memory)(unsigned long start,
							       int num_pages))
@@ -32,7 +31,6 @@ static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
-static inline void protect_kernel_text_data(void) {}
 static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
 static inline int set_kernel_memory(char *startp, char *endp,
 				    int (*set_memory)(unsigned long start,
@@ -42,12 +40,6 @@ static inline int set_kernel_memory(char *startp, char *endp,
 }
 #endif
 
-#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
-void protect_kernel_linear_mapping_text_rodata(void);
-#else
-static inline void protect_kernel_linear_mapping_text_rodata(void) {}
-#endif
-
 int set_direct_map_invalid_noflush(struct page *page);
 int set_direct_map_default_noflush(struct page *page);
 bool kernel_page_present(struct page *page);

diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 03901d3a8b02..02d811127f48 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -291,11 +291,6 @@ void __init setup_arch(char **cmdline_p)
 	init_resources();
 	sbi_init();
 
-	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
-		protect_kernel_text_data();
-		protect_kernel_linear_mapping_text_rodata();
-	}
-
 #ifdef CONFIG_SWIOTLB
 	swiotlb_init(1);
 #endif
@@ -334,11 +329,10 @@ subsys_initcall(topology_init);
 
 void free_initmem(void)
 {
-	unsigned long init_begin = (unsigned long)__init_begin;
-	unsigned long init_end = (unsigned long)__init_end;
-
-	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
-		set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
+	set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end),
+			  IS_ENABLED(CONFIG_64BIT) ?
+			  set_memory_rw : set_memory_rw_nx);
 
 	free_initmem_default(POISON_FREE_INITMEM);
 }
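free_initmem() now funnels through the set_kernel_memory() helper from asm/set_memory.h, choosing the permission callback at the call site. A compilable sketch of that pattern follows; the kernel passes lm_alias(__init_begin)/lm_alias(__init_end) while the sketch uses a plain buffer, the set_memory_* stubs only log, and the helper body is paraphrased rather than copied.

#include <stdio.h>

#define PAGE_SHIFT 12

/* stand-ins for the kernel's set_memory_*(); they only log here */
static int set_memory_rw(unsigned long start, int num_pages)
{
	printf("rw:    %#lx, %d pages\n", start, num_pages);
	return 0;
}

static int set_memory_rw_nx(unsigned long start, int num_pages)
{
	printf("rw+nx: %#lx, %d pages\n", start, num_pages);
	return 0;
}

/* same shape as the helper in asm/set_memory.h: range in, callback applied */
static int set_kernel_memory(char *startp, char *endp,
			     int (*set_memory)(unsigned long start,
					       int num_pages))
{
	unsigned long start = (unsigned long)startp;
	unsigned long end = (unsigned long)endp;

	return set_memory(start, (int)((end - start) >> PAGE_SHIFT));
}

int main(void)
{
	static char initmem[1 << 14];	/* stand-in for __init_begin..__init_end */
	int is_64bit = 1;		/* stand-in for IS_ENABLED(CONFIG_64BIT) */

	/*
	 * 64-bit: the init area's mappings are already non-executable where
	 * they must be, so restoring write access is enough; 32-bit runs the
	 * kernel from the linear mapping, so exec must be dropped as well.
	 */
	return set_kernel_memory(initmem, initmem + sizeof(initmem),
				 is_64bit ? set_memory_rw : set_memory_rw_nx);
}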
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 4c4c92ce0bb8..dc37b9bb8cb9 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -436,6 +436,43 @@ asmlinkage void __init __copy_data(void)
 }
 #endif
 
+#ifdef CONFIG_STRICT_KERNEL_RWX
+static __init pgprot_t pgprot_from_va(uintptr_t va)
+{
+	if (is_va_kernel_text(va))
+		return PAGE_KERNEL_READ_EXEC;
+
+	/*
+	 * In 64-bit kernel, the kernel mapping is outside the linear mapping so
+	 * we must protect its linear mapping alias from being executed and
+	 * written.
+	 * And rodata section is marked readonly in mark_rodata_ro.
+	 */
+	if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va))
+		return PAGE_KERNEL_READ;
+
+	return PAGE_KERNEL;
+}
+
+void mark_rodata_ro(void)
+{
+	set_kernel_memory(__start_rodata, _data, set_memory_ro);
+	if (IS_ENABLED(CONFIG_64BIT))
+		set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data),
+				  set_memory_ro);
+
+	debug_checkwx();
+}
+#else
+static __init pgprot_t pgprot_from_va(uintptr_t va)
+{
+	if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
+		return PAGE_KERNEL;
+
+	return PAGE_KERNEL_EXEC;
+}
+#endif /* CONFIG_STRICT_KERNEL_RWX */
+
 /*
  * setup_vm() is called from head.S with MMU-off.
 *
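With this, every final mapping gets its protection from pgprot_from_va(). A userspace model of the STRICT_KERNEL_RWX branch for the 64-bit case follows; the section boundaries are invented for illustration and the pgprot values are rendered as strings.

#include <stdio.h>
#include <stdbool.h>

/* hypothetical layout: kernel text in the last 2GB plus its linear alias */
static unsigned long kernel_virt_addr = 0xffffffff80000000UL;
static unsigned long kernel_text_end  = 0xffffffff80800000UL; /* __init_data_begin */
static unsigned long lm_text_start    = 0xffffffe000200000UL; /* lm_alias(_start) */
static unsigned long lm_text_end      = 0xffffffe000a00000UL;

static bool is_va_kernel_text(unsigned long va)
{
	return va >= kernel_virt_addr && va < kernel_text_end;
}

static bool is_va_kernel_lm_alias_text(unsigned long va)
{
	return va >= lm_text_start && va < lm_text_end;
}

/* mirrors the STRICT_KERNEL_RWX branch of pgprot_from_va() on 64-bit */
static const char *pgprot_from_va(unsigned long va)
{
	if (is_va_kernel_text(va))
		return "PAGE_KERNEL_READ_EXEC";	/* kernel text: read + exec */
	if (is_va_kernel_lm_alias_text(va))	/* alias of text: read only */
		return "PAGE_KERNEL_READ";
	return "PAGE_KERNEL";			/* data and plain RAM: read + write */
}

int main(void)
{
	printf("kernel text:  %s\n", pgprot_from_va(kernel_virt_addr + 0x1000));
	printf("text alias:   %s\n", pgprot_from_va(lm_text_start + 0x1000));
	printf("ordinary RAM: %s\n", pgprot_from_va(0xffffffe040000000UL));
	return 0;
}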
@@ -454,7 +491,8 @@ asmlinkage void __init __copy_data(void)
 #error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
 #endif
 
-uintptr_t load_pa, load_sz;
+static uintptr_t load_pa __initdata;
+uintptr_t load_sz;
 #ifdef CONFIG_XIP_KERNEL
 #define load_pa        (*((uintptr_t *)XIP_FIXUP(&load_pa)))
 #define load_sz        (*((uintptr_t *)XIP_FIXUP(&load_sz)))
@@ -465,7 +503,8 @@ uintptr_t xiprom, xiprom_sz;
 #define xiprom_sz      (*((uintptr_t *)XIP_FIXUP(&xiprom_sz)))
 #define xiprom         (*((uintptr_t *)XIP_FIXUP(&xiprom)))
 
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+					    __always_unused bool early)
 {
 	uintptr_t va, end_va;
 
@@ -484,7 +523,8 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
 			   map_size, PAGE_KERNEL);
 }
 #else
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+					    bool early)
 {
 	uintptr_t va, end_va;
 
@@ -492,7 +532,9 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
 	for (va = kernel_virt_addr; va < end_va; va += map_size)
 		create_pgd_mapping(pgdir, va,
 				   load_pa + (va - kernel_virt_addr),
-				   map_size, PAGE_KERNEL_EXEC);
+				   map_size,
+				   early ?
+					PAGE_KERNEL_EXEC : pgprot_from_va(va));
 }
 #endif
 
@@ -569,7 +611,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 	 * us to reach paging_init(). We map all memory banks later
 	 * in setup_vm_final() below.
 	 */
-	create_kernel_page_table(early_pg_dir, map_size);
+	create_kernel_page_table(early_pg_dir, map_size, true);
 
 #ifndef __PAGETABLE_PMD_FOLDED
 	/* Setup early PMD for DTB */
@@ -645,22 +687,6 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 #endif
 }
 
-#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
-void protect_kernel_linear_mapping_text_rodata(void)
-{
-	unsigned long text_start = (unsigned long)lm_alias(_start);
-	unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin);
-	unsigned long rodata_start = (unsigned long)lm_alias(__start_rodata);
-	unsigned long data_start = (unsigned long)lm_alias(_data);
-
-	set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-	set_memory_nx(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-
-	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-}
-#endif
-
 static void __init setup_vm_final(void)
 {
 	uintptr_t va, map_size;
@@ -693,21 +719,15 @@ static void __init setup_vm_final(void)
 		map_size = best_map_size(start, end - start);
 		for (pa = start; pa < end; pa += map_size) {
 			va = (uintptr_t)__va(pa);
-			create_pgd_mapping(swapper_pg_dir, va, pa,
-					   map_size,
-#ifdef CONFIG_64BIT
-					   PAGE_KERNEL
-#else
-					   PAGE_KERNEL_EXEC
-#endif
-					   );
+			create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
+					   pgprot_from_va(va));
 		}
 	}
 
 #ifdef CONFIG_64BIT
 	/* Map the kernel */
-	create_kernel_page_table(swapper_pg_dir, PMD_SIZE);
+	create_kernel_page_table(swapper_pg_dir, PMD_SIZE, false);
 #endif
 
 	/* Clear fixmap PTE and PMD mappings */
@@ -738,39 +758,6 @@ static inline void setup_vm_final(void)
 }
 #endif /* CONFIG_MMU */
 
-#ifdef CONFIG_STRICT_KERNEL_RWX
-void __init protect_kernel_text_data(void)
-{
-	unsigned long text_start = (unsigned long)_start;
-	unsigned long init_text_start = (unsigned long)__init_text_begin;
-	unsigned long init_data_start = (unsigned long)__init_data_begin;
-	unsigned long rodata_start = (unsigned long)__start_rodata;
-	unsigned long data_start = (unsigned long)_data;
-#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
-	unsigned long end_va = kernel_virt_addr + load_sz;
-#else
-	unsigned long end_va = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
-#endif
-
-	set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-	set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
-	set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
-	/* rodata section is marked readonly in mark_rodata_ro */
-	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-	set_memory_nx(data_start, (end_va - data_start) >> PAGE_SHIFT);
-}
-
-void mark_rodata_ro(void)
-{
-	unsigned long rodata_start = (unsigned long)__start_rodata;
-	unsigned long data_start = (unsigned long)_data;
-
-	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-
-	debug_checkwx();
-}
-#endif
-
 #ifdef CONFIG_KEXEC_CORE
 /*
  * reserve_crashkernel() - reserves memory for crash kernel
-- 
cgit v1.2.3
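With the kernel mapped with its final permissions up front and mark_rodata_ro() only tightening rodata, the debug_checkwx() sweep should come up clean. A toy version of that check over the end state this series establishes (the table is the intended layout, not output from a real page-table walk):

#include <stdio.h>

struct mapping {
	const char *name;
	int w, x;
};

int main(void)
{
	/* expected state once the kernel is mapped with correct permissions */
	struct mapping maps[] = {
		{ "kernel text",          0, 1 },
		{ "kernel rodata",        0, 0 },
		{ "kernel data",          1, 0 },
		{ "linear alias of text", 0, 0 },
		{ "linear mapping (RAM)", 1, 0 },
	};
	int wx = 0;

	for (unsigned long i = 0; i < sizeof(maps) / sizeof(maps[0]); i++)
		if (maps[i].w && maps[i].x) {
			printf("W+X mapping: %s\n", maps[i].name);
			wx++;
		}
	printf(wx ? "Checked W+X mappings: failed\n"
		  : "Checked W+X mappings: passed\n");
	return 0;
}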