author     Palmer Dabbelt <palmerdabbelt@google.com>  2021-06-30 21:50:32 -0700
committer  Palmer Dabbelt <palmerdabbelt@google.com>  2021-06-30 21:50:32 -0700
commit     01112e5e20f5298a81639806cd0a3c587aade467 (patch)
tree       b453c32b2e3a8905da331a81ad234c17b62cb9dc
parent     47513f243b452a5e21180dcf3d6ac1c57e1781a6 (diff)
parent     e5c35fa0401971701dcd7675f471b664698244dd (diff)
download   linux-01112e5e20f5298a81639806cd0a3c587aade467.tar.bz2
Merge branch 'riscv-wx-mappings' into for-next
This contains both the short-term fix for the W+X boot mappings and the
larger cleanup that maps the kernel with the correct permissions the first
time, so no writable+executable window is left open during boot. A usage
sketch of the new set_kernel_memory() helper follows the commit list below.
* riscv-wx-mappings:
riscv: Map the kernel with correct permissions the first time
riscv: Introduce set_kernel_memory helper
riscv: Simplify xip and !xip kernel address conversion macros
riscv: Remove CONFIG_PHYS_RAM_BASE_FIXED
riscv: mm: Fix W+X mappings at boot
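For orientation, here is a sketch of the shape of the set_kernel_memory()
helper this series introduces and how a call site uses it. This is a
stand-alone user-space model, not kernel code: the PAGE_SHIFT/PAGE_ALIGN
definitions, the set_memory_ro() callback, and the rodata array below are
hypothetical placeholders standing in for the real kernel definitions and
section symbols.

#include <stdio.h>
#include <stdint.h>

/* Placeholder values standing in for the kernel's definitions. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Fake permission callback; the real ones are set_memory_ro()/rw()/x()/nx(). */
static int set_memory_ro(unsigned long start, int num_pages)
{
	printf("ro: start=%#lx pages=%d\n", start, num_pages);
	return 0;
}

/*
 * Mirrors the helper added to arch/riscv/include/asm/set_memory.h:
 * convert a [startp, endp) symbol range into a page count and hand it
 * to one set_memory_* callback, instead of open-coding the shift at
 * every call site.
 */
static int set_kernel_memory(char *startp, char *endp,
			     int (*set_memory)(unsigned long start,
					       int num_pages))
{
	unsigned long start = (unsigned long)startp;
	unsigned long end = (unsigned long)endp;
	int num_pages = PAGE_ALIGN(end - start) >> PAGE_SHIFT;

	return set_memory(start, num_pages);
}

int main(void)
{
	static char rodata[3 * 4096];	/* stands in for __start_rodata.._data */

	return set_kernel_memory(rodata, rodata + sizeof(rodata), set_memory_ro);
}

The helper centralizes the PAGE_ALIGN(end - start) >> PAGE_SHIFT page-count
computation that free_initmem(), mark_rodata_ro() and the removed
protect_kernel_*() functions each used to open-code.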
-rw-r--r--  arch/riscv/Kconfig                  |    6
-rw-r--r--  arch/riscv/include/asm/page.h       |   27
-rw-r--r--  arch/riscv/include/asm/pgtable.h    |    2
-rw-r--r--  arch/riscv/include/asm/sections.h   |   17
-rw-r--r--  arch/riscv/include/asm/set_memory.h |   24
-rw-r--r--  arch/riscv/kernel/setup.c           |   12
-rw-r--r--  arch/riscv/mm/init.c                |  108
7 files changed, 102 insertions(+), 94 deletions(-)
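Two small range predicates anchor most of the page.h rework in the diff
below. A stand-alone model makes their intent explicit before reading it;
the layout constants here are hypothetical, the real values come from the
kernel's PAGE_OFFSET, kernel_virt_addr and load_sz.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical sv39-style layout constants, for the model only. */
static const uintptr_t page_offset      = 0xffffffe000000000UL;
static const uintptr_t kernel_virt_addr = 0xffffffff80000000UL;
static const uintptr_t load_sz          = 0x01000000UL;	/* 16 MiB image */

/*
 * Same shape as the is_kernel_mapping()/is_linear_mapping() macros added
 * to arch/riscv/include/asm/page.h for 64-bit: the kernel image lives in
 * its own window above the linear map, so the two ranges are disjoint.
 */
static bool is_kernel_mapping(uintptr_t x)
{
	return x >= kernel_virt_addr && x < kernel_virt_addr + load_sz;
}

static bool is_linear_mapping(uintptr_t x)
{
	return x >= page_offset && x < kernel_virt_addr;
}

int main(void)
{
	uintptr_t in_linear = page_offset + 0x1000;
	uintptr_t in_kernel = kernel_virt_addr + 0x1000;

	printf("linear: %d/%d  kernel: %d/%d\n",
	       is_linear_mapping(in_linear), is_kernel_mapping(in_linear),
	       is_linear_mapping(in_kernel), is_kernel_mapping(in_kernel));
	return 0;
}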
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 9ff363306237..7304278dbbe9 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -494,13 +494,8 @@ config STACKPROTECTOR_PER_TASK
 	def_bool y
 	depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_TLS
 
-config PHYS_RAM_BASE_FIXED
-	bool "Explicitly specified physical RAM address"
-	default n
-
 config PHYS_RAM_BASE
 	hex "Platform Physical RAM address"
-	depends on PHYS_RAM_BASE_FIXED
 	default "0x80000000"
 	help
 	  This is the physical address of RAM in the system. It has to be
@@ -513,7 +508,6 @@ config XIP_KERNEL
 	# This prevents XIP from being enabled by all{yes,mod}config, which
 	# fail to build since XIP doesn't support large kernels.
 	depends on !COMPILE_TEST
-	select PHYS_RAM_BASE_FIXED
 	help
 	  Execute-In-Place allows the kernel to run from non-volatile storage
 	  directly addressable by the CPU, such as NOR flash. This saves RAM
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index a1b888f77d57..5d4622a44b09 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -83,55 +83,58 @@ extern unsigned long va_pa_offset;
 #ifdef CONFIG_64BIT
 extern unsigned long va_kernel_pa_offset;
 #endif
-#ifdef CONFIG_XIP_KERNEL
 extern unsigned long va_kernel_xip_pa_offset;
-#endif
 extern unsigned long pfn_base;
+extern uintptr_t load_sz;
 #define ARCH_PFN_OFFSET		(pfn_base)
 #else
 #define va_pa_offset		0
 #ifdef CONFIG_64BIT
 #define va_kernel_pa_offset	0
 #endif
+#define va_kernel_xip_pa_offset	0
 #define ARCH_PFN_OFFSET		(PAGE_OFFSET >> PAGE_SHIFT)
 #endif /* CONFIG_MMU */
 
 extern unsigned long kernel_virt_addr;
 
 #ifdef CONFIG_64BIT
+#define is_kernel_mapping(x)	\
+	((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+#define is_linear_mapping(x)	\
+	((x) >= PAGE_OFFSET && (x) < kernel_virt_addr)
+
 #define linear_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + va_pa_offset))
-#ifdef CONFIG_XIP_KERNEL
 #define kernel_mapping_pa_to_va(y)	({						\
 	unsigned long _y = y;								\
 	(_y >= CONFIG_PHYS_RAM_BASE) ?							\
 		(void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) :	\
 		(void *)((unsigned long)(_y) + va_kernel_xip_pa_offset);		\
 	})
-#else
-#define kernel_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + va_kernel_pa_offset))
-#endif
 #define __pa_to_va_nodebug(x)		linear_mapping_pa_to_va(x)
 
 #define linear_mapping_va_to_pa(x)	((unsigned long)(x) - va_pa_offset)
-#ifdef CONFIG_XIP_KERNEL
 #define kernel_mapping_va_to_pa(y) ({						\
 	unsigned long _y = y;							\
 	(_y < kernel_virt_addr + XIP_OFFSET) ?					\
 		((unsigned long)(_y) - va_kernel_xip_pa_offset) :		\
 		((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET);	\
 	})
-#else
-#define kernel_mapping_va_to_pa(x)	((unsigned long)(x) - va_kernel_pa_offset)
-#endif
+
 #define __va_to_pa_nodebug(x)	({						\
 	unsigned long _x = x;							\
-	(_x < kernel_virt_addr) ?						\
+	is_linear_mapping(_x) ?							\
 		linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x);	\
 	})
 #else
+#define is_kernel_mapping(x)	\
+	((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+#define is_linear_mapping(x)	\
+	((x) >= PAGE_OFFSET)
+
 #define __pa_to_va_nodebug(x)  ((void *)((unsigned long) (x) + va_pa_offset))
 #define __va_to_pa_nodebug(x)  ((unsigned long)(x) - va_pa_offset)
-#endif
+#endif /* CONFIG_64BIT */
 
 #ifdef CONFIG_DEBUG_VIRTUAL
 extern phys_addr_t __virt_to_phys(unsigned long x);
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index f282f7a375e2..2056faf06f13 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -77,6 +77,8 @@
 
 #ifdef CONFIG_XIP_KERNEL
 #define XIP_OFFSET		SZ_8M
+#else
+#define XIP_OFFSET		0
 #endif
 
 #ifndef __ASSEMBLY__
diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h
index 8a303fb1ee3b..32336e8a17cb 100644
--- a/arch/riscv/include/asm/sections.h
+++ b/arch/riscv/include/asm/sections.h
@@ -6,6 +6,7 @@
 #define __ASM_SECTIONS_H
 
 #include <asm-generic/sections.h>
+#include <linux/mm.h>
 
 extern char _start[];
 extern char _start_kernel[];
@@ -13,4 +14,20 @@ extern char __init_data_begin[], __init_data_end[];
 extern char __init_text_begin[], __init_text_end[];
 extern char __alt_start[], __alt_end[];
 
+static inline bool is_va_kernel_text(uintptr_t va)
+{
+	uintptr_t start = (uintptr_t)_start;
+	uintptr_t end = (uintptr_t)__init_data_begin;
+
+	return va >= start && va < end;
+}
+
+static inline bool is_va_kernel_lm_alias_text(uintptr_t va)
+{
+	uintptr_t start = (uintptr_t)lm_alias(_start);
+	uintptr_t end = (uintptr_t)lm_alias(__init_data_begin);
+
+	return va >= start && va < end;
+}
+
 #endif /* __ASM_SECTIONS_H */
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index 9d4d455726d4..a2c14d4b3993 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -16,20 +16,28 @@ int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
 int set_memory_rw_nx(unsigned long addr, int numpages);
-void protect_kernel_text_data(void);
+static __always_inline int set_kernel_memory(char *startp, char *endp,
+					     int (*set_memory)(unsigned long start,
+							       int num_pages))
+{
+	unsigned long start = (unsigned long)startp;
+	unsigned long end = (unsigned long)endp;
+	int num_pages = PAGE_ALIGN(end - start) >> PAGE_SHIFT;
+
+	return set_memory(start, num_pages);
+}
 #else
 static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
-static inline void protect_kernel_text_data(void) {}
 static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
-#endif
-
-#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
-void __init protect_kernel_linear_mapping_text_rodata(void);
-#else
-static inline void protect_kernel_linear_mapping_text_rodata(void) {}
+static inline int set_kernel_memory(char *startp, char *endp,
+				    int (*set_memory)(unsigned long start,
+						      int num_pages))
+{
+	return 0;
+}
 #endif
 
 int set_direct_map_invalid_noflush(struct page *page);
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 5c6d2a1fdbc7..b0c6f372a9ec 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -289,11 +289,6 @@ void __init setup_arch(char **cmdline_p)
 	init_resources();
 	sbi_init();
 
-	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
-		protect_kernel_text_data();
-		protect_kernel_linear_mapping_text_rodata();
-	}
-
 #ifdef CONFIG_KASAN
 	kasan_init();
 #endif
@@ -328,11 +323,10 @@ subsys_initcall(topology_init);
 
 void free_initmem(void)
 {
-	unsigned long init_begin = (unsigned long)__init_begin;
-	unsigned long init_end = (unsigned long)__init_end;
-
 	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
-		set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
+		set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end),
+				  IS_ENABLED(CONFIG_64BIT) ?
+				  set_memory_rw : set_memory_rw_nx);
 
 	free_initmem_default(POISON_FREE_INITMEM);
 }
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 3a5190a09987..12f956b3a674 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -455,6 +455,43 @@ asmlinkage void __init __copy_data(void)
 }
 #endif
 
+#ifdef CONFIG_STRICT_KERNEL_RWX
+static __init pgprot_t pgprot_from_va(uintptr_t va)
+{
+	if (is_va_kernel_text(va))
+		return PAGE_KERNEL_READ_EXEC;
+
+	/*
+	 * In 64-bit kernel, the kernel mapping is outside the linear mapping so
+	 * we must protect its linear mapping alias from being executed and
+	 * written.
+	 * And rodata section is marked readonly in mark_rodata_ro.
+	 */
+	if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va))
+		return PAGE_KERNEL_READ;
+
+	return PAGE_KERNEL;
+}
+
+void mark_rodata_ro(void)
+{
+	set_kernel_memory(__start_rodata, _data, set_memory_ro);
+	if (IS_ENABLED(CONFIG_64BIT))
+		set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data),
+				  set_memory_ro);
+
+	debug_checkwx();
+}
+#else
+static __init pgprot_t pgprot_from_va(uintptr_t va)
+{
+	if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
+		return PAGE_KERNEL;
+
+	return PAGE_KERNEL_EXEC;
+}
+#endif /* CONFIG_STRICT_KERNEL_RWX */
+
 /*
  * setup_vm() is called from head.S with MMU-off.
  *
@@ -474,7 +511,7 @@ asmlinkage void __init __copy_data(void)
 #endif
 
 static uintptr_t load_pa __initdata;
-static uintptr_t load_sz __initdata;
+uintptr_t load_sz;
 #ifdef CONFIG_XIP_KERNEL
 #define load_pa        (*((uintptr_t *)XIP_FIXUP(&load_pa)))
 #define load_sz        (*((uintptr_t *)XIP_FIXUP(&load_sz)))
@@ -486,7 +523,8 @@ static uintptr_t xiprom_sz __initdata;
 #define xiprom_sz      (*((uintptr_t *)XIP_FIXUP(&xiprom_sz)))
 #define xiprom         (*((uintptr_t *)XIP_FIXUP(&xiprom)))
 
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+					    __always_unused bool early)
 {
 	uintptr_t va, end_va;
 
@@ -505,7 +543,8 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
 				   map_size, PAGE_KERNEL);
 }
 #else
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+					    bool early)
 {
 	uintptr_t va, end_va;
 
@@ -513,7 +552,9 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
 	for (va = kernel_virt_addr; va < end_va; va += map_size)
 		create_pgd_mapping(pgdir, va,
 				   load_pa + (va - kernel_virt_addr),
-				   map_size, PAGE_KERNEL_EXEC);
+				   map_size,
+				   early ?
+					PAGE_KERNEL_EXEC : pgprot_from_va(va));
 }
 #endif
 
@@ -590,7 +631,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 	 * us to reach paging_init(). We map all memory banks later
 	 * in setup_vm_final() below.
 	 */
-	create_kernel_page_table(early_pg_dir, map_size);
+	create_kernel_page_table(early_pg_dir, map_size, true);
 
 #ifndef __PAGETABLE_PMD_FOLDED
 	/* Setup early PMD for DTB */
@@ -666,22 +707,6 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 #endif
 }
 
-#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
-void __init protect_kernel_linear_mapping_text_rodata(void)
-{
-	unsigned long text_start = (unsigned long)lm_alias(_start);
-	unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin);
-	unsigned long rodata_start = (unsigned long)lm_alias(__start_rodata);
-	unsigned long data_start = (unsigned long)lm_alias(_data);
-
-	set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-	set_memory_nx(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-
-	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-}
-#endif
-
 static void __init setup_vm_final(void)
 {
 	uintptr_t va, map_size;
@@ -714,21 +739,15 @@ static void __init setup_vm_final(void)
 		map_size = best_map_size(start, end - start);
 		for (pa = start; pa < end; pa += map_size) {
 			va = (uintptr_t)__va(pa);
-			create_pgd_mapping(swapper_pg_dir, va, pa,
-					   map_size,
-#ifdef CONFIG_64BIT
-					   PAGE_KERNEL
-#else
-					   PAGE_KERNEL_EXEC
-#endif
-					   );
+			create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
+					   pgprot_from_va(va));
 		}
 	}
 
 #ifdef CONFIG_64BIT
 	/* Map the kernel */
-	create_kernel_page_table(swapper_pg_dir, PMD_SIZE);
+	create_kernel_page_table(swapper_pg_dir, PMD_SIZE, false);
 #endif
 
 	/* Clear fixmap PTE and PMD mappings */
@@ -759,35 +778,6 @@ static inline void setup_vm_final(void)
 }
 #endif /* CONFIG_MMU */
 
-#ifdef CONFIG_STRICT_KERNEL_RWX
-void __init protect_kernel_text_data(void)
-{
-	unsigned long text_start = (unsigned long)_start;
-	unsigned long init_text_start = (unsigned long)__init_text_begin;
-	unsigned long init_data_start = (unsigned long)__init_data_begin;
-	unsigned long rodata_start = (unsigned long)__start_rodata;
-	unsigned long data_start = (unsigned long)_data;
-	unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
-
-	set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-	set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
-	set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
-	/* rodata section is marked readonly in mark_rodata_ro */
-	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-	set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
-}
-
-void mark_rodata_ro(void)
-{
-	unsigned long rodata_start = (unsigned long)__start_rodata;
-	unsigned long data_start = (unsigned long)_data;
-
-	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-
-	debug_checkwx();
-}
-#endif
-
 #ifdef CONFIG_KEXEC_CORE
 /*
  * reserve_crashkernel() - reserves memory for crash kernel
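As a closing illustration, here is a stand-alone model of the permission
policy that the diff above centralizes in pgprot_from_va(). The prot_t enum
and all addresses are hypothetical placeholders; the real code returns
pgprot_t values (PAGE_KERNEL_READ_EXEC, PAGE_KERNEL_READ, PAGE_KERNEL) and
uses the section-based predicates added to asm/sections.h.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder protections standing in for the kernel's pgprot_t values. */
typedef enum { PROT_RW, PROT_RO, PROT_RX } prot_t;

/* Hypothetical section boundaries for the model, not real addresses. */
static const uintptr_t text_start = 0x1000, init_data_begin = 0x5000;
static const uintptr_t lm_text_start = 0x80001000, lm_init_data_begin = 0x80005000;

static bool is_va_kernel_text(uintptr_t va)
{
	return va >= text_start && va < init_data_begin;
}

static bool is_va_kernel_lm_alias_text(uintptr_t va)
{
	return va >= lm_text_start && va < lm_init_data_begin;
}

/*
 * Same decision structure as the new pgprot_from_va(): kernel text maps
 * RX, the 64-bit linear-map alias of text maps RO (neither writable nor
 * executable), everything else maps RW. No address ever gets W and X
 * together, which is the W^X property enforced from the first mapping.
 */
static prot_t prot_from_va(uintptr_t va, bool is_64bit)
{
	if (is_va_kernel_text(va))
		return PROT_RX;
	if (is_64bit && is_va_kernel_lm_alias_text(va))
		return PROT_RO;
	return PROT_RW;
}

int main(void)
{
	printf("text=%d alias=%d other=%d\n",
	       prot_from_va(0x2000, true),	/* kernel text -> PROT_RX */
	       prot_from_va(0x80002000, true),	/* lm alias    -> PROT_RO */
	       prot_from_va(0x90000000, true));	/* other       -> PROT_RW */
	return 0;
}

Note that rodata starts out RW in this model too; as in the diff, it is
flipped read-only later by mark_rodata_ro().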