From f842f5ff6aafc2752580ed99ee757652c08684e7 Mon Sep 17 00:00:00 2001
From: Kefeng Wang
Date: Mon, 10 May 2021 19:42:22 +0800
Subject: riscv: Move setup_bootmem into paging_init

Make setup_bootmem() static.

Signed-off-by: Kefeng Wang
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/kernel/setup.c | 1 -
 1 file changed, 1 deletion(-)
(limited to 'arch/riscv/kernel')

diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 03901d3a8b02..4db4d0b5911f 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -276,7 +276,6 @@ void __init setup_arch(char **cmdline_p)
 	parse_early_param();
 
 	efi_init();
-	setup_bootmem();
 	paging_init();
 #if IS_ENABLED(CONFIG_BUILTIN_DTB)
 	unflatten_and_copy_device_tree();
-- cgit v1.2.3

From 37a7a2a10ec525a79d733008bc7fe4ebbca34382 Mon Sep 17 00:00:00 2001
From: Jisheng Zhang
Date: Wed, 12 May 2021 22:55:45 +0800
Subject: riscv: Turn has_fpu into a static key if FPU=y

The has_fpu check sits on a hot code path: switch_to(). Currently,
has_fpu is a bool variable if FPU=y, so switch_to() loads and tests it
on every call. Optimize this check away by turning has_fpu into a
static key.

Signed-off-by: Jisheng Zhang
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/include/asm/switch_to.h | 11 ++++++++---
 arch/riscv/kernel/cpufeature.c     |  4 ++--
 arch/riscv/kernel/process.c        |  2 +-
 arch/riscv/kernel/signal.c         |  4 ++--
 4 files changed, 13 insertions(+), 8 deletions(-)
(limited to 'arch/riscv/kernel')

diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index 407bcc96a710..0a3f4f95c555 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -6,6 +6,7 @@
 #ifndef _ASM_RISCV_SWITCH_TO_H
 #define _ASM_RISCV_SWITCH_TO_H
 
+#include <linux/jump_label.h>
 #include <linux/sched/task_stack.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
@@ -55,9 +56,13 @@ static inline void __switch_to_aux(struct task_struct *prev,
 	fstate_restore(next, task_pt_regs(next));
 }
 
-extern bool has_fpu;
+extern struct static_key_false cpu_hwcap_fpu;
+static __always_inline bool has_fpu(void)
+{
+	return static_branch_likely(&cpu_hwcap_fpu);
+}
 #else
-#define has_fpu false
+static __always_inline bool has_fpu(void) { return false; }
 #define fstate_save(task, regs) do { } while (0)
 #define fstate_restore(task, regs) do { } while (0)
 #define __switch_to_aux(__prev, __next) do { } while (0)
@@ -70,7 +75,7 @@ extern struct task_struct *__switch_to(struct task_struct *,
 do {							\
 	struct task_struct *__prev = (prev);		\
 	struct task_struct *__next = (next);		\
-	if (has_fpu)					\
+	if (has_fpu())					\
 		__switch_to_aux(__prev, __next);	\
 	((last) = __switch_to(__prev, __next));		\
 } while (0)
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index ac202f44a670..a2848dc36927 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -19,7 +19,7 @@ unsigned long elf_hwcap __read_mostly;
 static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;
 
 #ifdef CONFIG_FPU
-bool has_fpu __read_mostly;
+__ro_after_init DEFINE_STATIC_KEY_FALSE(cpu_hwcap_fpu);
 #endif
 
 /**
@@ -146,6 +146,6 @@ void riscv_fill_hwcap(void)
 
 #ifdef CONFIG_FPU
 	if (elf_hwcap & (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D))
-		has_fpu = true;
+		static_branch_enable(&cpu_hwcap_fpu);
 #endif
 }
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index f9cd57c9c67d..03ac3aa611f5 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -87,7 +87,7 @@ void start_thread(struct pt_regs *regs, unsigned long pc,
 	unsigned long sp)
 {
 	regs->status = SR_PIE;
-	if (has_fpu) {
+	if (has_fpu()) {
regs->status |= SR_FS_INITIAL; /* * Restore the initial value to the FP register diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index 65942b3748b4..c2d5ecbe5526 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -90,7 +90,7 @@ static long restore_sigcontext(struct pt_regs *regs, /* sc_regs is structured the same as the start of pt_regs */ err = __copy_from_user(regs, &sc->sc_regs, sizeof(sc->sc_regs)); /* Restore the floating-point state. */ - if (has_fpu) + if (has_fpu()) err |= restore_fp_state(regs, &sc->sc_fpregs); return err; } @@ -143,7 +143,7 @@ static long setup_sigcontext(struct rt_sigframe __user *frame, /* sc_regs is structured the same as the start of pt_regs */ err = __copy_to_user(&sc->sc_regs, regs, sizeof(sc->sc_regs)); /* Save the floating-point state. */ - if (has_fpu) + if (has_fpu()) err |= save_fp_state(regs, &sc->sc_fpregs); return err; } -- cgit v1.2.3 From 8c9f4940c27dd72ee68ca5af2922e4d83ca9121b Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Mon, 19 Apr 2021 00:29:19 +0800 Subject: riscv: kprobes: Remove redundant kprobe_step_ctx Inspired by commit ba090f9cafd5 ("arm64: kprobes: Remove redundant kprobe_step_ctx"), the ss_pending and match_addr of kprobe_step_ctx are redundant because those can be replaced by KPROBE_HIT_SS and &cur_kprobe->ainsn.api.insn[0] + GET_INSN_LENGTH(cur->opcode) respectively. Remove the kprobe_step_ctx to simplify the code. Signed-off-by: Jisheng Zhang Reviewed-by: Masami Hiramatsu Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/kprobes.h | 7 ------- arch/riscv/kernel/probes/kprobes.c | 40 +++++++++----------------------------- 2 files changed, 9 insertions(+), 38 deletions(-) (limited to 'arch/riscv/kernel') diff --git a/arch/riscv/include/asm/kprobes.h b/arch/riscv/include/asm/kprobes.h index 4647d38018f6..9ea9b5ec3113 100644 --- a/arch/riscv/include/asm/kprobes.h +++ b/arch/riscv/include/asm/kprobes.h @@ -29,18 +29,11 @@ struct prev_kprobe { unsigned int status; }; -/* Single step context for kprobe */ -struct kprobe_step_ctx { - unsigned long ss_pending; - unsigned long match_addr; -}; - /* per-cpu kprobe control block */ struct kprobe_ctlblk { unsigned int kprobe_status; unsigned long saved_status; struct prev_kprobe prev_kprobe; - struct kprobe_step_ctx ss_ctx; }; void arch_remove_kprobe(struct kprobe *p); diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c index 10b965c34536..fbd127d5dc04 100644 --- a/arch/riscv/kernel/probes/kprobes.c +++ b/arch/riscv/kernel/probes/kprobes.c @@ -17,7 +17,7 @@ DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); static void __kprobes -post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *); +post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *); static void __kprobes arch_prepare_ss_slot(struct kprobe *p) { @@ -43,7 +43,7 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs) p->ainsn.api.handler((u32)p->opcode, (unsigned long)p->addr, regs); - post_kprobe_handler(kcb, regs); + post_kprobe_handler(p, kcb, regs); } int __kprobes arch_prepare_kprobe(struct kprobe *p) @@ -149,21 +149,6 @@ static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb, regs->status = kcb->saved_status; } -static void __kprobes -set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr, struct kprobe *p) -{ - unsigned long offset = GET_INSN_LENGTH(p->opcode); - - kcb->ss_ctx.ss_pending = true; - 
kcb->ss_ctx.match_addr = addr + offset;
-}
-
-static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
-{
-	kcb->ss_ctx.ss_pending = false;
-	kcb->ss_ctx.match_addr = 0;
-}
-
 static void __kprobes setup_singlestep(struct kprobe *p,
 				       struct pt_regs *regs,
 				       struct kprobe_ctlblk *kcb, int reenter)
@@ -182,8 +167,6 @@ static void __kprobes setup_singlestep(struct kprobe *p,
 	/* prepare for single stepping */
 	slot = (unsigned long)p->ainsn.api.insn;
 
-	set_ss_context(kcb, slot, p);	/* mark pending ss */
-
 	/* IRQs and single stepping do not mix well. */
 	kprobes_save_local_irqflag(kcb, regs);
 
@@ -219,13 +202,8 @@ static int __kprobes reenter_kprobe(struct kprobe *p,
 }
 
 static void __kprobes
-post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
+post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs)
 {
-	struct kprobe *cur = kprobe_running();
-
-	if (!cur)
-		return;
-
 	/* return addr restore if non-branching insn */
 	if (cur->ainsn.api.restore != 0)
 		regs->epc = cur->ainsn.api.restore;
@@ -357,16 +335,16 @@ bool __kprobes kprobe_single_step_handler(struct pt_regs *regs)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	unsigned long addr = instruction_pointer(regs);
+	struct kprobe *cur = kprobe_running();
 
-	if ((kcb->ss_ctx.ss_pending)
-	    && (kcb->ss_ctx.match_addr == instruction_pointer(regs))) {
-		clear_ss_context(kcb);	/* clear pending ss */
-
+	if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
+	    ((unsigned long)&cur->ainsn.api.insn[0] + GET_INSN_LENGTH(cur->opcode) == addr)) {
 		kprobes_restore_local_irqflag(kcb, regs);
-
-		post_kprobe_handler(kcb, regs);
+		post_kprobe_handler(cur, kcb, regs);
 		return true;
 	}
+
 	/* not ours, kprobes should ignore it */
 	return false;
 }
-- cgit v1.2.3

From 3df952ae2ac81fbc5d44b014e5462b53d1decbb5 Mon Sep 17 00:00:00 2001
From: Jisheng Zhang
Date: Sun, 16 May 2021 20:59:42 +0800
Subject: riscv: Add __init section marker to some functions again

These functions are not needed after booting, so mark them as __init to
move them to the __init section.

Signed-off-by: Jisheng Zhang
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/kernel/cpufeature.c | 2 +-
 arch/riscv/mm/context.c        | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
(limited to 'arch/riscv/kernel')

diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index a2848dc36927..d959d207a40d 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -59,7 +59,7 @@ bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit)
 }
 EXPORT_SYMBOL_GPL(__riscv_isa_extension_available);
 
-void riscv_fill_hwcap(void)
+void __init riscv_fill_hwcap(void)
 {
 	struct device_node *node;
 	const char *isa;
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 83e7ae37675a..9bc46ab01c25 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -213,7 +213,7 @@ static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
 		set_mm_noasid(mm);
 }
 
-static int asids_init(void)
+static int __init asids_init(void)
 {
 	unsigned long old;
-- cgit v1.2.3

From ce3aca0465e31c20ada1270ac6547ba28b610ab2 Mon Sep 17 00:00:00 2001
From: Kefeng Wang
Date: Wed, 2 Jun 2021 16:55:16 +0800
Subject: riscv: Only initialize swiotlb when necessary

The SWIOTLB bounce buffer is not needed unless the physical address
space exceeds the DMA limit, so only initialize the SWIOTLB when
swiotlb_force is set or when not all system memory is DMA-able. Also
move the swiotlb_init() call into mem_init().
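
The gate added to mem_init() (see the hunk below) can be read as a small
predicate. In sketch form (need_swiotlb() is an illustrative name only,
the patch itself open-codes the condition; dma32_phys_limit is assumed
to be the DMA32 limit computed earlier during setup_bootmem()):

	static bool __init need_swiotlb(void)
	{
		/* Bounce buffers were explicitly requested on the command line... */
		if (swiotlb_force == SWIOTLB_FORCE)
			return true;
		/* ...or some system memory lies above the 32-bit DMA limit. */
		return max_pfn > PFN_DOWN(dma32_phys_limit);
	}

When neither condition holds, swiotlb_force is downgraded to
SWIOTLB_NO_FORCE so that no bounce buffer is ever allocated.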
Signed-off-by: Kefeng Wang
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/kernel/setup.c | 5 -----
 arch/riscv/mm/init.c      | 8 ++++++++
 2 files changed, 8 insertions(+), 5 deletions(-)
(limited to 'arch/riscv/kernel')

diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 4db4d0b5911f..5c6d2a1fdbc7 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -17,7 +17,6 @@
 #include
 #include
 #include
-#include <linux/swiotlb.h>
 #include
 #include
 #include
@@ -295,10 +294,6 @@ void __init setup_arch(char **cmdline_p)
 		protect_kernel_linear_mapping_text_rodata();
 	}
 
-#ifdef CONFIG_SWIOTLB
-	swiotlb_init(1);
-#endif
-
 #ifdef CONFIG_KASAN
 	kasan_init();
 #endif
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index e7b136abf90c..30b204546ceb 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include <linux/swiotlb.h>
 #include
 #include
 #include
@@ -109,6 +110,13 @@ void __init mem_init(void)
 	BUG_ON(!mem_map);
 #endif /* CONFIG_FLATMEM */
 
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_force == SWIOTLB_FORCE ||
+	    max_pfn > PFN_DOWN(dma32_phys_limit))
+		swiotlb_init(1);
+	else
+		swiotlb_force = SWIOTLB_NO_FORCE;
+#endif
 	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
 	memblock_free_all();
-- cgit v1.2.3

From e5c35fa0401971701dcd7675f471b664698244dd Mon Sep 17 00:00:00 2001
From: Alexandre Ghiti
Date: Thu, 24 Jun 2021 14:00:41 +0200
Subject: riscv: Map the kernel with correct permissions the first time

For 64-bit kernels, we map all of the kernel with write and execute
permissions and afterwards remove writability from text and
executability from data.

For 32-bit kernels, the kernel mapping resides in the linear mapping,
so we map all of the linear mapping as writable and executable and
afterwards remove those properties from unused memory and from the
kernel mapping as described above.

Change this behavior to directly map the kernel with the correct
permissions and avoid going through the whole mapping afterwards to
fix the permissions.

At the same time, this fixes an issue introduced by commit 2bfc6cd81bd1
("riscv: Move kernel mapping outside of linear mapping") as reported
here https://github.com/starfive-tech/linux/issues/17.
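
The heart of the change is a single helper that returns the final
protection for a given kernel virtual address. It is reproduced here
with added commentary (condensed from the mm/init.c hunk below; the
STRICT_KERNEL_RWX variant is shown):

	static __init pgprot_t pgprot_from_va(uintptr_t va)
	{
		if (is_va_kernel_text(va))
			return PAGE_KERNEL_READ_EXEC;	/* text: executable, never writable */

		/*
		 * On 64-bit, the linear-mapping alias of the kernel text must
		 * be neither writable nor executable; rodata is handled
		 * separately in mark_rodata_ro().
		 */
		if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va))
			return PAGE_KERNEL_READ;

		return PAGE_KERNEL;			/* data: read-write, no exec */
	}

setup_vm_final() and the late kernel mapping then ask pgprot_from_va()
for each address instead of mapping everything PAGE_KERNEL_EXEC and
tightening the permissions afterwards.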
Signed-off-by: Alexandre Ghiti
Reviewed-by: Anup Patel
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/include/asm/page.h       |  13 ++++-
 arch/riscv/include/asm/sections.h   |  17 ++++++
 arch/riscv/include/asm/set_memory.h |   8 ---
 arch/riscv/kernel/setup.c           |  12 +---
 arch/riscv/mm/init.c                | 113 ++++++++++++++++--------------------
 5 files changed, 82 insertions(+), 81 deletions(-)
(limited to 'arch/riscv/kernel')

diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 6e004d8fda4d..349e4f9874cc 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -95,6 +95,7 @@ extern unsigned long va_kernel_pa_offset;
 #endif
 extern unsigned long va_kernel_xip_pa_offset;
 extern unsigned long pfn_base;
+extern uintptr_t load_sz;
 #define ARCH_PFN_OFFSET (pfn_base)
 #else
 #define va_pa_offset 0
@@ -108,6 +109,11 @@ extern unsigned long pfn_base;
 extern unsigned long kernel_virt_addr;
 
 #ifdef CONFIG_64BIT
+#define is_kernel_mapping(x) \
+	((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+#define is_linear_mapping(x) \
+	((x) >= PAGE_OFFSET && (x) < kernel_virt_addr)
+
 #define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_pa_offset))
 #define kernel_mapping_pa_to_va(y) ({ \
	unsigned long _y = y; \
@@ -127,10 +133,15 @@ extern unsigned long kernel_virt_addr;
 
 #define __va_to_pa_nodebug(x) ({ \
	unsigned long _x = x; \
-	(_x < kernel_virt_addr) ? \
+	is_linear_mapping(_x) ? \
		linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x); \
 })
 #else
+#define is_kernel_mapping(x) \
+	((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+#define is_linear_mapping(x) \
+	((x) >= PAGE_OFFSET)
+
 #define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset))
 #define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset)
 #endif /* CONFIG_64BIT */
diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h
index 8a303fb1ee3b..32336e8a17cb 100644
--- a/arch/riscv/include/asm/sections.h
+++ b/arch/riscv/include/asm/sections.h
@@ -6,6 +6,7 @@
 #define __ASM_SECTIONS_H
 
 #include <asm/page.h>
+#include <linux/mm.h>
 
 extern char _start[];
 extern char _start_kernel[];
@@ -13,4 +14,20 @@ extern char __init_data_begin[], __init_data_end[];
 extern char __init_text_begin[], __init_text_end[];
 extern char __alt_start[], __alt_end[];
 
+static inline bool is_va_kernel_text(uintptr_t va)
+{
+	uintptr_t start = (uintptr_t)_start;
+	uintptr_t end = (uintptr_t)__init_data_begin;
+
+	return va >= start && va < end;
+}
+
+static inline bool is_va_kernel_lm_alias_text(uintptr_t va)
+{
+	uintptr_t start = (uintptr_t)lm_alias(_start);
+	uintptr_t end = (uintptr_t)lm_alias(__init_data_begin);
+
+	return va >= start && va < end;
+}
+
 #endif /* __ASM_SECTIONS_H */
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index 135f726c4c1d..a2c14d4b3993 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -16,7 +16,6 @@ int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
 int set_memory_rw_nx(unsigned long addr, int numpages);
-void protect_kernel_text_data(void);
 
 static __always_inline int set_kernel_memory(char *startp, char *endp,
					     int (*set_memory)(unsigned long start,
							       int num_pages))
@@ -32,7 +31,6 @@ static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
 static inline int
set_memory_x(unsigned long addr, int numpages) { return 0; } static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; } -static inline void protect_kernel_text_data(void) {} static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; } static inline int set_kernel_memory(char *startp, char *endp, int (*set_memory)(unsigned long start, @@ -42,12 +40,6 @@ static inline int set_kernel_memory(char *startp, char *endp, } #endif -#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX) -void protect_kernel_linear_mapping_text_rodata(void); -#else -static inline void protect_kernel_linear_mapping_text_rodata(void) {} -#endif - int set_direct_map_invalid_noflush(struct page *page); int set_direct_map_default_noflush(struct page *page); bool kernel_page_present(struct page *page); diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 03901d3a8b02..02d811127f48 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -291,11 +291,6 @@ void __init setup_arch(char **cmdline_p) init_resources(); sbi_init(); - if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) { - protect_kernel_text_data(); - protect_kernel_linear_mapping_text_rodata(); - } - #ifdef CONFIG_SWIOTLB swiotlb_init(1); #endif @@ -334,11 +329,10 @@ subsys_initcall(topology_init); void free_initmem(void) { - unsigned long init_begin = (unsigned long)__init_begin; - unsigned long init_end = (unsigned long)__init_end; - if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) - set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT); + set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end), + IS_ENABLED(CONFIG_64BIT) ? + set_memory_rw : set_memory_rw_nx); free_initmem_default(POISON_FREE_INITMEM); } diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 4c4c92ce0bb8..dc37b9bb8cb9 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -436,6 +436,43 @@ asmlinkage void __init __copy_data(void) } #endif +#ifdef CONFIG_STRICT_KERNEL_RWX +static __init pgprot_t pgprot_from_va(uintptr_t va) +{ + if (is_va_kernel_text(va)) + return PAGE_KERNEL_READ_EXEC; + + /* + * In 64-bit kernel, the kernel mapping is outside the linear mapping so + * we must protect its linear mapping alias from being executed and + * written. + * And rodata section is marked readonly in mark_rodata_ro. + */ + if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va)) + return PAGE_KERNEL_READ; + + return PAGE_KERNEL; +} + +void mark_rodata_ro(void) +{ + set_kernel_memory(__start_rodata, _data, set_memory_ro); + if (IS_ENABLED(CONFIG_64BIT)) + set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data), + set_memory_ro); + + debug_checkwx(); +} +#else +static __init pgprot_t pgprot_from_va(uintptr_t va) +{ + if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va)) + return PAGE_KERNEL; + + return PAGE_KERNEL_EXEC; +} +#endif /* CONFIG_STRICT_KERNEL_RWX */ + /* * setup_vm() is called from head.S with MMU-off. * @@ -454,7 +491,8 @@ asmlinkage void __init __copy_data(void) #error "setup_vm() is called from head.S before relocate so it should not use absolute addressing." 
#endif -uintptr_t load_pa, load_sz; +static uintptr_t load_pa __initdata; +uintptr_t load_sz; #ifdef CONFIG_XIP_KERNEL #define load_pa (*((uintptr_t *)XIP_FIXUP(&load_pa))) #define load_sz (*((uintptr_t *)XIP_FIXUP(&load_sz))) @@ -465,7 +503,8 @@ uintptr_t xiprom, xiprom_sz; #define xiprom_sz (*((uintptr_t *)XIP_FIXUP(&xiprom_sz))) #define xiprom (*((uintptr_t *)XIP_FIXUP(&xiprom))) -static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size) +static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size, + __always_unused bool early) { uintptr_t va, end_va; @@ -484,7 +523,8 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size) map_size, PAGE_KERNEL); } #else -static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size) +static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size, + bool early) { uintptr_t va, end_va; @@ -492,7 +532,9 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size) for (va = kernel_virt_addr; va < end_va; va += map_size) create_pgd_mapping(pgdir, va, load_pa + (va - kernel_virt_addr), - map_size, PAGE_KERNEL_EXEC); + map_size, + early ? + PAGE_KERNEL_EXEC : pgprot_from_va(va)); } #endif @@ -569,7 +611,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) * us to reach paging_init(). We map all memory banks later * in setup_vm_final() below. */ - create_kernel_page_table(early_pg_dir, map_size); + create_kernel_page_table(early_pg_dir, map_size, true); #ifndef __PAGETABLE_PMD_FOLDED /* Setup early PMD for DTB */ @@ -645,22 +687,6 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) #endif } -#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX) -void protect_kernel_linear_mapping_text_rodata(void) -{ - unsigned long text_start = (unsigned long)lm_alias(_start); - unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin); - unsigned long rodata_start = (unsigned long)lm_alias(__start_rodata); - unsigned long data_start = (unsigned long)lm_alias(_data); - - set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT); - set_memory_nx(text_start, (init_text_start - text_start) >> PAGE_SHIFT); - - set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT); - set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT); -} -#endif - static void __init setup_vm_final(void) { uintptr_t va, map_size; @@ -693,21 +719,15 @@ static void __init setup_vm_final(void) map_size = best_map_size(start, end - start); for (pa = start; pa < end; pa += map_size) { va = (uintptr_t)__va(pa); - create_pgd_mapping(swapper_pg_dir, va, pa, - map_size, -#ifdef CONFIG_64BIT - PAGE_KERNEL -#else - PAGE_KERNEL_EXEC -#endif - ); + create_pgd_mapping(swapper_pg_dir, va, pa, map_size, + pgprot_from_va(va)); } } #ifdef CONFIG_64BIT /* Map the kernel */ - create_kernel_page_table(swapper_pg_dir, PMD_SIZE); + create_kernel_page_table(swapper_pg_dir, PMD_SIZE, false); #endif /* Clear fixmap PTE and PMD mappings */ @@ -738,39 +758,6 @@ static inline void setup_vm_final(void) } #endif /* CONFIG_MMU */ -#ifdef CONFIG_STRICT_KERNEL_RWX -void __init protect_kernel_text_data(void) -{ - unsigned long text_start = (unsigned long)_start; - unsigned long init_text_start = (unsigned long)__init_text_begin; - unsigned long init_data_start = (unsigned long)__init_data_begin; - unsigned long rodata_start = (unsigned long)__start_rodata; - unsigned long data_start = (unsigned long)_data; -#if defined(CONFIG_64BIT) && defined(CONFIG_MMU) - 
unsigned long end_va = kernel_virt_addr + load_sz; -#else - unsigned long end_va = (unsigned long)(__va(PFN_PHYS(max_low_pfn))); -#endif - - set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT); - set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT); - set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT); - /* rodata section is marked readonly in mark_rodata_ro */ - set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT); - set_memory_nx(data_start, (end_va - data_start) >> PAGE_SHIFT); -} - -void mark_rodata_ro(void) -{ - unsigned long rodata_start = (unsigned long)__start_rodata; - unsigned long data_start = (unsigned long)_data; - - set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT); - - debug_checkwx(); -} -#endif - #ifdef CONFIG_KEXEC_CORE /* * reserve_crashkernel() - reserves memory for crash kernel -- cgit v1.2.3 From 658e2c5125bbbc9b9b5eac23b3c35b87df3c30b8 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Thu, 17 Jun 2021 15:53:07 +0200 Subject: riscv: Introduce structure that group all variables regarding kernel mapping We have a lot of variables that are used to hold kernel mapping addresses, offsets between physical and virtual mappings and some others used for XIP kernels: they are all defined at different places in mm/init.c, so group them into a single structure with, for some of them, more explicit and concise names. Signed-off-by: Alexandre Ghiti Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/page.h | 54 +++++++++++---------- arch/riscv/kernel/asm-offsets.c | 2 + arch/riscv/kernel/head.S | 4 +- arch/riscv/kernel/kexec_relocate.S | 4 +- arch/riscv/kernel/machine_kexec.c | 2 +- arch/riscv/mm/init.c | 98 ++++++++++++++------------------------ arch/riscv/mm/physaddr.c | 2 +- arch/riscv/mm/ptdump.c | 2 +- 8 files changed, 75 insertions(+), 93 deletions(-) (limited to 'arch/riscv/kernel') diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index 5d4622a44b09..cca8764aed83 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -79,46 +79,52 @@ typedef struct page *pgtable_t; #endif #ifdef CONFIG_MMU -extern unsigned long va_pa_offset; -#ifdef CONFIG_64BIT -extern unsigned long va_kernel_pa_offset; -#endif -extern unsigned long va_kernel_xip_pa_offset; extern unsigned long pfn_base; -extern uintptr_t load_sz; #define ARCH_PFN_OFFSET (pfn_base) #else -#define va_pa_offset 0 -#ifdef CONFIG_64BIT -#define va_kernel_pa_offset 0 -#endif -#define va_kernel_xip_pa_offset 0 #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) #endif /* CONFIG_MMU */ -extern unsigned long kernel_virt_addr; +struct kernel_mapping { + unsigned long virt_addr; + uintptr_t phys_addr; + uintptr_t size; + /* Offset between linear mapping virtual address and kernel load address */ + unsigned long va_pa_offset; +#ifdef CONFIG_64BIT + /* Offset between kernel mapping virtual address and kernel load address */ + unsigned long va_kernel_pa_offset; +#endif + unsigned long va_kernel_xip_pa_offset; +#ifdef CONFIG_XIP_KERNEL + uintptr_t xiprom; + uintptr_t xiprom_sz; +#endif +}; + +extern struct kernel_mapping kernel_map; #ifdef CONFIG_64BIT #define is_kernel_mapping(x) \ - ((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz)) + ((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size)) #define is_linear_mapping(x) \ - ((x) >= PAGE_OFFSET && (x) < kernel_virt_addr) + ((x) >= PAGE_OFFSET && (x) < kernel_map.virt_addr) 
-#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_pa_offset)) +#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + kernel_map.va_pa_offset)) #define kernel_mapping_pa_to_va(y) ({ \ unsigned long _y = y; \ (_y >= CONFIG_PHYS_RAM_BASE) ? \ - (void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) : \ - (void *)((unsigned long)(_y) + va_kernel_xip_pa_offset); \ + (void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET) : \ + (void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset); \ }) #define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x) -#define linear_mapping_va_to_pa(x) ((unsigned long)(x) - va_pa_offset) +#define linear_mapping_va_to_pa(x) ((unsigned long)(x) - kernel_map.va_pa_offset) #define kernel_mapping_va_to_pa(y) ({ \ unsigned long _y = y; \ - (_y < kernel_virt_addr + XIP_OFFSET) ? \ - ((unsigned long)(_y) - va_kernel_xip_pa_offset) : \ - ((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET); \ + (_y < kernel_map.virt_addr + XIP_OFFSET) ? \ + ((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) : \ + ((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \ }) #define __va_to_pa_nodebug(x) ({ \ @@ -128,12 +134,12 @@ extern unsigned long kernel_virt_addr; }) #else #define is_kernel_mapping(x) \ - ((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz)) + ((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size)) #define is_linear_mapping(x) \ ((x) >= PAGE_OFFSET) -#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset)) -#define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset) +#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + kernel_map.va_pa_offset)) +#define __va_to_pa_nodebug(x) ((unsigned long)(x) - kernel_map.va_pa_offset) #endif /* CONFIG_64BIT */ #ifdef CONFIG_DEBUG_VIRTUAL diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c index 9ef33346853c..90f8ce64fa6f 100644 --- a/arch/riscv/kernel/asm-offsets.c +++ b/arch/riscv/kernel/asm-offsets.c @@ -311,4 +311,6 @@ void asm_offsets(void) * ensures the alignment is sane. 
*/ DEFINE(PT_SIZE_ON_STACK, ALIGN(sizeof(struct pt_regs), STACK_ALIGN)); + + OFFSET(KERNEL_MAP_VIRT_ADDR, kernel_mapping, virt_addr); } diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 89cc58ab52b4..fce5184b22c3 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -81,9 +81,9 @@ pe_head_start: #ifdef CONFIG_MMU relocate: /* Relocate return address */ - la a1, kernel_virt_addr + la a1, kernel_map XIP_FIXUP_OFFSET a1 - REG_L a1, 0(a1) + REG_L a1, KERNEL_MAP_VIRT_ADDR(a1) la a2, _start sub a1, a1, a2 add ra, ra, a1 diff --git a/arch/riscv/kernel/kexec_relocate.S b/arch/riscv/kernel/kexec_relocate.S index 88c3beabe9b4..a80b52a74f58 100644 --- a/arch/riscv/kernel/kexec_relocate.S +++ b/arch/riscv/kernel/kexec_relocate.S @@ -20,7 +20,7 @@ SYM_CODE_START(riscv_kexec_relocate) * s4: Pointer to the destination address for the relocation * s5: (const) Number of words per page * s6: (const) 1, used for subtraction - * s7: (const) va_pa_offset, used when switching MMU off + * s7: (const) kernel_map.va_pa_offset, used when switching MMU off * s8: (const) Physical address of the main loop * s9: (debug) indirection page counter * s10: (debug) entry counter @@ -159,7 +159,7 @@ SYM_CODE_START(riscv_kexec_norelocate) * s0: (const) Phys address to jump to * s1: (const) Phys address of the FDT image * s2: (const) The hartid of the current hart - * s3: (const) va_pa_offset, used when switching MMU off + * s3: (const) kernel_map.va_pa_offset, used when switching MMU off */ mv s0, a1 mv s1, a2 diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c index cc048143fba5..3e39fd95e02b 100644 --- a/arch/riscv/kernel/machine_kexec.c +++ b/arch/riscv/kernel/machine_kexec.c @@ -188,6 +188,6 @@ machine_kexec(struct kimage *image) /* Jump to the relocation code */ pr_notice("Bye...\n"); kexec_method(first_ind_entry, jump_addr, fdt_addr, - this_hart_id, va_pa_offset); + this_hart_id, kernel_map.va_pa_offset); unreachable(); } diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 12f956b3a674..269fc648ef3d 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -30,10 +30,13 @@ #include "../kernel/head.h" -unsigned long kernel_virt_addr = KERNEL_LINK_ADDR; -EXPORT_SYMBOL(kernel_virt_addr); +struct kernel_mapping kernel_map __ro_after_init; +EXPORT_SYMBOL(kernel_map); +#ifdef CONFIG_XIP_KERNEL +#define kernel_map (*(struct kernel_mapping *)XIP_FIXUP(&kernel_map)) +#endif + #ifdef CONFIG_XIP_KERNEL -#define kernel_virt_addr (*((unsigned long *)XIP_FIXUP(&kernel_virt_addr))) extern char _xiprom[], _exiprom[]; #endif @@ -211,25 +214,6 @@ static struct pt_alloc_ops _pt_ops __initdata; #define pt_ops _pt_ops #endif -/* Offset between linear mapping virtual address and kernel load address */ -unsigned long va_pa_offset __ro_after_init; -EXPORT_SYMBOL(va_pa_offset); -#ifdef CONFIG_XIP_KERNEL -#define va_pa_offset (*((unsigned long *)XIP_FIXUP(&va_pa_offset))) -#endif -/* Offset between kernel mapping virtual address and kernel load address */ -#ifdef CONFIG_64BIT -unsigned long va_kernel_pa_offset __ro_after_init; -EXPORT_SYMBOL(va_kernel_pa_offset); -#endif -#ifdef CONFIG_XIP_KERNEL -#define va_kernel_pa_offset (*((unsigned long *)XIP_FIXUP(&va_kernel_pa_offset))) -#endif -unsigned long va_kernel_xip_pa_offset __ro_after_init; -EXPORT_SYMBOL(va_kernel_xip_pa_offset); -#ifdef CONFIG_XIP_KERNEL -#define va_kernel_xip_pa_offset (*((unsigned long *)XIP_FIXUP(&va_kernel_xip_pa_offset))) -#endif unsigned long pfn_base __ro_after_init; 
EXPORT_SYMBOL(pfn_base); @@ -345,7 +329,7 @@ static pmd_t *__init get_pmd_virt_late(phys_addr_t pa) static phys_addr_t __init alloc_pmd_early(uintptr_t va) { - BUG_ON((va - kernel_virt_addr) >> PGDIR_SHIFT); + BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT); return (uintptr_t)early_pmd; } @@ -510,36 +494,24 @@ static __init pgprot_t pgprot_from_va(uintptr_t va) #error "setup_vm() is called from head.S before relocate so it should not use absolute addressing." #endif -static uintptr_t load_pa __initdata; -uintptr_t load_sz; -#ifdef CONFIG_XIP_KERNEL -#define load_pa (*((uintptr_t *)XIP_FIXUP(&load_pa))) -#define load_sz (*((uintptr_t *)XIP_FIXUP(&load_sz))) -#endif - #ifdef CONFIG_XIP_KERNEL -static uintptr_t xiprom __initdata; -static uintptr_t xiprom_sz __initdata; -#define xiprom_sz (*((uintptr_t *)XIP_FIXUP(&xiprom_sz))) -#define xiprom (*((uintptr_t *)XIP_FIXUP(&xiprom))) - static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size, __always_unused bool early) { uintptr_t va, end_va; /* Map the flash resident part */ - end_va = kernel_virt_addr + xiprom_sz; - for (va = kernel_virt_addr; va < end_va; va += map_size) + end_va = kernel_map.virt_addr + kernel_map.xiprom_sz; + for (va = kernel_map.virt_addr; va < end_va; va += map_size) create_pgd_mapping(pgdir, va, - xiprom + (va - kernel_virt_addr), + kernel_map.xiprom + (va - kernel_map.virt_addr), map_size, PAGE_KERNEL_EXEC); /* Map the data in RAM */ - end_va = kernel_virt_addr + XIP_OFFSET + load_sz; - for (va = kernel_virt_addr + XIP_OFFSET; va < end_va; va += map_size) + end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size; + for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += map_size) create_pgd_mapping(pgdir, va, - load_pa + (va - (kernel_virt_addr + XIP_OFFSET)), + kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)), map_size, PAGE_KERNEL); } #else @@ -548,10 +520,10 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size, { uintptr_t va, end_va; - end_va = kernel_virt_addr + load_sz; - for (va = kernel_virt_addr; va < end_va; va += map_size) + end_va = kernel_map.virt_addr + kernel_map.size; + for (va = kernel_map.virt_addr; va < end_va; va += map_size) create_pgd_mapping(pgdir, va, - load_pa + (va - kernel_virt_addr), + kernel_map.phys_addr + (va - kernel_map.virt_addr), map_size, early ? 
PAGE_KERNEL_EXEC : pgprot_from_va(va)); @@ -566,25 +538,27 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) pmd_t fix_bmap_spmd, fix_bmap_epmd; #endif + kernel_map.virt_addr = KERNEL_LINK_ADDR; + #ifdef CONFIG_XIP_KERNEL - xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR; - xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom); + kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR; + kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom); - load_pa = (uintptr_t)CONFIG_PHYS_RAM_BASE; - load_sz = (uintptr_t)(&_end) - (uintptr_t)(&_sdata); + kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE; + kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata); - va_kernel_xip_pa_offset = kernel_virt_addr - xiprom; + kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom; #else - load_pa = (uintptr_t)(&_start); - load_sz = (uintptr_t)(&_end) - load_pa; + kernel_map.phys_addr = (uintptr_t)(&_start); + kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr; #endif - va_pa_offset = PAGE_OFFSET - load_pa; + kernel_map.va_pa_offset = PAGE_OFFSET - kernel_map.phys_addr; #ifdef CONFIG_64BIT - va_kernel_pa_offset = kernel_virt_addr - load_pa; + kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr; #endif - pfn_base = PFN_DOWN(load_pa); + pfn_base = PFN_DOWN(kernel_map.phys_addr); /* * Enforce boot alignment requirements of RV32 and @@ -594,7 +568,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) /* Sanity check alignment and size */ BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0); - BUG_ON((load_pa % map_size) != 0); + BUG_ON((kernel_map.phys_addr % map_size) != 0); pt_ops.alloc_pte = alloc_pte_early; pt_ops.get_pte_virt = get_pte_virt_early; @@ -611,19 +585,19 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) create_pmd_mapping(fixmap_pmd, FIXADDR_START, (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE); /* Setup trampoline PGD and PMD */ - create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr, + create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr, (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE); #ifdef CONFIG_XIP_KERNEL - create_pmd_mapping(trampoline_pmd, kernel_virt_addr, - xiprom, PMD_SIZE, PAGE_KERNEL_EXEC); + create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr, + kernel_map.xiprom, PMD_SIZE, PAGE_KERNEL_EXEC); #else - create_pmd_mapping(trampoline_pmd, kernel_virt_addr, - load_pa, PMD_SIZE, PAGE_KERNEL_EXEC); + create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr, + kernel_map.phys_addr, PMD_SIZE, PAGE_KERNEL_EXEC); #endif #else /* Setup trampoline PGD */ - create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr, - load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC); + create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr, + kernel_map.phys_addr, PGDIR_SIZE, PAGE_KERNEL_EXEC); #endif /* diff --git a/arch/riscv/mm/physaddr.c b/arch/riscv/mm/physaddr.c index 35703d5ef5fd..e7fd0c253c7b 100644 --- a/arch/riscv/mm/physaddr.c +++ b/arch/riscv/mm/physaddr.c @@ -23,7 +23,7 @@ EXPORT_SYMBOL(__virt_to_phys); phys_addr_t __phys_addr_symbol(unsigned long x) { - unsigned long kernel_start = (unsigned long)kernel_virt_addr; + unsigned long kernel_start = kernel_map.virt_addr; unsigned long kernel_end = (unsigned long)_end; /* diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c index 0536ac84b730..ee4e5c1c39c5 100644 --- a/arch/riscv/mm/ptdump.c +++ b/arch/riscv/mm/ptdump.c @@ -379,7 +379,7 @@ static int __init ptdump_init(void) address_markers[PAGE_OFFSET_NR].start_address = PAGE_OFFSET; #ifdef CONFIG_64BIT 
address_markers[MODULES_MAPPING_NR].start_address = MODULES_VADDR;
-	address_markers[KERNEL_MAPPING_NR].start_address = kernel_virt_addr;
+	address_markers[KERNEL_MAPPING_NR].start_address = kernel_map.virt_addr;
 #endif
 
 	kernel_ptd_info.base_addr = KERN_VIRT_START;
-- cgit v1.2.3

From 31da94c25aea835ceac00575a9fd206c5a833fed Mon Sep 17 00:00:00 2001
From: Tong Tiangen
Date: Mon, 21 Jun 2021 11:28:55 +0800
Subject: riscv: add VMAP_STACK overflow detection

This patch adds stack overflow detection to riscv, usable when
CONFIG_VMAP_STACK=y. Overflow is detected in the kernel exception entry
(kernel/entry.S); when a kernel stack overflow is detected, the overflow
handler is invoked on a per-cpu overflow stack. This approach preserves
the GPRs and the original exception information.

The overflow check is performed before any attempt is made to access the
stack. The principle of the detection is that kernel stacks are aligned
to double their size, enabling overflow to be detected with a single bit
test. For example, a 16K stack is aligned to 32K, ensuring that bit 14
of the SP must be zero; on an overflow (or underflow), this bit is
flipped. Thus, an overflow (of less than the size of the stack) can be
detected by testing whether this bit is set.

This gives us a useful error message on stack overflow, as can be
triggered with the LKDTM overflow test:

[ 388.053267] lkdtm: Performing direct entry EXHAUST_STACK
[ 388.053663] lkdtm: Calling function with 1024 frame size to depth 32 ...
[ 388.054016] lkdtm: loop 32/32 ...
[ 388.054186] lkdtm: loop 31/32 ...
[ 388.054491] lkdtm: loop 30/32 ...
[ 388.054672] lkdtm: loop 29/32 ...
[ 388.054859] lkdtm: loop 28/32 ...
[ 388.055010] lkdtm: loop 27/32 ...
[ 388.055163] lkdtm: loop 26/32 ...
[ 388.055309] lkdtm: loop 25/32 ...
[ 388.055481] lkdtm: loop 24/32 ...
[ 388.055653] lkdtm: loop 23/32 ...
[ 388.055837] lkdtm: loop 22/32 ...
[ 388.056015] lkdtm: loop 21/32 ...
[ 388.056188] lkdtm: loop 20/32 ...
[ 388.058145] Insufficient stack space to handle exception!
[ 388.058153] Task stack: [0xffffffd014260000..0xffffffd014264000] [ 388.058160] Overflow stack: [0xffffffe1f8d2c220..0xffffffe1f8d2d220] [ 388.058168] CPU: 0 PID: 89 Comm: bash Not tainted 5.12.0-rc8-dirty #90 [ 388.058175] Hardware name: riscv-virtio,qemu (DT) [ 388.058187] epc : number+0x32/0x2c0 [ 388.058247] ra : vsnprintf+0x2ae/0x3f0 [ 388.058255] epc : ffffffe0002d38f6 ra : ffffffe0002d814e sp : ffffffd01425ffc0 [ 388.058263] gp : ffffffe0012e4010 tp : ffffffe08014da00 t0 : ffffffd0142606e8 [ 388.058271] t1 : 0000000000000000 t2 : 0000000000000000 s0 : ffffffd014260070 [ 388.058303] s1 : ffffffd014260158 a0 : ffffffd01426015e a1 : ffffffd014260158 [ 388.058311] a2 : 0000000000000013 a3 : ffff0a01ffffff10 a4 : ffffffe000c398e0 [ 388.058319] a5 : 511b02ec65f3e300 a6 : 0000000000a1749a a7 : 0000000000000000 [ 388.058327] s2 : ffffffff000000ff s3 : 00000000ffff0a01 s4 : ffffffe0012e50a8 [ 388.058335] s5 : 0000000000ffff0a s6 : ffffffe0012e50a8 s7 : ffffffe000da1cc0 [ 388.058343] s8 : ffffffffffffffff s9 : ffffffd0142602b0 s10: ffffffd0142602a8 [ 388.058351] s11: ffffffd01426015e t3 : 00000000000f0000 t4 : ffffffffffffffff [ 388.058359] t5 : 000000000000002f t6 : ffffffd014260158 [ 388.058366] status: 0000000000000100 badaddr: ffffffd01425fff8 cause: 000000000000000f [ 388.058374] Kernel panic - not syncing: Kernel stack overflow [ 388.058381] CPU: 0 PID: 89 Comm: bash Not tainted 5.12.0-rc8-dirty #90 [ 388.058387] Hardware name: riscv-virtio,qemu (DT) [ 388.058393] Call Trace: [ 388.058400] [] walk_stackframe+0x0/0xce [ 388.058406] [] dump_backtrace+0x38/0x46 [ 388.058412] [] show_stack+0x10/0x18 [ 388.058418] [] dump_stack+0x74/0x8e [ 388.058424] [] panic+0xfc/0x2b2 [ 388.058430] [] print_trace_address+0x0/0x24 [ 388.058436] [] vsnprintf+0x2ae/0x3f0 [ 388.058956] SMP: stopping secondary CPUs Signed-off-by: Tong Tiangen Reviewed-by: Kefeng Wang Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 1 + arch/riscv/include/asm/asm-prototypes.h | 3 + arch/riscv/include/asm/thread_info.h | 15 +++++ arch/riscv/kernel/entry.S | 108 ++++++++++++++++++++++++++++++++ arch/riscv/kernel/traps.c | 35 +++++++++++ arch/riscv/kernel/vmlinux.lds.S | 2 +- 6 files changed, 163 insertions(+), 1 deletion(-) (limited to 'arch/riscv/kernel') diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 1e8dde174946..3590eb76000e 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -70,6 +70,7 @@ config RISCV select HAVE_ARCH_MMAP_RND_BITS if MMU select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK + select HAVE_ARCH_VMAP_STACK if MMU && 64BIT select HAVE_ASM_MODVERSIONS select HAVE_CONTEXT_TRACKING select HAVE_DEBUG_KMEMLEAK diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h index 2a652b0c987d..ef386fcf3939 100644 --- a/arch/riscv/include/asm/asm-prototypes.h +++ b/arch/riscv/include/asm/asm-prototypes.h @@ -25,4 +25,7 @@ DECLARE_DO_ERROR_INFO(do_trap_ecall_s); DECLARE_DO_ERROR_INFO(do_trap_ecall_m); DECLARE_DO_ERROR_INFO(do_trap_break); +asmlinkage unsigned long get_overflow_stack(void); +asmlinkage void handle_bad_stack(struct pt_regs *regs); + #endif /* _ASM_RISCV_PROTOTYPES_H */ diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h index 0e549a3089b3..60da0dcacf14 100644 --- a/arch/riscv/include/asm/thread_info.h +++ b/arch/riscv/include/asm/thread_info.h @@ -19,6 +19,21 @@ #endif #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) +/* + * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by + * 
checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry + * assembly. + */ +#ifdef CONFIG_VMAP_STACK +#define THREAD_ALIGN (2 * THREAD_SIZE) +#else +#define THREAD_ALIGN THREAD_SIZE +#endif + +#define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER) +#define OVERFLOW_STACK_SIZE SZ_4K +#define SHADOW_OVERFLOW_STACK_SIZE (1024) + #ifndef __ASSEMBLY__ #include diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index 80d5a9e017b0..98f502654edd 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -30,6 +30,15 @@ ENTRY(handle_exception) _restore_kernel_tpsp: csrr tp, CSR_SCRATCH REG_S sp, TASK_TI_KERNEL_SP(tp) + +#ifdef CONFIG_VMAP_STACK + addi sp, sp, -(PT_SIZE_ON_STACK) + srli sp, sp, THREAD_SHIFT + andi sp, sp, 0x1 + bnez sp, handle_kernel_stack_overflow + REG_L sp, TASK_TI_KERNEL_SP(tp) +#endif + _save_context: REG_S sp, TASK_TI_USER_SP(tp) REG_L sp, TASK_TI_KERNEL_SP(tp) @@ -376,6 +385,105 @@ handle_syscall_trace_exit: call do_syscall_trace_exit j ret_from_exception +#ifdef CONFIG_VMAP_STACK +handle_kernel_stack_overflow: + la sp, shadow_stack + addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE + + //save caller register to shadow stack + addi sp, sp, -(PT_SIZE_ON_STACK) + REG_S x1, PT_RA(sp) + REG_S x5, PT_T0(sp) + REG_S x6, PT_T1(sp) + REG_S x7, PT_T2(sp) + REG_S x10, PT_A0(sp) + REG_S x11, PT_A1(sp) + REG_S x12, PT_A2(sp) + REG_S x13, PT_A3(sp) + REG_S x14, PT_A4(sp) + REG_S x15, PT_A5(sp) + REG_S x16, PT_A6(sp) + REG_S x17, PT_A7(sp) + REG_S x28, PT_T3(sp) + REG_S x29, PT_T4(sp) + REG_S x30, PT_T5(sp) + REG_S x31, PT_T6(sp) + + la ra, restore_caller_reg + tail get_overflow_stack + +restore_caller_reg: + //save per-cpu overflow stack + REG_S a0, -8(sp) + //restore caller register from shadow_stack + REG_L x1, PT_RA(sp) + REG_L x5, PT_T0(sp) + REG_L x6, PT_T1(sp) + REG_L x7, PT_T2(sp) + REG_L x10, PT_A0(sp) + REG_L x11, PT_A1(sp) + REG_L x12, PT_A2(sp) + REG_L x13, PT_A3(sp) + REG_L x14, PT_A4(sp) + REG_L x15, PT_A5(sp) + REG_L x16, PT_A6(sp) + REG_L x17, PT_A7(sp) + REG_L x28, PT_T3(sp) + REG_L x29, PT_T4(sp) + REG_L x30, PT_T5(sp) + REG_L x31, PT_T6(sp) + + //load per-cpu overflow stack + REG_L sp, -8(sp) + addi sp, sp, -(PT_SIZE_ON_STACK) + + //save context to overflow stack + REG_S x1, PT_RA(sp) + REG_S x3, PT_GP(sp) + REG_S x5, PT_T0(sp) + REG_S x6, PT_T1(sp) + REG_S x7, PT_T2(sp) + REG_S x8, PT_S0(sp) + REG_S x9, PT_S1(sp) + REG_S x10, PT_A0(sp) + REG_S x11, PT_A1(sp) + REG_S x12, PT_A2(sp) + REG_S x13, PT_A3(sp) + REG_S x14, PT_A4(sp) + REG_S x15, PT_A5(sp) + REG_S x16, PT_A6(sp) + REG_S x17, PT_A7(sp) + REG_S x18, PT_S2(sp) + REG_S x19, PT_S3(sp) + REG_S x20, PT_S4(sp) + REG_S x21, PT_S5(sp) + REG_S x22, PT_S6(sp) + REG_S x23, PT_S7(sp) + REG_S x24, PT_S8(sp) + REG_S x25, PT_S9(sp) + REG_S x26, PT_S10(sp) + REG_S x27, PT_S11(sp) + REG_S x28, PT_T3(sp) + REG_S x29, PT_T4(sp) + REG_S x30, PT_T5(sp) + REG_S x31, PT_T6(sp) + + REG_L s0, TASK_TI_KERNEL_SP(tp) + csrr s1, CSR_STATUS + csrr s2, CSR_EPC + csrr s3, CSR_TVAL + csrr s4, CSR_CAUSE + csrr s5, CSR_SCRATCH + REG_S s0, PT_SP(sp) + REG_S s1, PT_STATUS(sp) + REG_S s2, PT_EPC(sp) + REG_S s3, PT_BADADDR(sp) + REG_S s4, PT_CAUSE(sp) + REG_S s5, PT_TP(sp) + move a0, sp + tail handle_bad_stack +#endif + END(handle_exception) ENTRY(ret_from_fork) diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index 0721b9798595..bb6a450f0ecc 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -198,3 +198,38 @@ int is_valid_bugaddr(unsigned long pc) void __init 
trap_init(void)
 {
 }
+
+#ifdef CONFIG_VMAP_STACK
+static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
+		overflow_stack)__aligned(16);
+/*
+ * Shadow stack: scratch space for handle_kernel_stack_overflow (in
+ * kernel/entry.S) while it calls get_overflow_stack() for the per-cpu stack.
+ */
+long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)];
+asmlinkage unsigned long get_overflow_stack(void)
+{
+	return (unsigned long)this_cpu_ptr(overflow_stack) +
+		OVERFLOW_STACK_SIZE;
+}
+
+asmlinkage void handle_bad_stack(struct pt_regs *regs)
+{
+	unsigned long tsk_stk = (unsigned long)current->stack;
+	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
+
+	console_verbose();
+
+	pr_emerg("Insufficient stack space to handle exception!\n");
+	pr_emerg("Task stack: [0x%016lx..0x%016lx]\n",
+			tsk_stk, tsk_stk + THREAD_SIZE);
+	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
+			ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);
+
+	__show_regs(regs);
+	panic("Kernel stack overflow");
+
+	for (;;)
+		wait_for_interrupt();
+}
+#endif
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index 891742ff75a7..502d0826ecb1 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -117,7 +117,7 @@ SECTIONS
 
 	. = ALIGN(SECTION_ALIGN);
 	_data = .;
 
-	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
 	.sdata : {
 		__global_pointer$ = . + 0x800;
 		*(.sdata*)
-- cgit v1.2.3

From 1958e5aef5098e28b7d6e6a2972649901ebecace Mon Sep 17 00:00:00 2001
From: Jiapeng Chong
Date: Fri, 25 Jun 2021 17:28:24 +0800
Subject: riscv: xip: Fix duplicate included asm/pgtable.h

Clean up the following includecheck warning:

./arch/riscv/kernel/vmlinux-xip.lds.S: asm/pgtable.h is included more
than once.

No functional change.

Reported-by: Abaci Robot
Signed-off-by: Jiapeng Chong
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/kernel/vmlinux-xip.lds.S | 1 -
 1 file changed, 1 deletion(-)
(limited to 'arch/riscv/kernel')

diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S
index 4b29b9917f99..b0a813faa264 100644
--- a/arch/riscv/kernel/vmlinux-xip.lds.S
+++ b/arch/riscv/kernel/vmlinux-xip.lds.S
@@ -12,7 +12,6 @@
 #include <asm/vmlinux.lds.h>
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/cache.h>
 #include <asm/thread_info.h>
-- cgit v1.2.3
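
A note on the VMAP_STACK detection used above: it works because each
kernel stack is aligned to twice its size, so a single bit of the stack
pointer distinguishes "inside the stack" from "overflowed". A minimal,
stand-alone illustration of the arithmetic (assuming THREAD_SHIFT == 14,
i.e. 16 KiB stacks, and a hypothetical stack base; this mirrors the
srli/andi test in handle_exception, it is not the kernel code itself):

	#include <stdint.h>
	#include <stdio.h>

	#define THREAD_SHIFT	14
	#define THREAD_SIZE	(1UL << THREAD_SHIFT)	/* 16 KiB stack */
	#define THREAD_ALIGN	(2 * THREAD_SIZE)	/* base aligned to 32 KiB */

	/* With a THREAD_ALIGN-aligned base, bit THREAD_SHIFT of sp is 0 for
	 * every address inside the stack and flips to 1 just outside it. */
	static int sp_overflowed(uintptr_t sp)
	{
		return (sp >> THREAD_SHIFT) & 1;
	}

	int main(void)
	{
		uintptr_t base = 0x40000000;	/* hypothetical, 32 KiB-aligned */

		printf("%d\n", sp_overflowed(base + THREAD_SIZE - 8));	/* 0: in range */
		printf("%d\n", sp_overflowed(base - 8));	/* 1: overflowed below base */
		return 0;
	}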