From 6bf212c89c48458d8deef1c973678c62528dab04 Mon Sep 17 00:00:00 2001
From: Kalesh Singh
Date: Tue, 26 Jul 2022 00:37:34 -0700
Subject: arm64: stacktrace: Add shared header for common stack unwinding code

In order to reuse the arm64 stack unwinding logic for the nVHE
hypervisor stack, move the common code to a shared header
(arch/arm64/include/asm/stacktrace/common.h).

The nVHE hypervisor cannot safely link against kernel code, so we make
use of the shared header to avoid duplicated logic later in this
series.

Signed-off-by: Kalesh Singh
Reviewed-by: Mark Brown
Reviewed-by: Fuad Tabba
Tested-by: Fuad Tabba
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20220726073750.3219117-2-kaleshsingh@google.com
---
 arch/arm64/kernel/stacktrace.c | 57 ------------------------------------------
 1 file changed, 57 deletions(-)
(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index fcaa151b81f1..94a5dd2ab8fd 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -18,63 +18,6 @@
 #include
 #include

-/*
- * A snapshot of a frame record or fp/lr register values, along with some
- * accounting information necessary for robust unwinding.
- *
- * @fp:          The fp value in the frame record (or the real fp)
- * @pc:          The lr value in the frame record (or the real lr)
- *
- * @stacks_done: Stacks which have been entirely unwound, for which it is no
- *               longer valid to unwind to.
- *
- * @prev_fp:     The fp that pointed to this frame record, or a synthetic value
- *               of 0. This is used to ensure that within a stack, each
- *               subsequent frame record is at an increasing address.
- * @prev_type:   The type of stack this frame record was on, or a synthetic
- *               value of STACK_TYPE_UNKNOWN. This is used to detect a
- *               transition from one stack to another.
- *
- * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
- *               associated with the most recently encountered replacement lr
- *               value.
- *
- * @task:        The task being unwound.
- */
-struct unwind_state {
-	unsigned long fp;
-	unsigned long pc;
-	DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
-	unsigned long prev_fp;
-	enum stack_type prev_type;
-#ifdef CONFIG_KRETPROBES
-	struct llist_node *kr_cur;
-#endif
-	struct task_struct *task;
-};
-
-static void unwind_init_common(struct unwind_state *state,
-			       struct task_struct *task)
-{
-	state->task = task;
-#ifdef CONFIG_KRETPROBES
-	state->kr_cur = NULL;
-#endif
-
-	/*
-	 * Prime the first unwind.
-	 *
-	 * In unwind_next() we'll check that the FP points to a valid stack,
-	 * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
-	 * treated as a transition to whichever stack that happens to be. The
-	 * prev_fp value won't be used, but we set it to 0 such that it is
-	 * definitely not an accessible stack address.
-	 */
-	bitmap_zero(state->stacks_done, __NR_STACK_TYPES);
-	state->prev_fp = 0;
-	state->prev_type = STACK_TYPE_UNKNOWN;
-}
-
 /*
  * Start an unwind from a pt_regs.
  *
-- cgit v1.2.3

From be63c647fd28d25484257f5f36a008db7d99991d Mon Sep 17 00:00:00 2001
From: Kalesh Singh
Date: Tue, 26 Jul 2022 00:37:36 -0700
Subject: arm64: stacktrace: Factor out unwind_next_common()

Move common unwind_next logic to stacktrace/common.h. This allows
reusing the code in the implementation of the nVHE hypervisor stack
unwinder, later in this series.
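As a minimal sketch of how a caller is expected to drive the
factored-out helper (illustration only, not part of the patch:
example_unwind() and the consume callback are invented, while
unwind_next_common(), struct unwind_state and struct stack_info are
the ones introduced here):

	/*
	 * Illustrative only: step unwind_next_common() until the callback
	 * stops consuming entries or the frame chain ends/is malformed.
	 */
	static void example_unwind(struct unwind_state *state,
				   bool (*consume)(unsigned long pc))
	{
		struct stack_info info;

		while (consume(state->pc)) {
			if (unwind_next_common(state, &info))
				break;	/* -EINVAL: bad or exhausted chain */
		}
	}

This is the same shape the series later formalizes as unwind().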
Signed-off-by: Kalesh Singh Reviewed-by: Fuad Tabba Reviewed-by: Mark Brown Tested-by: Fuad Tabba Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220726073750.3219117-4-kaleshsingh@google.com --- arch/arm64/include/asm/stacktrace/common.h | 50 ++++++++++++++++++++++++++++++ arch/arm64/kernel/stacktrace.c | 41 +++--------------------- 2 files changed, 54 insertions(+), 37 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h index f58b786460d3..0c5cbfdb56b5 100644 --- a/arch/arm64/include/asm/stacktrace/common.h +++ b/arch/arm64/include/asm/stacktrace/common.h @@ -65,6 +65,10 @@ struct unwind_state { static inline bool on_overflow_stack(unsigned long sp, unsigned long size, struct stack_info *info); +static inline bool on_accessible_stack(const struct task_struct *tsk, + unsigned long sp, unsigned long size, + struct stack_info *info); + static inline bool on_stack(unsigned long sp, unsigned long size, unsigned long low, unsigned long high, enum stack_type type, struct stack_info *info) @@ -120,4 +124,50 @@ static inline void unwind_init_common(struct unwind_state *state, state->prev_type = STACK_TYPE_UNKNOWN; } +static inline int unwind_next_common(struct unwind_state *state, + struct stack_info *info) +{ + struct task_struct *tsk = state->task; + unsigned long fp = state->fp; + + if (fp & 0x7) + return -EINVAL; + + if (!on_accessible_stack(tsk, fp, 16, info)) + return -EINVAL; + + if (test_bit(info->type, state->stacks_done)) + return -EINVAL; + + /* + * As stacks grow downward, any valid record on the same stack must be + * at a strictly higher address than the prior record. + * + * Stacks can nest in several valid orders, e.g. + * + * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL + * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW + * + * ... but the nesting itself is strict. Once we transition from one + * stack to another, it's never valid to unwind back to that first + * stack. + */ + if (info->type == state->prev_type) { + if (fp <= state->prev_fp) + return -EINVAL; + } else { + __set_bit(state->prev_type, state->stacks_done); + } + + /* + * Record this frame record's values and location. The prev_fp and + * prev_type are only meaningful to the next unwind_next() invocation. + */ + state->fp = READ_ONCE(*(unsigned long *)(fp)); + state->pc = READ_ONCE(*(unsigned long *)(fp + 8)); + state->prev_fp = fp; + state->prev_type = info->type; + + return 0; +} #endif /* __ASM_STACKTRACE_COMMON_H */ diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 94a5dd2ab8fd..834851939364 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -81,48 +81,15 @@ static int notrace unwind_next(struct unwind_state *state) struct task_struct *tsk = state->task; unsigned long fp = state->fp; struct stack_info info; + int err; /* Final frame; nothing to unwind */ if (fp == (unsigned long)task_pt_regs(tsk)->stackframe) return -ENOENT; - if (fp & 0x7) - return -EINVAL; - - if (!on_accessible_stack(tsk, fp, 16, &info)) - return -EINVAL; - - if (test_bit(info.type, state->stacks_done)) - return -EINVAL; - - /* - * As stacks grow downward, any valid record on the same stack must be - * at a strictly higher address than the prior record. - * - * Stacks can nest in several valid orders, e.g. - * - * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL - * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW - * - * ... but the nesting itself is strict. 
Once we transition from one
- * stack to another, it's never valid to unwind back to that first
- * stack.
- */
-	if (info.type == state->prev_type) {
-		if (fp <= state->prev_fp)
-			return -EINVAL;
-	} else {
-		__set_bit(state->prev_type, state->stacks_done);
-	}
-
-	/*
-	 * Record this frame record's values and location. The prev_fp and
-	 * prev_type are only meaningful to the next unwind_next() invocation.
-	 */
-	state->fp = READ_ONCE(*(unsigned long *)(fp));
-	state->pc = READ_ONCE(*(unsigned long *)(fp + 8));
-	state->prev_fp = fp;
-	state->prev_type = info.type;
+	err = unwind_next_common(state, &info);
+	if (err)
+		return err;

 	state->pc = ptrauth_strip_insn_pac(state->pc);
-- cgit v1.2.3

From 5b1b08619f50422c3e43d1fd7af257595a9e4a67 Mon Sep 17 00:00:00 2001
From: Kalesh Singh
Date: Tue, 26 Jul 2022 00:37:37 -0700
Subject: arm64: stacktrace: Handle frame pointer from different address spaces

The unwinder code is made reusable so that it can be used to unwind
various types of stacks. One use case is unwinding the nVHE hyp stack
from the host (EL1) in non-protected mode. This means that the unwinder
must be able to translate HYP stack addresses to kernel addresses.

Add a callback (stack_trace_translate_fp_fn) to allow specifying the
translation function.

Signed-off-by: Kalesh Singh
Reviewed-by: Fuad Tabba
Tested-by: Fuad Tabba
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20220726073750.3219117-5-kaleshsingh@google.com
---
 arch/arm64/include/asm/stacktrace/common.h | 29 +++++++++++++++++++++++++----
 arch/arm64/kernel/stacktrace.c             |  2 +-
 2 files changed, 26 insertions(+), 5 deletions(-)
(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h
index 0c5cbfdb56b5..b241edba5c76 100644
--- a/arch/arm64/include/asm/stacktrace/common.h
+++ b/arch/arm64/include/asm/stacktrace/common.h
@@ -124,11 +124,25 @@ static inline void unwind_init_common(struct unwind_state *state,
 	state->prev_type = STACK_TYPE_UNKNOWN;
 }

+/*
+ * stack_trace_translate_fp_fn() - Translates a non-kernel frame pointer to
+ * a kernel address.
+ *
+ * @fp:   the frame pointer to be updated to its kernel address.
+ * @type: the stack type associated with frame pointer @fp
+ *
+ * Returns true on success and @fp is updated to the corresponding
+ * kernel virtual address; otherwise returns false.
+ */
+typedef bool (*stack_trace_translate_fp_fn)(unsigned long *fp,
+					    enum stack_type type);
+
 static inline int unwind_next_common(struct unwind_state *state,
-				     struct stack_info *info)
+				     struct stack_info *info,
+				     stack_trace_translate_fp_fn translate_fp)
 {
+	unsigned long fp = state->fp, kern_fp = fp;
 	struct task_struct *tsk = state->task;
-	unsigned long fp = state->fp;

 	if (fp & 0x7)
 		return -EINVAL;
@@ -139,6 +153,13 @@ static inline int unwind_next_common(struct unwind_state *state,
 	if (test_bit(info->type, state->stacks_done))
 		return -EINVAL;

+	/*
+	 * If fp is not from the current address space, perform the necessary
+	 * translation before dereferencing it to get the next fp.
+	 */
+	if (translate_fp && !translate_fp(&kern_fp, info->type))
+		return -EINVAL;
+
 	/*
 	 * As stacks grow downward, any valid record on the same stack must be
 	 * at a strictly higher address than the prior record.
@@ -163,8 +184,8 @@ static inline int unwind_next_common(struct unwind_state *state,
 	 * Record this frame record's values and location. The prev_fp and
 	 * prev_type are only meaningful to the next unwind_next() invocation.
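 	 * Note that prev_fp records the original fp, not the translated
 	 * kern_fp, so the strictly-increasing check earlier in this
 	 * function keeps comparing fp values within the unwound stack's
 	 * own address space.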
 	 */
-	state->fp = READ_ONCE(*(unsigned long *)(fp));
-	state->pc = READ_ONCE(*(unsigned long *)(fp + 8));
+	state->fp = READ_ONCE(*(unsigned long *)(kern_fp));
+	state->pc = READ_ONCE(*(unsigned long *)(kern_fp + 8));
 	state->prev_fp = fp;
 	state->prev_type = info->type;

diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 834851939364..eef3cf6bf2d7 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -87,7 +87,7 @@ static int notrace unwind_next(struct unwind_state *state)
 	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
 		return -ENOENT;

-	err = unwind_next_common(state, &info);
+	err = unwind_next_common(state, &info, NULL);
 	if (err)
 		return err;
-- cgit v1.2.3

From f51e7146740514347d6c5526a2c393e224a19c0d Mon Sep 17 00:00:00 2001
From: Kalesh Singh
Date: Tue, 26 Jul 2022 00:37:38 -0700
Subject: arm64: stacktrace: Factor out common unwind()

Move unwind() to stacktrace/common.h, and as a result the kernel
unwind_next() to asm/stacktrace.h. This allows reusing unwind() in the
implementation of the nVHE HYP stack unwinder, later in the series.

Signed-off-by: Kalesh Singh
Reviewed-by: Fuad Tabba
Reviewed-by: Mark Brown
Tested-by: Fuad Tabba
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20220726073750.3219117-6-kaleshsingh@google.com
---
 arch/arm64/include/asm/stacktrace.h        | 51 +++++++++++++++++++++++
 arch/arm64/include/asm/stacktrace/common.h | 19 +++++++++
 arch/arm64/kernel/stacktrace.c             | 67 ------------------------------
 3 files changed, 70 insertions(+), 67 deletions(-)
(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 43f4b4a6d383..ea828579a98b 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -11,6 +11,7 @@
 #include
 #include
+#include
 #include
 #include
@@ -80,4 +81,54 @@ static inline bool on_accessible_stack(const struct task_struct *tsk,
 	return false;
 }

+/*
+ * Unwind from one frame record (A) to the next frame record (B).
+ *
+ * We terminate early if the location of B indicates a malformed chain of frame
+ * records (e.g. a cycle), determined based on the location and fp value of A
+ * and the location (but not the fp value) of B.
+ */
+static inline int notrace unwind_next(struct unwind_state *state)
+{
+	struct task_struct *tsk = state->task;
+	unsigned long fp = state->fp;
+	struct stack_info info;
+	int err;
+
+	/* Final frame; nothing to unwind */
+	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
+		return -ENOENT;
+
+	err = unwind_next_common(state, &info, NULL);
+	if (err)
+		return err;
+
+	state->pc = ptrauth_strip_insn_pac(state->pc);
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	if (tsk->ret_stack &&
+	    (state->pc == (unsigned long)return_to_handler)) {
+		unsigned long orig_pc;
+		/*
+		 * This is a case where function graph tracer has
+		 * modified a return address (LR) in a stack frame
+		 * to hook a function return.
+		 * So replace it to an original value.
+ */ + orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc, + (void *)state->fp); + if (WARN_ON_ONCE(state->pc == orig_pc)) + return -EINVAL; + state->pc = orig_pc; + } +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ +#ifdef CONFIG_KRETPROBES + if (is_kretprobe_trampoline(state->pc)) + state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur); +#endif + + return 0; +} +NOKPROBE_SYMBOL(unwind_next); + #endif /* __ASM_STACKTRACE_H */ diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h index b241edba5c76..4b632141d91c 100644 --- a/arch/arm64/include/asm/stacktrace/common.h +++ b/arch/arm64/include/asm/stacktrace/common.h @@ -9,6 +9,7 @@ #include #include +#include #include enum stack_type { @@ -69,6 +70,8 @@ static inline bool on_accessible_stack(const struct task_struct *tsk, unsigned long sp, unsigned long size, struct stack_info *info); +static inline int unwind_next(struct unwind_state *state); + static inline bool on_stack(unsigned long sp, unsigned long size, unsigned long low, unsigned long high, enum stack_type type, struct stack_info *info) @@ -191,4 +194,20 @@ static inline int unwind_next_common(struct unwind_state *state, return 0; } + +static inline void notrace unwind(struct unwind_state *state, + stack_trace_consume_fn consume_entry, + void *cookie) +{ + while (1) { + int ret; + + if (!consume_entry(cookie, state->pc)) + break; + ret = unwind_next(state); + if (ret < 0) + break; + } +} +NOKPROBE_SYMBOL(unwind); #endif /* __ASM_STACKTRACE_COMMON_H */ diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index eef3cf6bf2d7..9fa60ee48499 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -7,14 +7,12 @@ #include #include #include -#include #include #include #include #include #include -#include #include #include @@ -69,71 +67,6 @@ static inline void unwind_init_from_task(struct unwind_state *state, state->pc = thread_saved_pc(task); } -/* - * Unwind from one frame record (A) to the next frame record (B). - * - * We terminate early if the location of B indicates a malformed chain of frame - * records (e.g. a cycle), determined based on the location and fp value of A - * and the location (but not the fp value) of B. - */ -static int notrace unwind_next(struct unwind_state *state) -{ - struct task_struct *tsk = state->task; - unsigned long fp = state->fp; - struct stack_info info; - int err; - - /* Final frame; nothing to unwind */ - if (fp == (unsigned long)task_pt_regs(tsk)->stackframe) - return -ENOENT; - - err = unwind_next_common(state, &info, NULL); - if (err) - return err; - - state->pc = ptrauth_strip_insn_pac(state->pc); - -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - if (tsk->ret_stack && - (state->pc == (unsigned long)return_to_handler)) { - unsigned long orig_pc; - /* - * This is a case where function graph tracer has - * modified a return address (LR) in a stack frame - * to hook a function return. - * So replace it to an original value. 
- */ - orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc, - (void *)state->fp); - if (WARN_ON_ONCE(state->pc == orig_pc)) - return -EINVAL; - state->pc = orig_pc; - } -#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ -#ifdef CONFIG_KRETPROBES - if (is_kretprobe_trampoline(state->pc)) - state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur); -#endif - - return 0; -} -NOKPROBE_SYMBOL(unwind_next); - -static void notrace unwind(struct unwind_state *state, - stack_trace_consume_fn consume_entry, void *cookie) -{ - while (1) { - int ret; - - if (!consume_entry(cookie, state->pc)) - break; - ret = unwind_next(state); - if (ret < 0) - break; - } -} -NOKPROBE_SYMBOL(unwind); - static bool dump_backtrace_entry(void *arg, unsigned long where) { char *loglvl = arg; -- cgit v1.2.3 From 4e00532f37365967e9896966b1fe61888e659259 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 27 Jul 2022 15:29:03 +0100 Subject: KVM: arm64: Make unwind()/on_accessible_stack() per-unwinder functions Having multiple versions of on_accessible_stack() (one per unwinder) makes it very hard to reason about what is used where due to the complexity of the various includes, the forward declarations, and the reliance on everything being 'inline'. Instead, move the code back where it should be. Each unwinder implements: - on_accessible_stack() as well as the helpers it depends on, - unwind()/unwind_next(), as they pass on_accessible_stack as a parameter to unwind_next_common() (which is the only common code here) This hardly results in any duplication, and makes it much easier to reason about the code. Signed-off-by: Marc Zyngier Reviewed-by: Kalesh Singh Tested-by: Kalesh Singh Reviewed-by: Oliver Upton Link: https://lore.kernel.org/r/20220727142906.1856759-4-maz@kernel.org --- arch/arm64/include/asm/stacktrace.h | 74 ------------------------ arch/arm64/include/asm/stacktrace/common.h | 55 +++++------------- arch/arm64/include/asm/stacktrace/nvhe.h | 84 +--------------------------- arch/arm64/kernel/stacktrace.c | 90 ++++++++++++++++++++++++++++++ arch/arm64/kvm/hyp/nvhe/stacktrace.c | 52 +++++++++++++++++ arch/arm64/kvm/stacktrace.c | 55 ++++++++++++++++++ 6 files changed, 213 insertions(+), 197 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index ea828579a98b..6ebdcdff77f5 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -57,78 +57,4 @@ static inline bool on_overflow_stack(unsigned long sp, unsigned long size, struct stack_info *info) { return false; } #endif - -/* - * We can only safely access per-cpu stacks from current in a non-preemptible - * context. - */ -static inline bool on_accessible_stack(const struct task_struct *tsk, - unsigned long sp, unsigned long size, - struct stack_info *info) -{ - if (on_accessible_stack_common(tsk, sp, size, info)) - return true; - - if (on_task_stack(tsk, sp, size, info)) - return true; - if (tsk != current || preemptible()) - return false; - if (on_irq_stack(sp, size, info)) - return true; - if (on_sdei_stack(sp, size, info)) - return true; - - return false; -} - -/* - * Unwind from one frame record (A) to the next frame record (B). - * - * We terminate early if the location of B indicates a malformed chain of frame - * records (e.g. a cycle), determined based on the location and fp value of A - * and the location (but not the fp value) of B. 
- */ -static inline int notrace unwind_next(struct unwind_state *state) -{ - struct task_struct *tsk = state->task; - unsigned long fp = state->fp; - struct stack_info info; - int err; - - /* Final frame; nothing to unwind */ - if (fp == (unsigned long)task_pt_regs(tsk)->stackframe) - return -ENOENT; - - err = unwind_next_common(state, &info, NULL); - if (err) - return err; - - state->pc = ptrauth_strip_insn_pac(state->pc); - -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - if (tsk->ret_stack && - (state->pc == (unsigned long)return_to_handler)) { - unsigned long orig_pc; - /* - * This is a case where function graph tracer has - * modified a return address (LR) in a stack frame - * to hook a function return. - * So replace it to an original value. - */ - orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc, - (void *)state->fp); - if (WARN_ON_ONCE(state->pc == orig_pc)) - return -EINVAL; - state->pc = orig_pc; - } -#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ -#ifdef CONFIG_KRETPROBES - if (is_kretprobe_trampoline(state->pc)) - state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur); -#endif - - return 0; -} -NOKPROBE_SYMBOL(unwind_next); - #endif /* __ASM_STACKTRACE_H */ diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h index 3ebb69ea374a..18046a7248a2 100644 --- a/arch/arm64/include/asm/stacktrace/common.h +++ b/arch/arm64/include/asm/stacktrace/common.h @@ -79,15 +79,6 @@ struct unwind_state { struct task_struct *task; }; -static inline bool on_overflow_stack(unsigned long sp, unsigned long size, - struct stack_info *info); - -static inline bool on_accessible_stack(const struct task_struct *tsk, - unsigned long sp, unsigned long size, - struct stack_info *info); - -static inline int unwind_next(struct unwind_state *state); - static inline bool on_stack(unsigned long sp, unsigned long size, unsigned long low, unsigned long high, enum stack_type type, struct stack_info *info) @@ -106,21 +97,6 @@ static inline bool on_stack(unsigned long sp, unsigned long size, return true; } -static inline bool on_accessible_stack_common(const struct task_struct *tsk, - unsigned long sp, - unsigned long size, - struct stack_info *info) -{ - if (info) - info->type = STACK_TYPE_UNKNOWN; - - /* - * Both the kernel and nvhe hypervisor make use of - * an overflow_stack - */ - return on_overflow_stack(sp, size, info); -} - static inline void unwind_init_common(struct unwind_state *state, struct task_struct *task) { @@ -156,8 +132,22 @@ static inline void unwind_init_common(struct unwind_state *state, typedef bool (*stack_trace_translate_fp_fn)(unsigned long *fp, enum stack_type type); +/* + * on_accessible_stack_fn() - Check whether a stack range is on any + * of the possible stacks. 
+ * + * @tsk: task whose stack is being unwound + * @sp: stack address being checked + * @size: size of the stack range being checked + * @info: stack unwinding context + */ +typedef bool (*on_accessible_stack_fn)(const struct task_struct *tsk, + unsigned long sp, unsigned long size, + struct stack_info *info); + static inline int unwind_next_common(struct unwind_state *state, struct stack_info *info, + on_accessible_stack_fn accessible, stack_trace_translate_fp_fn translate_fp) { unsigned long fp = state->fp, kern_fp = fp; @@ -166,7 +156,7 @@ static inline int unwind_next_common(struct unwind_state *state, if (fp & 0x7) return -EINVAL; - if (!on_accessible_stack(tsk, fp, 16, info)) + if (!accessible(tsk, fp, 16, info)) return -EINVAL; if (test_bit(info->type, state->stacks_done)) @@ -212,19 +202,4 @@ static inline int unwind_next_common(struct unwind_state *state, return 0; } -static inline void notrace unwind(struct unwind_state *state, - stack_trace_consume_fn consume_entry, - void *cookie) -{ - while (1) { - int ret; - - if (!consume_entry(cookie, state->pc)) - break; - ret = unwind_next(state); - if (ret < 0) - break; - } -} -NOKPROBE_SYMBOL(unwind); #endif /* __ASM_STACKTRACE_COMMON_H */ diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h index 8a5cb96d7143..a096216d8970 100644 --- a/arch/arm64/include/asm/stacktrace/nvhe.h +++ b/arch/arm64/include/asm/stacktrace/nvhe.h @@ -37,59 +37,7 @@ static inline void kvm_nvhe_unwind_init(struct unwind_state *state, state->pc = pc; } -static inline bool on_hyp_stack(unsigned long sp, unsigned long size, - struct stack_info *info); - -static inline bool on_accessible_stack(const struct task_struct *tsk, - unsigned long sp, unsigned long size, - struct stack_info *info) -{ - if (on_accessible_stack_common(tsk, sp, size, info)) - return true; - - if (on_hyp_stack(sp, size, info)) - return true; - - return false; -} - -#ifdef __KVM_NVHE_HYPERVISOR__ -/* - * Protected nVHE HYP stack unwinder - * - * In protected mode, the unwinding is done by the hypervisor in EL2. 
- */ - -#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE -static inline bool on_overflow_stack(unsigned long sp, unsigned long size, - struct stack_info *info) -{ - unsigned long low = (unsigned long)this_cpu_ptr(overflow_stack); - unsigned long high = low + OVERFLOW_STACK_SIZE; - - return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info); -} - -static inline bool on_hyp_stack(unsigned long sp, unsigned long size, - struct stack_info *info) -{ - struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params); - unsigned long high = params->stack_hyp_va; - unsigned long low = high - PAGE_SIZE; - - return on_stack(sp, size, low, high, STACK_TYPE_HYP, info); -} - -static inline int notrace unwind_next(struct unwind_state *state) -{ - struct stack_info info; - - return unwind_next_common(state, &info, NULL); -} -NOKPROBE_SYMBOL(unwind_next); -#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */ - -#else /* !__KVM_NVHE_HYPERVISOR__ */ +#ifndef __KVM_NVHE_HYPERVISOR__ /* * Conventional (non-protected) nVHE HYP stack unwinder * @@ -142,36 +90,6 @@ static inline bool kvm_nvhe_stack_kern_va(unsigned long *addr, return true; } -static inline bool on_overflow_stack(unsigned long sp, unsigned long size, - struct stack_info *info) -{ - struct kvm_nvhe_stacktrace_info *stacktrace_info - = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); - unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base; - unsigned long high = low + OVERFLOW_STACK_SIZE; - - return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info); -} - -static inline bool on_hyp_stack(unsigned long sp, unsigned long size, - struct stack_info *info) -{ - struct kvm_nvhe_stacktrace_info *stacktrace_info - = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); - unsigned long low = (unsigned long)stacktrace_info->stack_base; - unsigned long high = low + PAGE_SIZE; - - return on_stack(sp, size, low, high, STACK_TYPE_HYP, info); -} - -static inline int notrace unwind_next(struct unwind_state *state) -{ - struct stack_info info; - - return unwind_next_common(state, &info, kvm_nvhe_stack_kern_va); -} -NOKPROBE_SYMBOL(unwind_next); - void kvm_nvhe_dump_backtrace(unsigned long hyp_offset); #endif /* __KVM_NVHE_HYPERVISOR__ */ diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 9fa60ee48499..ce190ee18a20 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -67,6 +67,96 @@ static inline void unwind_init_from_task(struct unwind_state *state, state->pc = thread_saved_pc(task); } +/* + * We can only safely access per-cpu stacks from current in a non-preemptible + * context. + */ +static bool on_accessible_stack(const struct task_struct *tsk, + unsigned long sp, unsigned long size, + struct stack_info *info) +{ + if (info) + info->type = STACK_TYPE_UNKNOWN; + + if (on_task_stack(tsk, sp, size, info)) + return true; + if (tsk != current || preemptible()) + return false; + if (on_irq_stack(sp, size, info)) + return true; + if (on_overflow_stack(sp, size, info)) + return true; + if (on_sdei_stack(sp, size, info)) + return true; + + return false; +} + +/* + * Unwind from one frame record (A) to the next frame record (B). + * + * We terminate early if the location of B indicates a malformed chain of frame + * records (e.g. a cycle), determined based on the location and fp value of A + * and the location (but not the fp value) of B. 
+ */ +static int notrace unwind_next(struct unwind_state *state) +{ + struct task_struct *tsk = state->task; + unsigned long fp = state->fp; + struct stack_info info; + int err; + + /* Final frame; nothing to unwind */ + if (fp == (unsigned long)task_pt_regs(tsk)->stackframe) + return -ENOENT; + + err = unwind_next_common(state, &info, on_accessible_stack, NULL); + if (err) + return err; + + state->pc = ptrauth_strip_insn_pac(state->pc); + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + if (tsk->ret_stack && + (state->pc == (unsigned long)return_to_handler)) { + unsigned long orig_pc; + /* + * This is a case where function graph tracer has + * modified a return address (LR) in a stack frame + * to hook a function return. + * So replace it to an original value. + */ + orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc, + (void *)state->fp); + if (WARN_ON_ONCE(state->pc == orig_pc)) + return -EINVAL; + state->pc = orig_pc; + } +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ +#ifdef CONFIG_KRETPROBES + if (is_kretprobe_trampoline(state->pc)) + state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur); +#endif + + return 0; +} +NOKPROBE_SYMBOL(unwind_next); + +static void notrace unwind(struct unwind_state *state, + stack_trace_consume_fn consume_entry, void *cookie) +{ + while (1) { + int ret; + + if (!consume_entry(cookie, state->pc)) + break; + ret = unwind_next(state); + if (ret < 0) + break; + } +} +NOKPROBE_SYMBOL(unwind); + static bool dump_backtrace_entry(void *arg, unsigned long where) { char *loglvl = arg; diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c index 900324b7a08f..acbe272ecb32 100644 --- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c +++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c @@ -39,6 +39,58 @@ static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc) DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace); +static bool on_overflow_stack(unsigned long sp, unsigned long size, + struct stack_info *info) +{ + unsigned long low = (unsigned long)this_cpu_ptr(overflow_stack); + unsigned long high = low + OVERFLOW_STACK_SIZE; + + return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info); +} + +static bool on_hyp_stack(unsigned long sp, unsigned long size, + struct stack_info *info) +{ + struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params); + unsigned long high = params->stack_hyp_va; + unsigned long low = high - PAGE_SIZE; + + return on_stack(sp, size, low, high, STACK_TYPE_HYP, info); +} + +static bool on_accessible_stack(const struct task_struct *tsk, + unsigned long sp, unsigned long size, + struct stack_info *info) +{ + if (info) + info->type = STACK_TYPE_UNKNOWN; + + return (on_overflow_stack(sp, size, info) || + on_hyp_stack(sp, size, info)); +} + +static int unwind_next(struct unwind_state *state) +{ + struct stack_info info; + + return unwind_next_common(state, &info, on_accessible_stack, NULL); +} + +static void notrace unwind(struct unwind_state *state, + stack_trace_consume_fn consume_entry, + void *cookie) +{ + while (1) { + int ret; + + if (!consume_entry(cookie, state->pc)) + break; + ret = unwind_next(state); + if (ret < 0) + break; + } +} + /* * pkvm_save_backtrace_entry - Saves a protected nVHE HYP stacktrace entry * diff --git a/arch/arm64/kvm/stacktrace.c b/arch/arm64/kvm/stacktrace.c index 9812aefdcfb4..4d5fec3175ff 100644 --- a/arch/arm64/kvm/stacktrace.c +++ b/arch/arm64/kvm/stacktrace.c @@ -21,6 +21,61 @@ #include +static bool on_overflow_stack(unsigned long 
sp, unsigned long size, + struct stack_info *info) +{ + struct kvm_nvhe_stacktrace_info *stacktrace_info + = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); + unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base; + unsigned long high = low + OVERFLOW_STACK_SIZE; + + return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info); +} + +static bool on_hyp_stack(unsigned long sp, unsigned long size, + struct stack_info *info) +{ + struct kvm_nvhe_stacktrace_info *stacktrace_info + = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); + unsigned long low = (unsigned long)stacktrace_info->stack_base; + unsigned long high = low + PAGE_SIZE; + + return on_stack(sp, size, low, high, STACK_TYPE_HYP, info); +} + +static bool on_accessible_stack(const struct task_struct *tsk, + unsigned long sp, unsigned long size, + struct stack_info *info) +{ + if (info) + info->type = STACK_TYPE_UNKNOWN; + + return (on_overflow_stack(sp, size, info) || + on_hyp_stack(sp, size, info)); +} + +static int unwind_next(struct unwind_state *state) +{ + struct stack_info info; + + return unwind_next_common(state, &info, on_accessible_stack, + kvm_nvhe_stack_kern_va); +} + +static void unwind(struct unwind_state *state, + stack_trace_consume_fn consume_entry, void *cookie) +{ + while (1) { + int ret; + + if (!consume_entry(cookie, state->pc)) + break; + ret = unwind_next(state); + if (ret < 0) + break; + } +} + /* * kvm_nvhe_dump_backtrace_entry - Symbolize and print an nVHE backtrace entry * -- cgit v1.2.3
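Taken together, the series leaves each unwinder supplying its own
on_accessible_stack() and unwind()/unwind_next(), with
unwind_next_common() as the only shared step. As a closing sketch (not
taken from any patch above; everything named example_*, plus the stack
bounds, is invented), a new unwinder would plug into the common code
like this:

	/*
	 * Illustrative only: the per-unwinder pieces in the shape settled
	 * on by commit 4e00532f3736. example_stack_base and
	 * EXAMPLE_STACK_SIZE are assumed bounds; STACK_TYPE_HYP is picked
	 * arbitrarily from the existing enum.
	 */
	static bool example_on_accessible_stack(const struct task_struct *tsk,
						unsigned long sp, unsigned long size,
						struct stack_info *info)
	{
		unsigned long low = example_stack_base;
		unsigned long high = low + EXAMPLE_STACK_SIZE;

		if (info)
			info->type = STACK_TYPE_UNKNOWN;

		/* on_stack() is the shared bounds check from stacktrace/common.h */
		return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
	}

	static int example_unwind_next(struct unwind_state *state)
	{
		struct stack_info info;

		/* NULL translate_fp: fp is already a current-address-space pointer */
		return unwind_next_common(state, &info,
					  example_on_accessible_stack, NULL);
	}

Each unwind_next_common() step reads the next frame record as a pair of
longs, {fp, lr} at fp and fp + 8, which is why a misaligned or
inaccessible fp terminates the walk with -EINVAL.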