From 46a2b02d232ece64af43e9ab0b4c64a7a756e58e Mon Sep 17 00:00:00 2001
From: Mark Rutland <mark.rutland@arm.com>
Date: Mon, 2 Aug 2021 15:07:30 +0100
Subject: arm64: entry: consolidate entry/exit helpers

To make the various entry/exit helpers easier to understand and easier
to compare, this patch moves all the entry/exit helpers to be adjacent
at the top of entry-common.c, rather than being spread out throughout
the file.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Joey Gouly <joey.gouly@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Will Deacon <will@kernel.org>
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Link: https://lore.kernel.org/r/20210802140733.52716-2-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/kernel/entry-common.c | 84 ++++++++++++++++++++--------------------
 1 file changed, 42 insertions(+), 42 deletions(-)

diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index db8b2e2d02c2..6f7a98d8d60f 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -75,6 +75,24 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
 	}
 }
 
+asmlinkage void noinstr enter_from_user_mode(void)
+{
+	lockdep_hardirqs_off(CALLER_ADDR0);
+	CT_WARN_ON(ct_state() != CONTEXT_USER);
+	user_exit_irqoff();
+	trace_hardirqs_off_finish();
+}
+
+asmlinkage void noinstr exit_to_user_mode(void)
+{
+	mte_check_tfsr_exit();
+
+	trace_hardirqs_on_prepare();
+	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+	user_enter_irqoff();
+	lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
 static void noinstr arm64_enter_nmi(struct pt_regs *regs)
 {
 	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
@@ -105,6 +123,30 @@ static void noinstr arm64_exit_nmi(struct pt_regs *regs)
 	__nmi_exit();
 }
 
+static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
+{
+	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
+
+	lockdep_hardirqs_off(CALLER_ADDR0);
+	rcu_nmi_enter();
+
+	trace_hardirqs_off_finish();
+}
+
+static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
+{
+	bool restore = regs->lockdep_hardirqs;
+
+	if (restore) {
+		trace_hardirqs_on_prepare();
+		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+	}
+
+	rcu_nmi_exit();
+	if (restore)
+		lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
 static void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
 {
 	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
@@ -265,30 +307,6 @@ static void noinstr el1_undef(struct pt_regs *regs)
 	exit_to_kernel_mode(regs);
 }
 
-static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
-{
-	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
-
-	lockdep_hardirqs_off(CALLER_ADDR0);
-	rcu_nmi_enter();
-
-	trace_hardirqs_off_finish();
-}
-
-static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
-{
-	bool restore = regs->lockdep_hardirqs;
-
-	if (restore) {
-		trace_hardirqs_on_prepare();
-		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
-	}
-
-	rcu_nmi_exit();
-	if (restore)
-		lockdep_hardirqs_on(CALLER_ADDR0);
-}
-
 static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
 {
 	unsigned long far = read_sysreg(far_el1);
@@ -382,24 +400,6 @@ asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
 	arm64_exit_nmi(regs);
 }
 
-asmlinkage void noinstr enter_from_user_mode(void)
-{
-	lockdep_hardirqs_off(CALLER_ADDR0);
-	CT_WARN_ON(ct_state() != CONTEXT_USER);
-	user_exit_irqoff();
-	trace_hardirqs_off_finish();
-}
-
-asmlinkage void noinstr exit_to_user_mode(void)
-{
-	mte_check_tfsr_exit();
-
-	trace_hardirqs_on_prepare();
-	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
-	user_enter_irqoff();
-	lockdep_hardirqs_on(CALLER_ADDR0);
-}
-
 static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
 {
 	unsigned long far = read_sysreg(far_el1);
-- 
cgit v1.2.3
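
With the helpers now adjacent, the symmetry the consolidation is meant to
expose is easy to see: exit_to_user_mode() unwinds, in reverse order, the
state that enter_from_user_mode() establishes. Below is the pair from the
diff above with comments added for comparison (the comments are editorial
annotations, not part of the patch):

asmlinkage void noinstr enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);	/* tell lockdep IRQs are off */
	CT_WARN_ON(ct_state() != CONTEXT_USER);	/* sanity-check context tracking */
	user_exit_irqoff();			/* context tracking: user -> kernel */
	trace_hardirqs_off_finish();		/* tracing is safe from here on */
}

asmlinkage void noinstr exit_to_user_mode(void)
{
	mte_check_tfsr_exit();			/* MTE check while instrumentation is still allowed */

	trace_hardirqs_on_prepare();		/* last tracing call before EL0 */
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	user_enter_irqoff();			/* context tracking: kernel -> user */
	lockdep_hardirqs_on(CALLER_ADDR0);	/* tell lockdep IRQs are on again */
}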
From bc29b71f53b13c9e90a53a42d2431228a869b459 Mon Sep 17 00:00:00 2001
From: Mark Rutland <mark.rutland@arm.com>
Date: Mon, 2 Aug 2021 15:07:31 +0100
Subject: arm64: entry: clarify entry/exit helpers

When entering an exception, we must perform irq/context state management
before we can use instrumentable C code. Similarly, when exiting an
exception we cannot use instrumentable C code after we perform irq/context
state management.

Originally, we'd intended that the enter_from_*() and exit_to_*() helpers
would enforce this by virtue of being the first and last functions called,
respectively, in an exception handler. However, as they now call
instrumentable code themselves, this is not as clearly true.

To make this more robust, this patch splits the irq/context state
management into separate helpers, with all the helpers commented to make
their intended purpose more obvious.

In exit_to_kernel_mode() we'll now check TFSR_EL1 before we assert that
IRQs are disabled, but this ordering is not important, and other than this
there should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Joey Gouly <joey.gouly@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Will Deacon <will@kernel.org>
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Link: https://lore.kernel.org/r/20210802140733.52716-3-mark.rutland@arm.com
[catalin.marinas@arm.com: comment typos fix-up]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/kernel/entry-common.c | 70 ++++++++++++++++++++++++++++++++++++----
 1 file changed, 63 insertions(+), 7 deletions(-)

diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 6f7a98d8d60f..c4923e3ba065 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -26,10 +26,14 @@
 #include
 
 /*
+ * Handle IRQ/context state management when entering from kernel mode.
+ * Before this function is called it is not safe to call regular kernel code,
+ * instrumentable code, or any code which may trigger an exception.
+ *
  * This is intended to match the logic in irqentry_enter(), handling the kernel
  * mode transitions only.
  */
-static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
+static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs)
 {
 	regs->exit_rcu = false;
 
@@ -45,20 +49,26 @@ static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
 	lockdep_hardirqs_off(CALLER_ADDR0);
 	rcu_irq_enter_check_tick();
 	trace_hardirqs_off_finish();
+}
 
+static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
+{
+	__enter_from_kernel_mode(regs);
 	mte_check_tfsr_entry();
 }
 
 /*
+ * Handle IRQ/context state management when exiting to kernel mode.
+ * After this function returns it is not safe to call regular kernel code,
+ * instrumentable code, or any code which may trigger an exception.
+ *
  * This is intended to match the logic in irqentry_exit(), handling the kernel
  * mode transitions only, and with preemption handled elsewhere.
  */
-static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
+static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
 {
 	lockdep_assert_irqs_disabled();
 
-	mte_check_tfsr_exit();
-
 	if (interrupts_enabled(regs)) {
 		if (regs->exit_rcu) {
 			trace_hardirqs_on_prepare();
@@ -75,7 +85,18 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
 	}
 }
 
-asmlinkage void noinstr enter_from_user_mode(void)
+static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
+{
+	mte_check_tfsr_exit();
+	__exit_to_kernel_mode(regs);
+}
+
+/*
+ * Handle IRQ/context state management when entering from user mode.
+ * Before this function is called it is not safe to call regular kernel code,
+ * instrumentable code, or any code which may trigger an exception.
+ */
+static __always_inline void __enter_from_user_mode(void)
 {
 	lockdep_hardirqs_off(CALLER_ADDR0);
 	CT_WARN_ON(ct_state() != CONTEXT_USER);
@@ -83,9 +104,18 @@ asmlinkage void noinstr enter_from_user_mode(void)
 	trace_hardirqs_off_finish();
 }
 
-asmlinkage void noinstr exit_to_user_mode(void)
+asmlinkage void noinstr enter_from_user_mode(void)
+{
+	__enter_from_user_mode();
+}
+
+/*
+ * Handle IRQ/context state management when exiting to user mode.
+ * After this function returns it is not safe to call regular kernel code,
+ * instrumentable code, or any code which may trigger an exception.
+ */
+static __always_inline void __exit_to_user_mode(void)
 {
-	mte_check_tfsr_exit();
 
 	trace_hardirqs_on_prepare();
 	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
@@ -93,6 +123,17 @@ asmlinkage void noinstr exit_to_user_mode(void)
 	lockdep_hardirqs_on(CALLER_ADDR0);
 }
 
+asmlinkage void noinstr exit_to_user_mode(void)
+{
+	mte_check_tfsr_exit();
+	__exit_to_user_mode();
+}
+
+/*
+ * Handle IRQ/context state management when entering an NMI from user/kernel
+ * mode. Before this function is called it is not safe to call regular kernel
+ * code, instrumentable code, or any code which may trigger an exception.
+ */
 static void noinstr arm64_enter_nmi(struct pt_regs *regs)
 {
 	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
@@ -106,6 +147,11 @@ static void noinstr arm64_enter_nmi(struct pt_regs *regs)
 	ftrace_nmi_enter();
 }
 
+/*
+ * Handle IRQ/context state management when exiting an NMI from user/kernel
+ * mode. After this function returns it is not safe to call regular kernel
+ * code, instrumentable code, or any code which may trigger an exception.
+ */
 static void noinstr arm64_exit_nmi(struct pt_regs *regs)
 {
 	bool restore = regs->lockdep_hardirqs;
@@ -123,6 +169,11 @@ static void noinstr arm64_exit_nmi(struct pt_regs *regs)
 	__nmi_exit();
 }
 
+/*
+ * Handle IRQ/context state management when entering a debug exception from
+ * kernel mode. Before this function is called it is not safe to call regular
+ * kernel code, instrumentable code, or any code which may trigger an exception.
+ */
 static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
 {
 	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
@@ -133,6 +184,11 @@ static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
 	trace_hardirqs_off_finish();
 }
 
+/*
+ * Handle IRQ/context state management when exiting a debug exception from
+ * kernel mode. After this function returns it is not safe to call regular
+ * kernel code, instrumentable code, or any code which may trigger an exception.
+ */
 static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
 {
 	bool restore = regs->lockdep_hardirqs;
-- 
cgit v1.2.3
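
The structure this patch introduces is worth spelling out, since the next
two patches build on it: each boundary gets a raw __always_inline helper
containing only the irq/context state management, plus a wrapper that can
safely do additional, potentially instrumentable work once the raw logic
has run. A condensed sketch of the kernel-mode entry pair from the diff
above, with the bodies elided:

/*
 * Raw irq/context state management. This must run before anything else
 * on entry, so it must not call instrumentable code, and __always_inline
 * ensures no out-of-line (and hence traceable) copy is generated.
 */
static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs)
{
	/* lockdep, RCU and irq-flag tracing bookkeeping only */
}

/*
 * The noinstr wrapper can then perform work which may itself be
 * instrumented (here, the MTE TFSR check), strictly after the raw
 * entry logic has run.
 */
static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	__enter_from_kernel_mode(regs);
	mte_check_tfsr_entry();
}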
From 4d1c2ee2709fd6e1655206cafcdb22737f5d7379 Mon Sep 17 00:00:00 2001
From: Mark Rutland <mark.rutland@arm.com>
Date: Mon, 2 Aug 2021 15:07:32 +0100
Subject: arm64: entry: move bulk of ret_to_user to C

In `ret_to_user` we perform some conditional work depending on the
thread flags, then perform some IRQ/context tracking which is intended
to balance with the IRQ/context tracking performed in the entry C code.

For simplicity and consistency, it would be preferable to move this all
to C. As a step towards that, this patch moves the conditional work and
IRQ/context tracking into a C helper function. To aid bisectability,
this is called from the `ret_to_user` assembly, and a subsequent patch
will move the call to C code.

As local_daif_mask() handles all necessary tracing and PMR manipulation,
we no longer need to handle this explicitly.

As we call exit_to_user_mode() directly, the `user_enter_irqoff` macro
is no longer used, and can be removed. As enter_from_user_mode() and
exit_to_user_mode() are no longer called from assembly, these can be
made static, and as these are typically very small, they are marked
__always_inline to avoid the overhead of a function call.

For now, enablement of single-step is left in entry.S, and for this we
still need to read the flags in ret_to_user(). It is safe to read this
separately as TIF_SINGLESTEP is not part of _TIF_WORK_MASK.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Joey Gouly <joey.gouly@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Will Deacon <will@kernel.org>
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Link: https://lore.kernel.org/r/20210802140733.52716-4-mark.rutland@arm.com
[catalin.marinas@arm.com: removed unused gic_prio_kentry_setup macro]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/include/asm/exception.h |  5 ++--
 arch/arm64/kernel/entry-common.c   | 21 +++++++++++++++--
 arch/arm64/kernel/entry.S          | 48 +++-----------------------------------
 arch/arm64/kernel/signal.c         |  3 +--
 4 files changed, 26 insertions(+), 51 deletions(-)

diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 4afbc45b8bb0..339477dca551 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -55,8 +55,8 @@ asmlinkage void el0t_32_error_handler(struct pt_regs *regs);
 
 asmlinkage void call_on_irq_stack(struct pt_regs *regs,
 				  void (*func)(struct pt_regs *));
-asmlinkage void enter_from_user_mode(void);
-asmlinkage void exit_to_user_mode(void);
+asmlinkage void asm_exit_to_user_mode(struct pt_regs *regs);
+
 void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs);
 void do_undefinstr(struct pt_regs *regs);
 void do_bti(struct pt_regs *regs);
@@ -73,6 +73,7 @@ void do_el0_svc(struct pt_regs *regs);
 void do_el0_svc_compat(struct pt_regs *regs);
 void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr);
 void do_serror(struct pt_regs *regs, unsigned int esr);
+void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags);
 
 void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far);
 #endif	/* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index c4923e3ba065..3da4859c43a1 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -104,7 +104,7 @@ static __always_inline void __enter_from_user_mode(void)
 	trace_hardirqs_off_finish();
 }
 
-asmlinkage void noinstr enter_from_user_mode(void)
+static __always_inline void enter_from_user_mode(void)
 {
 	__enter_from_user_mode();
 }
@@ -123,12 +123,29 @@ static __always_inline void __exit_to_user_mode(void)
 	lockdep_hardirqs_on(CALLER_ADDR0);
 }
 
-asmlinkage void noinstr exit_to_user_mode(void)
+static __always_inline void exit_to_user_mode(void)
 {
 	mte_check_tfsr_exit();
 	__exit_to_user_mode();
 }
 
+static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)
+{
+	unsigned long flags;
+
+	local_daif_mask();
+
+	flags = READ_ONCE(current_thread_info()->flags);
+	if (unlikely(flags & _TIF_WORK_MASK))
+		do_notify_resume(regs, flags);
+}
+
+asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
+{
+	prepare_exit_to_user_mode(regs);
+	exit_to_user_mode();
+}
+
 /*
  * Handle IRQ/context state management when entering an NMI from user/kernel
  * mode. Before this function is called it is not safe to call regular kernel
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 863d44f73028..b59eee28cc1b 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -29,16 +29,6 @@
 #include
 #include
 
-/*
- * Context tracking and irqflag tracing need to instrument transitions between
- * user and kernel mode.
- */
-	.macro user_enter_irqoff
-#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
-	bl	exit_to_user_mode
-#endif
-	.endm
-
 	.macro	clear_gp_regs
 	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
 	mov	x\n, xzr
@@ -474,18 +464,6 @@ SYM_CODE_END(__swpan_exit_el0)
 /* GPRs used by entry code */
 tsk	.req	x28		// current thread_info
 
-/*
- * Interrupt handling.
- */
-	.macro	gic_prio_kentry_setup, tmp:req
-#ifdef CONFIG_ARM64_PSEUDO_NMI
-	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
-	mov	\tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
-	msr_s	SYS_ICC_PMR_EL1, \tmp
-	alternative_else_nop_endif
-#endif
-	.endm
-
 	.text
@@ -585,37 +563,17 @@ SYM_CODE_START_LOCAL(ret_to_kernel)
 	kernel_exit 1
 SYM_CODE_END(ret_to_kernel)
 
-/*
- * "slow" syscall return path.
- */
 SYM_CODE_START_LOCAL(ret_to_user)
-	disable_daif
-	gic_prio_kentry_setup tmp=x3
-#ifdef CONFIG_TRACE_IRQFLAGS
-	bl	trace_hardirqs_off
-#endif
-	ldr	x19, [tsk, #TSK_TI_FLAGS]
-	and	x2, x19, #_TIF_WORK_MASK
-	cbnz	x2, work_pending
-finish_ret_to_user:
-	user_enter_irqoff
+	mov	x0, sp
+	bl	asm_exit_to_user_mode
 	/* Ignore asynchronous tag check faults in the uaccess routines */
 	clear_mte_async_tcf
+	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
 	enable_step_tsk x19, x2
 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
 	bl	stackleak_erase
 #endif
 	kernel_exit 0
-
-/*
- * Ok, we need to do extra processing, enter the slow path.
- */
-work_pending:
-	mov	x0, sp				// 'regs'
-	mov	x1, x19
-	bl	do_notify_resume
-	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
-	b	finish_ret_to_user
 SYM_CODE_END(ret_to_user)
 
 .popsection // .entry.text
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index f8192f4ae0b8..53c2c85efb34 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -924,8 +924,7 @@ static bool cpu_affinity_invalid(struct pt_regs *regs)
 		       system_32bit_el0_cpumask());
 }
 
-asmlinkage void do_notify_resume(struct pt_regs *regs,
-				 unsigned long thread_flags)
+void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
 {
 	do {
 		if (thread_flags & _TIF_NEED_RESCHED) {
-- 
cgit v1.2.3
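
Note that prepare_exit_to_user_mode() checks _TIF_WORK_MASK only once:
the retry loop lives in do_notify_resume(), which runs each work item
with DAIF unmasked and must therefore re-mask and re-read the flags
before returning. A simplified sketch of that loop's structure,
condensed from arch/arm64/kernel/signal.c (the work items shown are
representative, not the complete set):

void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
{
	do {
		if (thread_flags & _TIF_NEED_RESCHED) {
			local_daif_restore(DAIF_PROCCTX_NOIRQ);
			schedule();
		} else {
			local_daif_restore(DAIF_PROCCTX);
			/* signals, uprobes, TIF_NOTIFY_RESUME, FP state, ... */
		}

		/*
		 * The work above ran with interrupts enabled, so new work
		 * may have been queued: re-mask and re-check the flags.
		 */
		local_daif_mask();
		thread_flags = READ_ONCE(current_thread_info()->flags);
	} while (thread_flags & _TIF_WORK_MASK);
}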
From e130338eed5de0f5b6b913e8619d096153cbccb0 Mon Sep 17 00:00:00 2001
From: Mark Rutland <mark.rutland@arm.com>
Date: Mon, 2 Aug 2021 15:07:33 +0100
Subject: arm64: entry: call exit_to_user_mode() from C

When handling an exception from EL0, we perform the entry work in that
exception's C handler, and once the C handler has finished, we return
back to the entry assembly. Subsequently in the common `ret_to_user`
assembly we perform the exit work that balances with the entry work.
This can be somewhat difficult to follow, and makes it hard to rework
the return paths (e.g. to pass additional context to the exit code, or
to have exception return logic for specific exceptions).

This patch reworks the entry code such that each EL0 C exception handler
is responsible for both the entry and exit work. This clearly balances
the two (and will permit additional variation in future), and avoids an
unnecessary bounce between assembly and C in the common case, leaving
`ret_from_fork` as the only place assembly has to call the exit code.
This means that the exit work is now inlined into the C handler, which
is already the case for the entry work, and allows the compiler to
generate better code (e.g. by immediately returning when there is no
exit work to perform).

To align with other exception entry/exit helpers, enter_from_user_mode()
is updated to take the EL0 pt_regs as a parameter, though this is
currently unused.

There should be no functional change as a result of this patch. However,
this should lead to slightly better backtraces when an error is
encountered within do_notify_resume(), as the C handler should appear in
the backtrace, indicating the specific exception that the kernel was
entered with.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Joey Gouly <joey.gouly@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Will Deacon <will@kernel.org>
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Link: https://lore.kernel.org/r/20210802140733.52716-5-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/kernel/entry-common.c | 74 +++++++++++++++++++++++++---------------
 arch/arm64/kernel/entry.S        |  4 +--
 2 files changed, 48 insertions(+), 30 deletions(-)

diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 3da4859c43a1..32f9796c4ffe 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -104,7 +104,7 @@ static __always_inline void __enter_from_user_mode(void)
 	trace_hardirqs_off_finish();
 }
 
-static __always_inline void enter_from_user_mode(void)
+static __always_inline void enter_from_user_mode(struct pt_regs *regs)
 {
 	__enter_from_user_mode();
 }
@@ -116,19 +116,12 @@ static __always_inline void enter_from_user_mode(struct pt_regs *regs)
  */
 static __always_inline void __exit_to_user_mode(void)
 {
-
 	trace_hardirqs_on_prepare();
 	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
 	user_enter_irqoff();
 	lockdep_hardirqs_on(CALLER_ADDR0);
 }
 
-static __always_inline void exit_to_user_mode(void)
-{
-	mte_check_tfsr_exit();
-	__exit_to_user_mode();
-}
-
 static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)
 {
 	unsigned long flags;
@@ -140,10 +133,16 @@ static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)
 		do_notify_resume(regs, flags);
 }
 
-asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
+static __always_inline void exit_to_user_mode(struct pt_regs *regs)
 {
 	prepare_exit_to_user_mode(regs);
-	exit_to_user_mode();
+	mte_check_tfsr_exit();
+	__exit_to_user_mode();
+}
+
+asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
+{
+	exit_to_user_mode(regs);
 }
 
 /*
@@ -477,9 +476,10 @@ static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
 {
 	unsigned long far = read_sysreg(far_el1);
 
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_mem_abort(far, esr, regs);
+	exit_to_user_mode(regs);
 }
 
 static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
@@ -494,37 +494,42 @@ static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
 	if (!is_ttbr0_addr(far))
 		arm64_apply_bp_hardening();
 
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_mem_abort(far, esr, regs);
+	exit_to_user_mode(regs);
 }
 
 static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_fpsimd_acc(esr, regs);
+	exit_to_user_mode(regs);
 }
 
 static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_sve_acc(esr, regs);
+	exit_to_user_mode(regs);
 }
 
 static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_fpsimd_exc(esr, regs);
+	exit_to_user_mode(regs);
 }
 
 static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_sysinstr(esr, regs);
+	exit_to_user_mode(regs);
 }
 
 static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
@@ -534,37 +539,42 @@ static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
 	if (!is_ttbr0_addr(instruction_pointer(regs)))
 		arm64_apply_bp_hardening();
 
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_sp_pc_abort(far, esr, regs);
+	exit_to_user_mode(regs);
 }
 
 static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_sp_pc_abort(regs->sp, esr, regs);
+	exit_to_user_mode(regs);
 }
 
 static void noinstr el0_undef(struct pt_regs *regs)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_undefinstr(regs);
+	exit_to_user_mode(regs);
 }
 
 static void noinstr el0_bti(struct pt_regs *regs)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_bti(regs);
+	exit_to_user_mode(regs);
 }
 
 static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	bad_el0_sync(regs, 0, esr);
+	exit_to_user_mode(regs);
 }
 
 static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
@@ -572,23 +582,26 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
 	/* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
 	unsigned long far = read_sysreg(far_el1);
 
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	do_debug_exception(far, esr, regs);
 	local_daif_restore(DAIF_PROCCTX);
+	exit_to_user_mode(regs);
 }
 
 static void noinstr el0_svc(struct pt_regs *regs)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	cortex_a76_erratum_1463225_svc_handler();
 	do_el0_svc(regs);
+	exit_to_user_mode(regs);
 }
 
 static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_ptrauth_fault(regs, esr);
+	exit_to_user_mode(regs);
 }
 
 asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
@@ -647,7 +660,7 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
 static void noinstr el0_interrupt(struct pt_regs *regs,
 				  void (*handler)(struct pt_regs *))
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 
 	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
 
@@ -655,6 +668,8 @@ static void noinstr el0_interrupt(struct pt_regs *regs,
 		arm64_apply_bp_hardening();
 
 	do_interrupt_handler(regs, handler);
+
+	exit_to_user_mode(regs);
 }
 
 static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
@@ -681,12 +696,13 @@ static void noinstr __el0_error_handler_common(struct pt_regs *regs)
 {
 	unsigned long esr = read_sysreg(esr_el1);
 
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_ERRCTX);
 	arm64_enter_nmi(regs);
 	do_serror(regs, esr);
 	arm64_exit_nmi(regs);
 	local_daif_restore(DAIF_PROCCTX);
+	exit_to_user_mode(regs);
 }
 
 asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
@@ -697,16 +713,18 @@ asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
 #ifdef CONFIG_COMPAT
 static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
 	do_cp15instr(esr, regs);
+	exit_to_user_mode(regs);
 }
 
 static void noinstr el0_svc_compat(struct pt_regs *regs)
 {
-	enter_from_user_mode();
+	enter_from_user_mode(regs);
 	cortex_a76_erratum_1463225_svc_handler();
 	do_el0_svc_compat(regs);
+	exit_to_user_mode(regs);
 }
 
 asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index b59eee28cc1b..1b45065a22d3 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -564,8 +564,6 @@ SYM_CODE_START_LOCAL(ret_to_kernel)
 SYM_CODE_END(ret_to_kernel)
 
 SYM_CODE_START_LOCAL(ret_to_user)
-	mov	x0, sp
-	bl	asm_exit_to_user_mode
 	/* Ignore asynchronous tag check faults in the uaccess routines */
 	clear_mte_async_tcf
 	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
@@ -739,6 +737,8 @@ SYM_CODE_START(ret_from_fork)
 	mov	x0, x20
 	blr	x19
1:	get_current_task tsk
+	mov	x0, sp
+	bl	asm_exit_to_user_mode
 	b	ret_to_user
SYM_CODE_END(ret_from_fork)
NOKPROBE(ret_from_fork)
-- 
cgit v1.2.3
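
Taken together, the series leaves every EL0 exception handler with the
same shape. A sketch of the resulting template, using a hypothetical
el0_example()/do_example() pair to stand in for the real handlers in the
diff above:

static void noinstr el0_example(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);		/* irq/context state entry work */
	local_daif_restore(DAIF_PROCCTX);	/* unmask; instrumentable C is now safe */
	do_example(esr, regs);			/* hypothetical handler body */
	exit_to_user_mode(regs);		/* pending-work check, then exit state work */
}

Because exit_to_user_mode() is __always_inline, the no-pending-work fast
path is folded into each noinstr handler, and a backtrace from
do_notify_resume() now includes that handler, identifying the exception
the kernel was entered with.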