author    Mark Rutland <mark.rutland@arm.com>    2021-06-07 10:46:18 +0100
committer Will Deacon <will@kernel.org>          2021-06-07 11:35:55 +0100
commit    ec841aab8d3cdd23decdcf0c47292e14627446c1 (patch)
tree      5e44c612c5e8e053cea58cbc5ab510012583d5e0 /arch/arm64/kernel/entry.S
parent    a5b43a87a7609d49ed4a453a2b99b6d36ab1e5d0 (diff)
arm64: entry: handle all vectors with C
We have 16 architectural exception vectors, and depending on kernel configuration we handle 8 or 12 of these with C code, with the remaining 8 or 4 handled as special cases in the entry assembly.

It would be nicer if the entry assembly were uniform for all exceptions, and we deferred any specific handling of the exceptions to C code. This way the entry assembly can be more easily templated without ifdeffery or special cases, and it's easier to modify the handling of these cases in future (e.g. to dump additional registers or other context).

This patch reworks the entry code so that we always have a C handler for every architectural exception vector, with the entry assembly being completely uniform. We now have to handle exceptions from EL1t and EL1h, and also have to handle exceptions from AArch32 even when the kernel is built without CONFIG_COMPAT. To make this clear and to simplify templating, we rename the top-level exception handlers with a consistent naming scheme:

  asm: <el+sp>_<regsize>_<type>
  c:   <el+sp>_<regsize>_<type>_handler

.. where:

  <el+sp> is `el1t`, `el1h`, or `el0t`
  <regsize> is `64` or `32`
  <type> is `sync`, `irq`, `fiq`, or `error`

... e.g.

  asm: el1h_64_sync
  c:   el1h_64_sync_handler

... with lower-level handlers simply using "el1" and "compat" as today.

For unexpected exceptions, this information is passed to __panic_unhandled(), so it can report the specific vector an unexpected exception was taken from, e.g.

| Unhandled 64-bit el1t sync exception

For vectors we never expect to enter legitimately, the C code is generated using a macro to avoid code duplication. The exceptions are handled via __panic_unhandled(), replacing bad_mode() (which is removed).

The `kernel_ventry` and `entry_handler` assembly macros are updated to handle the new naming scheme. In theory it should be possible to generate the entry functions at the same time as the vectors using a single table, but this will require reworking the linker script to split the two into separate sections, so for now we have separate tables.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210607094624.34689-15-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
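Concretely, the scheme above yields 16 top-level C handlers, one per architectural vector (EL1t, EL1h and 64-bit EL0 for AArch64, plus 32-bit EL0 for AArch32). Purely as an illustration of the naming, their prototypes would look roughly like the following; the exact header they are declared in (e.g. asm/exception.h) is an assumption here:

  /* Illustrative prototypes for the uniform top-level C handlers. */
  #include <linux/linkage.h>
  #include <asm/ptrace.h>

  asmlinkage void el1t_64_sync_handler(struct pt_regs *regs);
  asmlinkage void el1t_64_irq_handler(struct pt_regs *regs);
  asmlinkage void el1t_64_fiq_handler(struct pt_regs *regs);
  asmlinkage void el1t_64_error_handler(struct pt_regs *regs);

  asmlinkage void el1h_64_sync_handler(struct pt_regs *regs);
  asmlinkage void el1h_64_irq_handler(struct pt_regs *regs);
  asmlinkage void el1h_64_fiq_handler(struct pt_regs *regs);
  asmlinkage void el1h_64_error_handler(struct pt_regs *regs);

  asmlinkage void el0t_64_sync_handler(struct pt_regs *regs);
  asmlinkage void el0t_64_irq_handler(struct pt_regs *regs);
  asmlinkage void el0t_64_fiq_handler(struct pt_regs *regs);
  asmlinkage void el0t_64_error_handler(struct pt_regs *regs);

  asmlinkage void el0t_32_sync_handler(struct pt_regs *regs);
  asmlinkage void el0t_32_irq_handler(struct pt_regs *regs);
  asmlinkage void el0t_32_fiq_handler(struct pt_regs *regs);
  asmlinkage void el0t_32_error_handler(struct pt_regs *regs);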
Diffstat (limited to 'arch/arm64/kernel/entry.S')
-rw-r--r--   arch/arm64/kernel/entry.S   146
1 file changed, 45 insertions(+), 101 deletions(-)
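For reference, a minimal sketch of how the C side described in the commit message can be structured: a single __panic_unhandled() plus a macro that stamps out one handler per never-expected vector. This is illustrative only, assuming a simplified handler body, and omits details of the real entry-common.c change (NMI accounting, full register dump, ESR class decoding):

  #include <linux/kernel.h>
  #include <linux/linkage.h>
  #include <linux/smp.h>
  #include <asm/ptrace.h>
  #include <asm/sysreg.h>

  /* Report the offending vector name and ESR, then panic. */
  static void __panic_unhandled(struct pt_regs *regs, const char *vector,
                                unsigned long esr)
  {
          pr_crit("Unhandled %s exception on CPU%d, ESR 0x%08lx\n",
                  vector, smp_processor_id(), esr);
          panic("Unhandled exception");
  }

  /* Generate an el<n><sp>_<regsize>_<type>_handler() for an unexpected vector. */
  #define UNHANDLED(el, regsize, vector)                                        \
  asmlinkage void el##_##regsize##_##vector##_handler(struct pt_regs *regs)     \
  {                                                                             \
          const char *desc = #regsize "-bit " #el " " #vector;                  \
          __panic_unhandled(regs, desc, read_sysreg(esr_el1));                  \
  }

  /* e.g. exceptions from EL1 using SP_EL0 (EL1t) are never expected: */
  UNHANDLED(el1t, 64, sync)
  UNHANDLED(el1t, 64, irq)
  UNHANDLED(el1t, 64, fiq)
  UNHANDLED(el1t, 64, error)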
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index b719ac26f7d1..d43a12dfd189 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -45,16 +45,7 @@
.endr
.endm
-/*
- * Bad Abort numbers
- *-----------------
- */
-#define BAD_SYNC 0
-#define BAD_IRQ 1
-#define BAD_FIQ 2
-#define BAD_ERROR 3
-
- .macro kernel_ventry, el:req, regsize:req, label:req
+ .macro kernel_ventry, el:req, ht:req, regsize:req, label:req
.align 7
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
.if \el == 0
@@ -81,7 +72,7 @@ alternative_else_nop_endif
tbnz x0, #THREAD_SHIFT, 0f
sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp
- b el\()\el\()_\label
+ b el\el\ht\()_\regsize\()_\label
0:
/*
@@ -113,7 +104,7 @@ alternative_else_nop_endif
sub sp, sp, x0
mrs x0, tpidrro_el0
#endif
- b el\()\el\()_\label
+ b el\el\ht\()_\regsize\()_\label
.endm
.macro tramp_alias, dst, sym
@@ -504,32 +495,25 @@ tsk .req x28 // current thread_info
.align 11
SYM_CODE_START(vectors)
- kernel_ventry 1, 64, sync_invalid // Synchronous EL1t
- kernel_ventry 1, 64, irq_invalid // IRQ EL1t
- kernel_ventry 1, 64, fiq_invalid // FIQ EL1t
- kernel_ventry 1, 64, error_invalid // Error EL1t
-
- kernel_ventry 1, 64, sync // Synchronous EL1h
- kernel_ventry 1, 64, irq // IRQ EL1h
- kernel_ventry 1, 64, fiq // FIQ EL1h
- kernel_ventry 1, 64, error // Error EL1h
-
- kernel_ventry 0, 64, sync // Synchronous 64-bit EL0
- kernel_ventry 0, 64, irq // IRQ 64-bit EL0
- kernel_ventry 0, 64, fiq // FIQ 64-bit EL0
- kernel_ventry 0, 64, error // Error 64-bit EL0
-
-#ifdef CONFIG_COMPAT
- kernel_ventry 0, 32, sync_compat // Synchronous 32-bit EL0
- kernel_ventry 0, 32, irq_compat // IRQ 32-bit EL0
- kernel_ventry 0, 32, fiq_compat // FIQ 32-bit EL0
- kernel_ventry 0, 32, error_compat // Error 32-bit EL0
-#else
- kernel_ventry 0, 32, sync_invalid // Synchronous 32-bit EL0
- kernel_ventry 0, 32, irq_invalid // IRQ 32-bit EL0
- kernel_ventry 0, 32, fiq_invalid // FIQ 32-bit EL0
- kernel_ventry 0, 32, error_invalid // Error 32-bit EL0
-#endif
+ kernel_ventry 1, t, 64, sync // Synchronous EL1t
+ kernel_ventry 1, t, 64, irq // IRQ EL1t
+ kernel_ventry 1, t, 64, fiq // FIQ EL1t
+ kernel_ventry 1, t, 64, error // Error EL1t
+
+ kernel_ventry 1, h, 64, sync // Synchronous EL1h
+ kernel_ventry 1, h, 64, irq // IRQ EL1h
+ kernel_ventry 1, h, 64, fiq // FIQ EL1h
+ kernel_ventry 1, h, 64, error // Error EL1h
+
+ kernel_ventry 0, t, 64, sync // Synchronous 64-bit EL0
+ kernel_ventry 0, t, 64, irq // IRQ 64-bit EL0
+ kernel_ventry 0, t, 64, fiq // FIQ 64-bit EL0
+ kernel_ventry 0, t, 64, error // Error 64-bit EL0
+
+ kernel_ventry 0, t, 32, sync // Synchronous 32-bit EL0
+ kernel_ventry 0, t, 32, irq // IRQ 32-bit EL0
+ kernel_ventry 0, t, 32, fiq // FIQ 32-bit EL0
+ kernel_ventry 0, t, 32, error // Error 32-bit EL0
SYM_CODE_END(vectors)
#ifdef CONFIG_VMAP_STACK
@@ -560,82 +544,42 @@ __bad_stack:
ASM_BUG()
#endif /* CONFIG_VMAP_STACK */
-/*
- * Invalid mode handlers
- */
- .macro inv_entry, el, reason, regsize = 64
- kernel_entry \el, \regsize
- mov x0, sp
- mov x1, #\reason
- mrs x2, esr_el1
- bl bad_mode
- ASM_BUG()
- .endm
-
-SYM_CODE_START_LOCAL(el0_sync_invalid)
- inv_entry 0, BAD_SYNC
-SYM_CODE_END(el0_sync_invalid)
-
-SYM_CODE_START_LOCAL(el0_irq_invalid)
- inv_entry 0, BAD_IRQ
-SYM_CODE_END(el0_irq_invalid)
-
-SYM_CODE_START_LOCAL(el0_fiq_invalid)
- inv_entry 0, BAD_FIQ
-SYM_CODE_END(el0_fiq_invalid)
-
-SYM_CODE_START_LOCAL(el0_error_invalid)
- inv_entry 0, BAD_ERROR
-SYM_CODE_END(el0_error_invalid)
-SYM_CODE_START_LOCAL(el1_sync_invalid)
- inv_entry 1, BAD_SYNC
-SYM_CODE_END(el1_sync_invalid)
-
-SYM_CODE_START_LOCAL(el1_irq_invalid)
- inv_entry 1, BAD_IRQ
-SYM_CODE_END(el1_irq_invalid)
-
-SYM_CODE_START_LOCAL(el1_fiq_invalid)
- inv_entry 1, BAD_FIQ
-SYM_CODE_END(el1_fiq_invalid)
-
-SYM_CODE_START_LOCAL(el1_error_invalid)
- inv_entry 1, BAD_ERROR
-SYM_CODE_END(el1_error_invalid)
-
- .macro entry_handler el:req, regsize:req, label:req
-SYM_CODE_START_LOCAL(el\el\()_\label)
+ .macro entry_handler el:req, ht:req, regsize:req, label:req
+SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
kernel_entry \el, \regsize
mov x0, sp
- bl el\el\()_\label\()_handler
+ bl el\el\ht\()_\regsize\()_\label\()_handler
.if \el == 0
b ret_to_user
.else
b ret_to_kernel
.endif
-SYM_CODE_END(el\el\()_\label)
+SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
.endm
/*
* Early exception handlers
*/
- entry_handler 1, 64, sync
- entry_handler 1, 64, irq
- entry_handler 1, 64, fiq
- entry_handler 1, 64, error
-
- entry_handler 0, 64, sync
- entry_handler 0, 64, irq
- entry_handler 0, 64, fiq
- entry_handler 0, 64, error
-
-#ifdef CONFIG_COMPAT
- entry_handler 0, 32, sync_compat
- entry_handler 0, 32, irq_compat
- entry_handler 0, 32, fiq_compat
- entry_handler 0, 32, error_compat
-#endif
+ entry_handler 1, t, 64, sync
+ entry_handler 1, t, 64, irq
+ entry_handler 1, t, 64, fiq
+ entry_handler 1, t, 64, error
+
+ entry_handler 1, h, 64, sync
+ entry_handler 1, h, 64, irq
+ entry_handler 1, h, 64, fiq
+ entry_handler 1, h, 64, error
+
+ entry_handler 0, t, 64, sync
+ entry_handler 0, t, 64, irq
+ entry_handler 0, t, 64, fiq
+ entry_handler 0, t, 64, error
+
+ entry_handler 0, t, 32, sync
+ entry_handler 0, t, 32, irq
+ entry_handler 0, t, 32, fiq
+ entry_handler 0, t, 32, error
SYM_CODE_START_LOCAL(ret_to_kernel)
kernel_exit 1