author    Sami Tolvanen <samitolvanen@google.com>   2022-09-08 14:54:51 -0700
committer Kees Cook <keescook@chromium.org>         2022-09-26 10:13:13 -0700
commit    c50d32859e70f6dbccb7d151408eb10afbbb7965 (patch)
tree      dce8f9e7ba7a14016d5e7c8d1d198691762e20a7
parent    44f665b69c67f0a17a0c8748030ed30205532149 (diff)
arm64: Add types to indirect called assembly functions
With CONFIG_CFI_CLANG, assembly functions indirectly called from C code
must be annotated with type identifiers to pass CFI checking. Use
SYM_TYPED_FUNC_START for the indirectly called functions, and ensure we
emit `bti c` also with SYM_TYPED_FUNC_START.

Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Tested-by: Kees Cook <keescook@chromium.org>
Tested-by: Nathan Chancellor <nathan@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20220908215504.3686827-10-samitolvanen@google.com
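For readers unfamiliar with the mechanism, the minimal sketch below illustrates the pattern this patch relies on. The function my_asm_helper, the file names, and the C caller are hypothetical examples, not part of this patch: SYM_TYPED_FUNC_START records the kCFI type identifier that the compiler derives from the function's C prototype, so an indirect call through a function pointer of the matching type passes the CFI check, whereas a plain SYM_FUNC_START symbol would not carry a type identifier.

    /* hypothetical-helper.S -- assembly side (names are illustrative) */
    #include <linux/linkage.h>
    #include <linux/cfi_types.h>

    /*
     * SYM_TYPED_FUNC_START emits the CFI type identifier derived from the
     * C prototype of my_asm_helper, and (with the arm64 override added by
     * this patch) a `bti c` landing pad, so the function may be reached
     * via an indirect branch.
     */
    SYM_TYPED_FUNC_START(my_asm_helper)
    	mov	x0, #0			// return 0
    	ret
    SYM_FUNC_END(my_asm_helper)

    /* hypothetical-caller.c -- C side */
    int my_asm_helper(void);		/* prototype defines the CFI type */

    static int (*helper)(void) = my_asm_helper;

    int call_helper(void)
    {
    	/*
    	 * Indirect call: with CONFIG_CFI_CLANG the compiler checks the
    	 * callee's type identifier against the one for int (*)(void).
    	 */
    	return helper();
    }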
 arch/arm64/crypto/ghash-ce-core.S | 5 +++--
 arch/arm64/crypto/sm3-ce-core.S   | 3 ++-
 arch/arm64/include/asm/linkage.h  | 4 ++++
 arch/arm64/kernel/cpu-reset.S     | 5 +++--
 arch/arm64/mm/proc.S              | 5 +++--
 5 files changed, 15 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S
index 7868330dd54e..ebe5558929b7 100644
--- a/arch/arm64/crypto/ghash-ce-core.S
+++ b/arch/arm64/crypto/ghash-ce-core.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
SHASH .req v0
@@ -350,11 +351,11 @@ CPU_LE( rev64 T1.16b, T1.16b )
* void pmull_ghash_update(int blocks, u64 dg[], const char *src,
* struct ghash_key const *k, const char *head)
*/
-SYM_FUNC_START(pmull_ghash_update_p64)
+SYM_TYPED_FUNC_START(pmull_ghash_update_p64)
__pmull_ghash p64
SYM_FUNC_END(pmull_ghash_update_p64)
-SYM_FUNC_START(pmull_ghash_update_p8)
+SYM_TYPED_FUNC_START(pmull_ghash_update_p8)
__pmull_ghash p8
SYM_FUNC_END(pmull_ghash_update_p8)
diff --git a/arch/arm64/crypto/sm3-ce-core.S b/arch/arm64/crypto/sm3-ce-core.S
index ef97d3187cb7..ca70cfacd0d0 100644
--- a/arch/arm64/crypto/sm3-ce-core.S
+++ b/arch/arm64/crypto/sm3-ce-core.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
.irp b, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
@@ -73,7 +74,7 @@
* int blocks)
*/
.text
-SYM_FUNC_START(sm3_ce_transform)
+SYM_TYPED_FUNC_START(sm3_ce_transform)
/* load state */
ld1 {v8.4s-v9.4s}, [x0]
rev64 v8.4s, v8.4s
diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h
index 43f8c25b3fda..1436fa1cde24 100644
--- a/arch/arm64/include/asm/linkage.h
+++ b/arch/arm64/include/asm/linkage.h
@@ -39,4 +39,8 @@
SYM_START(name, SYM_L_WEAK, SYM_A_NONE) \
bti c ;
+#define SYM_TYPED_FUNC_START(name) \
+ SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) \
+ bti c ;
+
#endif
diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
index 48a8af97faa9..6b752fe89745 100644
--- a/arch/arm64/kernel/cpu-reset.S
+++ b/arch/arm64/kernel/cpu-reset.S
@@ -8,6 +8,7 @@
*/
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/sysreg.h>
#include <asm/virt.h>
@@ -28,7 +29,7 @@
* branch to what would be the reset vector. It must be executed with the
* flat identity mapping.
*/
-SYM_CODE_START(cpu_soft_restart)
+SYM_TYPED_FUNC_START(cpu_soft_restart)
mov_q x12, INIT_SCTLR_EL1_MMU_OFF
pre_disable_mmu_workaround
/*
@@ -47,6 +48,6 @@ SYM_CODE_START(cpu_soft_restart)
mov x1, x3 // arg1
mov x2, x4 // arg2
br x8
-SYM_CODE_END(cpu_soft_restart)
+SYM_FUNC_END(cpu_soft_restart)
.popsection
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 7837a69524c5..8b9f419fcad9 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
@@ -185,7 +186,7 @@ SYM_FUNC_END(cpu_do_resume)
* This is the low-level counterpart to cpu_replace_ttbr1, and should not be
* called by anything else. It can only be executed from a TTBR0 mapping.
*/
-SYM_FUNC_START(idmap_cpu_replace_ttbr1)
+SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1)
save_and_disable_daif flags=x2
__idmap_cpu_set_reserved_ttbr1 x1, x3
@@ -253,7 +254,7 @@ SYM_FUNC_END(idmap_cpu_replace_ttbr1)
SYM_DATA(__idmap_kpti_flag, .long 1)
.popsection
-SYM_FUNC_START(idmap_kpti_install_ng_mappings)
+SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
cpu .req w0
temp_pte .req x0
num_cpus .req w1