author	Eric Biggers <ebiggers@google.com>	2022-11-18 11:44:14 -0800
committer	Herbert Xu <herbert@gondor.apana.org.au>	2022-11-25 17:39:19 +0800
commit	19940ebbb59c12146d05c5f8acd873197b290648
tree	987dddfc44f71eec4175b5b466f37e6d6770fe53 /arch/x86/crypto
parent	32f34bf7e44eeaa241fb845d6f52af5104bc30fd
crypto: x86/sha256 - fix possible crash with CFI enabled
sha256_transform_ssse3(), sha256_transform_avx(), sha256_transform_rorx(),
and sha256_ni_transform() are called via indirect function calls.
Therefore they need to use SYM_TYPED_FUNC_START instead of SYM_FUNC_START
to cause their type hashes to be emitted when the kernel is built with
CONFIG_CFI_CLANG=y.  Otherwise, the code crashes with a CFI failure (if
the compiler didn't happen to optimize out the indirect calls).

Fixes: ccace936eec7 ("x86: Add types to indirectly called assembly functions")
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Sami Tolvanen <samitolvanen@google.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
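For context: the SHA-256 glue code reaches these assembly routines through
a function pointer, so each call is indirect and gets a kCFI check.  A
minimal C sketch of that pattern, modeled on the style of
arch/x86/crypto/sha256_ssse3_glue.c (simplified for illustration; not the
verbatim kernel code):

	#include <linux/linkage.h>		/* asmlinkage */
	#include <crypto/internal/hash.h>	/* struct shash_desc */
	#include <crypto/sha256_base.h>		/* sha256_block_fn, sha256_base_do_update() */

	/* The assembly routine, reached only via a function pointer below. */
	asmlinkage void sha256_transform_avx(struct sha256_state *state,
					     const u8 *data, int blocks);

	static int _sha256_update(struct shash_desc *desc, const u8 *data,
				  unsigned int len, sha256_block_fn *block_fn)
	{
		/*
		 * Indirect call: block_fn may point at sha256_transform_avx,
		 * sha256_transform_rorx, etc.  With CONFIG_CFI_CLANG=y the
		 * compiler compares the type hash stored just before the
		 * callee's entry point against the hash for sha256_block_fn;
		 * an assembly callee declared with plain SYM_FUNC_START
		 * carries no hash, so the check fails here.
		 */
		return sha256_base_do_update(desc, data, len, block_fn);
	}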
Diffstat (limited to 'arch/x86/crypto')
-rw-r--r--	arch/x86/crypto/sha256-avx-asm.S	3
-rw-r--r--	arch/x86/crypto/sha256-avx2-asm.S	3
-rw-r--r--	arch/x86/crypto/sha256-ssse3-asm.S	3
-rw-r--r--	arch/x86/crypto/sha256_ni_asm.S	3
4 files changed, 8 insertions(+), 4 deletions(-)
diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
index 3baa1ec39097..06ea30c20828 100644
--- a/arch/x86/crypto/sha256-avx-asm.S
+++ b/arch/x86/crypto/sha256-avx-asm.S
@@ -48,6 +48,7 @@
 ########################################################################
 
 #include <linux/linkage.h>
+#include <linux/cfi_types.h>
 
 ## assume buffers not aligned
 #define VMOVDQ vmovdqu
@@ -346,7 +347,7 @@ a = TMP_
 ## arg 3 : Num blocks
 ########################################################################
 .text
-SYM_FUNC_START(sha256_transform_avx)
+SYM_TYPED_FUNC_START(sha256_transform_avx)
 .align 32
 	pushq	%rbx
 	pushq	%r12
diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
index 9bcdbc47b8b4..2d2be531a11e 100644
--- a/arch/x86/crypto/sha256-avx2-asm.S
+++ b/arch/x86/crypto/sha256-avx2-asm.S
@@ -49,6 +49,7 @@
 ########################################################################
 
 #include <linux/linkage.h>
+#include <linux/cfi_types.h>
 
 ## assume buffers not aligned
 #define VMOVDQ vmovdqu
@@ -523,7 +524,7 @@ STACK_SIZE = _CTX + _CTX_SIZE
 ## arg 3 : Num blocks
 ########################################################################
 .text
-SYM_FUNC_START(sha256_transform_rorx)
+SYM_TYPED_FUNC_START(sha256_transform_rorx)
 .align 32
 	pushq	%rbx
 	pushq	%r12
diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
index c4a5db612c32..7db28839108d 100644
--- a/arch/x86/crypto/sha256-ssse3-asm.S
+++ b/arch/x86/crypto/sha256-ssse3-asm.S
@@ -47,6 +47,7 @@
 ########################################################################
 
 #include <linux/linkage.h>
+#include <linux/cfi_types.h>
 
 ## assume buffers not aligned
 #define MOVDQ movdqu
@@ -355,7 +356,7 @@ a = TMP_
 ## arg 3 : Num blocks
 ########################################################################
 .text
-SYM_FUNC_START(sha256_transform_ssse3)
+SYM_TYPED_FUNC_START(sha256_transform_ssse3)
 .align 32
 	pushq	%rbx
 	pushq	%r12
diff --git a/arch/x86/crypto/sha256_ni_asm.S b/arch/x86/crypto/sha256_ni_asm.S
index 94d50dd27cb5..47f93937f798 100644
--- a/arch/x86/crypto/sha256_ni_asm.S
+++ b/arch/x86/crypto/sha256_ni_asm.S
@@ -54,6 +54,7 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/cfi_types.h>
 
 #define DIGEST_PTR	%rdi	/* 1st arg */
 #define DATA_PTR	%rsi	/* 2nd arg */
@@ -97,7 +98,7 @@
 
 .text
 .align 32
-SYM_FUNC_START(sha256_ni_transform)
+SYM_TYPED_FUNC_START(sha256_ni_transform)
 
 	shl	$6, NUM_BLKS	/* convert to bytes */
 	jz	.Ldone_hash
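For reference, SYM_TYPED_FUNC_START comes from the <linux/cfi_types.h>
header added above.  A hedged sketch of its effect, based on the generic
fallback expansion (the actual x86 version wraps the hash word in a small
__cfi_<name> preamble stub rather than emitting it bare):

	# Type hash word placed directly before the entry point, so a
	# kCFI-instrumented indirect call can read it and compare:
	.4byte	__kcfi_typeid_sha256_ni_transform
	SYM_FUNC_START(sha256_ni_transform)

The __kcfi_typeid_<name> symbol is generated by the compiler from the C
prototype, which is why the hash at the callee matches the hash expected
at every indirect call site using the same function type.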