author		Linus Torvalds <torvalds@linux-foundation.org>	2016-02-01 15:49:18 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-02-01 15:49:18 -0800
commit		2c923414d3963b959f65a8a6031972402e6a34a5 (patch)
tree		d53479dc181fd480d3e50213547bf75fa9bd5a90 /arch
parent		29a8ea4fbe6beda81300835a739740c35c7abcab (diff)
parent		49a20454e0eb907093ec564d4e8f3832bcbf9d53 (diff)
download	linux-2c923414d3963b959f65a8a6031972402e6a34a5.tar.bz2
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto fixes from Herbert Xu:
 "This fixes the following issues:

  API:
   - algif_hash needs to wait for init operations to complete.
   - The has_key setting for shash was always true.

  Algorithms:
   - Add missing selections of CRYPTO_HASH.
   - Fix pkcs7 authentication.

  Drivers:
   - Fix stack alignment bug in chacha20-ssse3.
   - Fix performance regression in caam due to incorrect setting.
   - Fix potential compile-only build failure of stm32"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: atmel-aes - remove calls of clk_prepare() from atomic contexts
  crypto: algif_hash - wait for crypto_ahash_init() to complete
  crypto: shash - Fix has_key setting
  hwrng: stm32 - Fix dependencies for !HAS_IOMEM archs
  crypto: ghash,poly1305 - select CRYPTO_HASH where needed
  crypto: chacha20-ssse3 - Align stack pointer to 64 bytes
  PKCS#7: Don't require SpcSpOpusInfo in Authenticode pkcs7 signatures
  crypto: caam - make write transactions bufferable on PPC platforms
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/crypto/chacha20-ssse3-x86_64.S | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/arch/x86/crypto/chacha20-ssse3-x86_64.S b/arch/x86/crypto/chacha20-ssse3-x86_64.S
index 712b13047b41..3a33124e9112 100644
--- a/arch/x86/crypto/chacha20-ssse3-x86_64.S
+++ b/arch/x86/crypto/chacha20-ssse3-x86_64.S
@@ -157,7 +157,9 @@ ENTRY(chacha20_4block_xor_ssse3)
 	# done with the slightly better performing SSSE3 byte shuffling,
 	# 7/12-bit word rotation uses traditional shift+OR.
 
-	sub		$0x40,%rsp
+	mov		%rsp,%r11
+	sub		$0x80,%rsp
+	and		$~63,%rsp
 
 	# x0..15[0-3] = s0..3[0..3]
 	movq		0x00(%rdi),%xmm1
@@ -620,6 +622,6 @@ ENTRY(chacha20_4block_xor_ssse3)
 	pxor		%xmm1,%xmm15
 	movdqu		%xmm15,0xf0(%rsi)
 
-	add		$0x40,%rsp
+	mov		%r11,%rsp
 	ret
 ENDPROC(chacha20_4block_xor_ssse3)
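
For context on the hunks above: instead of a fixed `sub $0x40,%rsp`, the patch saves the incoming stack pointer, over-allocates, and rounds %rsp down to a 64-byte boundary, so the on-stack scratch area used with aligned SSE accesses is properly aligned no matter how the function is entered. Below is a minimal standalone sketch of the same idiom, assuming GNU as with AT&T syntax; the label aligned_scratch_demo and its comments are illustrative only and are not part of the kernel source.

	.text
	.globl	aligned_scratch_demo
aligned_scratch_demo:
	mov	%rsp,%r11	# save the incoming stack pointer
	sub	$0x80,%rsp	# reserve 0x40 bytes of scratch plus up to 0x40 of alignment slack
	and	$~63,%rsp	# round %rsp down to the next 64-byte boundary ($~63 == -64)
	# ... use the 64-byte-aligned scratch area at 0x00(%rsp)..0x3f(%rsp) here ...
	mov	%r11,%rsp	# restore the caller's stack pointer
	ret

%r11 is a caller-saved scratch register in the SysV x86-64 ABI, so a leaf function can stash the original %rsp there without spilling it; the epilogue then restores %rsp from %r11 rather than undoing the subtraction, which is why the second hunk replaces `add $0x40,%rsp` with `mov %r11,%rsp`.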