author     Eric Biggers <ebiggers@google.com>        2019-03-12 22:12:46 -0700
committer  Herbert Xu <herbert@gondor.apana.org.au>  2019-03-22 20:57:27 +0800
commit     580e295178402d14bbf598a5702f8e01fc59dbaa (patch)
tree       1c577fecbb63b68d096bb384ab4f81a8c367ec1e
parent     7aceaaef04eaaf6019ca159bc354d800559bba1d (diff)
crypto: arm64/gcm-aes-ce - fix no-NEON fallback code
The arm64 gcm-aes-ce algorithm is failing the extra crypto self-tests following my patches to test the !may_use_simd() code paths, which previously were untested. The problem is that in the !may_use_simd() case, an odd number of AES blocks can be processed within each step of the skcipher_walk. However, the skcipher_walk is being done with a "stride" of 2 blocks and is advanced by an even number of blocks after each step. This causes the encryption to produce the wrong ciphertext and authentication tag, and causes the decryption to incorrectly fail.

Fix it by only processing an even number of blocks per step.

Fixes: c2b24c36e0a3 ("crypto: arm64/aes-gcm-ce - fix scatterwalk API violation")
Fixes: 71e52c278c54 ("crypto: arm64/aes-ce-gcm - operate on two input blocks at a time")
Cc: <stable@vger.kernel.org> # v4.19+
Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
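For context, here is a minimal standalone sketch (not part of the patch) of the rounding the fix relies on: dividing the walk's byte count by the 2-block stride and then multiplying back rounds it down to an even number of blocks, so each step consumes a multiple of the stride. The example byte count below is made up for illustration; only AES_BLOCK_SIZE (16 bytes) and the block-count expressions from the patch are taken from the source.

    #include <stdio.h>

    #define AES_BLOCK_SIZE 16       /* AES block size in bytes */

    int main(void)
    {
            /* made-up example: a walk step covering 3 full AES blocks */
            unsigned int nbytes = 3 * AES_BLOCK_SIZE;

            /* old calculation: can yield an odd block count */
            int old_blocks = nbytes / AES_BLOCK_SIZE;               /* 3 */

            /* fixed calculation: always rounds down to an even count */
            int new_blocks = nbytes / (2 * AES_BLOCK_SIZE) * 2;     /* 2 */

            printf("old=%d new=%d\n", old_blocks, new_blocks);
            return 0;
    }

With an even block count per step, the bytes handed back to skcipher_walk_done() stay aligned with the walk's 2-block stride, which is what the broken odd-count path violated.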
-rw-r--r--  arch/arm64/crypto/ghash-ce-glue.c  10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 791ad422c427..089b09286da7 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -473,9 +473,11 @@ static int gcm_encrypt(struct aead_request *req)
                 put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
                 while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
-                        int blocks = walk.nbytes / AES_BLOCK_SIZE;
+                        const int blocks =
+                                walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
                         u8 *dst = walk.dst.virt.addr;
                         u8 *src = walk.src.virt.addr;
+                        int remaining = blocks;
 
                         do {
                                 __aes_arm64_encrypt(ctx->aes_key.key_enc,
@@ -485,9 +487,9 @@ static int gcm_encrypt(struct aead_request *req)
 
                                 dst += AES_BLOCK_SIZE;
                                 src += AES_BLOCK_SIZE;
-                        } while (--blocks > 0);
+                        } while (--remaining > 0);
 
-                        ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg,
+                        ghash_do_update(blocks, dg,
                                         walk.dst.virt.addr, &ctx->ghash_key,
                                         NULL, pmull_ghash_update_p64);
 
@@ -609,7 +611,7 @@ static int gcm_decrypt(struct aead_request *req)
                 put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
                 while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
-                        int blocks = walk.nbytes / AES_BLOCK_SIZE;
+                        int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
                         u8 *dst = walk.dst.virt.addr;
                         u8 *src = walk.src.virt.addr;