author     Linus Torvalds <torvalds@linux-foundation.org>    2018-08-29 13:38:39 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2018-08-29 13:38:39 -0700
commit     b4df50de6ab66e41b3b8d8acf3ce45c632084163 (patch)
tree       5e2c010a8642ca28c675d5a8b4cedf3f1a65d053 /arch/arm64
parent     3f16503b7d2274ac8cbab11163047ac0b4c66cfe (diff)
parent     3d7c82060d1fe65bde4023aac41a0b1bd7718e07 (diff)
download   linux-b4df50de6ab66e41b3b8d8acf3ce45c632084163.tar.bz2
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto fixes from Herbert Xu:
- Check for the right CPU feature bit in sm4-ce on arm64.
- Fix scatterwalk WARN_ON in aes-gcm-ce on arm64.
- Fix unaligned fault in aesni on x86.
- Fix potential NULL pointer dereference on exit in chtls.
- Fix DMA mapping direction for RSA in caam.
- Fix error path return value for xts setkey in caam.
- Fix address endianness when DMA unmapping in caam.
- Fix sleep-in-atomic in vmx.
- Fix command corruption when queue is full in cavium/nitrox.
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
crypto: cavium/nitrox - fix for command corruption in queue full case with backlog submissions.
crypto: vmx - Fix sleep-in-atomic bugs
crypto: arm64/aes-gcm-ce - fix scatterwalk API violation
crypto: aesni - Use unaligned loads from gcm_context_data
crypto: chtls - fix null dereference chtls_free_uld()
crypto: arm64/sm4-ce - check for the right CPU feature bit
crypto: caam - fix DMA mapping direction for RSA forms 2 & 3
crypto: caam/qi - fix error path in xts setkey
crypto: caam/jr - fix descriptor DMA unmapping
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/crypto/ghash-ce-glue.c | 29 +++++++++++++++++++++++------
-rw-r--r--  arch/arm64/crypto/sm4-ce-glue.c   |  2 +-
2 files changed, 24 insertions, 7 deletions
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 6e9f33d14930..067d8937d5af 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -417,7 +417,7 @@ static int gcm_encrypt(struct aead_request *req)
 		__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
-		while (walk.nbytes >= AES_BLOCK_SIZE) {
+		while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
 			int blocks = walk.nbytes / AES_BLOCK_SIZE;
 			u8 *dst = walk.dst.virt.addr;
 			u8 *src = walk.src.virt.addr;
@@ -437,11 +437,18 @@ static int gcm_encrypt(struct aead_request *req)
 				NULL);
 
 			err = skcipher_walk_done(&walk,
-						 walk.nbytes % AES_BLOCK_SIZE);
+						 walk.nbytes % (2 * AES_BLOCK_SIZE));
 		}
-		if (walk.nbytes)
+		if (walk.nbytes) {
 			__aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv,
 					    nrounds);
+			if (walk.nbytes > AES_BLOCK_SIZE) {
+				crypto_inc(iv, AES_BLOCK_SIZE);
+				__aes_arm64_encrypt(ctx->aes_key.key_enc,
+						    ks + AES_BLOCK_SIZE, iv,
+						    nrounds);
+			}
+		}
 	}
 
 	/* handle the tail */
@@ -545,7 +552,7 @@ static int gcm_decrypt(struct aead_request *req)
 		__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
-		while (walk.nbytes >= AES_BLOCK_SIZE) {
+		while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
 			int blocks = walk.nbytes / AES_BLOCK_SIZE;
 			u8 *dst = walk.dst.virt.addr;
 			u8 *src = walk.src.virt.addr;
@@ -564,11 +571,21 @@ static int gcm_decrypt(struct aead_request *req)
 			} while (--blocks > 0);
 
 			err = skcipher_walk_done(&walk,
-						 walk.nbytes % AES_BLOCK_SIZE);
+						 walk.nbytes % (2 * AES_BLOCK_SIZE));
 		}
-		if (walk.nbytes)
+		if (walk.nbytes) {
+			if (walk.nbytes > AES_BLOCK_SIZE) {
+				u8 *iv2 = iv + AES_BLOCK_SIZE;
+
+				memcpy(iv2, iv, AES_BLOCK_SIZE);
+				crypto_inc(iv2, AES_BLOCK_SIZE);
+
+				__aes_arm64_encrypt(ctx->aes_key.key_enc, iv2,
+						    iv2, nrounds);
+			}
 			__aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv,
 					    nrounds);
+		}
 	}
 
 	/* handle the tail */
diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c
index b7fb5274b250..0c4fc223f225 100644
--- a/arch/arm64/crypto/sm4-ce-glue.c
+++ b/arch/arm64/crypto/sm4-ce-glue.c
@@ -69,5 +69,5 @@ static void __exit sm4_ce_mod_fini(void)
 	crypto_unregister_alg(&sm4_ce_alg);
 }
 
-module_cpu_feature_match(SM3, sm4_ce_mod_init);
+module_cpu_feature_match(SM4, sm4_ce_mod_init);
 module_exit(sm4_ce_mod_fini);
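
Editor's note on the ghash-ce-glue.c hunks: skcipher_walk_done() expects to be told how many of the bytes exposed by the current walk step were left unprocessed, and the tail of the message must reach the post-loop code in one piece so that one or two final keystream blocks can be generated for it. The fix therefore loops only while at least two full blocks are available and reports the remainder modulo 2 * AES_BLOCK_SIZE. The stand-alone C program below is a minimal model of that bookkeeping under simplified assumptions (a single contiguous buffer, invented variable names, no actual crypto); it illustrates the loop shape only and is not kernel code.

#include <stdio.h>

#define AES_BLOCK_SIZE 16

int main(void)
{
	size_t total = 53;      /* example input: 3 blocks + 5 bytes */
	size_t nbytes = total;  /* bytes exposed by the current walk step */
	size_t done = 0;

	/* Bulk loop: consume whole 2-block groups only, so the tail is
	 * always left over for the single final step below. */
	while (nbytes >= 2 * AES_BLOCK_SIZE) {
		size_t chunk = nbytes - nbytes % (2 * AES_BLOCK_SIZE);

		done += chunk;
		nbytes -= chunk;  /* models skcipher_walk_done(remainder) */
		printf("bulk step: %zu bytes, %zu left\n", chunk, nbytes);
	}

	/* Tail: 1 to 2*AES_BLOCK_SIZE - 1 bytes.  The patched kernel code
	 * generates a second keystream block when this exceeds one block. */
	if (nbytes) {
		printf("tail step: %zu bytes%s\n", nbytes,
		       nbytes > AES_BLOCK_SIZE ? " (two keystream blocks)" : "");
		done += nbytes;
	}

	printf("processed %zu of %zu bytes\n", done, total);
	return 0;
}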
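
Editor's note on the sm4-ce-glue.c hunk: module_cpu_feature_match() ties both module auto-loading and initialization to a CPU feature bit, so keying the SM4 driver on SM3 bound it to the wrong capability. SM3 and SM4 are independent arm64 hwcaps; the user-space probe below (not kernel code; the bit values are copied from the arm64 uapi hwcap header, and the output is only meaningful on an arm64 Linux host) shows that the two features are reported separately.

#include <stdio.h>
#include <sys/auxv.h>

/* Values from arch/arm64/include/uapi/asm/hwcap.h; defined here so the
 * probe also builds where the header does not provide them. */
#ifndef HWCAP_SM3
#define HWCAP_SM3 (1UL << 18)
#endif
#ifndef HWCAP_SM4
#define HWCAP_SM4 (1UL << 19)
#endif

int main(void)
{
	unsigned long hwcaps = getauxval(AT_HWCAP);

	/* A CPU may implement either, both, or neither extension. */
	printf("SM3: %s\n", (hwcaps & HWCAP_SM3) ? "yes" : "no");
	printf("SM4: %s\n", (hwcaps & HWCAP_SM4) ? "yes" : "no");
	return 0;
}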