author     Linus Torvalds <torvalds@linux-foundation.org>    2018-04-04 17:11:08 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2018-04-04 17:11:08 -0700
commit     9eb31227cbccd3a37da0f42604f1ab5fc556bc53 (patch)
tree       9aa467e620e002bf01cecdd98e3908e0cc3e7221 /arch/arm64
parent     527cd20771888443b5d8707debe98f62c7a1f596 (diff)
parent     f444ec106407d600f17fa1a4bd14f84577401dec (diff)
download   linux-9eb31227cbccd3a37da0f42604f1ab5fc556bc53.tar.bz2
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"API:
- add AEAD support to crypto engine
- allow batch registration in simd
Algorithms:
- add CFB mode
- add speck block cipher
- add sm4 block cipher
- new test case for crct10dif
- improve scheduling latency on ARM
- scatter/gather support to gcm in aesni
- convert x86 crypto algorithms to skcipher
Drivers:
- hmac(sha224/sha256) support in inside-secure
- aes gcm/ccm support in stm32
- stm32mp1 support in stm32
- ccree driver from staging tree
- gcm support over QI in caam
- add ks-sa hwrng driver"
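The "improve scheduling latency on ARM" item above is what most of the arch/arm64 diff further down implements: the kernel_neon_begin()/kernel_neon_end() pair is moved from around the whole request walk into each walk step, so the NEON unit is released (and preemption becomes possible) between chunks, and the walks no longer have to run atomically. A minimal sketch of the resulting shape, with aes_do_blocks() standing in as a placeholder for the real asmlinkage helpers (aes_ecb_encrypt() and friends):

/* Sketch only: aes_do_blocks() is a placeholder for the real NEON helpers. */
static int ecb_encrypt_sketch(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
        int rounds = 6 + ctx->key_length / 4;
        struct skcipher_walk walk;
        unsigned int blocks;
        int err;

        /* atomic=false: the walk itself may now sleep between chunks */
        err = skcipher_walk_virt(&walk, req, false);

        while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) {
                kernel_neon_begin();            /* claim NEON for this chunk only */
                aes_do_blocks(walk.dst.virt.addr, walk.src.virt.addr,
                              (u8 *)ctx->key_enc, rounds, blocks);
                kernel_neon_end();              /* release NEON, allow preemption */
                err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        return err;
}

The old code wrapped a single begin/end pair around the entire loop and therefore had to request an atomic walk from skcipher_walk_virt(); that is exactly what the hunks in aes-glue.c, aes-neonbs-glue.c, aes-ce-ccm-glue.c and chacha20-neon-glue.c below undo.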
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (212 commits)
crypto: ccree - remove unused enums
crypto: ahash - Fix early termination in hash walk
crypto: brcm - explicitly cast cipher to hash type
crypto: talitos - don't leak pointers to authenc keys
crypto: qat - don't leak pointers to authenc keys
crypto: picoxcell - don't leak pointers to authenc keys
crypto: ixp4xx - don't leak pointers to authenc keys
crypto: chelsio - don't leak pointers to authenc keys
crypto: caam/qi - don't leak pointers to authenc keys
crypto: caam - don't leak pointers to authenc keys
crypto: lrw - Free rctx->ext with kzfree
crypto: talitos - fix IPsec cipher in length
crypto: Deduplicate le32_to_cpu_array() and cpu_to_le32_array()
crypto: doc - clarify hash callbacks state machine
crypto: api - Keep failed instances alive
crypto: api - Make crypto_alg_lookup static
crypto: api - Remove unused crypto_type lookup function
crypto: chelsio - Remove declaration of static function from header
crypto: inside-secure - hmac(sha224) support
crypto: inside-secure - hmac(sha256) support
...
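The run of "don't leak pointers to authenc keys" commits above all make the same small change in the drivers' authenc setkey paths: the struct crypto_authenc_keys parsed onto the stack holds pointers to (and lengths of) the key material, so it is wiped with memzero_explicit() before returning rather than being left behind on the stack. A hedged sketch of the pattern (the function name is illustrative and the driver-specific key programming is elided):

/* Pattern sketch; needs <crypto/aead.h>, <crypto/authenc.h>, <linux/string.h>. */
static int driver_authenc_setkey_sketch(struct crypto_aead *tfm,
                                        const u8 *key, unsigned int keylen)
{
        struct crypto_authenc_keys keys;
        int err;

        err = crypto_authenc_extractkeys(&keys, key, keylen);
        if (err)
                return err;

        /* ... validate lengths and program keys.authkey / keys.enckey ... */
        err = 0;

        /* wipe the stack copy so the pointers and key lengths don't linger */
        memzero_explicit(&keys, sizeof(keys));
        return err;
}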
Diffstat (limited to 'arch/arm64')
-rw-r--r--   arch/arm64/crypto/Kconfig               |   6
-rw-r--r--   arch/arm64/crypto/Makefile              |   8
-rw-r--r--   arch/arm64/crypto/aes-ce-ccm-glue.c     |  47
-rw-r--r--   arch/arm64/crypto/aes-glue.c            |  95
-rw-r--r--   arch/arm64/crypto/aes-modes.S           | 355
-rw-r--r--   arch/arm64/crypto/aes-neonbs-glue.c     |  48
-rw-r--r--   arch/arm64/crypto/chacha20-neon-glue.c  |  12
-rw-r--r--   arch/arm64/crypto/sha256-glue.c         |  36
-rw-r--r--   arch/arm64/crypto/speck-neon-core.S     | 352
-rw-r--r--   arch/arm64/crypto/speck-neon-glue.c     | 282
10 files changed, 886 insertions, 355 deletions
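One more recurring arm64 change worth calling out before the diff itself: the NEON SHA-256 update path is restructured so that, on preemptible kernels, it never holds the NEON unit for longer than roughly one input block at a time. A condensed restatement of the loop added to sha256-glue.c below (sha256_block_neon and sha256_block_data_order are the existing asm routines):

static int sha256_update_neon_sketch(struct shash_desc *desc, const u8 *data,
                                     unsigned int len)
{
        struct sha256_state *sctx = shash_desc_ctx(desc);

        if (!may_use_simd())    /* e.g. called from hard IRQ context */
                return sha256_base_do_update(desc, data, len,
                                (sha256_block_fn *)sha256_block_data_order);

        while (len > 0) {
                unsigned int chunk = len;

                /*
                 * On CONFIG_PREEMPT, cap the chunk so each NEON section ends
                 * at the next block boundary; preemption is possible between
                 * chunks.
                 */
                if (IS_ENABLED(CONFIG_PREEMPT) &&
                    chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE)
                        chunk = SHA256_BLOCK_SIZE -
                                sctx->count % SHA256_BLOCK_SIZE;

                kernel_neon_begin();
                sha256_base_do_update(desc, data, chunk,
                                (sha256_block_fn *)sha256_block_neon);
                kernel_neon_end();
                data += chunk;
                len -= chunk;
        }
        return 0;
}

The finup path below reuses this helper for the data portion, so the same latency bound applies there as well.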
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index 285c36c7b408..cb5a243110c4 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -113,4 +113,10 @@ config CRYPTO_AES_ARM64_BS select CRYPTO_AES_ARM64 select CRYPTO_SIMD +config CRYPTO_SPECK_NEON + tristate "NEON accelerated Speck cipher algorithms" + depends on KERNEL_MODE_NEON + select CRYPTO_BLKCIPHER + select CRYPTO_SPECK + endif diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index cee9b8d9830b..8df9f326f449 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ -53,20 +53,21 @@ sha512-arm64-y := sha512-glue.o sha512-core.o obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o +obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o +speck-neon-y := speck-neon-core.o speck-neon-glue.o + obj-$(CONFIG_CRYPTO_AES_ARM64) += aes-arm64.o aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o obj-$(CONFIG_CRYPTO_AES_ARM64_BS) += aes-neon-bs.o aes-neon-bs-y := aes-neonbs-core.o aes-neonbs-glue.o -AFLAGS_aes-ce.o := -DINTERLEAVE=4 -AFLAGS_aes-neon.o := -DINTERLEAVE=4 - CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS $(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE $(call if_changed_rule,cc_o_c) +ifdef REGENERATE_ARM64_CRYPTO quiet_cmd_perlasm = PERLASM $@ cmd_perlasm = $(PERL) $(<) void $(@) @@ -75,5 +76,6 @@ $(src)/sha256-core.S_shipped: $(src)/sha512-armv8.pl $(src)/sha512-core.S_shipped: $(src)/sha512-armv8.pl $(call cmd,perlasm) +endif .PRECIOUS: $(obj)/sha256-core.S $(obj)/sha512-core.S diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c index a1254036f2b1..68b11aa690e4 100644 --- a/arch/arm64/crypto/aes-ce-ccm-glue.c +++ b/arch/arm64/crypto/aes-ce-ccm-glue.c @@ -107,11 +107,13 @@ static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen) } static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[], - u32 abytes, u32 *macp, bool use_neon) + u32 abytes, u32 *macp) { - if (likely(use_neon)) { + if (may_use_simd()) { + kernel_neon_begin(); ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc, num_rounds(key)); + kernel_neon_end(); } else { if (*macp > 0 && *macp < AES_BLOCK_SIZE) { int added = min(abytes, AES_BLOCK_SIZE - *macp); @@ -143,8 +145,7 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[], } } -static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[], - bool use_neon) +static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[]) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead); @@ -163,7 +164,7 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[], ltag.len = 6; } - ccm_update_mac(ctx, mac, (u8 *)<ag, ltag.len, &macp, use_neon); + ccm_update_mac(ctx, mac, (u8 *)<ag, ltag.len, &macp); scatterwalk_start(&walk, req->src); do { @@ -175,7 +176,7 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[], n = scatterwalk_clamp(&walk, len); } p = scatterwalk_map(&walk); - ccm_update_mac(ctx, mac, p, n, &macp, use_neon); + ccm_update_mac(ctx, mac, p, n, &macp); len -= n; scatterwalk_unmap(p); @@ -242,43 +243,42 @@ static int ccm_encrypt(struct aead_request *req) u8 __aligned(8) mac[AES_BLOCK_SIZE]; u8 buf[AES_BLOCK_SIZE]; u32 len = req->cryptlen; - bool use_neon = may_use_simd(); int err; err = ccm_init_mac(req, mac, len); if (err) return err; - if (likely(use_neon)) - kernel_neon_begin(); - if 
(req->assoclen) - ccm_calculate_auth_mac(req, mac, use_neon); + ccm_calculate_auth_mac(req, mac); /* preserve the original iv for the final round */ memcpy(buf, req->iv, AES_BLOCK_SIZE); err = skcipher_walk_aead_encrypt(&walk, req, true); - if (likely(use_neon)) { + if (may_use_simd()) { while (walk.nbytes) { u32 tail = walk.nbytes % AES_BLOCK_SIZE; if (walk.nbytes == walk.total) tail = 0; + kernel_neon_begin(); ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr, walk.nbytes - tail, ctx->key_enc, num_rounds(ctx), mac, walk.iv); + kernel_neon_end(); err = skcipher_walk_done(&walk, tail); } - if (!err) + if (!err) { + kernel_neon_begin(); ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx)); - - kernel_neon_end(); + kernel_neon_end(); + } } else { err = ccm_crypt_fallback(&walk, mac, buf, ctx, true); } @@ -301,43 +301,42 @@ static int ccm_decrypt(struct aead_request *req) u8 __aligned(8) mac[AES_BLOCK_SIZE]; u8 buf[AES_BLOCK_SIZE]; u32 len = req->cryptlen - authsize; - bool use_neon = may_use_simd(); int err; err = ccm_init_mac(req, mac, len); if (err) return err; - if (likely(use_neon)) - kernel_neon_begin(); - if (req->assoclen) - ccm_calculate_auth_mac(req, mac, use_neon); + ccm_calculate_auth_mac(req, mac); /* preserve the original iv for the final round */ memcpy(buf, req->iv, AES_BLOCK_SIZE); err = skcipher_walk_aead_decrypt(&walk, req, true); - if (likely(use_neon)) { + if (may_use_simd()) { while (walk.nbytes) { u32 tail = walk.nbytes % AES_BLOCK_SIZE; if (walk.nbytes == walk.total) tail = 0; + kernel_neon_begin(); ce_aes_ccm_decrypt(walk.dst.virt.addr, walk.src.virt.addr, walk.nbytes - tail, ctx->key_enc, num_rounds(ctx), mac, walk.iv); + kernel_neon_end(); err = skcipher_walk_done(&walk, tail); } - if (!err) + if (!err) { + kernel_neon_begin(); ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx)); - - kernel_neon_end(); + kernel_neon_end(); + } } else { err = ccm_crypt_fallback(&walk, mac, buf, ctx, false); } diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c index 2fa850e86aa8..253188fb8cb0 100644 --- a/arch/arm64/crypto/aes-glue.c +++ b/arch/arm64/crypto/aes-glue.c @@ -64,17 +64,17 @@ MODULE_LICENSE("GPL v2"); /* defined in aes-modes.S */ asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], - int rounds, int blocks, int first); + int rounds, int blocks); asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], - int rounds, int blocks, int first); + int rounds, int blocks); asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], - int rounds, int blocks, u8 iv[], int first); + int rounds, int blocks, u8 iv[]); asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], - int rounds, int blocks, u8 iv[], int first); + int rounds, int blocks, u8 iv[]); asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], - int rounds, int blocks, u8 ctr[], int first); + int rounds, int blocks, u8 ctr[]); asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds, int blocks, u8 const rk2[], u8 iv[], @@ -133,19 +133,19 @@ static int ecb_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); - int err, first, rounds = 6 + ctx->key_length / 4; + int err, rounds = 6 + ctx->key_length / 4; struct skcipher_walk walk; unsigned int blocks; - err = skcipher_walk_virt(&walk, req, true); + err = skcipher_walk_virt(&walk, req, false); - kernel_neon_begin(); - for (first 
= 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { + while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { + kernel_neon_begin(); aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr, - (u8 *)ctx->key_enc, rounds, blocks, first); + (u8 *)ctx->key_enc, rounds, blocks); + kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); } - kernel_neon_end(); return err; } @@ -153,19 +153,19 @@ static int ecb_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); - int err, first, rounds = 6 + ctx->key_length / 4; + int err, rounds = 6 + ctx->key_length / 4; struct skcipher_walk walk; unsigned int blocks; - err = skcipher_walk_virt(&walk, req, true); + err = skcipher_walk_virt(&walk, req, false); - kernel_neon_begin(); - for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { + while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { + kernel_neon_begin(); aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr, - (u8 *)ctx->key_dec, rounds, blocks, first); + (u8 *)ctx->key_dec, rounds, blocks); + kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); } - kernel_neon_end(); return err; } @@ -173,20 +173,19 @@ static int cbc_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); - int err, first, rounds = 6 + ctx->key_length / 4; + int err, rounds = 6 + ctx->key_length / 4; struct skcipher_walk walk; unsigned int blocks; - err = skcipher_walk_virt(&walk, req, true); + err = skcipher_walk_virt(&walk, req, false); - kernel_neon_begin(); - for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { + while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { + kernel_neon_begin(); aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr, - (u8 *)ctx->key_enc, rounds, blocks, walk.iv, - first); + (u8 *)ctx->key_enc, rounds, blocks, walk.iv); + kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); } - kernel_neon_end(); return err; } @@ -194,20 +193,19 @@ static int cbc_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); - int err, first, rounds = 6 + ctx->key_length / 4; + int err, rounds = 6 + ctx->key_length / 4; struct skcipher_walk walk; unsigned int blocks; - err = skcipher_walk_virt(&walk, req, true); + err = skcipher_walk_virt(&walk, req, false); - kernel_neon_begin(); - for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { + while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { + kernel_neon_begin(); aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr, - (u8 *)ctx->key_dec, rounds, blocks, walk.iv, - first); + (u8 *)ctx->key_dec, rounds, blocks, walk.iv); + kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); } - kernel_neon_end(); return err; } @@ -215,20 +213,18 @@ static int ctr_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); - int err, first, rounds = 6 + ctx->key_length / 4; + int err, rounds = 6 + ctx->key_length / 4; struct skcipher_walk walk; int blocks; - err = skcipher_walk_virt(&walk, req, true); + err = skcipher_walk_virt(&walk, req, false); - first = 1; - kernel_neon_begin(); while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { + 
kernel_neon_begin(); aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr, - (u8 *)ctx->key_enc, rounds, blocks, walk.iv, - first); + (u8 *)ctx->key_enc, rounds, blocks, walk.iv); err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); - first = 0; + kernel_neon_end(); } if (walk.nbytes) { u8 __aligned(8) tail[AES_BLOCK_SIZE]; @@ -241,12 +237,13 @@ static int ctr_encrypt(struct skcipher_request *req) */ blocks = -1; + kernel_neon_begin(); aes_ctr_encrypt(tail, NULL, (u8 *)ctx->key_enc, rounds, - blocks, walk.iv, first); + blocks, walk.iv); + kernel_neon_end(); crypto_xor_cpy(tdst, tsrc, tail, nbytes); err = skcipher_walk_done(&walk, 0); } - kernel_neon_end(); return err; } @@ -270,16 +267,16 @@ static int xts_encrypt(struct skcipher_request *req) struct skcipher_walk walk; unsigned int blocks; - err = skcipher_walk_virt(&walk, req, true); + err = skcipher_walk_virt(&walk, req, false); - kernel_neon_begin(); for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { + kernel_neon_begin(); aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr, (u8 *)ctx->key1.key_enc, rounds, blocks, (u8 *)ctx->key2.key_enc, walk.iv, first); + kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); } - kernel_neon_end(); return err; } @@ -292,16 +289,16 @@ static int xts_decrypt(struct skcipher_request *req) struct skcipher_walk walk; unsigned int blocks; - err = skcipher_walk_virt(&walk, req, true); + err = skcipher_walk_virt(&walk, req, false); - kernel_neon_begin(); for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { + kernel_neon_begin(); aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr, (u8 *)ctx->key1.key_dec, rounds, blocks, (u8 *)ctx->key2.key_enc, walk.iv, first); + kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); } - kernel_neon_end(); return err; } @@ -425,7 +422,7 @@ static int cmac_setkey(struct crypto_shash *tfm, const u8 *in_key, /* encrypt the zero vector */ kernel_neon_begin(); - aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, rk, rounds, 1, 1); + aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, rk, rounds, 1); kernel_neon_end(); cmac_gf128_mul_by_x(consts, consts); @@ -454,8 +451,8 @@ static int xcbc_setkey(struct crypto_shash *tfm, const u8 *in_key, return err; kernel_neon_begin(); - aes_ecb_encrypt(key, ks[0], rk, rounds, 1, 1); - aes_ecb_encrypt(ctx->consts, ks[1], rk, rounds, 2, 0); + aes_ecb_encrypt(key, ks[0], rk, rounds, 1); + aes_ecb_encrypt(ctx->consts, ks[1], rk, rounds, 2); kernel_neon_end(); return cbcmac_setkey(tfm, key, sizeof(key)); diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S index 2674d43d1384..a68412e1e3a4 100644 --- a/arch/arm64/crypto/aes-modes.S +++ b/arch/arm64/crypto/aes-modes.S @@ -13,127 +13,39 @@ .text .align 4 -/* - * There are several ways to instantiate this code: - * - no interleave, all inline - * - 2-way interleave, 2x calls out of line (-DINTERLEAVE=2) - * - 2-way interleave, all inline (-DINTERLEAVE=2 -DINTERLEAVE_INLINE) - * - 4-way interleave, 4x calls out of line (-DINTERLEAVE=4) - * - 4-way interleave, all inline (-DINTERLEAVE=4 -DINTERLEAVE_INLINE) - * - * Macros imported by this code: - * - enc_prepare - setup NEON registers for encryption - * - dec_prepare - setup NEON registers for decryption - * - enc_switch_key - change to new key after having prepared for encryption - * - encrypt_block - encrypt a single block - * - decrypt block - decrypt a single block - * - encrypt_block2x - encrypt 2 blocks in 
parallel (if INTERLEAVE == 2) - * - decrypt_block2x - decrypt 2 blocks in parallel (if INTERLEAVE == 2) - * - encrypt_block4x - encrypt 4 blocks in parallel (if INTERLEAVE == 4) - * - decrypt_block4x - decrypt 4 blocks in parallel (if INTERLEAVE == 4) - */ - -#if defined(INTERLEAVE) && !defined(INTERLEAVE_INLINE) -#define FRAME_PUSH stp x29, x30, [sp,#-16]! ; mov x29, sp -#define FRAME_POP ldp x29, x30, [sp],#16 - -#if INTERLEAVE == 2 - -aes_encrypt_block2x: - encrypt_block2x v0, v1, w3, x2, x6, w7 - ret -ENDPROC(aes_encrypt_block2x) - -aes_decrypt_block2x: - decrypt_block2x v0, v1, w3, x2, x6, w7 - ret -ENDPROC(aes_decrypt_block2x) - -#elif INTERLEAVE == 4 - aes_encrypt_block4x: - encrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7 + encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7 ret ENDPROC(aes_encrypt_block4x) aes_decrypt_block4x: - decrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7 + decrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7 ret ENDPROC(aes_decrypt_block4x) -#else -#error INTERLEAVE should equal 2 or 4 -#endif - - .macro do_encrypt_block2x - bl aes_encrypt_block2x - .endm - - .macro do_decrypt_block2x - bl aes_decrypt_block2x - .endm - - .macro do_encrypt_block4x - bl aes_encrypt_block4x - .endm - - .macro do_decrypt_block4x - bl aes_decrypt_block4x - .endm - -#else -#define FRAME_PUSH -#define FRAME_POP - - .macro do_encrypt_block2x - encrypt_block2x v0, v1, w3, x2, x6, w7 - .endm - - .macro do_decrypt_block2x - decrypt_block2x v0, v1, w3, x2, x6, w7 - .endm - - .macro do_encrypt_block4x - encrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7 - .endm - - .macro do_decrypt_block4x - decrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7 - .endm - -#endif - /* * aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, - * int blocks, int first) + * int blocks) * aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, - * int blocks, int first) + * int blocks) */ AES_ENTRY(aes_ecb_encrypt) - FRAME_PUSH - cbz w5, .LecbencloopNx + stp x29, x30, [sp, #-16]! + mov x29, sp enc_prepare w3, x2, x5 .LecbencloopNx: -#if INTERLEAVE >= 2 - subs w4, w4, #INTERLEAVE + subs w4, w4, #4 bmi .Lecbenc1x -#if INTERLEAVE == 2 - ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 pt blocks */ - do_encrypt_block2x - st1 {v0.16b-v1.16b}, [x0], #32 -#else ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */ - do_encrypt_block4x + bl aes_encrypt_block4x st1 {v0.16b-v3.16b}, [x0], #64 -#endif b .LecbencloopNx .Lecbenc1x: - adds w4, w4, #INTERLEAVE + adds w4, w4, #4 beq .Lecbencout -#endif .Lecbencloop: ld1 {v0.16b}, [x1], #16 /* get next pt block */ encrypt_block v0, w3, x2, x5, w6 @@ -141,35 +53,27 @@ AES_ENTRY(aes_ecb_encrypt) subs w4, w4, #1 bne .Lecbencloop .Lecbencout: - FRAME_POP + ldp x29, x30, [sp], #16 ret AES_ENDPROC(aes_ecb_encrypt) AES_ENTRY(aes_ecb_decrypt) - FRAME_PUSH - cbz w5, .LecbdecloopNx + stp x29, x30, [sp, #-16]! 
+ mov x29, sp dec_prepare w3, x2, x5 .LecbdecloopNx: -#if INTERLEAVE >= 2 - subs w4, w4, #INTERLEAVE + subs w4, w4, #4 bmi .Lecbdec1x -#if INTERLEAVE == 2 - ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */ - do_decrypt_block2x - st1 {v0.16b-v1.16b}, [x0], #32 -#else ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */ - do_decrypt_block4x + bl aes_decrypt_block4x st1 {v0.16b-v3.16b}, [x0], #64 -#endif b .LecbdecloopNx .Lecbdec1x: - adds w4, w4, #INTERLEAVE + adds w4, w4, #4 beq .Lecbdecout -#endif .Lecbdecloop: ld1 {v0.16b}, [x1], #16 /* get next ct block */ decrypt_block v0, w3, x2, x5, w6 @@ -177,62 +81,68 @@ AES_ENTRY(aes_ecb_decrypt) subs w4, w4, #1 bne .Lecbdecloop .Lecbdecout: - FRAME_POP + ldp x29, x30, [sp], #16 ret AES_ENDPROC(aes_ecb_decrypt) /* * aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, - * int blocks, u8 iv[], int first) + * int blocks, u8 iv[]) * aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, - * int blocks, u8 iv[], int first) + * int blocks, u8 iv[]) */ AES_ENTRY(aes_cbc_encrypt) - cbz w6, .Lcbcencloop - - ld1 {v0.16b}, [x5] /* get iv */ + ld1 {v4.16b}, [x5] /* get iv */ enc_prepare w3, x2, x6 -.Lcbcencloop: - ld1 {v1.16b}, [x1], #16 /* get next pt block */ - eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */ +.Lcbcencloop4x: + subs w4, w4, #4 + bmi .Lcbcenc1x + ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */ + eor v0.16b, v0.16b, v4.16b /* ..and xor with iv */ encrypt_block v0, w3, x2, x6, w7 - st1 {v0.16b}, [x0], #16 + eor v1.16b, v1.16b, v0.16b + encrypt_block v1, w3, x2, x6, w7 + eor v2.16b, v2.16b, v1.16b + encrypt_block v2, w3, x2, x6, w7 + eor v3.16b, v3.16b, v2.16b + encrypt_block v3, w3, x2, x6, w7 + st1 {v0.16b-v3.16b}, [x0], #64 + mov v4.16b, v3.16b + b .Lcbcencloop4x +.Lcbcenc1x: + adds w4, w4, #4 + beq .Lcbcencout +.Lcbcencloop: + ld1 {v0.16b}, [x1], #16 /* get next pt block */ + eor v4.16b, v4.16b, v0.16b /* ..and xor with iv */ + encrypt_block v4, w3, x2, x6, w7 + st1 {v4.16b}, [x0], #16 subs w4, w4, #1 bne .Lcbcencloop - st1 {v0.16b}, [x5] /* return iv */ +.Lcbcencout: + st1 {v4.16b}, [x5] /* return iv */ ret AES_ENDPROC(aes_cbc_encrypt) AES_ENTRY(aes_cbc_decrypt) - FRAME_PUSH - cbz w6, .LcbcdecloopNx + stp x29, x30, [sp, #-16]! 
+ mov x29, sp ld1 {v7.16b}, [x5] /* get iv */ dec_prepare w3, x2, x6 .LcbcdecloopNx: -#if INTERLEAVE >= 2 - subs w4, w4, #INTERLEAVE + subs w4, w4, #4 bmi .Lcbcdec1x -#if INTERLEAVE == 2 - ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */ - mov v2.16b, v0.16b - mov v3.16b, v1.16b - do_decrypt_block2x - eor v0.16b, v0.16b, v7.16b - eor v1.16b, v1.16b, v2.16b - mov v7.16b, v3.16b - st1 {v0.16b-v1.16b}, [x0], #32 -#else ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */ mov v4.16b, v0.16b mov v5.16b, v1.16b mov v6.16b, v2.16b - do_decrypt_block4x + bl aes_decrypt_block4x sub x1, x1, #16 eor v0.16b, v0.16b, v7.16b eor v1.16b, v1.16b, v4.16b @@ -240,12 +150,10 @@ AES_ENTRY(aes_cbc_decrypt) eor v2.16b, v2.16b, v5.16b eor v3.16b, v3.16b, v6.16b st1 {v0.16b-v3.16b}, [x0], #64 -#endif b .LcbcdecloopNx .Lcbcdec1x: - adds w4, w4, #INTERLEAVE + adds w4, w4, #4 beq .Lcbcdecout -#endif .Lcbcdecloop: ld1 {v1.16b}, [x1], #16 /* get next ct block */ mov v0.16b, v1.16b /* ...and copy to v0 */ @@ -256,49 +164,33 @@ AES_ENTRY(aes_cbc_decrypt) subs w4, w4, #1 bne .Lcbcdecloop .Lcbcdecout: - FRAME_POP st1 {v7.16b}, [x5] /* return iv */ + ldp x29, x30, [sp], #16 ret AES_ENDPROC(aes_cbc_decrypt) /* * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, - * int blocks, u8 ctr[], int first) + * int blocks, u8 ctr[]) */ AES_ENTRY(aes_ctr_encrypt) - FRAME_PUSH - cbz w6, .Lctrnotfirst /* 1st time around? */ + stp x29, x30, [sp, #-16]! + mov x29, sp + enc_prepare w3, x2, x6 ld1 {v4.16b}, [x5] -.Lctrnotfirst: - umov x8, v4.d[1] /* keep swabbed ctr in reg */ - rev x8, x8 -#if INTERLEAVE >= 2 - cmn w8, w4 /* 32 bit overflow? */ + umov x6, v4.d[1] /* keep swabbed ctr in reg */ + rev x6, x6 + cmn w6, w4 /* 32 bit overflow? */ bcs .Lctrloop .LctrloopNx: - subs w4, w4, #INTERLEAVE + subs w4, w4, #4 bmi .Lctr1x -#if INTERLEAVE == 2 - mov v0.8b, v4.8b - mov v1.8b, v4.8b - rev x7, x8 - add x8, x8, #1 - ins v0.d[1], x7 - rev x7, x8 - add x8, x8, #1 - ins v1.d[1], x7 - ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */ - do_encrypt_block2x - eor v0.16b, v0.16b, v2.16b - eor v1.16b, v1.16b, v3.16b - st1 {v0.16b-v1.16b}, [x0], #32 -#else ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */ - dup v7.4s, w8 + dup v7.4s, w6 mov v0.16b, v4.16b add v7.4s, v7.4s, v8.4s mov v1.16b, v4.16b @@ -309,29 +201,27 @@ AES_ENTRY(aes_ctr_encrypt) mov v2.s[3], v8.s[1] mov v3.s[3], v8.s[2] ld1 {v5.16b-v7.16b}, [x1], #48 /* get 3 input blocks */ - do_encrypt_block4x + bl aes_encrypt_block4x eor v0.16b, v5.16b, v0.16b ld1 {v5.16b}, [x1], #16 /* get 1 input block */ eor v1.16b, v6.16b, v1.16b eor v2.16b, v7.16b, v2.16b eor v3.16b, v5.16b, v3.16b st1 {v0.16b-v3.16b}, [x0], #64 - add x8, x8, #INTERLEAVE -#endif - rev x7, x8 + add x6, x6, #4 + rev x7, x6 ins v4.d[1], x7 cbz w4, .Lctrout b .LctrloopNx .Lctr1x: - adds w4, w4, #INTERLEAVE + adds w4, w4, #4 beq .Lctrout -#endif .Lctrloop: mov v0.16b, v4.16b - encrypt_block v0, w3, x2, x6, w7 + encrypt_block v0, w3, x2, x8, w7 - adds x8, x8, #1 /* increment BE ctr */ - rev x7, x8 + adds x6, x6, #1 /* increment BE ctr */ + rev x7, x6 ins v4.d[1], x7 bcs .Lctrcarry /* overflow? */ @@ -345,12 +235,12 @@ AES_ENTRY(aes_ctr_encrypt) .Lctrout: st1 {v4.16b}, [x5] /* return next CTR value */ - FRAME_POP + ldp x29, x30, [sp], #16 ret .Lctrtailblock: st1 {v0.16b}, [x0] - FRAME_POP + ldp x29, x30, [sp], #16 ret .Lctrcarry: @@ -384,39 +274,26 @@ CPU_LE( .quad 1, 0x87 ) CPU_BE( .quad 0x87, 1 ) AES_ENTRY(aes_xts_encrypt) - FRAME_PUSH - cbz w7, .LxtsencloopNx + stp x29, x30, [sp, #-16]! 
+ mov x29, sp ld1 {v4.16b}, [x6] - enc_prepare w3, x5, x6 - encrypt_block v4, w3, x5, x6, w7 /* first tweak */ - enc_switch_key w3, x2, x6 + cbz w7, .Lxtsencnotfirst + + enc_prepare w3, x5, x8 + encrypt_block v4, w3, x5, x8, w7 /* first tweak */ + enc_switch_key w3, x2, x8 ldr q7, .Lxts_mul_x b .LxtsencNx +.Lxtsencnotfirst: + enc_prepare w3, x2, x8 .LxtsencloopNx: ldr q7, .Lxts_mul_x next_tweak v4, v4, v7, v8 .LxtsencNx: -#if INTERLEAVE >= 2 - subs w4, w4, #INTERLEAVE + subs w4, w4, #4 bmi .Lxtsenc1x -#if INTERLEAVE == 2 - ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 pt blocks */ - next_tweak v5, v4, v7, v8 - eor v0.16b, v0.16b, v4.16b - eor v1.16b, v1.16b, v5.16b - do_encrypt_block2x - eor v0.16b, v0.16b, v4.16b - eor v1.16b, v1.16b, v5.16b - st1 {v0.16b-v1.16b}, [x0], #32 - cbz w4, .LxtsencoutNx - next_tweak v4, v5, v7, v8 - b .LxtsencNx -.LxtsencoutNx: - mov v4.16b, v5.16b - b .Lxtsencout -#else ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */ next_tweak v5, v4, v7, v8 eor v0.16b, v0.16b, v4.16b @@ -425,7 +302,7 @@ AES_ENTRY(aes_xts_encrypt) eor v2.16b, v2.16b, v6.16b next_tweak v7, v6, v7, v8 eor v3.16b, v3.16b, v7.16b - do_encrypt_block4x + bl aes_encrypt_block4x eor v3.16b, v3.16b, v7.16b eor v0.16b, v0.16b, v4.16b eor v1.16b, v1.16b, v5.16b @@ -434,15 +311,13 @@ AES_ENTRY(aes_xts_encrypt) mov v4.16b, v7.16b cbz w4, .Lxtsencout b .LxtsencloopNx -#endif .Lxtsenc1x: - adds w4, w4, #INTERLEAVE + adds w4, w4, #4 beq .Lxtsencout -#endif .Lxtsencloop: ld1 {v1.16b}, [x1], #16 eor v0.16b, v1.16b, v4.16b - encrypt_block v0, w3, x2, x6, w7 + encrypt_block v0, w3, x2, x8, w7 eor v0.16b, v0.16b, v4.16b st1 {v0.16b}, [x0], #16 subs w4, w4, #1 @@ -450,45 +325,33 @@ AES_ENTRY(aes_xts_encrypt) next_tweak v4, v4, v7, v8 b .Lxtsencloop .Lxtsencout: - FRAME_POP + st1 {v4.16b}, [x6] + ldp x29, x30, [sp], #16 ret AES_ENDPROC(aes_xts_encrypt) AES_ENTRY(aes_xts_decrypt) - FRAME_PUSH - cbz w7, .LxtsdecloopNx + stp x29, x30, [sp, #-16]! 
+ mov x29, sp ld1 {v4.16b}, [x6] - enc_prepare w3, x5, x6 - encrypt_block v4, w3, x5, x6, w7 /* first tweak */ - dec_prepare w3, x2, x6 + cbz w7, .Lxtsdecnotfirst + + enc_prepare w3, x5, x8 + encrypt_block v4, w3, x5, x8, w7 /* first tweak */ + dec_prepare w3, x2, x8 ldr q7, .Lxts_mul_x b .LxtsdecNx +.Lxtsdecnotfirst: + dec_prepare w3, x2, x8 .LxtsdecloopNx: ldr q7, .Lxts_mul_x next_tweak v4, v4, v7, v8 .LxtsdecNx: -#if INTERLEAVE >= 2 - subs w4, w4, #INTERLEAVE + subs w4, w4, #4 bmi .Lxtsdec1x -#if INTERLEAVE == 2 - ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */ - next_tweak v5, v4, v7, v8 - eor v0.16b, v0.16b, v4.16b - eor v1.16b, v1.16b, v5.16b - do_decrypt_block2x - eor v0.16b, v0.16b, v4.16b - eor v1.16b, v1.16b, v5.16b - st1 {v0.16b-v1.16b}, [x0], #32 - cbz w4, .LxtsdecoutNx - next_tweak v4, v5, v7, v8 - b .LxtsdecNx -.LxtsdecoutNx: - mov v4.16b, v5.16b - b .Lxtsdecout -#else ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */ next_tweak v5, v4, v7, v8 eor v0.16b, v0.16b, v4.16b @@ -497,7 +360,7 @@ AES_ENTRY(aes_xts_decrypt) eor v2.16b, v2.16b, v6.16b next_tweak v7, v6, v7, v8 eor v3.16b, v3.16b, v7.16b - do_decrypt_block4x + bl aes_decrypt_block4x eor v3.16b, v3.16b, v7.16b eor v0.16b, v0.16b, v4.16b eor v1.16b, v1.16b, v5.16b @@ -506,15 +369,13 @@ AES_ENTRY(aes_xts_decrypt) mov v4.16b, v7.16b cbz w4, .Lxtsdecout b .LxtsdecloopNx -#endif .Lxtsdec1x: - adds w4, w4, #INTERLEAVE + adds w4, w4, #4 beq .Lxtsdecout -#endif .Lxtsdecloop: ld1 {v1.16b}, [x1], #16 eor v0.16b, v1.16b, v4.16b - decrypt_block v0, w3, x2, x6, w7 + decrypt_block v0, w3, x2, x8, w7 eor v0.16b, v0.16b, v4.16b st1 {v0.16b}, [x0], #16 subs w4, w4, #1 @@ -522,7 +383,8 @@ AES_ENTRY(aes_xts_decrypt) next_tweak v4, v4, v7, v8 b .Lxtsdecloop .Lxtsdecout: - FRAME_POP + st1 {v4.16b}, [x6] + ldp x29, x30, [sp], #16 ret AES_ENDPROC(aes_xts_decrypt) @@ -533,8 +395,28 @@ AES_ENDPROC(aes_xts_decrypt) AES_ENTRY(aes_mac_update) ld1 {v0.16b}, [x4] /* get dg */ enc_prepare w2, x1, x7 - cbnz w5, .Lmacenc + cbz w5, .Lmacloop4x + + encrypt_block v0, w2, x1, x7, w8 +.Lmacloop4x: + subs w3, w3, #4 + bmi .Lmac1x + ld1 {v1.16b-v4.16b}, [x0], #64 /* get next pt block */ + eor v0.16b, v0.16b, v1.16b /* ..and xor with dg */ + encrypt_block v0, w2, x1, x7, w8 + eor v0.16b, v0.16b, v2.16b + encrypt_block v0, w2, x1, x7, w8 + eor v0.16b, v0.16b, v3.16b + encrypt_block v0, w2, x1, x7, w8 + eor v0.16b, v0.16b, v4.16b + cmp w3, wzr + csinv x5, x6, xzr, eq + cbz w5, .Lmacout + encrypt_block v0, w2, x1, x7, w8 + b .Lmacloop4x +.Lmac1x: + add w3, w3, #4 .Lmacloop: cbz w3, .Lmacout ld1 {v1.16b}, [x0], #16 /* get next pt block */ @@ -544,7 +426,6 @@ AES_ENTRY(aes_mac_update) csinv x5, x6, xzr, eq cbz w5, .Lmacout -.Lmacenc: encrypt_block v0, w2, x1, x7, w8 b .Lmacloop diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c index c55d68ccb89f..e7a95a566462 100644 --- a/arch/arm64/crypto/aes-neonbs-glue.c +++ b/arch/arm64/crypto/aes-neonbs-glue.c @@ -46,10 +46,9 @@ asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[], /* borrowed from aes-neon-blk.ko */ asmlinkage void neon_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[], - int rounds, int blocks, int first); + int rounds, int blocks); asmlinkage void neon_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[], - int rounds, int blocks, u8 iv[], - int first); + int rounds, int blocks, u8 iv[]); struct aesbs_ctx { u8 rk[13 * (8 * AES_BLOCK_SIZE) + 32]; @@ -100,9 +99,8 @@ static int __ecb_crypt(struct skcipher_request *req, struct 
skcipher_walk walk; int err; - err = skcipher_walk_virt(&walk, req, true); + err = skcipher_walk_virt(&walk, req, false); - kernel_neon_begin(); while (walk.nbytes >= AES_BLOCK_SIZE) { unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; @@ -110,12 +108,13 @@ static int __ecb_crypt(struct skcipher_request *req, blocks = round_down(blocks, walk.stride / AES_BLOCK_SIZE); + kernel_neon_begin(); fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk, ctx->rounds, blocks); + kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes - blocks * AES_BLOCK_SIZE); } - kernel_neon_end(); return err; } @@ -157,22 +156,21 @@ static int cbc_encrypt(struct skcipher_request *req) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; - int err, first = 1; + int err; - err = skcipher_walk_virt(&walk, req, true); + err = skcipher_walk_virt(&walk, req, false); - kernel_neon_begin(); while (walk.nbytes >= AES_BLOCK_SIZE) { unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; /* fall back to the non-bitsliced NEON implementation */ + kernel_neon_begin(); neon_aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr, - ctx->enc, ctx->key.rounds, blocks, walk.iv, - first); + ctx->enc, ctx->key.rounds, blocks, + walk.iv); + kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); - first = 0; } - kernel_neon_end(); return err; } @@ -183,9 +181,8 @@ static int cbc_decrypt(struct skcipher_request *req) struct skcipher_walk walk; int err; - err = skcipher_walk_virt(&walk, req, true); + err = skcipher_walk_virt(&walk, req, false); - kernel_neon_begin(); while (walk.nbytes >= AES_BLOCK_SIZE) { unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; @@ -193,13 +190,14 @@ static int cbc_decrypt(struct skcipher_request *req) blocks = round_down(blocks, walk.stride / AES_BLOCK_SIZE); + kernel_neon_begin(); aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk, ctx->key.rounds, blocks, walk.iv); + kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes - blocks * AES_BLOCK_SIZE); } - kernel_neon_end(); return err; } @@ -231,9 +229,8 @@ static int ctr_encrypt(struct skcipher_request *req) u8 buf[AES_BLOCK_SIZE]; int err; - err = skcipher_walk_virt(&walk, req, true); + err = skcipher_walk_virt(&walk, req, false); - kernel_neon_begin(); while (walk.nbytes > 0) { unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; u8 *final = (walk.total % AES_BLOCK_SIZE) ? 
buf : NULL; @@ -244,8 +241,10 @@ static int ctr_encrypt(struct skcipher_request *req) final = NULL; } + kernel_neon_begin(); aesbs_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk, ctx->rounds, blocks, walk.iv, final); + kernel_neon_end(); if (final) { u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE; @@ -260,8 +259,6 @@ static int ctr_encrypt(struct skcipher_request *req) err = skcipher_walk_done(&walk, walk.nbytes - blocks * AES_BLOCK_SIZE); } - kernel_neon_end(); - return err; } @@ -306,12 +303,11 @@ static int __xts_crypt(struct skcipher_request *req, struct skcipher_walk walk; int err; - err = skcipher_walk_virt(&walk, req, true); + err = skcipher_walk_virt(&walk, req, false); kernel_neon_begin(); - - neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey, - ctx->key.rounds, 1, 1); + neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey, ctx->key.rounds, 1); + kernel_neon_end(); while (walk.nbytes >= AES_BLOCK_SIZE) { unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; @@ -320,13 +316,13 @@ static int __xts_crypt(struct skcipher_request *req, blocks = round_down(blocks, walk.stride / AES_BLOCK_SIZE); + kernel_neon_begin(); fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk, ctx->key.rounds, blocks, walk.iv); + kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes - blocks * AES_BLOCK_SIZE); } - kernel_neon_end(); - return err; } diff --git a/arch/arm64/crypto/chacha20-neon-glue.c b/arch/arm64/crypto/chacha20-neon-glue.c index cbdb75d15cd0..727579c93ded 100644 --- a/arch/arm64/crypto/chacha20-neon-glue.c +++ b/arch/arm64/crypto/chacha20-neon-glue.c @@ -37,12 +37,19 @@ static void chacha20_doneon(u32 *state, u8 *dst, const u8 *src, u8 buf[CHACHA20_BLOCK_SIZE]; while (bytes >= CHACHA20_BLOCK_SIZE * 4) { + kernel_neon_begin(); chacha20_4block_xor_neon(state, dst, src); + kernel_neon_end(); bytes -= CHACHA20_BLOCK_SIZE * 4; src += CHACHA20_BLOCK_SIZE * 4; dst += CHACHA20_BLOCK_SIZE * 4; state[12] += 4; } + + if (!bytes) + return; + + kernel_neon_begin(); while (bytes >= CHACHA20_BLOCK_SIZE) { chacha20_block_xor_neon(state, dst, src); bytes -= CHACHA20_BLOCK_SIZE; @@ -55,6 +62,7 @@ static void chacha20_doneon(u32 *state, u8 *dst, const u8 *src, chacha20_block_xor_neon(state, buf, buf); memcpy(dst, buf, bytes); } + kernel_neon_end(); } static int chacha20_neon(struct skcipher_request *req) @@ -68,11 +76,10 @@ static int chacha20_neon(struct skcipher_request *req) if (!may_use_simd() || req->cryptlen <= CHACHA20_BLOCK_SIZE) return crypto_chacha20_crypt(req); - err = skcipher_walk_virt(&walk, req, true); + err = skcipher_walk_virt(&walk, req, false); crypto_chacha20_init(state, ctx, walk.iv); - kernel_neon_begin(); while (walk.nbytes > 0) { unsigned int nbytes = walk.nbytes; @@ -83,7 +90,6 @@ static int chacha20_neon(struct skcipher_request *req) nbytes); err = skcipher_walk_done(&walk, walk.nbytes - nbytes); } - kernel_neon_end(); return err; } diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c index b064d925fe2a..e8880ccdc71f 100644 --- a/arch/arm64/crypto/sha256-glue.c +++ b/arch/arm64/crypto/sha256-glue.c @@ -89,21 +89,32 @@ static struct shash_alg algs[] = { { static int sha256_update_neon(struct shash_desc *desc, const u8 *data, unsigned int len) { - /* - * Stacking and unstacking a substantial slice of the NEON register - * file may significantly affect performance for small updates when - * executing in interrupt context, so fall back to the scalar code - * in that case. 
- */ + struct sha256_state *sctx = shash_desc_ctx(desc); + if (!may_use_simd()) return sha256_base_do_update(desc, data, len, (sha256_block_fn *)sha256_block_data_order); - kernel_neon_begin(); - sha256_base_do_update(desc, data, len, - (sha256_block_fn *)sha256_block_neon); - kernel_neon_end(); + while (len > 0) { + unsigned int chunk = len; + + /* + * Don't hog the CPU for the entire time it takes to process all + * input when running on a preemptible kernel, but process the + * data block by block instead. + */ + if (IS_ENABLED(CONFIG_PREEMPT) && + chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE) + chunk = SHA256_BLOCK_SIZE - + sctx->count % SHA256_BLOCK_SIZE; + kernel_neon_begin(); + sha256_base_do_update(desc, data, chunk, + (sha256_block_fn *)sha256_block_neon); + kernel_neon_end(); + data += chunk; + len -= chunk; + } return 0; } @@ -117,10 +128,9 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data, sha256_base_do_finalize(desc, (sha256_block_fn *)sha256_block_data_order); } else { - kernel_neon_begin(); if (len) - sha256_base_do_update(desc, data, len, - (sha256_block_fn *)sha256_block_neon); + sha256_update_neon(desc, data, len); + kernel_neon_begin(); sha256_base_do_finalize(desc, (sha256_block_fn *)sha256_block_neon); kernel_neon_end(); diff --git a/arch/arm64/crypto/speck-neon-core.S b/arch/arm64/crypto/speck-neon-core.S new file mode 100644 index 000000000000..b14463438b09 --- /dev/null +++ b/arch/arm64/crypto/speck-neon-core.S @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARM64 NEON-accelerated implementation of Speck128-XTS and Speck64-XTS + * + * Copyright (c) 2018 Google, Inc + * + * Author: Eric Biggers <ebiggers@google.com> + */ + +#include <linux/linkage.h> + + .text + + // arguments + ROUND_KEYS .req x0 // const {u64,u32} *round_keys + NROUNDS .req w1 // int nrounds + NROUNDS_X .req x1 + DST .req x2 // void *dst + SRC .req x3 // const void *src + NBYTES .req w4 // unsigned int nbytes + TWEAK .req x5 // void *tweak + + // registers which hold the data being encrypted/decrypted + // (underscores avoid a naming collision with ARM64 registers x0-x3) + X_0 .req v0 + Y_0 .req v1 + X_1 .req v2 + Y_1 .req v3 + X_2 .req v4 + Y_2 .req v5 + X_3 .req v6 + Y_3 .req v7 + + // the round key, duplicated in all lanes + ROUND_KEY .req v8 + + // index vector for tbl-based 8-bit rotates + ROTATE_TABLE .req v9 + ROTATE_TABLE_Q .req q9 + + // temporary registers + TMP0 .req v10 + TMP1 .req v11 + TMP2 .req v12 + TMP3 .req v13 + + // multiplication table for updating XTS tweaks + GFMUL_TABLE .req v14 + GFMUL_TABLE_Q .req q14 + + // next XTS tweak value(s) + TWEAKV_NEXT .req v15 + + // XTS tweaks for the blocks currently being encrypted/decrypted + TWEAKV0 .req v16 + TWEAKV1 .req v17 + TWEAKV2 .req v18 + TWEAKV3 .req v19 + TWEAKV4 .req v20 + TWEAKV5 .req v21 + TWEAKV6 .req v22 + TWEAKV7 .req v23 + + .align 4 +.Lror64_8_table: + .octa 0x080f0e0d0c0b0a090007060504030201 +.Lror32_8_table: + .octa 0x0c0f0e0d080b0a090407060500030201 +.Lrol64_8_table: + .octa 0x0e0d0c0b0a09080f0605040302010007 +.Lrol32_8_table: + .octa 0x0e0d0c0f0a09080b0605040702010003 +.Lgf128mul_table: + .octa 0x00000000000000870000000000000001 +.Lgf64mul_table: + .octa 0x0000000000000000000000002d361b00 + +/* + * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time + * + * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for + * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes + * of ROUND_KEY. 
'n' is the lane size: 64 for Speck128, or 32 for Speck64. + * 'lanes' is the lane specifier: "2d" for Speck128 or "4s" for Speck64. + */ +.macro _speck_round_128bytes n, lanes + + // x = ror(x, 8) + tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b + tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b + tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b + tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b + + // x += y + add X_0.\lanes, X_0.\lanes, Y_0.\lanes + add X_1.\lanes, X_1.\lanes, Y_1.\lanes + add X_2.\lanes, X_2.\lanes, Y_2.\lanes + add X_3.\lanes, X_3.\lanes, Y_3.\lanes + + // x ^= k + eor X_0.16b, X_0.16b, ROUND_KEY.16b + eor X_1.16b, X_1.16b, ROUND_KEY.16b + eor X_2.16b, X_2.16b, ROUND_KEY.16b + eor X_3.16b, X_3.16b, ROUND_KEY.16b + + // y = rol(y, 3) + shl TMP0.\lanes, Y_0.\lanes, #3 + shl TMP1.\lanes, Y_1.\lanes, #3 + shl TMP2.\lanes, Y_2.\lanes, #3 + shl TMP3.\lanes, Y_3.\lanes, #3 + sri TMP0.\lanes, Y_0.\lanes, #(\n - 3) + sri TMP1.\lanes, Y_1.\lanes, #(\n - 3) + sri TMP2.\lanes, Y_2.\lanes, #(\n - 3) + sri TMP3.\lanes, Y_3.\lanes, #(\n - 3) + + // y ^= x + eor Y_0.16b, TMP0.16b, X_0.16b + eor Y_1.16b, TMP1.16b, X_1.16b + eor Y_2.16b, TMP2.16b, X_2.16b + eor Y_3.16b, TMP3.16b, X_3.16b +.endm + +/* + * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time + * + * This is the inverse of _speck_round_128bytes(). + */ +.macro _speck_unround_128bytes n, lanes + + // y ^= x + eor TMP0.16b, Y_0.16b, X_0.16b + eor TMP1.16b, Y_1.16b, X_1.16b + eor TMP2.16b, Y_2.16b, X_2.16b + eor TMP3.16b, Y_3.16b, X_3.16b + + // y = ror(y, 3) + ushr Y_0.\lanes, TMP0.\lanes, #3 + ushr Y_1.\lanes, TMP1.\lanes, #3 + ushr Y_2.\lanes, TMP2.\lanes, #3 + ushr Y_3.\lanes, TMP3.\lanes, #3 + sli Y_0.\lanes, TMP0.\lanes, #(\n - 3) + sli Y_1.\lanes, TMP1.\lanes, #(\n - 3) + sli Y_2.\lanes, TMP2.\lanes, #(\n - 3) + sli Y_3.\lanes, TMP3.\lanes, #(\n - 3) + + // x ^= k + eor X_0.16b, X_0.16b, ROUND_KEY.16b + eor X_1.16b, X_1.16b, ROUND_KEY.16b + eor X_2.16b, X_2.16b, ROUND_KEY.16b + eor X_3.16b, X_3.16b, ROUND_KEY.16b + + // x -= y + sub X_0.\lanes, X_0.\lanes, Y_0.\lanes + sub X_1.\lanes, X_1.\lanes, Y_1.\lanes + sub X_2.\lanes, X_2.\lanes, Y_2.\lanes + sub X_3.\lanes, X_3.\lanes, Y_3.\lanes + + // x = rol(x, 8) + tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b + tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b + tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b + tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b +.endm + +.macro _next_xts_tweak next, cur, tmp, n +.if \n == 64 + /* + * Calculate the next tweak by multiplying the current one by x, + * modulo p(x) = x^128 + x^7 + x^2 + x + 1. + */ + sshr \tmp\().2d, \cur\().2d, #63 + and \tmp\().16b, \tmp\().16b, GFMUL_TABLE.16b + shl \next\().2d, \cur\().2d, #1 + ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8 + eor \next\().16b, \next\().16b, \tmp\().16b +.else + /* + * Calculate the next two tweaks by multiplying the current ones by x^2, + * modulo p(x) = x^64 + x^4 + x^3 + x + 1. + */ + ushr \tmp\().2d, \cur\().2d, #62 + shl \next\().2d, \cur\().2d, #2 + tbl \tmp\().16b, {GFMUL_TABLE.16b}, \tmp\().16b + eor \next\().16b, \next\().16b, \tmp\().16b +.endif +.endm + +/* + * _speck_xts_crypt() - Speck-XTS encryption/decryption + * + * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer + * using Speck-XTS, specifically the variant with a block size of '2n' and round + * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and + * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a + * nonzero multiple of 128. 
+ */ +.macro _speck_xts_crypt n, lanes, decrypting + + /* + * If decrypting, modify the ROUND_KEYS parameter to point to the last + * round key rather than the first, since for decryption the round keys + * are used in reverse order. + */ +.if \decrypting + mov NROUNDS, NROUNDS /* zero the high 32 bits */ +.if \n == 64 + add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #3 + sub ROUND_KEYS, ROUND_KEYS, #8 +.else + add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #2 + sub ROUND_KEYS, ROUND_KEYS, #4 +.endif +.endif + + // Load the index vector for tbl-based 8-bit rotates +.if \decrypting + ldr ROTATE_TABLE_Q, .Lrol\n\()_8_table +.else + ldr ROTATE_TABLE_Q, .Lror\n\()_8_table +.endif + + // One-time XTS preparation +.if \n == 64 + // Load first tweak + ld1 {TWEAKV0.16b}, [TWEAK] + + // Load GF(2^128) multiplication table + ldr GFMUL_TABLE_Q, .Lgf128mul_table +.else + // Load first tweak + ld1 {TWEAKV0.8b}, [TWEAK] + + // Load GF(2^64) multiplication table + ldr GFMUL_TABLE_Q, .Lgf64mul_table + + // Calculate second tweak, packing it together with the first + ushr TMP0.2d, TWEAKV0.2d, #63 + shl TMP1.2d, TWEAKV0.2d, #1 + tbl TMP0.8b, {GFMUL_TABLE.16b}, TMP0.8b + eor TMP0.8b, TMP0.8b, TMP1.8b + mov TWEAKV0.d[1], TMP0.d[0] +.endif + +.Lnext_128bytes_\@: + + // Calculate XTS tweaks for next 128 bytes + _next_xts_tweak TWEAKV1, TWEAKV0, TMP0, \n + _next_xts_tweak TWEAKV2, TWEAKV1, TMP0, \n + _next_xts_tweak TWEAKV3, TWEAKV2, TMP0, \n + _next_xts_tweak TWEAKV4, TWEAKV3, TMP0, \n + _next_xts_tweak TWEAKV5, TWEAKV4, TMP0, \n + _next_xts_tweak TWEAKV6, TWEAKV5, TMP0, \n + _next_xts_tweak TWEAKV7, TWEAKV6, TMP0, \n + _next_xts_tweak TWEAKV_NEXT, TWEAKV7, TMP0, \n + + // Load the next source blocks into {X,Y}[0-3] + ld1 {X_0.16b-Y_1.16b}, [SRC], #64 + ld1 {X_2.16b-Y_3.16b}, [SRC], #64 + + // XOR the source blocks with their XTS tweaks + eor TMP0.16b, X_0.16b, TWEAKV0.16b + eor Y_0.16b, Y_0.16b, TWEAKV1.16b + eor TMP1.16b, X_1.16b, TWEAKV2.16b + eor Y_1.16b, Y_1.16b, TWEAKV3.16b + eor TMP2.16b, X_2.16b, TWEAKV4.16b + eor Y_2.16b, Y_2.16b, TWEAKV5.16b + eor TMP3.16b, X_3.16b, TWEAKV6.16b + eor Y_3.16b, Y_3.16b, TWEAKV7.16b + + /* + * De-interleave the 'x' and 'y' elements of each block, i.e. make it so + * that the X[0-3] registers contain only the second halves of blocks, + * and the Y[0-3] registers contain only the first halves of blocks. + * (Speck uses the order (y, x) rather than the more intuitive (x, y).) 
+ */ + uzp2 X_0.\lanes, TMP0.\lanes, Y_0.\lanes + uzp1 Y_0.\lanes, TMP0.\lanes, Y_0.\lanes + uzp2 X_1.\lanes, TMP1.\lanes, Y_1.\lanes + uzp1 Y_1.\lanes, TMP1.\lanes, Y_1.\lanes + uzp2 X_2.\lanes, TMP2.\lanes, Y_2.\lanes + uzp1 Y_2.\lanes, TMP2.\lanes, Y_2.\lanes + uzp2 X_3.\lanes, TMP3.\lanes, Y_3.\lanes + uzp1 Y_3.\lanes, TMP3.\lanes, Y_3.\lanes + + // Do the cipher rounds + mov x6, ROUND_KEYS + mov w7, NROUNDS +.Lnext_round_\@: +.if \decrypting + ld1r {ROUND_KEY.\lanes}, [x6] + sub x6, x6, #( \n / 8 ) + _speck_unround_128bytes \n, \lanes +.else + ld1r {ROUND_KEY.\lanes}, [x6], #( \n / 8 ) + _speck_round_128bytes \n, \lanes +.endif + subs w7, w7, #1 + bne .Lnext_round_\@ + + // Re-interleave the 'x' and 'y' elements of each block + zip1 TMP0.\lanes, Y_0.\lanes, X_0.\lanes + zip2 Y_0.\lanes, Y_0.\lanes, X_0.\lanes + zip1 TMP1.\lanes, Y_1.\lanes, X_1.\lanes + zip2 Y_1.\lanes, Y_1.\lanes, X_1.\lanes + zip1 TMP2.\lanes, Y_2.\lanes, X_2.\lanes + zip2 Y_2.\lanes, Y_2.\lanes, X_2.\lanes + zip1 TMP3.\lanes, Y_3.\lanes, X_3.\lanes + zip2 Y_3.\lanes, Y_3.\lanes, X_3.\lanes + + // XOR the encrypted/decrypted blocks with the tweaks calculated earlier + eor X_0.16b, TMP0.16b, TWEAKV0.16b + eor Y_0.16b, Y_0.16b, TWEAKV1.16b + eor X_1.16b, TMP1.16b, TWEAKV2.16b + eor Y_1.16b, Y_1.16b, TWEAKV3.16b + eor X_2.16b, TMP2.16b, TWEAKV4.16b + eor Y_2.16b, Y_2.16b, TWEAKV5.16b + eor X_3.16b, TMP3.16b, TWEAKV6.16b + eor Y_3.16b, Y_3.16b, TWEAKV7.16b + mov TWEAKV0.16b, TWEAKV_NEXT.16b + + // Store the ciphertext in the destination buffer + st1 {X_0.16b-Y_1.16b}, [DST], #64 + st1 {X_2.16b-Y_3.16b}, [DST], #64 + + // Continue if there are more 128-byte chunks remaining + subs NBYTES, NBYTES, #128 + bne .Lnext_128bytes_\@ + + // Store the next tweak and return +.if \n == 64 + st1 {TWEAKV_NEXT.16b}, [TWEAK] +.else + st1 {TWEAKV_NEXT.8b}, [TWEAK] +.endif + ret +.endm + +ENTRY(speck128_xts_encrypt_neon) + _speck_xts_crypt n=64, lanes=2d, decrypting=0 +ENDPROC(speck128_xts_encrypt_neon) + +ENTRY(speck128_xts_decrypt_neon) + _speck_xts_crypt n=64, lanes=2d, decrypting=1 +ENDPROC(speck128_xts_decrypt_neon) + +ENTRY(speck64_xts_encrypt_neon) + _speck_xts_crypt n=32, lanes=4s, decrypting=0 +ENDPROC(speck64_xts_encrypt_neon) + +ENTRY(speck64_xts_decrypt_neon) + _speck_xts_crypt n=32, lanes=4s, decrypting=1 +ENDPROC(speck64_xts_decrypt_neon) diff --git a/arch/arm64/crypto/speck-neon-glue.c b/arch/arm64/crypto/speck-neon-glue.c new file mode 100644 index 000000000000..6e233aeb4ff4 --- /dev/null +++ b/arch/arm64/crypto/speck-neon-glue.c @@ -0,0 +1,282 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS + * (64-bit version; based on the 32-bit version) + * + * Copyright (c) 2018 Google, Inc + */ + +#include <asm/hwcap.h> +#include <asm/neon.h> +#include <asm/simd.h> +#include <crypto/algapi.h> +#include <crypto/gf128mul.h> +#include <crypto/internal/skcipher.h> +#include <crypto/speck.h> +#include <crypto/xts.h> +#include <linux/kernel.h> +#include <linux/module.h> + +/* The assembly functions only handle multiples of 128 bytes */ +#define SPECK_NEON_CHUNK_SIZE 128 + +/* Speck128 */ + +struct speck128_xts_tfm_ctx { + struct speck128_tfm_ctx main_key; + struct speck128_tfm_ctx tweak_key; +}; + +asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, void *tweak); + +asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, 
void *tweak); + +typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *, + u8 *, const u8 *); +typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *, + const void *, unsigned int, void *); + +static __always_inline int +__speck128_xts_crypt(struct skcipher_request *req, + speck128_crypt_one_t crypt_one, + speck128_xts_crypt_many_t crypt_many) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + le128 tweak; + int err; + + err = skcipher_walk_virt(&walk, req, true); + + crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv); + + while (walk.nbytes > 0) { + unsigned int nbytes = walk.nbytes; + u8 *dst = walk.dst.virt.addr; + const u8 *src = walk.src.virt.addr; + + if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) { + unsigned int count; + + count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); + kernel_neon_begin(); + (*crypt_many)(ctx->main_key.round_keys, + ctx->main_key.nrounds, + dst, src, count, &tweak); + kernel_neon_end(); + dst += count; + src += count; + nbytes -= count; + } + + /* Handle any remainder with generic code */ + while (nbytes >= sizeof(tweak)) { + le128_xor((le128 *)dst, (const le128 *)src, &tweak); + (*crypt_one)(&ctx->main_key, dst, dst); + le128_xor((le128 *)dst, (const le128 *)dst, &tweak); + gf128mul_x_ble(&tweak, &tweak); + + dst += sizeof(tweak); + src += sizeof(tweak); + nbytes -= sizeof(tweak); + } + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static int speck128_xts_encrypt(struct skcipher_request *req) +{ + return __speck128_xts_crypt(req, crypto_speck128_encrypt, + speck128_xts_encrypt_neon); +} + +static int speck128_xts_decrypt(struct skcipher_request *req) +{ + return __speck128_xts_crypt(req, crypto_speck128_decrypt, + speck128_xts_decrypt_neon); +} + +static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + int err; + + err = xts_verify_key(tfm, key, keylen); + if (err) + return err; + + keylen /= 2; + + err = crypto_speck128_setkey(&ctx->main_key, key, keylen); + if (err) + return err; + + return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen); +} + +/* Speck64 */ + +struct speck64_xts_tfm_ctx { + struct speck64_tfm_ctx main_key; + struct speck64_tfm_ctx tweak_key; +}; + +asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, void *tweak); + +asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, void *tweak); + +typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *, + u8 *, const u8 *); +typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *, + const void *, unsigned int, void *); + +static __always_inline int +__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one, + speck64_xts_crypt_many_t crypt_many) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + __le64 tweak; + int err; + + err = skcipher_walk_virt(&walk, req, true); + + crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv); + + while (walk.nbytes > 0) { + unsigned int nbytes = walk.nbytes; + u8 *dst = walk.dst.virt.addr; + const u8 *src = walk.src.virt.addr; + + if (nbytes >= 
SPECK_NEON_CHUNK_SIZE && may_use_simd()) { + unsigned int count; + + count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); + kernel_neon_begin(); + (*crypt_many)(ctx->main_key.round_keys, + ctx->main_key.nrounds, + dst, src, count, &tweak); + kernel_neon_end(); + dst += count; + src += count; + nbytes -= count; + } + + /* Handle any remainder with generic code */ + while (nbytes >= sizeof(tweak)) { + *(__le64 *)dst = *(__le64 *)src ^ tweak; + (*crypt_one)(&ctx->main_key, dst, dst); + *(__le64 *)dst ^= tweak; + tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^ + ((tweak & cpu_to_le64(1ULL << 63)) ? + 0x1B : 0)); + dst += sizeof(tweak); + src += sizeof(tweak); + nbytes -= sizeof(tweak); + } + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static int speck64_xts_encrypt(struct skcipher_request *req) +{ + return __speck64_xts_crypt(req, crypto_speck64_encrypt, + speck64_xts_encrypt_neon); +} + +static int speck64_xts_decrypt(struct skcipher_request *req) +{ + return __speck64_xts_crypt(req, crypto_speck64_decrypt, + speck64_xts_decrypt_neon); +} + +static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + int err; + + err = xts_verify_key(tfm, key, keylen); + if (err) + return err; + + keylen /= 2; + + err = crypto_speck64_setkey(&ctx->main_key, key, keylen); + if (err) + return err; + + return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen); +} + +static struct skcipher_alg speck_algs[] = { + { + .base.cra_name = "xts(speck128)", + .base.cra_driver_name = "xts-speck128-neon", + .base.cra_priority = 300, + .base.cra_blocksize = SPECK128_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx), + .base.cra_alignmask = 7, + .base.cra_module = THIS_MODULE, + .min_keysize = 2 * SPECK128_128_KEY_SIZE, + .max_keysize = 2 * SPECK128_256_KEY_SIZE, + .ivsize = SPECK128_BLOCK_SIZE, + .walksize = SPECK_NEON_CHUNK_SIZE, + .setkey = speck128_xts_setkey, + .encrypt = speck128_xts_encrypt, + .decrypt = speck128_xts_decrypt, + }, { + .base.cra_name = "xts(speck64)", + .base.cra_driver_name = "xts-speck64-neon", + .base.cra_priority = 300, + .base.cra_blocksize = SPECK64_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx), + .base.cra_alignmask = 7, + .base.cra_module = THIS_MODULE, + .min_keysize = 2 * SPECK64_96_KEY_SIZE, + .max_keysize = 2 * SPECK64_128_KEY_SIZE, + .ivsize = SPECK64_BLOCK_SIZE, + .walksize = SPECK_NEON_CHUNK_SIZE, + .setkey = speck64_xts_setkey, + .encrypt = speck64_xts_encrypt, + .decrypt = speck64_xts_decrypt, + } +}; + +static int __init speck_neon_module_init(void) +{ + if (!(elf_hwcap & HWCAP_ASIMD)) + return -ENODEV; + return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs)); +} + +static void __exit speck_neon_module_exit(void) +{ + crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs)); +} + +module_init(speck_neon_module_init); +module_exit(speck_neon_module_exit); + +MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>"); +MODULE_ALIAS_CRYPTO("xts(speck128)"); +MODULE_ALIAS_CRYPTO("xts-speck128-neon"); +MODULE_ALIAS_CRYPTO("xts(speck64)"); +MODULE_ALIAS_CRYPTO("xts-speck64-neon"); |
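Finally, a hedged sketch of how a kernel-side caller would exercise the newly registered "xts(speck128)" skcipher through the generic API; the function name and buffer handling are illustrative and not part of the patch. keylen must be two concatenated Speck128 keys (per xts_verify_key()), len a multiple of the 16-byte block size, and buf must be addressable through a scatterlist (not stack or vmalloc memory).

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int speck_xts_demo(const u8 *key, unsigned int keylen,
                          u8 *buf, unsigned int len, u8 *iv)
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        int err;

        tfm = crypto_alloc_skcipher("xts(speck128)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_skcipher_setkey(tfm, key, keylen);
        if (err)
                goto out_free_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        sg_init_one(&sg, buf, len);             /* encrypt buf in place */
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, &sg, &sg, len, iv);
        err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
out_free_tfm:
        crypto_free_skcipher(tfm);
        return err;
}

The .walksize of 128 bytes advertised by the driver only hints to the skcipher walk that the NEON code prefers full 128-byte chunks; callers do not need to do anything special to benefit from it.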