From 714b33d15130cbb5ab426456d4e3de842d6c5b8a Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Tue, 17 Sep 2013 08:33:11 -0400 Subject: crypto: ansi_cprng - Fix off by one error in non-block size request Stephan Mueller reported to me recently a error in random number generation in the ansi cprng. If several small requests are made that are less than the instances block size, the remainder for loop code doesn't increment rand_data_valid in the last iteration, meaning that the last bytes in the rand_data buffer gets reused on the subsequent smaller-than-a-block request for random data. The fix is pretty easy, just re-code the for loop to make sure that rand_data_valid gets incremented appropriately Signed-off-by: Neil Horman Reported-by: Stephan Mueller CC: Stephan Mueller CC: Petr Matousek CC: Herbert Xu CC: "David S. Miller" Signed-off-by: Herbert Xu --- crypto/ansi_cprng.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index c0bb3778f1ae..666f1962a160 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c @@ -230,11 +230,11 @@ remainder: */ if (byte_count < DEFAULT_BLK_SZ) { empty_rbuf: - for (; ctx->rand_data_valid < DEFAULT_BLK_SZ; - ctx->rand_data_valid++) { + while (ctx->rand_data_valid < DEFAULT_BLK_SZ) { *ptr = ctx->rand_data[ctx->rand_data_valid]; ptr++; byte_count--; + ctx->rand_data_valid++; if (byte_count == 0) goto done; } -- cgit v1.2.3 From a62b01cd6cc1feb5e80d64d6937c291473ed82cb Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 20 Sep 2013 09:55:40 +0200 Subject: crypto: create generic version of ablk_helper Create a generic version of ablk_helper so it can be reused by other architectures. Acked-by: Jussi Kivilinna Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/Kconfig | 4 ++ crypto/Makefile | 1 + crypto/ablk_helper.c | 150 +++++++++++++++++++++++++++++++++++++++++++ include/asm-generic/simd.h | 14 ++++ include/crypto/ablk_helper.h | 31 +++++++++ 5 files changed, 200 insertions(+) create mode 100644 crypto/ablk_helper.c create mode 100644 include/asm-generic/simd.h create mode 100644 include/crypto/ablk_helper.h (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 69ce573f1224..8179ae62cec0 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -179,6 +179,10 @@ config CRYPTO_ABLK_HELPER_X86 depends on X86 select CRYPTO_CRYPTD +config CRYPTO_ABLK_HELPER + tristate + select CRYPTO_CRYPTD + config CRYPTO_GLUE_HELPER_X86 tristate depends on X86 diff --git a/crypto/Makefile b/crypto/Makefile index 2d5ed08a239f..580af977f496 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -104,3 +104,4 @@ obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o obj-$(CONFIG_XOR_BLOCKS) += xor.o obj-$(CONFIG_ASYNC_CORE) += async_tx/ obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/ +obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o diff --git a/crypto/ablk_helper.c b/crypto/ablk_helper.c new file mode 100644 index 000000000000..62568b1fc885 --- /dev/null +++ b/crypto/ablk_helper.c @@ -0,0 +1,150 @@ +/* + * Shared async block cipher helpers + * + * Copyright (c) 2012 Jussi Kivilinna + * + * Based on aesni-intel_glue.c by: + * Copyright (C) 2008, Intel Corp. + * Author: Huang Ying + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int key_len) +{ + struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base; + int err; + + crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm) + & CRYPTO_TFM_REQ_MASK); + err = crypto_ablkcipher_setkey(child, key, key_len); + crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child) + & CRYPTO_TFM_RES_MASK); + return err; +} +EXPORT_SYMBOL_GPL(ablk_set_key); + +int __ablk_encrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct blkcipher_desc desc; + + desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); + desc.info = req->info; + desc.flags = 0; + + return crypto_blkcipher_crt(desc.tfm)->encrypt( + &desc, req->dst, req->src, req->nbytes); +} +EXPORT_SYMBOL_GPL(__ablk_encrypt); + +int ablk_encrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); + + if (!may_use_simd()) { + struct ablkcipher_request *cryptd_req = + ablkcipher_request_ctx(req); + + memcpy(cryptd_req, req, sizeof(*req)); + ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); + + return crypto_ablkcipher_encrypt(cryptd_req); + } else { + return __ablk_encrypt(req); + } +} +EXPORT_SYMBOL_GPL(ablk_encrypt); + +int ablk_decrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); + + if (!may_use_simd()) { + struct ablkcipher_request *cryptd_req = + ablkcipher_request_ctx(req); + + memcpy(cryptd_req, req, sizeof(*req)); + ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); + + return crypto_ablkcipher_decrypt(cryptd_req); + } else { + struct blkcipher_desc desc; + + desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); + desc.info = req->info; + desc.flags = 0; + + return crypto_blkcipher_crt(desc.tfm)->decrypt( + &desc, req->dst, req->src, req->nbytes); + } +} +EXPORT_SYMBOL_GPL(ablk_decrypt); + +void ablk_exit(struct crypto_tfm *tfm) +{ + struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm); + + cryptd_free_ablkcipher(ctx->cryptd_tfm); +} +EXPORT_SYMBOL_GPL(ablk_exit); + +int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name) +{ + struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm); + struct cryptd_ablkcipher *cryptd_tfm; + + cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, 0, 0); + if (IS_ERR(cryptd_tfm)) + return PTR_ERR(cryptd_tfm); + + ctx->cryptd_tfm = cryptd_tfm; + tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) + + crypto_ablkcipher_reqsize(&cryptd_tfm->base); + + return 0; +} +EXPORT_SYMBOL_GPL(ablk_init_common); + +int ablk_init(struct crypto_tfm *tfm) +{ + 
char drv_name[CRYPTO_MAX_ALG_NAME]; + + snprintf(drv_name, sizeof(drv_name), "__driver-%s", + crypto_tfm_alg_driver_name(tfm)); + + return ablk_init_common(tfm, drv_name); +} +EXPORT_SYMBOL_GPL(ablk_init); + +MODULE_LICENSE("GPL"); diff --git a/include/asm-generic/simd.h b/include/asm-generic/simd.h new file mode 100644 index 000000000000..f57eb7b5c23b --- /dev/null +++ b/include/asm-generic/simd.h @@ -0,0 +1,14 @@ + +#include + +/* + * may_use_simd - whether it is allowable at this time to issue SIMD + * instructions or access the SIMD register file + * + * As architectures typically don't preserve the SIMD register file when + * taking an interrupt, !in_interrupt() should be a reasonable default. + */ +static __must_check inline bool may_use_simd(void) +{ + return !in_interrupt(); +} diff --git a/include/crypto/ablk_helper.h b/include/crypto/ablk_helper.h new file mode 100644 index 000000000000..4f93df50c23e --- /dev/null +++ b/include/crypto/ablk_helper.h @@ -0,0 +1,31 @@ +/* + * Shared async block cipher helpers + */ + +#ifndef _CRYPTO_ABLK_HELPER_H +#define _CRYPTO_ABLK_HELPER_H + +#include +#include +#include + +struct async_helper_ctx { + struct cryptd_ablkcipher *cryptd_tfm; +}; + +extern int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int key_len); + +extern int __ablk_encrypt(struct ablkcipher_request *req); + +extern int ablk_encrypt(struct ablkcipher_request *req); + +extern int ablk_decrypt(struct ablkcipher_request *req); + +extern void ablk_exit(struct crypto_tfm *tfm); + +extern int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name); + +extern int ablk_init(struct crypto_tfm *tfm); + +#endif /* _CRYPTO_ABLK_HELPER_H */ -- cgit v1.2.3 From 801201aa25646291aa87ba97250f6614639eb2ea Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 20 Sep 2013 09:55:41 +0200 Subject: crypto: move x86 to the generic version of ablk_helper Move all users of ablk_helper under x86/ to the generic version and delete the x86 specific version. 
Acked-by: Jussi Kivilinna Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/x86/crypto/Makefile | 1 - arch/x86/crypto/ablk_helper.c | 149 ----------------------------- arch/x86/crypto/aesni-intel_glue.c | 2 +- arch/x86/crypto/camellia_aesni_avx2_glue.c | 2 +- arch/x86/crypto/camellia_aesni_avx_glue.c | 2 +- arch/x86/crypto/cast5_avx_glue.c | 2 +- arch/x86/crypto/cast6_avx_glue.c | 2 +- arch/x86/crypto/serpent_avx2_glue.c | 2 +- arch/x86/crypto/serpent_avx_glue.c | 2 +- arch/x86/crypto/serpent_sse2_glue.c | 2 +- arch/x86/crypto/twofish_avx_glue.c | 2 +- arch/x86/include/asm/crypto/ablk_helper.h | 31 ------ arch/x86/include/asm/simd.h | 11 +++ crypto/Kconfig | 25 ++--- 14 files changed, 30 insertions(+), 205 deletions(-) delete mode 100644 arch/x86/crypto/ablk_helper.c delete mode 100644 arch/x86/include/asm/crypto/ablk_helper.h create mode 100644 arch/x86/include/asm/simd.h (limited to 'crypto') diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 75b08e1e69db..e0fc24db234a 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -6,7 +6,6 @@ avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no) avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\ $(comma)4)$(comma)%ymm2,yes,no) -obj-$(CONFIG_CRYPTO_ABLK_HELPER_X86) += ablk_helper.o obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o diff --git a/arch/x86/crypto/ablk_helper.c b/arch/x86/crypto/ablk_helper.c deleted file mode 100644 index 43282fe04a8b..000000000000 --- a/arch/x86/crypto/ablk_helper.c +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Shared async block cipher helpers - * - * Copyright (c) 2012 Jussi Kivilinna - * - * Based on aesni-intel_glue.c by: - * Copyright (C) 2008, Intel Corp. - * Author: Huang Ying - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, - unsigned int key_len) -{ - struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); - struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base; - int err; - - crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm) - & CRYPTO_TFM_REQ_MASK); - err = crypto_ablkcipher_setkey(child, key, key_len); - crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child) - & CRYPTO_TFM_RES_MASK); - return err; -} -EXPORT_SYMBOL_GPL(ablk_set_key); - -int __ablk_encrypt(struct ablkcipher_request *req) -{ - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); - struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); - struct blkcipher_desc desc; - - desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); - desc.info = req->info; - desc.flags = 0; - - return crypto_blkcipher_crt(desc.tfm)->encrypt( - &desc, req->dst, req->src, req->nbytes); -} -EXPORT_SYMBOL_GPL(__ablk_encrypt); - -int ablk_encrypt(struct ablkcipher_request *req) -{ - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); - struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); - - if (!irq_fpu_usable()) { - struct ablkcipher_request *cryptd_req = - ablkcipher_request_ctx(req); - - memcpy(cryptd_req, req, sizeof(*req)); - ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); - - return crypto_ablkcipher_encrypt(cryptd_req); - } else { - return __ablk_encrypt(req); - } -} -EXPORT_SYMBOL_GPL(ablk_encrypt); - -int ablk_decrypt(struct ablkcipher_request *req) -{ - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); - struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); - - if (!irq_fpu_usable()) { - struct ablkcipher_request *cryptd_req = - ablkcipher_request_ctx(req); - - memcpy(cryptd_req, req, sizeof(*req)); - ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); - - return crypto_ablkcipher_decrypt(cryptd_req); - } else { - struct blkcipher_desc desc; - - desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); - desc.info = req->info; - desc.flags = 0; - - return crypto_blkcipher_crt(desc.tfm)->decrypt( - &desc, req->dst, req->src, req->nbytes); - } -} -EXPORT_SYMBOL_GPL(ablk_decrypt); - -void ablk_exit(struct crypto_tfm *tfm) -{ - struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm); - - cryptd_free_ablkcipher(ctx->cryptd_tfm); -} -EXPORT_SYMBOL_GPL(ablk_exit); - -int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name) -{ - struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm); - struct cryptd_ablkcipher *cryptd_tfm; - - cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, 0, 0); - if (IS_ERR(cryptd_tfm)) - return PTR_ERR(cryptd_tfm); - - ctx->cryptd_tfm = cryptd_tfm; - tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) + - crypto_ablkcipher_reqsize(&cryptd_tfm->base); - - return 0; -} -EXPORT_SYMBOL_GPL(ablk_init_common); - -int ablk_init(struct crypto_tfm *tfm) -{ - char drv_name[CRYPTO_MAX_ALG_NAME]; - - snprintf(drv_name, sizeof(drv_name), "__driver-%s", - crypto_tfm_alg_driver_name(tfm)); - - return ablk_init_common(tfm, drv_name); -} -EXPORT_SYMBOL_GPL(ablk_init); - -MODULE_LICENSE("GPL"); diff --git 
a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index f80e668785c0..835488b745ee 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -34,7 +34,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c index 414fe5d7946b..4209a76fcdaa 100644 --- a/arch/x86/crypto/camellia_aesni_avx2_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -21,7 +22,6 @@ #include #include #include -#include #include #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c index 37fd0c0a81ea..87a041a10f4a 100644 --- a/arch/x86/crypto/camellia_aesni_avx_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx_glue.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -21,7 +22,6 @@ #include #include #include -#include #include #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c index c6631813dc11..e6a3700489b9 100644 --- a/arch/x86/crypto/cast5_avx_glue.c +++ b/arch/x86/crypto/cast5_avx_glue.c @@ -26,13 +26,13 @@ #include #include #include +#include #include #include #include #include #include #include -#include #include #define CAST5_PARALLEL_BLOCKS 16 diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c index 8d0dfb86a559..09f3677393e4 100644 --- a/arch/x86/crypto/cast6_avx_glue.c +++ b/arch/x86/crypto/cast6_avx_glue.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -37,7 +38,6 @@ #include #include #include -#include #include #define CAST6_PARALLEL_BLOCKS 8 diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c index 23aabc6c20a5..2fae489b1524 100644 --- a/arch/x86/crypto/serpent_avx2_glue.c +++ b/arch/x86/crypto/serpent_avx2_glue.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -22,7 +23,6 @@ #include #include #include -#include #include #define SERPENT_AVX2_PARALLEL_BLOCKS 16 diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c index 9ae83cf8d21e..ff4870870972 100644 --- a/arch/x86/crypto/serpent_avx_glue.c +++ b/arch/x86/crypto/serpent_avx_glue.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -38,7 +39,6 @@ #include #include #include -#include #include /* 8-way parallel cipher functions */ diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c index 97a356ece24d..8c95f8637306 100644 --- a/arch/x86/crypto/serpent_sse2_glue.c +++ b/arch/x86/crypto/serpent_sse2_glue.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -42,7 +43,6 @@ #include #include #include -#include #include static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src) diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c index a62ba541884e..4e3c665be129 100644 --- a/arch/x86/crypto/twofish_avx_glue.c +++ b/arch/x86/crypto/twofish_avx_glue.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -39,7 +40,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/include/asm/crypto/ablk_helper.h 
b/arch/x86/include/asm/crypto/ablk_helper.h deleted file mode 100644 index 4f93df50c23e..000000000000 --- a/arch/x86/include/asm/crypto/ablk_helper.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Shared async block cipher helpers - */ - -#ifndef _CRYPTO_ABLK_HELPER_H -#define _CRYPTO_ABLK_HELPER_H - -#include -#include -#include - -struct async_helper_ctx { - struct cryptd_ablkcipher *cryptd_tfm; -}; - -extern int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, - unsigned int key_len); - -extern int __ablk_encrypt(struct ablkcipher_request *req); - -extern int ablk_encrypt(struct ablkcipher_request *req); - -extern int ablk_decrypt(struct ablkcipher_request *req); - -extern void ablk_exit(struct crypto_tfm *tfm); - -extern int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name); - -extern int ablk_init(struct crypto_tfm *tfm); - -#endif /* _CRYPTO_ABLK_HELPER_H */ diff --git a/arch/x86/include/asm/simd.h b/arch/x86/include/asm/simd.h new file mode 100644 index 000000000000..ee80b92f0096 --- /dev/null +++ b/arch/x86/include/asm/simd.h @@ -0,0 +1,11 @@ + +#include + +/* + * may_use_simd - whether it is allowable at this time to issue SIMD + * instructions or access the SIMD register file + */ +static __must_check inline bool may_use_simd(void) +{ + return irq_fpu_usable(); +} diff --git a/crypto/Kconfig b/crypto/Kconfig index 8179ae62cec0..7c0a4c5de075 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -174,11 +174,6 @@ config CRYPTO_TEST help Quick & dirty crypto test module. -config CRYPTO_ABLK_HELPER_X86 - tristate - depends on X86 - select CRYPTO_CRYPTD - config CRYPTO_ABLK_HELPER tristate select CRYPTO_CRYPTD @@ -699,7 +694,7 @@ config CRYPTO_AES_NI_INTEL select CRYPTO_AES_X86_64 if 64BIT select CRYPTO_AES_586 if !64BIT select CRYPTO_CRYPTD - select CRYPTO_ABLK_HELPER_X86 + select CRYPTO_ABLK_HELPER select CRYPTO_ALGAPI select CRYPTO_GLUE_HELPER_X86 if 64BIT select CRYPTO_LRW @@ -883,7 +878,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64 depends on CRYPTO select CRYPTO_ALGAPI select CRYPTO_CRYPTD - select CRYPTO_ABLK_HELPER_X86 + select CRYPTO_ABLK_HELPER select CRYPTO_GLUE_HELPER_X86 select CRYPTO_CAMELLIA_X86_64 select CRYPTO_LRW @@ -905,7 +900,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 depends on CRYPTO select CRYPTO_ALGAPI select CRYPTO_CRYPTD - select CRYPTO_ABLK_HELPER_X86 + select CRYPTO_ABLK_HELPER select CRYPTO_GLUE_HELPER_X86 select CRYPTO_CAMELLIA_X86_64 select CRYPTO_CAMELLIA_AESNI_AVX_X86_64 @@ -957,7 +952,7 @@ config CRYPTO_CAST5_AVX_X86_64 depends on X86 && 64BIT select CRYPTO_ALGAPI select CRYPTO_CRYPTD - select CRYPTO_ABLK_HELPER_X86 + select CRYPTO_ABLK_HELPER select CRYPTO_CAST_COMMON select CRYPTO_CAST5 help @@ -980,7 +975,7 @@ config CRYPTO_CAST6_AVX_X86_64 depends on X86 && 64BIT select CRYPTO_ALGAPI select CRYPTO_CRYPTD - select CRYPTO_ABLK_HELPER_X86 + select CRYPTO_ABLK_HELPER select CRYPTO_GLUE_HELPER_X86 select CRYPTO_CAST_COMMON select CRYPTO_CAST6 @@ -1098,7 +1093,7 @@ config CRYPTO_SERPENT_SSE2_X86_64 depends on X86 && 64BIT select CRYPTO_ALGAPI select CRYPTO_CRYPTD - select CRYPTO_ABLK_HELPER_X86 + select CRYPTO_ABLK_HELPER select CRYPTO_GLUE_HELPER_X86 select CRYPTO_SERPENT select CRYPTO_LRW @@ -1120,7 +1115,7 @@ config CRYPTO_SERPENT_SSE2_586 depends on X86 && !64BIT select CRYPTO_ALGAPI select CRYPTO_CRYPTD - select CRYPTO_ABLK_HELPER_X86 + select CRYPTO_ABLK_HELPER select CRYPTO_GLUE_HELPER_X86 select CRYPTO_SERPENT select CRYPTO_LRW @@ -1142,7 +1137,7 @@ config CRYPTO_SERPENT_AVX_X86_64 depends on X86 && 64BIT select CRYPTO_ALGAPI 
select CRYPTO_CRYPTD - select CRYPTO_ABLK_HELPER_X86 + select CRYPTO_ABLK_HELPER select CRYPTO_GLUE_HELPER_X86 select CRYPTO_SERPENT select CRYPTO_LRW @@ -1164,7 +1159,7 @@ config CRYPTO_SERPENT_AVX2_X86_64 depends on X86 && 64BIT select CRYPTO_ALGAPI select CRYPTO_CRYPTD - select CRYPTO_ABLK_HELPER_X86 + select CRYPTO_ABLK_HELPER select CRYPTO_GLUE_HELPER_X86 select CRYPTO_SERPENT select CRYPTO_SERPENT_AVX_X86_64 @@ -1280,7 +1275,7 @@ config CRYPTO_TWOFISH_AVX_X86_64 depends on X86 && 64BIT select CRYPTO_ALGAPI select CRYPTO_CRYPTD - select CRYPTO_ABLK_HELPER_X86 + select CRYPTO_ABLK_HELPER select CRYPTO_GLUE_HELPER_X86 select CRYPTO_TWOFISH_COMMON select CRYPTO_TWOFISH_X86_64 -- cgit v1.2.3 From ea493d324f6ae26907e3d99fb0dc7a89083ad798 Mon Sep 17 00:00:00 2001 From: kbuild test robot Date: Tue, 24 Sep 2013 08:21:29 +0800 Subject: crypto: ablk_helper - Replace memcpy with struct assignment tree: git://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git master head: 48e6dc1b2a1ad8186d48968d5018912bdacac744 commit: a62b01cd6cc1feb5e80d64d6937c291473ed82cb [20/24] crypto: create generic version of ablk_helper coccinelle warnings: (new ones prefixed by >>) >> crypto/ablk_helper.c:97:2-8: Replace memcpy with struct assignment >> crypto/ablk_helper.c:78:2-8: Replace memcpy with struct assignment Please consider folding the attached diff :-) Signed-off-by: Herbert Xu --- crypto/ablk_helper.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/ablk_helper.c b/crypto/ablk_helper.c index 62568b1fc885..ffe7278d4bd8 100644 --- a/crypto/ablk_helper.c +++ b/crypto/ablk_helper.c @@ -75,7 +75,7 @@ int ablk_encrypt(struct ablkcipher_request *req) struct ablkcipher_request *cryptd_req = ablkcipher_request_ctx(req); - memcpy(cryptd_req, req, sizeof(*req)); + *cryptd_req = *req; ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); return crypto_ablkcipher_encrypt(cryptd_req); @@ -94,7 +94,7 @@ int ablk_decrypt(struct ablkcipher_request *req) struct ablkcipher_request *cryptd_req = ablkcipher_request_ctx(req); - memcpy(cryptd_req, req, sizeof(*req)); + *cryptd_req = *req; ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); return crypto_ablkcipher_decrypt(cryptd_req); -- cgit v1.2.3 From 6bf37e5aa90f18baf5acf4874bca505dd667c37f Mon Sep 17 00:00:00 2001 From: James Yonan Date: Thu, 26 Sep 2013 02:20:39 -0600 Subject: crypto: crypto_memneq - add equality testing of memory regions w/o timing leaks When comparing MAC hashes, AEAD authentication tags, or other hash values in the context of authentication or integrity checking, it is important not to leak timing information to a potential attacker, i.e. when communication happens over a network. Bytewise memory comparisons (such as memcmp) are usually optimized so that they return a nonzero value as soon as a mismatch is found. E.g, on x86_64/i5 for 512 bytes this can be ~50 cyc for a full mismatch and up to ~850 cyc for a full match (cold). This early-return behavior can leak timing information as a side channel, allowing an attacker to iteratively guess the correct result. This patch adds a new method crypto_memneq ("memory not equal to each other") to the crypto API that compares memory areas of the same length in roughly "constant time" (cache misses could change the timing, but since they don't reveal information about the content of the strings being compared, they are effectively benign). 
Iow, best and worst case behaviour take the same amount of time to complete (in contrast to memcmp). Note that crypto_memneq (unlike memcmp) can only be used to test for equality or inequality, NOT for lexicographical order. This, however, is not an issue for its use-cases within the crypto API. We tried to locate all of the places in the crypto API where memcmp was being used for authentication or integrity checking, and convert them over to crypto_memneq. crypto_memneq is declared noinline, placed in its own source file, and compiled with optimizations that might increase code size disabled ("Os") because a smart compiler (or LTO) might notice that the return value is always compared against zero/nonzero, and might then reintroduce the same early-return optimization that we are trying to avoid. Using #pragma or __attribute__ optimization annotations of the code for disabling optimization was avoided as it seems to be considered broken or unmaintained for long time in GCC [1]. Therefore, we work around that by specifying the compile flag for memneq.o directly in the Makefile. We found that this seems to be most appropriate. As we use ("Os"), this patch also provides a loop-free "fast-path" for frequently used 16 byte digests. Similarly to kernel library string functions, leave an option for future even further optimized architecture specific assembler implementations. This was a joint work of James Yonan and Daniel Borkmann. Also thanks for feedback from Florian Weimer on this and earlier proposals [2]. [1] http://gcc.gnu.org/ml/gcc/2012-07/msg00211.html [2] https://lkml.org/lkml/2013/2/10/131 Signed-off-by: James Yonan Signed-off-by: Daniel Borkmann Cc: Florian Weimer Signed-off-by: Herbert Xu --- crypto/Makefile | 7 ++- crypto/asymmetric_keys/rsa.c | 5 +- crypto/authenc.c | 6 +- crypto/authencesn.c | 8 +-- crypto/ccm.c | 4 +- crypto/gcm.c | 2 +- crypto/memneq.c | 138 +++++++++++++++++++++++++++++++++++++++++++ include/crypto/algapi.h | 18 +++++- 8 files changed, 174 insertions(+), 14 deletions(-) create mode 100644 crypto/memneq.c (limited to 'crypto') diff --git a/crypto/Makefile b/crypto/Makefile index 580af977f496..d6a401c58d17 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -2,8 +2,13 @@ # Cryptographic API # +# memneq MUST be built with -Os or -O0 to prevent early-return optimizations +# that will defeat memneq's actual purpose to prevent timing attacks. 
+CFLAGS_REMOVE_memneq.o := -O1 -O2 -O3 +CFLAGS_memneq.o := -Os + obj-$(CONFIG_CRYPTO) += crypto.o -crypto-y := api.o cipher.o compress.o +crypto-y := api.o cipher.o compress.o memneq.o obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o diff --git a/crypto/asymmetric_keys/rsa.c b/crypto/asymmetric_keys/rsa.c index 4a6a0696f8a3..1912b9be5043 100644 --- a/crypto/asymmetric_keys/rsa.c +++ b/crypto/asymmetric_keys/rsa.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "public_key.h" MODULE_LICENSE("GPL"); @@ -189,12 +190,12 @@ static int RSA_verify(const u8 *H, const u8 *EM, size_t k, size_t hash_size, } } - if (memcmp(asn1_template, EM + T_offset, asn1_size) != 0) { + if (crypto_memneq(asn1_template, EM + T_offset, asn1_size) != 0) { kleave(" = -EBADMSG [EM[T] ASN.1 mismatch]"); return -EBADMSG; } - if (memcmp(H, EM + T_offset + asn1_size, hash_size) != 0) { + if (crypto_memneq(H, EM + T_offset + asn1_size, hash_size) != 0) { kleave(" = -EKEYREJECTED [EM[T] hash mismatch]"); return -EKEYREJECTED; } diff --git a/crypto/authenc.c b/crypto/authenc.c index ffce19de05cf..2b3f4abda929 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -188,7 +188,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq, scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); - err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; + err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; if (err) goto out; @@ -227,7 +227,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq, scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); - err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; + err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; if (err) goto out; @@ -462,7 +462,7 @@ static int crypto_authenc_verify(struct aead_request *req, ihash = ohash + authsize; scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); - return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0; + return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0; } static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, diff --git a/crypto/authencesn.c b/crypto/authencesn.c index ab53762fc309..c569d58de661 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -247,7 +247,7 @@ static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *ar scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); - err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; + err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; if (err) goto out; @@ -296,7 +296,7 @@ static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *a scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); - err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; + err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; if (err) goto out; @@ -336,7 +336,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq, scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); - err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; + err = crypto_memneq(ihash, ahreq->result, authsize) ? 
-EBADMSG : 0; if (err) goto out; @@ -568,7 +568,7 @@ static int crypto_authenc_esn_verify(struct aead_request *req) ihash = ohash + authsize; scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); - return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0; + return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0; } static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv, diff --git a/crypto/ccm.c b/crypto/ccm.c index 499c91717d93..3e05499d183a 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -363,7 +363,7 @@ static void crypto_ccm_decrypt_done(struct crypto_async_request *areq, if (!err) { err = crypto_ccm_auth(req, req->dst, cryptlen); - if (!err && memcmp(pctx->auth_tag, pctx->odata, authsize)) + if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize)) err = -EBADMSG; } aead_request_complete(req, err); @@ -422,7 +422,7 @@ static int crypto_ccm_decrypt(struct aead_request *req) return err; /* verify */ - if (memcmp(authtag, odata, authsize)) + if (crypto_memneq(authtag, odata, authsize)) return -EBADMSG; return err; diff --git a/crypto/gcm.c b/crypto/gcm.c index 43e1fb05ea54..b4f017939004 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -582,7 +582,7 @@ static int crypto_gcm_verify(struct aead_request *req, crypto_xor(auth_tag, iauth_tag, 16); scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); - return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; + return crypto_memneq(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; } static void gcm_decrypt_done(struct crypto_async_request *areq, int err) diff --git a/crypto/memneq.c b/crypto/memneq.c new file mode 100644 index 000000000000..cd0162221c14 --- /dev/null +++ b/crypto/memneq.c @@ -0,0 +1,138 @@ +/* + * Constant-time equality testing of memory regions. + * + * Authors: + * + * James Yonan + * Daniel Borkmann + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of OpenVPN Technologies nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ + +/* Generic path for arbitrary size */ +static inline unsigned long +__crypto_memneq_generic(const void *a, const void *b, size_t size) +{ + unsigned long neq = 0; + +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + while (size >= sizeof(unsigned long)) { + neq |= *(unsigned long *)a ^ *(unsigned long *)b; + a += sizeof(unsigned long); + b += sizeof(unsigned long); + size -= sizeof(unsigned long); + } +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + while (size > 0) { + neq |= *(unsigned char *)a ^ *(unsigned char *)b; + a += 1; + b += 1; + size -= 1; + } + return neq; +} + +/* Loop-free fast-path for frequently used 16-byte size */ +static inline unsigned long __crypto_memneq_16(const void *a, const void *b) +{ +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (sizeof(unsigned long) == 8) + return ((*(unsigned long *)(a) ^ *(unsigned long *)(b)) + | (*(unsigned long *)(a+8) ^ *(unsigned long *)(b+8))); + else if (sizeof(unsigned int) == 4) + return ((*(unsigned int *)(a) ^ *(unsigned int *)(b)) + | (*(unsigned int *)(a+4) ^ *(unsigned int *)(b+4)) + | (*(unsigned int *)(a+8) ^ *(unsigned int *)(b+8)) + | (*(unsigned int *)(a+12) ^ *(unsigned int *)(b+12))); + else +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + return ((*(unsigned char *)(a) ^ *(unsigned char *)(b)) + | (*(unsigned char *)(a+1) ^ *(unsigned char *)(b+1)) + | (*(unsigned char *)(a+2) ^ *(unsigned char *)(b+2)) + | (*(unsigned char *)(a+3) ^ *(unsigned char *)(b+3)) + | (*(unsigned char *)(a+4) ^ *(unsigned char *)(b+4)) + | (*(unsigned char *)(a+5) ^ *(unsigned char *)(b+5)) + | (*(unsigned char *)(a+6) ^ *(unsigned char *)(b+6)) + | (*(unsigned char *)(a+7) ^ *(unsigned char *)(b+7)) + | (*(unsigned char *)(a+8) ^ *(unsigned char *)(b+8)) + | (*(unsigned char *)(a+9) ^ *(unsigned char *)(b+9)) + | (*(unsigned char *)(a+10) ^ *(unsigned char *)(b+10)) + | (*(unsigned char *)(a+11) ^ *(unsigned char *)(b+11)) + | (*(unsigned char *)(a+12) ^ *(unsigned char *)(b+12)) + | (*(unsigned char *)(a+13) ^ *(unsigned char *)(b+13)) + | (*(unsigned char *)(a+14) ^ *(unsigned char *)(b+14)) + | (*(unsigned char *)(a+15) ^ *(unsigned char *)(b+15))); +} + +/* Compare two areas of memory without leaking timing information, + * and with special optimizations for common sizes. Users should + * not call this function directly, but should instead use + * crypto_memneq defined in crypto/algapi.h. 
+ */ +noinline unsigned long __crypto_memneq(const void *a, const void *b, + size_t size) +{ + switch (size) { + case 16: + return __crypto_memneq_16(a, b); + default: + return __crypto_memneq_generic(a, b, size); + } +} +EXPORT_SYMBOL(__crypto_memneq); + +#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */ diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 418d270e1806..e73c19e90e38 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -386,5 +386,21 @@ static inline int crypto_requires_sync(u32 type, u32 mask) return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC; } -#endif /* _CRYPTO_ALGAPI_H */ +noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size); + +/** + * crypto_memneq - Compare two areas of memory without leaking + * timing information. + * + * @a: One area of memory + * @b: Another area of memory + * @size: The size of the area. + * + * Returns 0 when data is equal, 1 otherwise. + */ +static inline int crypto_memneq(const void *a, const void *b, size_t size) +{ + return __crypto_memneq(a, b, size) != 0UL ? 1 : 0; +} +#endif /* _CRYPTO_ALGAPI_H */ -- cgit v1.2.3 From bc6e2bdb71056607141ada309a185f0a50b1aeaf Mon Sep 17 00:00:00 2001 From: Mathias Krause Date: Tue, 15 Oct 2013 13:49:30 +0200 Subject: crypto: authenc - Export key parsing helper function AEAD key parsing is duplicated to multiple places in the kernel. Add a common helper function to consolidate that functionality. Cc: Herbert Xu Cc: "David S. Miller" Signed-off-by: Mathias Krause Signed-off-by: Herbert Xu --- crypto/authenc.c | 48 ++++++++++++++++++++++++++++++------------------ include/crypto/authenc.h | 12 +++++++++++- 2 files changed, 41 insertions(+), 19 deletions(-) (limited to 'crypto') diff --git a/crypto/authenc.c b/crypto/authenc.c index 2b3f4abda929..1875e7026e8f 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -52,40 +52,52 @@ static void authenc_request_complete(struct aead_request *req, int err) aead_request_complete(req, err); } -static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, - unsigned int keylen) +int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, + unsigned int keylen) { - unsigned int authkeylen; - unsigned int enckeylen; - struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); - struct crypto_ahash *auth = ctx->auth; - struct crypto_ablkcipher *enc = ctx->enc; - struct rtattr *rta = (void *)key; + struct rtattr *rta = (struct rtattr *)key; struct crypto_authenc_key_param *param; - int err = -EINVAL; if (!RTA_OK(rta, keylen)) - goto badkey; + return -EINVAL; if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) - goto badkey; + return -EINVAL; if (RTA_PAYLOAD(rta) < sizeof(*param)) - goto badkey; + return -EINVAL; param = RTA_DATA(rta); - enckeylen = be32_to_cpu(param->enckeylen); + keys->enckeylen = be32_to_cpu(param->enckeylen); key += RTA_ALIGN(rta->rta_len); keylen -= RTA_ALIGN(rta->rta_len); - if (keylen < enckeylen) - goto badkey; + if (keylen < keys->enckeylen) + return -EINVAL; - authkeylen = keylen - enckeylen; + keys->authkeylen = keylen - keys->enckeylen; + keys->authkey = key; + keys->enckey = key + keys->authkeylen; + + return 0; +} +EXPORT_SYMBOL_GPL(crypto_authenc_extractkeys); + +static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, + unsigned int keylen) +{ + struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); + struct crypto_ahash *auth = ctx->auth; + struct crypto_ablkcipher *enc = ctx->enc; + struct crypto_authenc_keys keys; + int err 
= -EINVAL; + + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) + goto badkey; crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) & CRYPTO_TFM_REQ_MASK); - err = crypto_ahash_setkey(auth, key, authkeylen); + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen); crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) & CRYPTO_TFM_RES_MASK); @@ -95,7 +107,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) & CRYPTO_TFM_REQ_MASK); - err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen); + err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen); crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) & CRYPTO_TFM_RES_MASK); diff --git a/include/crypto/authenc.h b/include/crypto/authenc.h index e47b044929a8..6775059539b5 100644 --- a/include/crypto/authenc.h +++ b/include/crypto/authenc.h @@ -23,5 +23,15 @@ struct crypto_authenc_key_param { __be32 enckeylen; }; -#endif /* _CRYPTO_AUTHENC_H */ +struct crypto_authenc_keys { + const u8 *authkey; + const u8 *enckey; + + unsigned int authkeylen; + unsigned int enckeylen; +}; +int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, + unsigned int keylen); + +#endif /* _CRYPTO_AUTHENC_H */ -- cgit v1.2.3 From fddc2c43c48d62f70553785d1220505f33aebe0e Mon Sep 17 00:00:00 2001 From: Mathias Krause Date: Tue, 15 Oct 2013 13:49:31 +0200 Subject: crypto: authencesn - Simplify key parsing Use the common helper function crypto_authenc_extractkeys() for key parsing. Cc: Herbert Xu Cc: "David S. Miller" Signed-off-by: Mathias Krause Signed-off-by: Herbert Xu --- crypto/authencesn.c | 26 ++++---------------------- 1 file changed, 4 insertions(+), 22 deletions(-) (limited to 'crypto') diff --git a/crypto/authencesn.c b/crypto/authencesn.c index c569d58de661..4be0dd4373a9 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -59,37 +59,19 @@ static void authenc_esn_request_complete(struct aead_request *req, int err) static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key, unsigned int keylen) { - unsigned int authkeylen; - unsigned int enckeylen; struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); struct crypto_ahash *auth = ctx->auth; struct crypto_ablkcipher *enc = ctx->enc; - struct rtattr *rta = (void *)key; - struct crypto_authenc_key_param *param; + struct crypto_authenc_keys keys; int err = -EINVAL; - if (!RTA_OK(rta, keylen)) + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) goto badkey; - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) - goto badkey; - if (RTA_PAYLOAD(rta) < sizeof(*param)) - goto badkey; - - param = RTA_DATA(rta); - enckeylen = be32_to_cpu(param->enckeylen); - - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); - - if (keylen < enckeylen) - goto badkey; - - authkeylen = keylen - enckeylen; crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) & CRYPTO_TFM_REQ_MASK); - err = crypto_ahash_setkey(auth, key, authkeylen); + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen); crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) & CRYPTO_TFM_RES_MASK); @@ -99,7 +81,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 * crypto_ablkcipher_clear_flags(enc, 
CRYPTO_TFM_REQ_MASK); crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) & CRYPTO_TFM_REQ_MASK); - err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen); + err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen); crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) & CRYPTO_TFM_RES_MASK); -- cgit v1.2.3 From f3d53ed038944a5e785f04952170f7e239a49ae8 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 30 Oct 2013 09:51:45 +0800 Subject: crypto: skcipher - Use eseqiv even on UP machines Previously we would use eseqiv on all async ciphers in all cases, and sync ciphers if we have more than one CPU. This meant that chainiv is only used in the case of sync ciphers on a UP machine. As chainiv may aid attackers by making the IV predictable, even though this risk itself is small, the above usage pattern causes it to further leak information about the host. This patch addresses these issues by using eseqiv even if we're on a UP machine. Signed-off-by: Herbert Xu Acked-by: Steffen Klassert Acked-by: David S. Miller --- crypto/ablkcipher.c | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) (limited to 'crypto') diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index 7d4a8d28277e..40886c489903 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -16,9 +16,7 @@ #include #include #include -#include #include -#include #include #include #include @@ -30,8 +28,6 @@ #include "internal.h" -static const char *skcipher_default_geniv __read_mostly; - struct ablkcipher_buffer { struct list_head entry; struct scatter_walk dst; @@ -527,8 +523,7 @@ const char *crypto_default_geniv(const struct crypto_alg *alg) alg->cra_blocksize) return "chainiv"; - return alg->cra_flags & CRYPTO_ALG_ASYNC ? - "eseqiv" : skcipher_default_geniv; + return "eseqiv"; } static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) @@ -709,17 +704,3 @@ err: return ERR_PTR(err); } EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher); - -static int __init skcipher_module_init(void) -{ - skcipher_default_geniv = num_possible_cpus() > 1 ? - "eseqiv" : "chainiv"; - return 0; -} - -static void skcipher_module_exit(void) -{ -} - -module_init(skcipher_module_init); -module_exit(skcipher_module_exit); -- cgit v1.2.3
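
Editorial note on the ansi_cprng fix above: a stand-alone toy program (not the kernel code) that shows the off-by-one. The buffer here is filled with a counter pattern purely so the reused byte is easy to spot. In the buggy loop shape, the early return fires before the for-loop's increment runs, so the cursor is left pointing at the byte that was just handed out; the fixed shape advances the cursor before checking whether the request is satisfied, exactly as the patch does.

#include <stdio.h>

#define BLK_SZ 16

static unsigned char rand_data[BLK_SZ];
static unsigned int rand_data_valid;	/* index of the next unread byte */

/* Buggy shape: the early return skips the for-loop's increment, so the
 * byte just served is handed out again on the next short request. */
static void serve_buggy(unsigned char *ptr, unsigned int byte_count)
{
	for (; rand_data_valid < BLK_SZ; rand_data_valid++) {
		*ptr++ = rand_data[rand_data_valid];
		byte_count--;
		if (byte_count == 0)
			return;
	}
}

/* Fixed shape, mirroring the patch: advance the cursor first. */
static void serve_fixed(unsigned char *ptr, unsigned int byte_count)
{
	while (rand_data_valid < BLK_SZ) {
		*ptr++ = rand_data[rand_data_valid];
		byte_count--;
		rand_data_valid++;
		if (byte_count == 0)
			return;
	}
}

int main(void)
{
	unsigned char out[4];
	unsigned int i;

	for (i = 0; i < BLK_SZ; i++)
		rand_data[i] = (unsigned char)i;

	rand_data_valid = 0;
	serve_buggy(out, 4);		/* serves bytes 0..3 */
	serve_buggy(out, 4);		/* serves 3,4,5,6 - byte 3 repeated */
	printf("buggy: second request starts with byte %u\n", (unsigned)out[0]);

	rand_data_valid = 0;
	serve_fixed(out, 4);		/* serves bytes 0..3 */
	serve_fixed(out, 4);		/* serves 4..7 */
	printf("fixed: second request starts with byte %u\n", (unsigned)out[0]);
	return 0;
}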
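
Editorial note on the crypto_memneq patch: the core idea, reduced to a few lines of portable C. This is only the byte-at-a-time slow path; the kernel version adds a word-wide loop, a loop-free 16-byte case, and is compiled with -Os precisely so the compiler cannot reintroduce the early return the function exists to avoid. The function name below is illustrative, not the kernel API.

#include <stddef.h>

/* Accumulate the XOR of every byte pair and inspect the result only after
 * the whole buffer has been walked, so the running time does not depend on
 * where (or whether) the first mismatch occurs.  Returns 0 for equal,
 * nonzero otherwise - no ordering information, unlike memcmp(). */
static unsigned long ct_memneq(const void *a, const void *b, size_t size)
{
	const unsigned char *pa = a, *pb = b;
	unsigned long neq = 0;

	while (size--)
		neq |= (unsigned long)(*pa++ ^ *pb++);

	return neq;
}

Callers use it the same way the conversions in the patch do, e.g. return ct_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;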
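
Editorial note on the ablk_helper / asm-generic simd.h patches: the control flow that ablk_encrypt() and ablk_decrypt() share boils down to one question, namely whether the SIMD register file may be touched right now. The toy program below models that dispatch with made-up helpers (simd_ok, queue_to_worker, do_simd_crypt are stand-ins, not kernel APIs): when SIMD is unavailable, typically because the caller is in interrupt context, the request is handed to an async cryptd-style worker that runs it later in process context; otherwise the SIMD code path runs inline.

#include <stdbool.h>
#include <stdio.h>

struct request { int id; };

static bool simd_ok;	/* models may_use_simd(): false when "in interrupt" */

static int queue_to_worker(struct request *req)
{
	printf("req %d: deferred to async worker\n", req->id);
	return -115;	/* like -EINPROGRESS: completion arrives later */
}

static int do_simd_crypt(struct request *req)
{
	printf("req %d: handled inline with SIMD\n", req->id);
	return 0;
}

/* The shape shared by ablk_encrypt()/ablk_decrypt(): only touch SIMD
 * state when the architecture says it is currently safe to do so. */
static int crypt_dispatch(struct request *req)
{
	if (!simd_ok)
		return queue_to_worker(req);
	return do_simd_crypt(req);
}

int main(void)
{
	struct request r = { 1 };

	simd_ok = false;	/* e.g. called from interrupt context */
	crypt_dispatch(&r);

	simd_ok = true;		/* normal process context */
	crypt_dispatch(&r);
	return 0;
}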