Diffstat (limited to 'crypto')
57 files changed, 4534 insertions, 2415 deletions
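The headline addition in this series is the acomp/scomp compression interface (crypto/acompress.c and crypto/scompress.c below, built as crypto_acompress.o under the new CONFIG_CRYPTO_ACOMP2), which puts the existing synchronous compressors behind an asynchronous, scatterlist-based API. As a rough illustration of how a caller is expected to drive it — a sketch only, assuming the helpers declared in the new <crypto/acompress.h>, which sits outside the crypto/ diffstat shown here — compressing one linear buffer might look like the following; handling of asynchronous completion (-EINPROGRESS/-EBUSY plus acomp_request_set_callback()) is deliberately omitted:

#include <crypto/acompress.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/*
 * Sketch only: compress one lowmem buffer with the new acomp API.
 * "lzo" is an arbitrary example; with this series every scomp algorithm
 * (lzo, 842, deflate, lz4, ...) is also reachable as an acomp transform.
 */
static int example_compress(const void *in, unsigned int inlen,
			    void *out, unsigned int *outlen)
{
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	struct scatterlist src, dst;
	int err;

	tfm = crypto_alloc_acomp("lzo", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* in/out must be sg-mappable (no stack or vmalloc memory) */
	sg_init_one(&src, in, inlen);
	sg_init_one(&dst, out, *outlen);
	acomp_request_set_params(req, &src, &dst, inlen, *outlen);

	/* an scomp-backed transform completes synchronously */
	err = crypto_acomp_compress(req);
	if (!err)
		*outlen = req->dlen;

	acomp_request_free(req);
out_free_tfm:
	crypto_free_acomp(tfm);
	return err;
}

Existing crypto_comp users are untouched: as the 842.c hunks below show, the legacy crypto_alg stays registered and an scomp_alg ("842-scomp") is simply registered alongside it, while scomp algorithms are exposed through the acomp interface via crypto_init_scomp_ops_async().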
diff --git a/crypto/842.c b/crypto/842.c index 98e387efb8c8..bc26dc942821 100644 --- a/crypto/842.c +++ b/crypto/842.c @@ -31,11 +31,46 @@ #include <linux/module.h> #include <linux/crypto.h> #include <linux/sw842.h> +#include <crypto/internal/scompress.h> struct crypto842_ctx { - char wmem[SW842_MEM_COMPRESS]; /* working memory for compress */ + void *wmem; /* working memory for compress */ }; +static void *crypto842_alloc_ctx(struct crypto_scomp *tfm) +{ + void *ctx; + + ctx = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL); + if (!ctx) + return ERR_PTR(-ENOMEM); + + return ctx; +} + +static int crypto842_init(struct crypto_tfm *tfm) +{ + struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->wmem = crypto842_alloc_ctx(NULL); + if (IS_ERR(ctx->wmem)) + return -ENOMEM; + + return 0; +} + +static void crypto842_free_ctx(struct crypto_scomp *tfm, void *ctx) +{ + kfree(ctx); +} + +static void crypto842_exit(struct crypto_tfm *tfm) +{ + struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto842_free_ctx(NULL, ctx->wmem); +} + static int crypto842_compress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) @@ -45,6 +80,13 @@ static int crypto842_compress(struct crypto_tfm *tfm, return sw842_compress(src, slen, dst, dlen, ctx->wmem); } +static int crypto842_scompress(struct crypto_scomp *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen, void *ctx) +{ + return sw842_compress(src, slen, dst, dlen, ctx); +} + static int crypto842_decompress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) @@ -52,6 +94,13 @@ static int crypto842_decompress(struct crypto_tfm *tfm, return sw842_decompress(src, slen, dst, dlen); } +static int crypto842_sdecompress(struct crypto_scomp *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen, void *ctx) +{ + return sw842_decompress(src, slen, dst, dlen); +} + static struct crypto_alg alg = { .cra_name = "842", .cra_driver_name = "842-generic", @@ -59,20 +108,48 @@ static struct crypto_alg alg = { .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, .cra_ctxsize = sizeof(struct crypto842_ctx), .cra_module = THIS_MODULE, + .cra_init = crypto842_init, + .cra_exit = crypto842_exit, .cra_u = { .compress = { .coa_compress = crypto842_compress, .coa_decompress = crypto842_decompress } } }; +static struct scomp_alg scomp = { + .alloc_ctx = crypto842_alloc_ctx, + .free_ctx = crypto842_free_ctx, + .compress = crypto842_scompress, + .decompress = crypto842_sdecompress, + .base = { + .cra_name = "842", + .cra_driver_name = "842-scomp", + .cra_priority = 100, + .cra_module = THIS_MODULE, + } +}; + static int __init crypto842_mod_init(void) { - return crypto_register_alg(&alg); + int ret; + + ret = crypto_register_alg(&alg); + if (ret) + return ret; + + ret = crypto_register_scomp(&scomp); + if (ret) { + crypto_unregister_alg(&alg); + return ret; + } + + return ret; } module_init(crypto842_mod_init); static void __exit crypto842_mod_exit(void) { crypto_unregister_alg(&alg); + crypto_unregister_scomp(&scomp); } module_exit(crypto842_mod_exit); diff --git a/crypto/Kconfig b/crypto/Kconfig index 84d71482bf08..f37e9cca50e1 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -24,7 +24,7 @@ comment "Crypto core or helper" config CRYPTO_FIPS bool "FIPS 200 compliance" depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS - depends on MODULE_SIG + depends on (MODULE_SIG || !MODULES) help This options enables the fips boot option which is required if you want to system to 
operate in a FIPS 200 @@ -102,6 +102,15 @@ config CRYPTO_KPP select CRYPTO_ALGAPI select CRYPTO_KPP2 +config CRYPTO_ACOMP2 + tristate + select CRYPTO_ALGAPI2 + +config CRYPTO_ACOMP + tristate + select CRYPTO_ALGAPI + select CRYPTO_ACOMP2 + config CRYPTO_RSA tristate "RSA algorithm" select CRYPTO_AKCIPHER @@ -138,6 +147,7 @@ config CRYPTO_MANAGER2 select CRYPTO_BLKCIPHER2 select CRYPTO_AKCIPHER2 select CRYPTO_KPP2 + select CRYPTO_ACOMP2 config CRYPTO_USER tristate "Userspace cryptographic algorithm configuration" @@ -236,10 +246,14 @@ config CRYPTO_ABLK_HELPER tristate select CRYPTO_CRYPTD +config CRYPTO_SIMD + tristate + select CRYPTO_CRYPTD + config CRYPTO_GLUE_HELPER_X86 tristate depends on X86 - select CRYPTO_ALGAPI + select CRYPTO_BLKCIPHER config CRYPTO_ENGINE tristate @@ -249,6 +263,7 @@ comment "Authenticated Encryption with Associated Data" config CRYPTO_CCM tristate "CCM support" select CRYPTO_CTR + select CRYPTO_HASH select CRYPTO_AEAD help Support for Counter with CBC MAC. Required for IPsec. @@ -360,6 +375,7 @@ config CRYPTO_XTS select CRYPTO_BLKCIPHER select CRYPTO_MANAGER select CRYPTO_GF128MUL + select CRYPTO_ECB help XTS: IEEE1619/D16 narrow block cipher use with aes-xts-plain, key size 256, 384 or 512 bits. This implementation currently @@ -437,7 +453,7 @@ config CRYPTO_CRC32C_INTEL gain performance compared with software implementation. Module will be crc32c-intel. -config CRYPT_CRC32C_VPMSUM +config CRYPTO_CRC32C_VPMSUM tristate "CRC32c CRC algorithm (powerpc64)" depends on PPC64 && ALTIVEC select CRYPTO_HASH @@ -881,6 +897,23 @@ config CRYPTO_AES See <http://csrc.nist.gov/CryptoToolkit/aes/> for more information. +config CRYPTO_AES_TI + tristate "Fixed time AES cipher" + select CRYPTO_ALGAPI + help + This is a generic implementation of AES that attempts to eliminate + data dependent latencies as much as possible without affecting + performance too much. It is intended for use by the generic CCM + and GCM drivers, and other CTR or CMAC/XCBC based modes that rely + solely on encryption (although decryption is supported as well, but + with a more dramatic performance hit) + + Instead of using 16 lookup tables of 1 KB each, (8 for encryption and + 8 for decryption), this implementation only uses just two S-boxes of + 256 bytes each, and attempts to eliminate data dependent latencies by + prefetching the entire table into the cache at the start of each + block. + config CRYPTO_AES_586 tristate "AES cipher algorithms (i586)" depends on (X86 || UML_X86) && !64BIT @@ -928,14 +961,13 @@ config CRYPTO_AES_X86_64 config CRYPTO_AES_NI_INTEL tristate "AES cipher algorithms (AES-NI)" depends on X86 + select CRYPTO_AEAD select CRYPTO_AES_X86_64 if 64BIT select CRYPTO_AES_586 if !64BIT - select CRYPTO_CRYPTD - select CRYPTO_ABLK_HELPER select CRYPTO_ALGAPI + select CRYPTO_BLKCIPHER select CRYPTO_GLUE_HELPER_X86 if 64BIT - select CRYPTO_LRW - select CRYPTO_XTS + select CRYPTO_SIMD help Use Intel AES-NI instructions for AES algorithm. 
@@ -1568,6 +1600,7 @@ comment "Compression" config CRYPTO_DEFLATE tristate "Deflate compression algorithm" select CRYPTO_ALGAPI + select CRYPTO_ACOMP2 select ZLIB_INFLATE select ZLIB_DEFLATE help @@ -1579,6 +1612,7 @@ config CRYPTO_DEFLATE config CRYPTO_LZO tristate "LZO compression algorithm" select CRYPTO_ALGAPI + select CRYPTO_ACOMP2 select LZO_COMPRESS select LZO_DECOMPRESS help @@ -1587,6 +1621,7 @@ config CRYPTO_LZO config CRYPTO_842 tristate "842 compression algorithm" select CRYPTO_ALGAPI + select CRYPTO_ACOMP2 select 842_COMPRESS select 842_DECOMPRESS help @@ -1595,6 +1630,7 @@ config CRYPTO_842 config CRYPTO_LZ4 tristate "LZ4 compression algorithm" select CRYPTO_ALGAPI + select CRYPTO_ACOMP2 select LZ4_COMPRESS select LZ4_DECOMPRESS help @@ -1603,6 +1639,7 @@ config CRYPTO_LZ4 config CRYPTO_LZ4HC tristate "LZ4HC compression algorithm" select CRYPTO_ALGAPI + select CRYPTO_ACOMP2 select LZ4HC_COMPRESS select LZ4_DECOMPRESS help diff --git a/crypto/Makefile b/crypto/Makefile index bd6a029094e6..8a44057240d5 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -51,6 +51,10 @@ rsa_generic-y += rsa_helper.o rsa_generic-y += rsa-pkcs1pad.o obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o +crypto_acompress-y := acompress.o +crypto_acompress-y += scompress.o +obj-$(CONFIG_CRYPTO_ACOMP2) += crypto_acompress.o + cryptomgr-y := algboss.o testmgr.o obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o @@ -71,6 +75,7 @@ obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o obj-$(CONFIG_CRYPTO_WP512) += wp512.o +CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o obj-$(CONFIG_CRYPTO_ECB) += ecb.o @@ -94,7 +99,9 @@ obj-$(CONFIG_CRYPTO_BLOWFISH_COMMON) += blowfish_common.o obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o +CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 obj-$(CONFIG_CRYPTO_AES) += aes_generic.o +obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o obj-$(CONFIG_CRYPTO_CAST5) += cast5_generic.o @@ -139,3 +146,5 @@ obj-$(CONFIG_ASYNC_CORE) += async_tx/ obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/ obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o +crypto_simd-y := simd.o +obj-$(CONFIG_CRYPTO_SIMD) += crypto_simd.o diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index d676fc59521a..d880a4897159 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -19,6 +19,7 @@ #include <linux/slab.h> #include <linux/seq_file.h> #include <linux/cryptouser.h> +#include <linux/compiler.h> #include <net/netlink.h> #include <crypto/scatterwalk.h> @@ -394,7 +395,7 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) #endif static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg) - __attribute__ ((unused)); + __maybe_unused; static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg) { struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher; @@ -468,7 +469,7 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg) #endif static void crypto_givcipher_show(struct seq_file *m, 
struct crypto_alg *alg) - __attribute__ ((unused)); + __maybe_unused; static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg) { struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher; diff --git a/crypto/acompress.c b/crypto/acompress.c new file mode 100644 index 000000000000..47d11627cd20 --- /dev/null +++ b/crypto/acompress.c @@ -0,0 +1,170 @@ +/* + * Asynchronous Compression operations + * + * Copyright (c) 2016, Intel Corporation + * Authors: Weigang Li <weigang.li@intel.com> + * Giovanni Cabiddu <giovanni.cabiddu@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/crypto.h> +#include <crypto/algapi.h> +#include <linux/cryptouser.h> +#include <linux/compiler.h> +#include <net/netlink.h> +#include <crypto/internal/acompress.h> +#include <crypto/internal/scompress.h> +#include "internal.h" + +static const struct crypto_type crypto_acomp_type; + +#ifdef CONFIG_NET +static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg) +{ + struct crypto_report_acomp racomp; + + strncpy(racomp.type, "acomp", sizeof(racomp.type)); + + if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, + sizeof(struct crypto_report_acomp), &racomp)) + goto nla_put_failure; + return 0; + +nla_put_failure: + return -EMSGSIZE; +} +#else +static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg) +{ + return -ENOSYS; +} +#endif + +static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg) + __maybe_unused; + +static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg) +{ + seq_puts(m, "type : acomp\n"); +} + +static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm); + struct acomp_alg *alg = crypto_acomp_alg(acomp); + + alg->exit(acomp); +} + +static int crypto_acomp_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm); + struct acomp_alg *alg = crypto_acomp_alg(acomp); + + if (tfm->__crt_alg->cra_type != &crypto_acomp_type) + return crypto_init_scomp_ops_async(tfm); + + acomp->compress = alg->compress; + acomp->decompress = alg->decompress; + acomp->dst_free = alg->dst_free; + acomp->reqsize = alg->reqsize; + + if (alg->exit) + acomp->base.exit = crypto_acomp_exit_tfm; + + if (alg->init) + return alg->init(acomp); + + return 0; +} + +static unsigned int crypto_acomp_extsize(struct crypto_alg *alg) +{ + int extsize = crypto_alg_extsize(alg); + + if (alg->cra_type != &crypto_acomp_type) + extsize += sizeof(struct crypto_scomp *); + + return extsize; +} + +static const struct crypto_type crypto_acomp_type = { + .extsize = crypto_acomp_extsize, + .init_tfm = crypto_acomp_init_tfm, +#ifdef CONFIG_PROC_FS + .show = crypto_acomp_show, +#endif + .report = crypto_acomp_report, + .maskclear = ~CRYPTO_ALG_TYPE_MASK, + .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK, + .type = CRYPTO_ALG_TYPE_ACOMPRESS, + .tfmsize = offsetof(struct crypto_acomp, base), +}; + +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type, + u32 mask) +{ + return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_alloc_acomp); + +struct 
acomp_req *acomp_request_alloc(struct crypto_acomp *acomp) +{ + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); + struct acomp_req *req; + + req = __acomp_request_alloc(acomp); + if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type)) + return crypto_acomp_scomp_alloc_ctx(req); + + return req; +} +EXPORT_SYMBOL_GPL(acomp_request_alloc); + +void acomp_request_free(struct acomp_req *req) +{ + struct crypto_acomp *acomp = crypto_acomp_reqtfm(req); + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); + + if (tfm->__crt_alg->cra_type != &crypto_acomp_type) + crypto_acomp_scomp_free_ctx(req); + + if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) { + acomp->dst_free(req->dst); + req->dst = NULL; + } + + __acomp_request_free(req); +} +EXPORT_SYMBOL_GPL(acomp_request_free); + +int crypto_register_acomp(struct acomp_alg *alg) +{ + struct crypto_alg *base = &alg->base; + + base->cra_type = &crypto_acomp_type; + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; + base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS; + + return crypto_register_alg(base); +} +EXPORT_SYMBOL_GPL(crypto_register_acomp); + +int crypto_unregister_acomp(struct acomp_alg *alg) +{ + return crypto_unregister_alg(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_unregister_acomp); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Asynchronous compression type"); diff --git a/crypto/aead.c b/crypto/aead.c index 3f5c5ff004ab..f794b30a9407 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -24,6 +24,7 @@ #include <linux/slab.h> #include <linux/seq_file.h> #include <linux/cryptouser.h> +#include <linux/compiler.h> #include <net/netlink.h> #include "internal.h" @@ -132,7 +133,7 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg) #endif static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg) - __attribute__ ((unused)); + __maybe_unused; static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg) { struct aead_alg *aead = container_of(alg, struct aead_alg, base); diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c index 3dd101144a58..ca554d57d01e 100644 --- a/crypto/aes_generic.c +++ b/crypto/aes_generic.c @@ -54,6 +54,7 @@ #include <linux/errno.h> #include <linux/crypto.h> #include <asm/byteorder.h> +#include <asm/unaligned.h> static inline u8 byte(const u32 x, const unsigned n) { @@ -1216,7 +1217,6 @@ EXPORT_SYMBOL_GPL(crypto_il_tab); int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len) { - const __le32 *key = (const __le32 *)in_key; u32 i, t, u, v, w, j; if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 && @@ -1225,10 +1225,15 @@ int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key, ctx->key_length = key_len; - ctx->key_dec[key_len + 24] = ctx->key_enc[0] = le32_to_cpu(key[0]); - ctx->key_dec[key_len + 25] = ctx->key_enc[1] = le32_to_cpu(key[1]); - ctx->key_dec[key_len + 26] = ctx->key_enc[2] = le32_to_cpu(key[2]); - ctx->key_dec[key_len + 27] = ctx->key_enc[3] = le32_to_cpu(key[3]); + ctx->key_enc[0] = get_unaligned_le32(in_key); + ctx->key_enc[1] = get_unaligned_le32(in_key + 4); + ctx->key_enc[2] = get_unaligned_le32(in_key + 8); + ctx->key_enc[3] = get_unaligned_le32(in_key + 12); + + ctx->key_dec[key_len + 24] = ctx->key_enc[0]; + ctx->key_dec[key_len + 25] = ctx->key_enc[1]; + ctx->key_dec[key_len + 26] = ctx->key_enc[2]; + ctx->key_dec[key_len + 27] = ctx->key_enc[3]; switch (key_len) { case AES_KEYSIZE_128: @@ -1238,17 +1243,17 @@ int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key, break; case 
AES_KEYSIZE_192: - ctx->key_enc[4] = le32_to_cpu(key[4]); - t = ctx->key_enc[5] = le32_to_cpu(key[5]); + ctx->key_enc[4] = get_unaligned_le32(in_key + 16); + t = ctx->key_enc[5] = get_unaligned_le32(in_key + 20); for (i = 0; i < 8; ++i) loop6(i); break; case AES_KEYSIZE_256: - ctx->key_enc[4] = le32_to_cpu(key[4]); - ctx->key_enc[5] = le32_to_cpu(key[5]); - ctx->key_enc[6] = le32_to_cpu(key[6]); - t = ctx->key_enc[7] = le32_to_cpu(key[7]); + ctx->key_enc[4] = get_unaligned_le32(in_key + 16); + ctx->key_enc[5] = get_unaligned_le32(in_key + 20); + ctx->key_enc[6] = get_unaligned_le32(in_key + 24); + t = ctx->key_enc[7] = get_unaligned_le32(in_key + 28); for (i = 0; i < 6; ++i) loop8(i); loop8tophalf(i); @@ -1329,16 +1334,14 @@ EXPORT_SYMBOL_GPL(crypto_aes_set_key); static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); - const __le32 *src = (const __le32 *)in; - __le32 *dst = (__le32 *)out; u32 b0[4], b1[4]; const u32 *kp = ctx->key_enc + 4; const int key_len = ctx->key_length; - b0[0] = le32_to_cpu(src[0]) ^ ctx->key_enc[0]; - b0[1] = le32_to_cpu(src[1]) ^ ctx->key_enc[1]; - b0[2] = le32_to_cpu(src[2]) ^ ctx->key_enc[2]; - b0[3] = le32_to_cpu(src[3]) ^ ctx->key_enc[3]; + b0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in); + b0[1] = ctx->key_enc[1] ^ get_unaligned_le32(in + 4); + b0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8); + b0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12); if (key_len > 24) { f_nround(b1, b0, kp); @@ -1361,10 +1364,10 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) f_nround(b1, b0, kp); f_lround(b0, b1, kp); - dst[0] = cpu_to_le32(b0[0]); - dst[1] = cpu_to_le32(b0[1]); - dst[2] = cpu_to_le32(b0[2]); - dst[3] = cpu_to_le32(b0[3]); + put_unaligned_le32(b0[0], out); + put_unaligned_le32(b0[1], out + 4); + put_unaligned_le32(b0[2], out + 8); + put_unaligned_le32(b0[3], out + 12); } /* decrypt a block of text */ @@ -1401,16 +1404,14 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); - const __le32 *src = (const __le32 *)in; - __le32 *dst = (__le32 *)out; u32 b0[4], b1[4]; const int key_len = ctx->key_length; const u32 *kp = ctx->key_dec + 4; - b0[0] = le32_to_cpu(src[0]) ^ ctx->key_dec[0]; - b0[1] = le32_to_cpu(src[1]) ^ ctx->key_dec[1]; - b0[2] = le32_to_cpu(src[2]) ^ ctx->key_dec[2]; - b0[3] = le32_to_cpu(src[3]) ^ ctx->key_dec[3]; + b0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in); + b0[1] = ctx->key_dec[1] ^ get_unaligned_le32(in + 4); + b0[2] = ctx->key_dec[2] ^ get_unaligned_le32(in + 8); + b0[3] = ctx->key_dec[3] ^ get_unaligned_le32(in + 12); if (key_len > 24) { i_nround(b1, b0, kp); @@ -1433,10 +1434,10 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) i_nround(b1, b0, kp); i_lround(b0, b1, kp); - dst[0] = cpu_to_le32(b0[0]); - dst[1] = cpu_to_le32(b0[1]); - dst[2] = cpu_to_le32(b0[2]); - dst[3] = cpu_to_le32(b0[3]); + put_unaligned_le32(b0[0], out); + put_unaligned_le32(b0[1], out + 4); + put_unaligned_le32(b0[2], out + 8); + put_unaligned_le32(b0[3], out + 12); } static struct crypto_alg aes_alg = { @@ -1446,7 +1447,6 @@ static struct crypto_alg aes_alg = { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypto_aes_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { diff --git a/crypto/aes_ti.c 
b/crypto/aes_ti.c new file mode 100644 index 000000000000..92644fd1ac19 --- /dev/null +++ b/crypto/aes_ti.c @@ -0,0 +1,375 @@ +/* + * Scalar fixed time AES core transform + * + * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <crypto/aes.h> +#include <linux/crypto.h> +#include <linux/module.h> +#include <asm/unaligned.h> + +/* + * Emit the sbox as volatile const to prevent the compiler from doing + * constant folding on sbox references involving fixed indexes. + */ +static volatile const u8 __cacheline_aligned __aesti_sbox[] = { + 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, + 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, + 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, + 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, + 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, + 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, + 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, + 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, + 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, + 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, + 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, + 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, + 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, + 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, + 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, + 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, + 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, + 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, + 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, + 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, + 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, + 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, + 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, + 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, + 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, + 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, + 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, + 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, + 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, + 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, + 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, + 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16, +}; + +static volatile const u8 __cacheline_aligned __aesti_inv_sbox[] = { + 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, + 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb, + 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, + 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, + 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, + 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e, + 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, + 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, + 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, + 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, + 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, + 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, + 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, + 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06, + 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, + 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, + 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, + 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73, + 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, + 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, + 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, + 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, + 0xfc, 0x56, 0x3e, 
0x4b, 0xc6, 0xd2, 0x79, 0x20, + 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4, + 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, + 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, + 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, + 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, + 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, + 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, + 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, + 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d, +}; + +static u32 mul_by_x(u32 w) +{ + u32 x = w & 0x7f7f7f7f; + u32 y = w & 0x80808080; + + /* multiply by polynomial 'x' (0b10) in GF(2^8) */ + return (x << 1) ^ (y >> 7) * 0x1b; +} + +static u32 mul_by_x2(u32 w) +{ + u32 x = w & 0x3f3f3f3f; + u32 y = w & 0x80808080; + u32 z = w & 0x40404040; + + /* multiply by polynomial 'x^2' (0b100) in GF(2^8) */ + return (x << 2) ^ (y >> 7) * 0x36 ^ (z >> 6) * 0x1b; +} + +static u32 mix_columns(u32 x) +{ + /* + * Perform the following matrix multiplication in GF(2^8) + * + * | 0x2 0x3 0x1 0x1 | | x[0] | + * | 0x1 0x2 0x3 0x1 | | x[1] | + * | 0x1 0x1 0x2 0x3 | x | x[2] | + * | 0x3 0x1 0x1 0x3 | | x[3] | + */ + u32 y = mul_by_x(x) ^ ror32(x, 16); + + return y ^ ror32(x ^ y, 8); +} + +static u32 inv_mix_columns(u32 x) +{ + /* + * Perform the following matrix multiplication in GF(2^8) + * + * | 0xe 0xb 0xd 0x9 | | x[0] | + * | 0x9 0xe 0xb 0xd | | x[1] | + * | 0xd 0x9 0xe 0xb | x | x[2] | + * | 0xb 0xd 0x9 0xe | | x[3] | + * + * which can conveniently be reduced to + * + * | 0x2 0x3 0x1 0x1 | | 0x5 0x0 0x4 0x0 | | x[0] | + * | 0x1 0x2 0x3 0x1 | | 0x0 0x5 0x0 0x4 | | x[1] | + * | 0x1 0x1 0x2 0x3 | x | 0x4 0x0 0x5 0x0 | x | x[2] | + * | 0x3 0x1 0x1 0x2 | | 0x0 0x4 0x0 0x5 | | x[3] | + */ + u32 y = mul_by_x2(x); + + return mix_columns(x ^ y ^ ror32(y, 16)); +} + +static __always_inline u32 subshift(u32 in[], int pos) +{ + return (__aesti_sbox[in[pos] & 0xff]) ^ + (__aesti_sbox[(in[(pos + 1) % 4] >> 8) & 0xff] << 8) ^ + (__aesti_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^ + (__aesti_sbox[(in[(pos + 3) % 4] >> 24) & 0xff] << 24); +} + +static __always_inline u32 inv_subshift(u32 in[], int pos) +{ + return (__aesti_inv_sbox[in[pos] & 0xff]) ^ + (__aesti_inv_sbox[(in[(pos + 3) % 4] >> 8) & 0xff] << 8) ^ + (__aesti_inv_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^ + (__aesti_inv_sbox[(in[(pos + 1) % 4] >> 24) & 0xff] << 24); +} + +static u32 subw(u32 in) +{ + return (__aesti_sbox[in & 0xff]) ^ + (__aesti_sbox[(in >> 8) & 0xff] << 8) ^ + (__aesti_sbox[(in >> 16) & 0xff] << 16) ^ + (__aesti_sbox[(in >> 24) & 0xff] << 24); +} + +static int aesti_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key, + unsigned int key_len) +{ + u32 kwords = key_len / sizeof(u32); + u32 rc, i, j; + + if (key_len != AES_KEYSIZE_128 && + key_len != AES_KEYSIZE_192 && + key_len != AES_KEYSIZE_256) + return -EINVAL; + + ctx->key_length = key_len; + + for (i = 0; i < kwords; i++) + ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32)); + + for (i = 0, rc = 1; i < 10; i++, rc = mul_by_x(rc)) { + u32 *rki = ctx->key_enc + (i * kwords); + u32 *rko = rki + kwords; + + rko[0] = ror32(subw(rki[kwords - 1]), 8) ^ rc ^ rki[0]; + rko[1] = rko[0] ^ rki[1]; + rko[2] = rko[1] ^ rki[2]; + rko[3] = rko[2] ^ rki[3]; + + if (key_len == 24) { + if (i >= 7) + break; + rko[4] = rko[3] ^ rki[4]; + rko[5] = rko[4] ^ rki[5]; + } else if (key_len == 32) { + if (i >= 6) + break; + rko[4] = subw(rko[3]) ^ rki[4]; + rko[5] = rko[4] ^ rki[5]; + rko[6] = rko[5] ^ rki[6]; + rko[7] = rko[6] ^ rki[7]; + } + } + + /* + * Generate 
the decryption keys for the Equivalent Inverse Cipher. + * This involves reversing the order of the round keys, and applying + * the Inverse Mix Columns transformation to all but the first and + * the last one. + */ + ctx->key_dec[0] = ctx->key_enc[key_len + 24]; + ctx->key_dec[1] = ctx->key_enc[key_len + 25]; + ctx->key_dec[2] = ctx->key_enc[key_len + 26]; + ctx->key_dec[3] = ctx->key_enc[key_len + 27]; + + for (i = 4, j = key_len + 20; j > 0; i += 4, j -= 4) { + ctx->key_dec[i] = inv_mix_columns(ctx->key_enc[j]); + ctx->key_dec[i + 1] = inv_mix_columns(ctx->key_enc[j + 1]); + ctx->key_dec[i + 2] = inv_mix_columns(ctx->key_enc[j + 2]); + ctx->key_dec[i + 3] = inv_mix_columns(ctx->key_enc[j + 3]); + } + + ctx->key_dec[i] = ctx->key_enc[0]; + ctx->key_dec[i + 1] = ctx->key_enc[1]; + ctx->key_dec[i + 2] = ctx->key_enc[2]; + ctx->key_dec[i + 3] = ctx->key_enc[3]; + + return 0; +} + +static int aesti_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len) +{ + struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); + int err; + + err = aesti_expand_key(ctx, in_key, key_len); + if (err) + return err; + + /* + * In order to force the compiler to emit data independent Sbox lookups + * at the start of each block, xor the first round key with values at + * fixed indexes in the Sbox. This will need to be repeated each time + * the key is used, which will pull the entire Sbox into the D-cache + * before any data dependent Sbox lookups are performed. + */ + ctx->key_enc[0] ^= __aesti_sbox[ 0] ^ __aesti_sbox[128]; + ctx->key_enc[1] ^= __aesti_sbox[32] ^ __aesti_sbox[160]; + ctx->key_enc[2] ^= __aesti_sbox[64] ^ __aesti_sbox[192]; + ctx->key_enc[3] ^= __aesti_sbox[96] ^ __aesti_sbox[224]; + + ctx->key_dec[0] ^= __aesti_inv_sbox[ 0] ^ __aesti_inv_sbox[128]; + ctx->key_dec[1] ^= __aesti_inv_sbox[32] ^ __aesti_inv_sbox[160]; + ctx->key_dec[2] ^= __aesti_inv_sbox[64] ^ __aesti_inv_sbox[192]; + ctx->key_dec[3] ^= __aesti_inv_sbox[96] ^ __aesti_inv_sbox[224]; + + return 0; +} + +static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); + const u32 *rkp = ctx->key_enc + 4; + int rounds = 6 + ctx->key_length / 4; + u32 st0[4], st1[4]; + int round; + + st0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in); + st0[1] = ctx->key_enc[1] ^ get_unaligned_le32(in + 4); + st0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8); + st0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12); + + st0[0] ^= __aesti_sbox[ 0] ^ __aesti_sbox[128]; + st0[1] ^= __aesti_sbox[32] ^ __aesti_sbox[160]; + st0[2] ^= __aesti_sbox[64] ^ __aesti_sbox[192]; + st0[3] ^= __aesti_sbox[96] ^ __aesti_sbox[224]; + + for (round = 0;; round += 2, rkp += 8) { + st1[0] = mix_columns(subshift(st0, 0)) ^ rkp[0]; + st1[1] = mix_columns(subshift(st0, 1)) ^ rkp[1]; + st1[2] = mix_columns(subshift(st0, 2)) ^ rkp[2]; + st1[3] = mix_columns(subshift(st0, 3)) ^ rkp[3]; + + if (round == rounds - 2) + break; + + st0[0] = mix_columns(subshift(st1, 0)) ^ rkp[4]; + st0[1] = mix_columns(subshift(st1, 1)) ^ rkp[5]; + st0[2] = mix_columns(subshift(st1, 2)) ^ rkp[6]; + st0[3] = mix_columns(subshift(st1, 3)) ^ rkp[7]; + } + + put_unaligned_le32(subshift(st1, 0) ^ rkp[4], out); + put_unaligned_le32(subshift(st1, 1) ^ rkp[5], out + 4); + put_unaligned_le32(subshift(st1, 2) ^ rkp[6], out + 8); + put_unaligned_le32(subshift(st1, 3) ^ rkp[7], out + 12); +} + +static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); + 
const u32 *rkp = ctx->key_dec + 4; + int rounds = 6 + ctx->key_length / 4; + u32 st0[4], st1[4]; + int round; + + st0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in); + st0[1] = ctx->key_dec[1] ^ get_unaligned_le32(in + 4); + st0[2] = ctx->key_dec[2] ^ get_unaligned_le32(in + 8); + st0[3] = ctx->key_dec[3] ^ get_unaligned_le32(in + 12); + + st0[0] ^= __aesti_inv_sbox[ 0] ^ __aesti_inv_sbox[128]; + st0[1] ^= __aesti_inv_sbox[32] ^ __aesti_inv_sbox[160]; + st0[2] ^= __aesti_inv_sbox[64] ^ __aesti_inv_sbox[192]; + st0[3] ^= __aesti_inv_sbox[96] ^ __aesti_inv_sbox[224]; + + for (round = 0;; round += 2, rkp += 8) { + st1[0] = inv_mix_columns(inv_subshift(st0, 0)) ^ rkp[0]; + st1[1] = inv_mix_columns(inv_subshift(st0, 1)) ^ rkp[1]; + st1[2] = inv_mix_columns(inv_subshift(st0, 2)) ^ rkp[2]; + st1[3] = inv_mix_columns(inv_subshift(st0, 3)) ^ rkp[3]; + + if (round == rounds - 2) + break; + + st0[0] = inv_mix_columns(inv_subshift(st1, 0)) ^ rkp[4]; + st0[1] = inv_mix_columns(inv_subshift(st1, 1)) ^ rkp[5]; + st0[2] = inv_mix_columns(inv_subshift(st1, 2)) ^ rkp[6]; + st0[3] = inv_mix_columns(inv_subshift(st1, 3)) ^ rkp[7]; + } + + put_unaligned_le32(inv_subshift(st1, 0) ^ rkp[4], out); + put_unaligned_le32(inv_subshift(st1, 1) ^ rkp[5], out + 4); + put_unaligned_le32(inv_subshift(st1, 2) ^ rkp[6], out + 8); + put_unaligned_le32(inv_subshift(st1, 3) ^ rkp[7], out + 12); +} + +static struct crypto_alg aes_alg = { + .cra_name = "aes", + .cra_driver_name = "aes-fixed-time", + .cra_priority = 100 + 1, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypto_aes_ctx), + .cra_module = THIS_MODULE, + + .cra_cipher.cia_min_keysize = AES_MIN_KEY_SIZE, + .cra_cipher.cia_max_keysize = AES_MAX_KEY_SIZE, + .cra_cipher.cia_setkey = aesti_set_key, + .cra_cipher.cia_encrypt = aesti_encrypt, + .cra_cipher.cia_decrypt = aesti_decrypt +}; + +static int __init aes_init(void) +{ + return crypto_register_alg(&aes_alg); +} + +static void __exit aes_fini(void) +{ + crypto_unregister_alg(&aes_alg); +} + +module_init(aes_init); +module_exit(aes_fini); + +MODULE_DESCRIPTION("Generic fixed time AES"); +MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); +MODULE_LICENSE("GPL v2"); diff --git a/crypto/ahash.c b/crypto/ahash.c index 2ce8bcb9049c..e58c4970c22b 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -23,6 +23,7 @@ #include <linux/slab.h> #include <linux/seq_file.h> #include <linux/cryptouser.h> +#include <linux/compiler.h> #include <net/netlink.h> #include "internal.h" @@ -493,7 +494,7 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg) #endif static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) - __attribute__ ((unused)); + __maybe_unused; static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) { seq_printf(m, "type : ahash\n"); diff --git a/crypto/akcipher.c b/crypto/akcipher.c index def301ed1288..cfbdb06d8ca8 100644 --- a/crypto/akcipher.c +++ b/crypto/akcipher.c @@ -17,6 +17,7 @@ #include <linux/slab.h> #include <linux/string.h> #include <linux/crypto.h> +#include <linux/compiler.h> #include <crypto/algapi.h> #include <linux/cryptouser.h> #include <net/netlink.h> @@ -47,7 +48,7 @@ static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg) #endif static void crypto_akcipher_show(struct seq_file *m, struct crypto_alg *alg) - __attribute__ ((unused)); + __maybe_unused; static void crypto_akcipher_show(struct seq_file *m, struct crypto_alg *alg) { diff --git a/crypto/algapi.c 
b/crypto/algapi.c index df939b54b09f..6b52e8f0b95f 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -356,6 +356,7 @@ int crypto_register_alg(struct crypto_alg *alg) struct crypto_larval *larval; int err; + alg->cra_flags &= ~CRYPTO_ALG_DEAD; err = crypto_check_alg(alg); if (err) return err; @@ -961,34 +962,66 @@ void crypto_inc(u8 *a, unsigned int size) __be32 *b = (__be32 *)(a + size); u32 c; - for (; size >= 4; size -= 4) { - c = be32_to_cpu(*--b) + 1; - *b = cpu_to_be32(c); - if (c) - return; - } + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || + !((unsigned long)b & (__alignof__(*b) - 1))) + for (; size >= 4; size -= 4) { + c = be32_to_cpu(*--b) + 1; + *b = cpu_to_be32(c); + if (c) + return; + } crypto_inc_byte(a, size); } EXPORT_SYMBOL_GPL(crypto_inc); -static inline void crypto_xor_byte(u8 *a, const u8 *b, unsigned int size) +void __crypto_xor(u8 *dst, const u8 *src, unsigned int len) { - for (; size; size--) - *a++ ^= *b++; -} + int relalign = 0; + + if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { + int size = sizeof(unsigned long); + int d = ((unsigned long)dst ^ (unsigned long)src) & (size - 1); + + relalign = d ? 1 << __ffs(d) : size; + + /* + * If we care about alignment, process as many bytes as + * needed to advance dst and src to values whose alignments + * equal their relative alignment. This will allow us to + * process the remainder of the input using optimal strides. + */ + while (((unsigned long)dst & (relalign - 1)) && len > 0) { + *dst++ ^= *src++; + len--; + } + } -void crypto_xor(u8 *dst, const u8 *src, unsigned int size) -{ - u32 *a = (u32 *)dst; - u32 *b = (u32 *)src; + while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) { + *(u64 *)dst ^= *(u64 *)src; + dst += 8; + src += 8; + len -= 8; + } - for (; size >= 4; size -= 4) - *a++ ^= *b++; + while (len >= 4 && !(relalign & 3)) { + *(u32 *)dst ^= *(u32 *)src; + dst += 4; + src += 4; + len -= 4; + } + + while (len >= 2 && !(relalign & 1)) { + *(u16 *)dst ^= *(u16 *)src; + dst += 2; + src += 2; + len -= 2; + } - crypto_xor_byte((u8 *)a, (u8 *)b, size); + while (len--) + *dst++ ^= *src++; } -EXPORT_SYMBOL_GPL(crypto_xor); +EXPORT_SYMBOL_GPL(__crypto_xor); unsigned int crypto_alg_extsize(struct crypto_alg *alg) { diff --git a/crypto/algboss.c b/crypto/algboss.c index 6e39d9c05b98..960d8548171b 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c @@ -19,7 +19,7 @@ #include <linux/module.h> #include <linux/notifier.h> #include <linux/rtnetlink.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/string.h> @@ -247,12 +247,8 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg) memcpy(param->alg, alg->cra_name, sizeof(param->alg)); type = alg->cra_flags; - /* This piece of crap needs to disappear into per-type test hooks. */ - if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) & - CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) && - ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == - CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize : - alg->cra_ablkcipher.ivsize)) + /* Do not test internal algorithms. 
*/ + if (type & CRYPTO_ALG_INTERNAL) type |= CRYPTO_ALG_TESTED; param->type = type; diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index e9c0993b131d..5a8053758657 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -19,6 +19,7 @@ #include <linux/init.h> #include <linux/list.h> #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/net.h> @@ -136,28 +137,27 @@ static void aead_wmem_wakeup(struct sock *sk) static int aead_wait_for_data(struct sock *sk, unsigned flags) { + DEFINE_WAIT_FUNC(wait, woken_wake_function); struct alg_sock *ask = alg_sk(sk); struct aead_ctx *ctx = ask->private; long timeout; - DEFINE_WAIT(wait); int err = -ERESTARTSYS; if (flags & MSG_DONTWAIT) return -EAGAIN; sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); - + add_wait_queue(sk_sleep(sk), &wait); for (;;) { if (signal_pending(current)) break; - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); timeout = MAX_SCHEDULE_TIMEOUT; - if (sk_wait_event(sk, &timeout, !ctx->more)) { + if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) { err = 0; break; } } - finish_wait(sk_sleep(sk), &wait); + remove_wait_queue(sk_sleep(sk), &wait); sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); @@ -455,12 +455,13 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg, used -= ctx->aead_assoclen; /* take over all tx sgls from ctx */ - areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur, + areq->tsgl = sock_kmalloc(sk, + sizeof(*areq->tsgl) * max_t(u32, sgl->cur, 1), GFP_KERNEL); if (unlikely(!areq->tsgl)) goto free; - sg_init_table(areq->tsgl, sgl->cur); + sg_init_table(areq->tsgl, max_t(u32, sgl->cur, 1)); for (i = 0; i < sgl->cur; i++) sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]), sgl->sg[i].length, sgl->sg[i].offset); @@ -556,18 +557,8 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags) lock_sock(sk); /* - * AEAD memory structure: For encryption, the tag is appended to the - * ciphertext which implies that the memory allocated for the ciphertext - * must be increased by the tag length. For decryption, the tag - * is expected to be concatenated to the ciphertext. The plaintext - * therefore has a memory size of the ciphertext minus the tag length. - * - * The memory structure for cipher operation has the following - * structure: - * AEAD encryption input: assoc data || plaintext - * AEAD encryption output: cipherntext || auth tag - * AEAD decryption input: assoc data || ciphertext || auth tag - * AEAD decryption output: plaintext + * Please see documentation of aead_request_set_crypt for the + * description of the AEAD memory structure expected from the caller. 
*/ if (ctx->more) { @@ -671,9 +662,9 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags) unlock: list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) { af_alg_free_sg(&rsgl->sgl); + list_del(&rsgl->list); if (rsgl != &ctx->first_rsgl) sock_kfree_s(sk, rsgl, sizeof(*rsgl)); - list_del(&rsgl->list); } INIT_LIST_HEAD(&ctx->list); aead_wmem_wakeup(sk); diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index d19b09cdf284..54fc90e8339c 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -245,7 +245,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags) struct alg_sock *ask = alg_sk(sk); struct hash_ctx *ctx = ask->private; struct ahash_request *req = &ctx->req; - char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req))]; + char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req)) ? : 1]; struct sock *sk2; struct alg_sock *ask2; struct hash_ctx *ctx2; diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 28556fce4267..43839b00fe6c 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -18,6 +18,7 @@ #include <linux/init.h> #include <linux/list.h> #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/net.h> @@ -199,26 +200,26 @@ static void skcipher_free_sgl(struct sock *sk) static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags) { - long timeout; - DEFINE_WAIT(wait); + DEFINE_WAIT_FUNC(wait, woken_wake_function); int err = -ERESTARTSYS; + long timeout; if (flags & MSG_DONTWAIT) return -EAGAIN; sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); + add_wait_queue(sk_sleep(sk), &wait); for (;;) { if (signal_pending(current)) break; - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); timeout = MAX_SCHEDULE_TIMEOUT; - if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) { + if (sk_wait_event(sk, &timeout, skcipher_writable(sk), &wait)) { err = 0; break; } } - finish_wait(sk_sleep(sk), &wait); + remove_wait_queue(sk_sleep(sk), &wait); return err; } @@ -242,10 +243,10 @@ static void skcipher_wmem_wakeup(struct sock *sk) static int skcipher_wait_for_data(struct sock *sk, unsigned flags) { + DEFINE_WAIT_FUNC(wait, woken_wake_function); struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; long timeout; - DEFINE_WAIT(wait); int err = -ERESTARTSYS; if (flags & MSG_DONTWAIT) { @@ -254,17 +255,17 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags) sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); + add_wait_queue(sk_sleep(sk), &wait); for (;;) { if (signal_pending(current)) break; - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); timeout = MAX_SCHEDULE_TIMEOUT; - if (sk_wait_event(sk, &timeout, ctx->used)) { + if (sk_wait_event(sk, &timeout, ctx->used, &wait)) { err = 0; break; } } - finish_wait(sk_sleep(sk), &wait); + remove_wait_queue(sk_sleep(sk), &wait); sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); @@ -566,8 +567,10 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg, * need to expand */ tmp = kcalloc(tx_nents * 2, sizeof(*tmp), GFP_KERNEL); - if (!tmp) + if (!tmp) { + err = -ENOMEM; goto free; + } sg_init_table(tmp, tx_nents * 2); for (x = 0; x < tx_nents; x++) diff --git a/crypto/api.c b/crypto/api.c index bbc147cb5dec..941cd4c6c7ec 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -21,7 +21,7 @@ #include <linux/kmod.h> #include <linux/module.h> #include <linux/param.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include 
<linux/string.h> #include "internal.h" @@ -211,8 +211,8 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask) if (!name) return ERR_PTR(-ENOENT); + type &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD); mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD); - type &= mask; alg = crypto_alg_lookup(name, type, mask); if (!alg) { @@ -310,24 +310,8 @@ static void crypto_exit_ops(struct crypto_tfm *tfm) { const struct crypto_type *type = tfm->__crt_alg->cra_type; - if (type) { - if (tfm->exit) - tfm->exit(tfm); - return; - } - - switch (crypto_tfm_alg_type(tfm)) { - case CRYPTO_ALG_TYPE_CIPHER: - crypto_exit_cipher_ops(tfm); - break; - - case CRYPTO_ALG_TYPE_COMPRESS: - crypto_exit_compress_ops(tfm); - break; - - default: - BUG(); - } + if (type && tfm->exit) + tfm->exit(tfm); } static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index fd76b5fc3b3a..d3a989e718f5 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -121,6 +121,7 @@ int public_key_verify_signature(const struct public_key *pkey, if (ret) goto error_free_req; + ret = -ENOMEM; outlen = crypto_akcipher_maxsize(tfm); output = kmalloc(outlen, GFP_KERNEL); if (!output) diff --git a/crypto/authenc.c b/crypto/authenc.c index a7e1ac786c5d..875470b0e026 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -324,7 +324,7 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm) if (IS_ERR(auth)) return PTR_ERR(auth); - enc = crypto_spawn_skcipher2(&ictx->enc); + enc = crypto_spawn_skcipher(&ictx->enc); err = PTR_ERR(enc); if (IS_ERR(enc)) goto err_free_ahash; @@ -420,9 +420,9 @@ static int crypto_authenc_create(struct crypto_template *tmpl, goto err_free_inst; crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst)); - err = crypto_grab_skcipher2(&ctx->enc, enc_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); if (err) goto err_drop_auth; diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 121010ac9962..6f8f6b86bfe2 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -342,7 +342,7 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm) if (IS_ERR(auth)) return PTR_ERR(auth); - enc = crypto_spawn_skcipher2(&ictx->enc); + enc = crypto_spawn_skcipher(&ictx->enc); err = PTR_ERR(enc); if (IS_ERR(enc)) goto err_free_ahash; @@ -441,9 +441,9 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, goto err_free_inst; crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst)); - err = crypto_grab_skcipher2(&ctx->enc, enc_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); if (err) goto err_drop_auth; diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index a832426820e8..6c43a0a17a55 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -1,6 +1,6 @@ /* * Block chaining cipher operations. - * + * * Generic encrypt/decrypt wrapper for ciphers, handles operations across * multiple page boundaries by using temporary blocks. In user context, * the kernel is given a chance to schedule us once per page. 
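Stepping back to the crypto/algapi.c hunk earlier in this diff: the exported crypto_xor() is replaced by __crypto_xor(), which first advances dst and src byte by byte until their relative alignment is fixed up and then XORs in the widest stride that the alignment (or CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) permits. The matching fast path lives in include/crypto/algapi.h, which is outside the crypto/ diffstat shown here; the header side presumably turns crypto_xor() into an inline wrapper roughly like the following (an assumption for illustration, not part of this diff):

static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s = (unsigned long *)src;

		/* constant, word-multiple length: let the compiler unroll */
		while (size > 0) {
			*d++ ^= *s++;
			size -= sizeof(unsigned long);
		}
	} else {
		/* generic path added by this patch in crypto/algapi.c */
		__crypto_xor(dst, src, size);
	}
}

One visible consequence further down: the cbc template no longer needs to force a u32 alignmask for its xors, so the "/* We access the data as u32s when xoring. */" workaround disappears from crypto/cbc.c.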
@@ -9,7 +9,7 @@ * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) + * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ @@ -25,6 +25,7 @@ #include <linux/slab.h> #include <linux/string.h> #include <linux/cryptouser.h> +#include <linux/compiler.h> #include <net/netlink.h> #include "internal.h" @@ -534,7 +535,7 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) #endif static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) - __attribute__ ((unused)); + __maybe_unused; static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) { seq_printf(m, "type : blkcipher\n"); diff --git a/crypto/cbc.c b/crypto/cbc.c index 780ee27b2d43..bc160a3186dc 100644 --- a/crypto/cbc.c +++ b/crypto/cbc.c @@ -1,7 +1,7 @@ /* * CBC: Cipher Block Chaining mode * - * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> + * Copyright (c) 2006-2016 Herbert Xu <herbert@gondor.apana.org.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -10,191 +10,78 @@ * */ -#include <crypto/algapi.h> +#include <crypto/cbc.h> +#include <crypto/internal/skcipher.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/log2.h> #include <linux/module.h> -#include <linux/scatterlist.h> #include <linux/slab.h> struct crypto_cbc_ctx { struct crypto_cipher *child; }; -static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key, +static int crypto_cbc_setkey(struct crypto_skcipher *parent, const u8 *key, unsigned int keylen) { - struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(parent); + struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(parent); struct crypto_cipher *child = ctx->child; int err; crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & + crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) & CRYPTO_TFM_REQ_MASK); err = crypto_cipher_setkey(child, key, keylen); - crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); + crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) & + CRYPTO_TFM_RES_MASK); return err; } -static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc, - struct blkcipher_walk *walk, - struct crypto_cipher *tfm) +static inline void crypto_cbc_encrypt_one(struct crypto_skcipher *tfm, + const u8 *src, u8 *dst) { - void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = - crypto_cipher_alg(tfm)->cia_encrypt; - int bsize = crypto_cipher_blocksize(tfm); - unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; - u8 *dst = walk->dst.virt.addr; - u8 *iv = walk->iv; - - do { - crypto_xor(iv, src, bsize); - fn(crypto_cipher_tfm(tfm), dst, iv); - memcpy(iv, dst, bsize); - - src += bsize; - dst += bsize; - } while ((nbytes -= bsize) >= bsize); - - return nbytes; -} - -static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc, - struct blkcipher_walk *walk, - struct crypto_cipher *tfm) -{ - void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = - crypto_cipher_alg(tfm)->cia_encrypt; - int bsize = crypto_cipher_blocksize(tfm); - unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; - u8 *iv = walk->iv; - - do { - 
crypto_xor(src, iv, bsize); - fn(crypto_cipher_tfm(tfm), src, src); - iv = src; - - src += bsize; - } while ((nbytes -= bsize) >= bsize); - - memcpy(walk->iv, iv, bsize); - - return nbytes; -} - -static int crypto_cbc_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) -{ - struct blkcipher_walk walk; - struct crypto_blkcipher *tfm = desc->tfm; - struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm); - struct crypto_cipher *child = ctx->child; - int err; + struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt(desc, &walk); - - while ((nbytes = walk.nbytes)) { - if (walk.src.virt.addr == walk.dst.virt.addr) - nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child); - else - nbytes = crypto_cbc_encrypt_segment(desc, &walk, child); - err = blkcipher_walk_done(desc, &walk, nbytes); - } - - return err; + crypto_cipher_encrypt_one(ctx->child, dst, src); } -static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc, - struct blkcipher_walk *walk, - struct crypto_cipher *tfm) +static int crypto_cbc_encrypt(struct skcipher_request *req) { - void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = - crypto_cipher_alg(tfm)->cia_decrypt; - int bsize = crypto_cipher_blocksize(tfm); - unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; - u8 *dst = walk->dst.virt.addr; - u8 *iv = walk->iv; - - do { - fn(crypto_cipher_tfm(tfm), dst, src); - crypto_xor(dst, iv, bsize); - iv = src; - - src += bsize; - dst += bsize; - } while ((nbytes -= bsize) >= bsize); - - memcpy(walk->iv, iv, bsize); - - return nbytes; + return crypto_cbc_encrypt_walk(req, crypto_cbc_encrypt_one); } -static int crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc, - struct blkcipher_walk *walk, - struct crypto_cipher *tfm) +static inline void crypto_cbc_decrypt_one(struct crypto_skcipher *tfm, + const u8 *src, u8 *dst) { - void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = - crypto_cipher_alg(tfm)->cia_decrypt; - int bsize = crypto_cipher_blocksize(tfm); - unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; - u8 last_iv[bsize]; - - /* Start of the last block. 
*/ - src += nbytes - (nbytes & (bsize - 1)) - bsize; - memcpy(last_iv, src, bsize); - - for (;;) { - fn(crypto_cipher_tfm(tfm), src, src); - if ((nbytes -= bsize) < bsize) - break; - crypto_xor(src, src - bsize, bsize); - src -= bsize; - } - - crypto_xor(src, walk->iv, bsize); - memcpy(walk->iv, last_iv, bsize); + struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); - return nbytes; + crypto_cipher_decrypt_one(ctx->child, dst, src); } -static int crypto_cbc_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int crypto_cbc_decrypt(struct skcipher_request *req) { - struct blkcipher_walk walk; - struct crypto_blkcipher *tfm = desc->tfm; - struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm); - struct crypto_cipher *child = ctx->child; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct skcipher_walk walk; int err; - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt(desc, &walk); + err = skcipher_walk_virt(&walk, req, false); - while ((nbytes = walk.nbytes)) { - if (walk.src.virt.addr == walk.dst.virt.addr) - nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child); - else - nbytes = crypto_cbc_decrypt_segment(desc, &walk, child); - err = blkcipher_walk_done(desc, &walk, nbytes); + while (walk.nbytes) { + err = crypto_cbc_decrypt_blocks(&walk, tfm, + crypto_cbc_decrypt_one); + err = skcipher_walk_done(&walk, err); } return err; } -static int crypto_cbc_init_tfm(struct crypto_tfm *tfm) +static int crypto_cbc_init_tfm(struct crypto_skcipher *tfm) { - struct crypto_instance *inst = (void *)tfm->__crt_alg; - struct crypto_spawn *spawn = crypto_instance_ctx(inst); - struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm); + struct skcipher_instance *inst = skcipher_alg_instance(tfm); + struct crypto_spawn *spawn = skcipher_instance_ctx(inst); + struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); struct crypto_cipher *cipher; cipher = crypto_spawn_cipher(spawn); @@ -205,72 +92,91 @@ static int crypto_cbc_init_tfm(struct crypto_tfm *tfm) return 0; } -static void crypto_cbc_exit_tfm(struct crypto_tfm *tfm) +static void crypto_cbc_exit_tfm(struct crypto_skcipher *tfm) { - struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); + crypto_free_cipher(ctx->child); } -static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb) +static void crypto_cbc_free(struct skcipher_instance *inst) { - struct crypto_instance *inst; + crypto_drop_skcipher(skcipher_instance_ctx(inst)); + kfree(inst); +} + +static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct skcipher_instance *inst; + struct crypto_spawn *spawn; struct crypto_alg *alg; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER); if (err) - return ERR_PTR(err); + return err; + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return -ENOMEM; alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); + err = PTR_ERR(alg); if (IS_ERR(alg)) - return ERR_CAST(alg); + goto err_free_inst; - inst = ERR_PTR(-EINVAL); + spawn = skcipher_instance_ctx(inst); + err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), + CRYPTO_ALG_TYPE_MASK); + crypto_mod_put(alg); + if (err) + goto err_free_inst; + + err = crypto_inst_setname(skcipher_crypto_instance(inst), "cbc", alg); + if (err) + goto err_drop_spawn; + + err = -EINVAL; if 
(!is_power_of_2(alg->cra_blocksize)) - goto out_put_alg; + goto err_drop_spawn; - inst = crypto_alloc_instance("cbc", alg); - if (IS_ERR(inst)) - goto out_put_alg; + inst->alg.base.cra_priority = alg->cra_priority; + inst->alg.base.cra_blocksize = alg->cra_blocksize; + inst->alg.base.cra_alignmask = alg->cra_alignmask; - inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; - inst->alg.cra_priority = alg->cra_priority; - inst->alg.cra_blocksize = alg->cra_blocksize; - inst->alg.cra_alignmask = alg->cra_alignmask; - inst->alg.cra_type = &crypto_blkcipher_type; + inst->alg.ivsize = alg->cra_blocksize; + inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize; + inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize; - /* We access the data as u32s when xoring. */ - inst->alg.cra_alignmask |= __alignof__(u32) - 1; + inst->alg.base.cra_ctxsize = sizeof(struct crypto_cbc_ctx); - inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; - inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize; - inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize; + inst->alg.init = crypto_cbc_init_tfm; + inst->alg.exit = crypto_cbc_exit_tfm; - inst->alg.cra_ctxsize = sizeof(struct crypto_cbc_ctx); + inst->alg.setkey = crypto_cbc_setkey; + inst->alg.encrypt = crypto_cbc_encrypt; + inst->alg.decrypt = crypto_cbc_decrypt; - inst->alg.cra_init = crypto_cbc_init_tfm; - inst->alg.cra_exit = crypto_cbc_exit_tfm; + inst->free = crypto_cbc_free; - inst->alg.cra_blkcipher.setkey = crypto_cbc_setkey; - inst->alg.cra_blkcipher.encrypt = crypto_cbc_encrypt; - inst->alg.cra_blkcipher.decrypt = crypto_cbc_decrypt; + err = skcipher_register_instance(tmpl, inst); + if (err) + goto err_drop_spawn; -out_put_alg: - crypto_mod_put(alg); - return inst; -} +out: + return err; -static void crypto_cbc_free(struct crypto_instance *inst) -{ - crypto_drop_spawn(crypto_instance_ctx(inst)); +err_drop_spawn: + crypto_drop_spawn(spawn); +err_free_inst: kfree(inst); + goto out; } static struct crypto_template crypto_cbc_tmpl = { .name = "cbc", - .alloc = crypto_cbc_alloc, - .free = crypto_cbc_free, + .create = crypto_cbc_create, .module = THIS_MODULE, }; diff --git a/crypto/ccm.c b/crypto/ccm.c index 006d8575ef5c..1ce37ae0ce56 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -11,6 +11,7 @@ */ #include <crypto/internal/aead.h> +#include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> #include <linux/err.h> @@ -23,11 +24,11 @@ struct ccm_instance_ctx { struct crypto_skcipher_spawn ctr; - struct crypto_spawn cipher; + struct crypto_ahash_spawn mac; }; struct crypto_ccm_ctx { - struct crypto_cipher *cipher; + struct crypto_ahash *mac; struct crypto_skcipher *ctr; }; @@ -46,13 +47,20 @@ struct crypto_ccm_req_priv_ctx { u8 odata[16]; u8 idata[16]; u8 auth_tag[16]; - u32 ilen; u32 flags; struct scatterlist src[3]; struct scatterlist dst[3]; struct skcipher_request skreq; }; +struct cbcmac_tfm_ctx { + struct crypto_cipher *child; +}; + +struct cbcmac_desc_ctx { + unsigned int len; +}; + static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx( struct aead_request *req) { @@ -84,7 +92,7 @@ static int crypto_ccm_setkey(struct crypto_aead *aead, const u8 *key, { struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); struct crypto_skcipher *ctr = ctx->ctr; - struct crypto_cipher *tfm = ctx->cipher; + struct crypto_ahash *mac = ctx->mac; int err = 0; crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK); @@ -96,11 +104,11 @@ static int crypto_ccm_setkey(struct crypto_aead *aead, 
const u8 *key, if (err) goto out; - crypto_cipher_clear_flags(tfm, CRYPTO_TFM_REQ_MASK); - crypto_cipher_set_flags(tfm, crypto_aead_get_flags(aead) & + crypto_ahash_clear_flags(mac, CRYPTO_TFM_REQ_MASK); + crypto_ahash_set_flags(mac, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); - err = crypto_cipher_setkey(tfm, key, keylen); - crypto_aead_set_flags(aead, crypto_cipher_get_flags(tfm) & + err = crypto_ahash_setkey(mac, key, keylen); + crypto_aead_set_flags(aead, crypto_ahash_get_flags(mac) & CRYPTO_TFM_RES_MASK); out: @@ -167,119 +175,61 @@ static int format_adata(u8 *adata, unsigned int a) return len; } -static void compute_mac(struct crypto_cipher *tfm, u8 *data, int n, - struct crypto_ccm_req_priv_ctx *pctx) -{ - unsigned int bs = 16; - u8 *odata = pctx->odata; - u8 *idata = pctx->idata; - int datalen, getlen; - - datalen = n; - - /* first time in here, block may be partially filled. */ - getlen = bs - pctx->ilen; - if (datalen >= getlen) { - memcpy(idata + pctx->ilen, data, getlen); - crypto_xor(odata, idata, bs); - crypto_cipher_encrypt_one(tfm, odata, odata); - datalen -= getlen; - data += getlen; - pctx->ilen = 0; - } - - /* now encrypt rest of data */ - while (datalen >= bs) { - crypto_xor(odata, data, bs); - crypto_cipher_encrypt_one(tfm, odata, odata); - - datalen -= bs; - data += bs; - } - - /* check and see if there's leftover data that wasn't - * enough to fill a block. - */ - if (datalen) { - memcpy(idata + pctx->ilen, data, datalen); - pctx->ilen += datalen; - } -} - -static void get_data_to_compute(struct crypto_cipher *tfm, - struct crypto_ccm_req_priv_ctx *pctx, - struct scatterlist *sg, unsigned int len) -{ - struct scatter_walk walk; - u8 *data_src; - int n; - - scatterwalk_start(&walk, sg); - - while (len) { - n = scatterwalk_clamp(&walk, len); - if (!n) { - scatterwalk_start(&walk, sg_next(walk.sg)); - n = scatterwalk_clamp(&walk, len); - } - data_src = scatterwalk_map(&walk); - - compute_mac(tfm, data_src, n, pctx); - len -= n; - - scatterwalk_unmap(data_src); - scatterwalk_advance(&walk, n); - scatterwalk_done(&walk, 0, len); - if (len) - crypto_yield(pctx->flags); - } - - /* any leftover needs padding and then encrypted */ - if (pctx->ilen) { - int padlen; - u8 *odata = pctx->odata; - u8 *idata = pctx->idata; - - padlen = 16 - pctx->ilen; - memset(idata + pctx->ilen, 0, padlen); - crypto_xor(odata, idata, 16); - crypto_cipher_encrypt_one(tfm, odata, odata); - pctx->ilen = 0; - } -} - static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain, unsigned int cryptlen) { + struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); - struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); - struct crypto_cipher *cipher = ctx->cipher; + AHASH_REQUEST_ON_STACK(ahreq, ctx->mac); unsigned int assoclen = req->assoclen; + struct scatterlist sg[3]; u8 *odata = pctx->odata; u8 *idata = pctx->idata; - int err; + int ilen, err; /* format control data for input */ err = format_input(odata, req, cryptlen); if (err) goto out; - /* encrypt first block to use as start in computing mac */ - crypto_cipher_encrypt_one(cipher, odata, odata); + sg_init_table(sg, 3); + sg_set_buf(&sg[0], odata, 16); /* format associated data and compute into mac */ if (assoclen) { - pctx->ilen = format_adata(idata, assoclen); - get_data_to_compute(cipher, pctx, req->src, req->assoclen); + ilen = format_adata(idata, assoclen); + sg_set_buf(&sg[1], idata, ilen); + 
sg_chain(sg, 3, req->src); } else { - pctx->ilen = 0; + ilen = 0; + sg_chain(sg, 2, req->src); } - /* compute plaintext into mac */ - if (cryptlen) - get_data_to_compute(cipher, pctx, plain, cryptlen); + ahash_request_set_tfm(ahreq, ctx->mac); + ahash_request_set_callback(ahreq, pctx->flags, NULL, NULL); + ahash_request_set_crypt(ahreq, sg, NULL, assoclen + ilen + 16); + err = crypto_ahash_init(ahreq); + if (err) + goto out; + err = crypto_ahash_update(ahreq); + if (err) + goto out; + + /* we need to pad the MAC input to a round multiple of the block size */ + ilen = 16 - (assoclen + ilen) % 16; + if (ilen < 16) { + memset(idata, 0, ilen); + sg_init_table(sg, 2); + sg_set_buf(&sg[0], idata, ilen); + if (plain) + sg_chain(sg, 2, plain); + plain = sg; + cryptlen += ilen; + } + ahash_request_set_crypt(ahreq, plain, pctx->odata, cryptlen); + err = crypto_ahash_finup(ahreq); out: return err; } @@ -453,21 +403,21 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm) struct aead_instance *inst = aead_alg_instance(tfm); struct ccm_instance_ctx *ictx = aead_instance_ctx(inst); struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm); - struct crypto_cipher *cipher; + struct crypto_ahash *mac; struct crypto_skcipher *ctr; unsigned long align; int err; - cipher = crypto_spawn_cipher(&ictx->cipher); - if (IS_ERR(cipher)) - return PTR_ERR(cipher); + mac = crypto_spawn_ahash(&ictx->mac); + if (IS_ERR(mac)) + return PTR_ERR(mac); - ctr = crypto_spawn_skcipher2(&ictx->ctr); + ctr = crypto_spawn_skcipher(&ictx->ctr); err = PTR_ERR(ctr); if (IS_ERR(ctr)) - goto err_free_cipher; + goto err_free_mac; - ctx->cipher = cipher; + ctx->mac = mac; ctx->ctr = ctr; align = crypto_aead_alignmask(tfm); @@ -479,8 +429,8 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm) return 0; -err_free_cipher: - crypto_free_cipher(cipher); +err_free_mac: + crypto_free_ahash(mac); return err; } @@ -488,7 +438,7 @@ static void crypto_ccm_exit_tfm(struct crypto_aead *tfm) { struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm); - crypto_free_cipher(ctx->cipher); + crypto_free_ahash(ctx->mac); crypto_free_skcipher(ctx->ctr); } @@ -496,7 +446,7 @@ static void crypto_ccm_free(struct aead_instance *inst) { struct ccm_instance_ctx *ctx = aead_instance_ctx(inst); - crypto_drop_spawn(&ctx->cipher); + crypto_drop_ahash(&ctx->mac); crypto_drop_skcipher(&ctx->ctr); kfree(inst); } @@ -505,12 +455,13 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, struct rtattr **tb, const char *full_name, const char *ctr_name, - const char *cipher_name) + const char *mac_name) { struct crypto_attr_type *algt; struct aead_instance *inst; struct skcipher_alg *ctr; - struct crypto_alg *cipher; + struct crypto_alg *mac_alg; + struct hash_alg_common *mac; struct ccm_instance_ctx *ictx; int err; @@ -521,34 +472,35 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return -EINVAL; - cipher = crypto_alg_mod_lookup(cipher_name, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_MASK); - if (IS_ERR(cipher)) - return PTR_ERR(cipher); + mac_alg = crypto_find_alg(mac_name, &crypto_ahash_type, + CRYPTO_ALG_TYPE_HASH, + CRYPTO_ALG_TYPE_AHASH_MASK | + CRYPTO_ALG_ASYNC); + if (IS_ERR(mac_alg)) + return PTR_ERR(mac_alg); + mac = __crypto_hash_alg_common(mac_alg); err = -EINVAL; - if (cipher->cra_blocksize != 16) - goto out_put_cipher; + if (mac->digestsize != 16) + goto out_put_mac; inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); err = -ENOMEM; if (!inst) - goto out_put_cipher; + goto 
out_put_mac; ictx = aead_instance_ctx(inst); - - err = crypto_init_spawn(&ictx->cipher, cipher, - aead_crypto_instance(inst), - CRYPTO_ALG_TYPE_MASK); + err = crypto_init_ahash_spawn(&ictx->mac, mac, + aead_crypto_instance(inst)); if (err) goto err_free_inst; crypto_set_skcipher_spawn(&ictx->ctr, aead_crypto_instance(inst)); - err = crypto_grab_skcipher2(&ictx->ctr, ctr_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); if (err) - goto err_drop_cipher; + goto err_drop_mac; ctr = crypto_spawn_skcipher_alg(&ictx->ctr); @@ -564,18 +516,17 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)", ctr->base.cra_driver_name, - cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_drop_ctr; memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME); inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC; - inst->alg.base.cra_priority = (cipher->cra_priority + + inst->alg.base.cra_priority = (mac->base.cra_priority + ctr->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; - inst->alg.base.cra_alignmask = cipher->cra_alignmask | - ctr->base.cra_alignmask | - (__alignof__(u32) - 1); + inst->alg.base.cra_alignmask = mac->base.cra_alignmask | + ctr->base.cra_alignmask; inst->alg.ivsize = 16; inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr); inst->alg.maxauthsize = 16; @@ -593,23 +544,24 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, if (err) goto err_drop_ctr; -out_put_cipher: - crypto_mod_put(cipher); +out_put_mac: + crypto_mod_put(mac_alg); return err; err_drop_ctr: crypto_drop_skcipher(&ictx->ctr); -err_drop_cipher: - crypto_drop_spawn(&ictx->cipher); +err_drop_mac: + crypto_drop_ahash(&ictx->mac); err_free_inst: kfree(inst); - goto out_put_cipher; + goto out_put_mac; } static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb) { const char *cipher_name; char ctr_name[CRYPTO_MAX_ALG_NAME]; + char mac_name[CRYPTO_MAX_ALG_NAME]; char full_name[CRYPTO_MAX_ALG_NAME]; cipher_name = crypto_attr_alg_name(tb[1]); @@ -620,12 +572,16 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb) cipher_name) >= CRYPTO_MAX_ALG_NAME) return -ENAMETOOLONG; + if (snprintf(mac_name, CRYPTO_MAX_ALG_NAME, "cbcmac(%s)", + cipher_name) >= CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >= CRYPTO_MAX_ALG_NAME) return -ENAMETOOLONG; return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name, - cipher_name); + mac_name); } static struct crypto_template crypto_ccm_tmpl = { @@ -899,14 +855,164 @@ static struct crypto_template crypto_rfc4309_tmpl = { .module = THIS_MODULE, }; +static int crypto_cbcmac_digest_setkey(struct crypto_shash *parent, + const u8 *inkey, unsigned int keylen) +{ + struct cbcmac_tfm_ctx *ctx = crypto_shash_ctx(parent); + + return crypto_cipher_setkey(ctx->child, inkey, keylen); +} + +static int crypto_cbcmac_digest_init(struct shash_desc *pdesc) +{ + struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc); + int bs = crypto_shash_digestsize(pdesc->tfm); + u8 *dg = (u8 *)ctx + crypto_shash_descsize(pdesc->tfm) - bs; + + ctx->len = 0; + memset(dg, 0, bs); + + return 0; +} + +static int crypto_cbcmac_digest_update(struct shash_desc *pdesc, const u8 *p, + unsigned 
int len) +{ + struct crypto_shash *parent = pdesc->tfm; + struct cbcmac_tfm_ctx *tctx = crypto_shash_ctx(parent); + struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc); + struct crypto_cipher *tfm = tctx->child; + int bs = crypto_shash_digestsize(parent); + u8 *dg = (u8 *)ctx + crypto_shash_descsize(parent) - bs; + + while (len > 0) { + unsigned int l = min(len, bs - ctx->len); + + crypto_xor(dg + ctx->len, p, l); + ctx->len +=l; + len -= l; + p += l; + + if (ctx->len == bs) { + crypto_cipher_encrypt_one(tfm, dg, dg); + ctx->len = 0; + } + } + + return 0; +} + +static int crypto_cbcmac_digest_final(struct shash_desc *pdesc, u8 *out) +{ + struct crypto_shash *parent = pdesc->tfm; + struct cbcmac_tfm_ctx *tctx = crypto_shash_ctx(parent); + struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc); + struct crypto_cipher *tfm = tctx->child; + int bs = crypto_shash_digestsize(parent); + u8 *dg = (u8 *)ctx + crypto_shash_descsize(parent) - bs; + + if (ctx->len) + crypto_cipher_encrypt_one(tfm, dg, dg); + + memcpy(out, dg, bs); + return 0; +} + +static int cbcmac_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_cipher *cipher; + struct crypto_instance *inst = (void *)tfm->__crt_alg; + struct crypto_spawn *spawn = crypto_instance_ctx(inst); + struct cbcmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + + cipher = crypto_spawn_cipher(spawn); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + ctx->child = cipher; + + return 0; +}; + +static void cbcmac_exit_tfm(struct crypto_tfm *tfm) +{ + struct cbcmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + crypto_free_cipher(ctx->child); +} + +static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct shash_instance *inst; + struct crypto_alg *alg; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + if (err) + return err; + + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK); + if (IS_ERR(alg)) + return PTR_ERR(alg); + + inst = shash_alloc_instance("cbcmac", alg); + err = PTR_ERR(inst); + if (IS_ERR(inst)) + goto out_put_alg; + + err = crypto_init_spawn(shash_instance_ctx(inst), alg, + shash_crypto_instance(inst), + CRYPTO_ALG_TYPE_MASK); + if (err) + goto out_free_inst; + + inst->alg.base.cra_priority = alg->cra_priority; + inst->alg.base.cra_blocksize = 1; + + inst->alg.digestsize = alg->cra_blocksize; + inst->alg.descsize = ALIGN(sizeof(struct cbcmac_desc_ctx), + alg->cra_alignmask + 1) + + alg->cra_blocksize; + + inst->alg.base.cra_ctxsize = sizeof(struct cbcmac_tfm_ctx); + inst->alg.base.cra_init = cbcmac_init_tfm; + inst->alg.base.cra_exit = cbcmac_exit_tfm; + + inst->alg.init = crypto_cbcmac_digest_init; + inst->alg.update = crypto_cbcmac_digest_update; + inst->alg.final = crypto_cbcmac_digest_final; + inst->alg.setkey = crypto_cbcmac_digest_setkey; + + err = shash_register_instance(tmpl, inst); + +out_free_inst: + if (err) + shash_free_instance(shash_crypto_instance(inst)); + +out_put_alg: + crypto_mod_put(alg); + return err; +} + +static struct crypto_template crypto_cbcmac_tmpl = { + .name = "cbcmac", + .create = cbcmac_create, + .free = shash_free_instance, + .module = THIS_MODULE, +}; + static int __init crypto_ccm_module_init(void) { int err; - err = crypto_register_template(&crypto_ccm_base_tmpl); + err = crypto_register_template(&crypto_cbcmac_tmpl); if (err) goto out; + err = crypto_register_template(&crypto_ccm_base_tmpl); + if (err) + goto out_undo_cbcmac; + err = crypto_register_template(&crypto_ccm_tmpl); if (err) goto out_undo_base; @@ -922,6 +1028,8 @@ out_undo_ccm: 
crypto_unregister_template(&crypto_ccm_tmpl); out_undo_base: crypto_unregister_template(&crypto_ccm_base_tmpl); +out_undo_cbcmac: + crypto_unregister_template(&crypto_cbcmac_tmpl); goto out; } @@ -930,6 +1038,7 @@ static void __exit crypto_ccm_module_exit(void) { crypto_unregister_template(&crypto_rfc4309_tmpl); crypto_unregister_template(&crypto_ccm_tmpl); crypto_unregister_template(&crypto_ccm_base_tmpl); + crypto_unregister_template(&crypto_cbcmac_tmpl); } module_init(crypto_ccm_module_init); diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c index 1cab83146e33..8b3c04d625c3 100644 --- a/crypto/chacha20_generic.c +++ b/crypto/chacha20_generic.c @@ -10,10 +10,9 @@ */ #include <crypto/algapi.h> -#include <linux/crypto.h> -#include <linux/kernel.h> -#include <linux/module.h> #include <crypto/chacha20.h> +#include <crypto/internal/skcipher.h> +#include <linux/module.h> static inline u32 le32_to_cpuvp(const void *p) { @@ -63,10 +62,10 @@ void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv) } EXPORT_SYMBOL_GPL(crypto_chacha20_init); -int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key, +int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keysize) { - struct chacha20_ctx *ctx = crypto_tfm_ctx(tfm); + struct chacha20_ctx *ctx = crypto_skcipher_ctx(tfm); int i; if (keysize != CHACHA20_KEY_SIZE) @@ -79,66 +78,54 @@ int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key, } EXPORT_SYMBOL_GPL(crypto_chacha20_setkey); -int crypto_chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) +int crypto_chacha20_crypt(struct skcipher_request *req) { - struct blkcipher_walk walk; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct chacha20_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; u32 state[16]; int err; - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE); - - crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv); + err = skcipher_walk_virt(&walk, req, true); - while (walk.nbytes >= CHACHA20_BLOCK_SIZE) { - chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr, - rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE)); - err = blkcipher_walk_done(desc, &walk, - walk.nbytes % CHACHA20_BLOCK_SIZE); - } + crypto_chacha20_init(state, ctx, walk.iv); - if (walk.nbytes) { + while (walk.nbytes > 0) { chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr, walk.nbytes); - err = blkcipher_walk_done(desc, &walk, 0); + err = skcipher_walk_done(&walk, 0); } return err; } EXPORT_SYMBOL_GPL(crypto_chacha20_crypt); -static struct crypto_alg alg = { - .cra_name = "chacha20", - .cra_driver_name = "chacha20-generic", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = 1, - .cra_type = &crypto_blkcipher_type, - .cra_ctxsize = sizeof(struct chacha20_ctx), - .cra_alignmask = sizeof(u32) - 1, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = CHACHA20_KEY_SIZE, - .max_keysize = CHACHA20_KEY_SIZE, - .ivsize = CHACHA20_IV_SIZE, - .geniv = "seqiv", - .setkey = crypto_chacha20_setkey, - .encrypt = crypto_chacha20_crypt, - .decrypt = crypto_chacha20_crypt, - }, - }, +static struct skcipher_alg alg = { + .base.cra_name = "chacha20", + .base.cra_driver_name = "chacha20-generic", + .base.cra_priority = 100, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha20_ctx), + .base.cra_alignmask = 
sizeof(u32) - 1, + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA20_KEY_SIZE, + .max_keysize = CHACHA20_KEY_SIZE, + .ivsize = CHACHA20_IV_SIZE, + .chunksize = CHACHA20_BLOCK_SIZE, + .setkey = crypto_chacha20_setkey, + .encrypt = crypto_chacha20_crypt, + .decrypt = crypto_chacha20_crypt, }; static int __init chacha20_generic_mod_init(void) { - return crypto_register_alg(&alg); + return crypto_register_skcipher(&alg); } static void __exit chacha20_generic_mod_fini(void) { - crypto_unregister_alg(&alg); + crypto_unregister_skcipher(&alg); } module_init(chacha20_generic_mod_init); diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index e899ef51dc8e..db1bc3147bc4 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -532,7 +532,7 @@ static int chachapoly_init(struct crypto_aead *tfm) if (IS_ERR(poly)) return PTR_ERR(poly); - chacha = crypto_spawn_skcipher2(&ictx->chacha); + chacha = crypto_spawn_skcipher(&ictx->chacha); if (IS_ERR(chacha)) { crypto_free_ahash(poly); return PTR_ERR(chacha); @@ -625,9 +625,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, goto err_free_inst; crypto_set_skcipher_spawn(&ctx->chacha, aead_crypto_instance(inst)); - err = crypto_grab_skcipher2(&ctx->chacha, chacha_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + err = crypto_grab_skcipher(&ctx->chacha, chacha_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); if (err) goto err_drop_poly; diff --git a/crypto/cipher.c b/crypto/cipher.c index 39541e0e537d..94fa3551476b 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c @@ -116,7 +116,3 @@ int crypto_init_cipher_ops(struct crypto_tfm *tfm) return 0; } - -void crypto_exit_cipher_ops(struct crypto_tfm *tfm) -{ -} diff --git a/crypto/cmac.c b/crypto/cmac.c index 7a8bfbd548f6..16301f52858c 100644 --- a/crypto/cmac.c +++ b/crypto/cmac.c @@ -57,7 +57,8 @@ static int crypto_cmac_digest_setkey(struct crypto_shash *parent, unsigned long alignmask = crypto_shash_alignmask(parent); struct cmac_tfm_ctx *ctx = crypto_shash_ctx(parent); unsigned int bs = crypto_shash_blocksize(parent); - __be64 *consts = PTR_ALIGN((void *)ctx->ctx, alignmask + 1); + __be64 *consts = PTR_ALIGN((void *)ctx->ctx, + (alignmask | (__alignof__(__be64) - 1)) + 1); u64 _const[2]; int i, err = 0; u8 msb_mask, gfmask; @@ -173,7 +174,8 @@ static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out) struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); - u8 *consts = PTR_ALIGN((void *)tctx->ctx, alignmask + 1); + u8 *consts = PTR_ALIGN((void *)tctx->ctx, + (alignmask | (__alignof__(__be64) - 1)) + 1); u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1); u8 *prev = odds + bs; unsigned int offset = 0; @@ -243,6 +245,7 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) case 8: break; default: + err = -EINVAL; goto out_put_alg; } @@ -257,7 +260,7 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) goto out_free_inst; - alignmask = alg->cra_alignmask | (sizeof(long) - 1); + alignmask = alg->cra_alignmask; inst->alg.base.cra_alignmask = alignmask; inst->alg.base.cra_priority = alg->cra_priority; inst->alg.base.cra_blocksize = alg->cra_blocksize; @@ -269,7 +272,9 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) + alg->cra_blocksize * 2; inst->alg.base.cra_ctxsize = - ALIGN(sizeof(struct cmac_tfm_ctx), alignmask + 1) + ALIGN(sizeof(struct 
cmac_tfm_ctx), crypto_tfm_ctx_alignment()) + + ((alignmask | (__alignof__(__be64) - 1)) & + ~(crypto_tfm_ctx_alignment() - 1)) + alg->cra_blocksize * 2; inst->alg.base.cra_init = cmac_init_tfm; diff --git a/crypto/compress.c b/crypto/compress.c index c33f0763a956..f2d522924a07 100644 --- a/crypto/compress.c +++ b/crypto/compress.c @@ -42,7 +42,3 @@ int crypto_init_compress_ops(struct crypto_tfm *tfm) return 0; } - -void crypto_exit_compress_ops(struct crypto_tfm *tfm) -{ -} diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 0c654e59f215..0508c48a45c4 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -17,9 +17,9 @@ * */ -#include <crypto/algapi.h> #include <crypto/internal/hash.h> #include <crypto/internal/aead.h> +#include <crypto/internal/skcipher.h> #include <crypto/cryptd.h> #include <crypto/crypto_wq.h> #include <linux/atomic.h> @@ -48,6 +48,11 @@ struct cryptd_instance_ctx { struct cryptd_queue *queue; }; +struct skcipherd_instance_ctx { + struct crypto_skcipher_spawn spawn; + struct cryptd_queue *queue; +}; + struct hashd_instance_ctx { struct crypto_shash_spawn spawn; struct cryptd_queue *queue; @@ -67,6 +72,15 @@ struct cryptd_blkcipher_request_ctx { crypto_completion_t complete; }; +struct cryptd_skcipher_ctx { + atomic_t refcnt; + struct crypto_skcipher *child; +}; + +struct cryptd_skcipher_request_ctx { + crypto_completion_t complete; +}; + struct cryptd_hash_ctx { atomic_t refcnt; struct crypto_shash *child; @@ -122,7 +136,6 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, { int cpu, err; struct cryptd_cpu_queue *cpu_queue; - struct crypto_tfm *tfm; atomic_t *refcnt; bool may_backlog; @@ -141,7 +154,6 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, if (!atomic_read(refcnt)) goto out_put_cpu; - tfm = request->tfm; atomic_inc(refcnt); out_put_cpu: @@ -432,6 +444,216 @@ out_put_alg: return err; } +static int cryptd_skcipher_setkey(struct crypto_skcipher *parent, + const u8 *key, unsigned int keylen) +{ + struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent); + struct crypto_skcipher *child = ctx->child; + int err; + + crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_skcipher_setkey(child, key, keylen); + crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) & + CRYPTO_TFM_RES_MASK); + return err; +} + +static void cryptd_skcipher_complete(struct skcipher_request *req, int err) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); + int refcnt = atomic_read(&ctx->refcnt); + + local_bh_disable(); + rctx->complete(&req->base, err); + local_bh_enable(); + + if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt)) + crypto_free_skcipher(tfm); +} + +static void cryptd_skcipher_encrypt(struct crypto_async_request *base, + int err) +{ + struct skcipher_request *req = skcipher_request_cast(base); + struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct crypto_skcipher *child = ctx->child; + SKCIPHER_REQUEST_ON_STACK(subreq, child); + + if (unlikely(err == -EINPROGRESS)) + goto out; + + skcipher_request_set_tfm(subreq, child); + skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP, + NULL, NULL); + 
skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, + req->iv); + + err = crypto_skcipher_encrypt(subreq); + skcipher_request_zero(subreq); + + req->base.complete = rctx->complete; + +out: + cryptd_skcipher_complete(req, err); +} + +static void cryptd_skcipher_decrypt(struct crypto_async_request *base, + int err) +{ + struct skcipher_request *req = skcipher_request_cast(base); + struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct crypto_skcipher *child = ctx->child; + SKCIPHER_REQUEST_ON_STACK(subreq, child); + + if (unlikely(err == -EINPROGRESS)) + goto out; + + skcipher_request_set_tfm(subreq, child); + skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP, + NULL, NULL); + skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, + req->iv); + + err = crypto_skcipher_decrypt(subreq); + skcipher_request_zero(subreq); + + req->base.complete = rctx->complete; + +out: + cryptd_skcipher_complete(req, err); +} + +static int cryptd_skcipher_enqueue(struct skcipher_request *req, + crypto_completion_t compl) +{ + struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct cryptd_queue *queue; + + queue = cryptd_get_queue(crypto_skcipher_tfm(tfm)); + rctx->complete = req->base.complete; + req->base.complete = compl; + + return cryptd_enqueue_request(queue, &req->base); +} + +static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req) +{ + return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt); +} + +static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req) +{ + return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt); +} + +static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm) +{ + struct skcipher_instance *inst = skcipher_alg_instance(tfm); + struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst); + struct crypto_skcipher_spawn *spawn = &ictx->spawn; + struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct crypto_skcipher *cipher; + + cipher = crypto_spawn_skcipher(spawn); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + ctx->child = cipher; + crypto_skcipher_set_reqsize( + tfm, sizeof(struct cryptd_skcipher_request_ctx)); + return 0; +} + +static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm) +{ + struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); + + crypto_free_skcipher(ctx->child); +} + +static void cryptd_skcipher_free(struct skcipher_instance *inst) +{ + struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst); + + crypto_drop_skcipher(&ctx->spawn); +} + +static int cryptd_create_skcipher(struct crypto_template *tmpl, + struct rtattr **tb, + struct cryptd_queue *queue) +{ + struct skcipherd_instance_ctx *ctx; + struct skcipher_instance *inst; + struct skcipher_alg *alg; + const char *name; + u32 type; + u32 mask; + int err; + + type = 0; + mask = CRYPTO_ALG_ASYNC; + + cryptd_check_internal(tb, &type, &mask); + + name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(name)) + return PTR_ERR(name); + + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + ctx = skcipher_instance_ctx(inst); + ctx->queue = queue; + + crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst)); + err = crypto_grab_skcipher(&ctx->spawn, name, type, mask); + if (err) + goto out_free_inst; + + alg = 
crypto_spawn_skcipher_alg(&ctx->spawn); + err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base); + if (err) + goto out_drop_skcipher; + + inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC | + (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); + + inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg); + inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); + inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg); + inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg); + + inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx); + + inst->alg.init = cryptd_skcipher_init_tfm; + inst->alg.exit = cryptd_skcipher_exit_tfm; + + inst->alg.setkey = cryptd_skcipher_setkey; + inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue; + inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue; + + inst->free = cryptd_skcipher_free; + + err = skcipher_register_instance(tmpl, inst); + if (err) { +out_drop_skcipher: + crypto_drop_skcipher(&ctx->spawn); +out_free_inst: + kfree(inst); + } + return err; +} + static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) { struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); @@ -895,7 +1117,11 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_BLKCIPHER: - return cryptd_create_blkcipher(tmpl, tb, &queue); + if ((algt->type & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return cryptd_create_blkcipher(tmpl, tb, &queue); + + return cryptd_create_skcipher(tmpl, tb, &queue); case CRYPTO_ALG_TYPE_DIGEST: return cryptd_create_hash(tmpl, tb, &queue); case CRYPTO_ALG_TYPE_AEAD: @@ -985,6 +1211,58 @@ void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm) } EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher); +struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name, + u32 type, u32 mask) +{ + char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; + struct cryptd_skcipher_ctx *ctx; + struct crypto_skcipher *tfm; + + if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, + "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) + return ERR_PTR(-EINVAL); + + tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask); + if (IS_ERR(tfm)) + return ERR_CAST(tfm); + + if (tfm->base.__crt_alg->cra_module != THIS_MODULE) { + crypto_free_skcipher(tfm); + return ERR_PTR(-EINVAL); + } + + ctx = crypto_skcipher_ctx(tfm); + atomic_set(&ctx->refcnt, 1); + + return container_of(tfm, struct cryptd_skcipher, base); +} +EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher); + +struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm) +{ + struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); + + return ctx->child; +} +EXPORT_SYMBOL_GPL(cryptd_skcipher_child); + +bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm) +{ + struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); + + return atomic_read(&ctx->refcnt) - 1; +} +EXPORT_SYMBOL_GPL(cryptd_skcipher_queued); + +void cryptd_free_skcipher(struct cryptd_skcipher *tfm) +{ + struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); + + if (atomic_dec_and_test(&ctx->refcnt)) + crypto_free_skcipher(&tfm->base); +} +EXPORT_SYMBOL_GPL(cryptd_free_skcipher); + struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, u32 type, u32 mask) { diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index 6989ba0046df..727bd5c3569e 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c @@ -16,6 +16,7 @@ #include <linux/delay.h> #include <crypto/engine.h> #include 
<crypto/internal/hash.h> +#include <uapi/linux/sched/types.h> #include "internal.h" #define CRYPTO_ENGINE_MAX_QLEN 10 @@ -47,7 +48,7 @@ static void crypto_pump_requests(struct crypto_engine *engine, /* If another context is idling then defer */ if (engine->idling) { - kthread_queue_work(&engine->kworker, &engine->pump_requests); + kthread_queue_work(engine->kworker, &engine->pump_requests); goto out; } @@ -58,7 +59,7 @@ static void crypto_pump_requests(struct crypto_engine *engine, /* Only do teardown in the thread */ if (!in_kthread) { - kthread_queue_work(&engine->kworker, + kthread_queue_work(engine->kworker, &engine->pump_requests); goto out; } @@ -189,7 +190,7 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine, ret = ablkcipher_enqueue_request(&engine->queue, req); if (!engine->busy && need_pump) - kthread_queue_work(&engine->kworker, &engine->pump_requests); + kthread_queue_work(engine->kworker, &engine->pump_requests); spin_unlock_irqrestore(&engine->queue_lock, flags); return ret; @@ -231,7 +232,7 @@ int crypto_transfer_hash_request(struct crypto_engine *engine, ret = ahash_enqueue_request(&engine->queue, req); if (!engine->busy && need_pump) - kthread_queue_work(&engine->kworker, &engine->pump_requests); + kthread_queue_work(engine->kworker, &engine->pump_requests); spin_unlock_irqrestore(&engine->queue_lock, flags); return ret; @@ -284,7 +285,7 @@ void crypto_finalize_cipher_request(struct crypto_engine *engine, req->base.complete(&req->base, err); - kthread_queue_work(&engine->kworker, &engine->pump_requests); + kthread_queue_work(engine->kworker, &engine->pump_requests); } EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request); @@ -321,7 +322,7 @@ void crypto_finalize_hash_request(struct crypto_engine *engine, req->base.complete(&req->base, err); - kthread_queue_work(&engine->kworker, &engine->pump_requests); + kthread_queue_work(engine->kworker, &engine->pump_requests); } EXPORT_SYMBOL_GPL(crypto_finalize_hash_request); @@ -345,7 +346,7 @@ int crypto_engine_start(struct crypto_engine *engine) engine->running = true; spin_unlock_irqrestore(&engine->queue_lock, flags); - kthread_queue_work(&engine->kworker, &engine->pump_requests); + kthread_queue_work(engine->kworker, &engine->pump_requests); return 0; } @@ -422,11 +423,8 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN); spin_lock_init(&engine->queue_lock); - kthread_init_worker(&engine->kworker); - engine->kworker_task = kthread_run(kthread_worker_fn, - &engine->kworker, "%s", - engine->name); - if (IS_ERR(engine->kworker_task)) { + engine->kworker = kthread_create_worker(0, "%s", engine->name); + if (IS_ERR(engine->kworker)) { dev_err(dev, "failed to create crypto request pump task\n"); return NULL; } @@ -434,7 +432,7 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) if (engine->rt) { dev_info(dev, "will run requests pump with realtime priority\n"); - sched_setscheduler(engine->kworker_task, SCHED_FIFO, ¶m); + sched_setscheduler(engine->kworker->task, SCHED_FIFO, ¶m); } return engine; @@ -455,8 +453,7 @@ int crypto_engine_exit(struct crypto_engine *engine) if (ret) return ret; - kthread_flush_worker(&engine->kworker); - kthread_stop(engine->kworker_task); + kthread_destroy_worker(engine->kworker); return 0; } diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index 1c5705481c69..a90404a0c5ff 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c @@ -112,6 +112,21 @@ nla_put_failure: 
return -EMSGSIZE; } +static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg) +{ + struct crypto_report_acomp racomp; + + strncpy(racomp.type, "acomp", sizeof(racomp.type)); + + if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, + sizeof(struct crypto_report_acomp), &racomp)) + goto nla_put_failure; + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_akcipher rakcipher; @@ -186,7 +201,11 @@ static int crypto_report_one(struct crypto_alg *alg, goto nla_put_failure; break; + case CRYPTO_ALG_TYPE_ACOMPRESS: + if (crypto_report_acomp(skb, alg)) + goto nla_put_failure; + break; case CRYPTO_ALG_TYPE_AKCIPHER: if (crypto_report_akcipher(skb, alg)) goto nla_put_failure; diff --git a/crypto/ctr.c b/crypto/ctr.c index ff4d21eddb83..a4f4a8983169 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -209,7 +209,7 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb) inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; inst->alg.cra_priority = alg->cra_priority; inst->alg.cra_blocksize = 1; - inst->alg.cra_alignmask = alg->cra_alignmask | (__alignof__(u32) - 1); + inst->alg.cra_alignmask = alg->cra_alignmask; inst->alg.cra_type = &crypto_blkcipher_type; inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; @@ -312,7 +312,7 @@ static int crypto_rfc3686_init_tfm(struct crypto_skcipher *tfm) unsigned long align; unsigned int reqsize; - cipher = crypto_spawn_skcipher2(spawn); + cipher = crypto_spawn_skcipher(spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); @@ -370,9 +370,9 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl, spawn = skcipher_instance_ctx(inst); crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); - err = crypto_grab_skcipher2(spawn, cipher_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + err = crypto_grab_skcipher(spawn, cipher_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); if (err) goto err_free_inst; diff --git a/crypto/cts.c b/crypto/cts.c index 51976187b2bf..243f591dc409 100644 --- a/crypto/cts.c +++ b/crypto/cts.c @@ -49,6 +49,7 @@ #include <linux/scatterlist.h> #include <crypto/scatterwalk.h> #include <linux/slab.h> +#include <linux/compiler.h> struct crypto_cts_ctx { struct crypto_skcipher *child; @@ -103,7 +104,7 @@ static int cts_cbc_encrypt(struct skcipher_request *req) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct skcipher_request *subreq = &rctx->subreq; int bsize = crypto_skcipher_blocksize(tfm); - u8 d[bsize * 2] __attribute__ ((aligned(__alignof__(u32)))); + u8 d[bsize * 2] __aligned(__alignof__(u32)); struct scatterlist *sg; unsigned int offset; int lastn; @@ -183,7 +184,7 @@ static int cts_cbc_decrypt(struct skcipher_request *req) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct skcipher_request *subreq = &rctx->subreq; int bsize = crypto_skcipher_blocksize(tfm); - u8 d[bsize * 2] __attribute__ ((aligned(__alignof__(u32)))); + u8 d[bsize * 2] __aligned(__alignof__(u32)); struct scatterlist *sg; unsigned int offset; u8 *space; @@ -290,7 +291,7 @@ static int crypto_cts_init_tfm(struct crypto_skcipher *tfm) unsigned bsize; unsigned align; - cipher = crypto_spawn_skcipher2(spawn); + cipher = crypto_spawn_skcipher(spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); @@ -348,9 +349,9 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = skcipher_instance_ctx(inst); crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); - 
err = crypto_grab_skcipher2(spawn, cipher_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + err = crypto_grab_skcipher(spawn, cipher_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); if (err) goto err_free_inst; @@ -373,9 +374,6 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_blocksize = alg->base.cra_blocksize; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; - /* We access the data as u32s when xoring. */ - inst->alg.base.cra_alignmask |= __alignof__(u32) - 1; - inst->alg.ivsize = alg->base.cra_blocksize; inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg); diff --git a/crypto/deflate.c b/crypto/deflate.c index 95d8d37c5021..f942cb391890 100644 --- a/crypto/deflate.c +++ b/crypto/deflate.c @@ -32,6 +32,7 @@ #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/net.h> +#include <crypto/internal/scompress.h> #define DEFLATE_DEF_LEVEL Z_DEFAULT_COMPRESSION #define DEFLATE_DEF_WINBITS 11 @@ -101,9 +102,8 @@ static void deflate_decomp_exit(struct deflate_ctx *ctx) vfree(ctx->decomp_stream.workspace); } -static int deflate_init(struct crypto_tfm *tfm) +static int __deflate_init(void *ctx) { - struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); int ret; ret = deflate_comp_init(ctx); @@ -116,19 +116,55 @@ out: return ret; } -static void deflate_exit(struct crypto_tfm *tfm) +static void *deflate_alloc_ctx(struct crypto_scomp *tfm) +{ + struct deflate_ctx *ctx; + int ret; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return ERR_PTR(-ENOMEM); + + ret = __deflate_init(ctx); + if (ret) { + kfree(ctx); + return ERR_PTR(ret); + } + + return ctx; +} + +static int deflate_init(struct crypto_tfm *tfm) { struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); + return __deflate_init(ctx); +} + +static void __deflate_exit(void *ctx) +{ deflate_comp_exit(ctx); deflate_decomp_exit(ctx); } -static int deflate_compress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) +static void deflate_free_ctx(struct crypto_scomp *tfm, void *ctx) +{ + __deflate_exit(ctx); + kzfree(ctx); +} + +static void deflate_exit(struct crypto_tfm *tfm) +{ + struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); + + __deflate_exit(ctx); +} + +static int __deflate_compress(const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen, void *ctx) { int ret = 0; - struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); + struct deflate_ctx *dctx = ctx; struct z_stream_s *stream = &dctx->comp_stream; ret = zlib_deflateReset(stream); @@ -153,12 +189,27 @@ out: return ret; } -static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) +static int deflate_compress(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen) +{ + struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); + + return __deflate_compress(src, slen, dst, dlen, dctx); +} + +static int deflate_scompress(struct crypto_scomp *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen, + void *ctx) +{ + return __deflate_compress(src, slen, dst, dlen, ctx); +} + +static int __deflate_decompress(const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen, void *ctx) { int ret = 0; - struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); + struct deflate_ctx *dctx = ctx; struct z_stream_s *stream = &dctx->decomp_stream; ret = zlib_inflateReset(stream); @@ -194,6 +245,21 @@ out: return ret; } +static int 
deflate_decompress(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen) +{ + struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); + + return __deflate_decompress(src, slen, dst, dlen, dctx); +} + +static int deflate_sdecompress(struct crypto_scomp *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen, + void *ctx) +{ + return __deflate_decompress(src, slen, dst, dlen, ctx); +} + static struct crypto_alg alg = { .cra_name = "deflate", .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, @@ -206,14 +272,39 @@ static struct crypto_alg alg = { .coa_decompress = deflate_decompress } } }; +static struct scomp_alg scomp = { + .alloc_ctx = deflate_alloc_ctx, + .free_ctx = deflate_free_ctx, + .compress = deflate_scompress, + .decompress = deflate_sdecompress, + .base = { + .cra_name = "deflate", + .cra_driver_name = "deflate-scomp", + .cra_module = THIS_MODULE, + } +}; + static int __init deflate_mod_init(void) { - return crypto_register_alg(&alg); + int ret; + + ret = crypto_register_alg(&alg); + if (ret) + return ret; + + ret = crypto_register_scomp(&scomp); + if (ret) { + crypto_unregister_alg(&alg); + return ret; + } + + return ret; } static void __exit deflate_mod_fini(void) { crypto_unregister_alg(&alg); + crypto_unregister_scomp(&scomp); } module_init(deflate_mod_init); diff --git a/crypto/dh.c b/crypto/dh.c index 9d19360e7189..ddcb528ab2cc 100644 --- a/crypto/dh.c +++ b/crypto/dh.c @@ -118,7 +118,7 @@ static int dh_compute_value(struct kpp_request *req) if (req->src) { base = mpi_read_raw_from_sgl(req->src, req->src_len); if (!base) { - ret = EINVAL; + ret = -EINVAL; goto err_free_val; } } else { diff --git a/crypto/drbg.c b/crypto/drbg.c index 053035b5c8f8..8a4d98b4adba 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1782,6 +1782,7 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg, memcpy(outbuf, drbg->outscratchpad, cryptlen); outlen -= cryptlen; + outbuf += cryptlen; } ret = 0; diff --git a/crypto/gcm.c b/crypto/gcm.c index f624ac98c94e..b7ad808be3d4 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -575,7 +575,7 @@ static int crypto_gcm_init_tfm(struct crypto_aead *tfm) if (IS_ERR(ghash)) return PTR_ERR(ghash); - ctr = crypto_spawn_skcipher2(&ictx->ctr); + ctr = crypto_spawn_skcipher(&ictx->ctr); err = PTR_ERR(ctr); if (IS_ERR(ctr)) goto err_free_hash; @@ -663,20 +663,20 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, goto err_drop_ghash; crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst)); - err = crypto_grab_skcipher2(&ctx->ctr, ctr_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); if (err) goto err_drop_ghash; ctr = crypto_spawn_skcipher_alg(&ctx->ctr); /* We only support 16-byte blocks. */ + err = -EINVAL; if (crypto_skcipher_alg_ivsize(ctr) != 16) goto out_put_ctr; /* Not a stream cipher? */ - err = -EINVAL; if (ctr->base.cra_blocksize != 1) goto out_put_ctr; diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c index 5276607c72d0..72015fee533d 100644 --- a/crypto/gf128mul.c +++ b/crypto/gf128mul.c @@ -263,48 +263,6 @@ EXPORT_SYMBOL(gf128mul_bbe); * t[1][BYTE] contains g*x^8*BYTE * .. 
* t[15][BYTE] contains g*x^120*BYTE */ -struct gf128mul_64k *gf128mul_init_64k_lle(const be128 *g) -{ - struct gf128mul_64k *t; - int i, j, k; - - t = kzalloc(sizeof(*t), GFP_KERNEL); - if (!t) - goto out; - - for (i = 0; i < 16; i++) { - t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL); - if (!t->t[i]) { - gf128mul_free_64k(t); - t = NULL; - goto out; - } - } - - t->t[0]->t[128] = *g; - for (j = 64; j > 0; j >>= 1) - gf128mul_x_lle(&t->t[0]->t[j], &t->t[0]->t[j + j]); - - for (i = 0;;) { - for (j = 2; j < 256; j += j) - for (k = 1; k < j; ++k) - be128_xor(&t->t[i]->t[j + k], - &t->t[i]->t[j], &t->t[i]->t[k]); - - if (++i >= 16) - break; - - for (j = 128; j > 0; j >>= 1) { - t->t[i]->t[j] = t->t[i - 1]->t[j]; - gf128mul_x8_lle(&t->t[i]->t[j]); - } - } - -out: - return t; -} -EXPORT_SYMBOL(gf128mul_init_64k_lle); - struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g) { struct gf128mul_64k *t; @@ -352,24 +310,11 @@ void gf128mul_free_64k(struct gf128mul_64k *t) int i; for (i = 0; i < 16; i++) - kfree(t->t[i]); - kfree(t); + kzfree(t->t[i]); + kzfree(t); } EXPORT_SYMBOL(gf128mul_free_64k); -void gf128mul_64k_lle(be128 *a, struct gf128mul_64k *t) -{ - u8 *ap = (u8 *)a; - be128 r[1]; - int i; - - *r = t->t[0]->t[ap[0]]; - for (i = 1; i < 16; ++i) - be128_xor(r, r, &t->t[i]->t[ap[i]]); - *a = *r; -} -EXPORT_SYMBOL(gf128mul_64k_lle); - void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t) { u8 *ap = (u8 *)a; diff --git a/crypto/internal.h b/crypto/internal.h index 7eefcdb00227..f07320423191 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -76,9 +76,6 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); int crypto_init_cipher_ops(struct crypto_tfm *tfm); int crypto_init_compress_ops(struct crypto_tfm *tfm); -void crypto_exit_cipher_ops(struct crypto_tfm *tfm); -void crypto_exit_compress_ops(struct crypto_tfm *tfm); - struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask); void crypto_larval_kill(struct crypto_alg *alg); struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask); diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index c4938497eedb..787dccca3715 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c @@ -39,7 +39,6 @@ #include <linux/module.h> #include <linux/slab.h> -#include <linux/module.h> #include <linux/fips.h> #include <linux/time.h> #include <linux/crypto.h> diff --git a/crypto/kpp.c b/crypto/kpp.c index d36ce05eee43..a90edc27af77 100644 --- a/crypto/kpp.c +++ b/crypto/kpp.c @@ -19,6 +19,7 @@ #include <linux/crypto.h> #include <crypto/algapi.h> #include <linux/cryptouser.h> +#include <linux/compiler.h> #include <net/netlink.h> #include <crypto/kpp.h> #include <crypto/internal/kpp.h> @@ -47,7 +48,7 @@ static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg) #endif static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg) - __attribute__ ((unused)); + __maybe_unused; static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg) { diff --git a/crypto/lrw.c b/crypto/lrw.c index 6f9908a7ebcb..ecd8474018e3 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -17,7 +17,8 @@ * * The test vectors are included in the testing module tcrypt.[ch] */ -#include <crypto/algapi.h> +#include <crypto/internal/skcipher.h> +#include <crypto/scatterwalk.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> @@ -29,11 +30,30 @@ #include <crypto/gf128mul.h> #include <crypto/lrw.h> +#define LRW_BUFFER_SIZE 128u 
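[Editor's note, illustrative aside -- not part of the patch. Templates converted in this series (cbc and chacha20 earlier in this section, lrw here) are driven through the crypto_skcipher request API instead of the old blkcipher one. A minimal caller-side sketch for "lrw(aes)" follows; the function name and buffers are hypothetical, and a synchronous implementation is requested so that -EINPROGRESS/-EBUSY completion handling can be omitted.]

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int lrw_aes_encrypt_example(const u8 *key, unsigned int keylen,
				   u8 *iv, u8 *buf, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	int err;

	/* mask out async implementations so the encrypt call completes inline */
	tfm = crypto_alloc_skcipher("lrw(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* key = block cipher key with the 16-byte tweak key appended */
	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* buf must be linearly mapped (not stack/vmalloc memory) and len a
	 * multiple of LRW_BLOCK_SIZE; iv carries the 16-byte tweak index */
	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	err = crypto_skcipher_encrypt(req);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}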
+ struct priv { - struct crypto_cipher *child; + struct crypto_skcipher *child; struct lrw_table_ctx table; }; +struct rctx { + be128 buf[LRW_BUFFER_SIZE / sizeof(be128)]; + + be128 t; + + be128 *ext; + + struct scatterlist srcbuf[2]; + struct scatterlist dstbuf[2]; + struct scatterlist *src; + struct scatterlist *dst; + + unsigned int left; + + struct skcipher_request subreq; +}; + static inline void setbit128_bbe(void *b, int bit) { __set_bit(bit ^ (0x80 - @@ -76,32 +96,26 @@ void lrw_free_table(struct lrw_table_ctx *ctx) } EXPORT_SYMBOL_GPL(lrw_free_table); -static int setkey(struct crypto_tfm *parent, const u8 *key, +static int setkey(struct crypto_skcipher *parent, const u8 *key, unsigned int keylen) { - struct priv *ctx = crypto_tfm_ctx(parent); - struct crypto_cipher *child = ctx->child; + struct priv *ctx = crypto_skcipher_ctx(parent); + struct crypto_skcipher *child = ctx->child; int err, bsize = LRW_BLOCK_SIZE; const u8 *tweak = key + keylen - bsize; - crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & - CRYPTO_TFM_REQ_MASK); - err = crypto_cipher_setkey(child, key, keylen - bsize); + crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_skcipher_setkey(child, key, keylen - bsize); + crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) & + CRYPTO_TFM_RES_MASK); if (err) return err; - crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); return lrw_init_table(&ctx->table, tweak); } -struct sinfo { - be128 t; - struct crypto_tfm *tfm; - void (*fn)(struct crypto_tfm *, u8 *, const u8 *); -}; - static inline void inc(be128 *iv) { be64_add_cpu(&iv->b, 1); @@ -109,13 +123,6 @@ static inline void inc(be128 *iv) be64_add_cpu(&iv->a, 1); } -static inline void lrw_round(struct sinfo *s, void *dst, const void *src) -{ - be128_xor(dst, &s->t, src); /* PP <- T xor P */ - s->fn(s->tfm, dst, dst); /* CC <- E(Key2,PP) */ - be128_xor(dst, dst, &s->t); /* C <- T xor CC */ -} - /* this returns the number of consequative 1 bits starting * from the right, get_index128(00 00 00 00 00 00 ... 
00 00 10 FB) = 2 */ static inline int get_index128(be128 *block) @@ -135,83 +142,263 @@ static inline int get_index128(be128 *block) return x; } -static int crypt(struct blkcipher_desc *d, - struct blkcipher_walk *w, struct priv *ctx, - void (*fn)(struct crypto_tfm *, u8 *, const u8 *)) +static int post_crypt(struct skcipher_request *req) { + struct rctx *rctx = skcipher_request_ctx(req); + be128 *buf = rctx->ext ?: rctx->buf; + struct skcipher_request *subreq; + const int bs = LRW_BLOCK_SIZE; + struct skcipher_walk w; + struct scatterlist *sg; + unsigned offset; int err; - unsigned int avail; + + subreq = &rctx->subreq; + err = skcipher_walk_virt(&w, subreq, false); + + while (w.nbytes) { + unsigned int avail = w.nbytes; + be128 *wdst; + + wdst = w.dst.virt.addr; + + do { + be128_xor(wdst, buf++, wdst); + wdst++; + } while ((avail -= bs) >= bs); + + err = skcipher_walk_done(&w, avail); + } + + rctx->left -= subreq->cryptlen; + + if (err || !rctx->left) + goto out; + + rctx->dst = rctx->dstbuf; + + scatterwalk_done(&w.out, 0, 1); + sg = w.out.sg; + offset = w.out.offset; + + if (rctx->dst != sg) { + rctx->dst[0] = *sg; + sg_unmark_end(rctx->dst); + scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2); + } + rctx->dst[0].length -= offset - sg->offset; + rctx->dst[0].offset = offset; + +out: + return err; +} + +static int pre_crypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct rctx *rctx = skcipher_request_ctx(req); + struct priv *ctx = crypto_skcipher_ctx(tfm); + be128 *buf = rctx->ext ?: rctx->buf; + struct skcipher_request *subreq; const int bs = LRW_BLOCK_SIZE; - struct sinfo s = { - .tfm = crypto_cipher_tfm(ctx->child), - .fn = fn - }; + struct skcipher_walk w; + struct scatterlist *sg; + unsigned cryptlen; + unsigned offset; be128 *iv; - u8 *wsrc; - u8 *wdst; + bool more; + int err; - err = blkcipher_walk_virt(d, w); - if (!(avail = w->nbytes)) - return err; + subreq = &rctx->subreq; + skcipher_request_set_tfm(subreq, tfm); - wsrc = w->src.virt.addr; - wdst = w->dst.virt.addr; + cryptlen = subreq->cryptlen; + more = rctx->left > cryptlen; + if (!more) + cryptlen = rctx->left; - /* calculate first value of T */ - iv = (be128 *)w->iv; - s.t = *iv; + skcipher_request_set_crypt(subreq, rctx->src, rctx->dst, + cryptlen, req->iv); - /* T <- I*Key2 */ - gf128mul_64k_bbe(&s.t, ctx->table.table); + err = skcipher_walk_virt(&w, subreq, false); + iv = w.iv; - goto first; + while (w.nbytes) { + unsigned int avail = w.nbytes; + be128 *wsrc; + be128 *wdst; + + wsrc = w.src.virt.addr; + wdst = w.dst.virt.addr; - for (;;) { do { + *buf++ = rctx->t; + be128_xor(wdst++, &rctx->t, wsrc++); + /* T <- I*Key2, using the optimization * discussed in the specification */ - be128_xor(&s.t, &s.t, + be128_xor(&rctx->t, &rctx->t, &ctx->table.mulinc[get_index128(iv)]); inc(iv); + } while ((avail -= bs) >= bs); -first: - lrw_round(&s, wdst, wsrc); + err = skcipher_walk_done(&w, avail); + } - wsrc += bs; - wdst += bs; - } while ((avail -= bs) >= bs); + skcipher_request_set_tfm(subreq, ctx->child); + skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst, + cryptlen, NULL); - err = blkcipher_walk_done(d, w, avail); - if (!(avail = w->nbytes)) - break; + if (err || !more) + goto out; + + rctx->src = rctx->srcbuf; + + scatterwalk_done(&w.in, 0, 1); + sg = w.in.sg; + offset = w.in.offset; + + if (rctx->src != sg) { + rctx->src[0] = *sg; + sg_unmark_end(rctx->src); + scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2); + } + rctx->src[0].length -= offset - 
sg->offset; + rctx->src[0].offset = offset; + +out: + return err; +} + +static int init_crypt(struct skcipher_request *req, crypto_completion_t done) +{ + struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + struct rctx *rctx = skcipher_request_ctx(req); + struct skcipher_request *subreq; + gfp_t gfp; + + subreq = &rctx->subreq; + skcipher_request_set_callback(subreq, req->base.flags, done, req); + + gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : + GFP_ATOMIC; + rctx->ext = NULL; + + subreq->cryptlen = LRW_BUFFER_SIZE; + if (req->cryptlen > LRW_BUFFER_SIZE) { + subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE); + rctx->ext = kmalloc(subreq->cryptlen, gfp); + } + + rctx->src = req->src; + rctx->dst = req->dst; + rctx->left = req->cryptlen; + + /* calculate first value of T */ + memcpy(&rctx->t, req->iv, sizeof(rctx->t)); + + /* T <- I*Key2 */ + gf128mul_64k_bbe(&rctx->t, ctx->table.table); - wsrc = w->src.virt.addr; - wdst = w->dst.virt.addr; + return 0; +} + +static void exit_crypt(struct skcipher_request *req) +{ + struct rctx *rctx = skcipher_request_ctx(req); + + rctx->left = 0; + + if (rctx->ext) + kfree(rctx->ext); +} + +static int do_encrypt(struct skcipher_request *req, int err) +{ + struct rctx *rctx = skcipher_request_ctx(req); + struct skcipher_request *subreq; + + subreq = &rctx->subreq; + + while (!err && rctx->left) { + err = pre_crypt(req) ?: + crypto_skcipher_encrypt(subreq) ?: + post_crypt(req); + + if (err == -EINPROGRESS || + (err == -EBUSY && + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return err; } + exit_crypt(req); return err; } -static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) +static void encrypt_done(struct crypto_async_request *areq, int err) +{ + struct skcipher_request *req = areq->data; + struct skcipher_request *subreq; + struct rctx *rctx; + + rctx = skcipher_request_ctx(req); + subreq = &rctx->subreq; + subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; + + err = do_encrypt(req, err ?: post_crypt(req)); + if (rctx->left) + return; + + skcipher_request_complete(req, err); +} + +static int encrypt(struct skcipher_request *req) +{ + return do_encrypt(req, init_crypt(req, encrypt_done)); +} + +static int do_decrypt(struct skcipher_request *req, int err) { - struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk w; + struct rctx *rctx = skcipher_request_ctx(req); + struct skcipher_request *subreq; + + subreq = &rctx->subreq; + + while (!err && rctx->left) { + err = pre_crypt(req) ?: + crypto_skcipher_decrypt(subreq) ?: + post_crypt(req); + + if (err == -EINPROGRESS || + (err == -EBUSY && + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return err; + } - blkcipher_walk_init(&w, dst, src, nbytes); - return crypt(desc, &w, ctx, - crypto_cipher_alg(ctx->child)->cia_encrypt); + exit_crypt(req); + return err; } -static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) +static void decrypt_done(struct crypto_async_request *areq, int err) { - struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk w; + struct skcipher_request *req = areq->data; + struct skcipher_request *subreq; + struct rctx *rctx; + + rctx = skcipher_request_ctx(req); + subreq = &rctx->subreq; + subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; + + err = do_decrypt(req, err ?: post_crypt(req)); + if (rctx->left) + return; - blkcipher_walk_init(&w, dst, src, nbytes); - return 
crypt(desc, &w, ctx, - crypto_cipher_alg(ctx->child)->cia_decrypt); + skcipher_request_complete(req, err); +} + +static int decrypt(struct skcipher_request *req) +{ + return do_decrypt(req, init_crypt(req, decrypt_done)); } int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst, @@ -293,95 +480,161 @@ first: } EXPORT_SYMBOL_GPL(lrw_crypt); -static int init_tfm(struct crypto_tfm *tfm) +static int init_tfm(struct crypto_skcipher *tfm) { - struct crypto_cipher *cipher; - struct crypto_instance *inst = (void *)tfm->__crt_alg; - struct crypto_spawn *spawn = crypto_instance_ctx(inst); - struct priv *ctx = crypto_tfm_ctx(tfm); - u32 *flags = &tfm->crt_flags; + struct skcipher_instance *inst = skcipher_alg_instance(tfm); + struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst); + struct priv *ctx = crypto_skcipher_ctx(tfm); + struct crypto_skcipher *cipher; - cipher = crypto_spawn_cipher(spawn); + cipher = crypto_spawn_skcipher(spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); - if (crypto_cipher_blocksize(cipher) != LRW_BLOCK_SIZE) { - *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; - crypto_free_cipher(cipher); - return -EINVAL; - } - ctx->child = cipher; + + crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) + + sizeof(struct rctx)); + return 0; } -static void exit_tfm(struct crypto_tfm *tfm) +static void exit_tfm(struct crypto_skcipher *tfm) { - struct priv *ctx = crypto_tfm_ctx(tfm); + struct priv *ctx = crypto_skcipher_ctx(tfm); lrw_free_table(&ctx->table); - crypto_free_cipher(ctx->child); + crypto_free_skcipher(ctx->child); +} + +static void free(struct skcipher_instance *inst) +{ + crypto_drop_skcipher(skcipher_instance_ctx(inst)); + kfree(inst); } -static struct crypto_instance *alloc(struct rtattr **tb) +static int create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_instance *inst; - struct crypto_alg *alg; + struct crypto_skcipher_spawn *spawn; + struct skcipher_instance *inst; + struct crypto_attr_type *algt; + struct skcipher_alg *alg; + const char *cipher_name; + char ecb_name[CRYPTO_MAX_ALG_NAME]; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return PTR_ERR(algt); + + if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) + return -EINVAL; + + cipher_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(cipher_name)) + return PTR_ERR(cipher_name); + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + spawn = skcipher_instance_ctx(inst); + + crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); + err = crypto_grab_skcipher(spawn, cipher_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); + if (err == -ENOENT) { + err = -ENAMETOOLONG; + if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", + cipher_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + err = crypto_grab_skcipher(spawn, ecb_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); + } + if (err) - return ERR_PTR(err); + goto err_free_inst; - alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_MASK); - if (IS_ERR(alg)) - return ERR_CAST(alg); + alg = crypto_skcipher_spawn_alg(spawn); - inst = crypto_alloc_instance("lrw", alg); - if (IS_ERR(inst)) - goto out_put_alg; + err = -EINVAL; + if (alg->base.cra_blocksize != LRW_BLOCK_SIZE) + goto err_drop_spawn; - inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; - inst->alg.cra_priority = alg->cra_priority; - inst->alg.cra_blocksize = alg->cra_blocksize; + 
if (crypto_skcipher_alg_ivsize(alg)) + goto err_drop_spawn; - if (alg->cra_alignmask < 7) inst->alg.cra_alignmask = 7; - else inst->alg.cra_alignmask = alg->cra_alignmask; - inst->alg.cra_type = &crypto_blkcipher_type; + err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw", + &alg->base); + if (err) + goto err_drop_spawn; - if (!(alg->cra_blocksize % 4)) - inst->alg.cra_alignmask |= 3; - inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; - inst->alg.cra_blkcipher.min_keysize = - alg->cra_cipher.cia_min_keysize + alg->cra_blocksize; - inst->alg.cra_blkcipher.max_keysize = - alg->cra_cipher.cia_max_keysize + alg->cra_blocksize; + err = -EINVAL; + cipher_name = alg->base.cra_name; - inst->alg.cra_ctxsize = sizeof(struct priv); + /* Alas we screwed up the naming so we have to mangle the + * cipher name. + */ + if (!strncmp(cipher_name, "ecb(", 4)) { + unsigned len; - inst->alg.cra_init = init_tfm; - inst->alg.cra_exit = exit_tfm; + len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name)); + if (len < 2 || len >= sizeof(ecb_name)) + goto err_drop_spawn; - inst->alg.cra_blkcipher.setkey = setkey; - inst->alg.cra_blkcipher.encrypt = encrypt; - inst->alg.cra_blkcipher.decrypt = decrypt; + if (ecb_name[len - 1] != ')') + goto err_drop_spawn; -out_put_alg: - crypto_mod_put(alg); - return inst; -} + ecb_name[len - 1] = 0; -static void free(struct crypto_instance *inst) -{ - crypto_drop_spawn(crypto_instance_ctx(inst)); + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, + "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + } + + inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; + inst->alg.base.cra_priority = alg->base.cra_priority; + inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE; + inst->alg.base.cra_alignmask = alg->base.cra_alignmask | + (__alignof__(u64) - 1); + + inst->alg.ivsize = LRW_BLOCK_SIZE; + inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) + + LRW_BLOCK_SIZE; + inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) + + LRW_BLOCK_SIZE; + + inst->alg.base.cra_ctxsize = sizeof(struct priv); + + inst->alg.init = init_tfm; + inst->alg.exit = exit_tfm; + + inst->alg.setkey = setkey; + inst->alg.encrypt = encrypt; + inst->alg.decrypt = decrypt; + + inst->free = free; + + err = skcipher_register_instance(tmpl, inst); + if (err) + goto err_drop_spawn; + +out: + return err; + +err_drop_spawn: + crypto_drop_skcipher(spawn); +err_free_inst: kfree(inst); + goto out; } static struct crypto_template crypto_tmpl = { .name = "lrw", - .alloc = alloc, - .free = free, + .create = create, .module = THIS_MODULE, }; diff --git a/crypto/lz4.c b/crypto/lz4.c index aefbceaf3104..71eff9b01b12 100644 --- a/crypto/lz4.c +++ b/crypto/lz4.c @@ -23,57 +23,98 @@ #include <linux/crypto.h> #include <linux/vmalloc.h> #include <linux/lz4.h> +#include <crypto/internal/scompress.h> struct lz4_ctx { void *lz4_comp_mem; }; +static void *lz4_alloc_ctx(struct crypto_scomp *tfm) +{ + void *ctx; + + ctx = vmalloc(LZ4_MEM_COMPRESS); + if (!ctx) + return ERR_PTR(-ENOMEM); + + return ctx; +} + static int lz4_init(struct crypto_tfm *tfm) { struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); - ctx->lz4_comp_mem = vmalloc(LZ4_MEM_COMPRESS); - if (!ctx->lz4_comp_mem) + ctx->lz4_comp_mem = lz4_alloc_ctx(NULL); + if (IS_ERR(ctx->lz4_comp_mem)) return -ENOMEM; return 0; } +static void lz4_free_ctx(struct crypto_scomp *tfm, void *ctx) +{ + vfree(ctx); +} + static void lz4_exit(struct crypto_tfm *tfm) { struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); - vfree(ctx->lz4_comp_mem); + 
+ lz4_free_ctx(NULL, ctx->lz4_comp_mem); +} + +static int __lz4_compress_crypto(const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen, void *ctx) +{ + int out_len = LZ4_compress_default(src, dst, + slen, *dlen, ctx); + + if (!out_len) + return -EINVAL; + + *dlen = out_len; + return 0; +} + +static int lz4_scompress(struct crypto_scomp *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen, + void *ctx) +{ + return __lz4_compress_crypto(src, slen, dst, dlen, ctx); } static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) + unsigned int slen, u8 *dst, unsigned int *dlen) { struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); - size_t tmp_len = *dlen; - int err; - err = lz4_compress(src, slen, dst, &tmp_len, ctx->lz4_comp_mem); + return __lz4_compress_crypto(src, slen, dst, dlen, ctx->lz4_comp_mem); +} - if (err < 0) - return -EINVAL; +static int __lz4_decompress_crypto(const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen, void *ctx) +{ + int out_len = LZ4_decompress_safe(src, dst, slen, *dlen); - *dlen = tmp_len; + if (out_len < 0) + return out_len; + + *dlen = out_len; return 0; } -static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) +static int lz4_sdecompress(struct crypto_scomp *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen, + void *ctx) { - int err; - size_t tmp_len = *dlen; - size_t __slen = slen; - - err = lz4_decompress_unknownoutputsize(src, __slen, dst, &tmp_len); - if (err < 0) - return -EINVAL; + return __lz4_decompress_crypto(src, slen, dst, dlen, NULL); +} - *dlen = tmp_len; - return err; +static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, + unsigned int *dlen) +{ + return __lz4_decompress_crypto(src, slen, dst, dlen, NULL); } static struct crypto_alg alg_lz4 = { @@ -89,14 +130,39 @@ static struct crypto_alg alg_lz4 = { .coa_decompress = lz4_decompress_crypto } } }; +static struct scomp_alg scomp = { + .alloc_ctx = lz4_alloc_ctx, + .free_ctx = lz4_free_ctx, + .compress = lz4_scompress, + .decompress = lz4_sdecompress, + .base = { + .cra_name = "lz4", + .cra_driver_name = "lz4-scomp", + .cra_module = THIS_MODULE, + } +}; + static int __init lz4_mod_init(void) { - return crypto_register_alg(&alg_lz4); + int ret; + + ret = crypto_register_alg(&alg_lz4); + if (ret) + return ret; + + ret = crypto_register_scomp(&scomp); + if (ret) { + crypto_unregister_alg(&alg_lz4); + return ret; + } + + return ret; } static void __exit lz4_mod_fini(void) { crypto_unregister_alg(&alg_lz4); + crypto_unregister_scomp(&scomp); } module_init(lz4_mod_init); diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c index a1d3b5bd3d85..03a34a8109c0 100644 --- a/crypto/lz4hc.c +++ b/crypto/lz4hc.c @@ -22,58 +22,100 @@ #include <linux/crypto.h> #include <linux/vmalloc.h> #include <linux/lz4.h> +#include <crypto/internal/scompress.h> struct lz4hc_ctx { void *lz4hc_comp_mem; }; +static void *lz4hc_alloc_ctx(struct crypto_scomp *tfm) +{ + void *ctx; + + ctx = vmalloc(LZ4HC_MEM_COMPRESS); + if (!ctx) + return ERR_PTR(-ENOMEM); + + return ctx; +} + static int lz4hc_init(struct crypto_tfm *tfm) { struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); - ctx->lz4hc_comp_mem = vmalloc(LZ4HC_MEM_COMPRESS); - if (!ctx->lz4hc_comp_mem) + ctx->lz4hc_comp_mem = lz4hc_alloc_ctx(NULL); + if (IS_ERR(ctx->lz4hc_comp_mem)) return -ENOMEM; return 0; } +static void lz4hc_free_ctx(struct crypto_scomp *tfm, void *ctx) +{ + 
vfree(ctx); +} + static void lz4hc_exit(struct crypto_tfm *tfm) { struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); - vfree(ctx->lz4hc_comp_mem); + lz4hc_free_ctx(NULL, ctx->lz4hc_comp_mem); +} + +static int __lz4hc_compress_crypto(const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen, void *ctx) +{ + int out_len = LZ4_compress_HC(src, dst, slen, + *dlen, LZ4HC_DEFAULT_CLEVEL, ctx); + + if (!out_len) + return -EINVAL; + + *dlen = out_len; + return 0; +} + +static int lz4hc_scompress(struct crypto_scomp *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen, + void *ctx) +{ + return __lz4hc_compress_crypto(src, slen, dst, dlen, ctx); } static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) + unsigned int slen, u8 *dst, + unsigned int *dlen) { struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); - size_t tmp_len = *dlen; - int err; - err = lz4hc_compress(src, slen, dst, &tmp_len, ctx->lz4hc_comp_mem); + return __lz4hc_compress_crypto(src, slen, dst, dlen, + ctx->lz4hc_comp_mem); +} - if (err < 0) - return -EINVAL; +static int __lz4hc_decompress_crypto(const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen, void *ctx) +{ + int out_len = LZ4_decompress_safe(src, dst, slen, *dlen); - *dlen = tmp_len; + if (out_len < 0) + return out_len; + + *dlen = out_len; return 0; } -static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) +static int lz4hc_sdecompress(struct crypto_scomp *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen, + void *ctx) { - int err; - size_t tmp_len = *dlen; - size_t __slen = slen; - - err = lz4_decompress_unknownoutputsize(src, __slen, dst, &tmp_len); - if (err < 0) - return -EINVAL; + return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL); +} - *dlen = tmp_len; - return err; +static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, + unsigned int *dlen) +{ + return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL); } static struct crypto_alg alg_lz4hc = { @@ -89,14 +131,39 @@ static struct crypto_alg alg_lz4hc = { .coa_decompress = lz4hc_decompress_crypto } } }; +static struct scomp_alg scomp = { + .alloc_ctx = lz4hc_alloc_ctx, + .free_ctx = lz4hc_free_ctx, + .compress = lz4hc_scompress, + .decompress = lz4hc_sdecompress, + .base = { + .cra_name = "lz4hc", + .cra_driver_name = "lz4hc-scomp", + .cra_module = THIS_MODULE, + } +}; + static int __init lz4hc_mod_init(void) { - return crypto_register_alg(&alg_lz4hc); + int ret; + + ret = crypto_register_alg(&alg_lz4hc); + if (ret) + return ret; + + ret = crypto_register_scomp(&scomp); + if (ret) { + crypto_unregister_alg(&alg_lz4hc); + return ret; + } + + return ret; } static void __exit lz4hc_mod_fini(void) { crypto_unregister_alg(&alg_lz4hc); + crypto_unregister_scomp(&scomp); } module_init(lz4hc_mod_init); diff --git a/crypto/lzo.c b/crypto/lzo.c index c3f3dd9a28c5..168df784da84 100644 --- a/crypto/lzo.c +++ b/crypto/lzo.c @@ -22,40 +22,55 @@ #include <linux/vmalloc.h> #include <linux/mm.h> #include <linux/lzo.h> +#include <crypto/internal/scompress.h> struct lzo_ctx { void *lzo_comp_mem; }; +static void *lzo_alloc_ctx(struct crypto_scomp *tfm) +{ + void *ctx; + + ctx = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL | __GFP_NOWARN); + if (!ctx) + ctx = vmalloc(LZO1X_MEM_COMPRESS); + if (!ctx) + return ERR_PTR(-ENOMEM); + + return ctx; +} + static int lzo_init(struct crypto_tfm *tfm) { struct lzo_ctx *ctx = 
crypto_tfm_ctx(tfm); - ctx->lzo_comp_mem = kmalloc(LZO1X_MEM_COMPRESS, - GFP_KERNEL | __GFP_NOWARN); - if (!ctx->lzo_comp_mem) - ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS); - if (!ctx->lzo_comp_mem) + ctx->lzo_comp_mem = lzo_alloc_ctx(NULL); + if (IS_ERR(ctx->lzo_comp_mem)) return -ENOMEM; return 0; } +static void lzo_free_ctx(struct crypto_scomp *tfm, void *ctx) +{ + kvfree(ctx); +} + static void lzo_exit(struct crypto_tfm *tfm) { struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); - kvfree(ctx->lzo_comp_mem); + lzo_free_ctx(NULL, ctx->lzo_comp_mem); } -static int lzo_compress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) +static int __lzo_compress(const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen, void *ctx) { - struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ int err; - err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx->lzo_comp_mem); + err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx); if (err != LZO_E_OK) return -EINVAL; @@ -64,8 +79,23 @@ static int lzo_compress(struct crypto_tfm *tfm, const u8 *src, return 0; } -static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) +static int lzo_compress(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen) +{ + struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); + + return __lzo_compress(src, slen, dst, dlen, ctx->lzo_comp_mem); +} + +static int lzo_scompress(struct crypto_scomp *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen, + void *ctx) +{ + return __lzo_compress(src, slen, dst, dlen, ctx); +} + +static int __lzo_decompress(const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen) { int err; size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ @@ -77,7 +107,19 @@ static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src, *dlen = tmp_len; return 0; +} +static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen) +{ + return __lzo_decompress(src, slen, dst, dlen); +} + +static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen, + void *ctx) +{ + return __lzo_decompress(src, slen, dst, dlen); } static struct crypto_alg alg = { @@ -88,18 +130,43 @@ static struct crypto_alg alg = { .cra_init = lzo_init, .cra_exit = lzo_exit, .cra_u = { .compress = { - .coa_compress = lzo_compress, - .coa_decompress = lzo_decompress } } + .coa_compress = lzo_compress, + .coa_decompress = lzo_decompress } } +}; + +static struct scomp_alg scomp = { + .alloc_ctx = lzo_alloc_ctx, + .free_ctx = lzo_free_ctx, + .compress = lzo_scompress, + .decompress = lzo_sdecompress, + .base = { + .cra_name = "lzo", + .cra_driver_name = "lzo-scomp", + .cra_module = THIS_MODULE, + } }; static int __init lzo_mod_init(void) { - return crypto_register_alg(&alg); + int ret; + + ret = crypto_register_alg(&alg); + if (ret) + return ret; + + ret = crypto_register_scomp(&scomp); + if (ret) { + crypto_unregister_alg(&alg); + return ret; + } + + return ret; } static void __exit lzo_mod_fini(void) { crypto_unregister_alg(&alg); + crypto_unregister_scomp(&scomp); } module_init(lzo_mod_init); diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c index c207458d6299..4e6472658852 100644 --- a/crypto/mcryptd.c +++ b/crypto/mcryptd.c @@ -24,6 +24,7 @@ #include <linux/module.h> #include <linux/scatterlist.h> #include <linux/sched.h> +#include 
<linux/sched/stat.h> #include <linux/slab.h> #include <linux/hardirq.h> diff --git a/crypto/pcbc.c b/crypto/pcbc.c index f654965f0933..29dd2b4a3b85 100644 --- a/crypto/pcbc.c +++ b/crypto/pcbc.c @@ -14,40 +14,38 @@ * */ -#include <crypto/algapi.h> +#include <crypto/internal/skcipher.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/scatterlist.h> #include <linux/slab.h> +#include <linux/compiler.h> struct crypto_pcbc_ctx { struct crypto_cipher *child; }; -static int crypto_pcbc_setkey(struct crypto_tfm *parent, const u8 *key, +static int crypto_pcbc_setkey(struct crypto_skcipher *parent, const u8 *key, unsigned int keylen) { - struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(parent); + struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(parent); struct crypto_cipher *child = ctx->child; int err; crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & - CRYPTO_TFM_REQ_MASK); + crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); err = crypto_cipher_setkey(child, key, keylen); - crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); + crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) & + CRYPTO_TFM_RES_MASK); return err; } -static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc, - struct blkcipher_walk *walk, +static int crypto_pcbc_encrypt_segment(struct skcipher_request *req, + struct skcipher_walk *walk, struct crypto_cipher *tfm) { - void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = - crypto_cipher_alg(tfm)->cia_encrypt; int bsize = crypto_cipher_blocksize(tfm); unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; @@ -56,7 +54,7 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc, do { crypto_xor(iv, src, bsize); - fn(crypto_cipher_tfm(tfm), dst, iv); + crypto_cipher_encrypt_one(tfm, dst, iv); memcpy(iv, dst, bsize); crypto_xor(iv, src, bsize); @@ -67,12 +65,10 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc, return nbytes; } -static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc, - struct blkcipher_walk *walk, +static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req, + struct skcipher_walk *walk, struct crypto_cipher *tfm) { - void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = - crypto_cipher_alg(tfm)->cia_encrypt; int bsize = crypto_cipher_blocksize(tfm); unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; @@ -82,7 +78,7 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc, do { memcpy(tmpbuf, src, bsize); crypto_xor(iv, src, bsize); - fn(crypto_cipher_tfm(tfm), src, iv); + crypto_cipher_encrypt_one(tfm, src, iv); memcpy(iv, tmpbuf, bsize); crypto_xor(iv, src, bsize); @@ -94,38 +90,34 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc, return nbytes; } -static int crypto_pcbc_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int crypto_pcbc_encrypt(struct skcipher_request *req) { - struct blkcipher_walk walk; - struct crypto_blkcipher *tfm = desc->tfm; - struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm); struct crypto_cipher *child = ctx->child; + struct skcipher_walk walk; + unsigned int nbytes; int err; - blkcipher_walk_init(&walk, dst, src, 
nbytes); - err = blkcipher_walk_virt(desc, &walk); + err = skcipher_walk_virt(&walk, req, false); while ((nbytes = walk.nbytes)) { if (walk.src.virt.addr == walk.dst.virt.addr) - nbytes = crypto_pcbc_encrypt_inplace(desc, &walk, + nbytes = crypto_pcbc_encrypt_inplace(req, &walk, child); else - nbytes = crypto_pcbc_encrypt_segment(desc, &walk, + nbytes = crypto_pcbc_encrypt_segment(req, &walk, child); - err = blkcipher_walk_done(desc, &walk, nbytes); + err = skcipher_walk_done(&walk, nbytes); } return err; } -static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc, - struct blkcipher_walk *walk, +static int crypto_pcbc_decrypt_segment(struct skcipher_request *req, + struct skcipher_walk *walk, struct crypto_cipher *tfm) { - void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = - crypto_cipher_alg(tfm)->cia_decrypt; int bsize = crypto_cipher_blocksize(tfm); unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; @@ -133,7 +125,7 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc, u8 *iv = walk->iv; do { - fn(crypto_cipher_tfm(tfm), dst, src); + crypto_cipher_decrypt_one(tfm, dst, src); crypto_xor(dst, iv, bsize); memcpy(iv, src, bsize); crypto_xor(iv, dst, bsize); @@ -147,21 +139,19 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc, return nbytes; } -static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc, - struct blkcipher_walk *walk, +static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req, + struct skcipher_walk *walk, struct crypto_cipher *tfm) { - void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = - crypto_cipher_alg(tfm)->cia_decrypt; int bsize = crypto_cipher_blocksize(tfm); unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; u8 *iv = walk->iv; - u8 tmpbuf[bsize]; + u8 tmpbuf[bsize] __aligned(__alignof__(u32)); do { memcpy(tmpbuf, src, bsize); - fn(crypto_cipher_tfm(tfm), src, src); + crypto_cipher_decrypt_one(tfm, src, src); crypto_xor(src, iv, bsize); memcpy(iv, tmpbuf, bsize); crypto_xor(iv, src, bsize); @@ -174,37 +164,35 @@ static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc, return nbytes; } -static int crypto_pcbc_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int crypto_pcbc_decrypt(struct skcipher_request *req) { - struct blkcipher_walk walk; - struct crypto_blkcipher *tfm = desc->tfm; - struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm); struct crypto_cipher *child = ctx->child; + struct skcipher_walk walk; + unsigned int nbytes; int err; - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt(desc, &walk); + err = skcipher_walk_virt(&walk, req, false); while ((nbytes = walk.nbytes)) { if (walk.src.virt.addr == walk.dst.virt.addr) - nbytes = crypto_pcbc_decrypt_inplace(desc, &walk, + nbytes = crypto_pcbc_decrypt_inplace(req, &walk, child); else - nbytes = crypto_pcbc_decrypt_segment(desc, &walk, + nbytes = crypto_pcbc_decrypt_segment(req, &walk, child); - err = blkcipher_walk_done(desc, &walk, nbytes); + err = skcipher_walk_done(&walk, nbytes); } return err; } -static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm) +static int crypto_pcbc_init_tfm(struct crypto_skcipher *tfm) { - struct crypto_instance *inst = (void *)tfm->__crt_alg; - struct crypto_spawn *spawn = crypto_instance_ctx(inst); - struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm); + 
struct skcipher_instance *inst = skcipher_alg_instance(tfm); + struct crypto_spawn *spawn = skcipher_instance_ctx(inst); + struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm); struct crypto_cipher *cipher; cipher = crypto_spawn_cipher(spawn); @@ -215,68 +203,95 @@ static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm) return 0; } -static void crypto_pcbc_exit_tfm(struct crypto_tfm *tfm) +static void crypto_pcbc_exit_tfm(struct crypto_skcipher *tfm) { - struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm); + crypto_free_cipher(ctx->child); } -static struct crypto_instance *crypto_pcbc_alloc(struct rtattr **tb) +static void crypto_pcbc_free(struct skcipher_instance *inst) +{ + crypto_drop_skcipher(skcipher_instance_ctx(inst)); + kfree(inst); +} + +static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_instance *inst; + struct skcipher_instance *inst; + struct crypto_attr_type *algt; + struct crypto_spawn *spawn; struct crypto_alg *alg; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); - if (err) - return ERR_PTR(err); + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return PTR_ERR(algt); + + if (((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) & + ~CRYPTO_ALG_INTERNAL) + return -EINVAL; - alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_MASK); + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER | + (algt->type & CRYPTO_ALG_INTERNAL), + CRYPTO_ALG_TYPE_MASK | + (algt->mask & CRYPTO_ALG_INTERNAL)); + err = PTR_ERR(alg); if (IS_ERR(alg)) - return ERR_CAST(alg); + goto err_free_inst; - inst = crypto_alloc_instance("pcbc", alg); - if (IS_ERR(inst)) - goto out_put_alg; + spawn = skcipher_instance_ctx(inst); + err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), + CRYPTO_ALG_TYPE_MASK); + crypto_mod_put(alg); + if (err) + goto err_free_inst; - inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; - inst->alg.cra_priority = alg->cra_priority; - inst->alg.cra_blocksize = alg->cra_blocksize; - inst->alg.cra_alignmask = alg->cra_alignmask; - inst->alg.cra_type = &crypto_blkcipher_type; + err = crypto_inst_setname(skcipher_crypto_instance(inst), "pcbc", alg); + if (err) + goto err_drop_spawn; - /* We access the data as u32s when xoring. 
*/ - inst->alg.cra_alignmask |= __alignof__(u32) - 1; + inst->alg.base.cra_flags = alg->cra_flags & CRYPTO_ALG_INTERNAL; + inst->alg.base.cra_priority = alg->cra_priority; + inst->alg.base.cra_blocksize = alg->cra_blocksize; + inst->alg.base.cra_alignmask = alg->cra_alignmask; - inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; - inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize; - inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize; + inst->alg.ivsize = alg->cra_blocksize; + inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize; + inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize; - inst->alg.cra_ctxsize = sizeof(struct crypto_pcbc_ctx); + inst->alg.base.cra_ctxsize = sizeof(struct crypto_pcbc_ctx); - inst->alg.cra_init = crypto_pcbc_init_tfm; - inst->alg.cra_exit = crypto_pcbc_exit_tfm; + inst->alg.init = crypto_pcbc_init_tfm; + inst->alg.exit = crypto_pcbc_exit_tfm; - inst->alg.cra_blkcipher.setkey = crypto_pcbc_setkey; - inst->alg.cra_blkcipher.encrypt = crypto_pcbc_encrypt; - inst->alg.cra_blkcipher.decrypt = crypto_pcbc_decrypt; + inst->alg.setkey = crypto_pcbc_setkey; + inst->alg.encrypt = crypto_pcbc_encrypt; + inst->alg.decrypt = crypto_pcbc_decrypt; -out_put_alg: - crypto_mod_put(alg); - return inst; -} + inst->free = crypto_pcbc_free; -static void crypto_pcbc_free(struct crypto_instance *inst) -{ - crypto_drop_spawn(crypto_instance_ctx(inst)); + err = skcipher_register_instance(tmpl, inst); + if (err) + goto err_drop_spawn; + +out: + return err; + +err_drop_spawn: + crypto_drop_spawn(spawn); +err_free_inst: kfree(inst); + goto out; } static struct crypto_template crypto_pcbc_tmpl = { .name = "pcbc", - .alloc = crypto_pcbc_alloc, - .free = crypto_pcbc_free, + .create = crypto_pcbc_create, .module = THIS_MODULE, }; diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c index 2df9835dfbc0..b1c2d57dc734 100644 --- a/crypto/poly1305_generic.c +++ b/crypto/poly1305_generic.c @@ -17,6 +17,7 @@ #include <linux/crypto.h> #include <linux/kernel.h> #include <linux/module.h> +#include <asm/unaligned.h> static inline u64 mlt(u64 a, u64 b) { @@ -33,11 +34,6 @@ static inline u32 and(u32 v, u32 mask) return v & mask; } -static inline u32 le32_to_cpuvp(const void *p) -{ - return le32_to_cpup(p); -} - int crypto_poly1305_init(struct shash_desc *desc) { struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); @@ -65,19 +61,19 @@ EXPORT_SYMBOL_GPL(crypto_poly1305_setkey); static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key) { /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ - dctx->r[0] = (le32_to_cpuvp(key + 0) >> 0) & 0x3ffffff; - dctx->r[1] = (le32_to_cpuvp(key + 3) >> 2) & 0x3ffff03; - dctx->r[2] = (le32_to_cpuvp(key + 6) >> 4) & 0x3ffc0ff; - dctx->r[3] = (le32_to_cpuvp(key + 9) >> 6) & 0x3f03fff; - dctx->r[4] = (le32_to_cpuvp(key + 12) >> 8) & 0x00fffff; + dctx->r[0] = (get_unaligned_le32(key + 0) >> 0) & 0x3ffffff; + dctx->r[1] = (get_unaligned_le32(key + 3) >> 2) & 0x3ffff03; + dctx->r[2] = (get_unaligned_le32(key + 6) >> 4) & 0x3ffc0ff; + dctx->r[3] = (get_unaligned_le32(key + 9) >> 6) & 0x3f03fff; + dctx->r[4] = (get_unaligned_le32(key + 12) >> 8) & 0x00fffff; } static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key) { - dctx->s[0] = le32_to_cpuvp(key + 0); - dctx->s[1] = le32_to_cpuvp(key + 4); - dctx->s[2] = le32_to_cpuvp(key + 8); - dctx->s[3] = le32_to_cpuvp(key + 12); + dctx->s[0] = get_unaligned_le32(key + 0); + dctx->s[1] = get_unaligned_le32(key + 4); + dctx->s[2] = 
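With pcbc here (and lrw above) reworked as skcipher templates, callers now drive them through the skcipher request API instead of the old blkcipher interface. The sketch below is an illustration only and not part of the patch: the helper name, buffers and key handling are placeholders, it assumes an AES provider is available so that "pcbc(aes)" can be instantiated, and it requests a non-ASYNC implementation so the call completes synchronously.

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Hypothetical helper: encrypt a kmalloc'ed buffer in place with pcbc(aes).
 * len must be a multiple of the AES block size and iv must be 16 bytes.
 */
static int pcbc_aes_encrypt_buf(u8 *buf, unsigned int len,
                                const u8 *key, unsigned int keylen, u8 *iv)
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        struct scatterlist sg;
        int err;

        /* Mask out CRYPTO_ALG_ASYNC so encrypt() below completes inline. */
        tfm = crypto_alloc_skcipher("pcbc(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_skcipher_setkey(tfm, key, keylen);
        if (err)
                goto out_free_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        sg_init_one(&sg, buf, len);
        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, &sg, &sg, len, iv);

        err = crypto_skcipher_encrypt(req);

        skcipher_request_free(req);
out_free_tfm:
        crypto_free_skcipher(tfm);
        return err;
}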
get_unaligned_le32(key + 8); + dctx->s[3] = get_unaligned_le32(key + 12); } unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, @@ -137,11 +133,11 @@ static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx, while (likely(srclen >= POLY1305_BLOCK_SIZE)) { /* h += m[i] */ - h0 += (le32_to_cpuvp(src + 0) >> 0) & 0x3ffffff; - h1 += (le32_to_cpuvp(src + 3) >> 2) & 0x3ffffff; - h2 += (le32_to_cpuvp(src + 6) >> 4) & 0x3ffffff; - h3 += (le32_to_cpuvp(src + 9) >> 6) & 0x3ffffff; - h4 += (le32_to_cpuvp(src + 12) >> 8) | hibit; + h0 += (get_unaligned_le32(src + 0) >> 0) & 0x3ffffff; + h1 += (get_unaligned_le32(src + 3) >> 2) & 0x3ffffff; + h2 += (get_unaligned_le32(src + 6) >> 4) & 0x3ffffff; + h3 += (get_unaligned_le32(src + 9) >> 6) & 0x3ffffff; + h4 += (get_unaligned_le32(src + 12) >> 8) | hibit; /* h *= r */ d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) + diff --git a/crypto/rng.c b/crypto/rng.c index b81cffb13bab..f46dac5288b9 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -23,6 +23,7 @@ #include <linux/slab.h> #include <linux/string.h> #include <linux/cryptouser.h> +#include <linux/compiler.h> #include <net/netlink.h> #include "internal.h" @@ -95,7 +96,7 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg) #endif static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg) - __attribute__ ((unused)); + __maybe_unused; static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg) { seq_printf(m, "type : rng\n"); diff --git a/crypto/scompress.c b/crypto/scompress.c new file mode 100644 index 000000000000..6b048b36312d --- /dev/null +++ b/crypto/scompress.c @@ -0,0 +1,357 @@ +/* + * Synchronous Compression operations + * + * Copyright 2015 LG Electronics Inc. + * Copyright (c) 2016, Intel Corporation + * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + */ +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/crypto.h> +#include <linux/compiler.h> +#include <linux/vmalloc.h> +#include <crypto/algapi.h> +#include <linux/cryptouser.h> +#include <net/netlink.h> +#include <linux/scatterlist.h> +#include <crypto/scatterwalk.h> +#include <crypto/internal/acompress.h> +#include <crypto/internal/scompress.h> +#include "internal.h" + +static const struct crypto_type crypto_scomp_type; +static void * __percpu *scomp_src_scratches; +static void * __percpu *scomp_dst_scratches; +static int scomp_scratch_users; +static DEFINE_MUTEX(scomp_lock); + +#ifdef CONFIG_NET +static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg) +{ + struct crypto_report_comp rscomp; + + strncpy(rscomp.type, "scomp", sizeof(rscomp.type)); + + if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, + sizeof(struct crypto_report_comp), &rscomp)) + goto nla_put_failure; + return 0; + +nla_put_failure: + return -EMSGSIZE; +} +#else +static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg) +{ + return -ENOSYS; +} +#endif + +static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg) + __maybe_unused; + +static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg) +{ + seq_puts(m, "type : scomp\n"); +} + +static int crypto_scomp_init_tfm(struct crypto_tfm *tfm) +{ + return 0; +} + +static void crypto_scomp_free_scratches(void * __percpu *scratches) +{ + int i; + + if (!scratches) + return; + + for_each_possible_cpu(i) + vfree(*per_cpu_ptr(scratches, i)); + + free_percpu(scratches); +} + +static void * __percpu *crypto_scomp_alloc_scratches(void) +{ + void * __percpu *scratches; + int i; + + scratches = alloc_percpu(void *); + if (!scratches) + return NULL; + + for_each_possible_cpu(i) { + void *scratch; + + scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i)); + if (!scratch) + goto error; + *per_cpu_ptr(scratches, i) = scratch; + } + + return scratches; + +error: + crypto_scomp_free_scratches(scratches); + return NULL; +} + +static void crypto_scomp_free_all_scratches(void) +{ + if (!--scomp_scratch_users) { + crypto_scomp_free_scratches(scomp_src_scratches); + crypto_scomp_free_scratches(scomp_dst_scratches); + scomp_src_scratches = NULL; + scomp_dst_scratches = NULL; + } +} + +static int crypto_scomp_alloc_all_scratches(void) +{ + if (!scomp_scratch_users++) { + scomp_src_scratches = crypto_scomp_alloc_scratches(); + if (!scomp_src_scratches) + return -ENOMEM; + scomp_dst_scratches = crypto_scomp_alloc_scratches(); + if (!scomp_dst_scratches) + return -ENOMEM; + } + return 0; +} + +static void crypto_scomp_sg_free(struct scatterlist *sgl) +{ + int i, n; + struct page *page; + + if (!sgl) + return; + + n = sg_nents(sgl); + for_each_sg(sgl, sgl, n, i) { + page = sg_page(sgl); + if (page) + __free_page(page); + } + + kfree(sgl); +} + +static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp) +{ + struct scatterlist *sgl; + struct page *page; + int i, n; + + n = ((size - 1) >> PAGE_SHIFT) + 1; + + sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp); + if (!sgl) + return NULL; + + sg_init_table(sgl, n); + + for (i = 0; i < n; i++) { + page = alloc_page(gfp); + if (!page) + goto err; + sg_set_page(sgl + i, page, PAGE_SIZE, 0); + } + + return sgl; + +err: + sg_mark_end(sgl + i); + crypto_scomp_sg_free(sgl); + return NULL; +} + +static int scomp_acomp_comp_decomp(struct acomp_req 
*req, int dir) +{ + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + void **tfm_ctx = acomp_tfm_ctx(tfm); + struct crypto_scomp *scomp = *tfm_ctx; + void **ctx = acomp_request_ctx(req); + const int cpu = get_cpu(); + u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu); + u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu); + int ret; + + if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) { + ret = -EINVAL; + goto out; + } + + if (req->dst && !req->dlen) { + ret = -EINVAL; + goto out; + } + + if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE) + req->dlen = SCOMP_SCRATCH_SIZE; + + scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0); + if (dir) + ret = crypto_scomp_compress(scomp, scratch_src, req->slen, + scratch_dst, &req->dlen, *ctx); + else + ret = crypto_scomp_decompress(scomp, scratch_src, req->slen, + scratch_dst, &req->dlen, *ctx); + if (!ret) { + if (!req->dst) { + req->dst = crypto_scomp_sg_alloc(req->dlen, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? + GFP_KERNEL : GFP_ATOMIC); + if (!req->dst) + goto out; + } + scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen, + 1); + } +out: + put_cpu(); + return ret; +} + +static int scomp_acomp_compress(struct acomp_req *req) +{ + return scomp_acomp_comp_decomp(req, 1); +} + +static int scomp_acomp_decompress(struct acomp_req *req) +{ + return scomp_acomp_comp_decomp(req, 0); +} + +static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm) +{ + struct crypto_scomp **ctx = crypto_tfm_ctx(tfm); + + crypto_free_scomp(*ctx); +} + +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm) +{ + struct crypto_alg *calg = tfm->__crt_alg; + struct crypto_acomp *crt = __crypto_acomp_tfm(tfm); + struct crypto_scomp **ctx = crypto_tfm_ctx(tfm); + struct crypto_scomp *scomp; + + if (!crypto_mod_get(calg)) + return -EAGAIN; + + scomp = crypto_create_tfm(calg, &crypto_scomp_type); + if (IS_ERR(scomp)) { + crypto_mod_put(calg); + return PTR_ERR(scomp); + } + + *ctx = scomp; + tfm->exit = crypto_exit_scomp_ops_async; + + crt->compress = scomp_acomp_compress; + crt->decompress = scomp_acomp_decompress; + crt->dst_free = crypto_scomp_sg_free; + crt->reqsize = sizeof(void *); + + return 0; +} + +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req) +{ + struct crypto_acomp *acomp = crypto_acomp_reqtfm(req); + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); + struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm); + struct crypto_scomp *scomp = *tfm_ctx; + void *ctx; + + ctx = crypto_scomp_alloc_ctx(scomp); + if (IS_ERR(ctx)) { + kfree(req); + return NULL; + } + + *req->__ctx = ctx; + + return req; +} + +void crypto_acomp_scomp_free_ctx(struct acomp_req *req) +{ + struct crypto_acomp *acomp = crypto_acomp_reqtfm(req); + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); + struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm); + struct crypto_scomp *scomp = *tfm_ctx; + void *ctx = *req->__ctx; + + if (ctx) + crypto_scomp_free_ctx(scomp, ctx); +} + +static const struct crypto_type crypto_scomp_type = { + .extsize = crypto_alg_extsize, + .init_tfm = crypto_scomp_init_tfm, +#ifdef CONFIG_PROC_FS + .show = crypto_scomp_show, +#endif + .report = crypto_scomp_report, + .maskclear = ~CRYPTO_ALG_TYPE_MASK, + .maskset = CRYPTO_ALG_TYPE_MASK, + .type = CRYPTO_ALG_TYPE_SCOMPRESS, + .tfmsize = offsetof(struct crypto_scomp, base), +}; + +int crypto_register_scomp(struct scomp_alg *alg) +{ + struct crypto_alg *base = &alg->base; + int ret = -ENOMEM; + + mutex_lock(&scomp_lock); + if 
(crypto_scomp_alloc_all_scratches()) + goto error; + + base->cra_type = &crypto_scomp_type; + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; + base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS; + + ret = crypto_register_alg(base); + if (ret) + goto error; + + mutex_unlock(&scomp_lock); + return ret; + +error: + crypto_scomp_free_all_scratches(); + mutex_unlock(&scomp_lock); + return ret; +} +EXPORT_SYMBOL_GPL(crypto_register_scomp); + +int crypto_unregister_scomp(struct scomp_alg *alg) +{ + int ret; + + mutex_lock(&scomp_lock); + ret = crypto_unregister_alg(&alg->base); + crypto_scomp_free_all_scratches(); + mutex_unlock(&scomp_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(crypto_unregister_scomp); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Synchronous compression type"); diff --git a/crypto/seqiv.c b/crypto/seqiv.c index c7049231861f..570b7d1aa0ca 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -153,8 +153,6 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) if (IS_ERR(inst)) return PTR_ERR(inst); - inst->alg.base.cra_alignmask |= __alignof__(u32) - 1; - spawn = aead_instance_ctx(inst); alg = crypto_spawn_aead_alg(spawn); diff --git a/crypto/shash.c b/crypto/shash.c index a051541a4a17..5e31c8d776df 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -19,6 +19,7 @@ #include <linux/seq_file.h> #include <linux/cryptouser.h> #include <net/netlink.h> +#include <linux/compiler.h> #include "internal.h" @@ -67,7 +68,7 @@ EXPORT_SYMBOL_GPL(crypto_shash_setkey); static inline unsigned int shash_align_buffer_size(unsigned len, unsigned long mask) { - typedef u8 __attribute__ ((aligned)) u8_aligned; + typedef u8 __aligned_largest u8_aligned; return len + (mask & ~(__alignof__(u8_aligned) - 1)); } @@ -80,7 +81,7 @@ static int shash_update_unaligned(struct shash_desc *desc, const u8 *data, unsigned int unaligned_len = alignmask + 1 - ((unsigned long)data & alignmask); u8 ubuf[shash_align_buffer_size(unaligned_len, alignmask)] - __attribute__ ((aligned)); + __aligned_largest; u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1); int err; @@ -116,7 +117,7 @@ static int shash_final_unaligned(struct shash_desc *desc, u8 *out) struct shash_alg *shash = crypto_shash_alg(tfm); unsigned int ds = crypto_shash_digestsize(tfm); u8 ubuf[shash_align_buffer_size(ds, alignmask)] - __attribute__ ((aligned)); + __aligned_largest; u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1); int err; @@ -403,7 +404,7 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg) #endif static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) - __attribute__ ((unused)); + __maybe_unused; static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) { struct shash_alg *salg = __crypto_shash_alg(alg); diff --git a/crypto/simd.c b/crypto/simd.c new file mode 100644 index 000000000000..88203370a62f --- /dev/null +++ b/crypto/simd.c @@ -0,0 +1,226 @@ +/* + * Shared crypto simd helpers + * + * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> + * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au> + * + * Based on aesni-intel_glue.c by: + * Copyright (C) 2008, Intel Corp. + * Author: Huang Ying <ying.huang@intel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
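The scomp algorithms registered earlier in this patch ("842", "lzo", "lz4", "lz4hc") are reached by users through the acomp interface, with crypto/scompress.c copying each request through the per-CPU scratch buffers shown above. The sketch below is an illustration only and not part of the patch: the helper name and buffers are placeholders, it assumes the "lzo" algorithm is available, and it relies on scomp-backed transforms completing synchronously, so no completion callback is set.

#include <crypto/acompress.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Hypothetical helper: compress a linear buffer with "lzo" via acomp.
 * Source and destination must each fit in one scratch buffer
 * (SCOMP_SCRATCH_SIZE).
 */
static int lzo_compress_buf(const u8 *src, unsigned int slen,
                            u8 *dst, unsigned int *dlen)
{
        struct crypto_acomp *tfm;
        struct acomp_req *req;
        struct scatterlist sg_src, sg_dst;
        int err;

        tfm = crypto_alloc_acomp("lzo", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = acomp_request_alloc(tfm);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        sg_init_one(&sg_src, src, slen);
        sg_init_one(&sg_dst, dst, *dlen);
        acomp_request_set_params(req, &sg_src, &sg_dst, slen, *dlen);

        err = crypto_acomp_compress(req);
        if (!err)
                *dlen = req->dlen;      /* bytes actually written to dst */

        acomp_request_free(req);
out_free_tfm:
        crypto_free_acomp(tfm);
        return err;
}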
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA + * + */ + +#include <crypto/cryptd.h> +#include <crypto/internal/simd.h> +#include <crypto/internal/skcipher.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/preempt.h> +#include <asm/simd.h> + +struct simd_skcipher_alg { + const char *ialg_name; + struct skcipher_alg alg; +}; + +struct simd_skcipher_ctx { + struct cryptd_skcipher *cryptd_tfm; +}; + +static int simd_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int key_len) +{ + struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct crypto_skcipher *child = &ctx->cryptd_tfm->base; + int err; + + crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + err = crypto_skcipher_setkey(child, key, key_len); + crypto_skcipher_set_flags(tfm, crypto_skcipher_get_flags(child) & + CRYPTO_TFM_RES_MASK); + return err; +} + +static int simd_skcipher_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_request *subreq; + struct crypto_skcipher *child; + + subreq = skcipher_request_ctx(req); + *subreq = *req; + + if (!may_use_simd() || + (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm))) + child = &ctx->cryptd_tfm->base; + else + child = cryptd_skcipher_child(ctx->cryptd_tfm); + + skcipher_request_set_tfm(subreq, child); + + return crypto_skcipher_encrypt(subreq); +} + +static int simd_skcipher_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_request *subreq; + struct crypto_skcipher *child; + + subreq = skcipher_request_ctx(req); + *subreq = *req; + + if (!may_use_simd() || + (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm))) + child = &ctx->cryptd_tfm->base; + else + child = cryptd_skcipher_child(ctx->cryptd_tfm); + + skcipher_request_set_tfm(subreq, child); + + return crypto_skcipher_decrypt(subreq); +} + +static void simd_skcipher_exit(struct crypto_skcipher *tfm) +{ + struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); + + cryptd_free_skcipher(ctx->cryptd_tfm); +} + +static int simd_skcipher_init(struct crypto_skcipher *tfm) +{ + struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct cryptd_skcipher *cryptd_tfm; + struct simd_skcipher_alg *salg; + struct skcipher_alg *alg; + unsigned reqsize; + + alg = crypto_skcipher_alg(tfm); + salg = container_of(alg, struct simd_skcipher_alg, alg); + + cryptd_tfm = cryptd_alloc_skcipher(salg->ialg_name, + CRYPTO_ALG_INTERNAL, + CRYPTO_ALG_INTERNAL); + if (IS_ERR(cryptd_tfm)) + return PTR_ERR(cryptd_tfm); + + ctx->cryptd_tfm = cryptd_tfm; + + reqsize = sizeof(struct skcipher_request); + reqsize += crypto_skcipher_reqsize(&cryptd_tfm->base); + + crypto_skcipher_set_reqsize(tfm, reqsize); + + return 0; +} + +struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, + const char *drvname, + const 
char *basename) +{ + struct simd_skcipher_alg *salg; + struct crypto_skcipher *tfm; + struct skcipher_alg *ialg; + struct skcipher_alg *alg; + int err; + + tfm = crypto_alloc_skcipher(basename, CRYPTO_ALG_INTERNAL, + CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) + return ERR_CAST(tfm); + + ialg = crypto_skcipher_alg(tfm); + + salg = kzalloc(sizeof(*salg), GFP_KERNEL); + if (!salg) { + salg = ERR_PTR(-ENOMEM); + goto out_put_tfm; + } + + salg->ialg_name = basename; + alg = &salg->alg; + + err = -ENAMETOOLONG; + if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >= + CRYPTO_MAX_ALG_NAME) + goto out_free_salg; + + if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + drvname) >= CRYPTO_MAX_ALG_NAME) + goto out_free_salg; + + alg->base.cra_flags = CRYPTO_ALG_ASYNC; + alg->base.cra_priority = ialg->base.cra_priority; + alg->base.cra_blocksize = ialg->base.cra_blocksize; + alg->base.cra_alignmask = ialg->base.cra_alignmask; + alg->base.cra_module = ialg->base.cra_module; + alg->base.cra_ctxsize = sizeof(struct simd_skcipher_ctx); + + alg->ivsize = ialg->ivsize; + alg->chunksize = ialg->chunksize; + alg->min_keysize = ialg->min_keysize; + alg->max_keysize = ialg->max_keysize; + + alg->init = simd_skcipher_init; + alg->exit = simd_skcipher_exit; + + alg->setkey = simd_skcipher_setkey; + alg->encrypt = simd_skcipher_encrypt; + alg->decrypt = simd_skcipher_decrypt; + + err = crypto_register_skcipher(alg); + if (err) + goto out_free_salg; + +out_put_tfm: + crypto_free_skcipher(tfm); + return salg; + +out_free_salg: + kfree(salg); + salg = ERR_PTR(err); + goto out_put_tfm; +} +EXPORT_SYMBOL_GPL(simd_skcipher_create_compat); + +struct simd_skcipher_alg *simd_skcipher_create(const char *algname, + const char *basename) +{ + char drvname[CRYPTO_MAX_ALG_NAME]; + + if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >= + CRYPTO_MAX_ALG_NAME) + return ERR_PTR(-ENAMETOOLONG); + + return simd_skcipher_create_compat(algname, drvname, basename); +} +EXPORT_SYMBOL_GPL(simd_skcipher_create); + +void simd_skcipher_free(struct simd_skcipher_alg *salg) +{ + crypto_unregister_skcipher(&salg->alg); + kfree(salg); +} +EXPORT_SYMBOL_GPL(simd_skcipher_free); + +MODULE_LICENSE("GPL"); diff --git a/crypto/skcipher.c b/crypto/skcipher.c index f7d0018dcaee..014af741fc6a 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -14,9 +14,13 @@ * */ +#include <crypto/internal/aead.h> #include <crypto/internal/skcipher.h> +#include <crypto/scatterwalk.h> #include <linux/bug.h> #include <linux/cryptouser.h> +#include <linux/compiler.h> +#include <linux/list.h> #include <linux/module.h> #include <linux/rtnetlink.h> #include <linux/seq_file.h> @@ -24,6 +28,545 @@ #include "internal.h" +enum { + SKCIPHER_WALK_PHYS = 1 << 0, + SKCIPHER_WALK_SLOW = 1 << 1, + SKCIPHER_WALK_COPY = 1 << 2, + SKCIPHER_WALK_DIFF = 1 << 3, + SKCIPHER_WALK_SLEEP = 1 << 4, +}; + +struct skcipher_walk_buffer { + struct list_head entry; + struct scatter_walk dst; + unsigned int len; + u8 *data; + u8 buffer[]; +}; + +static int skcipher_walk_next(struct skcipher_walk *walk); + +static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr) +{ + if (PageHighMem(scatterwalk_page(walk))) + kunmap_atomic(vaddr); +} + +static inline void *skcipher_map(struct scatter_walk *walk) +{ + struct page *page = scatterwalk_page(walk); + + return (PageHighMem(page) ? 
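crypto/simd.c gives SIMD-only skcipher drivers a generic asynchronous front end: the driver registers its real implementation as a CRYPTO_ALG_INTERNAL skcipher and then publishes a wrapper that defers to cryptd whenever SIMD cannot be used. The fragment below is an illustration only and not part of the patch, modelled on how arch glue code is expected to use the helper; the "mydrv" module and the "__cbc-aes-mydrv" internal name are placeholders.

#include <crypto/internal/simd.h>
#include <linux/err.h>
#include <linux/module.h>

static struct simd_skcipher_alg *mydrv_simd_alg;

static int __init mydrv_register_simd(void)
{
        /* "cbc(aes)" is what users request; "__cbc-aes-mydrv" is the
         * hypothetical CRYPTO_ALG_INTERNAL skcipher this driver is
         * assumed to have registered beforehand.
         */
        mydrv_simd_alg = simd_skcipher_create("cbc(aes)", "__cbc-aes-mydrv");
        return PTR_ERR_OR_ZERO(mydrv_simd_alg);
}

static void __exit mydrv_unregister_simd(void)
{
        simd_skcipher_free(mydrv_simd_alg);
}

module_init(mydrv_register_simd);
module_exit(mydrv_unregister_simd);
MODULE_LICENSE("GPL");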
kmap_atomic(page) : page_address(page)) + + offset_in_page(walk->offset); +} + +static inline void skcipher_map_src(struct skcipher_walk *walk) +{ + walk->src.virt.addr = skcipher_map(&walk->in); +} + +static inline void skcipher_map_dst(struct skcipher_walk *walk) +{ + walk->dst.virt.addr = skcipher_map(&walk->out); +} + +static inline void skcipher_unmap_src(struct skcipher_walk *walk) +{ + skcipher_unmap(&walk->in, walk->src.virt.addr); +} + +static inline void skcipher_unmap_dst(struct skcipher_walk *walk) +{ + skcipher_unmap(&walk->out, walk->dst.virt.addr); +} + +static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk) +{ + return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC; +} + +/* Get a spot of the specified length that does not straddle a page. + * The caller needs to ensure that there is enough space for this operation. + */ +static inline u8 *skcipher_get_spot(u8 *start, unsigned int len) +{ + u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK); + + return max(start, end_page); +} + +static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize) +{ + u8 *addr; + + addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1); + addr = skcipher_get_spot(addr, bsize); + scatterwalk_copychunks(addr, &walk->out, bsize, + (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1); + return 0; +} + +int skcipher_walk_done(struct skcipher_walk *walk, int err) +{ + unsigned int n = walk->nbytes - err; + unsigned int nbytes; + + nbytes = walk->total - n; + + if (unlikely(err < 0)) { + nbytes = 0; + n = 0; + } else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS | + SKCIPHER_WALK_SLOW | + SKCIPHER_WALK_COPY | + SKCIPHER_WALK_DIFF)))) { +unmap_src: + skcipher_unmap_src(walk); + } else if (walk->flags & SKCIPHER_WALK_DIFF) { + skcipher_unmap_dst(walk); + goto unmap_src; + } else if (walk->flags & SKCIPHER_WALK_COPY) { + skcipher_map_dst(walk); + memcpy(walk->dst.virt.addr, walk->page, n); + skcipher_unmap_dst(walk); + } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) { + if (WARN_ON(err)) { + err = -EINVAL; + nbytes = 0; + } else + n = skcipher_done_slow(walk, n); + } + + if (err > 0) + err = 0; + + walk->total = nbytes; + walk->nbytes = nbytes; + + scatterwalk_advance(&walk->in, n); + scatterwalk_advance(&walk->out, n); + scatterwalk_done(&walk->in, 0, nbytes); + scatterwalk_done(&walk->out, 1, nbytes); + + if (nbytes) { + crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ? + CRYPTO_TFM_REQ_MAY_SLEEP : 0); + return skcipher_walk_next(walk); + } + + /* Short-circuit for the common/fast path. 
*/ + if (!((unsigned long)walk->buffer | (unsigned long)walk->page)) + goto out; + + if (walk->flags & SKCIPHER_WALK_PHYS) + goto out; + + if (walk->iv != walk->oiv) + memcpy(walk->oiv, walk->iv, walk->ivsize); + if (walk->buffer != walk->page) + kfree(walk->buffer); + if (walk->page) + free_page((unsigned long)walk->page); + +out: + return err; +} +EXPORT_SYMBOL_GPL(skcipher_walk_done); + +void skcipher_walk_complete(struct skcipher_walk *walk, int err) +{ + struct skcipher_walk_buffer *p, *tmp; + + list_for_each_entry_safe(p, tmp, &walk->buffers, entry) { + u8 *data; + + if (err) + goto done; + + data = p->data; + if (!data) { + data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1); + data = skcipher_get_spot(data, walk->stride); + } + + scatterwalk_copychunks(data, &p->dst, p->len, 1); + + if (offset_in_page(p->data) + p->len + walk->stride > + PAGE_SIZE) + free_page((unsigned long)p->data); + +done: + list_del(&p->entry); + kfree(p); + } + + if (!err && walk->iv != walk->oiv) + memcpy(walk->oiv, walk->iv, walk->ivsize); + if (walk->buffer != walk->page) + kfree(walk->buffer); + if (walk->page) + free_page((unsigned long)walk->page); +} +EXPORT_SYMBOL_GPL(skcipher_walk_complete); + +static void skcipher_queue_write(struct skcipher_walk *walk, + struct skcipher_walk_buffer *p) +{ + p->dst = walk->out; + list_add_tail(&p->entry, &walk->buffers); +} + +static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize) +{ + bool phys = walk->flags & SKCIPHER_WALK_PHYS; + unsigned alignmask = walk->alignmask; + struct skcipher_walk_buffer *p; + unsigned a; + unsigned n; + u8 *buffer; + void *v; + + if (!phys) { + if (!walk->buffer) + walk->buffer = walk->page; + buffer = walk->buffer; + if (buffer) + goto ok; + } + + /* Start with the minimum alignment of kmalloc. */ + a = crypto_tfm_ctx_alignment() - 1; + n = bsize; + + if (phys) { + /* Calculate the minimum alignment of p->buffer. */ + a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1; + n += sizeof(*p); + } + + /* Minimum size to align p->buffer by alignmask. */ + n += alignmask & ~a; + + /* Minimum size to ensure p->buffer does not straddle a page. 
*/ + n += (bsize - 1) & ~(alignmask | a); + + v = kzalloc(n, skcipher_walk_gfp(walk)); + if (!v) + return skcipher_walk_done(walk, -ENOMEM); + + if (phys) { + p = v; + p->len = bsize; + skcipher_queue_write(walk, p); + buffer = p->buffer; + } else { + walk->buffer = v; + buffer = v; + } + +ok: + walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1); + walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize); + walk->src.virt.addr = walk->dst.virt.addr; + + scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0); + + walk->nbytes = bsize; + walk->flags |= SKCIPHER_WALK_SLOW; + + return 0; +} + +static int skcipher_next_copy(struct skcipher_walk *walk) +{ + struct skcipher_walk_buffer *p; + u8 *tmp = walk->page; + + skcipher_map_src(walk); + memcpy(tmp, walk->src.virt.addr, walk->nbytes); + skcipher_unmap_src(walk); + + walk->src.virt.addr = tmp; + walk->dst.virt.addr = tmp; + + if (!(walk->flags & SKCIPHER_WALK_PHYS)) + return 0; + + p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk)); + if (!p) + return -ENOMEM; + + p->data = walk->page; + p->len = walk->nbytes; + skcipher_queue_write(walk, p); + + if (offset_in_page(walk->page) + walk->nbytes + walk->stride > + PAGE_SIZE) + walk->page = NULL; + else + walk->page += walk->nbytes; + + return 0; +} + +static int skcipher_next_fast(struct skcipher_walk *walk) +{ + unsigned long diff; + + walk->src.phys.page = scatterwalk_page(&walk->in); + walk->src.phys.offset = offset_in_page(walk->in.offset); + walk->dst.phys.page = scatterwalk_page(&walk->out); + walk->dst.phys.offset = offset_in_page(walk->out.offset); + + if (walk->flags & SKCIPHER_WALK_PHYS) + return 0; + + diff = walk->src.phys.offset - walk->dst.phys.offset; + diff |= walk->src.virt.page - walk->dst.virt.page; + + skcipher_map_src(walk); + walk->dst.virt.addr = walk->src.virt.addr; + + if (diff) { + walk->flags |= SKCIPHER_WALK_DIFF; + skcipher_map_dst(walk); + } + + return 0; +} + +static int skcipher_walk_next(struct skcipher_walk *walk) +{ + unsigned int bsize; + unsigned int n; + int err; + + walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY | + SKCIPHER_WALK_DIFF); + + n = walk->total; + bsize = min(walk->stride, max(n, walk->blocksize)); + n = scatterwalk_clamp(&walk->in, n); + n = scatterwalk_clamp(&walk->out, n); + + if (unlikely(n < bsize)) { + if (unlikely(walk->total < walk->blocksize)) + return skcipher_walk_done(walk, -EINVAL); + +slow_path: + err = skcipher_next_slow(walk, bsize); + goto set_phys_lowmem; + } + + if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) { + if (!walk->page) { + gfp_t gfp = skcipher_walk_gfp(walk); + + walk->page = (void *)__get_free_page(gfp); + if (!walk->page) + goto slow_path; + } + + walk->nbytes = min_t(unsigned, n, + PAGE_SIZE - offset_in_page(walk->page)); + walk->flags |= SKCIPHER_WALK_COPY; + err = skcipher_next_copy(walk); + goto set_phys_lowmem; + } + + walk->nbytes = n; + + return skcipher_next_fast(walk); + +set_phys_lowmem: + if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) { + walk->src.phys.page = virt_to_page(walk->src.virt.addr); + walk->dst.phys.page = virt_to_page(walk->dst.virt.addr); + walk->src.phys.offset &= PAGE_SIZE - 1; + walk->dst.phys.offset &= PAGE_SIZE - 1; + } + return err; +} +EXPORT_SYMBOL_GPL(skcipher_walk_next); + +static int skcipher_copy_iv(struct skcipher_walk *walk) +{ + unsigned a = crypto_tfm_ctx_alignment() - 1; + unsigned alignmask = walk->alignmask; + unsigned ivsize = walk->ivsize; + unsigned bs = walk->stride; + unsigned aligned_bs; + unsigned size; + u8 
*iv; + + aligned_bs = ALIGN(bs, alignmask); + + /* Minimum size to align buffer by alignmask. */ + size = alignmask & ~a; + + if (walk->flags & SKCIPHER_WALK_PHYS) + size += ivsize; + else { + size += aligned_bs + ivsize; + + /* Minimum size to ensure buffer does not straddle a page. */ + size += (bs - 1) & ~(alignmask | a); + } + + walk->buffer = kmalloc(size, skcipher_walk_gfp(walk)); + if (!walk->buffer) + return -ENOMEM; + + iv = PTR_ALIGN(walk->buffer, alignmask + 1); + iv = skcipher_get_spot(iv, bs) + aligned_bs; + + walk->iv = memcpy(iv, walk->iv, walk->ivsize); + return 0; +} + +static int skcipher_walk_first(struct skcipher_walk *walk) +{ + walk->nbytes = 0; + + if (WARN_ON_ONCE(in_irq())) + return -EDEADLK; + + if (unlikely(!walk->total)) + return 0; + + walk->buffer = NULL; + if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { + int err = skcipher_copy_iv(walk); + if (err) + return err; + } + + walk->page = NULL; + walk->nbytes = walk->total; + + return skcipher_walk_next(walk); +} + +static int skcipher_walk_skcipher(struct skcipher_walk *walk, + struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + + scatterwalk_start(&walk->in, req->src); + scatterwalk_start(&walk->out, req->dst); + + walk->total = req->cryptlen; + walk->iv = req->iv; + walk->oiv = req->iv; + + walk->flags &= ~SKCIPHER_WALK_SLEEP; + walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? + SKCIPHER_WALK_SLEEP : 0; + + walk->blocksize = crypto_skcipher_blocksize(tfm); + walk->stride = crypto_skcipher_walksize(tfm); + walk->ivsize = crypto_skcipher_ivsize(tfm); + walk->alignmask = crypto_skcipher_alignmask(tfm); + + return skcipher_walk_first(walk); +} + +int skcipher_walk_virt(struct skcipher_walk *walk, + struct skcipher_request *req, bool atomic) +{ + int err; + + walk->flags &= ~SKCIPHER_WALK_PHYS; + + err = skcipher_walk_skcipher(walk, req); + + walk->flags &= atomic ? 
~SKCIPHER_WALK_SLEEP : ~0; + + return err; +} +EXPORT_SYMBOL_GPL(skcipher_walk_virt); + +void skcipher_walk_atomise(struct skcipher_walk *walk) +{ + walk->flags &= ~SKCIPHER_WALK_SLEEP; +} +EXPORT_SYMBOL_GPL(skcipher_walk_atomise); + +int skcipher_walk_async(struct skcipher_walk *walk, + struct skcipher_request *req) +{ + walk->flags |= SKCIPHER_WALK_PHYS; + + INIT_LIST_HEAD(&walk->buffers); + + return skcipher_walk_skcipher(walk, req); +} +EXPORT_SYMBOL_GPL(skcipher_walk_async); + +static int skcipher_walk_aead_common(struct skcipher_walk *walk, + struct aead_request *req, bool atomic) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + int err; + + walk->flags &= ~SKCIPHER_WALK_PHYS; + + scatterwalk_start(&walk->in, req->src); + scatterwalk_start(&walk->out, req->dst); + + scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2); + scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2); + + walk->iv = req->iv; + walk->oiv = req->iv; + + if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) + walk->flags |= SKCIPHER_WALK_SLEEP; + else + walk->flags &= ~SKCIPHER_WALK_SLEEP; + + walk->blocksize = crypto_aead_blocksize(tfm); + walk->stride = crypto_aead_chunksize(tfm); + walk->ivsize = crypto_aead_ivsize(tfm); + walk->alignmask = crypto_aead_alignmask(tfm); + + err = skcipher_walk_first(walk); + + if (atomic) + walk->flags &= ~SKCIPHER_WALK_SLEEP; + + return err; +} + +int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req, + bool atomic) +{ + walk->total = req->cryptlen; + + return skcipher_walk_aead_common(walk, req, atomic); +} +EXPORT_SYMBOL_GPL(skcipher_walk_aead); + +int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, + struct aead_request *req, bool atomic) +{ + walk->total = req->cryptlen; + + return skcipher_walk_aead_common(walk, req, atomic); +} +EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt); + +int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, + struct aead_request *req, bool atomic) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + + walk->total = req->cryptlen - crypto_aead_authsize(tfm); + + return skcipher_walk_aead_common(walk, req, atomic); +} +EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt); + static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) { if (alg->cra_type == &crypto_blkcipher_type) @@ -265,7 +808,7 @@ static void crypto_skcipher_free_instance(struct crypto_instance *inst) } static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg) - __attribute__ ((unused)); + __maybe_unused; static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg) { struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg, @@ -279,6 +822,7 @@ static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg) seq_printf(m, "max keysize : %u\n", skcipher->max_keysize); seq_printf(m, "ivsize : %u\n", skcipher->ivsize); seq_printf(m, "chunksize : %u\n", skcipher->chunksize); + seq_printf(m, "walksize : %u\n", skcipher->walksize); } #ifdef CONFIG_NET @@ -351,11 +895,14 @@ static int skcipher_prepare_alg(struct skcipher_alg *alg) { struct crypto_alg *base = &alg->base; - if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8) + if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 || + alg->walksize > PAGE_SIZE / 8) return -EINVAL; if (!alg->chunksize) alg->chunksize = base->cra_blocksize; + if (!alg->walksize) + alg->walksize = alg->chunksize; base->cra_type = &crypto_skcipher_type2; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; diff --git a/crypto/tcrypt.c 
b/crypto/tcrypt.c index ae22f05d5936..9a11f3c2bf98 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -22,6 +22,8 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <crypto/aead.h> #include <crypto/hash.h> #include <crypto/skcipher.h> @@ -1010,6 +1012,8 @@ static inline int tcrypt_test(const char *alg) { int ret; + pr_debug("testing %s\n", alg); + ret = alg_test(alg, alg, 0, 0); /* non-fips algs return -EINVAL in fips mode */ if (fips_enabled && ret == -EINVAL) @@ -2059,6 +2063,8 @@ static int __init tcrypt_mod_init(void) if (err) { printk(KERN_ERR "tcrypt: one or more tests failed!\n"); goto err_free_tv; + } else { + pr_debug("all tests passed\n"); } /* We intentionaly return -EAGAIN to prevent keeping the module, diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 62dffa0028ac..f9c378af3907 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -33,6 +33,7 @@ #include <crypto/drbg.h> #include <crypto/akcipher.h> #include <crypto/kpp.h> +#include <crypto/acompress.h> #include "internal.h" @@ -62,7 +63,7 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) */ #define IDX1 32 #define IDX2 32400 -#define IDX3 1 +#define IDX3 1511 #define IDX4 8193 #define IDX5 22222 #define IDX6 17101 @@ -264,6 +265,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, const int align_offset) { const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); + size_t digest_size = crypto_ahash_digestsize(tfm); unsigned int i, j, k, temp; struct scatterlist sg[8]; char *result; @@ -274,7 +276,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, char *xbuf[XBUFSIZE]; int ret = -ENOMEM; - result = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL); + result = kmalloc(digest_size, GFP_KERNEL); if (!result) return ret; key = kmalloc(MAX_KEYLEN, GFP_KERNEL); @@ -304,7 +306,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, goto out; j++; - memset(result, 0, MAX_DIGEST_SIZE); + memset(result, 0, digest_size); hash_buff = xbuf[0]; hash_buff += align_offset; @@ -379,7 +381,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, continue; j++; - memset(result, 0, MAX_DIGEST_SIZE); + memset(result, 0, digest_size); temp = 0; sg_init_table(sg, template[i].np); @@ -457,7 +459,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, continue; j++; - memset(result, 0, MAX_DIGEST_SIZE); + memset(result, 0, digest_size); ret = -EINVAL; hash_buff = xbuf[0]; @@ -1442,6 +1444,150 @@ out: return ret; } +static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, + struct comp_testvec *dtemplate, int ctcount, int dtcount) +{ + const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)); + unsigned int i; + char *output; + int ret; + struct scatterlist src, dst; + struct acomp_req *req; + struct tcrypt_result result; + + output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL); + if (!output) + return -ENOMEM; + + for (i = 0; i < ctcount; i++) { + unsigned int dlen = COMP_BUF_SIZE; + int ilen = ctemplate[i].inlen; + void *input_vec; + + input_vec = kmemdup(ctemplate[i].input, ilen, GFP_KERNEL); + if (!input_vec) { + ret = -ENOMEM; + goto out; + } + + memset(output, 0, dlen); + init_completion(&result.completion); + sg_init_one(&src, input_vec, ilen); + sg_init_one(&dst, output, dlen); + + req = acomp_request_alloc(tfm); + if (!req) { + pr_err("alg: acomp: request alloc failed for %s\n", + algo); + kfree(input_vec); + ret = -ENOMEM; + 
goto out; + } + + acomp_request_set_params(req, &src, &dst, ilen, dlen); + acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + tcrypt_complete, &result); + + ret = wait_async_op(&result, crypto_acomp_compress(req)); + if (ret) { + pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n", + i + 1, algo, -ret); + kfree(input_vec); + acomp_request_free(req); + goto out; + } + + if (req->dlen != ctemplate[i].outlen) { + pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n", + i + 1, algo, req->dlen); + ret = -EINVAL; + kfree(input_vec); + acomp_request_free(req); + goto out; + } + + if (memcmp(output, ctemplate[i].output, req->dlen)) { + pr_err("alg: acomp: Compression test %d failed for %s\n", + i + 1, algo); + hexdump(output, req->dlen); + ret = -EINVAL; + kfree(input_vec); + acomp_request_free(req); + goto out; + } + + kfree(input_vec); + acomp_request_free(req); + } + + for (i = 0; i < dtcount; i++) { + unsigned int dlen = COMP_BUF_SIZE; + int ilen = dtemplate[i].inlen; + void *input_vec; + + input_vec = kmemdup(dtemplate[i].input, ilen, GFP_KERNEL); + if (!input_vec) { + ret = -ENOMEM; + goto out; + } + + memset(output, 0, dlen); + init_completion(&result.completion); + sg_init_one(&src, input_vec, ilen); + sg_init_one(&dst, output, dlen); + + req = acomp_request_alloc(tfm); + if (!req) { + pr_err("alg: acomp: request alloc failed for %s\n", + algo); + kfree(input_vec); + ret = -ENOMEM; + goto out; + } + + acomp_request_set_params(req, &src, &dst, ilen, dlen); + acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + tcrypt_complete, &result); + + ret = wait_async_op(&result, crypto_acomp_decompress(req)); + if (ret) { + pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n", + i + 1, algo, -ret); + kfree(input_vec); + acomp_request_free(req); + goto out; + } + + if (req->dlen != dtemplate[i].outlen) { + pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n", + i + 1, algo, req->dlen); + ret = -EINVAL; + kfree(input_vec); + acomp_request_free(req); + goto out; + } + + if (memcmp(output, dtemplate[i].output, req->dlen)) { + pr_err("alg: acomp: Decompression test %d failed for %s\n", + i + 1, algo); + hexdump(output, req->dlen); + ret = -EINVAL; + kfree(input_vec); + acomp_request_free(req); + goto out; + } + + kfree(input_vec); + acomp_request_free(req); + } + + ret = 0; + +out: + kfree(output); + return ret; +} + static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template, unsigned int tcount) { @@ -1509,7 +1655,7 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver, struct crypto_aead *tfm; int err = 0; - tfm = crypto_alloc_aead(driver, type | CRYPTO_ALG_INTERNAL, mask); + tfm = crypto_alloc_aead(driver, type, mask); if (IS_ERR(tfm)) { printk(KERN_ERR "alg: aead: Failed to load transform for %s: " "%ld\n", driver, PTR_ERR(tfm)); @@ -1538,7 +1684,7 @@ static int alg_test_cipher(const struct alg_test_desc *desc, struct crypto_cipher *tfm; int err = 0; - tfm = crypto_alloc_cipher(driver, type | CRYPTO_ALG_INTERNAL, mask); + tfm = crypto_alloc_cipher(driver, type, mask); if (IS_ERR(tfm)) { printk(KERN_ERR "alg: cipher: Failed to load transform for " "%s: %ld\n", driver, PTR_ERR(tfm)); @@ -1567,7 +1713,7 @@ static int alg_test_skcipher(const struct alg_test_desc *desc, struct crypto_skcipher *tfm; int err = 0; - tfm = crypto_alloc_skcipher(driver, type | CRYPTO_ALG_INTERNAL, mask); + tfm = crypto_alloc_skcipher(driver, type, mask); if (IS_ERR(tfm)) { printk(KERN_ERR 
"alg: skcipher: Failed to load transform for " "%s: %ld\n", driver, PTR_ERR(tfm)); @@ -1593,22 +1739,38 @@ out: static int alg_test_comp(const struct alg_test_desc *desc, const char *driver, u32 type, u32 mask) { - struct crypto_comp *tfm; + struct crypto_comp *comp; + struct crypto_acomp *acomp; int err; + u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK; + + if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) { + acomp = crypto_alloc_acomp(driver, type, mask); + if (IS_ERR(acomp)) { + pr_err("alg: acomp: Failed to load transform for %s: %ld\n", + driver, PTR_ERR(acomp)); + return PTR_ERR(acomp); + } + err = test_acomp(acomp, desc->suite.comp.comp.vecs, + desc->suite.comp.decomp.vecs, + desc->suite.comp.comp.count, + desc->suite.comp.decomp.count); + crypto_free_acomp(acomp); + } else { + comp = crypto_alloc_comp(driver, type, mask); + if (IS_ERR(comp)) { + pr_err("alg: comp: Failed to load transform for %s: %ld\n", + driver, PTR_ERR(comp)); + return PTR_ERR(comp); + } - tfm = crypto_alloc_comp(driver, type, mask); - if (IS_ERR(tfm)) { - printk(KERN_ERR "alg: comp: Failed to load transform for %s: " - "%ld\n", driver, PTR_ERR(tfm)); - return PTR_ERR(tfm); - } - - err = test_comp(tfm, desc->suite.comp.comp.vecs, - desc->suite.comp.decomp.vecs, - desc->suite.comp.comp.count, - desc->suite.comp.decomp.count); + err = test_comp(comp, desc->suite.comp.comp.vecs, + desc->suite.comp.decomp.vecs, + desc->suite.comp.comp.count, + desc->suite.comp.decomp.count); - crypto_free_comp(tfm); + crypto_free_comp(comp); + } return err; } @@ -1618,7 +1780,7 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver, struct crypto_ahash *tfm; int err; - tfm = crypto_alloc_ahash(driver, type | CRYPTO_ALG_INTERNAL, mask); + tfm = crypto_alloc_ahash(driver, type, mask); if (IS_ERR(tfm)) { printk(KERN_ERR "alg: hash: Failed to load transform for %s: " "%ld\n", driver, PTR_ERR(tfm)); @@ -1646,7 +1808,7 @@ static int alg_test_crc32c(const struct alg_test_desc *desc, if (err) goto out; - tfm = crypto_alloc_shash(driver, type | CRYPTO_ALG_INTERNAL, mask); + tfm = crypto_alloc_shash(driver, type, mask); if (IS_ERR(tfm)) { printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: " "%ld\n", driver, PTR_ERR(tfm)); @@ -1688,7 +1850,7 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver, struct crypto_rng *rng; int err; - rng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask); + rng = crypto_alloc_rng(driver, type, mask); if (IS_ERR(rng)) { printk(KERN_ERR "alg: cprng: Failed to load transform for %s: " "%ld\n", driver, PTR_ERR(rng)); @@ -1715,7 +1877,7 @@ static int drbg_cavs_test(struct drbg_testvec *test, int pr, if (!buf) return -ENOMEM; - drng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask); + drng = crypto_alloc_rng(driver, type, mask); if (IS_ERR(drng)) { printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for " "%s\n", driver); @@ -1909,7 +2071,7 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver, struct crypto_kpp *tfm; int err = 0; - tfm = crypto_alloc_kpp(driver, type | CRYPTO_ALG_INTERNAL, mask); + tfm = crypto_alloc_kpp(driver, type, mask); if (IS_ERR(tfm)) { pr_err("alg: kpp: Failed to load tfm for %s: %ld\n", driver, PTR_ERR(tfm)); @@ -2068,7 +2230,7 @@ static int alg_test_akcipher(const struct alg_test_desc *desc, struct crypto_akcipher *tfm; int err = 0; - tfm = crypto_alloc_akcipher(driver, type | CRYPTO_ALG_INTERNAL, mask); + tfm = crypto_alloc_akcipher(driver, type, mask); if (IS_ERR(tfm)) { 
pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n", driver, PTR_ERR(tfm)); @@ -2088,112 +2250,23 @@ static int alg_test_null(const struct alg_test_desc *desc, return 0; } +#define __VECS(tv) { .vecs = tv, .count = ARRAY_SIZE(tv) } + /* Please keep this list sorted by algorithm name. */ static const struct alg_test_desc alg_test_descs[] = { { - .alg = "__cbc-cast5-avx", - .test = alg_test_null, - }, { - .alg = "__cbc-cast6-avx", - .test = alg_test_null, - }, { - .alg = "__cbc-serpent-avx", - .test = alg_test_null, - }, { - .alg = "__cbc-serpent-avx2", - .test = alg_test_null, - }, { - .alg = "__cbc-serpent-sse2", - .test = alg_test_null, - }, { - .alg = "__cbc-twofish-avx", - .test = alg_test_null, - }, { - .alg = "__driver-cbc-aes-aesni", - .test = alg_test_null, - .fips_allowed = 1, - }, { - .alg = "__driver-cbc-camellia-aesni", - .test = alg_test_null, - }, { - .alg = "__driver-cbc-camellia-aesni-avx2", - .test = alg_test_null, - }, { - .alg = "__driver-cbc-cast5-avx", - .test = alg_test_null, - }, { - .alg = "__driver-cbc-cast6-avx", - .test = alg_test_null, - }, { - .alg = "__driver-cbc-serpent-avx", - .test = alg_test_null, - }, { - .alg = "__driver-cbc-serpent-avx2", - .test = alg_test_null, - }, { - .alg = "__driver-cbc-serpent-sse2", - .test = alg_test_null, - }, { - .alg = "__driver-cbc-twofish-avx", - .test = alg_test_null, - }, { - .alg = "__driver-ecb-aes-aesni", - .test = alg_test_null, - .fips_allowed = 1, - }, { - .alg = "__driver-ecb-camellia-aesni", - .test = alg_test_null, - }, { - .alg = "__driver-ecb-camellia-aesni-avx2", - .test = alg_test_null, - }, { - .alg = "__driver-ecb-cast5-avx", - .test = alg_test_null, - }, { - .alg = "__driver-ecb-cast6-avx", - .test = alg_test_null, - }, { - .alg = "__driver-ecb-serpent-avx", - .test = alg_test_null, - }, { - .alg = "__driver-ecb-serpent-avx2", - .test = alg_test_null, - }, { - .alg = "__driver-ecb-serpent-sse2", - .test = alg_test_null, - }, { - .alg = "__driver-ecb-twofish-avx", - .test = alg_test_null, - }, { - .alg = "__driver-gcm-aes-aesni", - .test = alg_test_null, - .fips_allowed = 1, - }, { - .alg = "__ghash-pclmulqdqni", - .test = alg_test_null, - .fips_allowed = 1, - }, { .alg = "ansi_cprng", .test = alg_test_cprng, .suite = { - .cprng = { - .vecs = ansi_cprng_aes_tv_template, - .count = ANSI_CPRNG_AES_TEST_VECTORS - } + .cprng = __VECS(ansi_cprng_aes_tv_template) } }, { .alg = "authenc(hmac(md5),ecb(cipher_null))", .test = alg_test_aead, .suite = { .aead = { - .enc = { - .vecs = hmac_md5_ecb_cipher_null_enc_tv_template, - .count = HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS - }, - .dec = { - .vecs = hmac_md5_ecb_cipher_null_dec_tv_template, - .count = HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS - } + .enc = __VECS(hmac_md5_ecb_cipher_null_enc_tv_template), + .dec = __VECS(hmac_md5_ecb_cipher_null_dec_tv_template) } } }, { @@ -2201,12 +2274,7 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_aead, .suite = { .aead = { - .enc = { - .vecs = - hmac_sha1_aes_cbc_enc_tv_temp, - .count = - HMAC_SHA1_AES_CBC_ENC_TEST_VEC - } + .enc = __VECS(hmac_sha1_aes_cbc_enc_tv_temp) } } }, { @@ -2214,12 +2282,7 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_aead, .suite = { .aead = { - .enc = { - .vecs = - hmac_sha1_des_cbc_enc_tv_temp, - .count = - HMAC_SHA1_DES_CBC_ENC_TEST_VEC - } + .enc = __VECS(hmac_sha1_des_cbc_enc_tv_temp) } } }, { @@ -2228,12 +2291,7 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .suite = { .aead = { - .enc = { - .vecs = - 
hmac_sha1_des3_ede_cbc_enc_tv_temp, - .count = - HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC - } + .enc = __VECS(hmac_sha1_des3_ede_cbc_enc_tv_temp) } } }, { @@ -2245,18 +2303,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_aead, .suite = { .aead = { - .enc = { - .vecs = - hmac_sha1_ecb_cipher_null_enc_tv_temp, - .count = - HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC - }, - .dec = { - .vecs = - hmac_sha1_ecb_cipher_null_dec_tv_temp, - .count = - HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC - } + .enc = __VECS(hmac_sha1_ecb_cipher_null_enc_tv_temp), + .dec = __VECS(hmac_sha1_ecb_cipher_null_dec_tv_temp) } } }, { @@ -2268,12 +2316,7 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_aead, .suite = { .aead = { - .enc = { - .vecs = - hmac_sha224_des_cbc_enc_tv_temp, - .count = - HMAC_SHA224_DES_CBC_ENC_TEST_VEC - } + .enc = __VECS(hmac_sha224_des_cbc_enc_tv_temp) } } }, { @@ -2282,12 +2325,7 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .suite = { .aead = { - .enc = { - .vecs = - hmac_sha224_des3_ede_cbc_enc_tv_temp, - .count = - HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC - } + .enc = __VECS(hmac_sha224_des3_ede_cbc_enc_tv_temp) } } }, { @@ -2296,12 +2334,7 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .suite = { .aead = { - .enc = { - .vecs = - hmac_sha256_aes_cbc_enc_tv_temp, - .count = - HMAC_SHA256_AES_CBC_ENC_TEST_VEC - } + .enc = __VECS(hmac_sha256_aes_cbc_enc_tv_temp) } } }, { @@ -2309,12 +2342,7 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_aead, .suite = { .aead = { - .enc = { - .vecs = - hmac_sha256_des_cbc_enc_tv_temp, - .count = - HMAC_SHA256_DES_CBC_ENC_TEST_VEC - } + .enc = __VECS(hmac_sha256_des_cbc_enc_tv_temp) } } }, { @@ -2323,12 +2351,7 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .suite = { .aead = { - .enc = { - .vecs = - hmac_sha256_des3_ede_cbc_enc_tv_temp, - .count = - HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC - } + .enc = __VECS(hmac_sha256_des3_ede_cbc_enc_tv_temp) } } }, { @@ -2344,12 +2367,7 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_aead, .suite = { .aead = { - .enc = { - .vecs = - hmac_sha384_des_cbc_enc_tv_temp, - .count = - HMAC_SHA384_DES_CBC_ENC_TEST_VEC - } + .enc = __VECS(hmac_sha384_des_cbc_enc_tv_temp) } } }, { @@ -2358,12 +2376,7 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .suite = { .aead = { - .enc = { - .vecs = - hmac_sha384_des3_ede_cbc_enc_tv_temp, - .count = - HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC - } + .enc = __VECS(hmac_sha384_des3_ede_cbc_enc_tv_temp) } } }, { @@ -2380,12 +2393,7 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_aead, .suite = { .aead = { - .enc = { - .vecs = - hmac_sha512_aes_cbc_enc_tv_temp, - .count = - HMAC_SHA512_AES_CBC_ENC_TEST_VEC - } + .enc = __VECS(hmac_sha512_aes_cbc_enc_tv_temp) } } }, { @@ -2393,12 +2401,7 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_aead, .suite = { .aead = { - .enc = { - .vecs = - hmac_sha512_des_cbc_enc_tv_temp, - .count = - HMAC_SHA512_DES_CBC_ENC_TEST_VEC - } + .enc = __VECS(hmac_sha512_des_cbc_enc_tv_temp) } } }, { @@ -2407,12 +2410,7 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .suite = { .aead = { - .enc = { - .vecs = - hmac_sha512_des3_ede_cbc_enc_tv_temp, - .count = - HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC - } + .enc = __VECS(hmac_sha512_des3_ede_cbc_enc_tv_temp) } } }, { @@ -2429,14 +2427,8 @@ 
static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .suite = { .cipher = { - .enc = { - .vecs = aes_cbc_enc_tv_template, - .count = AES_CBC_ENC_TEST_VECTORS - }, - .dec = { - .vecs = aes_cbc_dec_tv_template, - .count = AES_CBC_DEC_TEST_VECTORS - } + .enc = __VECS(aes_cbc_enc_tv_template), + .dec = __VECS(aes_cbc_dec_tv_template) } } }, { @@ -2444,14 +2436,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = anubis_cbc_enc_tv_template, - .count = ANUBIS_CBC_ENC_TEST_VECTORS - }, - .dec = { - .vecs = anubis_cbc_dec_tv_template, - .count = ANUBIS_CBC_DEC_TEST_VECTORS - } + .enc = __VECS(anubis_cbc_enc_tv_template), + .dec = __VECS(anubis_cbc_dec_tv_template) } } }, { @@ -2459,14 +2445,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = bf_cbc_enc_tv_template, - .count = BF_CBC_ENC_TEST_VECTORS - }, - .dec = { - .vecs = bf_cbc_dec_tv_template, - .count = BF_CBC_DEC_TEST_VECTORS - } + .enc = __VECS(bf_cbc_enc_tv_template), + .dec = __VECS(bf_cbc_dec_tv_template) } } }, { @@ -2474,14 +2454,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = camellia_cbc_enc_tv_template, - .count = CAMELLIA_CBC_ENC_TEST_VECTORS - }, - .dec = { - .vecs = camellia_cbc_dec_tv_template, - .count = CAMELLIA_CBC_DEC_TEST_VECTORS - } + .enc = __VECS(camellia_cbc_enc_tv_template), + .dec = __VECS(camellia_cbc_dec_tv_template) } } }, { @@ -2489,14 +2463,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = cast5_cbc_enc_tv_template, - .count = CAST5_CBC_ENC_TEST_VECTORS - }, - .dec = { - .vecs = cast5_cbc_dec_tv_template, - .count = CAST5_CBC_DEC_TEST_VECTORS - } + .enc = __VECS(cast5_cbc_enc_tv_template), + .dec = __VECS(cast5_cbc_dec_tv_template) } } }, { @@ -2504,14 +2472,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = cast6_cbc_enc_tv_template, - .count = CAST6_CBC_ENC_TEST_VECTORS - }, - .dec = { - .vecs = cast6_cbc_dec_tv_template, - .count = CAST6_CBC_DEC_TEST_VECTORS - } + .enc = __VECS(cast6_cbc_enc_tv_template), + .dec = __VECS(cast6_cbc_dec_tv_template) } } }, { @@ -2519,14 +2481,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = des_cbc_enc_tv_template, - .count = DES_CBC_ENC_TEST_VECTORS - }, - .dec = { - .vecs = des_cbc_dec_tv_template, - .count = DES_CBC_DEC_TEST_VECTORS - } + .enc = __VECS(des_cbc_enc_tv_template), + .dec = __VECS(des_cbc_dec_tv_template) } } }, { @@ -2535,14 +2491,8 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .suite = { .cipher = { - .enc = { - .vecs = des3_ede_cbc_enc_tv_template, - .count = DES3_EDE_CBC_ENC_TEST_VECTORS - }, - .dec = { - .vecs = des3_ede_cbc_dec_tv_template, - .count = DES3_EDE_CBC_DEC_TEST_VECTORS - } + .enc = __VECS(des3_ede_cbc_enc_tv_template), + .dec = __VECS(des3_ede_cbc_dec_tv_template) } } }, { @@ -2550,14 +2500,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = serpent_cbc_enc_tv_template, - .count = SERPENT_CBC_ENC_TEST_VECTORS - }, - .dec = { - .vecs = serpent_cbc_dec_tv_template, - .count = SERPENT_CBC_DEC_TEST_VECTORS - } + .enc = 
__VECS(serpent_cbc_enc_tv_template), + .dec = __VECS(serpent_cbc_dec_tv_template) } } }, { @@ -2565,30 +2509,25 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = tf_cbc_enc_tv_template, - .count = TF_CBC_ENC_TEST_VECTORS - }, - .dec = { - .vecs = tf_cbc_dec_tv_template, - .count = TF_CBC_DEC_TEST_VECTORS - } + .enc = __VECS(tf_cbc_enc_tv_template), + .dec = __VECS(tf_cbc_dec_tv_template) } } }, { + .alg = "cbcmac(aes)", + .fips_allowed = 1, + .test = alg_test_hash, + .suite = { + .hash = __VECS(aes_cbcmac_tv_template) + } + }, { .alg = "ccm(aes)", .test = alg_test_aead, .fips_allowed = 1, .suite = { .aead = { - .enc = { - .vecs = aes_ccm_enc_tv_template, - .count = AES_CCM_ENC_TEST_VECTORS - }, - .dec = { - .vecs = aes_ccm_dec_tv_template, - .count = AES_CCM_DEC_TEST_VECTORS - } + .enc = __VECS(aes_ccm_enc_tv_template), + .dec = __VECS(aes_ccm_dec_tv_template) } } }, { @@ -2596,14 +2535,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = chacha20_enc_tv_template, - .count = CHACHA20_ENC_TEST_VECTORS - }, - .dec = { - .vecs = chacha20_enc_tv_template, - .count = CHACHA20_ENC_TEST_VECTORS - }, + .enc = __VECS(chacha20_enc_tv_template), + .dec = __VECS(chacha20_enc_tv_template), } } }, { @@ -2611,20 +2544,14 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .test = alg_test_hash, .suite = { - .hash = { - .vecs = aes_cmac128_tv_template, - .count = CMAC_AES_TEST_VECTORS - } + .hash = __VECS(aes_cmac128_tv_template) } }, { .alg = "cmac(des3_ede)", .fips_allowed = 1, .test = alg_test_hash, .suite = { - .hash = { - .vecs = des3_ede_cmac64_tv_template, - .count = CMAC_DES3_EDE_TEST_VECTORS - } + .hash = __VECS(des3_ede_cmac64_tv_template) } }, { .alg = "compress_null", @@ -2633,94 +2560,30 @@ static const struct alg_test_desc alg_test_descs[] = { .alg = "crc32", .test = alg_test_hash, .suite = { - .hash = { - .vecs = crc32_tv_template, - .count = CRC32_TEST_VECTORS - } + .hash = __VECS(crc32_tv_template) } }, { .alg = "crc32c", .test = alg_test_crc32c, .fips_allowed = 1, .suite = { - .hash = { - .vecs = crc32c_tv_template, - .count = CRC32C_TEST_VECTORS - } + .hash = __VECS(crc32c_tv_template) } }, { .alg = "crct10dif", .test = alg_test_hash, .fips_allowed = 1, .suite = { - .hash = { - .vecs = crct10dif_tv_template, - .count = CRCT10DIF_TEST_VECTORS - } + .hash = __VECS(crct10dif_tv_template) } }, { - .alg = "cryptd(__driver-cbc-aes-aesni)", - .test = alg_test_null, - .fips_allowed = 1, - }, { - .alg = "cryptd(__driver-cbc-camellia-aesni)", - .test = alg_test_null, - }, { - .alg = "cryptd(__driver-cbc-camellia-aesni-avx2)", - .test = alg_test_null, - }, { - .alg = "cryptd(__driver-cbc-serpent-avx2)", - .test = alg_test_null, - }, { - .alg = "cryptd(__driver-ecb-aes-aesni)", - .test = alg_test_null, - .fips_allowed = 1, - }, { - .alg = "cryptd(__driver-ecb-camellia-aesni)", - .test = alg_test_null, - }, { - .alg = "cryptd(__driver-ecb-camellia-aesni-avx2)", - .test = alg_test_null, - }, { - .alg = "cryptd(__driver-ecb-cast5-avx)", - .test = alg_test_null, - }, { - .alg = "cryptd(__driver-ecb-cast6-avx)", - .test = alg_test_null, - }, { - .alg = "cryptd(__driver-ecb-serpent-avx)", - .test = alg_test_null, - }, { - .alg = "cryptd(__driver-ecb-serpent-avx2)", - .test = alg_test_null, - }, { - .alg = "cryptd(__driver-ecb-serpent-sse2)", - .test = alg_test_null, - }, { - .alg = "cryptd(__driver-ecb-twofish-avx)", - .test 
= alg_test_null, - }, { - .alg = "cryptd(__driver-gcm-aes-aesni)", - .test = alg_test_null, - .fips_allowed = 1, - }, { - .alg = "cryptd(__ghash-pclmulqdqni)", - .test = alg_test_null, - .fips_allowed = 1, - }, { .alg = "ctr(aes)", .test = alg_test_skcipher, .fips_allowed = 1, .suite = { .cipher = { - .enc = { - .vecs = aes_ctr_enc_tv_template, - .count = AES_CTR_ENC_TEST_VECTORS - }, - .dec = { - .vecs = aes_ctr_dec_tv_template, - .count = AES_CTR_DEC_TEST_VECTORS - } + .enc = __VECS(aes_ctr_enc_tv_template), + .dec = __VECS(aes_ctr_dec_tv_template) } } }, { @@ -2728,14 +2591,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = bf_ctr_enc_tv_template, - .count = BF_CTR_ENC_TEST_VECTORS - }, - .dec = { - .vecs = bf_ctr_dec_tv_template, - .count = BF_CTR_DEC_TEST_VECTORS - } + .enc = __VECS(bf_ctr_enc_tv_template), + .dec = __VECS(bf_ctr_dec_tv_template) } } }, { @@ -2743,14 +2600,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = camellia_ctr_enc_tv_template, - .count = CAMELLIA_CTR_ENC_TEST_VECTORS - }, - .dec = { - .vecs = camellia_ctr_dec_tv_template, - .count = CAMELLIA_CTR_DEC_TEST_VECTORS - } + .enc = __VECS(camellia_ctr_enc_tv_template), + .dec = __VECS(camellia_ctr_dec_tv_template) } } }, { @@ -2758,14 +2609,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = cast5_ctr_enc_tv_template, - .count = CAST5_CTR_ENC_TEST_VECTORS - }, - .dec = { - .vecs = cast5_ctr_dec_tv_template, - .count = CAST5_CTR_DEC_TEST_VECTORS - } + .enc = __VECS(cast5_ctr_enc_tv_template), + .dec = __VECS(cast5_ctr_dec_tv_template) } } }, { @@ -2773,14 +2618,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = cast6_ctr_enc_tv_template, - .count = CAST6_CTR_ENC_TEST_VECTORS - }, - .dec = { - .vecs = cast6_ctr_dec_tv_template, - .count = CAST6_CTR_DEC_TEST_VECTORS - } + .enc = __VECS(cast6_ctr_enc_tv_template), + .dec = __VECS(cast6_ctr_dec_tv_template) } } }, { @@ -2788,14 +2627,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = des_ctr_enc_tv_template, - .count = DES_CTR_ENC_TEST_VECTORS - }, - .dec = { - .vecs = des_ctr_dec_tv_template, - .count = DES_CTR_DEC_TEST_VECTORS - } + .enc = __VECS(des_ctr_enc_tv_template), + .dec = __VECS(des_ctr_dec_tv_template) } } }, { @@ -2803,14 +2636,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = des3_ede_ctr_enc_tv_template, - .count = DES3_EDE_CTR_ENC_TEST_VECTORS - }, - .dec = { - .vecs = des3_ede_ctr_dec_tv_template, - .count = DES3_EDE_CTR_DEC_TEST_VECTORS - } + .enc = __VECS(des3_ede_ctr_enc_tv_template), + .dec = __VECS(des3_ede_ctr_dec_tv_template) } } }, { @@ -2818,14 +2645,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = serpent_ctr_enc_tv_template, - .count = SERPENT_CTR_ENC_TEST_VECTORS - }, - .dec = { - .vecs = serpent_ctr_dec_tv_template, - .count = SERPENT_CTR_DEC_TEST_VECTORS - } + .enc = __VECS(serpent_ctr_enc_tv_template), + .dec = __VECS(serpent_ctr_dec_tv_template) } } }, { @@ -2833,14 +2654,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc 
= { - .vecs = tf_ctr_enc_tv_template, - .count = TF_CTR_ENC_TEST_VECTORS - }, - .dec = { - .vecs = tf_ctr_dec_tv_template, - .count = TF_CTR_DEC_TEST_VECTORS - } + .enc = __VECS(tf_ctr_enc_tv_template), + .dec = __VECS(tf_ctr_dec_tv_template) } } }, { @@ -2848,14 +2663,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = cts_mode_enc_tv_template, - .count = CTS_MODE_ENC_TEST_VECTORS - }, - .dec = { - .vecs = cts_mode_dec_tv_template, - .count = CTS_MODE_DEC_TEST_VECTORS - } + .enc = __VECS(cts_mode_enc_tv_template), + .dec = __VECS(cts_mode_dec_tv_template) } } }, { @@ -2864,14 +2673,8 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .suite = { .comp = { - .comp = { - .vecs = deflate_comp_tv_template, - .count = DEFLATE_COMP_TEST_VECTORS - }, - .decomp = { - .vecs = deflate_decomp_tv_template, - .count = DEFLATE_DECOMP_TEST_VECTORS - } + .comp = __VECS(deflate_comp_tv_template), + .decomp = __VECS(deflate_decomp_tv_template) } } }, { @@ -2879,10 +2682,7 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_kpp, .fips_allowed = 1, .suite = { - .kpp = { - .vecs = dh_tv_template, - .count = DH_TEST_VECTORS - } + .kpp = __VECS(dh_tv_template) } }, { .alg = "digest_null", @@ -2892,30 +2692,21 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_drbg, .fips_allowed = 1, .suite = { - .drbg = { - .vecs = drbg_nopr_ctr_aes128_tv_template, - .count = ARRAY_SIZE(drbg_nopr_ctr_aes128_tv_template) - } + .drbg = __VECS(drbg_nopr_ctr_aes128_tv_template) } }, { .alg = "drbg_nopr_ctr_aes192", .test = alg_test_drbg, .fips_allowed = 1, .suite = { - .drbg = { - .vecs = drbg_nopr_ctr_aes192_tv_template, - .count = ARRAY_SIZE(drbg_nopr_ctr_aes192_tv_template) - } + .drbg = __VECS(drbg_nopr_ctr_aes192_tv_template) } }, { .alg = "drbg_nopr_ctr_aes256", .test = alg_test_drbg, .fips_allowed = 1, .suite = { - .drbg = { - .vecs = drbg_nopr_ctr_aes256_tv_template, - .count = ARRAY_SIZE(drbg_nopr_ctr_aes256_tv_template) - } + .drbg = __VECS(drbg_nopr_ctr_aes256_tv_template) } }, { /* @@ -2930,11 +2721,7 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_drbg, .fips_allowed = 1, .suite = { - .drbg = { - .vecs = drbg_nopr_hmac_sha256_tv_template, - .count = - ARRAY_SIZE(drbg_nopr_hmac_sha256_tv_template) - } + .drbg = __VECS(drbg_nopr_hmac_sha256_tv_template) } }, { /* covered by drbg_nopr_hmac_sha256 test */ @@ -2954,10 +2741,7 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_drbg, .fips_allowed = 1, .suite = { - .drbg = { - .vecs = drbg_nopr_sha256_tv_template, - .count = ARRAY_SIZE(drbg_nopr_sha256_tv_template) - } + .drbg = __VECS(drbg_nopr_sha256_tv_template) } }, { /* covered by drbg_nopr_sha256 test */ @@ -2973,10 +2757,7 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_drbg, .fips_allowed = 1, .suite = { - .drbg = { - .vecs = drbg_pr_ctr_aes128_tv_template, - .count = ARRAY_SIZE(drbg_pr_ctr_aes128_tv_template) - } + .drbg = __VECS(drbg_pr_ctr_aes128_tv_template) } }, { /* covered by drbg_pr_ctr_aes128 test */ @@ -2996,10 +2777,7 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_drbg, .fips_allowed = 1, .suite = { - .drbg = { - .vecs = drbg_pr_hmac_sha256_tv_template, - .count = ARRAY_SIZE(drbg_pr_hmac_sha256_tv_template) - } + .drbg = __VECS(drbg_pr_hmac_sha256_tv_template) } }, { /* covered by drbg_pr_hmac_sha256 test */ @@ -3019,10 +2797,7 @@ static const struct 
alg_test_desc alg_test_descs[] = { .test = alg_test_drbg, .fips_allowed = 1, .suite = { - .drbg = { - .vecs = drbg_pr_sha256_tv_template, - .count = ARRAY_SIZE(drbg_pr_sha256_tv_template) - } + .drbg = __VECS(drbg_pr_sha256_tv_template) } }, { /* covered by drbg_pr_sha256 test */ @@ -3034,23 +2809,13 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .test = alg_test_null, }, { - .alg = "ecb(__aes-aesni)", - .test = alg_test_null, - .fips_allowed = 1, - }, { .alg = "ecb(aes)", .test = alg_test_skcipher, .fips_allowed = 1, .suite = { .cipher = { - .enc = { - .vecs = aes_enc_tv_template, - .count = AES_ENC_TEST_VECTORS - }, - .dec = { - .vecs = aes_dec_tv_template, - .count = AES_DEC_TEST_VECTORS - } + .enc = __VECS(aes_enc_tv_template), + .dec = __VECS(aes_dec_tv_template) } } }, { @@ -3058,14 +2823,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = anubis_enc_tv_template, - .count = ANUBIS_ENC_TEST_VECTORS - }, - .dec = { - .vecs = anubis_dec_tv_template, - .count = ANUBIS_DEC_TEST_VECTORS - } + .enc = __VECS(anubis_enc_tv_template), + .dec = __VECS(anubis_dec_tv_template) } } }, { @@ -3073,14 +2832,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = arc4_enc_tv_template, - .count = ARC4_ENC_TEST_VECTORS - }, - .dec = { - .vecs = arc4_dec_tv_template, - .count = ARC4_DEC_TEST_VECTORS - } + .enc = __VECS(arc4_enc_tv_template), + .dec = __VECS(arc4_dec_tv_template) } } }, { @@ -3088,14 +2841,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = bf_enc_tv_template, - .count = BF_ENC_TEST_VECTORS - }, - .dec = { - .vecs = bf_dec_tv_template, - .count = BF_DEC_TEST_VECTORS - } + .enc = __VECS(bf_enc_tv_template), + .dec = __VECS(bf_dec_tv_template) } } }, { @@ -3103,14 +2850,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = camellia_enc_tv_template, - .count = CAMELLIA_ENC_TEST_VECTORS - }, - .dec = { - .vecs = camellia_dec_tv_template, - .count = CAMELLIA_DEC_TEST_VECTORS - } + .enc = __VECS(camellia_enc_tv_template), + .dec = __VECS(camellia_dec_tv_template) } } }, { @@ -3118,14 +2859,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = cast5_enc_tv_template, - .count = CAST5_ENC_TEST_VECTORS - }, - .dec = { - .vecs = cast5_dec_tv_template, - .count = CAST5_DEC_TEST_VECTORS - } + .enc = __VECS(cast5_enc_tv_template), + .dec = __VECS(cast5_dec_tv_template) } } }, { @@ -3133,14 +2868,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = cast6_enc_tv_template, - .count = CAST6_ENC_TEST_VECTORS - }, - .dec = { - .vecs = cast6_dec_tv_template, - .count = CAST6_DEC_TEST_VECTORS - } + .enc = __VECS(cast6_enc_tv_template), + .dec = __VECS(cast6_dec_tv_template) } } }, { @@ -3151,14 +2880,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = des_enc_tv_template, - .count = DES_ENC_TEST_VECTORS - }, - .dec = { - .vecs = des_dec_tv_template, - .count = DES_DEC_TEST_VECTORS - } + .enc = __VECS(des_enc_tv_template), + .dec = __VECS(des_dec_tv_template) } } }, { @@ -3167,14 +2890,8 @@ static const struct alg_test_desc alg_test_descs[] = { 
.fips_allowed = 1, .suite = { .cipher = { - .enc = { - .vecs = des3_ede_enc_tv_template, - .count = DES3_EDE_ENC_TEST_VECTORS - }, - .dec = { - .vecs = des3_ede_dec_tv_template, - .count = DES3_EDE_DEC_TEST_VECTORS - } + .enc = __VECS(des3_ede_enc_tv_template), + .dec = __VECS(des3_ede_dec_tv_template) } } }, { @@ -3197,14 +2914,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = khazad_enc_tv_template, - .count = KHAZAD_ENC_TEST_VECTORS - }, - .dec = { - .vecs = khazad_dec_tv_template, - .count = KHAZAD_DEC_TEST_VECTORS - } + .enc = __VECS(khazad_enc_tv_template), + .dec = __VECS(khazad_dec_tv_template) } } }, { @@ -3212,14 +2923,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = seed_enc_tv_template, - .count = SEED_ENC_TEST_VECTORS - }, - .dec = { - .vecs = seed_dec_tv_template, - .count = SEED_DEC_TEST_VECTORS - } + .enc = __VECS(seed_enc_tv_template), + .dec = __VECS(seed_dec_tv_template) } } }, { @@ -3227,14 +2932,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = serpent_enc_tv_template, - .count = SERPENT_ENC_TEST_VECTORS - }, - .dec = { - .vecs = serpent_dec_tv_template, - .count = SERPENT_DEC_TEST_VECTORS - } + .enc = __VECS(serpent_enc_tv_template), + .dec = __VECS(serpent_dec_tv_template) } } }, { @@ -3242,14 +2941,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = tea_enc_tv_template, - .count = TEA_ENC_TEST_VECTORS - }, - .dec = { - .vecs = tea_dec_tv_template, - .count = TEA_DEC_TEST_VECTORS - } + .enc = __VECS(tea_enc_tv_template), + .dec = __VECS(tea_dec_tv_template) } } }, { @@ -3257,14 +2950,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = tnepres_enc_tv_template, - .count = TNEPRES_ENC_TEST_VECTORS - }, - .dec = { - .vecs = tnepres_dec_tv_template, - .count = TNEPRES_DEC_TEST_VECTORS - } + .enc = __VECS(tnepres_enc_tv_template), + .dec = __VECS(tnepres_dec_tv_template) } } }, { @@ -3272,14 +2959,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = tf_enc_tv_template, - .count = TF_ENC_TEST_VECTORS - }, - .dec = { - .vecs = tf_dec_tv_template, - .count = TF_DEC_TEST_VECTORS - } + .enc = __VECS(tf_enc_tv_template), + .dec = __VECS(tf_dec_tv_template) } } }, { @@ -3287,14 +2968,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = xeta_enc_tv_template, - .count = XETA_ENC_TEST_VECTORS - }, - .dec = { - .vecs = xeta_dec_tv_template, - .count = XETA_DEC_TEST_VECTORS - } + .enc = __VECS(xeta_enc_tv_template), + .dec = __VECS(xeta_dec_tv_template) } } }, { @@ -3302,14 +2977,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = xtea_enc_tv_template, - .count = XTEA_ENC_TEST_VECTORS - }, - .dec = { - .vecs = xtea_dec_tv_template, - .count = XTEA_DEC_TEST_VECTORS - } + .enc = __VECS(xtea_enc_tv_template), + .dec = __VECS(xtea_dec_tv_template) } } }, { @@ -3317,10 +2986,7 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_kpp, .fips_allowed = 1, .suite = { - .kpp = { - .vecs = ecdh_tv_template, - .count = ECDH_TEST_VECTORS - } + .kpp = 
__VECS(ecdh_tv_template) } }, { .alg = "gcm(aes)", @@ -3328,14 +2994,8 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .suite = { .aead = { - .enc = { - .vecs = aes_gcm_enc_tv_template, - .count = AES_GCM_ENC_TEST_VECTORS - }, - .dec = { - .vecs = aes_gcm_dec_tv_template, - .count = AES_GCM_DEC_TEST_VECTORS - } + .enc = __VECS(aes_gcm_enc_tv_template), + .dec = __VECS(aes_gcm_dec_tv_template) } } }, { @@ -3343,136 +3003,94 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_hash, .fips_allowed = 1, .suite = { - .hash = { - .vecs = ghash_tv_template, - .count = GHASH_TEST_VECTORS - } + .hash = __VECS(ghash_tv_template) } }, { .alg = "hmac(crc32)", .test = alg_test_hash, .suite = { - .hash = { - .vecs = bfin_crc_tv_template, - .count = BFIN_CRC_TEST_VECTORS - } + .hash = __VECS(bfin_crc_tv_template) } }, { .alg = "hmac(md5)", .test = alg_test_hash, .suite = { - .hash = { - .vecs = hmac_md5_tv_template, - .count = HMAC_MD5_TEST_VECTORS - } + .hash = __VECS(hmac_md5_tv_template) } }, { .alg = "hmac(rmd128)", .test = alg_test_hash, .suite = { - .hash = { - .vecs = hmac_rmd128_tv_template, - .count = HMAC_RMD128_TEST_VECTORS - } + .hash = __VECS(hmac_rmd128_tv_template) } }, { .alg = "hmac(rmd160)", .test = alg_test_hash, .suite = { - .hash = { - .vecs = hmac_rmd160_tv_template, - .count = HMAC_RMD160_TEST_VECTORS - } + .hash = __VECS(hmac_rmd160_tv_template) } }, { .alg = "hmac(sha1)", .test = alg_test_hash, .fips_allowed = 1, .suite = { - .hash = { - .vecs = hmac_sha1_tv_template, - .count = HMAC_SHA1_TEST_VECTORS - } + .hash = __VECS(hmac_sha1_tv_template) } }, { .alg = "hmac(sha224)", .test = alg_test_hash, .fips_allowed = 1, .suite = { - .hash = { - .vecs = hmac_sha224_tv_template, - .count = HMAC_SHA224_TEST_VECTORS - } + .hash = __VECS(hmac_sha224_tv_template) } }, { .alg = "hmac(sha256)", .test = alg_test_hash, .fips_allowed = 1, .suite = { - .hash = { - .vecs = hmac_sha256_tv_template, - .count = HMAC_SHA256_TEST_VECTORS - } + .hash = __VECS(hmac_sha256_tv_template) } }, { .alg = "hmac(sha3-224)", .test = alg_test_hash, .fips_allowed = 1, .suite = { - .hash = { - .vecs = hmac_sha3_224_tv_template, - .count = HMAC_SHA3_224_TEST_VECTORS - } + .hash = __VECS(hmac_sha3_224_tv_template) } }, { .alg = "hmac(sha3-256)", .test = alg_test_hash, .fips_allowed = 1, .suite = { - .hash = { - .vecs = hmac_sha3_256_tv_template, - .count = HMAC_SHA3_256_TEST_VECTORS - } + .hash = __VECS(hmac_sha3_256_tv_template) } }, { .alg = "hmac(sha3-384)", .test = alg_test_hash, .fips_allowed = 1, .suite = { - .hash = { - .vecs = hmac_sha3_384_tv_template, - .count = HMAC_SHA3_384_TEST_VECTORS - } + .hash = __VECS(hmac_sha3_384_tv_template) } }, { .alg = "hmac(sha3-512)", .test = alg_test_hash, .fips_allowed = 1, .suite = { - .hash = { - .vecs = hmac_sha3_512_tv_template, - .count = HMAC_SHA3_512_TEST_VECTORS - } + .hash = __VECS(hmac_sha3_512_tv_template) } }, { .alg = "hmac(sha384)", .test = alg_test_hash, .fips_allowed = 1, .suite = { - .hash = { - .vecs = hmac_sha384_tv_template, - .count = HMAC_SHA384_TEST_VECTORS - } + .hash = __VECS(hmac_sha384_tv_template) } }, { .alg = "hmac(sha512)", .test = alg_test_hash, .fips_allowed = 1, .suite = { - .hash = { - .vecs = hmac_sha512_tv_template, - .count = HMAC_SHA512_TEST_VECTORS - } + .hash = __VECS(hmac_sha512_tv_template) } }, { .alg = "jitterentropy_rng", @@ -3484,14 +3102,8 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .suite = { .cipher = { - .enc = { - .vecs = 
aes_kw_enc_tv_template, - .count = ARRAY_SIZE(aes_kw_enc_tv_template) - }, - .dec = { - .vecs = aes_kw_dec_tv_template, - .count = ARRAY_SIZE(aes_kw_dec_tv_template) - } + .enc = __VECS(aes_kw_enc_tv_template), + .dec = __VECS(aes_kw_dec_tv_template) } } }, { @@ -3499,14 +3111,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = aes_lrw_enc_tv_template, - .count = AES_LRW_ENC_TEST_VECTORS - }, - .dec = { - .vecs = aes_lrw_dec_tv_template, - .count = AES_LRW_DEC_TEST_VECTORS - } + .enc = __VECS(aes_lrw_enc_tv_template), + .dec = __VECS(aes_lrw_dec_tv_template) } } }, { @@ -3514,14 +3120,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = camellia_lrw_enc_tv_template, - .count = CAMELLIA_LRW_ENC_TEST_VECTORS - }, - .dec = { - .vecs = camellia_lrw_dec_tv_template, - .count = CAMELLIA_LRW_DEC_TEST_VECTORS - } + .enc = __VECS(camellia_lrw_enc_tv_template), + .dec = __VECS(camellia_lrw_dec_tv_template) } } }, { @@ -3529,14 +3129,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = cast6_lrw_enc_tv_template, - .count = CAST6_LRW_ENC_TEST_VECTORS - }, - .dec = { - .vecs = cast6_lrw_dec_tv_template, - .count = CAST6_LRW_DEC_TEST_VECTORS - } + .enc = __VECS(cast6_lrw_enc_tv_template), + .dec = __VECS(cast6_lrw_dec_tv_template) } } }, { @@ -3544,14 +3138,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = serpent_lrw_enc_tv_template, - .count = SERPENT_LRW_ENC_TEST_VECTORS - }, - .dec = { - .vecs = serpent_lrw_dec_tv_template, - .count = SERPENT_LRW_DEC_TEST_VECTORS - } + .enc = __VECS(serpent_lrw_enc_tv_template), + .dec = __VECS(serpent_lrw_dec_tv_template) } } }, { @@ -3559,14 +3147,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = tf_lrw_enc_tv_template, - .count = TF_LRW_ENC_TEST_VECTORS - }, - .dec = { - .vecs = tf_lrw_dec_tv_template, - .count = TF_LRW_DEC_TEST_VECTORS - } + .enc = __VECS(tf_lrw_enc_tv_template), + .dec = __VECS(tf_lrw_dec_tv_template) } } }, { @@ -3575,14 +3157,8 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .suite = { .comp = { - .comp = { - .vecs = lz4_comp_tv_template, - .count = LZ4_COMP_TEST_VECTORS - }, - .decomp = { - .vecs = lz4_decomp_tv_template, - .count = LZ4_DECOMP_TEST_VECTORS - } + .comp = __VECS(lz4_comp_tv_template), + .decomp = __VECS(lz4_decomp_tv_template) } } }, { @@ -3591,14 +3167,8 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .suite = { .comp = { - .comp = { - .vecs = lz4hc_comp_tv_template, - .count = LZ4HC_COMP_TEST_VECTORS - }, - .decomp = { - .vecs = lz4hc_decomp_tv_template, - .count = LZ4HC_DECOMP_TEST_VECTORS - } + .comp = __VECS(lz4hc_comp_tv_template), + .decomp = __VECS(lz4hc_decomp_tv_template) } } }, { @@ -3607,42 +3177,27 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .suite = { .comp = { - .comp = { - .vecs = lzo_comp_tv_template, - .count = LZO_COMP_TEST_VECTORS - }, - .decomp = { - .vecs = lzo_decomp_tv_template, - .count = LZO_DECOMP_TEST_VECTORS - } + .comp = __VECS(lzo_comp_tv_template), + .decomp = __VECS(lzo_decomp_tv_template) } } }, { .alg = "md4", .test = alg_test_hash, .suite = { - .hash = { - .vecs = md4_tv_template, - .count = MD4_TEST_VECTORS - } 
+ .hash = __VECS(md4_tv_template) } }, { .alg = "md5", .test = alg_test_hash, .suite = { - .hash = { - .vecs = md5_tv_template, - .count = MD5_TEST_VECTORS - } + .hash = __VECS(md5_tv_template) } }, { .alg = "michael_mic", .test = alg_test_hash, .suite = { - .hash = { - .vecs = michael_mic_tv_template, - .count = MICHAEL_MIC_TEST_VECTORS - } + .hash = __VECS(michael_mic_tv_template) } }, { .alg = "ofb(aes)", @@ -3650,14 +3205,8 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .suite = { .cipher = { - .enc = { - .vecs = aes_ofb_enc_tv_template, - .count = AES_OFB_ENC_TEST_VECTORS - }, - .dec = { - .vecs = aes_ofb_dec_tv_template, - .count = AES_OFB_DEC_TEST_VECTORS - } + .enc = __VECS(aes_ofb_enc_tv_template), + .dec = __VECS(aes_ofb_dec_tv_template) } } }, { @@ -3665,24 +3214,15 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = fcrypt_pcbc_enc_tv_template, - .count = FCRYPT_ENC_TEST_VECTORS - }, - .dec = { - .vecs = fcrypt_pcbc_dec_tv_template, - .count = FCRYPT_DEC_TEST_VECTORS - } + .enc = __VECS(fcrypt_pcbc_enc_tv_template), + .dec = __VECS(fcrypt_pcbc_dec_tv_template) } } }, { .alg = "poly1305", .test = alg_test_hash, .suite = { - .hash = { - .vecs = poly1305_tv_template, - .count = POLY1305_TEST_VECTORS - } + .hash = __VECS(poly1305_tv_template) } }, { .alg = "rfc3686(ctr(aes))", @@ -3690,14 +3230,8 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .suite = { .cipher = { - .enc = { - .vecs = aes_ctr_rfc3686_enc_tv_template, - .count = AES_CTR_3686_ENC_TEST_VECTORS - }, - .dec = { - .vecs = aes_ctr_rfc3686_dec_tv_template, - .count = AES_CTR_3686_DEC_TEST_VECTORS - } + .enc = __VECS(aes_ctr_rfc3686_enc_tv_template), + .dec = __VECS(aes_ctr_rfc3686_dec_tv_template) } } }, { @@ -3706,14 +3240,8 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .suite = { .aead = { - .enc = { - .vecs = aes_gcm_rfc4106_enc_tv_template, - .count = AES_GCM_4106_ENC_TEST_VECTORS - }, - .dec = { - .vecs = aes_gcm_rfc4106_dec_tv_template, - .count = AES_GCM_4106_DEC_TEST_VECTORS - } + .enc = __VECS(aes_gcm_rfc4106_enc_tv_template), + .dec = __VECS(aes_gcm_rfc4106_dec_tv_template) } } }, { @@ -3722,14 +3250,8 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .suite = { .aead = { - .enc = { - .vecs = aes_ccm_rfc4309_enc_tv_template, - .count = AES_CCM_4309_ENC_TEST_VECTORS - }, - .dec = { - .vecs = aes_ccm_rfc4309_dec_tv_template, - .count = AES_CCM_4309_DEC_TEST_VECTORS - } + .enc = __VECS(aes_ccm_rfc4309_enc_tv_template), + .dec = __VECS(aes_ccm_rfc4309_dec_tv_template) } } }, { @@ -3737,14 +3259,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_aead, .suite = { .aead = { - .enc = { - .vecs = aes_gcm_rfc4543_enc_tv_template, - .count = AES_GCM_4543_ENC_TEST_VECTORS - }, - .dec = { - .vecs = aes_gcm_rfc4543_dec_tv_template, - .count = AES_GCM_4543_DEC_TEST_VECTORS - }, + .enc = __VECS(aes_gcm_rfc4543_enc_tv_template), + .dec = __VECS(aes_gcm_rfc4543_dec_tv_template), } } }, { @@ -3752,14 +3268,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_aead, .suite = { .aead = { - .enc = { - .vecs = rfc7539_enc_tv_template, - .count = RFC7539_ENC_TEST_VECTORS - }, - .dec = { - .vecs = rfc7539_dec_tv_template, - .count = RFC7539_DEC_TEST_VECTORS - }, + .enc = __VECS(rfc7539_enc_tv_template), + .dec = __VECS(rfc7539_dec_tv_template), } } }, { @@ -3767,71 +3277,47 @@ static const struct 
alg_test_desc alg_test_descs[] = { .test = alg_test_aead, .suite = { .aead = { - .enc = { - .vecs = rfc7539esp_enc_tv_template, - .count = RFC7539ESP_ENC_TEST_VECTORS - }, - .dec = { - .vecs = rfc7539esp_dec_tv_template, - .count = RFC7539ESP_DEC_TEST_VECTORS - }, + .enc = __VECS(rfc7539esp_enc_tv_template), + .dec = __VECS(rfc7539esp_dec_tv_template), } } }, { .alg = "rmd128", .test = alg_test_hash, .suite = { - .hash = { - .vecs = rmd128_tv_template, - .count = RMD128_TEST_VECTORS - } + .hash = __VECS(rmd128_tv_template) } }, { .alg = "rmd160", .test = alg_test_hash, .suite = { - .hash = { - .vecs = rmd160_tv_template, - .count = RMD160_TEST_VECTORS - } + .hash = __VECS(rmd160_tv_template) } }, { .alg = "rmd256", .test = alg_test_hash, .suite = { - .hash = { - .vecs = rmd256_tv_template, - .count = RMD256_TEST_VECTORS - } + .hash = __VECS(rmd256_tv_template) } }, { .alg = "rmd320", .test = alg_test_hash, .suite = { - .hash = { - .vecs = rmd320_tv_template, - .count = RMD320_TEST_VECTORS - } + .hash = __VECS(rmd320_tv_template) } }, { .alg = "rsa", .test = alg_test_akcipher, .fips_allowed = 1, .suite = { - .akcipher = { - .vecs = rsa_tv_template, - .count = RSA_TEST_VECTORS - } + .akcipher = __VECS(rsa_tv_template) } }, { .alg = "salsa20", .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = salsa20_stream_enc_tv_template, - .count = SALSA20_STREAM_ENC_TEST_VECTORS - } + .enc = __VECS(salsa20_stream_enc_tv_template) } } }, { @@ -3839,162 +3325,111 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_hash, .fips_allowed = 1, .suite = { - .hash = { - .vecs = sha1_tv_template, - .count = SHA1_TEST_VECTORS - } + .hash = __VECS(sha1_tv_template) } }, { .alg = "sha224", .test = alg_test_hash, .fips_allowed = 1, .suite = { - .hash = { - .vecs = sha224_tv_template, - .count = SHA224_TEST_VECTORS - } + .hash = __VECS(sha224_tv_template) } }, { .alg = "sha256", .test = alg_test_hash, .fips_allowed = 1, .suite = { - .hash = { - .vecs = sha256_tv_template, - .count = SHA256_TEST_VECTORS - } + .hash = __VECS(sha256_tv_template) } }, { .alg = "sha3-224", .test = alg_test_hash, .fips_allowed = 1, .suite = { - .hash = { - .vecs = sha3_224_tv_template, - .count = SHA3_224_TEST_VECTORS - } + .hash = __VECS(sha3_224_tv_template) } }, { .alg = "sha3-256", .test = alg_test_hash, .fips_allowed = 1, .suite = { - .hash = { - .vecs = sha3_256_tv_template, - .count = SHA3_256_TEST_VECTORS - } + .hash = __VECS(sha3_256_tv_template) } }, { .alg = "sha3-384", .test = alg_test_hash, .fips_allowed = 1, .suite = { - .hash = { - .vecs = sha3_384_tv_template, - .count = SHA3_384_TEST_VECTORS - } + .hash = __VECS(sha3_384_tv_template) } }, { .alg = "sha3-512", .test = alg_test_hash, .fips_allowed = 1, .suite = { - .hash = { - .vecs = sha3_512_tv_template, - .count = SHA3_512_TEST_VECTORS - } + .hash = __VECS(sha3_512_tv_template) } }, { .alg = "sha384", .test = alg_test_hash, .fips_allowed = 1, .suite = { - .hash = { - .vecs = sha384_tv_template, - .count = SHA384_TEST_VECTORS - } + .hash = __VECS(sha384_tv_template) } }, { .alg = "sha512", .test = alg_test_hash, .fips_allowed = 1, .suite = { - .hash = { - .vecs = sha512_tv_template, - .count = SHA512_TEST_VECTORS - } + .hash = __VECS(sha512_tv_template) } }, { .alg = "tgr128", .test = alg_test_hash, .suite = { - .hash = { - .vecs = tgr128_tv_template, - .count = TGR128_TEST_VECTORS - } + .hash = __VECS(tgr128_tv_template) } }, { .alg = "tgr160", .test = alg_test_hash, .suite = { - .hash = { - .vecs = tgr160_tv_template, - .count = 
TGR160_TEST_VECTORS - } + .hash = __VECS(tgr160_tv_template) } }, { .alg = "tgr192", .test = alg_test_hash, .suite = { - .hash = { - .vecs = tgr192_tv_template, - .count = TGR192_TEST_VECTORS - } + .hash = __VECS(tgr192_tv_template) } }, { .alg = "vmac(aes)", .test = alg_test_hash, .suite = { - .hash = { - .vecs = aes_vmac128_tv_template, - .count = VMAC_AES_TEST_VECTORS - } + .hash = __VECS(aes_vmac128_tv_template) } }, { .alg = "wp256", .test = alg_test_hash, .suite = { - .hash = { - .vecs = wp256_tv_template, - .count = WP256_TEST_VECTORS - } + .hash = __VECS(wp256_tv_template) } }, { .alg = "wp384", .test = alg_test_hash, .suite = { - .hash = { - .vecs = wp384_tv_template, - .count = WP384_TEST_VECTORS - } + .hash = __VECS(wp384_tv_template) } }, { .alg = "wp512", .test = alg_test_hash, .suite = { - .hash = { - .vecs = wp512_tv_template, - .count = WP512_TEST_VECTORS - } + .hash = __VECS(wp512_tv_template) } }, { .alg = "xcbc(aes)", .test = alg_test_hash, .suite = { - .hash = { - .vecs = aes_xcbc128_tv_template, - .count = XCBC_AES_TEST_VECTORS - } + .hash = __VECS(aes_xcbc128_tv_template) } }, { .alg = "xts(aes)", @@ -4002,14 +3437,8 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .suite = { .cipher = { - .enc = { - .vecs = aes_xts_enc_tv_template, - .count = AES_XTS_ENC_TEST_VECTORS - }, - .dec = { - .vecs = aes_xts_dec_tv_template, - .count = AES_XTS_DEC_TEST_VECTORS - } + .enc = __VECS(aes_xts_enc_tv_template), + .dec = __VECS(aes_xts_dec_tv_template) } } }, { @@ -4017,14 +3446,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = camellia_xts_enc_tv_template, - .count = CAMELLIA_XTS_ENC_TEST_VECTORS - }, - .dec = { - .vecs = camellia_xts_dec_tv_template, - .count = CAMELLIA_XTS_DEC_TEST_VECTORS - } + .enc = __VECS(camellia_xts_enc_tv_template), + .dec = __VECS(camellia_xts_dec_tv_template) } } }, { @@ -4032,14 +3455,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = cast6_xts_enc_tv_template, - .count = CAST6_XTS_ENC_TEST_VECTORS - }, - .dec = { - .vecs = cast6_xts_dec_tv_template, - .count = CAST6_XTS_DEC_TEST_VECTORS - } + .enc = __VECS(cast6_xts_enc_tv_template), + .dec = __VECS(cast6_xts_dec_tv_template) } } }, { @@ -4047,14 +3464,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = serpent_xts_enc_tv_template, - .count = SERPENT_XTS_ENC_TEST_VECTORS - }, - .dec = { - .vecs = serpent_xts_dec_tv_template, - .count = SERPENT_XTS_DEC_TEST_VECTORS - } + .enc = __VECS(serpent_xts_enc_tv_template), + .dec = __VECS(serpent_xts_dec_tv_template) } } }, { @@ -4062,14 +3473,8 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_skcipher, .suite = { .cipher = { - .enc = { - .vecs = tf_xts_enc_tv_template, - .count = TF_XTS_ENC_TEST_VECTORS - }, - .dec = { - .vecs = tf_xts_dec_tv_template, - .count = TF_XTS_DEC_TEST_VECTORS - } + .enc = __VECS(tf_xts_enc_tv_template), + .dec = __VECS(tf_xts_dec_tv_template) } } } diff --git a/crypto/testmgr.h b/crypto/testmgr.h index e64a4ef9d8ca..03f473116f78 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -151,11 +151,6 @@ static char zeroed_string[48]; /* * RSA test vectors. Borrowed from openSSL. 
*/ -#ifdef CONFIG_CRYPTO_FIPS -#define RSA_TEST_VECTORS 2 -#else -#define RSA_TEST_VECTORS 5 -#endif static struct akcipher_testvec rsa_tv_template[] = { { #ifndef CONFIG_CRYPTO_FIPS @@ -340,6 +335,7 @@ static struct akcipher_testvec rsa_tv_template[] = { .m_size = 8, .c_size = 256, .public_key_vec = true, +#ifndef CONFIG_CRYPTO_FIPS }, { .key = "\x30\x82\x09\x29" /* sequence of 2345 bytes */ @@ -538,11 +534,10 @@ static struct akcipher_testvec rsa_tv_template[] = { .key_len = 2349, .m_size = 8, .c_size = 512, +#endif } }; -#define DH_TEST_VECTORS 2 - struct kpp_testvec dh_tv_template[] = { { .secret = @@ -760,11 +755,6 @@ struct kpp_testvec dh_tv_template[] = { } }; -#ifdef CONFIG_CRYPTO_FIPS -#define ECDH_TEST_VECTORS 1 -#else -#define ECDH_TEST_VECTORS 2 -#endif struct kpp_testvec ecdh_tv_template[] = { { #ifndef CONFIG_CRYPTO_FIPS @@ -856,8 +846,6 @@ struct kpp_testvec ecdh_tv_template[] = { /* * MD4 test vectors from RFC1320 */ -#define MD4_TEST_VECTORS 7 - static struct hash_testvec md4_tv_template [] = { { .plaintext = "", @@ -899,7 +887,6 @@ static struct hash_testvec md4_tv_template [] = { }, }; -#define SHA3_224_TEST_VECTORS 3 static struct hash_testvec sha3_224_tv_template[] = { { .plaintext = "", @@ -925,7 +912,6 @@ static struct hash_testvec sha3_224_tv_template[] = { }, }; -#define SHA3_256_TEST_VECTORS 3 static struct hash_testvec sha3_256_tv_template[] = { { .plaintext = "", @@ -952,7 +938,6 @@ static struct hash_testvec sha3_256_tv_template[] = { }; -#define SHA3_384_TEST_VECTORS 3 static struct hash_testvec sha3_384_tv_template[] = { { .plaintext = "", @@ -985,7 +970,6 @@ static struct hash_testvec sha3_384_tv_template[] = { }; -#define SHA3_512_TEST_VECTORS 3 static struct hash_testvec sha3_512_tv_template[] = { { .plaintext = "", @@ -1027,8 +1011,6 @@ static struct hash_testvec sha3_512_tv_template[] = { /* * MD5 test vectors from RFC1321 */ -#define MD5_TEST_VECTORS 7 - static struct hash_testvec md5_tv_template[] = { { .digest = "\xd4\x1d\x8c\xd9\x8f\x00\xb2\x04" @@ -1073,8 +1055,6 @@ static struct hash_testvec md5_tv_template[] = { /* * RIPEMD-128 test vectors from ISO/IEC 10118-3:2004(E) */ -#define RMD128_TEST_VECTORS 10 - static struct hash_testvec rmd128_tv_template[] = { { .digest = "\xcd\xf2\x62\x13\xa1\x50\xdc\x3e" @@ -1137,8 +1117,6 @@ static struct hash_testvec rmd128_tv_template[] = { /* * RIPEMD-160 test vectors from ISO/IEC 10118-3:2004(E) */ -#define RMD160_TEST_VECTORS 10 - static struct hash_testvec rmd160_tv_template[] = { { .digest = "\x9c\x11\x85\xa5\xc5\xe9\xfc\x54\x61\x28" @@ -1201,8 +1179,6 @@ static struct hash_testvec rmd160_tv_template[] = { /* * RIPEMD-256 test vectors */ -#define RMD256_TEST_VECTORS 8 - static struct hash_testvec rmd256_tv_template[] = { { .digest = "\x02\xba\x4c\x4e\x5f\x8e\xcd\x18" @@ -1269,8 +1245,6 @@ static struct hash_testvec rmd256_tv_template[] = { /* * RIPEMD-320 test vectors */ -#define RMD320_TEST_VECTORS 8 - static struct hash_testvec rmd320_tv_template[] = { { .digest = "\x22\xd6\x5d\x56\x61\x53\x6c\xdc\x75\xc1" @@ -1334,36 +1308,49 @@ static struct hash_testvec rmd320_tv_template[] = { } }; -#define CRCT10DIF_TEST_VECTORS 3 static struct hash_testvec crct10dif_tv_template[] = { { - .plaintext = "abc", - .psize = 3, -#ifdef __LITTLE_ENDIAN - .digest = "\x3b\x44", -#else - .digest = "\x44\x3b", -#endif - }, { - .plaintext = "1234567890123456789012345678901234567890" - "123456789012345678901234567890123456789", - .psize = 79, -#ifdef __LITTLE_ENDIAN - .digest = "\x70\x4b", -#else - .digest = "\x4b\x70", -#endif - 
}, { - .plaintext = - "abcddddddddddddddddddddddddddddddddddddddddddddddddddddd", - .psize = 56, -#ifdef __LITTLE_ENDIAN - .digest = "\xe3\x9c", -#else - .digest = "\x9c\xe3", -#endif - .np = 2, - .tap = { 28, 28 } + .plaintext = "abc", + .psize = 3, + .digest = (u8 *)(u16 []){ 0x443b }, + }, { + .plaintext = "1234567890123456789012345678901234567890" + "123456789012345678901234567890123456789", + .psize = 79, + .digest = (u8 *)(u16 []){ 0x4b70 }, + .np = 2, + .tap = { 63, 16 }, + }, { + .plaintext = "abcdddddddddddddddddddddddddddddddddddddddd" + "ddddddddddddd", + .psize = 56, + .digest = (u8 *)(u16 []){ 0x9ce3 }, + .np = 8, + .tap = { 1, 2, 28, 7, 6, 5, 4, 3 }, + }, { + .plaintext = "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890" + "123456789012345678901234567890123456789", + .psize = 319, + .digest = (u8 *)(u16 []){ 0x44c6 }, + }, { + .plaintext = "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890" + "123456789012345678901234567890123456789", + .psize = 319, + .digest = (u8 *)(u16 []){ 0x44c6 }, + .np = 4, + .tap = { 1, 255, 57, 6 }, } }; @@ -1371,8 +1358,6 @@ static struct hash_testvec crct10dif_tv_template[] = { * SHA1 test vectors from from FIPS PUB 180-1 * Long vector from CAVS 5.0 */ -#define SHA1_TEST_VECTORS 6 - static struct hash_testvec sha1_tv_template[] = { { .plaintext = "", @@ -1563,8 +1548,6 @@ static struct hash_testvec sha1_tv_template[] = { /* * SHA224 test vectors from from FIPS PUB 180-2 */ -#define SHA224_TEST_VECTORS 5 - static struct hash_testvec sha224_tv_template[] = { { .plaintext = "", @@ -1737,8 +1720,6 @@ static struct hash_testvec sha224_tv_template[] = { /* * SHA256 test vectors from from NIST */ -#define SHA256_TEST_VECTORS 5 - static struct hash_testvec sha256_tv_template[] = { { .plaintext = "", @@ -1910,8 +1891,6 @@ static struct hash_testvec sha256_tv_template[] = { /* * SHA384 test vectors from from NIST and kerneli */ -#define SHA384_TEST_VECTORS 6 - static struct hash_testvec sha384_tv_template[] = { { .plaintext = "", @@ -2104,8 +2083,6 @@ static struct hash_testvec sha384_tv_template[] = { /* * SHA512 test vectors from from NIST and kerneli */ -#define SHA512_TEST_VECTORS 6 - static struct hash_testvec sha512_tv_template[] = { { .plaintext = "", @@ -2313,8 +2290,6 @@ static struct hash_testvec sha512_tv_template[] = { * by Vincent Rijmen and Paulo S. L. M. 
Barreto as part of the NESSIE * submission */ -#define WP512_TEST_VECTORS 8 - static struct hash_testvec wp512_tv_template[] = { { .plaintext = "", @@ -2411,8 +2386,6 @@ static struct hash_testvec wp512_tv_template[] = { }, }; -#define WP384_TEST_VECTORS 8 - static struct hash_testvec wp384_tv_template[] = { { .plaintext = "", @@ -2493,8 +2466,6 @@ static struct hash_testvec wp384_tv_template[] = { }, }; -#define WP256_TEST_VECTORS 8 - static struct hash_testvec wp256_tv_template[] = { { .plaintext = "", @@ -2562,8 +2533,6 @@ static struct hash_testvec wp256_tv_template[] = { /* * TIGER test vectors from Tiger website */ -#define TGR192_TEST_VECTORS 6 - static struct hash_testvec tgr192_tv_template[] = { { .plaintext = "", @@ -2607,8 +2576,6 @@ static struct hash_testvec tgr192_tv_template[] = { }, }; -#define TGR160_TEST_VECTORS 6 - static struct hash_testvec tgr160_tv_template[] = { { .plaintext = "", @@ -2652,8 +2619,6 @@ static struct hash_testvec tgr160_tv_template[] = { }, }; -#define TGR128_TEST_VECTORS 6 - static struct hash_testvec tgr128_tv_template[] = { { .plaintext = "", @@ -2691,8 +2656,6 @@ static struct hash_testvec tgr128_tv_template[] = { }, }; -#define GHASH_TEST_VECTORS 6 - static struct hash_testvec ghash_tv_template[] = { { @@ -2808,8 +2771,6 @@ static struct hash_testvec ghash_tv_template[] = * HMAC-MD5 test vectors from RFC2202 * (These need to be fixed to not use strlen). */ -#define HMAC_MD5_TEST_VECTORS 7 - static struct hash_testvec hmac_md5_tv_template[] = { { @@ -2890,8 +2851,6 @@ static struct hash_testvec hmac_md5_tv_template[] = /* * HMAC-RIPEMD128 test vectors from RFC2286 */ -#define HMAC_RMD128_TEST_VECTORS 7 - static struct hash_testvec hmac_rmd128_tv_template[] = { { .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", @@ -2971,8 +2930,6 @@ static struct hash_testvec hmac_rmd128_tv_template[] = { /* * HMAC-RIPEMD160 test vectors from RFC2286 */ -#define HMAC_RMD160_TEST_VECTORS 7 - static struct hash_testvec hmac_rmd160_tv_template[] = { { .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", @@ -3052,8 +3009,6 @@ static struct hash_testvec hmac_rmd160_tv_template[] = { /* * HMAC-SHA1 test vectors from RFC2202 */ -#define HMAC_SHA1_TEST_VECTORS 7 - static struct hash_testvec hmac_sha1_tv_template[] = { { .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", @@ -3135,8 +3090,6 @@ static struct hash_testvec hmac_sha1_tv_template[] = { /* * SHA224 HMAC test vectors from RFC4231 */ -#define HMAC_SHA224_TEST_VECTORS 4 - static struct hash_testvec hmac_sha224_tv_template[] = { { .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" @@ -3250,8 +3203,6 @@ static struct hash_testvec hmac_sha224_tv_template[] = { * HMAC-SHA256 test vectors from * draft-ietf-ipsec-ciph-sha-256-01.txt */ -#define HMAC_SHA256_TEST_VECTORS 10 - static struct hash_testvec hmac_sha256_tv_template[] = { { .key = "\x01\x02\x03\x04\x05\x06\x07\x08" @@ -3387,8 +3338,6 @@ static struct hash_testvec hmac_sha256_tv_template[] = { }, }; -#define CMAC_AES_TEST_VECTORS 6 - static struct hash_testvec aes_cmac128_tv_template[] = { { /* From NIST Special Publication 800-38B, AES-128 */ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" @@ -3464,7 +3413,65 @@ static struct hash_testvec aes_cmac128_tv_template[] = { } }; -#define CMAC_DES3_EDE_TEST_VECTORS 4 +static struct hash_testvec aes_cbcmac_tv_template[] = { + { + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .plaintext = 
"\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a", + .digest = "\x3a\xd7\x7b\xb4\x0d\x7a\x36\x60" + "\xa8\x9e\xca\xf3\x24\x66\xef\x97", + .psize = 16, + .ksize = 16, + }, { + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30", + .digest = "\x9d\x0d\xd0\x63\xfb\xcb\x24\x43" + "\xf8\xf2\x76\x03\xac\x39\xb0\x9d", + .psize = 33, + .ksize = 16, + .np = 2, + .tap = { 7, 26 }, + }, { + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37", + .digest = "\xc0\x71\x73\xb8\xa0\x2c\x11\x7c" + "\xaf\xdc\xb2\xf8\x89\x32\xa3\x3a", + .psize = 63, + .ksize = 16, + }, { + .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe" + "\x2b\x73\xae\xf0\x85\x7d\x77\x81" + "\x1f\x35\x2c\x07\x3b\x61\x08\xd7" + "\x2d\x98\x10\xa3\x09\x14\xdf\xf4", + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10" + "\x1c", + .digest = "\x6a\x4e\xdb\x21\x47\x51\xdf\x4f" + "\xa8\x4d\x4c\x10\x3b\x72\x7d\xd6", + .psize = 65, + .ksize = 32, + } +}; static struct hash_testvec des3_ede_cmac64_tv_template[] = { /* @@ -3512,8 +3519,6 @@ static struct hash_testvec des3_ede_cmac64_tv_template[] = { } }; -#define XCBC_AES_TEST_VECTORS 6 - static struct hash_testvec aes_xcbc128_tv_template[] = { { .key = "\x00\x01\x02\x03\x04\x05\x06\x07" @@ -3580,7 +3585,6 @@ static struct hash_testvec aes_xcbc128_tv_template[] = { } }; -#define VMAC_AES_TEST_VECTORS 11 static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01', '\x02', '\x03', '\x02', '\x02', '\x02', '\x04', '\x01', '\x07', @@ -3687,8 +3691,6 @@ static struct hash_testvec aes_vmac128_tv_template[] = { * SHA384 HMAC test vectors from RFC4231 */ -#define HMAC_SHA384_TEST_VECTORS 4 - static struct hash_testvec hmac_sha384_tv_template[] = { { .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" @@ -3787,8 +3789,6 @@ static struct hash_testvec hmac_sha384_tv_template[] = { * SHA512 HMAC test vectors from RFC4231 */ -#define HMAC_SHA512_TEST_VECTORS 4 - static struct hash_testvec hmac_sha512_tv_template[] = { { .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" @@ -3894,8 +3894,6 @@ static struct hash_testvec hmac_sha512_tv_template[] = { }, }; -#define HMAC_SHA3_224_TEST_VECTORS 4 - static struct hash_testvec hmac_sha3_224_tv_template[] = { { .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" @@ -3985,8 +3983,6 @@ static struct hash_testvec hmac_sha3_224_tv_template[] = { }, }; -#define HMAC_SHA3_256_TEST_VECTORS 4 - static struct hash_testvec hmac_sha3_256_tv_template[] = { { .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" @@ -4076,8 +4072,6 @@ static struct hash_testvec hmac_sha3_256_tv_template[] = { }, }; -#define HMAC_SHA3_384_TEST_VECTORS 4 - static struct hash_testvec hmac_sha3_384_tv_template[] = { { .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" @@ -4175,8 +4169,6 @@ static struct hash_testvec hmac_sha3_384_tv_template[] = { }, }; -#define 
HMAC_SHA3_512_TEST_VECTORS 4 - static struct hash_testvec hmac_sha3_512_tv_template[] = { { .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" @@ -4286,8 +4278,6 @@ static struct hash_testvec hmac_sha3_512_tv_template[] = { * Poly1305 test vectors from RFC7539 A.3. */ -#define POLY1305_TEST_VECTORS 11 - static struct hash_testvec poly1305_tv_template[] = { { /* Test Vector #1 */ .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00" @@ -4533,19 +4523,6 @@ static struct hash_testvec poly1305_tv_template[] = { /* * DES test vectors. */ -#define DES_ENC_TEST_VECTORS 11 -#define DES_DEC_TEST_VECTORS 5 -#define DES_CBC_ENC_TEST_VECTORS 6 -#define DES_CBC_DEC_TEST_VECTORS 5 -#define DES_CTR_ENC_TEST_VECTORS 2 -#define DES_CTR_DEC_TEST_VECTORS 2 -#define DES3_EDE_ENC_TEST_VECTORS 4 -#define DES3_EDE_DEC_TEST_VECTORS 4 -#define DES3_EDE_CBC_ENC_TEST_VECTORS 2 -#define DES3_EDE_CBC_DEC_TEST_VECTORS 2 -#define DES3_EDE_CTR_ENC_TEST_VECTORS 2 -#define DES3_EDE_CTR_DEC_TEST_VECTORS 2 - static struct cipher_testvec des_enc_tv_template[] = { { /* From Applied Cryptography */ .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", @@ -6606,13 +6583,6 @@ static struct cipher_testvec des3_ede_ctr_dec_tv_template[] = { /* * Blowfish test vectors. */ -#define BF_ENC_TEST_VECTORS 7 -#define BF_DEC_TEST_VECTORS 7 -#define BF_CBC_ENC_TEST_VECTORS 2 -#define BF_CBC_DEC_TEST_VECTORS 2 -#define BF_CTR_ENC_TEST_VECTORS 2 -#define BF_CTR_DEC_TEST_VECTORS 2 - static struct cipher_testvec bf_enc_tv_template[] = { { /* DES test vectors from OpenSSL */ .key = "\x00\x00\x00\x00\x00\x00\x00\x00", @@ -8138,17 +8108,6 @@ static struct cipher_testvec bf_ctr_dec_tv_template[] = { /* * Twofish test vectors. */ -#define TF_ENC_TEST_VECTORS 4 -#define TF_DEC_TEST_VECTORS 4 -#define TF_CBC_ENC_TEST_VECTORS 5 -#define TF_CBC_DEC_TEST_VECTORS 5 -#define TF_CTR_ENC_TEST_VECTORS 2 -#define TF_CTR_DEC_TEST_VECTORS 2 -#define TF_LRW_ENC_TEST_VECTORS 8 -#define TF_LRW_DEC_TEST_VECTORS 8 -#define TF_XTS_ENC_TEST_VECTORS 5 -#define TF_XTS_DEC_TEST_VECTORS 5 - static struct cipher_testvec tf_enc_tv_template[] = { { .key = zeroed_string, @@ -10867,24 +10826,6 @@ static struct cipher_testvec tf_xts_dec_tv_template[] = { * Serpent test vectors. These are backwards because Serpent writes * octet sequences in right-to-left mode. 
*/ -#define SERPENT_ENC_TEST_VECTORS 5 -#define SERPENT_DEC_TEST_VECTORS 5 - -#define TNEPRES_ENC_TEST_VECTORS 4 -#define TNEPRES_DEC_TEST_VECTORS 4 - -#define SERPENT_CBC_ENC_TEST_VECTORS 1 -#define SERPENT_CBC_DEC_TEST_VECTORS 1 - -#define SERPENT_CTR_ENC_TEST_VECTORS 2 -#define SERPENT_CTR_DEC_TEST_VECTORS 2 - -#define SERPENT_LRW_ENC_TEST_VECTORS 8 -#define SERPENT_LRW_DEC_TEST_VECTORS 8 - -#define SERPENT_XTS_ENC_TEST_VECTORS 5 -#define SERPENT_XTS_DEC_TEST_VECTORS 5 - static struct cipher_testvec serpent_enc_tv_template[] = { { .input = "\x00\x01\x02\x03\x04\x05\x06\x07" @@ -13623,17 +13564,6 @@ static struct cipher_testvec serpent_xts_dec_tv_template[] = { }; /* Cast6 test vectors from RFC 2612 */ -#define CAST6_ENC_TEST_VECTORS 4 -#define CAST6_DEC_TEST_VECTORS 4 -#define CAST6_CBC_ENC_TEST_VECTORS 1 -#define CAST6_CBC_DEC_TEST_VECTORS 1 -#define CAST6_CTR_ENC_TEST_VECTORS 2 -#define CAST6_CTR_DEC_TEST_VECTORS 2 -#define CAST6_LRW_ENC_TEST_VECTORS 1 -#define CAST6_LRW_DEC_TEST_VECTORS 1 -#define CAST6_XTS_ENC_TEST_VECTORS 1 -#define CAST6_XTS_DEC_TEST_VECTORS 1 - static struct cipher_testvec cast6_enc_tv_template[] = { { .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c" @@ -15168,38 +15098,6 @@ static struct cipher_testvec cast6_xts_dec_tv_template[] = { /* * AES test vectors. */ -#define AES_ENC_TEST_VECTORS 4 -#define AES_DEC_TEST_VECTORS 4 -#define AES_CBC_ENC_TEST_VECTORS 5 -#define AES_CBC_DEC_TEST_VECTORS 5 -#define HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS 2 -#define HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS 2 -#define HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC 2 -#define HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC 2 -#define HMAC_SHA1_AES_CBC_ENC_TEST_VEC 7 -#define HMAC_SHA256_AES_CBC_ENC_TEST_VEC 7 -#define HMAC_SHA512_AES_CBC_ENC_TEST_VEC 7 -#define AES_LRW_ENC_TEST_VECTORS 8 -#define AES_LRW_DEC_TEST_VECTORS 8 -#define AES_XTS_ENC_TEST_VECTORS 5 -#define AES_XTS_DEC_TEST_VECTORS 5 -#define AES_CTR_ENC_TEST_VECTORS 5 -#define AES_CTR_DEC_TEST_VECTORS 5 -#define AES_OFB_ENC_TEST_VECTORS 1 -#define AES_OFB_DEC_TEST_VECTORS 1 -#define AES_CTR_3686_ENC_TEST_VECTORS 7 -#define AES_CTR_3686_DEC_TEST_VECTORS 6 -#define AES_GCM_ENC_TEST_VECTORS 9 -#define AES_GCM_DEC_TEST_VECTORS 8 -#define AES_GCM_4106_ENC_TEST_VECTORS 23 -#define AES_GCM_4106_DEC_TEST_VECTORS 23 -#define AES_GCM_4543_ENC_TEST_VECTORS 1 -#define AES_GCM_4543_DEC_TEST_VECTORS 2 -#define AES_CCM_ENC_TEST_VECTORS 8 -#define AES_CCM_DEC_TEST_VECTORS 7 -#define AES_CCM_4309_ENC_TEST_VECTORS 7 -#define AES_CCM_4309_DEC_TEST_VECTORS 10 - static struct cipher_testvec aes_enc_tv_template[] = { { /* From FIPS-197 */ .key = "\x00\x01\x02\x03\x04\x05\x06\x07" @@ -17055,8 +16953,6 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { }, }; -#define HMAC_SHA1_DES_CBC_ENC_TEST_VEC 1 - static struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN @@ -17116,8 +17012,6 @@ static struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = { }, }; -#define HMAC_SHA224_DES_CBC_ENC_TEST_VEC 1 - static struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN @@ -17177,8 +17071,6 @@ static struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = { }, }; -#define HMAC_SHA256_DES_CBC_ENC_TEST_VEC 1 - static struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN @@ -17240,8 +17132,6 @@ static struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = { }, }; -#define 
HMAC_SHA384_DES_CBC_ENC_TEST_VEC 1 - static struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN @@ -17307,8 +17197,6 @@ static struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = { }, }; -#define HMAC_SHA512_DES_CBC_ENC_TEST_VEC 1 - static struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN @@ -17378,8 +17266,6 @@ static struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = { }, }; -#define HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC 1 - static struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN @@ -17441,8 +17327,6 @@ static struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = { }, }; -#define HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC 1 - static struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN @@ -17504,8 +17388,6 @@ static struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = { }, }; -#define HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC 1 - static struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN @@ -17569,8 +17451,6 @@ static struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = { }, }; -#define HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC 1 - static struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN @@ -17638,8 +17518,6 @@ static struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = { }, }; -#define HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC 1 - static struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN @@ -22813,7 +22691,7 @@ static struct aead_testvec aes_ccm_enc_tv_template[] = { "\x09\x75\x9a\x9b\x3c\x9b\x27\x39", .klen = 32, .iv = "\x03\xf9\xd9\x4e\x63\xb5\x3d\x9d" - "\x43\xf6\x1e\x50", + "\x43\xf6\x1e\x50\0\0\0\0", .assoc = "\x57\xf5\x6b\x8b\x57\x5c\x3d\x3b" "\x13\x02\x01\x0c\x83\x4c\x96\x35" "\x8e\xd6\x39\xcf\x7d\x14\x9b\x94" @@ -24420,8 +24298,6 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = { /* * ChaCha20-Poly1305 AEAD test vectors from RFC7539 2.8.2./A.5. */ -#define RFC7539_ENC_TEST_VECTORS 2 -#define RFC7539_DEC_TEST_VECTORS 2 static struct aead_testvec rfc7539_enc_tv_template[] = { { .key = "\x80\x81\x82\x83\x84\x85\x86\x87" @@ -24689,8 +24565,6 @@ static struct aead_testvec rfc7539_dec_tv_template[] = { /* * draft-irtf-cfrg-chacha20-poly1305 */ -#define RFC7539ESP_DEC_TEST_VECTORS 1 -#define RFC7539ESP_ENC_TEST_VECTORS 1 static struct aead_testvec rfc7539esp_enc_tv_template[] = { { .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a" @@ -24913,8 +24787,6 @@ static struct cipher_testvec aes_kw_dec_tv_template[] = { * http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf * Only AES-128 is supported at this time. 
*/ -#define ANSI_CPRNG_AES_TEST_VECTORS 6 - static struct cprng_testvec ansi_cprng_aes_tv_template[] = { { .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42" @@ -25832,13 +25704,6 @@ static struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = { }; /* Cast5 test vectors from RFC 2144 */ -#define CAST5_ENC_TEST_VECTORS 4 -#define CAST5_DEC_TEST_VECTORS 4 -#define CAST5_CBC_ENC_TEST_VECTORS 1 -#define CAST5_CBC_DEC_TEST_VECTORS 1 -#define CAST5_CTR_ENC_TEST_VECTORS 2 -#define CAST5_CTR_DEC_TEST_VECTORS 2 - static struct cipher_testvec cast5_enc_tv_template[] = { { .key = "\x01\x23\x45\x67\x12\x34\x56\x78" @@ -26742,9 +26607,6 @@ static struct cipher_testvec cast5_ctr_dec_tv_template[] = { /* * ARC4 test vectors from OpenSSL */ -#define ARC4_ENC_TEST_VECTORS 7 -#define ARC4_DEC_TEST_VECTORS 7 - static struct cipher_testvec arc4_enc_tv_template[] = { { .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", @@ -26880,9 +26742,6 @@ static struct cipher_testvec arc4_dec_tv_template[] = { /* * TEA test vectors */ -#define TEA_ENC_TEST_VECTORS 4 -#define TEA_DEC_TEST_VECTORS 4 - static struct cipher_testvec tea_enc_tv_template[] = { { .key = zeroed_string, @@ -26972,9 +26831,6 @@ static struct cipher_testvec tea_dec_tv_template[] = { /* * XTEA test vectors */ -#define XTEA_ENC_TEST_VECTORS 4 -#define XTEA_DEC_TEST_VECTORS 4 - static struct cipher_testvec xtea_enc_tv_template[] = { { .key = zeroed_string, @@ -27064,9 +26920,6 @@ static struct cipher_testvec xtea_dec_tv_template[] = { /* * KHAZAD test vectors. */ -#define KHAZAD_ENC_TEST_VECTORS 5 -#define KHAZAD_DEC_TEST_VECTORS 5 - static struct cipher_testvec khazad_enc_tv_template[] = { { .key = "\x80\x00\x00\x00\x00\x00\x00\x00" @@ -27163,11 +27016,6 @@ static struct cipher_testvec khazad_dec_tv_template[] = { * Anubis test vectors. */ -#define ANUBIS_ENC_TEST_VECTORS 5 -#define ANUBIS_DEC_TEST_VECTORS 5 -#define ANUBIS_CBC_ENC_TEST_VECTORS 2 -#define ANUBIS_CBC_DEC_TEST_VECTORS 2 - static struct cipher_testvec anubis_enc_tv_template[] = { { .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe" @@ -27367,9 +27215,6 @@ static struct cipher_testvec anubis_cbc_dec_tv_template[] = { /* * XETA test vectors */ -#define XETA_ENC_TEST_VECTORS 4 -#define XETA_DEC_TEST_VECTORS 4 - static struct cipher_testvec xeta_enc_tv_template[] = { { .key = zeroed_string, @@ -27459,9 +27304,6 @@ static struct cipher_testvec xeta_dec_tv_template[] = { /* * FCrypt test vectors */ -#define FCRYPT_ENC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_enc_tv_template) -#define FCRYPT_DEC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_dec_tv_template) - static struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = { { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */ .key = "\x00\x00\x00\x00\x00\x00\x00\x00", @@ -27587,17 +27429,6 @@ static struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = { /* * CAMELLIA test vectors. 
*/ -#define CAMELLIA_ENC_TEST_VECTORS 4 -#define CAMELLIA_DEC_TEST_VECTORS 4 -#define CAMELLIA_CBC_ENC_TEST_VECTORS 3 -#define CAMELLIA_CBC_DEC_TEST_VECTORS 3 -#define CAMELLIA_CTR_ENC_TEST_VECTORS 2 -#define CAMELLIA_CTR_DEC_TEST_VECTORS 2 -#define CAMELLIA_LRW_ENC_TEST_VECTORS 8 -#define CAMELLIA_LRW_DEC_TEST_VECTORS 8 -#define CAMELLIA_XTS_ENC_TEST_VECTORS 5 -#define CAMELLIA_XTS_DEC_TEST_VECTORS 5 - static struct cipher_testvec camellia_enc_tv_template[] = { { .key = "\x01\x23\x45\x67\x89\xab\xcd\xef" @@ -31317,9 +31148,6 @@ static struct cipher_testvec camellia_xts_dec_tv_template[] = { /* * SEED test vectors */ -#define SEED_ENC_TEST_VECTORS 4 -#define SEED_DEC_TEST_VECTORS 4 - static struct cipher_testvec seed_enc_tv_template[] = { { .key = zeroed_string, @@ -31404,7 +31232,6 @@ static struct cipher_testvec seed_dec_tv_template[] = { } }; -#define SALSA20_STREAM_ENC_TEST_VECTORS 5 static struct cipher_testvec salsa20_stream_enc_tv_template[] = { /* * Testvectors from verified.test-vectors submitted to ECRYPT. @@ -32574,7 +32401,6 @@ static struct cipher_testvec salsa20_stream_enc_tv_template[] = { }, }; -#define CHACHA20_ENC_TEST_VECTORS 4 static struct cipher_testvec chacha20_enc_tv_template[] = { { /* RFC7539 A.2. Test Vector #1 */ .key = "\x00\x00\x00\x00\x00\x00\x00\x00" @@ -33086,8 +32912,6 @@ static struct cipher_testvec chacha20_enc_tv_template[] = { /* * CTS (Cipher Text Stealing) mode tests */ -#define CTS_MODE_ENC_TEST_VECTORS 6 -#define CTS_MODE_DEC_TEST_VECTORS 6 static struct cipher_testvec cts_mode_enc_tv_template[] = { { /* from rfc3962 */ .klen = 16, @@ -33308,9 +33132,6 @@ struct comp_testvec { * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL. */ -#define DEFLATE_COMP_TEST_VECTORS 2 -#define DEFLATE_DECOMP_TEST_VECTORS 2 - static struct comp_testvec deflate_comp_tv_template[] = { { .inlen = 70, @@ -33386,9 +33207,6 @@ static struct comp_testvec deflate_decomp_tv_template[] = { /* * LZO test vectors (null-terminated strings). */ -#define LZO_COMP_TEST_VECTORS 2 -#define LZO_DECOMP_TEST_VECTORS 2 - static struct comp_testvec lzo_comp_tv_template[] = { { .inlen = 70, @@ -33520,8 +33338,6 @@ static struct hash_testvec michael_mic_tv_template[] = { /* * CRC32 test vectors */ -#define CRC32_TEST_VECTORS 14 - static struct hash_testvec crc32_tv_template[] = { { .key = "\x87\xa9\xcb\xed", @@ -33954,8 +33770,6 @@ static struct hash_testvec crc32_tv_template[] = { /* * CRC32C test vectors */ -#define CRC32C_TEST_VECTORS 15 - static struct hash_testvec crc32c_tv_template[] = { { .psize = 0, @@ -34392,8 +34206,6 @@ static struct hash_testvec crc32c_tv_template[] = { /* * Blakcifn CRC test vectors */ -#define BFIN_CRC_TEST_VECTORS 6 - static struct hash_testvec bfin_crc_tv_template[] = { { .psize = 0, @@ -34479,69 +34291,125 @@ static struct hash_testvec bfin_crc_tv_template[] = { }; -#define LZ4_COMP_TEST_VECTORS 1 -#define LZ4_DECOMP_TEST_VECTORS 1 - static struct comp_testvec lz4_comp_tv_template[] = { { - .inlen = 70, - .outlen = 45, - .input = "Join us now and share the software " - "Join us now and share the software ", - .output = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75" - "\x73\x20\x6e\x6f\x77\x20\x61\x6e" - "\x64\x20\x73\x68\x61\x72\x65\x20" - "\x74\x68\x65\x20\x73\x6f\x66\x74" - "\x77\x0d\x00\x0f\x23\x00\x0b\x50" - "\x77\x61\x72\x65\x20", + .inlen = 255, + .outlen = 218, + .input = "LZ4 is lossless compression algorithm, providing" + " compression speed at 400 MB/s per core, scalable " + "with multi-cores CPU. 
It features an extremely fast " + "decoder, with speed in multiple GB/s per core, " + "typically reaching RAM speed limits on multi-core " + "systems.", + .output = "\xf9\x21\x4c\x5a\x34\x20\x69\x73\x20\x6c\x6f\x73\x73" + "\x6c\x65\x73\x73\x20\x63\x6f\x6d\x70\x72\x65\x73\x73" + "\x69\x6f\x6e\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d" + "\x2c\x20\x70\x72\x6f\x76\x69\x64\x69\x6e\x67\x21\x00" + "\xf0\x21\x73\x70\x65\x65\x64\x20\x61\x74\x20\x34\x30" + "\x30\x20\x4d\x42\x2f\x73\x20\x70\x65\x72\x20\x63\x6f" + "\x72\x65\x2c\x20\x73\x63\x61\x6c\x61\x62\x6c\x65\x20" + "\x77\x69\x74\x68\x20\x6d\x75\x6c\x74\x69\x2d\x1a\x00" + "\xf0\x00\x73\x20\x43\x50\x55\x2e\x20\x49\x74\x20\x66" + "\x65\x61\x74\x75\x11\x00\xf2\x0b\x61\x6e\x20\x65\x78" + "\x74\x72\x65\x6d\x65\x6c\x79\x20\x66\x61\x73\x74\x20" + "\x64\x65\x63\x6f\x64\x65\x72\x2c\x3d\x00\x02\x67\x00" + "\x22\x69\x6e\x46\x00\x5a\x70\x6c\x65\x20\x47\x6c\x00" + "\xf0\x00\x74\x79\x70\x69\x63\x61\x6c\x6c\x79\x20\x72" + "\x65\x61\x63\x68\xa7\x00\x33\x52\x41\x4d\x38\x00\x83" + "\x6c\x69\x6d\x69\x74\x73\x20\x6f\x3f\x00\x01\x85\x00" + "\x90\x20\x73\x79\x73\x74\x65\x6d\x73\x2e", + }, }; static struct comp_testvec lz4_decomp_tv_template[] = { { - .inlen = 45, - .outlen = 70, - .input = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75" - "\x73\x20\x6e\x6f\x77\x20\x61\x6e" - "\x64\x20\x73\x68\x61\x72\x65\x20" - "\x74\x68\x65\x20\x73\x6f\x66\x74" - "\x77\x0d\x00\x0f\x23\x00\x0b\x50" - "\x77\x61\x72\x65\x20", - .output = "Join us now and share the software " - "Join us now and share the software ", + .inlen = 218, + .outlen = 255, + .input = "\xf9\x21\x4c\x5a\x34\x20\x69\x73\x20\x6c\x6f\x73\x73" + "\x6c\x65\x73\x73\x20\x63\x6f\x6d\x70\x72\x65\x73\x73" + "\x69\x6f\x6e\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d" + "\x2c\x20\x70\x72\x6f\x76\x69\x64\x69\x6e\x67\x21\x00" + "\xf0\x21\x73\x70\x65\x65\x64\x20\x61\x74\x20\x34\x30" + "\x30\x20\x4d\x42\x2f\x73\x20\x70\x65\x72\x20\x63\x6f" + "\x72\x65\x2c\x20\x73\x63\x61\x6c\x61\x62\x6c\x65\x20" + "\x77\x69\x74\x68\x20\x6d\x75\x6c\x74\x69\x2d\x1a\x00" + "\xf0\x00\x73\x20\x43\x50\x55\x2e\x20\x49\x74\x20\x66" + "\x65\x61\x74\x75\x11\x00\xf2\x0b\x61\x6e\x20\x65\x78" + "\x74\x72\x65\x6d\x65\x6c\x79\x20\x66\x61\x73\x74\x20" + "\x64\x65\x63\x6f\x64\x65\x72\x2c\x3d\x00\x02\x67\x00" + "\x22\x69\x6e\x46\x00\x5a\x70\x6c\x65\x20\x47\x6c\x00" + "\xf0\x00\x74\x79\x70\x69\x63\x61\x6c\x6c\x79\x20\x72" + "\x65\x61\x63\x68\xa7\x00\x33\x52\x41\x4d\x38\x00\x83" + "\x6c\x69\x6d\x69\x74\x73\x20\x6f\x3f\x00\x01\x85\x00" + "\x90\x20\x73\x79\x73\x74\x65\x6d\x73\x2e", + .output = "LZ4 is lossless compression algorithm, providing" + " compression speed at 400 MB/s per core, scalable " + "with multi-cores CPU. It features an extremely fast " + "decoder, with speed in multiple GB/s per core, " + "typically reaching RAM speed limits on multi-core " + "systems.", }, }; -#define LZ4HC_COMP_TEST_VECTORS 1 -#define LZ4HC_DECOMP_TEST_VECTORS 1 - static struct comp_testvec lz4hc_comp_tv_template[] = { { - .inlen = 70, - .outlen = 45, - .input = "Join us now and share the software " - "Join us now and share the software ", - .output = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75" - "\x73\x20\x6e\x6f\x77\x20\x61\x6e" - "\x64\x20\x73\x68\x61\x72\x65\x20" - "\x74\x68\x65\x20\x73\x6f\x66\x74" - "\x77\x0d\x00\x0f\x23\x00\x0b\x50" - "\x77\x61\x72\x65\x20", + .inlen = 255, + .outlen = 216, + .input = "LZ4 is lossless compression algorithm, providing" + " compression speed at 400 MB/s per core, scalable " + "with multi-cores CPU. 
It features an extremely fast " + "decoder, with speed in multiple GB/s per core, " + "typically reaching RAM speed limits on multi-core " + "systems.", + .output = "\xf9\x21\x4c\x5a\x34\x20\x69\x73\x20\x6c\x6f\x73\x73" + "\x6c\x65\x73\x73\x20\x63\x6f\x6d\x70\x72\x65\x73\x73" + "\x69\x6f\x6e\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d" + "\x2c\x20\x70\x72\x6f\x76\x69\x64\x69\x6e\x67\x21\x00" + "\xf0\x21\x73\x70\x65\x65\x64\x20\x61\x74\x20\x34\x30" + "\x30\x20\x4d\x42\x2f\x73\x20\x70\x65\x72\x20\x63\x6f" + "\x72\x65\x2c\x20\x73\x63\x61\x6c\x61\x62\x6c\x65\x20" + "\x77\x69\x74\x68\x20\x6d\x75\x6c\x74\x69\x2d\x1a\x00" + "\xf0\x00\x73\x20\x43\x50\x55\x2e\x20\x49\x74\x20\x66" + "\x65\x61\x74\x75\x11\x00\xf2\x0b\x61\x6e\x20\x65\x78" + "\x74\x72\x65\x6d\x65\x6c\x79\x20\x66\x61\x73\x74\x20" + "\x64\x65\x63\x6f\x64\x65\x72\x2c\x3d\x00\x02\x67\x00" + "\x22\x69\x6e\x46\x00\x5a\x70\x6c\x65\x20\x47\x6c\x00" + "\xf0\x00\x74\x79\x70\x69\x63\x61\x6c\x6c\x79\x20\x72" + "\x65\x61\x63\x68\xa7\x00\x33\x52\x41\x4d\x38\x00\x97" + "\x6c\x69\x6d\x69\x74\x73\x20\x6f\x6e\x85\x00\x90\x20" + "\x73\x79\x73\x74\x65\x6d\x73\x2e", + }, }; static struct comp_testvec lz4hc_decomp_tv_template[] = { { - .inlen = 45, - .outlen = 70, - .input = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75" - "\x73\x20\x6e\x6f\x77\x20\x61\x6e" - "\x64\x20\x73\x68\x61\x72\x65\x20" - "\x74\x68\x65\x20\x73\x6f\x66\x74" - "\x77\x0d\x00\x0f\x23\x00\x0b\x50" - "\x77\x61\x72\x65\x20", - .output = "Join us now and share the software " - "Join us now and share the software ", + .inlen = 216, + .outlen = 255, + .input = "\xf9\x21\x4c\x5a\x34\x20\x69\x73\x20\x6c\x6f\x73\x73" + "\x6c\x65\x73\x73\x20\x63\x6f\x6d\x70\x72\x65\x73\x73" + "\x69\x6f\x6e\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d" + "\x2c\x20\x70\x72\x6f\x76\x69\x64\x69\x6e\x67\x21\x00" + "\xf0\x21\x73\x70\x65\x65\x64\x20\x61\x74\x20\x34\x30" + "\x30\x20\x4d\x42\x2f\x73\x20\x70\x65\x72\x20\x63\x6f" + "\x72\x65\x2c\x20\x73\x63\x61\x6c\x61\x62\x6c\x65\x20" + "\x77\x69\x74\x68\x20\x6d\x75\x6c\x74\x69\x2d\x1a\x00" + "\xf0\x00\x73\x20\x43\x50\x55\x2e\x20\x49\x74\x20\x66" + "\x65\x61\x74\x75\x11\x00\xf2\x0b\x61\x6e\x20\x65\x78" + "\x74\x72\x65\x6d\x65\x6c\x79\x20\x66\x61\x73\x74\x20" + "\x64\x65\x63\x6f\x64\x65\x72\x2c\x3d\x00\x02\x67\x00" + "\x22\x69\x6e\x46\x00\x5a\x70\x6c\x65\x20\x47\x6c\x00" + "\xf0\x00\x74\x79\x70\x69\x63\x61\x6c\x6c\x79\x20\x72" + "\x65\x61\x63\x68\xa7\x00\x33\x52\x41\x4d\x38\x00\x97" + "\x6c\x69\x6d\x69\x74\x73\x20\x6f\x6e\x85\x00\x90\x20" + "\x73\x79\x73\x74\x65\x6d\x73\x2e", + .output = "LZ4 is lossless compression algorithm, providing" + " compression speed at 400 MB/s per core, scalable " + "with multi-cores CPU. It features an extremely fast " + "decoder, with speed in multiple GB/s per core, " + "typically reaching RAM speed limits on multi-core " + "systems.", }, }; diff --git a/crypto/xts.c b/crypto/xts.c index 305343f22a02..baeb34dd8582 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -13,7 +13,8 @@ * Software Foundation; either version 2 of the License, or (at your option) * any later version. 
*/ -#include <crypto/algapi.h> +#include <crypto/internal/skcipher.h> +#include <crypto/scatterwalk.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> @@ -25,140 +26,320 @@ #include <crypto/b128ops.h> #include <crypto/gf128mul.h> +#define XTS_BUFFER_SIZE 128u + struct priv { - struct crypto_cipher *child; + struct crypto_skcipher *child; struct crypto_cipher *tweak; }; -static int setkey(struct crypto_tfm *parent, const u8 *key, +struct xts_instance_ctx { + struct crypto_skcipher_spawn spawn; + char name[CRYPTO_MAX_ALG_NAME]; +}; + +struct rctx { + be128 buf[XTS_BUFFER_SIZE / sizeof(be128)]; + + be128 t; + + be128 *ext; + + struct scatterlist srcbuf[2]; + struct scatterlist dstbuf[2]; + struct scatterlist *src; + struct scatterlist *dst; + + unsigned int left; + + struct skcipher_request subreq; +}; + +static int setkey(struct crypto_skcipher *parent, const u8 *key, unsigned int keylen) { - struct priv *ctx = crypto_tfm_ctx(parent); - struct crypto_cipher *child = ctx->tweak; + struct priv *ctx = crypto_skcipher_ctx(parent); + struct crypto_skcipher *child; + struct crypto_cipher *tweak; int err; - err = xts_check_key(parent, key, keylen); + err = xts_verify_key(parent, key, keylen); if (err) return err; + keylen /= 2; + /* we need two cipher instances: one to compute the initial 'tweak' * by encrypting the IV (usually the 'plain' iv) and the other * one to encrypt and decrypt the data */ /* tweak cipher, uses Key2 i.e. the second half of *key */ - crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & + tweak = ctx->tweak; + crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK); + crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) & CRYPTO_TFM_REQ_MASK); - err = crypto_cipher_setkey(child, key + keylen/2, keylen/2); + err = crypto_cipher_setkey(tweak, key + keylen, keylen); + crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(tweak) & + CRYPTO_TFM_RES_MASK); if (err) return err; - crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); - + /* data cipher, uses Key1 i.e. the first half of *key */ child = ctx->child; + crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_skcipher_setkey(child, key, keylen); + crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) & + CRYPTO_TFM_RES_MASK); - /* data cipher, uses Key1 i.e. 
the first half of *key */ - crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & - CRYPTO_TFM_REQ_MASK); - err = crypto_cipher_setkey(child, key, keylen/2); - if (err) - return err; + return err; +} - crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); +static int post_crypt(struct skcipher_request *req) +{ + struct rctx *rctx = skcipher_request_ctx(req); + be128 *buf = rctx->ext ?: rctx->buf; + struct skcipher_request *subreq; + const int bs = XTS_BLOCK_SIZE; + struct skcipher_walk w; + struct scatterlist *sg; + unsigned offset; + int err; - return 0; -} + subreq = &rctx->subreq; + err = skcipher_walk_virt(&w, subreq, false); -struct sinfo { - be128 *t; - struct crypto_tfm *tfm; - void (*fn)(struct crypto_tfm *, u8 *, const u8 *); -}; + while (w.nbytes) { + unsigned int avail = w.nbytes; + be128 *wdst; -static inline void xts_round(struct sinfo *s, void *dst, const void *src) -{ - be128_xor(dst, s->t, src); /* PP <- T xor P */ - s->fn(s->tfm, dst, dst); /* CC <- E(Key1,PP) */ - be128_xor(dst, dst, s->t); /* C <- T xor CC */ + wdst = w.dst.virt.addr; + + do { + be128_xor(wdst, buf++, wdst); + wdst++; + } while ((avail -= bs) >= bs); + + err = skcipher_walk_done(&w, avail); + } + + rctx->left -= subreq->cryptlen; + + if (err || !rctx->left) + goto out; + + rctx->dst = rctx->dstbuf; + + scatterwalk_done(&w.out, 0, 1); + sg = w.out.sg; + offset = w.out.offset; + + if (rctx->dst != sg) { + rctx->dst[0] = *sg; + sg_unmark_end(rctx->dst); + scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2); + } + rctx->dst[0].length -= offset - sg->offset; + rctx->dst[0].offset = offset; + +out: + return err; } -static int crypt(struct blkcipher_desc *d, - struct blkcipher_walk *w, struct priv *ctx, - void (*tw)(struct crypto_tfm *, u8 *, const u8 *), - void (*fn)(struct crypto_tfm *, u8 *, const u8 *)) +static int pre_crypt(struct skcipher_request *req) { - int err; - unsigned int avail; + struct rctx *rctx = skcipher_request_ctx(req); + be128 *buf = rctx->ext ?: rctx->buf; + struct skcipher_request *subreq; const int bs = XTS_BLOCK_SIZE; - struct sinfo s = { - .tfm = crypto_cipher_tfm(ctx->child), - .fn = fn - }; - u8 *wsrc; - u8 *wdst; - - err = blkcipher_walk_virt(d, w); - if (!w->nbytes) - return err; + struct skcipher_walk w; + struct scatterlist *sg; + unsigned cryptlen; + unsigned offset; + bool more; + int err; - s.t = (be128 *)w->iv; - avail = w->nbytes; + subreq = &rctx->subreq; + cryptlen = subreq->cryptlen; - wsrc = w->src.virt.addr; - wdst = w->dst.virt.addr; + more = rctx->left > cryptlen; + if (!more) + cryptlen = rctx->left; - /* calculate first value of T */ - tw(crypto_cipher_tfm(ctx->tweak), w->iv, w->iv); + skcipher_request_set_crypt(subreq, rctx->src, rctx->dst, + cryptlen, NULL); - goto first; + err = skcipher_walk_virt(&w, subreq, false); - for (;;) { - do { - gf128mul_x_ble(s.t, s.t); + while (w.nbytes) { + unsigned int avail = w.nbytes; + be128 *wsrc; + be128 *wdst; -first: - xts_round(&s, wdst, wsrc); + wsrc = w.src.virt.addr; + wdst = w.dst.virt.addr; - wsrc += bs; - wdst += bs; + do { + *buf++ = rctx->t; + be128_xor(wdst++, &rctx->t, wsrc++); + gf128mul_x_ble(&rctx->t, &rctx->t); } while ((avail -= bs) >= bs); - err = blkcipher_walk_done(d, w, avail); - if (!w->nbytes) - break; + err = skcipher_walk_done(&w, avail); + } + + skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst, + cryptlen, NULL); + + if (err || !more) + goto out; + + rctx->src = rctx->srcbuf; - avail = 
w->nbytes; + scatterwalk_done(&w.in, 0, 1); + sg = w.in.sg; + offset = w.in.offset; - wsrc = w->src.virt.addr; - wdst = w->dst.virt.addr; + if (rctx->src != sg) { + rctx->src[0] = *sg; + sg_unmark_end(rctx->src); + scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2); } + rctx->src[0].length -= offset - sg->offset; + rctx->src[0].offset = offset; +out: return err; } -static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) +static int init_crypt(struct skcipher_request *req, crypto_completion_t done) { - struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk w; + struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + struct rctx *rctx = skcipher_request_ctx(req); + struct skcipher_request *subreq; + gfp_t gfp; + + subreq = &rctx->subreq; + skcipher_request_set_tfm(subreq, ctx->child); + skcipher_request_set_callback(subreq, req->base.flags, done, req); + + gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : + GFP_ATOMIC; + rctx->ext = NULL; + + subreq->cryptlen = XTS_BUFFER_SIZE; + if (req->cryptlen > XTS_BUFFER_SIZE) { + subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE); + rctx->ext = kmalloc(subreq->cryptlen, gfp); + } + + rctx->src = req->src; + rctx->dst = req->dst; + rctx->left = req->cryptlen; + + /* calculate first value of T */ + crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv); - blkcipher_walk_init(&w, dst, src, nbytes); - return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt, - crypto_cipher_alg(ctx->child)->cia_encrypt); + return 0; } -static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) +static void exit_crypt(struct skcipher_request *req) { - struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk w; + struct rctx *rctx = skcipher_request_ctx(req); + + rctx->left = 0; - blkcipher_walk_init(&w, dst, src, nbytes); - return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt, - crypto_cipher_alg(ctx->child)->cia_decrypt); + if (rctx->ext) + kzfree(rctx->ext); +} + +static int do_encrypt(struct skcipher_request *req, int err) +{ + struct rctx *rctx = skcipher_request_ctx(req); + struct skcipher_request *subreq; + + subreq = &rctx->subreq; + + while (!err && rctx->left) { + err = pre_crypt(req) ?: + crypto_skcipher_encrypt(subreq) ?: + post_crypt(req); + + if (err == -EINPROGRESS || + (err == -EBUSY && + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return err; + } + + exit_crypt(req); + return err; +} + +static void encrypt_done(struct crypto_async_request *areq, int err) +{ + struct skcipher_request *req = areq->data; + struct skcipher_request *subreq; + struct rctx *rctx; + + rctx = skcipher_request_ctx(req); + subreq = &rctx->subreq; + subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; + + err = do_encrypt(req, err ?: post_crypt(req)); + if (rctx->left) + return; + + skcipher_request_complete(req, err); +} + +static int encrypt(struct skcipher_request *req) +{ + return do_encrypt(req, init_crypt(req, encrypt_done)); +} + +static int do_decrypt(struct skcipher_request *req, int err) +{ + struct rctx *rctx = skcipher_request_ctx(req); + struct skcipher_request *subreq; + + subreq = &rctx->subreq; + + while (!err && rctx->left) { + err = pre_crypt(req) ?: + crypto_skcipher_decrypt(subreq) ?: + post_crypt(req); + + if (err == -EINPROGRESS || + (err == -EBUSY && + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return err; + } + 
+ exit_crypt(req); + return err; +} + +static void decrypt_done(struct crypto_async_request *areq, int err) +{ + struct skcipher_request *req = areq->data; + struct skcipher_request *subreq; + struct rctx *rctx; + + rctx = skcipher_request_ctx(req); + subreq = &rctx->subreq; + subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; + + err = do_decrypt(req, err ?: post_crypt(req)); + if (rctx->left) + return; + + skcipher_request_complete(req, err); +} + +static int decrypt(struct skcipher_request *req) +{ + return do_decrypt(req, init_crypt(req, decrypt_done)); } int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst, @@ -233,112 +414,170 @@ first: } EXPORT_SYMBOL_GPL(xts_crypt); -static int init_tfm(struct crypto_tfm *tfm) +static int init_tfm(struct crypto_skcipher *tfm) { - struct crypto_cipher *cipher; - struct crypto_instance *inst = (void *)tfm->__crt_alg; - struct crypto_spawn *spawn = crypto_instance_ctx(inst); - struct priv *ctx = crypto_tfm_ctx(tfm); - u32 *flags = &tfm->crt_flags; - - cipher = crypto_spawn_cipher(spawn); - if (IS_ERR(cipher)) - return PTR_ERR(cipher); - - if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) { - *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; - crypto_free_cipher(cipher); - return -EINVAL; - } + struct skcipher_instance *inst = skcipher_alg_instance(tfm); + struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst); + struct priv *ctx = crypto_skcipher_ctx(tfm); + struct crypto_skcipher *child; + struct crypto_cipher *tweak; - ctx->child = cipher; + child = crypto_spawn_skcipher(&ictx->spawn); + if (IS_ERR(child)) + return PTR_ERR(child); - cipher = crypto_spawn_cipher(spawn); - if (IS_ERR(cipher)) { - crypto_free_cipher(ctx->child); - return PTR_ERR(cipher); - } + ctx->child = child; - /* this check isn't really needed, leave it here just in case */ - if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) { - crypto_free_cipher(cipher); - crypto_free_cipher(ctx->child); - *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; - return -EINVAL; + tweak = crypto_alloc_cipher(ictx->name, 0, 0); + if (IS_ERR(tweak)) { + crypto_free_skcipher(ctx->child); + return PTR_ERR(tweak); } - ctx->tweak = cipher; + ctx->tweak = tweak; + + crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) + + sizeof(struct rctx)); return 0; } -static void exit_tfm(struct crypto_tfm *tfm) +static void exit_tfm(struct crypto_skcipher *tfm) { - struct priv *ctx = crypto_tfm_ctx(tfm); - crypto_free_cipher(ctx->child); + struct priv *ctx = crypto_skcipher_ctx(tfm); + + crypto_free_skcipher(ctx->child); crypto_free_cipher(ctx->tweak); } -static struct crypto_instance *alloc(struct rtattr **tb) +static void free(struct skcipher_instance *inst) +{ + crypto_drop_skcipher(skcipher_instance_ctx(inst)); + kfree(inst); +} + +static int create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_instance *inst; - struct crypto_alg *alg; + struct skcipher_instance *inst; + struct crypto_attr_type *algt; + struct xts_instance_ctx *ctx; + struct skcipher_alg *alg; + const char *cipher_name; + u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return PTR_ERR(algt); + + if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) + return -EINVAL; + + cipher_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(cipher_name)) + return PTR_ERR(cipher_name); + + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + ctx = skcipher_instance_ctx(inst); + + 
crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst)); + + mask = crypto_requires_off(algt->type, algt->mask, + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC); + + err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0, mask); + if (err == -ENOENT) { + err = -ENAMETOOLONG; + if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", + cipher_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0, mask); + } + if (err) - return ERR_PTR(err); + goto err_free_inst; + + alg = crypto_skcipher_spawn_alg(&ctx->spawn); - alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_MASK); - if (IS_ERR(alg)) - return ERR_CAST(alg); + err = -EINVAL; + if (alg->base.cra_blocksize != XTS_BLOCK_SIZE) + goto err_drop_spawn; - inst = crypto_alloc_instance("xts", alg); - if (IS_ERR(inst)) - goto out_put_alg; + if (crypto_skcipher_alg_ivsize(alg)) + goto err_drop_spawn; - inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; - inst->alg.cra_priority = alg->cra_priority; - inst->alg.cra_blocksize = alg->cra_blocksize; + err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts", + &alg->base); + if (err) + goto err_drop_spawn; - if (alg->cra_alignmask < 7) - inst->alg.cra_alignmask = 7; - else - inst->alg.cra_alignmask = alg->cra_alignmask; + err = -EINVAL; + cipher_name = alg->base.cra_name; - inst->alg.cra_type = &crypto_blkcipher_type; + /* Alas we screwed up the naming so we have to mangle the + * cipher name. + */ + if (!strncmp(cipher_name, "ecb(", 4)) { + unsigned len; - inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; - inst->alg.cra_blkcipher.min_keysize = - 2 * alg->cra_cipher.cia_min_keysize; - inst->alg.cra_blkcipher.max_keysize = - 2 * alg->cra_cipher.cia_max_keysize; + len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name)); + if (len < 2 || len >= sizeof(ctx->name)) + goto err_drop_spawn; - inst->alg.cra_ctxsize = sizeof(struct priv); + if (ctx->name[len - 1] != ')') + goto err_drop_spawn; - inst->alg.cra_init = init_tfm; - inst->alg.cra_exit = exit_tfm; + ctx->name[len - 1] = 0; - inst->alg.cra_blkcipher.setkey = setkey; - inst->alg.cra_blkcipher.encrypt = encrypt; - inst->alg.cra_blkcipher.decrypt = decrypt; + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, + "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + } else + goto err_drop_spawn; -out_put_alg: - crypto_mod_put(alg); - return inst; -} + inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; + inst->alg.base.cra_priority = alg->base.cra_priority; + inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE; + inst->alg.base.cra_alignmask = alg->base.cra_alignmask | + (__alignof__(u64) - 1); -static void free(struct crypto_instance *inst) -{ - crypto_drop_spawn(crypto_instance_ctx(inst)); + inst->alg.ivsize = XTS_BLOCK_SIZE; + inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2; + inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2; + + inst->alg.base.cra_ctxsize = sizeof(struct priv); + + inst->alg.init = init_tfm; + inst->alg.exit = exit_tfm; + + inst->alg.setkey = setkey; + inst->alg.encrypt = encrypt; + inst->alg.decrypt = decrypt; + + inst->free = free; + + err = skcipher_register_instance(tmpl, inst); + if (err) + goto err_drop_spawn; + +out: + return err; + +err_drop_spawn: + crypto_drop_skcipher(&ctx->spawn); +err_free_inst: kfree(inst); + goto out; } static struct crypto_template crypto_tmpl = { .name = "xts", - .alloc = alloc, - .free = free, + .create = create, .module = 
THIS_MODULE, };
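
Note on the testmgr.c conversion shown above: every explicit { .vecs = ..., .count = *_TEST_VECTORS } pair is replaced by a __VECS() wrapper, which is what allows all of the hand-maintained *_TEST_VECTORS defines to be deleted from testmgr.h. The macro's definition is introduced in an earlier hunk of the patch and is not visible in this excerpt; a minimal sketch of the idea, presumably along these lines and using the kernel's ARRAY_SIZE() helper, is:

/*
 * Illustrative sketch only -- the exact definition lives earlier in the
 * patch.  The point is to derive the vector count from the array itself
 * rather than from a separately maintained constant.
 */
#define __VECS(tv)	{ .vecs = tv, .count = ARRAY_SIZE(tv) }

/* usage, mirroring the converted alg_test_descs[] entries above */
.suite = {
	.hash = __VECS(sha1_tv_template)
}

With the count computed from the array, adding or removing a test vector can no longer leave a stale SHA1_TEST_VECTORS-style constant behind, which is why the testmgr.h hunks simply drop those defines.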