Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig                  77
-rw-r--r--  crypto/Makefile                 12
-rw-r--r--  crypto/ablk_helper.c             6
-rw-r--r--  crypto/ablkcipher.c            223
-rw-r--r--  crypto/aead.c                   16
-rw-r--r--  crypto/ahash.c                   6
-rw-r--r--  crypto/algapi.c                 24
-rw-r--r--  crypto/authenc.c               116
-rw-r--r--  crypto/authencesn.c            106
-rw-r--r--  crypto/blkcipher.c             185
-rw-r--r--  crypto/ccm.c                    72
-rw-r--r--  crypto/chacha20poly1305.c       89
-rw-r--r--  crypto/chainiv.c               317
-rw-r--r--  crypto/cryptd.c                132
-rw-r--r--  crypto/crypto_null.c            11
-rw-r--r--  crypto/crypto_user.c            57
-rw-r--r--  crypto/ctr.c                   183
-rw-r--r--  crypto/cts.c                   495
-rw-r--r--  crypto/dh.c                    189
-rw-r--r--  crypto/dh_helper.c              95
-rw-r--r--  crypto/drbg.c                  269
-rw-r--r--  crypto/ecc.c                  1018
-rw-r--r--  crypto/ecc.h                    83
-rw-r--r--  crypto/ecc_curve_defs.h         57
-rw-r--r--  crypto/ecdh.c                  151
-rw-r--r--  crypto/ecdh_helper.c            86
-rw-r--r--  crypto/echainiv.c               16
-rw-r--r--  crypto/eseqiv.c                242
-rw-r--r--  crypto/gcm.c                   115
-rw-r--r--  crypto/jitterentropy-kcapi.c    22
-rw-r--r--  crypto/kpp.c                   123
-rw-r--r--  crypto/mcryptd.c               132
-rw-r--r--  crypto/rsa-pkcs1pad.c          325
-rw-r--r--  crypto/rsa.c                   113
-rw-r--r--  crypto/rsa_helper.c            172
-rw-r--r--  crypto/rsaprivkey.asn1          10
-rw-r--r--  crypto/scatterwalk.c            81
-rw-r--r--  crypto/seqiv.c                 176
-rw-r--r--  crypto/sha3_generic.c          300
-rw-r--r--  crypto/skcipher.c              196
-rw-r--r--  crypto/tcrypt.c                442
-rw-r--r--  crypto/testmgr.c               288
-rw-r--r--  crypto/testmgr.h              1036
43 files changed, 5343 insertions, 2521 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 1d33beb6a1ae..a9377bef25e3 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -93,6 +93,15 @@ config CRYPTO_AKCIPHER
select CRYPTO_AKCIPHER2
select CRYPTO_ALGAPI
+config CRYPTO_KPP2
+ tristate
+ select CRYPTO_ALGAPI2
+
+config CRYPTO_KPP
+ tristate
+ select CRYPTO_ALGAPI
+ select CRYPTO_KPP2
+
config CRYPTO_RSA
tristate "RSA algorithm"
select CRYPTO_AKCIPHER
@@ -102,6 +111,19 @@ config CRYPTO_RSA
help
Generic implementation of the RSA public key algorithm.
+config CRYPTO_DH
+ tristate "Diffie-Hellman algorithm"
+ select CRYPTO_KPP
+ select MPILIB
+ help
+ Generic implementation of the Diffie-Hellman algorithm.
+
+config CRYPTO_ECDH
+ tristate "ECDH algorithm"
+ select CRYPTO_KPP
+ help
+ Generic implementation of the ECDH algorithm.
+
config CRYPTO_MANAGER
tristate "Cryptographic algorithm manager"
select CRYPTO_MANAGER2
@@ -115,6 +137,7 @@ config CRYPTO_MANAGER2
select CRYPTO_HASH2
select CRYPTO_BLKCIPHER2
select CRYPTO_AKCIPHER2
+ select CRYPTO_KPP2
config CRYPTO_USER
tristate "Userspace cryptographic algorithm configuration"
@@ -414,6 +437,17 @@ config CRYPTO_CRC32C_INTEL
gain performance compared with software implementation.
Module will be crc32c-intel.
+config CRYPTO_CRC32C_VPMSUM
+ tristate "CRC32c CRC algorithm (powerpc64)"
+ depends on PPC64
+ select CRYPTO_HASH
+ select CRC32
+ help
+ CRC32c algorithm implemented using vector polynomial multiply-sum
+ (vpmsum) instructions, introduced in POWER8. Enable on POWER8
+ and newer processors for improved performance.
+
+
config CRYPTO_CRC32C_SPARC64
tristate "CRC32c CRC algorithm (SPARC64)"
depends on SPARC64
@@ -681,6 +715,38 @@ config CRYPTO_SHA1_MB
lanes remain unfilled, a flush operation will be initiated to
process the crypto jobs, adding a slight latency.
+config CRYPTO_SHA256_MB
+ tristate "SHA256 digest algorithm (x86_64 Multi-Buffer, Experimental)"
+ depends on X86 && 64BIT
+ select CRYPTO_SHA256
+ select CRYPTO_HASH
+ select CRYPTO_MCRYPTD
+ help
+ SHA-256 secure hash standard (DFIPS 180-2) implemented
+ using the multi-buffer technique. This algorithm computes on
+ multiple data lanes concurrently with SIMD instructions for
+ better throughput. It should not be enabled by default but
+ only used when there is a significant amount of work to keep
+ the data lanes filled to get a performance benefit. If the data
+ lanes remain unfilled, a flush operation will be initiated to
+ process the crypto jobs, adding a slight latency.
+
+config CRYPTO_SHA512_MB
+ tristate "SHA512 digest algorithm (x86_64 Multi-Buffer, Experimental)"
+ depends on X86 && 64BIT
+ select CRYPTO_SHA512
+ select CRYPTO_HASH
+ select CRYPTO_MCRYPTD
+ help
+ SHA-512 secure hash standard (DFIPS 180-2) implemented
+ using the multi-buffer technique. This algorithm computes on
+ multiple data lanes concurrently with SIMD instructions for
+ better throughput. It should not be enabled by default but
+ only used when there is a significant amount of work to keep
+ the data lanes filled to get a performance benefit. If the data
+ lanes remain unfilled, a flush operation will be initiated to
+ process the crypto jobs, adding a slight latency.
+
config CRYPTO_SHA256
tristate "SHA224 and SHA256 digest algorithm"
select CRYPTO_HASH
@@ -750,6 +816,16 @@ config CRYPTO_SHA512_SPARC64
SHA-512 secure hash standard (DFIPS 180-2) implemented
using sparc64 crypto instructions, when available.
+config CRYPTO_SHA3
+ tristate "SHA3 digest algorithm"
+ select CRYPTO_HASH
+ help
+ SHA-3 secure hash standard (FIPS 202). It is based on the
+ cryptographic sponge function family called Keccak.
+
+ References:
+ http://keccak.noekeon.org/
+
config CRYPTO_TGR192
tristate "Tiger digest algorithms"
select CRYPTO_HASH
@@ -1567,6 +1643,7 @@ config CRYPTO_DRBG_HASH
config CRYPTO_DRBG_CTR
bool "Enable CTR DRBG"
select CRYPTO_AES
+ depends on CRYPTO_CTR
help
Enable the CTR DRBG variant as defined in NIST SP800-90A.
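
The two Kconfig entries above expose the new key-agreement (KPP) API that this series introduces. A minimal sketch of one side of a Diffie-Hellman exchange through that API follows; it assumes dh_generic is available, that the caller filled in "params", and it leaves asynchronous completion (-EINPROGRESS) out for brevity:

#include <crypto/dh.h>
#include <crypto/kpp.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int dh_generate_public(const struct dh *params, u8 *public_key,
                              unsigned int len)
{
        struct crypto_kpp *tfm;
        struct kpp_request *req;
        struct scatterlist dst;
        char *packed;
        unsigned int packed_len;
        int err;

        tfm = crypto_alloc_kpp("dh", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* p, g and the private key are passed packed via set_secret(). */
        packed_len = crypto_dh_key_len(params);
        err = -ENOMEM;
        packed = kmalloc(packed_len, GFP_KERNEL);
        if (!packed)
                goto free_tfm;

        err = crypto_dh_encode_key(packed, packed_len, params);
        if (err)
                goto free_packed;

        err = crypto_kpp_set_secret(tfm, packed, packed_len);
        if (err)
                goto free_packed;

        err = -ENOMEM;
        req = kpp_request_alloc(tfm, GFP_KERNEL);
        if (!req)
                goto free_packed;

        /* No input for public-key generation; output receives our value. */
        sg_init_one(&dst, public_key, len);
        kpp_request_set_input(req, NULL, 0);
        kpp_request_set_output(req, &dst, len);
        kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

        err = crypto_kpp_generate_public_key(req);

        kpp_request_free(req);
free_packed:
        kzfree(packed);
free_tfm:
        crypto_free_kpp(tfm);
        return err;
}

The peer's public value is then fed back through kpp_request_set_input() and crypto_kpp_compute_shared_secret() on the same transform; ecdh_generic is driven the same way, with crypto_ecdh_encode_key() packing the curve id and private key instead.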
diff --git a/crypto/Makefile b/crypto/Makefile
index 4f4ef7eaae3f..99cc64ac70ef 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -20,8 +20,6 @@ crypto_blkcipher-y := ablkcipher.o
crypto_blkcipher-y += blkcipher.o
crypto_blkcipher-y += skcipher.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o
-obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
-obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o
obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o
@@ -30,6 +28,15 @@ crypto_hash-y += shash.o
obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
+obj-$(CONFIG_CRYPTO_KPP2) += kpp.o
+
+dh_generic-y := dh.o
+dh_generic-y += dh_helper.o
+obj-$(CONFIG_CRYPTO_DH) += dh_generic.o
+ecdh_generic-y := ecc.o
+ecdh_generic-y += ecdh.o
+ecdh_generic-y += ecdh_helper.o
+obj-$(CONFIG_CRYPTO_ECDH) += ecdh_generic.o
$(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h
$(obj)/rsaprivkey-asn1.o: $(obj)/rsaprivkey-asn1.c $(obj)/rsaprivkey-asn1.h
@@ -61,6 +68,7 @@ obj-$(CONFIG_CRYPTO_RMD320) += rmd320.o
obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
+obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o
obj-$(CONFIG_CRYPTO_WP512) += wp512.o
obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
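
The new sha3_generic object registers sha3-224, sha3-256, sha3-384 and sha3-512 as synchronous hashes, so the ordinary shash interface drives them. A minimal one-shot digest sketch, assuming CRYPTO_SHA3 is enabled:

#include <crypto/hash.h>
#include <linux/err.h>

static int sha3_256_digest(const u8 *data, unsigned int len, u8 out[32])
{
        struct crypto_shash *tfm;
        int err;

        tfm = crypto_alloc_shash("sha3-256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        {
                SHASH_DESC_ON_STACK(desc, tfm);

                desc->tfm = tfm;
                desc->flags = 0;        /* or CRYPTO_TFM_REQ_MAY_SLEEP */
                err = crypto_shash_digest(desc, data, len, out);
                shash_desc_zero(desc);
        }

        crypto_free_shash(tfm);
        return err;
}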
diff --git a/crypto/ablk_helper.c b/crypto/ablk_helper.c
index e1fcf53bb931..1441f07d0a19 100644
--- a/crypto/ablk_helper.c
+++ b/crypto/ablk_helper.c
@@ -71,7 +71,8 @@ int ablk_encrypt(struct ablkcipher_request *req)
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- if (!may_use_simd()) {
+ if (!may_use_simd() ||
+ (in_atomic() && cryptd_ablkcipher_queued(ctx->cryptd_tfm))) {
struct ablkcipher_request *cryptd_req =
ablkcipher_request_ctx(req);
@@ -90,7 +91,8 @@ int ablk_decrypt(struct ablkcipher_request *req)
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- if (!may_use_simd()) {
+ if (!may_use_simd() ||
+ (in_atomic() && cryptd_ablkcipher_queued(ctx->cryptd_tfm))) {
struct ablkcipher_request *cryptd_req =
ablkcipher_request_ctx(req);
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index e5b5721809e2..d676fc59521a 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -14,11 +14,8 @@
*/
#include <crypto/internal/skcipher.h>
-#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
-#include <linux/rtnetlink.h>
-#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
@@ -349,16 +346,6 @@ static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
return alg->cra_ctxsize;
}
-int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req)
-{
- return crypto_ablkcipher_encrypt(&req->creq);
-}
-
-int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req)
-{
- return crypto_ablkcipher_decrypt(&req->creq);
-}
-
static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
u32 mask)
{
@@ -371,10 +358,6 @@ static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
crt->setkey = setkey;
crt->encrypt = alg->encrypt;
crt->decrypt = alg->decrypt;
- if (!alg->ivsize) {
- crt->givencrypt = skcipher_null_givencrypt;
- crt->givdecrypt = skcipher_null_givdecrypt;
- }
crt->base = __crypto_ablkcipher_cast(tfm);
crt->ivsize = alg->ivsize;
@@ -436,11 +419,6 @@ const struct crypto_type crypto_ablkcipher_type = {
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
-static int no_givdecrypt(struct skcipher_givcrypt_request *req)
-{
- return -ENOSYS;
-}
-
static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
u32 mask)
{
@@ -454,8 +432,6 @@ static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
alg->setkey : setkey;
crt->encrypt = alg->encrypt;
crt->decrypt = alg->decrypt;
- crt->givencrypt = alg->givencrypt ?: no_givdecrypt;
- crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
crt->base = __crypto_ablkcipher_cast(tfm);
crt->ivsize = alg->ivsize;
@@ -516,202 +492,3 @@ const struct crypto_type crypto_givcipher_type = {
.report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);
-
-const char *crypto_default_geniv(const struct crypto_alg *alg)
-{
- if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
- CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
- alg->cra_ablkcipher.ivsize) !=
- alg->cra_blocksize)
- return "chainiv";
-
- return "eseqiv";
-}
-
-static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
-{
- struct rtattr *tb[3];
- struct {
- struct rtattr attr;
- struct crypto_attr_type data;
- } ptype;
- struct {
- struct rtattr attr;
- struct crypto_attr_alg data;
- } palg;
- struct crypto_template *tmpl;
- struct crypto_instance *inst;
- struct crypto_alg *larval;
- const char *geniv;
- int err;
-
- larval = crypto_larval_lookup(alg->cra_driver_name,
- (type & ~CRYPTO_ALG_TYPE_MASK) |
- CRYPTO_ALG_TYPE_GIVCIPHER,
- mask | CRYPTO_ALG_TYPE_MASK);
- err = PTR_ERR(larval);
- if (IS_ERR(larval))
- goto out;
-
- err = -EAGAIN;
- if (!crypto_is_larval(larval))
- goto drop_larval;
-
- ptype.attr.rta_len = sizeof(ptype);
- ptype.attr.rta_type = CRYPTOA_TYPE;
- ptype.data.type = type | CRYPTO_ALG_GENIV;
- /* GENIV tells the template that we're making a default geniv. */
- ptype.data.mask = mask | CRYPTO_ALG_GENIV;
- tb[0] = &ptype.attr;
-
- palg.attr.rta_len = sizeof(palg);
- palg.attr.rta_type = CRYPTOA_ALG;
- /* Must use the exact name to locate ourselves. */
- memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
- tb[1] = &palg.attr;
-
- tb[2] = NULL;
-
- if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
- CRYPTO_ALG_TYPE_BLKCIPHER)
- geniv = alg->cra_blkcipher.geniv;
- else
- geniv = alg->cra_ablkcipher.geniv;
-
- if (!geniv)
- geniv = crypto_default_geniv(alg);
-
- tmpl = crypto_lookup_template(geniv);
- err = -ENOENT;
- if (!tmpl)
- goto kill_larval;
-
- if (tmpl->create) {
- err = tmpl->create(tmpl, tb);
- if (err)
- goto put_tmpl;
- goto ok;
- }
-
- inst = tmpl->alloc(tb);
- err = PTR_ERR(inst);
- if (IS_ERR(inst))
- goto put_tmpl;
-
- err = crypto_register_instance(tmpl, inst);
- if (err) {
- tmpl->free(inst);
- goto put_tmpl;
- }
-
-ok:
- /* Redo the lookup to use the instance we just registered. */
- err = -EAGAIN;
-
-put_tmpl:
- crypto_tmpl_put(tmpl);
-kill_larval:
- crypto_larval_kill(larval);
-drop_larval:
- crypto_mod_put(larval);
-out:
- crypto_mod_put(alg);
- return err;
-}
-
-struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask)
-{
- struct crypto_alg *alg;
-
- alg = crypto_alg_mod_lookup(name, type, mask);
- if (IS_ERR(alg))
- return alg;
-
- if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
- CRYPTO_ALG_TYPE_GIVCIPHER)
- return alg;
-
- if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
- CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
- alg->cra_ablkcipher.ivsize))
- return alg;
-
- crypto_mod_put(alg);
- alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
- mask & ~CRYPTO_ALG_TESTED);
- if (IS_ERR(alg))
- return alg;
-
- if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
- CRYPTO_ALG_TYPE_GIVCIPHER) {
- if (~alg->cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED) {
- crypto_mod_put(alg);
- alg = ERR_PTR(-ENOENT);
- }
- return alg;
- }
-
- BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
- CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
- alg->cra_ablkcipher.ivsize));
-
- return ERR_PTR(crypto_givcipher_default(alg, type, mask));
-}
-EXPORT_SYMBOL_GPL(crypto_lookup_skcipher);
-
-int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
- u32 type, u32 mask)
-{
- struct crypto_alg *alg;
- int err;
-
- type = crypto_skcipher_type(type);
- mask = crypto_skcipher_mask(mask);
-
- alg = crypto_lookup_skcipher(name, type, mask);
- if (IS_ERR(alg))
- return PTR_ERR(alg);
-
- err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
- crypto_mod_put(alg);
- return err;
-}
-EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
-
-struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
- u32 type, u32 mask)
-{
- struct crypto_tfm *tfm;
- int err;
-
- type = crypto_skcipher_type(type);
- mask = crypto_skcipher_mask(mask);
-
- for (;;) {
- struct crypto_alg *alg;
-
- alg = crypto_lookup_skcipher(alg_name, type, mask);
- if (IS_ERR(alg)) {
- err = PTR_ERR(alg);
- goto err;
- }
-
- tfm = __crypto_alloc_tfm(alg, type, mask);
- if (!IS_ERR(tfm))
- return __crypto_ablkcipher_cast(tfm);
-
- crypto_mod_put(alg);
- err = PTR_ERR(tfm);
-
-err:
- if (err != -EAGAIN)
- break;
- if (fatal_signal_pending(current)) {
- err = -EINTR;
- break;
- }
- }
-
- return ERR_PTR(err);
-}
-EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
diff --git a/crypto/aead.c b/crypto/aead.c
index 9b18a1e40d6a..3f5c5ff004ab 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -294,9 +294,9 @@ int aead_init_geniv(struct crypto_aead *aead)
if (err)
goto out;
- ctx->null = crypto_get_default_null_skcipher();
- err = PTR_ERR(ctx->null);
- if (IS_ERR(ctx->null))
+ ctx->sknull = crypto_get_default_null_skcipher2();
+ err = PTR_ERR(ctx->sknull);
+ if (IS_ERR(ctx->sknull))
goto out;
child = crypto_spawn_aead(aead_instance_ctx(inst));
@@ -314,7 +314,7 @@ out:
return err;
drop_null:
- crypto_put_default_null_skcipher();
+ crypto_put_default_null_skcipher2();
goto out;
}
EXPORT_SYMBOL_GPL(aead_init_geniv);
@@ -324,7 +324,7 @@ void aead_exit_geniv(struct crypto_aead *tfm)
struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
- crypto_put_default_null_skcipher();
+ crypto_put_default_null_skcipher2();
}
EXPORT_SYMBOL_GPL(aead_exit_geniv);
@@ -346,9 +346,13 @@ static int aead_prepare_alg(struct aead_alg *alg)
{
struct crypto_alg *base = &alg->base;
- if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8)
+ if (max3(alg->maxauthsize, alg->ivsize, alg->chunksize) >
+ PAGE_SIZE / 8)
return -EINVAL;
+ if (!alg->chunksize)
+ alg->chunksize = base->cra_blocksize;
+
base->cra_type = &crypto_aead_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AEAD;
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 3887a98abcc3..2ce8bcb9049c 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -461,10 +461,10 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
- if (alg->cra_type == &crypto_ahash_type)
- return alg->cra_ctxsize;
+ if (alg->cra_type != &crypto_ahash_type)
+ return sizeof(struct crypto_shash *);
- return sizeof(struct crypto_shash *);
+ return crypto_alg_extsize(alg);
}
#ifdef CONFIG_NET
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 731255a6104f..df939b54b09f 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -811,6 +811,21 @@ int crypto_attr_u32(struct rtattr *rta, u32 *num)
}
EXPORT_SYMBOL_GPL(crypto_attr_u32);
+int crypto_inst_setname(struct crypto_instance *inst, const char *name,
+ struct crypto_alg *alg)
+{
+ if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name,
+ alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
+ return -ENAMETOOLONG;
+
+ if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
+ name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ return -ENAMETOOLONG;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_inst_setname);
+
void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
unsigned int head)
{
@@ -825,13 +840,8 @@ void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
inst = (void *)(p + head);
- err = -ENAMETOOLONG;
- if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name,
- alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
- goto err_free_inst;
-
- if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
- name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ err = crypto_inst_setname(inst, name, alg);
+ if (err)
goto err_free_inst;
return p;
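
crypto_inst_setname() factors the two bounds-checked snprintf() calls out of crypto_alloc_instance2() so templates that allocate their instances by hand can reuse them. A hypothetical caller ("mytmpl" is illustrative, not an algorithm added by this patch):

#include <crypto/algapi.h>

/*
 * Name a freshly allocated instance "mytmpl(<alg>)".  Both cra_name and
 * cra_driver_name are filled in; -ENAMETOOLONG is returned if either
 * would exceed CRYPTO_MAX_ALG_NAME.
 */
static int mytmpl_set_names(struct crypto_instance *inst,
                            struct crypto_alg *alg)
{
        return crypto_inst_setname(inst, "mytmpl", alg);
}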
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 55a354d57251..a7e1ac786c5d 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -32,8 +32,8 @@ struct authenc_instance_ctx {
struct crypto_authenc_ctx {
struct crypto_ahash *auth;
- struct crypto_ablkcipher *enc;
- struct crypto_blkcipher *null;
+ struct crypto_skcipher *enc;
+ struct crypto_skcipher *null;
};
struct authenc_request_ctx {
@@ -83,7 +83,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
{
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
struct crypto_ahash *auth = ctx->auth;
- struct crypto_ablkcipher *enc = ctx->enc;
+ struct crypto_skcipher *enc = ctx->enc;
struct crypto_authenc_keys keys;
int err = -EINVAL;
@@ -100,11 +100,11 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
if (err)
goto out;
- crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
- crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);
- crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) &
+ crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(enc, crypto_aead_get_flags(authenc) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
+ crypto_aead_set_flags(authenc, crypto_skcipher_get_flags(enc) &
CRYPTO_TFM_RES_MASK);
out:
@@ -184,12 +184,15 @@ static int crypto_authenc_copy_assoc(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- struct blkcipher_desc desc = {
- .tfm = ctx->null,
- };
+ SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
- return crypto_blkcipher_encrypt(&desc, req->dst, req->src,
- req->assoclen);
+ skcipher_request_set_tfm(skreq, ctx->null);
+ skcipher_request_set_callback(skreq, aead_request_flags(req),
+ NULL, NULL);
+ skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
+ NULL);
+
+ return crypto_skcipher_encrypt(skreq);
}
static int crypto_authenc_encrypt(struct aead_request *req)
@@ -199,14 +202,13 @@ static int crypto_authenc_encrypt(struct aead_request *req)
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
- struct crypto_ablkcipher *enc = ctx->enc;
+ struct crypto_skcipher *enc = ctx->enc;
unsigned int cryptlen = req->cryptlen;
- struct ablkcipher_request *abreq = (void *)(areq_ctx->tail +
- ictx->reqoff);
+ struct skcipher_request *skreq = (void *)(areq_ctx->tail +
+ ictx->reqoff);
struct scatterlist *src, *dst;
int err;
- sg_init_table(areq_ctx->src, 2);
src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
dst = src;
@@ -215,16 +217,15 @@ static int crypto_authenc_encrypt(struct aead_request *req)
if (err)
return err;
- sg_init_table(areq_ctx->dst, 2);
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
}
- ablkcipher_request_set_tfm(abreq, enc);
- ablkcipher_request_set_callback(abreq, aead_request_flags(req),
- crypto_authenc_encrypt_done, req);
- ablkcipher_request_set_crypt(abreq, src, dst, cryptlen, req->iv);
+ skcipher_request_set_tfm(skreq, enc);
+ skcipher_request_set_callback(skreq, aead_request_flags(req),
+ crypto_authenc_encrypt_done, req);
+ skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
- err = crypto_ablkcipher_encrypt(abreq);
+ err = crypto_skcipher_encrypt(skreq);
if (err)
return err;
@@ -240,8 +241,8 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req,
struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
- struct ablkcipher_request *abreq = (void *)(areq_ctx->tail +
- ictx->reqoff);
+ struct skcipher_request *skreq = (void *)(areq_ctx->tail +
+ ictx->reqoff);
unsigned int authsize = crypto_aead_authsize(authenc);
u8 *ihash = ahreq->result + authsize;
struct scatterlist *src, *dst;
@@ -251,22 +252,19 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req,
if (crypto_memneq(ihash, ahreq->result, authsize))
return -EBADMSG;
- sg_init_table(areq_ctx->src, 2);
src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
dst = src;
- if (req->src != req->dst) {
- sg_init_table(areq_ctx->dst, 2);
+ if (req->src != req->dst)
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
- }
- ablkcipher_request_set_tfm(abreq, ctx->enc);
- ablkcipher_request_set_callback(abreq, aead_request_flags(req),
- req->base.complete, req->base.data);
- ablkcipher_request_set_crypt(abreq, src, dst,
- req->cryptlen - authsize, req->iv);
+ skcipher_request_set_tfm(skreq, ctx->enc);
+ skcipher_request_set_callback(skreq, aead_request_flags(req),
+ req->base.complete, req->base.data);
+ skcipher_request_set_crypt(skreq, src, dst,
+ req->cryptlen - authsize, req->iv);
- return crypto_ablkcipher_decrypt(abreq);
+ return crypto_skcipher_decrypt(skreq);
}
static void authenc_verify_ahash_done(struct crypto_async_request *areq,
@@ -318,20 +316,20 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_ahash *auth;
- struct crypto_ablkcipher *enc;
- struct crypto_blkcipher *null;
+ struct crypto_skcipher *enc;
+ struct crypto_skcipher *null;
int err;
auth = crypto_spawn_ahash(&ictx->auth);
if (IS_ERR(auth))
return PTR_ERR(auth);
- enc = crypto_spawn_skcipher(&ictx->enc);
+ enc = crypto_spawn_skcipher2(&ictx->enc);
err = PTR_ERR(enc);
if (IS_ERR(enc))
goto err_free_ahash;
- null = crypto_get_default_null_skcipher();
+ null = crypto_get_default_null_skcipher2();
err = PTR_ERR(null);
if (IS_ERR(null))
goto err_free_skcipher;
@@ -347,13 +345,13 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
max_t(unsigned int,
crypto_ahash_reqsize(auth) +
sizeof(struct ahash_request),
- sizeof(struct ablkcipher_request) +
- crypto_ablkcipher_reqsize(enc)));
+ sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(enc)));
return 0;
err_free_skcipher:
- crypto_free_ablkcipher(enc);
+ crypto_free_skcipher(enc);
err_free_ahash:
crypto_free_ahash(auth);
return err;
@@ -364,8 +362,8 @@ static void crypto_authenc_exit_tfm(struct crypto_aead *tfm)
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_ahash(ctx->auth);
- crypto_free_ablkcipher(ctx->enc);
- crypto_put_default_null_skcipher();
+ crypto_free_skcipher(ctx->enc);
+ crypto_put_default_null_skcipher2();
}
static void crypto_authenc_free(struct aead_instance *inst)
@@ -384,7 +382,7 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
struct aead_instance *inst;
struct hash_alg_common *auth;
struct crypto_alg *auth_base;
- struct crypto_alg *enc;
+ struct skcipher_alg *enc;
struct authenc_instance_ctx *ctx;
const char *enc_name;
int err;
@@ -397,7 +395,8 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
return -EINVAL;
auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ CRYPTO_ALG_TYPE_AHASH_MASK |
+ crypto_requires_sync(algt->type, algt->mask));
if (IS_ERR(auth))
return PTR_ERR(auth);
@@ -421,37 +420,40 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
goto err_free_inst;
crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
- err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher2(&ctx->enc, enc_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (err)
goto err_drop_auth;
- enc = crypto_skcipher_spawn_alg(&ctx->enc);
+ enc = crypto_spawn_skcipher_alg(&ctx->enc);
ctx->reqoff = ALIGN(2 * auth->digestsize + auth_base->cra_alignmask,
auth_base->cra_alignmask + 1);
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
- "authenc(%s,%s)", auth_base->cra_name, enc->cra_name) >=
+ "authenc(%s,%s)", auth_base->cra_name,
+ enc->base.cra_name) >=
CRYPTO_MAX_ALG_NAME)
goto err_drop_enc;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"authenc(%s,%s)", auth_base->cra_driver_name,
- enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_drop_enc;
- inst->alg.base.cra_flags = enc->cra_flags & CRYPTO_ALG_ASYNC;
- inst->alg.base.cra_priority = enc->cra_priority * 10 +
+ inst->alg.base.cra_flags = (auth_base->cra_flags |
+ enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
+ inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
auth_base->cra_priority;
- inst->alg.base.cra_blocksize = enc->cra_blocksize;
+ inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
- enc->cra_alignmask;
+ enc->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_ctx);
- inst->alg.ivsize = enc->cra_ablkcipher.ivsize;
+ inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
+ inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
inst->alg.maxauthsize = auth->digestsize;
inst->alg.init = crypto_authenc_init_tfm;
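
The conversion above shows the pattern this series uses wherever a blkcipher_desc previously wrapped ctx->null: a synchronous request on the stack driving the null skcipher, whose "encryption" is just a copy. The same idiom in isolation (a sketch; the tfm is assumed to come from crypto_get_default_null_skcipher2() and remains owned by the caller):

#include <crypto/null.h>
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

/* Copy len bytes from src to dst through the null skcipher. */
static int null_copy(struct crypto_skcipher *tfm,
                     struct scatterlist *src, struct scatterlist *dst,
                     unsigned int len)
{
        SKCIPHER_REQUEST_ON_STACK(req, tfm);
        int err;

        skcipher_request_set_tfm(req, tfm);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, len, NULL);

        err = crypto_skcipher_encrypt(req);
        skcipher_request_zero(req);
        return err;
}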
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 0c0468869e25..121010ac9962 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -35,8 +35,8 @@ struct authenc_esn_instance_ctx {
struct crypto_authenc_esn_ctx {
unsigned int reqoff;
struct crypto_ahash *auth;
- struct crypto_ablkcipher *enc;
- struct crypto_blkcipher *null;
+ struct crypto_skcipher *enc;
+ struct crypto_skcipher *null;
};
struct authenc_esn_request_ctx {
@@ -65,7 +65,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
{
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct crypto_ahash *auth = ctx->auth;
- struct crypto_ablkcipher *enc = ctx->enc;
+ struct crypto_skcipher *enc = ctx->enc;
struct crypto_authenc_keys keys;
int err = -EINVAL;
@@ -82,11 +82,11 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
if (err)
goto out;
- crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
- crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) &
+ crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);
- crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) &
+ err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
+ crypto_aead_set_flags(authenc_esn, crypto_skcipher_get_flags(enc) &
CRYPTO_TFM_RES_MASK);
out:
@@ -182,11 +182,14 @@ static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
- struct blkcipher_desc desc = {
- .tfm = ctx->null,
- };
+ SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
- return crypto_blkcipher_encrypt(&desc, req->dst, req->src, len);
+ skcipher_request_set_tfm(skreq, ctx->null);
+ skcipher_request_set_callback(skreq, aead_request_flags(req),
+ NULL, NULL);
+ skcipher_request_set_crypt(skreq, req->src, req->dst, len, NULL);
+
+ return crypto_skcipher_encrypt(skreq);
}
static int crypto_authenc_esn_encrypt(struct aead_request *req)
@@ -194,9 +197,9 @@ static int crypto_authenc_esn_encrypt(struct aead_request *req)
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
- struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
- + ctx->reqoff);
- struct crypto_ablkcipher *enc = ctx->enc;
+ struct skcipher_request *skreq = (void *)(areq_ctx->tail +
+ ctx->reqoff);
+ struct crypto_skcipher *enc = ctx->enc;
unsigned int assoclen = req->assoclen;
unsigned int cryptlen = req->cryptlen;
struct scatterlist *src, *dst;
@@ -215,12 +218,12 @@ static int crypto_authenc_esn_encrypt(struct aead_request *req)
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen);
}
- ablkcipher_request_set_tfm(abreq, enc);
- ablkcipher_request_set_callback(abreq, aead_request_flags(req),
- crypto_authenc_esn_encrypt_done, req);
- ablkcipher_request_set_crypt(abreq, src, dst, cryptlen, req->iv);
+ skcipher_request_set_tfm(skreq, enc);
+ skcipher_request_set_callback(skreq, aead_request_flags(req),
+ crypto_authenc_esn_encrypt_done, req);
+ skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
- err = crypto_ablkcipher_encrypt(abreq);
+ err = crypto_skcipher_encrypt(skreq);
if (err)
return err;
@@ -234,8 +237,8 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
unsigned int authsize = crypto_aead_authsize(authenc_esn);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
- struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
- + ctx->reqoff);
+ struct skcipher_request *skreq = (void *)(areq_ctx->tail +
+ ctx->reqoff);
struct crypto_ahash *auth = ctx->auth;
u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail,
crypto_ahash_alignmask(auth) + 1);
@@ -256,12 +259,12 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
sg_init_table(areq_ctx->dst, 2);
dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);
- ablkcipher_request_set_tfm(abreq, ctx->enc);
- ablkcipher_request_set_callback(abreq, flags,
- req->base.complete, req->base.data);
- ablkcipher_request_set_crypt(abreq, dst, dst, cryptlen, req->iv);
+ skcipher_request_set_tfm(skreq, ctx->enc);
+ skcipher_request_set_callback(skreq, flags,
+ req->base.complete, req->base.data);
+ skcipher_request_set_crypt(skreq, dst, dst, cryptlen, req->iv);
- return crypto_ablkcipher_decrypt(abreq);
+ return crypto_skcipher_decrypt(skreq);
}
static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
@@ -331,20 +334,20 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
struct authenc_esn_instance_ctx *ictx = aead_instance_ctx(inst);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_ahash *auth;
- struct crypto_ablkcipher *enc;
- struct crypto_blkcipher *null;
+ struct crypto_skcipher *enc;
+ struct crypto_skcipher *null;
int err;
auth = crypto_spawn_ahash(&ictx->auth);
if (IS_ERR(auth))
return PTR_ERR(auth);
- enc = crypto_spawn_skcipher(&ictx->enc);
+ enc = crypto_spawn_skcipher2(&ictx->enc);
err = PTR_ERR(enc);
if (IS_ERR(enc))
goto err_free_ahash;
- null = crypto_get_default_null_skcipher();
+ null = crypto_get_default_null_skcipher2();
err = PTR_ERR(null);
if (IS_ERR(null))
goto err_free_skcipher;
@@ -361,15 +364,15 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
sizeof(struct authenc_esn_request_ctx) +
ctx->reqoff +
max_t(unsigned int,
- crypto_ahash_reqsize(auth) +
- sizeof(struct ahash_request),
- sizeof(struct skcipher_givcrypt_request) +
- crypto_ablkcipher_reqsize(enc)));
+ crypto_ahash_reqsize(auth) +
+ sizeof(struct ahash_request),
+ sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(enc)));
return 0;
err_free_skcipher:
- crypto_free_ablkcipher(enc);
+ crypto_free_skcipher(enc);
err_free_ahash:
crypto_free_ahash(auth);
return err;
@@ -380,8 +383,8 @@ static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm)
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_ahash(ctx->auth);
- crypto_free_ablkcipher(ctx->enc);
- crypto_put_default_null_skcipher();
+ crypto_free_skcipher(ctx->enc);
+ crypto_put_default_null_skcipher2();
}
static void crypto_authenc_esn_free(struct aead_instance *inst)
@@ -400,7 +403,7 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
struct aead_instance *inst;
struct hash_alg_common *auth;
struct crypto_alg *auth_base;
- struct crypto_alg *enc;
+ struct skcipher_alg *enc;
struct authenc_esn_instance_ctx *ctx;
const char *enc_name;
int err;
@@ -413,7 +416,8 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
return -EINVAL;
auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ CRYPTO_ALG_TYPE_AHASH_MASK |
+ crypto_requires_sync(algt->type, algt->mask));
if (IS_ERR(auth))
return PTR_ERR(auth);
@@ -437,34 +441,36 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
goto err_free_inst;
crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
- err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher2(&ctx->enc, enc_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (err)
goto err_drop_auth;
- enc = crypto_skcipher_spawn_alg(&ctx->enc);
+ enc = crypto_spawn_skcipher_alg(&ctx->enc);
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"authencesn(%s,%s)", auth_base->cra_name,
- enc->cra_name) >= CRYPTO_MAX_ALG_NAME)
+ enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
goto err_drop_enc;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"authencesn(%s,%s)", auth_base->cra_driver_name,
- enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_drop_enc;
- inst->alg.base.cra_flags = enc->cra_flags & CRYPTO_ALG_ASYNC;
- inst->alg.base.cra_priority = enc->cra_priority * 10 +
+ inst->alg.base.cra_flags = (auth_base->cra_flags |
+ enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
+ inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
auth_base->cra_priority;
- inst->alg.base.cra_blocksize = enc->cra_blocksize;
+ inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
- enc->cra_alignmask;
+ enc->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx);
- inst->alg.ivsize = enc->cra_ablkcipher.ivsize;
+ inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
+ inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
inst->alg.maxauthsize = auth->digestsize;
inst->alg.init = crypto_authenc_esn_init_tfm;
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 8cc1622b2ee0..369999530108 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -21,7 +21,6 @@
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -466,10 +465,6 @@ static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
crt->setkey = async_setkey;
crt->encrypt = async_encrypt;
crt->decrypt = async_decrypt;
- if (!alg->ivsize) {
- crt->givencrypt = skcipher_null_givencrypt;
- crt->givdecrypt = skcipher_null_givdecrypt;
- }
crt->base = __crypto_ablkcipher_cast(tfm);
crt->ivsize = alg->ivsize;
@@ -560,185 +555,5 @@ const struct crypto_type crypto_blkcipher_type = {
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
-static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
- const char *name, u32 type, u32 mask)
-{
- struct crypto_alg *alg;
- int err;
-
- type = crypto_skcipher_type(type);
- mask = crypto_skcipher_mask(mask)| CRYPTO_ALG_GENIV;
-
- alg = crypto_alg_mod_lookup(name, type, mask);
- if (IS_ERR(alg))
- return PTR_ERR(alg);
-
- err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
- crypto_mod_put(alg);
- return err;
-}
-
-struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
- struct rtattr **tb, u32 type,
- u32 mask)
-{
- struct {
- int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct ablkcipher_request *req);
- int (*decrypt)(struct ablkcipher_request *req);
-
- unsigned int min_keysize;
- unsigned int max_keysize;
- unsigned int ivsize;
-
- const char *geniv;
- } balg;
- const char *name;
- struct crypto_skcipher_spawn *spawn;
- struct crypto_attr_type *algt;
- struct crypto_instance *inst;
- struct crypto_alg *alg;
- int err;
-
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return ERR_CAST(algt);
-
- if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
- algt->mask)
- return ERR_PTR(-EINVAL);
-
- name = crypto_attr_alg_name(tb[1]);
- if (IS_ERR(name))
- return ERR_CAST(name);
-
- inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
- if (!inst)
- return ERR_PTR(-ENOMEM);
-
- spawn = crypto_instance_ctx(inst);
-
- /* Ignore async algorithms if necessary. */
- mask |= crypto_requires_sync(algt->type, algt->mask);
-
- crypto_set_skcipher_spawn(spawn, inst);
- err = crypto_grab_nivcipher(spawn, name, type, mask);
- if (err)
- goto err_free_inst;
-
- alg = crypto_skcipher_spawn_alg(spawn);
-
- if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
- CRYPTO_ALG_TYPE_BLKCIPHER) {
- balg.ivsize = alg->cra_blkcipher.ivsize;
- balg.min_keysize = alg->cra_blkcipher.min_keysize;
- balg.max_keysize = alg->cra_blkcipher.max_keysize;
-
- balg.setkey = async_setkey;
- balg.encrypt = async_encrypt;
- balg.decrypt = async_decrypt;
-
- balg.geniv = alg->cra_blkcipher.geniv;
- } else {
- balg.ivsize = alg->cra_ablkcipher.ivsize;
- balg.min_keysize = alg->cra_ablkcipher.min_keysize;
- balg.max_keysize = alg->cra_ablkcipher.max_keysize;
-
- balg.setkey = alg->cra_ablkcipher.setkey;
- balg.encrypt = alg->cra_ablkcipher.encrypt;
- balg.decrypt = alg->cra_ablkcipher.decrypt;
-
- balg.geniv = alg->cra_ablkcipher.geniv;
- }
-
- err = -EINVAL;
- if (!balg.ivsize)
- goto err_drop_alg;
-
- /*
- * This is only true if we're constructing an algorithm with its
- * default IV generator. For the default generator we elide the
- * template name and double-check the IV generator.
- */
- if (algt->mask & CRYPTO_ALG_GENIV) {
- if (!balg.geniv)
- balg.geniv = crypto_default_geniv(alg);
- err = -EAGAIN;
- if (strcmp(tmpl->name, balg.geniv))
- goto err_drop_alg;
-
- memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
- memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
- CRYPTO_MAX_ALG_NAME);
- } else {
- err = -ENAMETOOLONG;
- if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
- "%s(%s)", tmpl->name, alg->cra_name) >=
- CRYPTO_MAX_ALG_NAME)
- goto err_drop_alg;
- if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "%s(%s)", tmpl->name, alg->cra_driver_name) >=
- CRYPTO_MAX_ALG_NAME)
- goto err_drop_alg;
- }
-
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
- inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = alg->cra_blocksize;
- inst->alg.cra_alignmask = alg->cra_alignmask;
- inst->alg.cra_type = &crypto_givcipher_type;
-
- inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
- inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
- inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
- inst->alg.cra_ablkcipher.geniv = balg.geniv;
-
- inst->alg.cra_ablkcipher.setkey = balg.setkey;
- inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
- inst->alg.cra_ablkcipher.decrypt = balg.decrypt;
-
-out:
- return inst;
-
-err_drop_alg:
- crypto_drop_skcipher(spawn);
-err_free_inst:
- kfree(inst);
- inst = ERR_PTR(err);
- goto out;
-}
-EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);
-
-void skcipher_geniv_free(struct crypto_instance *inst)
-{
- crypto_drop_skcipher(crypto_instance_ctx(inst));
- kfree(inst);
-}
-EXPORT_SYMBOL_GPL(skcipher_geniv_free);
-
-int skcipher_geniv_init(struct crypto_tfm *tfm)
-{
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_ablkcipher *cipher;
-
- cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
- if (IS_ERR(cipher))
- return PTR_ERR(cipher);
-
- tfm->crt_ablkcipher.base = cipher;
- tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(skcipher_geniv_init);
-
-void skcipher_geniv_exit(struct crypto_tfm *tfm)
-{
- crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
-}
-EXPORT_SYMBOL_GPL(skcipher_geniv_exit);
-
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");
diff --git a/crypto/ccm.c b/crypto/ccm.c
index cc31ea4335bf..006d8575ef5c 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -28,7 +28,7 @@ struct ccm_instance_ctx {
struct crypto_ccm_ctx {
struct crypto_cipher *cipher;
- struct crypto_ablkcipher *ctr;
+ struct crypto_skcipher *ctr;
};
struct crypto_rfc4309_ctx {
@@ -50,7 +50,7 @@ struct crypto_ccm_req_priv_ctx {
u32 flags;
struct scatterlist src[3];
struct scatterlist dst[3];
- struct ablkcipher_request abreq;
+ struct skcipher_request skreq;
};
static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx(
@@ -83,15 +83,15 @@ static int crypto_ccm_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
- struct crypto_ablkcipher *ctr = ctx->ctr;
+ struct crypto_skcipher *ctr = ctx->ctr;
struct crypto_cipher *tfm = ctx->cipher;
int err = 0;
- crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
- crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_ablkcipher_setkey(ctr, key, keylen);
- crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) &
+ crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_skcipher_setkey(ctr, key, keylen);
+ crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
CRYPTO_TFM_RES_MASK);
if (err)
goto out;
@@ -347,7 +347,7 @@ static int crypto_ccm_encrypt(struct aead_request *req)
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
- struct ablkcipher_request *abreq = &pctx->abreq;
+ struct skcipher_request *skreq = &pctx->skreq;
struct scatterlist *dst;
unsigned int cryptlen = req->cryptlen;
u8 *odata = pctx->odata;
@@ -366,11 +366,11 @@ static int crypto_ccm_encrypt(struct aead_request *req)
if (req->src != req->dst)
dst = pctx->dst;
- ablkcipher_request_set_tfm(abreq, ctx->ctr);
- ablkcipher_request_set_callback(abreq, pctx->flags,
- crypto_ccm_encrypt_done, req);
- ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
- err = crypto_ablkcipher_encrypt(abreq);
+ skcipher_request_set_tfm(skreq, ctx->ctr);
+ skcipher_request_set_callback(skreq, pctx->flags,
+ crypto_ccm_encrypt_done, req);
+ skcipher_request_set_crypt(skreq, pctx->src, dst, cryptlen + 16, iv);
+ err = crypto_skcipher_encrypt(skreq);
if (err)
return err;
@@ -407,7 +407,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
- struct ablkcipher_request *abreq = &pctx->abreq;
+ struct skcipher_request *skreq = &pctx->skreq;
struct scatterlist *dst;
unsigned int authsize = crypto_aead_authsize(aead);
unsigned int cryptlen = req->cryptlen;
@@ -429,11 +429,11 @@ static int crypto_ccm_decrypt(struct aead_request *req)
if (req->src != req->dst)
dst = pctx->dst;
- ablkcipher_request_set_tfm(abreq, ctx->ctr);
- ablkcipher_request_set_callback(abreq, pctx->flags,
- crypto_ccm_decrypt_done, req);
- ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
- err = crypto_ablkcipher_decrypt(abreq);
+ skcipher_request_set_tfm(skreq, ctx->ctr);
+ skcipher_request_set_callback(skreq, pctx->flags,
+ crypto_ccm_decrypt_done, req);
+ skcipher_request_set_crypt(skreq, pctx->src, dst, cryptlen + 16, iv);
+ err = crypto_skcipher_decrypt(skreq);
if (err)
return err;
@@ -454,7 +454,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
struct ccm_instance_ctx *ictx = aead_instance_ctx(inst);
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_cipher *cipher;
- struct crypto_ablkcipher *ctr;
+ struct crypto_skcipher *ctr;
unsigned long align;
int err;
@@ -462,7 +462,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(cipher))
return PTR_ERR(cipher);
- ctr = crypto_spawn_skcipher(&ictx->ctr);
+ ctr = crypto_spawn_skcipher2(&ictx->ctr);
err = PTR_ERR(ctr);
if (IS_ERR(ctr))
goto err_free_cipher;
@@ -475,7 +475,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
crypto_aead_set_reqsize(
tfm,
align + sizeof(struct crypto_ccm_req_priv_ctx) +
- crypto_ablkcipher_reqsize(ctr));
+ crypto_skcipher_reqsize(ctr));
return 0;
@@ -489,7 +489,7 @@ static void crypto_ccm_exit_tfm(struct crypto_aead *tfm)
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_cipher(ctx->cipher);
- crypto_free_ablkcipher(ctx->ctr);
+ crypto_free_skcipher(ctx->ctr);
}
static void crypto_ccm_free(struct aead_instance *inst)
@@ -509,7 +509,7 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
{
struct crypto_attr_type *algt;
struct aead_instance *inst;
- struct crypto_alg *ctr;
+ struct skcipher_alg *ctr;
struct crypto_alg *cipher;
struct ccm_instance_ctx *ictx;
int err;
@@ -544,39 +544,40 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
goto err_free_inst;
crypto_set_skcipher_spawn(&ictx->ctr, aead_crypto_instance(inst));
- err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher2(&ictx->ctr, ctr_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (err)
goto err_drop_cipher;
- ctr = crypto_skcipher_spawn_alg(&ictx->ctr);
+ ctr = crypto_spawn_skcipher_alg(&ictx->ctr);
/* Not a stream cipher? */
err = -EINVAL;
- if (ctr->cra_blocksize != 1)
+ if (ctr->base.cra_blocksize != 1)
goto err_drop_ctr;
/* We want the real thing! */
- if (ctr->cra_ablkcipher.ivsize != 16)
+ if (crypto_skcipher_alg_ivsize(ctr) != 16)
goto err_drop_ctr;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "ccm_base(%s,%s)", ctr->cra_driver_name,
+ "ccm_base(%s,%s)", ctr->base.cra_driver_name,
cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_drop_ctr;
memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
- inst->alg.base.cra_flags = ctr->cra_flags & CRYPTO_ALG_ASYNC;
+ inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = (cipher->cra_priority +
- ctr->cra_priority) / 2;
+ ctr->base.cra_priority) / 2;
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = cipher->cra_alignmask |
- ctr->cra_alignmask |
+ ctr->base.cra_alignmask |
(__alignof__(u32) - 1);
inst->alg.ivsize = 16;
+ inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr);
inst->alg.maxauthsize = 16;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_ccm_ctx);
inst->alg.init = crypto_ccm_init_tfm;
@@ -863,6 +864,7 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl,
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
inst->alg.ivsize = 8;
+ inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
inst->alg.maxauthsize = 16;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx);
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index 7b6b935cef23..e899ef51dc8e 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -31,7 +31,7 @@ struct chachapoly_instance_ctx {
};
struct chachapoly_ctx {
- struct crypto_ablkcipher *chacha;
+ struct crypto_skcipher *chacha;
struct crypto_ahash *poly;
/* key bytes we use for the ChaCha20 IV */
unsigned int saltlen;
@@ -53,7 +53,7 @@ struct poly_req {
struct chacha_req {
u8 iv[CHACHA20_IV_SIZE];
struct scatterlist src[1];
- struct ablkcipher_request req; /* must be last member */
+ struct skcipher_request req; /* must be last member */
};
struct chachapoly_req_ctx {
@@ -144,12 +144,12 @@ static int chacha_decrypt(struct aead_request *req)
dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
}
- ablkcipher_request_set_callback(&creq->req, aead_request_flags(req),
- chacha_decrypt_done, req);
- ablkcipher_request_set_tfm(&creq->req, ctx->chacha);
- ablkcipher_request_set_crypt(&creq->req, src, dst,
- rctx->cryptlen, creq->iv);
- err = crypto_ablkcipher_decrypt(&creq->req);
+ skcipher_request_set_callback(&creq->req, aead_request_flags(req),
+ chacha_decrypt_done, req);
+ skcipher_request_set_tfm(&creq->req, ctx->chacha);
+ skcipher_request_set_crypt(&creq->req, src, dst,
+ rctx->cryptlen, creq->iv);
+ err = crypto_skcipher_decrypt(&creq->req);
if (err)
return err;
@@ -393,13 +393,13 @@ static int poly_genkey(struct aead_request *req)
chacha_iv(creq->iv, req, 0);
- ablkcipher_request_set_callback(&creq->req, aead_request_flags(req),
- poly_genkey_done, req);
- ablkcipher_request_set_tfm(&creq->req, ctx->chacha);
- ablkcipher_request_set_crypt(&creq->req, creq->src, creq->src,
- POLY1305_KEY_SIZE, creq->iv);
+ skcipher_request_set_callback(&creq->req, aead_request_flags(req),
+ poly_genkey_done, req);
+ skcipher_request_set_tfm(&creq->req, ctx->chacha);
+ skcipher_request_set_crypt(&creq->req, creq->src, creq->src,
+ POLY1305_KEY_SIZE, creq->iv);
- err = crypto_ablkcipher_decrypt(&creq->req);
+ err = crypto_skcipher_decrypt(&creq->req);
if (err)
return err;
@@ -433,12 +433,12 @@ static int chacha_encrypt(struct aead_request *req)
dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
}
- ablkcipher_request_set_callback(&creq->req, aead_request_flags(req),
- chacha_encrypt_done, req);
- ablkcipher_request_set_tfm(&creq->req, ctx->chacha);
- ablkcipher_request_set_crypt(&creq->req, src, dst,
- req->cryptlen, creq->iv);
- err = crypto_ablkcipher_encrypt(&creq->req);
+ skcipher_request_set_callback(&creq->req, aead_request_flags(req),
+ chacha_encrypt_done, req);
+ skcipher_request_set_tfm(&creq->req, ctx->chacha);
+ skcipher_request_set_crypt(&creq->req, src, dst,
+ req->cryptlen, creq->iv);
+ err = crypto_skcipher_encrypt(&creq->req);
if (err)
return err;
@@ -500,13 +500,13 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
keylen -= ctx->saltlen;
memcpy(ctx->salt, key + keylen, ctx->saltlen);
- crypto_ablkcipher_clear_flags(ctx->chacha, CRYPTO_TFM_REQ_MASK);
- crypto_ablkcipher_set_flags(ctx->chacha, crypto_aead_get_flags(aead) &
- CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_clear_flags(ctx->chacha, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->chacha, crypto_aead_get_flags(aead) &
+ CRYPTO_TFM_REQ_MASK);
- err = crypto_ablkcipher_setkey(ctx->chacha, key, keylen);
- crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctx->chacha) &
- CRYPTO_TFM_RES_MASK);
+ err = crypto_skcipher_setkey(ctx->chacha, key, keylen);
+ crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctx->chacha) &
+ CRYPTO_TFM_RES_MASK);
return err;
}
@@ -524,7 +524,7 @@ static int chachapoly_init(struct crypto_aead *tfm)
struct aead_instance *inst = aead_alg_instance(tfm);
struct chachapoly_instance_ctx *ictx = aead_instance_ctx(inst);
struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
- struct crypto_ablkcipher *chacha;
+ struct crypto_skcipher *chacha;
struct crypto_ahash *poly;
unsigned long align;
@@ -532,7 +532,7 @@ static int chachapoly_init(struct crypto_aead *tfm)
if (IS_ERR(poly))
return PTR_ERR(poly);
- chacha = crypto_spawn_skcipher(&ictx->chacha);
+ chacha = crypto_spawn_skcipher2(&ictx->chacha);
if (IS_ERR(chacha)) {
crypto_free_ahash(poly);
return PTR_ERR(chacha);
@@ -548,8 +548,8 @@ static int chachapoly_init(struct crypto_aead *tfm)
tfm,
align + offsetof(struct chachapoly_req_ctx, u) +
max(offsetof(struct chacha_req, req) +
- sizeof(struct ablkcipher_request) +
- crypto_ablkcipher_reqsize(chacha),
+ sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(chacha),
offsetof(struct poly_req, req) +
sizeof(struct ahash_request) +
crypto_ahash_reqsize(poly)));
@@ -562,7 +562,7 @@ static void chachapoly_exit(struct crypto_aead *tfm)
struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_ahash(ctx->poly);
- crypto_free_ablkcipher(ctx->chacha);
+ crypto_free_skcipher(ctx->chacha);
}
static void chachapoly_free(struct aead_instance *inst)
@@ -579,7 +579,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
{
struct crypto_attr_type *algt;
struct aead_instance *inst;
- struct crypto_alg *chacha;
+ struct skcipher_alg *chacha;
struct crypto_alg *poly;
struct hash_alg_common *poly_hash;
struct chachapoly_instance_ctx *ctx;
@@ -605,7 +605,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
poly = crypto_find_alg(poly_name, &crypto_ahash_type,
CRYPTO_ALG_TYPE_HASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ CRYPTO_ALG_TYPE_AHASH_MASK |
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (IS_ERR(poly))
return PTR_ERR(poly);
@@ -623,20 +625,20 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
goto err_free_inst;
crypto_set_skcipher_spawn(&ctx->chacha, aead_crypto_instance(inst));
- err = crypto_grab_skcipher(&ctx->chacha, chacha_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher2(&ctx->chacha, chacha_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (err)
goto err_drop_poly;
- chacha = crypto_skcipher_spawn_alg(&ctx->chacha);
+ chacha = crypto_spawn_skcipher_alg(&ctx->chacha);
err = -EINVAL;
/* Need 16-byte IV size, including Initial Block Counter value */
- if (chacha->cra_ablkcipher.ivsize != CHACHA20_IV_SIZE)
+ if (crypto_skcipher_alg_ivsize(chacha) != CHACHA20_IV_SIZE)
goto out_drop_chacha;
/* Not a stream cipher? */
- if (chacha->cra_blocksize != 1)
+ if (chacha->base.cra_blocksize != 1)
goto out_drop_chacha;
err = -ENAMETOOLONG;
@@ -645,20 +647,21 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
poly_name) >= CRYPTO_MAX_ALG_NAME)
goto out_drop_chacha;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "%s(%s,%s)", name, chacha->cra_driver_name,
+ "%s(%s,%s)", name, chacha->base.cra_driver_name,
poly->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto out_drop_chacha;
- inst->alg.base.cra_flags = (chacha->cra_flags | poly->cra_flags) &
+ inst->alg.base.cra_flags = (chacha->base.cra_flags | poly->cra_flags) &
CRYPTO_ALG_ASYNC;
- inst->alg.base.cra_priority = (chacha->cra_priority +
+ inst->alg.base.cra_priority = (chacha->base.cra_priority +
poly->cra_priority) / 2;
inst->alg.base.cra_blocksize = 1;
- inst->alg.base.cra_alignmask = chacha->cra_alignmask |
+ inst->alg.base.cra_alignmask = chacha->base.cra_alignmask |
poly->cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) +
ctx->saltlen;
inst->alg.ivsize = ivsize;
+ inst->alg.chunksize = crypto_skcipher_alg_chunksize(chacha);
inst->alg.maxauthsize = POLY1305_DIGEST_SIZE;
inst->alg.init = chachapoly_init;
inst->alg.exit = chachapoly_exit;
diff --git a/crypto/chainiv.c b/crypto/chainiv.c
deleted file mode 100644
index b4340018c8d4..000000000000
--- a/crypto/chainiv.c
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * chainiv: Chain IV Generator
- *
- * Generate IVs simply be using the last block of the previous encryption.
- * This is mainly useful for CBC with a synchronous algorithm.
- *
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#include <crypto/internal/skcipher.h>
-#include <crypto/rng.h>
-#include <crypto/crypto_wq.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/workqueue.h>
-
-enum {
- CHAINIV_STATE_INUSE = 0,
-};
-
-struct chainiv_ctx {
- spinlock_t lock;
- char iv[];
-};
-
-struct async_chainiv_ctx {
- unsigned long state;
-
- spinlock_t lock;
- int err;
-
- struct crypto_queue queue;
- struct work_struct postponed;
-
- char iv[];
-};
-
-static int chainiv_givencrypt(struct skcipher_givcrypt_request *req)
-{
- struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
- struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
- struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
- unsigned int ivsize;
- int err;
-
- ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
- ablkcipher_request_set_callback(subreq, req->creq.base.flags &
- ~CRYPTO_TFM_REQ_MAY_SLEEP,
- req->creq.base.complete,
- req->creq.base.data);
- ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
- req->creq.nbytes, req->creq.info);
-
- spin_lock_bh(&ctx->lock);
-
- ivsize = crypto_ablkcipher_ivsize(geniv);
-
- memcpy(req->giv, ctx->iv, ivsize);
- memcpy(subreq->info, ctx->iv, ivsize);
-
- err = crypto_ablkcipher_encrypt(subreq);
- if (err)
- goto unlock;
-
- memcpy(ctx->iv, subreq->info, ivsize);
-
-unlock:
- spin_unlock_bh(&ctx->lock);
-
- return err;
-}
-
-static int chainiv_init_common(struct crypto_tfm *tfm, char iv[])
-{
- struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
- int err = 0;
-
- tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
-
- if (iv) {
- err = crypto_rng_get_bytes(crypto_default_rng, iv,
- crypto_ablkcipher_ivsize(geniv));
- crypto_put_default_rng();
- }
-
- return err ?: skcipher_geniv_init(tfm);
-}
-
-static int chainiv_init(struct crypto_tfm *tfm)
-{
- struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
- struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
- char *iv;
-
- spin_lock_init(&ctx->lock);
-
- iv = NULL;
- if (!crypto_get_default_rng()) {
- crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt;
- iv = ctx->iv;
- }
-
- return chainiv_init_common(tfm, iv);
-}
-
-static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
-{
- int queued;
- int err = ctx->err;
-
- if (!ctx->queue.qlen) {
- smp_mb__before_atomic();
- clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
-
- if (!ctx->queue.qlen ||
- test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
- goto out;
- }
-
- queued = queue_work(kcrypto_wq, &ctx->postponed);
- BUG_ON(!queued);
-
-out:
- return err;
-}
-
-static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req)
-{
- struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
- struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
- int err;
-
- spin_lock_bh(&ctx->lock);
- err = skcipher_enqueue_givcrypt(&ctx->queue, req);
- spin_unlock_bh(&ctx->lock);
-
- if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
- return err;
-
- ctx->err = err;
- return async_chainiv_schedule_work(ctx);
-}
-
-static int async_chainiv_givencrypt_tail(struct skcipher_givcrypt_request *req)
-{
- struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
- struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
- struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
- unsigned int ivsize = crypto_ablkcipher_ivsize(geniv);
-
- memcpy(req->giv, ctx->iv, ivsize);
- memcpy(subreq->info, ctx->iv, ivsize);
-
- ctx->err = crypto_ablkcipher_encrypt(subreq);
- if (ctx->err)
- goto out;
-
- memcpy(ctx->iv, subreq->info, ivsize);
-
-out:
- return async_chainiv_schedule_work(ctx);
-}
-
-static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req)
-{
- struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
- struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
- struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
-
- ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
- ablkcipher_request_set_callback(subreq, req->creq.base.flags,
- req->creq.base.complete,
- req->creq.base.data);
- ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
- req->creq.nbytes, req->creq.info);
-
- if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
- goto postpone;
-
- if (ctx->queue.qlen) {
- clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
- goto postpone;
- }
-
- return async_chainiv_givencrypt_tail(req);
-
-postpone:
- return async_chainiv_postpone_request(req);
-}
-
-static void async_chainiv_do_postponed(struct work_struct *work)
-{
- struct async_chainiv_ctx *ctx = container_of(work,
- struct async_chainiv_ctx,
- postponed);
- struct skcipher_givcrypt_request *req;
- struct ablkcipher_request *subreq;
- int err;
-
- /* Only handle one request at a time to avoid hogging keventd. */
- spin_lock_bh(&ctx->lock);
- req = skcipher_dequeue_givcrypt(&ctx->queue);
- spin_unlock_bh(&ctx->lock);
-
- if (!req) {
- async_chainiv_schedule_work(ctx);
- return;
- }
-
- subreq = skcipher_givcrypt_reqctx(req);
- subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
-
- err = async_chainiv_givencrypt_tail(req);
-
- local_bh_disable();
- skcipher_givcrypt_complete(req, err);
- local_bh_enable();
-}
-
-static int async_chainiv_init(struct crypto_tfm *tfm)
-{
- struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
- struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
- char *iv;
-
- spin_lock_init(&ctx->lock);
-
- crypto_init_queue(&ctx->queue, 100);
- INIT_WORK(&ctx->postponed, async_chainiv_do_postponed);
-
- iv = NULL;
- if (!crypto_get_default_rng()) {
- crypto_ablkcipher_crt(geniv)->givencrypt =
- async_chainiv_givencrypt;
- iv = ctx->iv;
- }
-
- return chainiv_init_common(tfm, iv);
-}
-
-static void async_chainiv_exit(struct crypto_tfm *tfm)
-{
- struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
-
- BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen);
-
- skcipher_geniv_exit(tfm);
-}
-
-static struct crypto_template chainiv_tmpl;
-
-static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
-{
- struct crypto_attr_type *algt;
- struct crypto_instance *inst;
-
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return ERR_CAST(algt);
-
- inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0);
- if (IS_ERR(inst))
- goto out;
-
- inst->alg.cra_init = chainiv_init;
- inst->alg.cra_exit = skcipher_geniv_exit;
-
- inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx);
-
- if (!crypto_requires_sync(algt->type, algt->mask)) {
- inst->alg.cra_flags |= CRYPTO_ALG_ASYNC;
-
- inst->alg.cra_init = async_chainiv_init;
- inst->alg.cra_exit = async_chainiv_exit;
-
- inst->alg.cra_ctxsize = sizeof(struct async_chainiv_ctx);
- }
-
- inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
-
-out:
- return inst;
-}
-
-static struct crypto_template chainiv_tmpl = {
- .name = "chainiv",
- .alloc = chainiv_alloc,
- .free = skcipher_geniv_free,
- .module = THIS_MODULE,
-};
-
-static int __init chainiv_module_init(void)
-{
- return crypto_register_template(&chainiv_tmpl);
-}
-
-static void chainiv_module_exit(void)
-{
- crypto_unregister_template(&chainiv_tmpl);
-}
-
-module_init(chainiv_module_init);
-module_exit(chainiv_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Chain IV Generator");
-MODULE_ALIAS_CRYPTO("chainiv");
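
[Editor's note: the removed template boiled down to one idea, stated in its own header comment: the IV for the next message is the last ciphertext block of the previous one. A minimal sketch of that idea — chain_state and cbc_encrypt are illustrative names, not kernel API:

    struct chain_state {
            u8 iv[16];              /* running IV, seeded from the RNG */
            unsigned int bsize;
            /* key material, cipher handle, lock ... */
    };

    static int chain_iv_encrypt(struct chain_state *st,
                                u8 *dst, const u8 *src, unsigned int len)
    {
            int err = cbc_encrypt(st, st->iv, dst, src, len);

            /* IV for the next message = last ciphertext block of this one */
            if (!err)
                    memcpy(st->iv, dst + len - st->bsize, st->bsize);
            return err;
    }

With the givcrypt interface gone, the IV-generator role survives only for AEADs, via seqiv and echainiv.]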
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 7921251cdb13..cf8037a87b2d 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -22,6 +22,7 @@
#include <crypto/internal/aead.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
+#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -31,7 +32,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
-#define CRYPTD_MAX_CPU_QLEN 100
+#define CRYPTD_MAX_CPU_QLEN 1000
struct cryptd_cpu_queue {
struct crypto_queue queue;
@@ -58,6 +59,7 @@ struct aead_instance_ctx {
};
struct cryptd_blkcipher_ctx {
+ atomic_t refcnt;
struct crypto_blkcipher *child;
};
@@ -66,6 +68,7 @@ struct cryptd_blkcipher_request_ctx {
};
struct cryptd_hash_ctx {
+ atomic_t refcnt;
struct crypto_shash *child;
};
@@ -75,6 +78,7 @@ struct cryptd_hash_request_ctx {
};
struct cryptd_aead_ctx {
+ atomic_t refcnt;
struct crypto_aead *child;
};
@@ -118,11 +122,29 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
{
int cpu, err;
struct cryptd_cpu_queue *cpu_queue;
+ struct crypto_tfm *tfm;
+ atomic_t *refcnt;
+ bool may_backlog;
cpu = get_cpu();
cpu_queue = this_cpu_ptr(queue->cpu_queue);
err = crypto_enqueue_request(&cpu_queue->queue, request);
+
+ refcnt = crypto_tfm_ctx(request->tfm);
+ may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
+
+ if (err == -EBUSY && !may_backlog)
+ goto out_put_cpu;
+
queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
+
+ if (!atomic_read(refcnt))
+ goto out_put_cpu;
+
+ tfm = request->tfm;
+ atomic_inc(refcnt);
+
+out_put_cpu:
put_cpu();
return err;
@@ -206,7 +228,10 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
unsigned int len))
{
struct cryptd_blkcipher_request_ctx *rctx;
+ struct cryptd_blkcipher_ctx *ctx;
+ struct crypto_ablkcipher *tfm;
struct blkcipher_desc desc;
+ int refcnt;
rctx = ablkcipher_request_ctx(req);
@@ -222,9 +247,16 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
req->base.complete = rctx->complete;
out:
+ tfm = crypto_ablkcipher_reqtfm(req);
+ ctx = crypto_ablkcipher_ctx(tfm);
+ refcnt = atomic_read(&ctx->refcnt);
+
local_bh_disable();
rctx->complete(&req->base, err);
local_bh_enable();
+
+ if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
+ crypto_free_ablkcipher(tfm);
}
static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
@@ -456,6 +488,21 @@ static int cryptd_hash_enqueue(struct ahash_request *req,
return cryptd_enqueue_request(queue, &req->base);
}
+static void cryptd_hash_complete(struct ahash_request *req, int err)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+ int refcnt = atomic_read(&ctx->refcnt);
+
+ local_bh_disable();
+ rctx->complete(&req->base, err);
+ local_bh_enable();
+
+ if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
+ crypto_free_ahash(tfm);
+}
+
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
@@ -475,9 +522,7 @@ static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
req->base.complete = rctx->complete;
out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
+ cryptd_hash_complete(req, err);
}
static int cryptd_hash_init_enqueue(struct ahash_request *req)
@@ -500,9 +545,7 @@ static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
req->base.complete = rctx->complete;
out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
+ cryptd_hash_complete(req, err);
}
static int cryptd_hash_update_enqueue(struct ahash_request *req)
@@ -523,9 +566,7 @@ static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
req->base.complete = rctx->complete;
out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
+ cryptd_hash_complete(req, err);
}
static int cryptd_hash_final_enqueue(struct ahash_request *req)
@@ -546,9 +587,7 @@ static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
req->base.complete = rctx->complete;
out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
+ cryptd_hash_complete(req, err);
}
static int cryptd_hash_finup_enqueue(struct ahash_request *req)
@@ -575,9 +614,7 @@ static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
req->base.complete = rctx->complete;
out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
+ cryptd_hash_complete(req, err);
}
static int cryptd_hash_digest_enqueue(struct ahash_request *req)
@@ -688,7 +725,10 @@ static void cryptd_aead_crypt(struct aead_request *req,
int (*crypt)(struct aead_request *req))
{
struct cryptd_aead_request_ctx *rctx;
+ struct cryptd_aead_ctx *ctx;
crypto_completion_t compl;
+ struct crypto_aead *tfm;
+ int refcnt;
rctx = aead_request_ctx(req);
compl = rctx->complete;
@@ -697,10 +737,18 @@ static void cryptd_aead_crypt(struct aead_request *req,
goto out;
aead_request_set_tfm(req, child);
err = crypt( req );
+
out:
+ tfm = crypto_aead_reqtfm(req);
+ ctx = crypto_aead_ctx(tfm);
+ refcnt = atomic_read(&ctx->refcnt);
+
local_bh_disable();
compl(&req->base, err);
local_bh_enable();
+
+ if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
+ crypto_free_aead(tfm);
}
static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
@@ -883,6 +931,7 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
u32 type, u32 mask)
{
char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+ struct cryptd_blkcipher_ctx *ctx;
struct crypto_tfm *tfm;
if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
@@ -899,6 +948,9 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
return ERR_PTR(-EINVAL);
}
+ ctx = crypto_tfm_ctx(tfm);
+ atomic_set(&ctx->refcnt, 1);
+
return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
@@ -910,9 +962,20 @@ struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
+bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
+{
+ struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
+
+ return atomic_read(&ctx->refcnt) - 1;
+}
+EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);
+
void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
- crypto_free_ablkcipher(&tfm->base);
+ struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
+
+ if (atomic_dec_and_test(&ctx->refcnt))
+ crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
@@ -920,6 +983,7 @@ struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
u32 type, u32 mask)
{
char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+ struct cryptd_hash_ctx *ctx;
struct crypto_ahash *tfm;
if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
@@ -933,6 +997,9 @@ struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
return ERR_PTR(-EINVAL);
}
+ ctx = crypto_ahash_ctx(tfm);
+ atomic_set(&ctx->refcnt, 1);
+
return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
@@ -952,9 +1019,20 @@ struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);
+bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
+{
+ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
+
+ return atomic_read(&ctx->refcnt) - 1;
+}
+EXPORT_SYMBOL_GPL(cryptd_ahash_queued);
+
void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
- crypto_free_ahash(&tfm->base);
+ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
+
+ if (atomic_dec_and_test(&ctx->refcnt))
+ crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);
@@ -962,6 +1040,7 @@ struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
u32 type, u32 mask)
{
char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+ struct cryptd_aead_ctx *ctx;
struct crypto_aead *tfm;
if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
@@ -974,6 +1053,10 @@ struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
crypto_free_aead(tfm);
return ERR_PTR(-EINVAL);
}
+
+ ctx = crypto_aead_ctx(tfm);
+ atomic_set(&ctx->refcnt, 1);
+
return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
@@ -986,9 +1069,20 @@ struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);
+bool cryptd_aead_queued(struct cryptd_aead *tfm)
+{
+ struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
+
+ return atomic_read(&ctx->refcnt) - 1;
+}
+EXPORT_SYMBOL_GPL(cryptd_aead_queued);
+
void cryptd_free_aead(struct cryptd_aead *tfm)
{
- crypto_free_aead(&tfm->base);
+ struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
+
+ if (atomic_dec_and_test(&ctx->refcnt))
+ crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);
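
[Editor's note: the refcounting added throughout cryptd above follows one pattern per tfm type. A rough summary plus a hypothetical caller (error handling elided; the alg name is an example):

    /*
     * cryptd_alloc_ahash()      atomic_set(&ctx->refcnt, 1)
     * cryptd_enqueue_request()  atomic_inc(&ctx->refcnt)  (per queued req)
     * cryptd_hash_complete()    atomic_dec_and_test() -> crypto_free_ahash()
     * cryptd_free_ahash()       atomic_dec_and_test() -> crypto_free_ahash()
     *
     * The tfm thus outlives cryptd_free_*() until the last queued
     * request completes, whichever side drops the final reference.
     */
    struct cryptd_ahash *tfm = cryptd_alloc_ahash("sha256-generic", 0, 0);

    /* ... submit ahash requests through &tfm->base ... */

    if (cryptd_ahash_queued(tfm))
            pr_debug("requests still in flight; teardown is deferred\n");
    cryptd_free_ahash(tfm);     /* safe even while requests are queued */
]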
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 941c9a434d50..20ff2c746e0b 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -26,7 +26,7 @@
#include <linux/string.h>
static DEFINE_MUTEX(crypto_default_null_skcipher_lock);
-static struct crypto_blkcipher *crypto_default_null_skcipher;
+static struct crypto_skcipher *crypto_default_null_skcipher;
static int crypto_default_null_skcipher_refcnt;
static int null_compress(struct crypto_tfm *tfm, const u8 *src,
@@ -153,15 +153,16 @@ MODULE_ALIAS_CRYPTO("compress_null");
MODULE_ALIAS_CRYPTO("digest_null");
MODULE_ALIAS_CRYPTO("cipher_null");
-struct crypto_blkcipher *crypto_get_default_null_skcipher(void)
+struct crypto_skcipher *crypto_get_default_null_skcipher(void)
{
- struct crypto_blkcipher *tfm;
+ struct crypto_skcipher *tfm;
mutex_lock(&crypto_default_null_skcipher_lock);
tfm = crypto_default_null_skcipher;
if (!tfm) {
- tfm = crypto_alloc_blkcipher("ecb(cipher_null)", 0, 0);
+ tfm = crypto_alloc_skcipher("ecb(cipher_null)",
+ 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
goto unlock;
@@ -181,7 +182,7 @@ void crypto_put_default_null_skcipher(void)
{
mutex_lock(&crypto_default_null_skcipher_lock);
if (!--crypto_default_null_skcipher_refcnt) {
- crypto_free_blkcipher(crypto_default_null_skcipher);
+ crypto_free_skcipher(crypto_default_null_skcipher);
crypto_default_null_skcipher = NULL;
}
mutex_unlock(&crypto_default_null_skcipher_lock);
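
[Editor's note: the converted helper hands back a synchronous skcipher (the allocation masks CRYPTO_ALG_ASYNC), so an on-stack request completes inline. A hedged usage sketch; src, dst and len are caller-supplied:

    struct crypto_skcipher *null = crypto_get_default_null_skcipher();

    if (!IS_ERR(null)) {
            SKCIPHER_REQUEST_ON_STACK(req, null);

            skcipher_request_set_tfm(req, null);
            skcipher_request_set_callback(req, 0, NULL, NULL);
            skcipher_request_set_crypt(req, src, dst, len, NULL);
            crypto_skcipher_encrypt(req);   /* ecb(cipher_null): plain copy */
            skcipher_request_zero(req);
            crypto_put_default_null_skcipher();
    }
]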
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 7097a3395b25..1c5705481c69 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -28,6 +28,7 @@
#include <crypto/internal/skcipher.h>
#include <crypto/internal/rng.h>
#include <crypto/akcipher.h>
+#include <crypto/kpp.h>
#include "internal.h"
@@ -126,6 +127,21 @@ nla_put_failure:
return -EMSGSIZE;
}
+static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_report_kpp rkpp;
+
+ strncpy(rkpp.type, "kpp", sizeof(rkpp.type));
+
+ if (nla_put(skb, CRYPTOCFGA_REPORT_KPP,
+ sizeof(struct crypto_report_kpp), &rkpp))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
static int crypto_report_one(struct crypto_alg *alg,
struct crypto_user_alg *ualg, struct sk_buff *skb)
{
@@ -176,6 +192,10 @@ static int crypto_report_one(struct crypto_alg *alg,
goto nla_put_failure;
break;
+ case CRYPTO_ALG_TYPE_KPP:
+ if (crypto_report_kpp(skb, alg))
+ goto nla_put_failure;
+ break;
}
out:
@@ -358,32 +378,6 @@ drop_alg:
return err;
}
-static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type,
- u32 mask)
-{
- int err;
- struct crypto_alg *alg;
-
- type = crypto_skcipher_type(type);
- mask = crypto_skcipher_mask(mask);
-
- for (;;) {
- alg = crypto_lookup_skcipher(name, type, mask);
- if (!IS_ERR(alg))
- return alg;
-
- err = PTR_ERR(alg);
- if (err != -EAGAIN)
- break;
- if (fatal_signal_pending(current)) {
- err = -EINTR;
- break;
- }
- }
-
- return ERR_PTR(err);
-}
-
static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
@@ -416,16 +410,7 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
else
name = p->cru_name;
- switch (p->cru_type & p->cru_mask & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_GIVCIPHER:
- case CRYPTO_ALG_TYPE_BLKCIPHER:
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- alg = crypto_user_skcipher_alg(name, p->cru_type, p->cru_mask);
- break;
- default:
- alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
- }
-
+ alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
if (IS_ERR(alg))
return PTR_ERR(alg);
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 2386f7313952..ff4d21eddb83 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -26,13 +26,13 @@ struct crypto_ctr_ctx {
};
struct crypto_rfc3686_ctx {
- struct crypto_ablkcipher *child;
+ struct crypto_skcipher *child;
u8 nonce[CTR_RFC3686_NONCE_SIZE];
};
struct crypto_rfc3686_req_ctx {
u8 iv[CTR_RFC3686_BLOCK_SIZE];
- struct ablkcipher_request subreq CRYPTO_MINALIGN_ATTR;
+ struct skcipher_request subreq CRYPTO_MINALIGN_ATTR;
};
static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key,
@@ -249,11 +249,11 @@ static struct crypto_template crypto_ctr_tmpl = {
.module = THIS_MODULE,
};
-static int crypto_rfc3686_setkey(struct crypto_ablkcipher *parent,
+static int crypto_rfc3686_setkey(struct crypto_skcipher *parent,
const u8 *key, unsigned int keylen)
{
- struct crypto_rfc3686_ctx *ctx = crypto_ablkcipher_ctx(parent);
- struct crypto_ablkcipher *child = ctx->child;
+ struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(parent);
+ struct crypto_skcipher *child = ctx->child;
int err;
/* the nonce is stored in bytes at end of key */
@@ -265,173 +265,178 @@ static int crypto_rfc3686_setkey(struct crypto_ablkcipher *parent,
keylen -= CTR_RFC3686_NONCE_SIZE;
- crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_ablkcipher_setkey(child, key, keylen);
- crypto_ablkcipher_set_flags(parent, crypto_ablkcipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
+ crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_skcipher_setkey(child, key, keylen);
+ crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
return err;
}
-static int crypto_rfc3686_crypt(struct ablkcipher_request *req)
+static int crypto_rfc3686_crypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct crypto_rfc3686_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct crypto_ablkcipher *child = ctx->child;
- unsigned long align = crypto_ablkcipher_alignmask(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *child = ctx->child;
+ unsigned long align = crypto_skcipher_alignmask(tfm);
struct crypto_rfc3686_req_ctx *rctx =
- (void *)PTR_ALIGN((u8 *)ablkcipher_request_ctx(req), align + 1);
- struct ablkcipher_request *subreq = &rctx->subreq;
+ (void *)PTR_ALIGN((u8 *)skcipher_request_ctx(req), align + 1);
+ struct skcipher_request *subreq = &rctx->subreq;
u8 *iv = rctx->iv;
/* set up counter block */
memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
- memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->info, CTR_RFC3686_IV_SIZE);
+ memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE);
/* initialize counter portion of counter block */
*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
cpu_to_be32(1);
- ablkcipher_request_set_tfm(subreq, child);
- ablkcipher_request_set_callback(subreq, req->base.flags,
- req->base.complete, req->base.data);
- ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->nbytes,
- iv);
+ skcipher_request_set_tfm(subreq, child);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, iv);
- return crypto_ablkcipher_encrypt(subreq);
+ return crypto_skcipher_encrypt(subreq);
}
-static int crypto_rfc3686_init_tfm(struct crypto_tfm *tfm)
+static int crypto_rfc3686_init_tfm(struct crypto_skcipher *tfm)
{
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_skcipher_spawn *spawn = crypto_instance_ctx(inst);
- struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_ablkcipher *cipher;
+ struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+ struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
+ struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *cipher;
unsigned long align;
+ unsigned int reqsize;
- cipher = crypto_spawn_skcipher(spawn);
+ cipher = crypto_spawn_skcipher2(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
- align = crypto_tfm_alg_alignmask(tfm);
+ align = crypto_skcipher_alignmask(tfm);
align &= ~(crypto_tfm_ctx_alignment() - 1);
- tfm->crt_ablkcipher.reqsize = align +
- sizeof(struct crypto_rfc3686_req_ctx) +
- crypto_ablkcipher_reqsize(cipher);
+ reqsize = align + sizeof(struct crypto_rfc3686_req_ctx) +
+ crypto_skcipher_reqsize(cipher);
+ crypto_skcipher_set_reqsize(tfm, reqsize);
return 0;
}
-static void crypto_rfc3686_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_rfc3686_exit_tfm(struct crypto_skcipher *tfm)
{
- struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_skcipher(ctx->child);
+}
- crypto_free_ablkcipher(ctx->child);
+static void crypto_rfc3686_free(struct skcipher_instance *inst)
+{
+ struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
+
+ crypto_drop_skcipher(spawn);
+ kfree(inst);
}
-static struct crypto_instance *crypto_rfc3686_alloc(struct rtattr **tb)
+static int crypto_rfc3686_create(struct crypto_template *tmpl,
+ struct rtattr **tb)
{
struct crypto_attr_type *algt;
- struct crypto_instance *inst;
- struct crypto_alg *alg;
+ struct skcipher_instance *inst;
+ struct skcipher_alg *alg;
struct crypto_skcipher_spawn *spawn;
const char *cipher_name;
int err;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
- return ERR_CAST(algt);
+ return PTR_ERR(algt);
- if ((algt->type ^ CRYPTO_ALG_TYPE_BLKCIPHER) & algt->mask)
- return ERR_PTR(-EINVAL);
+ if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
+ return -EINVAL;
cipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(cipher_name))
- return ERR_CAST(cipher_name);
+ return PTR_ERR(cipher_name);
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
- spawn = crypto_instance_ctx(inst);
+ spawn = skcipher_instance_ctx(inst);
- crypto_set_skcipher_spawn(spawn, inst);
- err = crypto_grab_skcipher(spawn, cipher_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
+ err = crypto_grab_skcipher2(spawn, cipher_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (err)
goto err_free_inst;
- alg = crypto_skcipher_spawn_alg(spawn);
+ alg = crypto_spawn_skcipher_alg(spawn);
/* We only support 16-byte blocks. */
err = -EINVAL;
- if (alg->cra_ablkcipher.ivsize != CTR_RFC3686_BLOCK_SIZE)
+ if (crypto_skcipher_alg_ivsize(alg) != CTR_RFC3686_BLOCK_SIZE)
goto err_drop_spawn;
/* Not a stream cipher? */
- if (alg->cra_blocksize != 1)
+ if (alg->base.cra_blocksize != 1)
goto err_drop_spawn;
err = -ENAMETOOLONG;
- if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "rfc3686(%s)",
- alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
+ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "rfc3686(%s)", alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
goto err_drop_spawn;
- if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "rfc3686(%s)", alg->cra_driver_name) >=
- CRYPTO_MAX_ALG_NAME)
+ if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "rfc3686(%s)", alg->base.cra_driver_name) >=
+ CRYPTO_MAX_ALG_NAME)
goto err_drop_spawn;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = 1;
- inst->alg.cra_alignmask = alg->cra_alignmask;
+ inst->alg.base.cra_priority = alg->base.cra_priority;
+ inst->alg.base.cra_blocksize = 1;
+ inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- (alg->cra_flags & CRYPTO_ALG_ASYNC);
- inst->alg.cra_type = &crypto_ablkcipher_type;
+ inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
- inst->alg.cra_ablkcipher.ivsize = CTR_RFC3686_IV_SIZE;
- inst->alg.cra_ablkcipher.min_keysize =
- alg->cra_ablkcipher.min_keysize + CTR_RFC3686_NONCE_SIZE;
- inst->alg.cra_ablkcipher.max_keysize =
- alg->cra_ablkcipher.max_keysize + CTR_RFC3686_NONCE_SIZE;
+ inst->alg.ivsize = CTR_RFC3686_IV_SIZE;
+ inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
+ inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
+ CTR_RFC3686_NONCE_SIZE;
+ inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
+ CTR_RFC3686_NONCE_SIZE;
- inst->alg.cra_ablkcipher.geniv = "seqiv";
+ inst->alg.setkey = crypto_rfc3686_setkey;
+ inst->alg.encrypt = crypto_rfc3686_crypt;
+ inst->alg.decrypt = crypto_rfc3686_crypt;
- inst->alg.cra_ablkcipher.setkey = crypto_rfc3686_setkey;
- inst->alg.cra_ablkcipher.encrypt = crypto_rfc3686_crypt;
- inst->alg.cra_ablkcipher.decrypt = crypto_rfc3686_crypt;
+ inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx);
- inst->alg.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx);
+ inst->alg.init = crypto_rfc3686_init_tfm;
+ inst->alg.exit = crypto_rfc3686_exit_tfm;
- inst->alg.cra_init = crypto_rfc3686_init_tfm;
- inst->alg.cra_exit = crypto_rfc3686_exit_tfm;
+ inst->free = crypto_rfc3686_free;
- return inst;
+ err = skcipher_register_instance(tmpl, inst);
+ if (err)
+ goto err_drop_spawn;
+
+out:
+ return err;
err_drop_spawn:
crypto_drop_skcipher(spawn);
err_free_inst:
kfree(inst);
- return ERR_PTR(err);
-}
-
-static void crypto_rfc3686_free(struct crypto_instance *inst)
-{
- struct crypto_skcipher_spawn *spawn = crypto_instance_ctx(inst);
-
- crypto_drop_skcipher(spawn);
- kfree(inst);
+ goto out;
}
static struct crypto_template crypto_rfc3686_tmpl = {
.name = "rfc3686",
- .alloc = crypto_rfc3686_alloc,
- .free = crypto_rfc3686_free,
+ .create = crypto_rfc3686_create,
.module = THIS_MODULE,
};
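
[Editor's note: the key and counter-block layout that the rfc3686 setkey/crypt paths above implement, as a sketch; constants come from <crypto/ctr.h>, the key buffer belongs to a hypothetical caller:

    /* rfc3686(ctr(aes)) key = AES key || 4-byte nonce, so AES-128 takes
     * a 20-byte key.  Each request's 16-byte counter block is then:
     *
     *   bytes  0..3   ctx->nonce   (stripped from the tail of the key)
     *   bytes  4..11  req->iv      (8-byte explicit per-request IV)
     *   bytes 12..15  big-endian block counter, starting at 1
     */
    u8 key[16 + CTR_RFC3686_NONCE_SIZE];

    get_random_bytes(key, sizeof(key));
    crypto_skcipher_setkey(tfm, key, sizeof(key));
]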
diff --git a/crypto/cts.c b/crypto/cts.c
index e467ec0acf9f..51976187b2bf 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -40,7 +40,7 @@
* rfc3962 includes errata information in its Appendix A.
*/
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -51,289 +51,364 @@
#include <linux/slab.h>
struct crypto_cts_ctx {
- struct crypto_blkcipher *child;
+ struct crypto_skcipher *child;
};
-static int crypto_cts_setkey(struct crypto_tfm *parent, const u8 *key,
- unsigned int keylen)
+struct crypto_cts_reqctx {
+ struct scatterlist sg[2];
+ unsigned offset;
+ struct skcipher_request subreq;
+};
+
+static inline u8 *crypto_cts_reqctx_space(struct skcipher_request *req)
{
- struct crypto_cts_ctx *ctx = crypto_tfm_ctx(parent);
- struct crypto_blkcipher *child = ctx->child;
- int err;
+ struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *child = ctx->child;
- crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_blkcipher_set_flags(child, crypto_tfm_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_blkcipher_setkey(child, key, keylen);
- crypto_tfm_set_flags(parent, crypto_blkcipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- return err;
+ return PTR_ALIGN((u8 *)(rctx + 1) + crypto_skcipher_reqsize(child),
+ crypto_skcipher_alignmask(tfm) + 1);
}
-static int cts_cbc_encrypt(struct crypto_cts_ctx *ctx,
- struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int offset,
- unsigned int nbytes)
+static int crypto_cts_setkey(struct crypto_skcipher *parent, const u8 *key,
+ unsigned int keylen)
{
- int bsize = crypto_blkcipher_blocksize(desc->tfm);
- u8 tmp[bsize], tmp2[bsize];
- struct blkcipher_desc lcldesc;
- struct scatterlist sgsrc[1], sgdst[1];
- int lastn = nbytes - bsize;
- u8 iv[bsize];
- u8 s[bsize * 2], d[bsize * 2];
+ struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(parent);
+ struct crypto_skcipher *child = ctx->child;
int err;
- if (lastn < 0)
- return -EINVAL;
+ crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_skcipher_setkey(child, key, keylen);
+ crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
+ return err;
+}
- sg_init_table(sgsrc, 1);
- sg_init_table(sgdst, 1);
+static void cts_cbc_crypt_done(struct crypto_async_request *areq, int err)
+{
+ struct skcipher_request *req = areq->data;
- memset(s, 0, sizeof(s));
- scatterwalk_map_and_copy(s, src, offset, nbytes, 0);
+ if (err == -EINPROGRESS)
+ return;
- memcpy(iv, desc->info, bsize);
+ skcipher_request_complete(req, err);
+}
- lcldesc.tfm = ctx->child;
- lcldesc.info = iv;
- lcldesc.flags = desc->flags;
+static int cts_cbc_encrypt(struct skcipher_request *req)
+{
+ struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_request *subreq = &rctx->subreq;
+ int bsize = crypto_skcipher_blocksize(tfm);
+ u8 d[bsize * 2] __attribute__ ((aligned(__alignof__(u32))));
+ struct scatterlist *sg;
+ unsigned int offset;
+ int lastn;
+
+ offset = rctx->offset;
+ lastn = req->cryptlen - offset;
+
+ sg = scatterwalk_ffwd(rctx->sg, req->dst, offset - bsize);
+ scatterwalk_map_and_copy(d + bsize, sg, 0, bsize, 0);
+
+ memset(d, 0, bsize);
+ scatterwalk_map_and_copy(d, req->src, offset, lastn, 0);
+
+ scatterwalk_map_and_copy(d, sg, 0, bsize + lastn, 1);
+ memzero_explicit(d, sizeof(d));
+
+ skcipher_request_set_callback(subreq, req->base.flags &
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ cts_cbc_crypt_done, req);
+ skcipher_request_set_crypt(subreq, sg, sg, bsize, req->iv);
+ return crypto_skcipher_encrypt(subreq);
+}
- sg_set_buf(&sgsrc[0], s, bsize);
- sg_set_buf(&sgdst[0], tmp, bsize);
- err = crypto_blkcipher_encrypt_iv(&lcldesc, sgdst, sgsrc, bsize);
+static void crypto_cts_encrypt_done(struct crypto_async_request *areq, int err)
+{
+ struct skcipher_request *req = areq->data;
- memcpy(d + bsize, tmp, lastn);
+ if (err)
+ goto out;
- lcldesc.info = tmp;
+ err = cts_cbc_encrypt(req);
+ if (err == -EINPROGRESS ||
+ (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return;
- sg_set_buf(&sgsrc[0], s + bsize, bsize);
- sg_set_buf(&sgdst[0], tmp2, bsize);
- err = crypto_blkcipher_encrypt_iv(&lcldesc, sgdst, sgsrc, bsize);
+out:
+ skcipher_request_complete(req, err);
+}
- memcpy(d, tmp2, bsize);
+static int crypto_cts_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_request *subreq = &rctx->subreq;
+ int bsize = crypto_skcipher_blocksize(tfm);
+ unsigned int nbytes = req->cryptlen;
+ int cbc_blocks = (nbytes + bsize - 1) / bsize - 1;
+ unsigned int offset;
+
+ skcipher_request_set_tfm(subreq, ctx->child);
+
+ if (cbc_blocks <= 0) {
+ skcipher_request_set_callback(subreq, req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(subreq, req->src, req->dst, nbytes,
+ req->iv);
+ return crypto_skcipher_encrypt(subreq);
+ }
- scatterwalk_map_and_copy(d, dst, offset, nbytes, 1);
+ offset = cbc_blocks * bsize;
+ rctx->offset = offset;
- memcpy(desc->info, tmp2, bsize);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ crypto_cts_encrypt_done, req);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ offset, req->iv);
- return err;
+ return crypto_skcipher_encrypt(subreq) ?:
+ cts_cbc_encrypt(req);
}
-static int crypto_cts_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cts_cbc_decrypt(struct skcipher_request *req)
{
- struct crypto_cts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- int bsize = crypto_blkcipher_blocksize(desc->tfm);
- int tot_blocks = (nbytes + bsize - 1) / bsize;
- int cbc_blocks = tot_blocks > 2 ? tot_blocks - 2 : 0;
- struct blkcipher_desc lcldesc;
- int err;
+ struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_request *subreq = &rctx->subreq;
+ int bsize = crypto_skcipher_blocksize(tfm);
+ u8 d[bsize * 2] __attribute__ ((aligned(__alignof__(u32))));
+ struct scatterlist *sg;
+ unsigned int offset;
+ u8 *space;
+ int lastn;
+
+ offset = rctx->offset;
+ lastn = req->cryptlen - offset;
+
+ sg = scatterwalk_ffwd(rctx->sg, req->dst, offset - bsize);
+
+ /* 1. Decrypt Cn-1 (s) to create Dn */
+ scatterwalk_map_and_copy(d + bsize, sg, 0, bsize, 0);
+ space = crypto_cts_reqctx_space(req);
+ crypto_xor(d + bsize, space, bsize);
+ /* 2. Pad Cn with zeros at the end to create C of length BB */
+ memset(d, 0, bsize);
+ scatterwalk_map_and_copy(d, req->src, offset, lastn, 0);
+ /* 3. Exclusive-or Dn with C to create Xn */
+ /* 4. Select the first Ln bytes of Xn to create Pn */
+ crypto_xor(d + bsize, d, lastn);
+
+ /* 5. Append the tail (BB - Ln) bytes of Xn to Cn to create En */
+ memcpy(d + lastn, d + bsize + lastn, bsize - lastn);
+ /* 6. Decrypt En to create Pn-1 */
- lcldesc.tfm = ctx->child;
- lcldesc.info = desc->info;
- lcldesc.flags = desc->flags;
-
- if (tot_blocks == 1) {
- err = crypto_blkcipher_encrypt_iv(&lcldesc, dst, src, bsize);
- } else if (nbytes <= bsize * 2) {
- err = cts_cbc_encrypt(ctx, desc, dst, src, 0, nbytes);
- } else {
- /* do normal function for tot_blocks - 2 */
- err = crypto_blkcipher_encrypt_iv(&lcldesc, dst, src,
- cbc_blocks * bsize);
- if (err == 0) {
- /* do cts for final two blocks */
- err = cts_cbc_encrypt(ctx, desc, dst, src,
- cbc_blocks * bsize,
- nbytes - (cbc_blocks * bsize));
- }
- }
+ scatterwalk_map_and_copy(d, sg, 0, bsize + lastn, 1);
+ memzero_explicit(d, sizeof(d));
- return err;
+ skcipher_request_set_callback(subreq, req->base.flags &
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ cts_cbc_crypt_done, req);
+
+ skcipher_request_set_crypt(subreq, sg, sg, bsize, space);
+ return crypto_skcipher_decrypt(subreq);
}
-static int cts_cbc_decrypt(struct crypto_cts_ctx *ctx,
- struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int offset,
- unsigned int nbytes)
+static void crypto_cts_decrypt_done(struct crypto_async_request *areq, int err)
{
- int bsize = crypto_blkcipher_blocksize(desc->tfm);
- u8 tmp[bsize];
- struct blkcipher_desc lcldesc;
- struct scatterlist sgsrc[1], sgdst[1];
- int lastn = nbytes - bsize;
- u8 iv[bsize];
- u8 s[bsize * 2], d[bsize * 2];
- int err;
-
- if (lastn < 0)
- return -EINVAL;
+ struct skcipher_request *req = areq->data;
- sg_init_table(sgsrc, 1);
- sg_init_table(sgdst, 1);
+ if (err)
+ goto out;
- scatterwalk_map_and_copy(s, src, offset, nbytes, 0);
+ err = cts_cbc_decrypt(req);
+ if (err == -EINPROGRESS ||
+ (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return;
- lcldesc.tfm = ctx->child;
- lcldesc.info = iv;
- lcldesc.flags = desc->flags;
+out:
+ skcipher_request_complete(req, err);
+}
- /* 1. Decrypt Cn-1 (s) to create Dn (tmp)*/
- memset(iv, 0, sizeof(iv));
- sg_set_buf(&sgsrc[0], s, bsize);
- sg_set_buf(&sgdst[0], tmp, bsize);
- err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize);
- if (err)
- return err;
- /* 2. Pad Cn with zeros at the end to create C of length BB */
- memset(iv, 0, sizeof(iv));
- memcpy(iv, s + bsize, lastn);
- /* 3. Exclusive-or Dn (tmp) with C (iv) to create Xn (tmp) */
- crypto_xor(tmp, iv, bsize);
- /* 4. Select the first Ln bytes of Xn (tmp) to create Pn */
- memcpy(d + bsize, tmp, lastn);
-
- /* 5. Append the tail (BB - Ln) bytes of Xn (tmp) to Cn to create En */
- memcpy(s + bsize + lastn, tmp + lastn, bsize - lastn);
- /* 6. Decrypt En to create Pn-1 */
- memzero_explicit(iv, sizeof(iv));
+static int crypto_cts_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_request *subreq = &rctx->subreq;
+ int bsize = crypto_skcipher_blocksize(tfm);
+ unsigned int nbytes = req->cryptlen;
+ int cbc_blocks = (nbytes + bsize - 1) / bsize - 1;
+ unsigned int offset;
+ u8 *space;
+
+ skcipher_request_set_tfm(subreq, ctx->child);
+
+ if (cbc_blocks <= 0) {
+ skcipher_request_set_callback(subreq, req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(subreq, req->src, req->dst, nbytes,
+ req->iv);
+ return crypto_skcipher_decrypt(subreq);
+ }
- sg_set_buf(&sgsrc[0], s + bsize, bsize);
- sg_set_buf(&sgdst[0], d, bsize);
- err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ crypto_cts_decrypt_done, req);
- /* XOR with previous block */
- crypto_xor(d, desc->info, bsize);
+ space = crypto_cts_reqctx_space(req);
- scatterwalk_map_and_copy(d, dst, offset, nbytes, 1);
+ offset = cbc_blocks * bsize;
+ rctx->offset = offset;
- memcpy(desc->info, s, bsize);
- return err;
-}
+ if (cbc_blocks <= 1)
+ memcpy(space, req->iv, bsize);
+ else
+ scatterwalk_map_and_copy(space, req->src, offset - 2 * bsize,
+ bsize, 0);
-static int crypto_cts_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- struct crypto_cts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- int bsize = crypto_blkcipher_blocksize(desc->tfm);
- int tot_blocks = (nbytes + bsize - 1) / bsize;
- int cbc_blocks = tot_blocks > 2 ? tot_blocks - 2 : 0;
- struct blkcipher_desc lcldesc;
- int err;
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ offset, req->iv);
- lcldesc.tfm = ctx->child;
- lcldesc.info = desc->info;
- lcldesc.flags = desc->flags;
-
- if (tot_blocks == 1) {
- err = crypto_blkcipher_decrypt_iv(&lcldesc, dst, src, bsize);
- } else if (nbytes <= bsize * 2) {
- err = cts_cbc_decrypt(ctx, desc, dst, src, 0, nbytes);
- } else {
- /* do normal function for tot_blocks - 2 */
- err = crypto_blkcipher_decrypt_iv(&lcldesc, dst, src,
- cbc_blocks * bsize);
- if (err == 0) {
- /* do cts for final two blocks */
- err = cts_cbc_decrypt(ctx, desc, dst, src,
- cbc_blocks * bsize,
- nbytes - (cbc_blocks * bsize));
- }
- }
- return err;
+ return crypto_skcipher_decrypt(subreq) ?:
+ cts_cbc_decrypt(req);
}
-static int crypto_cts_init_tfm(struct crypto_tfm *tfm)
+static int crypto_cts_init_tfm(struct crypto_skcipher *tfm)
{
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_spawn *spawn = crypto_instance_ctx(inst);
- struct crypto_cts_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_blkcipher *cipher;
-
- cipher = crypto_spawn_blkcipher(spawn);
+ struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+ struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
+ struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *cipher;
+ unsigned reqsize;
+ unsigned bsize;
+ unsigned align;
+
+ cipher = crypto_spawn_skcipher2(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
+
+ align = crypto_skcipher_alignmask(tfm);
+ bsize = crypto_skcipher_blocksize(cipher);
+ reqsize = ALIGN(sizeof(struct crypto_cts_reqctx) +
+ crypto_skcipher_reqsize(cipher),
+ crypto_tfm_ctx_alignment()) +
+ (align & ~(crypto_tfm_ctx_alignment() - 1)) + bsize;
+
+ crypto_skcipher_set_reqsize(tfm, reqsize);
+
return 0;
}
-static void crypto_cts_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_cts_exit_tfm(struct crypto_skcipher *tfm)
{
- struct crypto_cts_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_blkcipher(ctx->child);
+ struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_skcipher(ctx->child);
}
-static struct crypto_instance *crypto_cts_alloc(struct rtattr **tb)
+static void crypto_cts_free(struct skcipher_instance *inst)
{
- struct crypto_instance *inst;
- struct crypto_alg *alg;
+ crypto_drop_skcipher(skcipher_instance_ctx(inst));
+ kfree(inst);
+}
+
+static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct crypto_skcipher_spawn *spawn;
+ struct skcipher_instance *inst;
+ struct crypto_attr_type *algt;
+ struct skcipher_alg *alg;
+ const char *cipher_name;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
+ algt = crypto_get_attr_type(tb);
+ if (IS_ERR(algt))
+ return PTR_ERR(algt);
+
+ if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
+ return -EINVAL;
+
+ cipher_name = crypto_attr_alg_name(tb[1]);
+ if (IS_ERR(cipher_name))
+ return PTR_ERR(cipher_name);
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ spawn = skcipher_instance_ctx(inst);
+
+ crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
+ err = crypto_grab_skcipher2(spawn, cipher_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (err)
- return ERR_PTR(err);
+ goto err_free_inst;
- alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_BLKCIPHER,
- CRYPTO_ALG_TYPE_MASK);
- if (IS_ERR(alg))
- return ERR_CAST(alg);
+ alg = crypto_spawn_skcipher_alg(spawn);
- inst = ERR_PTR(-EINVAL);
- if (!is_power_of_2(alg->cra_blocksize))
- goto out_put_alg;
+ err = -EINVAL;
+ if (crypto_skcipher_alg_ivsize(alg) != alg->base.cra_blocksize)
+ goto err_drop_spawn;
- if (strncmp(alg->cra_name, "cbc(", 4))
- goto out_put_alg;
+ if (strncmp(alg->base.cra_name, "cbc(", 4))
+ goto err_drop_spawn;
- inst = crypto_alloc_instance("cts", alg);
- if (IS_ERR(inst))
- goto out_put_alg;
+ err = crypto_inst_setname(skcipher_crypto_instance(inst), "cts",
+ &alg->base);
+ if (err)
+ goto err_drop_spawn;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = alg->cra_blocksize;
- inst->alg.cra_alignmask = alg->cra_alignmask;
- inst->alg.cra_type = &crypto_blkcipher_type;
+ inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
+ inst->alg.base.cra_priority = alg->base.cra_priority;
+ inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
+ inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
/* We access the data as u32s when xoring. */
- inst->alg.cra_alignmask |= __alignof__(u32) - 1;
+ inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
- inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
- inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
- inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
+ inst->alg.ivsize = alg->base.cra_blocksize;
+ inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
+ inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
+ inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
- inst->alg.cra_ctxsize = sizeof(struct crypto_cts_ctx);
+ inst->alg.base.cra_ctxsize = sizeof(struct crypto_cts_ctx);
- inst->alg.cra_init = crypto_cts_init_tfm;
- inst->alg.cra_exit = crypto_cts_exit_tfm;
+ inst->alg.init = crypto_cts_init_tfm;
+ inst->alg.exit = crypto_cts_exit_tfm;
- inst->alg.cra_blkcipher.setkey = crypto_cts_setkey;
- inst->alg.cra_blkcipher.encrypt = crypto_cts_encrypt;
- inst->alg.cra_blkcipher.decrypt = crypto_cts_decrypt;
+ inst->alg.setkey = crypto_cts_setkey;
+ inst->alg.encrypt = crypto_cts_encrypt;
+ inst->alg.decrypt = crypto_cts_decrypt;
-out_put_alg:
- crypto_mod_put(alg);
- return inst;
-}
+ inst->free = crypto_cts_free;
-static void crypto_cts_free(struct crypto_instance *inst)
-{
- crypto_drop_spawn(crypto_instance_ctx(inst));
+ err = skcipher_register_instance(tmpl, inst);
+ if (err)
+ goto err_drop_spawn;
+
+out:
+ return err;
+
+err_drop_spawn:
+ crypto_drop_skcipher(spawn);
+err_free_inst:
kfree(inst);
+ goto out;
}
static struct crypto_template crypto_cts_tmpl = {
.name = "cts",
- .alloc = crypto_cts_alloc,
- .free = crypto_cts_free,
+ .create = crypto_cts_create,
.module = THIS_MODULE,
};
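
[Editor's note: for orientation, the ciphertext-stealing scheme that crypto_cts_encrypt()/cts_cbc_encrypt() implement, summarized as equations. This restates RFC 3962 rather than adding anything new (n blocks, the last one Ln < bsize bytes):

    /*
     * C1 .. C(n-2)        : ordinary CBC over the first cbc_blocks blocks
     * C(n-1) (full block) : E(K, (Pn || zero pad) XOR CC(n-1))
     * Cn (Ln bytes)       : first Ln bytes of CC(n-1)
     *
     * where CC(n-1) is the CBC ciphertext of P(n-1).  The d[] buffer
     * and the single-block subrequest in cts_cbc_encrypt() perform
     * exactly this swap-and-truncate; decrypt undoes it in the steps
     * 1-6 annotated in cts_cbc_decrypt().
     */
]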
diff --git a/crypto/dh.c b/crypto/dh.c
new file mode 100644
index 000000000000..9d19360e7189
--- /dev/null
+++ b/crypto/dh.c
@@ -0,0 +1,189 @@
+/* Diffie-Hellman Key Agreement Method [RFC2631]
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/kpp.h>
+#include <crypto/dh.h>
+#include <linux/mpi.h>
+
+struct dh_ctx {
+ MPI p;
+ MPI g;
+ MPI xa;
+};
+
+static inline void dh_clear_params(struct dh_ctx *ctx)
+{
+ mpi_free(ctx->p);
+ mpi_free(ctx->g);
+ ctx->p = NULL;
+ ctx->g = NULL;
+}
+
+static void dh_free_ctx(struct dh_ctx *ctx)
+{
+ dh_clear_params(ctx);
+ mpi_free(ctx->xa);
+ ctx->xa = NULL;
+}
+
+/*
+ * If base is g we compute the public key
+ * ya = g^xa mod p; [RFC2631 sec 2.1.1]
+ * else if base is the counterpart public key we compute the shared secret
+ * ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
+ */
+static int _compute_val(const struct dh_ctx *ctx, MPI base, MPI val)
+{
+ /* val = base^xa mod p */
+ return mpi_powm(val, base, ctx->xa, ctx->p);
+}
+
+static inline struct dh_ctx *dh_get_ctx(struct crypto_kpp *tfm)
+{
+ return kpp_tfm_ctx(tfm);
+}
+
+static int dh_check_params_length(unsigned int p_len)
+{
+ return (p_len < 1536) ? -EINVAL : 0;
+}
+
+static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
+{
+ if (unlikely(!params->p || !params->g))
+ return -EINVAL;
+
+ if (dh_check_params_length(params->p_size << 3))
+ return -EINVAL;
+
+ ctx->p = mpi_read_raw_data(params->p, params->p_size);
+ if (!ctx->p)
+ return -EINVAL;
+
+ ctx->g = mpi_read_raw_data(params->g, params->g_size);
+ if (!ctx->g) {
+ mpi_free(ctx->p);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dh_set_secret(struct crypto_kpp *tfm, void *buf, unsigned int len)
+{
+ struct dh_ctx *ctx = dh_get_ctx(tfm);
+ struct dh params;
+
+ if (crypto_dh_decode_key(buf, len, &params) < 0)
+ return -EINVAL;
+
+ if (dh_set_params(ctx, &params) < 0)
+ return -EINVAL;
+
+ ctx->xa = mpi_read_raw_data(params.key, params.key_size);
+ if (!ctx->xa) {
+ dh_clear_params(ctx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dh_compute_value(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct dh_ctx *ctx = dh_get_ctx(tfm);
+ MPI base, val = mpi_alloc(0);
+ int ret = 0;
+ int sign;
+
+ if (!val)
+ return -ENOMEM;
+
+ if (unlikely(!ctx->xa)) {
+ ret = -EINVAL;
+ goto err_free_val;
+ }
+
+ if (req->src) {
+ base = mpi_read_raw_from_sgl(req->src, req->src_len);
+ if (!base) {
+ ret = -EINVAL;
+ goto err_free_val;
+ }
+ } else {
+ base = ctx->g;
+ }
+
+ ret = _compute_val(ctx, base, val);
+ if (ret)
+ goto err_free_base;
+
+ ret = mpi_write_to_sgl(val, req->dst, req->dst_len, &sign);
+ if (ret)
+ goto err_free_base;
+
+ if (sign < 0)
+ ret = -EBADMSG;
+err_free_base:
+ if (req->src)
+ mpi_free(base);
+err_free_val:
+ mpi_free(val);
+ return ret;
+}
+
+static int dh_max_size(struct crypto_kpp *tfm)
+{
+ struct dh_ctx *ctx = dh_get_ctx(tfm);
+
+ return mpi_get_size(ctx->p);
+}
+
+static void dh_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct dh_ctx *ctx = dh_get_ctx(tfm);
+
+ dh_free_ctx(ctx);
+}
+
+static struct kpp_alg dh = {
+ .set_secret = dh_set_secret,
+ .generate_public_key = dh_compute_value,
+ .compute_shared_secret = dh_compute_value,
+ .max_size = dh_max_size,
+ .exit = dh_exit_tfm,
+ .base = {
+ .cra_name = "dh",
+ .cra_driver_name = "dh-generic",
+ .cra_priority = 100,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct dh_ctx),
+ },
+};
+
+static int dh_init(void)
+{
+ return crypto_register_kpp(&dh);
+}
+
+static void dh_exit(void)
+{
+ crypto_unregister_kpp(&dh);
+}
+
+module_init(dh_init);
+module_exit(dh_exit);
+MODULE_ALIAS_CRYPTO("dh");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DH generic algorithm");
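
[Editor's note: a hypothetical in-kernel consumer of the new "dh" implementation; error handling elided, and xa/p/g with their lengths are caller-supplied big-endian buffers, as mpi_read_raw_data() expects:

    struct crypto_kpp *tfm = crypto_alloc_kpp("dh", 0, 0);
    struct dh params = {
            .key = xa, .key_size = xa_len,  /* private key */
            .p   = p,  .p_size   = p_len,   /* prime modulus, >= 1536 bits */
            .g   = g,  .g_size   = g_len,   /* generator */
    };
    unsigned int len = crypto_dh_key_len(&params);
    char *buf = kmalloc(len, GFP_KERNEL);

    crypto_dh_encode_key(buf, len, &params);
    crypto_kpp_set_secret(tfm, buf, len);

    /* req->src == NULL -> dst = g^xa mod p   (public key)
     * req->src == yb   -> dst = yb^xa mod p  (shared secret) */
]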
diff --git a/crypto/dh_helper.c b/crypto/dh_helper.c
new file mode 100644
index 000000000000..02db76b20d00
--- /dev/null
+++ b/crypto/dh_helper.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <crypto/dh.h>
+#include <crypto/kpp.h>
+
+#define DH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 3 * sizeof(int))
+
+static inline u8 *dh_pack_data(void *dst, const void *src, size_t size)
+{
+ memcpy(dst, src, size);
+ return dst + size;
+}
+
+static inline const u8 *dh_unpack_data(void *dst, const void *src, size_t size)
+{
+ memcpy(dst, src, size);
+ return src + size;
+}
+
+static inline int dh_data_size(const struct dh *p)
+{
+ return p->key_size + p->p_size + p->g_size;
+}
+
+int crypto_dh_key_len(const struct dh *p)
+{
+ return DH_KPP_SECRET_MIN_SIZE + dh_data_size(p);
+}
+EXPORT_SYMBOL_GPL(crypto_dh_key_len);
+
+int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params)
+{
+ u8 *ptr = buf;
+ struct kpp_secret secret = {
+ .type = CRYPTO_KPP_SECRET_TYPE_DH,
+ .len = len
+ };
+
+ if (unlikely(!buf))
+ return -EINVAL;
+
+ if (len != crypto_dh_key_len(params))
+ return -EINVAL;
+
+ ptr = dh_pack_data(ptr, &secret, sizeof(secret));
+ ptr = dh_pack_data(ptr, &params->key_size, sizeof(params->key_size));
+ ptr = dh_pack_data(ptr, &params->p_size, sizeof(params->p_size));
+ ptr = dh_pack_data(ptr, &params->g_size, sizeof(params->g_size));
+ ptr = dh_pack_data(ptr, params->key, params->key_size);
+ ptr = dh_pack_data(ptr, params->p, params->p_size);
+ dh_pack_data(ptr, params->g, params->g_size);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_dh_encode_key);
+
+int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
+{
+ const u8 *ptr = buf;
+ struct kpp_secret secret;
+
+ if (unlikely(!buf || len < DH_KPP_SECRET_MIN_SIZE))
+ return -EINVAL;
+
+ ptr = dh_unpack_data(&secret, ptr, sizeof(secret));
+ if (secret.type != CRYPTO_KPP_SECRET_TYPE_DH)
+ return -EINVAL;
+
+ ptr = dh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
+ ptr = dh_unpack_data(&params->p_size, ptr, sizeof(params->p_size));
+ ptr = dh_unpack_data(&params->g_size, ptr, sizeof(params->g_size));
+ if (secret.len != crypto_dh_key_len(params))
+ return -EINVAL;
+
+ /* Don't allocate memory. Set pointers to data within
+ * the given buffer.
+ */
+ params->key = (void *)ptr;
+ params->p = (void *)(ptr + params->key_size);
+ params->g = (void *)(ptr + params->key_size + params->p_size);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_dh_decode_key);
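
[Editor's note: the packed secret format shared by the two helpers above; decode points params' fields into the caller's buffer, nothing is copied or allocated:

    /*
     *   struct kpp_secret { .type = CRYPTO_KPP_SECRET_TYPE_DH,
     *                       .len  = total buffer length }
     *   int key_size;  int p_size;  int g_size;
     *   u8  key[key_size];  u8 p[p_size];  u8 g[g_size];
     */
]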
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 0a3538f6cf22..f752da3a7c75 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -252,10 +252,16 @@ MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes192");
MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes128");
MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes128");
-static int drbg_kcapi_sym(struct drbg_state *drbg, const unsigned char *key,
- unsigned char *outval, const struct drbg_string *in);
+static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
+ const unsigned char *key);
+static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
+ const struct drbg_string *in);
static int drbg_init_sym_kernel(struct drbg_state *drbg);
static int drbg_fini_sym_kernel(struct drbg_state *drbg);
+static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
+ u8 *inbuf, u32 inbuflen,
+ u8 *outbuf, u32 outlen);
+#define DRBG_CTR_NULL_LEN 128
/* BCC function for CTR DRBG as defined in 10.4.3 */
static int drbg_ctr_bcc(struct drbg_state *drbg,
@@ -270,6 +276,7 @@ static int drbg_ctr_bcc(struct drbg_state *drbg,
drbg_string_fill(&data, out, drbg_blocklen(drbg));
/* 10.4.3 step 2 / 4 */
+ drbg_kcapi_symsetkey(drbg, key);
list_for_each_entry(curr, in, list) {
const unsigned char *pos = curr->buf;
size_t len = curr->len;
@@ -278,7 +285,7 @@ static int drbg_ctr_bcc(struct drbg_state *drbg,
/* 10.4.3 step 4.2 */
if (drbg_blocklen(drbg) == cnt) {
cnt = 0;
- ret = drbg_kcapi_sym(drbg, key, out, &data);
+ ret = drbg_kcapi_sym(drbg, out, &data);
if (ret)
return ret;
}
@@ -290,7 +297,7 @@ static int drbg_ctr_bcc(struct drbg_state *drbg,
}
/* 10.4.3 step 4.2 for last block */
if (cnt)
- ret = drbg_kcapi_sym(drbg, key, out, &data);
+ ret = drbg_kcapi_sym(drbg, out, &data);
return ret;
}
@@ -425,6 +432,7 @@ static int drbg_ctr_df(struct drbg_state *drbg,
/* 10.4.2 step 12: overwriting of outval is implemented in next step */
/* 10.4.2 step 13 */
+ drbg_kcapi_symsetkey(drbg, temp);
while (generated_len < bytes_to_return) {
short blocklen = 0;
/*
@@ -432,7 +440,7 @@ static int drbg_ctr_df(struct drbg_state *drbg,
* implicit as the key is only drbg_blocklen in size based on
* the implementation of the cipher function callback
*/
- ret = drbg_kcapi_sym(drbg, temp, X, &cipherin);
+ ret = drbg_kcapi_sym(drbg, X, &cipherin);
if (ret)
goto out;
blocklen = (drbg_blocklen(drbg) <
@@ -476,49 +484,47 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
unsigned char *temp = drbg->scratchpad;
unsigned char *df_data = drbg->scratchpad + drbg_statelen(drbg) +
drbg_blocklen(drbg);
- unsigned char *temp_p, *df_data_p; /* pointer to iterate over buffers */
- unsigned int len = 0;
- struct drbg_string cipherin;
if (3 > reseed)
memset(df_data, 0, drbg_statelen(drbg));
- /* 10.2.1.3.2 step 2 and 10.2.1.4.2 step 2 */
- if (seed) {
- ret = drbg_ctr_df(drbg, df_data, drbg_statelen(drbg), seed);
+ if (!reseed) {
+ /*
+ * The DRBG uses the CTR mode of the underlying AES cipher. The
+ * CTR mode increments the counter value after the AES operation
+ * but SP800-90A requires that the counter is incremented before
+		 * the AES operation. Hence, we increment it by one at the
+		 * time we set it.

+ */
+ crypto_inc(drbg->V, drbg_blocklen(drbg));
+
+ ret = crypto_skcipher_setkey(drbg->ctr_handle, drbg->C,
+ drbg_keylen(drbg));
if (ret)
goto out;
}
- drbg_string_fill(&cipherin, drbg->V, drbg_blocklen(drbg));
- /*
- * 10.2.1.3.2 steps 2 and 3 are already covered as the allocation
- * zeroizes all memory during initialization
- */
- while (len < (drbg_statelen(drbg))) {
- /* 10.2.1.2 step 2.1 */
- crypto_inc(drbg->V, drbg_blocklen(drbg));
- /*
- * 10.2.1.2 step 2.2 */
- ret = drbg_kcapi_sym(drbg, drbg->C, temp + len, &cipherin);
+ /* 10.2.1.3.2 step 2 and 10.2.1.4.2 step 2 */
+ if (seed) {
+ ret = drbg_ctr_df(drbg, df_data, drbg_statelen(drbg), seed);
if (ret)
goto out;
- /* 10.2.1.2 step 2.3 and 3 */
- len += drbg_blocklen(drbg);
}
- /* 10.2.1.2 step 4 */
- temp_p = temp;
- df_data_p = df_data;
- for (len = 0; len < drbg_statelen(drbg); len++) {
- *temp_p ^= *df_data_p;
- df_data_p++; temp_p++;
- }
+ ret = drbg_kcapi_sym_ctr(drbg, df_data, drbg_statelen(drbg),
+ temp, drbg_statelen(drbg));
+ if (ret)
+ return ret;
/* 10.2.1.2 step 5 */
- memcpy(drbg->C, temp, drbg_keylen(drbg));
+ ret = crypto_skcipher_setkey(drbg->ctr_handle, temp,
+ drbg_keylen(drbg));
+ if (ret)
+ goto out;
/* 10.2.1.2 step 6 */
memcpy(drbg->V, temp + drbg_keylen(drbg), drbg_blocklen(drbg));
+	/* See above: increment counter by one to compensate for CTR op timing */
+ crypto_inc(drbg->V, drbg_blocklen(drbg));
ret = 0;
out:
@@ -537,9 +543,8 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
unsigned char *buf, unsigned int buflen,
struct list_head *addtl)
{
- int len = 0;
- int ret = 0;
- struct drbg_string data;
+ int ret;
+ int len = min_t(int, buflen, INT_MAX);
/* 10.2.1.5.2 step 2 */
if (addtl && !list_empty(addtl)) {
@@ -549,33 +554,16 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
}
/* 10.2.1.5.2 step 4.1 */
- crypto_inc(drbg->V, drbg_blocklen(drbg));
- drbg_string_fill(&data, drbg->V, drbg_blocklen(drbg));
- while (len < buflen) {
- int outlen = 0;
- /* 10.2.1.5.2 step 4.2 */
- ret = drbg_kcapi_sym(drbg, drbg->C, drbg->scratchpad, &data);
- if (ret) {
- len = ret;
- goto out;
- }
- outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
- drbg_blocklen(drbg) : (buflen - len);
- /* 10.2.1.5.2 step 4.3 */
- memcpy(buf + len, drbg->scratchpad, outlen);
- len += outlen;
- /* 10.2.1.5.2 step 6 */
- if (len < buflen)
- crypto_inc(drbg->V, drbg_blocklen(drbg));
- }
+ ret = drbg_kcapi_sym_ctr(drbg, drbg->ctr_null_value, DRBG_CTR_NULL_LEN,
+ buf, len);
+ if (ret)
+ return ret;
/* 10.2.1.5.2 step 6 */
ret = drbg_ctr_update(drbg, NULL, 3);
if (ret)
len = ret;
-out:
- memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
return len;
}
@@ -1145,11 +1133,11 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
if (!drbg)
return;
kzfree(drbg->V);
- drbg->V = NULL;
+ drbg->Vbuf = NULL;
kzfree(drbg->C);
- drbg->C = NULL;
- kzfree(drbg->scratchpad);
- drbg->scratchpad = NULL;
+ drbg->Cbuf = NULL;
+ kzfree(drbg->scratchpadbuf);
+ drbg->scratchpadbuf = NULL;
drbg->reseed_ctr = 0;
drbg->d_ops = NULL;
drbg->core = NULL;
@@ -1185,12 +1173,18 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
goto err;
}
- drbg->V = kmalloc(drbg_statelen(drbg), GFP_KERNEL);
- if (!drbg->V)
- goto err;
- drbg->C = kmalloc(drbg_statelen(drbg), GFP_KERNEL);
- if (!drbg->C)
+ ret = drbg->d_ops->crypto_init(drbg);
+ if (ret < 0)
goto err;
+
+ drbg->Vbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
+ if (!drbg->Vbuf)
+ goto fini;
+ drbg->V = PTR_ALIGN(drbg->Vbuf, ret + 1);
+ drbg->Cbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
+ if (!drbg->Cbuf)
+ goto fini;
+ drbg->C = PTR_ALIGN(drbg->Cbuf, ret + 1);
/* scratchpad is only generated for CTR and Hash */
if (drbg->core->flags & DRBG_HMAC)
sb_size = 0;
@@ -1204,13 +1198,16 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg);
if (0 < sb_size) {
- drbg->scratchpad = kzalloc(sb_size, GFP_KERNEL);
- if (!drbg->scratchpad)
- goto err;
+ drbg->scratchpadbuf = kzalloc(sb_size + ret, GFP_KERNEL);
+ if (!drbg->scratchpadbuf)
+ goto fini;
+ drbg->scratchpad = PTR_ALIGN(drbg->scratchpadbuf, ret + 1);
}
return 0;
+fini:
+ drbg->d_ops->crypto_fini(drbg);
err:
drbg_dealloc_state(drbg);
return ret;
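With this change the per-flavor crypto_init callbacks return the backend's
alignment mask rather than 0, and the V, C and scratchpad buffers are
over-allocated by that amount and re-aligned via PTR_ALIGN. The idiom in
isolation (illustrative only; "alignmask" stands for the crypto_init() return
value):

	u8 *raw = kmalloc(len + alignmask, GFP_KERNEL);
	u8 *aligned = PTR_ALIGN(raw, alignmask + 1);	/* mask is 2^n - 1 */

	/* ... use "aligned", but kfree() "raw" ... */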
@@ -1478,10 +1475,6 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
if (ret)
goto unlock;
- ret = -EFAULT;
- if (drbg->d_ops->crypto_init(drbg))
- goto err;
-
ret = drbg_prepare_hrng(drbg);
if (ret)
goto free_everything;
@@ -1505,8 +1498,6 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
mutex_unlock(&drbg->drbg_mutex);
return ret;
-err:
- drbg_dealloc_state(drbg);
unlock:
mutex_unlock(&drbg->drbg_mutex);
return ret;
@@ -1591,7 +1582,8 @@ static int drbg_init_hash_kernel(struct drbg_state *drbg)
sdesc->shash.tfm = tfm;
sdesc->shash.flags = 0;
drbg->priv_data = sdesc;
- return 0;
+
+ return crypto_shash_alignmask(tfm);
}
static int drbg_fini_hash_kernel(struct drbg_state *drbg)
@@ -1627,10 +1619,45 @@ static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval,
#endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */
#ifdef CONFIG_CRYPTO_DRBG_CTR
+static int drbg_fini_sym_kernel(struct drbg_state *drbg)
+{
+ struct crypto_cipher *tfm =
+ (struct crypto_cipher *)drbg->priv_data;
+ if (tfm)
+ crypto_free_cipher(tfm);
+ drbg->priv_data = NULL;
+
+ if (drbg->ctr_handle)
+ crypto_free_skcipher(drbg->ctr_handle);
+ drbg->ctr_handle = NULL;
+
+ if (drbg->ctr_req)
+ skcipher_request_free(drbg->ctr_req);
+ drbg->ctr_req = NULL;
+
+ kfree(drbg->ctr_null_value_buf);
+ drbg->ctr_null_value = NULL;
+
+ return 0;
+}
+
+static void drbg_skcipher_cb(struct crypto_async_request *req, int error)
+{
+ struct drbg_state *drbg = req->data;
+
+ if (error == -EINPROGRESS)
+ return;
+ drbg->ctr_async_err = error;
+ complete(&drbg->ctr_completion);
+}
+
static int drbg_init_sym_kernel(struct drbg_state *drbg)
{
- int ret = 0;
struct crypto_cipher *tfm;
+ struct crypto_skcipher *sk_tfm;
+ struct skcipher_request *req;
+ unsigned int alignmask;
+ char ctr_name[CRYPTO_MAX_ALG_NAME];
tfm = crypto_alloc_cipher(drbg->core->backend_cra_name, 0, 0);
if (IS_ERR(tfm)) {
@@ -1640,31 +1667,103 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
}
BUG_ON(drbg_blocklen(drbg) != crypto_cipher_blocksize(tfm));
drbg->priv_data = tfm;
- return ret;
+
+ if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
+ drbg->core->backend_cra_name) >= CRYPTO_MAX_ALG_NAME) {
+ drbg_fini_sym_kernel(drbg);
+ return -EINVAL;
+ }
+ sk_tfm = crypto_alloc_skcipher(ctr_name, 0, 0);
+ if (IS_ERR(sk_tfm)) {
+ pr_info("DRBG: could not allocate CTR cipher TFM handle: %s\n",
+ ctr_name);
+ drbg_fini_sym_kernel(drbg);
+ return PTR_ERR(sk_tfm);
+ }
+ drbg->ctr_handle = sk_tfm;
+
+ req = skcipher_request_alloc(sk_tfm, GFP_KERNEL);
+ if (!req) {
+		pr_info("DRBG: could not allocate skcipher request\n");
+ drbg_fini_sym_kernel(drbg);
+ return -ENOMEM;
+ }
+ drbg->ctr_req = req;
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ drbg_skcipher_cb, drbg);
+
+ alignmask = crypto_skcipher_alignmask(sk_tfm);
+ drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask,
+ GFP_KERNEL);
+ if (!drbg->ctr_null_value_buf) {
+ drbg_fini_sym_kernel(drbg);
+ return -ENOMEM;
+ }
+ drbg->ctr_null_value = (u8 *)PTR_ALIGN(drbg->ctr_null_value_buf,
+ alignmask + 1);
+
+ return alignmask;
}
-static int drbg_fini_sym_kernel(struct drbg_state *drbg)
+static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
+ const unsigned char *key)
{
struct crypto_cipher *tfm =
(struct crypto_cipher *)drbg->priv_data;
- if (tfm)
- crypto_free_cipher(tfm);
- drbg->priv_data = NULL;
- return 0;
+
+ crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg)));
}
-static int drbg_kcapi_sym(struct drbg_state *drbg, const unsigned char *key,
- unsigned char *outval, const struct drbg_string *in)
+static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
+ const struct drbg_string *in)
{
struct crypto_cipher *tfm =
(struct crypto_cipher *)drbg->priv_data;
- crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg)));
/* there is only component in *in */
BUG_ON(in->len < drbg_blocklen(drbg));
crypto_cipher_encrypt_one(tfm, outval, in->buf);
return 0;
}
+
+static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
+ u8 *inbuf, u32 inlen,
+ u8 *outbuf, u32 outlen)
+{
+ struct scatterlist sg_in;
+
+ sg_init_one(&sg_in, inbuf, inlen);
+
+ while (outlen) {
+ u32 cryptlen = min_t(u32, inlen, outlen);
+ struct scatterlist sg_out;
+ int ret;
+
+ sg_init_one(&sg_out, outbuf, cryptlen);
+ skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out,
+ cryptlen, drbg->V);
+ ret = crypto_skcipher_encrypt(drbg->ctr_req);
+ switch (ret) {
+ case 0:
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ ret = wait_for_completion_interruptible(
+ &drbg->ctr_completion);
+ if (!ret && !drbg->ctr_async_err) {
+ reinit_completion(&drbg->ctr_completion);
+ break;
+ }
+ default:
+ return ret;
+ }
+ init_completion(&drbg->ctr_completion);
+
+		outbuf += cryptlen;
+		outlen -= cryptlen;
+ }
+
+ return 0;
+}
#endif /* CONFIG_CRYPTO_DRBG_CTR */
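drbg_ctr_generate() now obtains its output by running the CTR keystream over
ctr_null_value, a 128-byte all-zero buffer: CTR mode computes out = E_K(ctr) ^ in,
so encrypting zeroes yields the raw keystream. The trick in isolation (hedged
sketch, error handling elided):

	u8 zeroes[DRBG_CTR_NULL_LEN] = { 0 };
	u8 keystream[DRBG_CTR_NULL_LEN];

	/* keystream = AES-CTR(key, V) ^ 0, i.e. the bare keystream blocks */
	drbg_kcapi_sym_ctr(drbg, zeroes, sizeof(zeroes),
			   keystream, sizeof(keystream));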
/***************************************************************
diff --git a/crypto/ecc.c b/crypto/ecc.c
new file mode 100644
index 000000000000..414c78a9c214
--- /dev/null
+++ b/crypto/ecc.c
@@ -0,0 +1,1018 @@
+/*
+ * Copyright (c) 2013, Kenneth MacKay
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/swab.h>
+#include <linux/fips.h>
+#include <crypto/ecdh.h>
+
+#include "ecc.h"
+#include "ecc_curve_defs.h"
+
+typedef struct {
+ u64 m_low;
+ u64 m_high;
+} uint128_t;
+
+static inline const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
+{
+ switch (curve_id) {
+ /* In FIPS mode only allow P256 and higher */
+ case ECC_CURVE_NIST_P192:
+ return fips_enabled ? NULL : &nist_p192;
+ case ECC_CURVE_NIST_P256:
+ return &nist_p256;
+ default:
+ return NULL;
+ }
+}
+
+static u64 *ecc_alloc_digits_space(unsigned int ndigits)
+{
+ size_t len = ndigits * sizeof(u64);
+
+ if (!len)
+ return NULL;
+
+ return kmalloc(len, GFP_KERNEL);
+}
+
+static void ecc_free_digits_space(u64 *space)
+{
+ kzfree(space);
+}
+
+static struct ecc_point *ecc_alloc_point(unsigned int ndigits)
+{
+ struct ecc_point *p = kmalloc(sizeof(*p), GFP_KERNEL);
+
+ if (!p)
+ return NULL;
+
+ p->x = ecc_alloc_digits_space(ndigits);
+ if (!p->x)
+ goto err_alloc_x;
+
+ p->y = ecc_alloc_digits_space(ndigits);
+ if (!p->y)
+ goto err_alloc_y;
+
+ p->ndigits = ndigits;
+
+ return p;
+
+err_alloc_y:
+ ecc_free_digits_space(p->x);
+err_alloc_x:
+ kfree(p);
+ return NULL;
+}
+
+static void ecc_free_point(struct ecc_point *p)
+{
+ if (!p)
+ return;
+
+ kzfree(p->x);
+ kzfree(p->y);
+ kzfree(p);
+}
+
+static void vli_clear(u64 *vli, unsigned int ndigits)
+{
+ int i;
+
+ for (i = 0; i < ndigits; i++)
+ vli[i] = 0;
+}
+
+/* Returns true if vli == 0, false otherwise. */
+static bool vli_is_zero(const u64 *vli, unsigned int ndigits)
+{
+ int i;
+
+ for (i = 0; i < ndigits; i++) {
+ if (vli[i])
+ return false;
+ }
+
+ return true;
+}
+
+/* Returns nonzero if the bit at position 'bit' of vli is set. */
+static u64 vli_test_bit(const u64 *vli, unsigned int bit)
+{
+ return (vli[bit / 64] & ((u64)1 << (bit % 64)));
+}
+
+/* Counts the number of 64-bit "digits" in vli. */
+static unsigned int vli_num_digits(const u64 *vli, unsigned int ndigits)
+{
+ int i;
+
+ /* Search from the end until we find a non-zero digit.
+ * We do it in reverse because we expect that most digits will
+ * be nonzero.
+ */
+ for (i = ndigits - 1; i >= 0 && vli[i] == 0; i--);
+
+ return (i + 1);
+}
+
+/* Counts the number of bits required for vli. */
+static unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits)
+{
+ unsigned int i, num_digits;
+ u64 digit;
+
+ num_digits = vli_num_digits(vli, ndigits);
+ if (num_digits == 0)
+ return 0;
+
+ digit = vli[num_digits - 1];
+ for (i = 0; digit; i++)
+ digit >>= 1;
+
+ return ((num_digits - 1) * 64 + i);
+}
+
+/* Sets dest = src. */
+static void vli_set(u64 *dest, const u64 *src, unsigned int ndigits)
+{
+ int i;
+
+ for (i = 0; i < ndigits; i++)
+ dest[i] = src[i];
+}
+
+/* Returns sign of left - right. */
+static int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
+{
+ int i;
+
+ for (i = ndigits - 1; i >= 0; i--) {
+ if (left[i] > right[i])
+ return 1;
+ else if (left[i] < right[i])
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Computes result = in << c, returning carry. Can modify in place
+ * (if result == in). 0 < shift < 64.
+ */
+static u64 vli_lshift(u64 *result, const u64 *in, unsigned int shift,
+ unsigned int ndigits)
+{
+ u64 carry = 0;
+ int i;
+
+ for (i = 0; i < ndigits; i++) {
+ u64 temp = in[i];
+
+ result[i] = (temp << shift) | carry;
+ carry = temp >> (64 - shift);
+ }
+
+ return carry;
+}
+
+/* Computes vli = vli >> 1. */
+static void vli_rshift1(u64 *vli, unsigned int ndigits)
+{
+ u64 *end = vli;
+ u64 carry = 0;
+
+ vli += ndigits;
+
+ while (vli-- > end) {
+ u64 temp = *vli;
+ *vli = (temp >> 1) | carry;
+ carry = temp << 63;
+ }
+}
+
+/* Computes result = left + right, returning carry. Can modify in place. */
+static u64 vli_add(u64 *result, const u64 *left, const u64 *right,
+ unsigned int ndigits)
+{
+ u64 carry = 0;
+ int i;
+
+ for (i = 0; i < ndigits; i++) {
+ u64 sum;
+
+ sum = left[i] + right[i] + carry;
+ if (sum != left[i])
+ carry = (sum < left[i]);
+
+ result[i] = sum;
+ }
+
+ return carry;
+}
+
+/* Computes result = left - right, returning borrow. Can modify in place. */
+static u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
+ unsigned int ndigits)
+{
+ u64 borrow = 0;
+ int i;
+
+ for (i = 0; i < ndigits; i++) {
+ u64 diff;
+
+ diff = left[i] - right[i] - borrow;
+ if (diff != left[i])
+ borrow = (diff > left[i]);
+
+ result[i] = diff;
+ }
+
+ return borrow;
+}
+
+static uint128_t mul_64_64(u64 left, u64 right)
+{
+ u64 a0 = left & 0xffffffffull;
+ u64 a1 = left >> 32;
+ u64 b0 = right & 0xffffffffull;
+ u64 b1 = right >> 32;
+ u64 m0 = a0 * b0;
+ u64 m1 = a0 * b1;
+ u64 m2 = a1 * b0;
+ u64 m3 = a1 * b1;
+ uint128_t result;
+
+ m2 += (m0 >> 32);
+ m2 += m1;
+
+ /* Overflow */
+ if (m2 < m1)
+ m3 += 0x100000000ull;
+
+ result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
+ result.m_high = m3 + (m2 >> 32);
+
+ return result;
+}
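A quick spot check of the 32x32-bit decomposition above (hypothetical test
code, not part of the file): squaring 2^32 must give exactly 2^64.

	uint128_t r = mul_64_64(1ULL << 32, 1ULL << 32);

	/* expect r.m_low == 0 and r.m_high == 1, i.e. 2^64 */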
+
+static uint128_t add_128_128(uint128_t a, uint128_t b)
+{
+ uint128_t result;
+
+ result.m_low = a.m_low + b.m_low;
+ result.m_high = a.m_high + b.m_high + (result.m_low < a.m_low);
+
+ return result;
+}
+
+static void vli_mult(u64 *result, const u64 *left, const u64 *right,
+ unsigned int ndigits)
+{
+ uint128_t r01 = { 0, 0 };
+ u64 r2 = 0;
+ unsigned int i, k;
+
+ /* Compute each digit of result in sequence, maintaining the
+ * carries.
+ */
+ for (k = 0; k < ndigits * 2 - 1; k++) {
+ unsigned int min;
+
+ if (k < ndigits)
+ min = 0;
+ else
+ min = (k + 1) - ndigits;
+
+ for (i = min; i <= k && i < ndigits; i++) {
+ uint128_t product;
+
+ product = mul_64_64(left[i], right[k - i]);
+
+ r01 = add_128_128(r01, product);
+ r2 += (r01.m_high < product.m_high);
+ }
+
+ result[k] = r01.m_low;
+ r01.m_low = r01.m_high;
+ r01.m_high = r2;
+ r2 = 0;
+ }
+
+ result[ndigits * 2 - 1] = r01.m_low;
+}
+
+static void vli_square(u64 *result, const u64 *left, unsigned int ndigits)
+{
+ uint128_t r01 = { 0, 0 };
+ u64 r2 = 0;
+ int i, k;
+
+ for (k = 0; k < ndigits * 2 - 1; k++) {
+ unsigned int min;
+
+ if (k < ndigits)
+ min = 0;
+ else
+ min = (k + 1) - ndigits;
+
+ for (i = min; i <= k && i <= k - i; i++) {
+ uint128_t product;
+
+ product = mul_64_64(left[i], left[k - i]);
+
+ if (i < k - i) {
+ r2 += product.m_high >> 63;
+ product.m_high = (product.m_high << 1) |
+ (product.m_low >> 63);
+ product.m_low <<= 1;
+ }
+
+ r01 = add_128_128(r01, product);
+ r2 += (r01.m_high < product.m_high);
+ }
+
+ result[k] = r01.m_low;
+ r01.m_low = r01.m_high;
+ r01.m_high = r2;
+ r2 = 0;
+ }
+
+ result[ndigits * 2 - 1] = r01.m_low;
+}
+
+/* Computes result = (left + right) % mod.
+ * Assumes that left < mod and right < mod, result != mod.
+ */
+static void vli_mod_add(u64 *result, const u64 *left, const u64 *right,
+ const u64 *mod, unsigned int ndigits)
+{
+ u64 carry;
+
+ carry = vli_add(result, left, right, ndigits);
+
+ /* result > mod (result = mod + remainder), so subtract mod to
+ * get remainder.
+ */
+ if (carry || vli_cmp(result, mod, ndigits) >= 0)
+ vli_sub(result, result, mod, ndigits);
+}
+
+/* Computes result = (left - right) % mod.
+ * Assumes that left < mod and right < mod, result != mod.
+ */
+static void vli_mod_sub(u64 *result, const u64 *left, const u64 *right,
+ const u64 *mod, unsigned int ndigits)
+{
+ u64 borrow = vli_sub(result, left, right, ndigits);
+
+	/* In this case, result == -diff == (max int) - diff.
+ * Since -x % d == d - x, we can get the correct result from
+ * result + mod (with overflow).
+ */
+ if (borrow)
+ vli_add(result, result, mod, ndigits);
+}
+
+/* Computes result = product % curve_prime.
+ * See algorithms 5 and 6 from
+ * http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf
+ */
+static void vli_mmod_fast_192(u64 *result, const u64 *product,
+ const u64 *curve_prime, u64 *tmp)
+{
+ const unsigned int ndigits = 3;
+ int carry;
+
+ vli_set(result, product, ndigits);
+
+ vli_set(tmp, &product[3], ndigits);
+ carry = vli_add(result, result, tmp, ndigits);
+
+ tmp[0] = 0;
+ tmp[1] = product[3];
+ tmp[2] = product[4];
+ carry += vli_add(result, result, tmp, ndigits);
+
+ tmp[0] = tmp[1] = product[5];
+ tmp[2] = 0;
+ carry += vli_add(result, result, tmp, ndigits);
+
+ while (carry || vli_cmp(curve_prime, result, ndigits) != 1)
+ carry -= vli_sub(result, result, curve_prime, ndigits);
+}
+
+/* Computes result = product % curve_prime
+ * from http://www.nsa.gov/ia/_files/nist-routines.pdf
+ */
+static void vli_mmod_fast_256(u64 *result, const u64 *product,
+ const u64 *curve_prime, u64 *tmp)
+{
+ int carry;
+ const unsigned int ndigits = 4;
+
+ /* t */
+ vli_set(result, product, ndigits);
+
+ /* s1 */
+ tmp[0] = 0;
+ tmp[1] = product[5] & 0xffffffff00000000ull;
+ tmp[2] = product[6];
+ tmp[3] = product[7];
+ carry = vli_lshift(tmp, tmp, 1, ndigits);
+ carry += vli_add(result, result, tmp, ndigits);
+
+ /* s2 */
+ tmp[1] = product[6] << 32;
+ tmp[2] = (product[6] >> 32) | (product[7] << 32);
+ tmp[3] = product[7] >> 32;
+ carry += vli_lshift(tmp, tmp, 1, ndigits);
+ carry += vli_add(result, result, tmp, ndigits);
+
+ /* s3 */
+ tmp[0] = product[4];
+ tmp[1] = product[5] & 0xffffffff;
+ tmp[2] = 0;
+ tmp[3] = product[7];
+ carry += vli_add(result, result, tmp, ndigits);
+
+ /* s4 */
+ tmp[0] = (product[4] >> 32) | (product[5] << 32);
+ tmp[1] = (product[5] >> 32) | (product[6] & 0xffffffff00000000ull);
+ tmp[2] = product[7];
+ tmp[3] = (product[6] >> 32) | (product[4] << 32);
+ carry += vli_add(result, result, tmp, ndigits);
+
+ /* d1 */
+ tmp[0] = (product[5] >> 32) | (product[6] << 32);
+ tmp[1] = (product[6] >> 32);
+ tmp[2] = 0;
+ tmp[3] = (product[4] & 0xffffffff) | (product[5] << 32);
+ carry -= vli_sub(result, result, tmp, ndigits);
+
+ /* d2 */
+ tmp[0] = product[6];
+ tmp[1] = product[7];
+ tmp[2] = 0;
+ tmp[3] = (product[4] >> 32) | (product[5] & 0xffffffff00000000ull);
+ carry -= vli_sub(result, result, tmp, ndigits);
+
+ /* d3 */
+ tmp[0] = (product[6] >> 32) | (product[7] << 32);
+ tmp[1] = (product[7] >> 32) | (product[4] << 32);
+ tmp[2] = (product[4] >> 32) | (product[5] << 32);
+ tmp[3] = (product[6] << 32);
+ carry -= vli_sub(result, result, tmp, ndigits);
+
+ /* d4 */
+ tmp[0] = product[7];
+ tmp[1] = product[4] & 0xffffffff00000000ull;
+ tmp[2] = product[5];
+ tmp[3] = product[6] & 0xffffffff00000000ull;
+ carry -= vli_sub(result, result, tmp, ndigits);
+
+ if (carry < 0) {
+ do {
+ carry += vli_add(result, result, curve_prime, ndigits);
+ } while (carry < 0);
+ } else {
+ while (carry || vli_cmp(curve_prime, result, ndigits) != 1)
+ carry -= vli_sub(result, result, curve_prime, ndigits);
+ }
+}
+
+/* Computes result = product % curve_prime
+ * from http://www.nsa.gov/ia/_files/nist-routines.pdf
+ */
+static bool vli_mmod_fast(u64 *result, u64 *product,
+ const u64 *curve_prime, unsigned int ndigits)
+{
+ u64 tmp[2 * ndigits];
+
+ switch (ndigits) {
+ case 3:
+ vli_mmod_fast_192(result, product, curve_prime, tmp);
+ break;
+ case 4:
+ vli_mmod_fast_256(result, product, curve_prime, tmp);
+ break;
+ default:
+		pr_err("unsupported digits size!\n");
+ return false;
+ }
+
+ return true;
+}
+
+/* Computes result = (left * right) % curve_prime. */
+static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right,
+ const u64 *curve_prime, unsigned int ndigits)
+{
+ u64 product[2 * ndigits];
+
+ vli_mult(product, left, right, ndigits);
+ vli_mmod_fast(result, product, curve_prime, ndigits);
+}
+
+/* Computes result = left^2 % curve_prime. */
+static void vli_mod_square_fast(u64 *result, const u64 *left,
+ const u64 *curve_prime, unsigned int ndigits)
+{
+ u64 product[2 * ndigits];
+
+ vli_square(product, left, ndigits);
+ vli_mmod_fast(result, product, curve_prime, ndigits);
+}
+
+#define EVEN(vli) (!(vli[0] & 1))
+/* Computes result = (1 / input) % mod. All VLIs are the same size.
+ * See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
+ * https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf
+ */
+static void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
+ unsigned int ndigits)
+{
+ u64 a[ndigits], b[ndigits];
+ u64 u[ndigits], v[ndigits];
+ u64 carry;
+ int cmp_result;
+
+ if (vli_is_zero(input, ndigits)) {
+ vli_clear(result, ndigits);
+ return;
+ }
+
+ vli_set(a, input, ndigits);
+ vli_set(b, mod, ndigits);
+ vli_clear(u, ndigits);
+ u[0] = 1;
+ vli_clear(v, ndigits);
+
+ while ((cmp_result = vli_cmp(a, b, ndigits)) != 0) {
+ carry = 0;
+
+ if (EVEN(a)) {
+ vli_rshift1(a, ndigits);
+
+ if (!EVEN(u))
+ carry = vli_add(u, u, mod, ndigits);
+
+ vli_rshift1(u, ndigits);
+ if (carry)
+ u[ndigits - 1] |= 0x8000000000000000ull;
+ } else if (EVEN(b)) {
+ vli_rshift1(b, ndigits);
+
+ if (!EVEN(v))
+ carry = vli_add(v, v, mod, ndigits);
+
+ vli_rshift1(v, ndigits);
+ if (carry)
+ v[ndigits - 1] |= 0x8000000000000000ull;
+ } else if (cmp_result > 0) {
+ vli_sub(a, a, b, ndigits);
+ vli_rshift1(a, ndigits);
+
+ if (vli_cmp(u, v, ndigits) < 0)
+ vli_add(u, u, mod, ndigits);
+
+ vli_sub(u, u, v, ndigits);
+ if (!EVEN(u))
+ carry = vli_add(u, u, mod, ndigits);
+
+ vli_rshift1(u, ndigits);
+ if (carry)
+ u[ndigits - 1] |= 0x8000000000000000ull;
+ } else {
+ vli_sub(b, b, a, ndigits);
+ vli_rshift1(b, ndigits);
+
+ if (vli_cmp(v, u, ndigits) < 0)
+ vli_add(v, v, mod, ndigits);
+
+ vli_sub(v, v, u, ndigits);
+ if (!EVEN(v))
+ carry = vli_add(v, v, mod, ndigits);
+
+ vli_rshift1(v, ndigits);
+ if (carry)
+ v[ndigits - 1] |= 0x8000000000000000ull;
+ }
+ }
+
+ vli_set(result, u, ndigits);
+}
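A single-digit check of the binary extended Euclidean algorithm above
(hypothetical test code, ndigits == 1): the inverse of 3 modulo 7 is 5,
since 3 * 5 = 15 = 2 * 7 + 1.

	u64 a = 3, mod = 7, inv;

	vli_mod_inv(&inv, &a, &mod, 1);
	/* expect inv == 5 */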
+
+/* ------ Point operations ------ */
+
+/* Returns true if point is the point at infinity, false otherwise. */
+static bool ecc_point_is_zero(const struct ecc_point *point)
+{
+ return (vli_is_zero(point->x, point->ndigits) &&
+ vli_is_zero(point->y, point->ndigits));
+}
+
+/* Point multiplication algorithm using Montgomery's ladder with co-Z
+ * coordinates. From http://eprint.iacr.org/2011/338.pdf
+ */
+
+/* Double in place */
+static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
+ u64 *curve_prime, unsigned int ndigits)
+{
+ /* t1 = x, t2 = y, t3 = z */
+ u64 t4[ndigits];
+ u64 t5[ndigits];
+
+ if (vli_is_zero(z1, ndigits))
+ return;
+
+ /* t4 = y1^2 */
+ vli_mod_square_fast(t4, y1, curve_prime, ndigits);
+ /* t5 = x1*y1^2 = A */
+ vli_mod_mult_fast(t5, x1, t4, curve_prime, ndigits);
+ /* t4 = y1^4 */
+ vli_mod_square_fast(t4, t4, curve_prime, ndigits);
+ /* t2 = y1*z1 = z3 */
+ vli_mod_mult_fast(y1, y1, z1, curve_prime, ndigits);
+ /* t3 = z1^2 */
+ vli_mod_square_fast(z1, z1, curve_prime, ndigits);
+
+ /* t1 = x1 + z1^2 */
+ vli_mod_add(x1, x1, z1, curve_prime, ndigits);
+ /* t3 = 2*z1^2 */
+ vli_mod_add(z1, z1, z1, curve_prime, ndigits);
+ /* t3 = x1 - z1^2 */
+ vli_mod_sub(z1, x1, z1, curve_prime, ndigits);
+ /* t1 = x1^2 - z1^4 */
+ vli_mod_mult_fast(x1, x1, z1, curve_prime, ndigits);
+
+ /* t3 = 2*(x1^2 - z1^4) */
+ vli_mod_add(z1, x1, x1, curve_prime, ndigits);
+ /* t1 = 3*(x1^2 - z1^4) */
+ vli_mod_add(x1, x1, z1, curve_prime, ndigits);
+ if (vli_test_bit(x1, 0)) {
+ u64 carry = vli_add(x1, x1, curve_prime, ndigits);
+
+ vli_rshift1(x1, ndigits);
+ x1[ndigits - 1] |= carry << 63;
+ } else {
+ vli_rshift1(x1, ndigits);
+ }
+ /* t1 = 3/2*(x1^2 - z1^4) = B */
+
+ /* t3 = B^2 */
+ vli_mod_square_fast(z1, x1, curve_prime, ndigits);
+ /* t3 = B^2 - A */
+ vli_mod_sub(z1, z1, t5, curve_prime, ndigits);
+ /* t3 = B^2 - 2A = x3 */
+ vli_mod_sub(z1, z1, t5, curve_prime, ndigits);
+ /* t5 = A - x3 */
+ vli_mod_sub(t5, t5, z1, curve_prime, ndigits);
+ /* t1 = B * (A - x3) */
+ vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
+ /* t4 = B * (A - x3) - y1^4 = y3 */
+ vli_mod_sub(t4, x1, t4, curve_prime, ndigits);
+
+ vli_set(x1, z1, ndigits);
+ vli_set(z1, y1, ndigits);
+ vli_set(y1, t4, ndigits);
+}
+
+/* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */
+static void apply_z(u64 *x1, u64 *y1, u64 *z, u64 *curve_prime,
+ unsigned int ndigits)
+{
+ u64 t1[ndigits];
+
+ vli_mod_square_fast(t1, z, curve_prime, ndigits); /* z^2 */
+ vli_mod_mult_fast(x1, x1, t1, curve_prime, ndigits); /* x1 * z^2 */
+ vli_mod_mult_fast(t1, t1, z, curve_prime, ndigits); /* z^3 */
+ vli_mod_mult_fast(y1, y1, t1, curve_prime, ndigits); /* y1 * z^3 */
+}
+
+/* P = (x1, y1) => 2P, (x2, y2) => P' */
+static void xycz_initial_double(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
+ u64 *p_initial_z, u64 *curve_prime,
+ unsigned int ndigits)
+{
+ u64 z[ndigits];
+
+ vli_set(x2, x1, ndigits);
+ vli_set(y2, y1, ndigits);
+
+ vli_clear(z, ndigits);
+ z[0] = 1;
+
+ if (p_initial_z)
+ vli_set(z, p_initial_z, ndigits);
+
+ apply_z(x1, y1, z, curve_prime, ndigits);
+
+ ecc_point_double_jacobian(x1, y1, z, curve_prime, ndigits);
+
+ apply_z(x2, y2, z, curve_prime, ndigits);
+}
+
+/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
+ * Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3)
+ * or P => P', Q => P + Q
+ */
+static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
+ unsigned int ndigits)
+{
+ /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
+ u64 t5[ndigits];
+
+ /* t5 = x2 - x1 */
+ vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
+ /* t5 = (x2 - x1)^2 = A */
+ vli_mod_square_fast(t5, t5, curve_prime, ndigits);
+ /* t1 = x1*A = B */
+ vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
+ /* t3 = x2*A = C */
+ vli_mod_mult_fast(x2, x2, t5, curve_prime, ndigits);
+ /* t4 = y2 - y1 */
+ vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
+ /* t5 = (y2 - y1)^2 = D */
+ vli_mod_square_fast(t5, y2, curve_prime, ndigits);
+
+ /* t5 = D - B */
+ vli_mod_sub(t5, t5, x1, curve_prime, ndigits);
+ /* t5 = D - B - C = x3 */
+ vli_mod_sub(t5, t5, x2, curve_prime, ndigits);
+ /* t3 = C - B */
+ vli_mod_sub(x2, x2, x1, curve_prime, ndigits);
+ /* t2 = y1*(C - B) */
+ vli_mod_mult_fast(y1, y1, x2, curve_prime, ndigits);
+ /* t3 = B - x3 */
+ vli_mod_sub(x2, x1, t5, curve_prime, ndigits);
+ /* t4 = (y2 - y1)*(B - x3) */
+ vli_mod_mult_fast(y2, y2, x2, curve_prime, ndigits);
+ /* t4 = y3 */
+ vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
+
+ vli_set(x2, t5, ndigits);
+}
+
+/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
+ * Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3)
+ * or P => P - Q, Q => P + Q
+ */
+static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
+ unsigned int ndigits)
+{
+ /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
+ u64 t5[ndigits];
+ u64 t6[ndigits];
+ u64 t7[ndigits];
+
+ /* t5 = x2 - x1 */
+ vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
+ /* t5 = (x2 - x1)^2 = A */
+ vli_mod_square_fast(t5, t5, curve_prime, ndigits);
+ /* t1 = x1*A = B */
+ vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
+ /* t3 = x2*A = C */
+ vli_mod_mult_fast(x2, x2, t5, curve_prime, ndigits);
+	/* t5 = y2 + y1 */
+ vli_mod_add(t5, y2, y1, curve_prime, ndigits);
+ /* t4 = y2 - y1 */
+ vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
+
+ /* t6 = C - B */
+ vli_mod_sub(t6, x2, x1, curve_prime, ndigits);
+ /* t2 = y1 * (C - B) */
+ vli_mod_mult_fast(y1, y1, t6, curve_prime, ndigits);
+ /* t6 = B + C */
+ vli_mod_add(t6, x1, x2, curve_prime, ndigits);
+ /* t3 = (y2 - y1)^2 */
+ vli_mod_square_fast(x2, y2, curve_prime, ndigits);
+ /* t3 = x3 */
+ vli_mod_sub(x2, x2, t6, curve_prime, ndigits);
+
+ /* t7 = B - x3 */
+ vli_mod_sub(t7, x1, x2, curve_prime, ndigits);
+ /* t4 = (y2 - y1)*(B - x3) */
+ vli_mod_mult_fast(y2, y2, t7, curve_prime, ndigits);
+ /* t4 = y3 */
+ vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
+
+ /* t7 = (y2 + y1)^2 = F */
+ vli_mod_square_fast(t7, t5, curve_prime, ndigits);
+ /* t7 = x3' */
+ vli_mod_sub(t7, t7, t6, curve_prime, ndigits);
+ /* t6 = x3' - B */
+ vli_mod_sub(t6, t7, x1, curve_prime, ndigits);
+ /* t6 = (y2 + y1)*(x3' - B) */
+ vli_mod_mult_fast(t6, t6, t5, curve_prime, ndigits);
+ /* t2 = y3' */
+ vli_mod_sub(y1, t6, y1, curve_prime, ndigits);
+
+ vli_set(x1, t7, ndigits);
+}
+
+static void ecc_point_mult(struct ecc_point *result,
+ const struct ecc_point *point, const u64 *scalar,
+ u64 *initial_z, u64 *curve_prime,
+ unsigned int ndigits)
+{
+ /* R0 and R1 */
+ u64 rx[2][ndigits];
+ u64 ry[2][ndigits];
+ u64 z[ndigits];
+ int i, nb;
+ int num_bits = vli_num_bits(scalar, ndigits);
+
+ vli_set(rx[1], point->x, ndigits);
+ vli_set(ry[1], point->y, ndigits);
+
+ xycz_initial_double(rx[1], ry[1], rx[0], ry[0], initial_z, curve_prime,
+ ndigits);
+
+ for (i = num_bits - 2; i > 0; i--) {
+ nb = !vli_test_bit(scalar, i);
+ xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime,
+ ndigits);
+ xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime,
+ ndigits);
+ }
+
+ nb = !vli_test_bit(scalar, 0);
+ xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime,
+ ndigits);
+
+ /* Find final 1/Z value. */
+ /* X1 - X0 */
+ vli_mod_sub(z, rx[1], rx[0], curve_prime, ndigits);
+ /* Yb * (X1 - X0) */
+ vli_mod_mult_fast(z, z, ry[1 - nb], curve_prime, ndigits);
+ /* xP * Yb * (X1 - X0) */
+ vli_mod_mult_fast(z, z, point->x, curve_prime, ndigits);
+
+ /* 1 / (xP * Yb * (X1 - X0)) */
+ vli_mod_inv(z, z, curve_prime, point->ndigits);
+
+ /* yP / (xP * Yb * (X1 - X0)) */
+ vli_mod_mult_fast(z, z, point->y, curve_prime, ndigits);
+ /* Xb * yP / (xP * Yb * (X1 - X0)) */
+ vli_mod_mult_fast(z, z, rx[1 - nb], curve_prime, ndigits);
+ /* End 1/Z calculation */
+
+ xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime, ndigits);
+
+ apply_z(rx[0], ry[0], z, curve_prime, ndigits);
+
+ vli_set(result->x, rx[0], ndigits);
+ vli_set(result->y, ry[0], ndigits);
+}
+
+static inline void ecc_swap_digits(const u64 *in, u64 *out,
+ unsigned int ndigits)
+{
+ int i;
+
+ for (i = 0; i < ndigits; i++)
+ out[i] = __swab64(in[ndigits - 1 - i]);
+}
+
+int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
+ const u8 *private_key, unsigned int private_key_len)
+{
+ int nbytes;
+ const struct ecc_curve *curve = ecc_get_curve(curve_id);
+
+	if (!curve || !private_key)
+		return -EINVAL;
+
+ nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
+
+ if (private_key_len != nbytes)
+ return -EINVAL;
+
+ if (vli_is_zero((const u64 *)&private_key[0], ndigits))
+ return -EINVAL;
+
+ /* Make sure the private key is in the range [1, n-1]. */
+ if (vli_cmp(curve->n, (const u64 *)&private_key[0], ndigits) != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+int ecdh_make_pub_key(unsigned int curve_id, unsigned int ndigits,
+ const u8 *private_key, unsigned int private_key_len,
+ u8 *public_key, unsigned int public_key_len)
+{
+ int ret = 0;
+ struct ecc_point *pk;
+ u64 priv[ndigits];
+ unsigned int nbytes;
+ const struct ecc_curve *curve = ecc_get_curve(curve_id);
+
+ if (!private_key || !curve) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ecc_swap_digits((const u64 *)private_key, priv, ndigits);
+
+ pk = ecc_alloc_point(ndigits);
+ if (!pk) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ecc_point_mult(pk, &curve->g, priv, NULL, curve->p, ndigits);
+ if (ecc_point_is_zero(pk)) {
+ ret = -EAGAIN;
+ goto err_free_point;
+ }
+
+ nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
+ ecc_swap_digits(pk->x, (u64 *)public_key, ndigits);
+ ecc_swap_digits(pk->y, (u64 *)&public_key[nbytes], ndigits);
+
+err_free_point:
+ ecc_free_point(pk);
+out:
+ return ret;
+}
+
+int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
+ const u8 *private_key, unsigned int private_key_len,
+ const u8 *public_key, unsigned int public_key_len,
+ u8 *secret, unsigned int secret_len)
+{
+ int ret = 0;
+ struct ecc_point *product, *pk;
+ u64 priv[ndigits];
+ u64 rand_z[ndigits];
+ unsigned int nbytes;
+ const struct ecc_curve *curve = ecc_get_curve(curve_id);
+
+ if (!private_key || !public_key || !curve) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
+
+ get_random_bytes(rand_z, nbytes);
+
+ pk = ecc_alloc_point(ndigits);
+ if (!pk) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ product = ecc_alloc_point(ndigits);
+ if (!product) {
+ ret = -ENOMEM;
+ goto err_alloc_product;
+ }
+
+ ecc_swap_digits((const u64 *)public_key, pk->x, ndigits);
+ ecc_swap_digits((const u64 *)&public_key[nbytes], pk->y, ndigits);
+ ecc_swap_digits((const u64 *)private_key, priv, ndigits);
+
+ ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits);
+
+ ecc_swap_digits(product->x, (u64 *)secret, ndigits);
+
+ if (ecc_point_is_zero(product))
+ ret = -EFAULT;
+
+ ecc_free_point(product);
+err_alloc_product:
+ ecc_free_point(pk);
+out:
+ return ret;
+}
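Taken together, ecc_is_key_valid(), ecdh_make_pub_key() and
crypto_ecdh_shared_secret() implement one side of the key agreement. A hedged
end-to-end sketch for NIST P-256 (ndigits == 4, 32-byte scalars, 64-byte
public points; buffers illustrative, error handling elided):

	u8 priv[32], pub[64], peer_pub[64], secret[32];

	get_random_bytes(priv, sizeof(priv));
	if (ecc_is_key_valid(ECC_CURVE_NIST_P256, 4, priv, sizeof(priv)))
		/* out of range: draw fresh random bytes and retry */;
	ecdh_make_pub_key(ECC_CURVE_NIST_P256, 4, priv, sizeof(priv),
			  pub, sizeof(pub));
	/* exchange pub <-> peer_pub out of band, then: */
	crypto_ecdh_shared_secret(ECC_CURVE_NIST_P256, 4, priv, sizeof(priv),
				  peer_pub, sizeof(peer_pub),
				  secret, sizeof(secret));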
diff --git a/crypto/ecc.h b/crypto/ecc.h
new file mode 100644
index 000000000000..663d598c7406
--- /dev/null
+++ b/crypto/ecc.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2013, Kenneth MacKay
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _CRYPTO_ECC_H
+#define _CRYPTO_ECC_H
+
+#define ECC_MAX_DIGITS 4 /* 256 bits */
+
+#define ECC_DIGITS_TO_BYTES_SHIFT 3
+
+/**
+ * ecc_is_key_valid() - Validate a given ECDH private key
+ *
+ * @curve_id: id representing the curve to use
+ * @ndigits: curve number of digits
+ * @private_key: private key to be used for the given curve
+ * @private_key_len: private key len
+ *
+ * Returns 0 if the key is acceptable, a negative value otherwise
+ */
+int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
+ const u8 *private_key, unsigned int private_key_len);
+
+/**
+ * ecdh_make_pub_key() - Compute an ECC public key
+ *
+ * @curve_id: id representing the curve to use
+ * @ndigits: curve's number of digits
+ * @private_key: pregenerated private key for the given curve
+ * @private_key_len: length of private_key
+ * @public_key: buffer for storing the public key generated
+ * @public_key_len: length of the public_key buffer
+ *
+ * Returns 0 if the public key was generated successfully, a negative value
+ * if an error occurred.
+ */
+int ecdh_make_pub_key(unsigned int curve_id, unsigned int ndigits,
+ const u8 *private_key, unsigned int private_key_len,
+ u8 *public_key, unsigned int public_key_len);
+
+/**
+ * crypto_ecdh_shared_secret() - Compute a shared secret
+ *
+ * @curve_id: id representing the curve to use
+ * @ndigits: curve's number of digits
+ * @private_key: private key of part A
+ * @private_key_len: length of private_key
+ * @public_key: public key of counterpart B
+ * @public_key_len: length of public_key
+ * @secret: buffer for storing the calculated shared secret
+ * @secret_len: length of the secret buffer
+ *
+ * Note: It is recommended that you hash the result of crypto_ecdh_shared_secret
+ * before using it for symmetric encryption or HMAC.
+ *
+ * Returns 0 if the shared secret was generated successfully, a negative value
+ * if an error occurred.
+ */
+int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
+ const u8 *private_key, unsigned int private_key_len,
+ const u8 *public_key, unsigned int public_key_len,
+ u8 *secret, unsigned int secret_len);
+#endif
diff --git a/crypto/ecc_curve_defs.h b/crypto/ecc_curve_defs.h
new file mode 100644
index 000000000000..03ae5f714028
--- /dev/null
+++ b/crypto/ecc_curve_defs.h
@@ -0,0 +1,57 @@
+#ifndef _CRYPTO_ECC_CURVE_DEFS_H
+#define _CRYPTO_ECC_CURVE_DEFS_H
+
+struct ecc_point {
+ u64 *x;
+ u64 *y;
+ u8 ndigits;
+};
+
+struct ecc_curve {
+ char *name;
+ struct ecc_point g;
+ u64 *p;
+ u64 *n;
+};
+
+/* NIST P-192 */
+static u64 nist_p192_g_x[] = { 0xF4FF0AFD82FF1012ull, 0x7CBF20EB43A18800ull,
+ 0x188DA80EB03090F6ull };
+static u64 nist_p192_g_y[] = { 0x73F977A11E794811ull, 0x631011ED6B24CDD5ull,
+ 0x07192B95FFC8DA78ull };
+static u64 nist_p192_p[] = { 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFEull,
+ 0xFFFFFFFFFFFFFFFFull };
+static u64 nist_p192_n[] = { 0x146BC9B1B4D22831ull, 0xFFFFFFFF99DEF836ull,
+ 0xFFFFFFFFFFFFFFFFull };
+static struct ecc_curve nist_p192 = {
+ .name = "nist_192",
+ .g = {
+ .x = nist_p192_g_x,
+ .y = nist_p192_g_y,
+ .ndigits = 3,
+ },
+ .p = nist_p192_p,
+ .n = nist_p192_n
+};
+
+/* NIST P-256 */
+static u64 nist_p256_g_x[] = { 0xF4A13945D898C296ull, 0x77037D812DEB33A0ull,
+ 0xF8BCE6E563A440F2ull, 0x6B17D1F2E12C4247ull };
+static u64 nist_p256_g_y[] = { 0xCBB6406837BF51F5ull, 0x2BCE33576B315ECEull,
+ 0x8EE7EB4A7C0F9E16ull, 0x4FE342E2FE1A7F9Bull };
+static u64 nist_p256_p[] = { 0xFFFFFFFFFFFFFFFFull, 0x00000000FFFFFFFFull,
+ 0x0000000000000000ull, 0xFFFFFFFF00000001ull };
+static u64 nist_p256_n[] = { 0xF3B9CAC2FC632551ull, 0xBCE6FAADA7179E84ull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFF00000000ull };
+static struct ecc_curve nist_p256 = {
+ .name = "nist_256",
+ .g = {
+ .x = nist_p256_g_x,
+ .y = nist_p256_g_y,
+ .ndigits = 4,
+ },
+ .p = nist_p256_p,
+ .n = nist_p256_n
+};
+
+#endif
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
new file mode 100644
index 000000000000..3de289806d67
--- /dev/null
+++ b/crypto/ecdh.c
@@ -0,0 +1,151 @@
+/* ECDH key-agreement protocol
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/kpp.h>
+#include <crypto/ecdh.h>
+#include <linux/scatterlist.h>
+#include "ecc.h"
+
+struct ecdh_ctx {
+ unsigned int curve_id;
+ unsigned int ndigits;
+ u64 private_key[ECC_MAX_DIGITS];
+ u64 public_key[2 * ECC_MAX_DIGITS];
+ u64 shared_secret[ECC_MAX_DIGITS];
+};
+
+static inline struct ecdh_ctx *ecdh_get_ctx(struct crypto_kpp *tfm)
+{
+ return kpp_tfm_ctx(tfm);
+}
+
+static unsigned int ecdh_supported_curve(unsigned int curve_id)
+{
+ switch (curve_id) {
+ case ECC_CURVE_NIST_P192: return 3;
+ case ECC_CURVE_NIST_P256: return 4;
+ default: return 0;
+ }
+}
+
+static int ecdh_set_secret(struct crypto_kpp *tfm, void *buf, unsigned int len)
+{
+ struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
+ struct ecdh params;
+ unsigned int ndigits;
+
+ if (crypto_ecdh_decode_key(buf, len, &params) < 0)
+ return -EINVAL;
+
+ ndigits = ecdh_supported_curve(params.curve_id);
+ if (!ndigits)
+ return -EINVAL;
+
+ ctx->curve_id = params.curve_id;
+ ctx->ndigits = ndigits;
+
+ if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits,
+ (const u8 *)params.key, params.key_size) < 0)
+ return -EINVAL;
+
+ memcpy(ctx->private_key, params.key, params.key_size);
+
+ return 0;
+}
+
+static int ecdh_compute_value(struct kpp_request *req)
+{
+ int ret = 0;
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
+ size_t copied, nbytes;
+ void *buf;
+
+ nbytes = ctx->ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
+
+ if (req->src) {
+ copied = sg_copy_to_buffer(req->src, 1, ctx->public_key,
+ 2 * nbytes);
+ if (copied != 2 * nbytes)
+ return -EINVAL;
+
+ ret = crypto_ecdh_shared_secret(ctx->curve_id, ctx->ndigits,
+ (const u8 *)ctx->private_key, nbytes,
+ (const u8 *)ctx->public_key, 2 * nbytes,
+ (u8 *)ctx->shared_secret, nbytes);
+
+ buf = ctx->shared_secret;
+ } else {
+ ret = ecdh_make_pub_key(ctx->curve_id, ctx->ndigits,
+ (const u8 *)ctx->private_key, nbytes,
+ (u8 *)ctx->public_key,
+ sizeof(ctx->public_key));
+ buf = ctx->public_key;
+		/* Public part is a point, thus it has both coordinates */
+ nbytes *= 2;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ copied = sg_copy_from_buffer(req->dst, 1, buf, nbytes);
+ if (copied != nbytes)
+ return -EINVAL;
+
+ return ret;
+}
+
+static int ecdh_max_size(struct crypto_kpp *tfm)
+{
+ struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
+ int nbytes = ctx->ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
+
+ /* Public key is made of two coordinates */
+ return 2 * nbytes;
+}
+
+static void no_exit_tfm(struct crypto_kpp *tfm)
+{
+}
+
+static struct kpp_alg ecdh = {
+ .set_secret = ecdh_set_secret,
+ .generate_public_key = ecdh_compute_value,
+ .compute_shared_secret = ecdh_compute_value,
+ .max_size = ecdh_max_size,
+ .exit = no_exit_tfm,
+ .base = {
+ .cra_name = "ecdh",
+ .cra_driver_name = "ecdh-generic",
+ .cra_priority = 100,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct ecdh_ctx),
+ },
+};
+
+static int ecdh_init(void)
+{
+ return crypto_register_kpp(&ecdh);
+}
+
+static void ecdh_exit(void)
+{
+ crypto_unregister_kpp(&ecdh);
+}
+
+module_init(ecdh_init);
+module_exit(ecdh_exit);
+MODULE_ALIAS_CRYPTO("ecdh");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ECDH generic algorithm");
diff --git a/crypto/ecdh_helper.c b/crypto/ecdh_helper.c
new file mode 100644
index 000000000000..3cd8a2414e60
--- /dev/null
+++ b/crypto/ecdh_helper.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <crypto/ecdh.h>
+#include <crypto/kpp.h>
+
+#define ECDH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 2 * sizeof(short))
+
+static inline u8 *ecdh_pack_data(void *dst, const void *src, size_t sz)
+{
+ memcpy(dst, src, sz);
+ return dst + sz;
+}
+
+static inline const u8 *ecdh_unpack_data(void *dst, const void *src, size_t sz)
+{
+ memcpy(dst, src, sz);
+ return src + sz;
+}
+
+int crypto_ecdh_key_len(const struct ecdh *params)
+{
+ return ECDH_KPP_SECRET_MIN_SIZE + params->key_size;
+}
+EXPORT_SYMBOL_GPL(crypto_ecdh_key_len);
+
+int crypto_ecdh_encode_key(char *buf, unsigned int len,
+ const struct ecdh *params)
+{
+ u8 *ptr = buf;
+ struct kpp_secret secret = {
+ .type = CRYPTO_KPP_SECRET_TYPE_ECDH,
+ .len = len
+ };
+
+ if (unlikely(!buf))
+ return -EINVAL;
+
+ if (len != crypto_ecdh_key_len(params))
+ return -EINVAL;
+
+ ptr = ecdh_pack_data(ptr, &secret, sizeof(secret));
+ ptr = ecdh_pack_data(ptr, &params->curve_id, sizeof(params->curve_id));
+ ptr = ecdh_pack_data(ptr, &params->key_size, sizeof(params->key_size));
+ ecdh_pack_data(ptr, params->key, params->key_size);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_ecdh_encode_key);
+
+int crypto_ecdh_decode_key(const char *buf, unsigned int len,
+ struct ecdh *params)
+{
+ const u8 *ptr = buf;
+ struct kpp_secret secret;
+
+ if (unlikely(!buf || len < ECDH_KPP_SECRET_MIN_SIZE))
+ return -EINVAL;
+
+ ptr = ecdh_unpack_data(&secret, ptr, sizeof(secret));
+ if (secret.type != CRYPTO_KPP_SECRET_TYPE_ECDH)
+ return -EINVAL;
+
+ ptr = ecdh_unpack_data(&params->curve_id, ptr, sizeof(params->curve_id));
+ ptr = ecdh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
+ if (secret.len != crypto_ecdh_key_len(params))
+ return -EINVAL;
+
+ /* Don't allocate memory. Set pointer to data
+ * within the given buffer
+ */
+ params->key = (void *)ptr;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_ecdh_decode_key);
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index b96a84560b67..1b01fe98e91f 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -20,6 +20,7 @@
#include <crypto/internal/geniv.h>
#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -112,13 +113,16 @@ static int echainiv_encrypt(struct aead_request *req)
info = req->iv;
if (req->src != req->dst) {
- struct blkcipher_desc desc = {
- .tfm = ctx->null,
- };
+ SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
- err = crypto_blkcipher_encrypt(
- &desc, req->dst, req->src,
- req->assoclen + req->cryptlen);
+ skcipher_request_set_tfm(nreq, ctx->sknull);
+ skcipher_request_set_callback(nreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(nreq, req->src, req->dst,
+ req->assoclen + req->cryptlen,
+ NULL);
+
+ err = crypto_skcipher_encrypt(nreq);
if (err)
return err;
}
diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c
deleted file mode 100644
index 16dda72fc4f8..000000000000
--- a/crypto/eseqiv.c
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * eseqiv: Encrypted Sequence Number IV Generator
- *
- * This generator generates an IV based on a sequence number by xoring it
- * with a salt and then encrypting it with the same key as used to encrypt
- * the plain text. This algorithm requires that the block size be equal
- * to the IV size. It is mainly useful for CBC.
- *
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#include <crypto/internal/skcipher.h>
-#include <crypto/rng.h>
-#include <crypto/scatterwalk.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/scatterlist.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-
-struct eseqiv_request_ctx {
- struct scatterlist src[2];
- struct scatterlist dst[2];
- char tail[];
-};
-
-struct eseqiv_ctx {
- spinlock_t lock;
- unsigned int reqoff;
- char salt[];
-};
-
-static void eseqiv_complete2(struct skcipher_givcrypt_request *req)
-{
- struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
- struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
-
- memcpy(req->giv, PTR_ALIGN((u8 *)reqctx->tail,
- crypto_ablkcipher_alignmask(geniv) + 1),
- crypto_ablkcipher_ivsize(geniv));
-}
-
-static void eseqiv_complete(struct crypto_async_request *base, int err)
-{
- struct skcipher_givcrypt_request *req = base->data;
-
- if (err)
- goto out;
-
- eseqiv_complete2(req);
-
-out:
- skcipher_givcrypt_complete(req, err);
-}
-
-static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
-{
- struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
- struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
- struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
- struct ablkcipher_request *subreq;
- crypto_completion_t compl;
- void *data;
- struct scatterlist *osrc, *odst;
- struct scatterlist *dst;
- struct page *srcp;
- struct page *dstp;
- u8 *giv;
- u8 *vsrc;
- u8 *vdst;
- __be64 seq;
- unsigned int ivsize;
- unsigned int len;
- int err;
-
- subreq = (void *)(reqctx->tail + ctx->reqoff);
- ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
-
- giv = req->giv;
- compl = req->creq.base.complete;
- data = req->creq.base.data;
-
- osrc = req->creq.src;
- odst = req->creq.dst;
- srcp = sg_page(osrc);
- dstp = sg_page(odst);
- vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + osrc->offset;
- vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + odst->offset;
-
- ivsize = crypto_ablkcipher_ivsize(geniv);
-
- if (vsrc != giv + ivsize && vdst != giv + ivsize) {
- giv = PTR_ALIGN((u8 *)reqctx->tail,
- crypto_ablkcipher_alignmask(geniv) + 1);
- compl = eseqiv_complete;
- data = req;
- }
-
- ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
- data);
-
- sg_init_table(reqctx->src, 2);
- sg_set_buf(reqctx->src, giv, ivsize);
- scatterwalk_crypto_chain(reqctx->src, osrc, vsrc == giv + ivsize, 2);
-
- dst = reqctx->src;
- if (osrc != odst) {
- sg_init_table(reqctx->dst, 2);
- sg_set_buf(reqctx->dst, giv, ivsize);
- scatterwalk_crypto_chain(reqctx->dst, odst, vdst == giv + ivsize, 2);
-
- dst = reqctx->dst;
- }
-
- ablkcipher_request_set_crypt(subreq, reqctx->src, dst,
- req->creq.nbytes + ivsize,
- req->creq.info);
-
- memcpy(req->creq.info, ctx->salt, ivsize);
-
- len = ivsize;
- if (ivsize > sizeof(u64)) {
- memset(req->giv, 0, ivsize - sizeof(u64));
- len = sizeof(u64);
- }
- seq = cpu_to_be64(req->seq);
- memcpy(req->giv + ivsize - len, &seq, len);
-
- err = crypto_ablkcipher_encrypt(subreq);
- if (err)
- goto out;
-
- if (giv != req->giv)
- eseqiv_complete2(req);
-
-out:
- return err;
-}
-
-static int eseqiv_init(struct crypto_tfm *tfm)
-{
- struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
- struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
- unsigned long alignmask;
- unsigned int reqsize;
- int err;
-
- spin_lock_init(&ctx->lock);
-
- alignmask = crypto_tfm_ctx_alignment() - 1;
- reqsize = sizeof(struct eseqiv_request_ctx);
-
- if (alignmask & reqsize) {
- alignmask &= reqsize;
- alignmask--;
- }
-
- alignmask = ~alignmask;
- alignmask &= crypto_ablkcipher_alignmask(geniv);
-
- reqsize += alignmask;
- reqsize += crypto_ablkcipher_ivsize(geniv);
- reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
-
- ctx->reqoff = reqsize - sizeof(struct eseqiv_request_ctx);
-
- tfm->crt_ablkcipher.reqsize = reqsize +
- sizeof(struct ablkcipher_request);
-
- err = 0;
- if (!crypto_get_default_rng()) {
- crypto_ablkcipher_crt(geniv)->givencrypt = eseqiv_givencrypt;
- err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
- crypto_ablkcipher_ivsize(geniv));
- crypto_put_default_rng();
- }
-
- return err ?: skcipher_geniv_init(tfm);
-}
-
-static struct crypto_template eseqiv_tmpl;
-
-static struct crypto_instance *eseqiv_alloc(struct rtattr **tb)
-{
- struct crypto_instance *inst;
- int err;
-
- inst = skcipher_geniv_alloc(&eseqiv_tmpl, tb, 0, 0);
- if (IS_ERR(inst))
- goto out;
-
- err = -EINVAL;
- if (inst->alg.cra_ablkcipher.ivsize != inst->alg.cra_blocksize)
- goto free_inst;
-
- inst->alg.cra_init = eseqiv_init;
- inst->alg.cra_exit = skcipher_geniv_exit;
-
- inst->alg.cra_ctxsize = sizeof(struct eseqiv_ctx);
- inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
-
-out:
- return inst;
-
-free_inst:
- skcipher_geniv_free(inst);
- inst = ERR_PTR(err);
- goto out;
-}
-
-static struct crypto_template eseqiv_tmpl = {
- .name = "eseqiv",
- .alloc = eseqiv_alloc,
- .free = skcipher_geniv_free,
- .module = THIS_MODULE,
-};
-
-static int __init eseqiv_module_init(void)
-{
- return crypto_register_template(&eseqiv_tmpl);
-}
-
-static void __exit eseqiv_module_exit(void)
-{
- crypto_unregister_template(&eseqiv_tmpl);
-}
-
-module_init(eseqiv_module_init);
-module_exit(eseqiv_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator");
-MODULE_ALIAS_CRYPTO("eseqiv");
diff --git a/crypto/gcm.c b/crypto/gcm.c
index bec329b3de8d..70a892e87ccb 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -29,7 +29,7 @@ struct gcm_instance_ctx {
};
struct crypto_gcm_ctx {
- struct crypto_ablkcipher *ctr;
+ struct crypto_skcipher *ctr;
struct crypto_ahash *ghash;
};
@@ -50,7 +50,7 @@ struct crypto_rfc4543_instance_ctx {
struct crypto_rfc4543_ctx {
struct crypto_aead *child;
- struct crypto_blkcipher *null;
+ struct crypto_skcipher *null;
u8 nonce[4];
};
@@ -74,7 +74,7 @@ struct crypto_gcm_req_priv_ctx {
struct crypto_gcm_ghash_ctx ghash_ctx;
union {
struct ahash_request ahreq;
- struct ablkcipher_request abreq;
+ struct skcipher_request skreq;
} u;
};
@@ -114,7 +114,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
{
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_ahash *ghash = ctx->ghash;
- struct crypto_ablkcipher *ctr = ctx->ctr;
+ struct crypto_skcipher *ctr = ctx->ctr;
struct {
be128 hash;
u8 iv[8];
@@ -122,35 +122,35 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
struct crypto_gcm_setkey_result result;
struct scatterlist sg[1];
- struct ablkcipher_request req;
+ struct skcipher_request req;
} *data;
int err;
- crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
- crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_ablkcipher_setkey(ctr, key, keylen);
- crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) &
+ crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_skcipher_setkey(ctr, key, keylen);
+ crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
CRYPTO_TFM_RES_MASK);
if (err)
return err;
- data = kzalloc(sizeof(*data) + crypto_ablkcipher_reqsize(ctr),
+ data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
GFP_KERNEL);
if (!data)
return -ENOMEM;
init_completion(&data->result.completion);
sg_init_one(data->sg, &data->hash, sizeof(data->hash));
- ablkcipher_request_set_tfm(&data->req, ctr);
- ablkcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
- CRYPTO_TFM_REQ_MAY_BACKLOG,
- crypto_gcm_setkey_done,
- &data->result);
- ablkcipher_request_set_crypt(&data->req, data->sg, data->sg,
- sizeof(data->hash), data->iv);
-
- err = crypto_ablkcipher_encrypt(&data->req);
+ skcipher_request_set_tfm(&data->req, ctr);
+ skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_gcm_setkey_done,
+ &data->result);
+ skcipher_request_set_crypt(&data->req, data->sg, data->sg,
+ sizeof(data->hash), data->iv);
+
+ err = crypto_skcipher_encrypt(&data->req);
if (err == -EINPROGRESS || err == -EBUSY) {
err = wait_for_completion_interruptible(
&data->result.completion);
@@ -223,13 +223,13 @@ static void crypto_gcm_init_crypt(struct aead_request *req,
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
- struct ablkcipher_request *ablk_req = &pctx->u.abreq;
+ struct skcipher_request *skreq = &pctx->u.skreq;
struct scatterlist *dst;
dst = req->src == req->dst ? pctx->src : pctx->dst;
- ablkcipher_request_set_tfm(ablk_req, ctx->ctr);
- ablkcipher_request_set_crypt(ablk_req, pctx->src, dst,
+ skcipher_request_set_tfm(skreq, ctx->ctr);
+ skcipher_request_set_crypt(skreq, pctx->src, dst,
cryptlen + sizeof(pctx->auth_tag),
pctx->iv);
}
@@ -494,14 +494,14 @@ out:
static int crypto_gcm_encrypt(struct aead_request *req)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
- struct ablkcipher_request *abreq = &pctx->u.abreq;
+ struct skcipher_request *skreq = &pctx->u.skreq;
u32 flags = aead_request_flags(req);
crypto_gcm_init_common(req);
crypto_gcm_init_crypt(req, req->cryptlen);
- ablkcipher_request_set_callback(abreq, flags, gcm_encrypt_done, req);
+ skcipher_request_set_callback(skreq, flags, gcm_encrypt_done, req);
- return crypto_ablkcipher_encrypt(abreq) ?:
+ return crypto_skcipher_encrypt(skreq) ?:
gcm_encrypt_continue(req, flags);
}
@@ -533,12 +533,12 @@ static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
static int gcm_dec_hash_continue(struct aead_request *req, u32 flags)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
- struct ablkcipher_request *abreq = &pctx->u.abreq;
+ struct skcipher_request *skreq = &pctx->u.skreq;
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
crypto_gcm_init_crypt(req, gctx->cryptlen);
- ablkcipher_request_set_callback(abreq, flags, gcm_decrypt_done, req);
- return crypto_ablkcipher_decrypt(abreq) ?: crypto_gcm_verify(req);
+ skcipher_request_set_callback(skreq, flags, gcm_decrypt_done, req);
+ return crypto_skcipher_decrypt(skreq) ?: crypto_gcm_verify(req);
}
static int crypto_gcm_decrypt(struct aead_request *req)
@@ -566,7 +566,7 @@ static int crypto_gcm_init_tfm(struct crypto_aead *tfm)
struct aead_instance *inst = aead_alg_instance(tfm);
struct gcm_instance_ctx *ictx = aead_instance_ctx(inst);
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm);
- struct crypto_ablkcipher *ctr;
+ struct crypto_skcipher *ctr;
struct crypto_ahash *ghash;
unsigned long align;
int err;
@@ -575,7 +575,7 @@ static int crypto_gcm_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(ghash))
return PTR_ERR(ghash);
- ctr = crypto_spawn_skcipher(&ictx->ctr);
+ ctr = crypto_spawn_skcipher2(&ictx->ctr);
err = PTR_ERR(ctr);
if (IS_ERR(ctr))
goto err_free_hash;
@@ -587,8 +587,8 @@ static int crypto_gcm_init_tfm(struct crypto_aead *tfm)
align &= ~(crypto_tfm_ctx_alignment() - 1);
crypto_aead_set_reqsize(tfm,
align + offsetof(struct crypto_gcm_req_priv_ctx, u) +
- max(sizeof(struct ablkcipher_request) +
- crypto_ablkcipher_reqsize(ctr),
+ max(sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(ctr),
sizeof(struct ahash_request) +
crypto_ahash_reqsize(ghash)));
@@ -604,7 +604,7 @@ static void crypto_gcm_exit_tfm(struct crypto_aead *tfm)
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_ahash(ctx->ghash);
- crypto_free_ablkcipher(ctx->ctr);
+ crypto_free_skcipher(ctx->ctr);
}
static void crypto_gcm_free(struct aead_instance *inst)
@@ -624,7 +624,7 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
{
struct crypto_attr_type *algt;
struct aead_instance *inst;
- struct crypto_alg *ctr;
+ struct skcipher_alg *ctr;
struct crypto_alg *ghash_alg;
struct hash_alg_common *ghash;
struct gcm_instance_ctx *ctx;
@@ -639,7 +639,9 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type,
CRYPTO_ALG_TYPE_HASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ CRYPTO_ALG_TYPE_AHASH_MASK |
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (IS_ERR(ghash_alg))
return PTR_ERR(ghash_alg);
@@ -661,41 +663,42 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
goto err_drop_ghash;
crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst));
- err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher2(&ctx->ctr, ctr_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (err)
goto err_drop_ghash;
- ctr = crypto_skcipher_spawn_alg(&ctx->ctr);
+ ctr = crypto_spawn_skcipher_alg(&ctx->ctr);
/* We only support 16-byte blocks. */
-	if (ctr->cra_ablkcipher.ivsize != 16)
+	err = -EINVAL;
+	if (crypto_skcipher_alg_ivsize(ctr) != 16)
		goto out_put_ctr;
	/* Not a stream cipher? */
-	err = -EINVAL;
-	if (ctr->cra_blocksize != 1)
+	if (ctr->base.cra_blocksize != 1)
goto out_put_ctr;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "gcm_base(%s,%s)", ctr->cra_driver_name,
+ "gcm_base(%s,%s)", ctr->base.cra_driver_name,
ghash_alg->cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
goto out_put_ctr;
memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
- inst->alg.base.cra_flags = (ghash->base.cra_flags | ctr->cra_flags) &
- CRYPTO_ALG_ASYNC;
+ inst->alg.base.cra_flags = (ghash->base.cra_flags |
+ ctr->base.cra_flags) & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = (ghash->base.cra_priority +
- ctr->cra_priority) / 2;
+ ctr->base.cra_priority) / 2;
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = ghash->base.cra_alignmask |
- ctr->cra_alignmask;
+ ctr->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx);
inst->alg.ivsize = 12;
+ inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr);
inst->alg.maxauthsize = 16;
inst->alg.init = crypto_gcm_init_tfm;
inst->alg.exit = crypto_gcm_exit_tfm;
@@ -980,6 +983,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx);
inst->alg.ivsize = 8;
+ inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
inst->alg.init = crypto_rfc4106_init_tfm;
@@ -1084,11 +1088,13 @@ static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc)
unsigned int authsize = crypto_aead_authsize(aead);
unsigned int nbytes = req->assoclen + req->cryptlen -
(enc ? 0 : authsize);
- struct blkcipher_desc desc = {
- .tfm = ctx->null,
- };
+ SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null);
- return crypto_blkcipher_encrypt(&desc, req->dst, req->src, nbytes);
+ skcipher_request_set_tfm(nreq, ctx->null);
+ skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL);
+ skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL);
+
+ return crypto_skcipher_encrypt(nreq);
}
static int crypto_rfc4543_encrypt(struct aead_request *req)
@@ -1108,7 +1114,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
struct crypto_aead_spawn *spawn = &ictx->aead;
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_aead *aead;
- struct crypto_blkcipher *null;
+ struct crypto_skcipher *null;
unsigned long align;
int err = 0;
@@ -1116,7 +1122,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(aead))
return PTR_ERR(aead);
- null = crypto_get_default_null_skcipher();
+ null = crypto_get_default_null_skcipher2();
err = PTR_ERR(null);
if (IS_ERR(null))
goto err_free_aead;
@@ -1144,7 +1150,7 @@ static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm)
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
- crypto_put_default_null_skcipher();
+ crypto_put_default_null_skcipher2();
}
static void crypto_rfc4543_free(struct aead_instance *inst)
@@ -1219,6 +1225,7 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx);
inst->alg.ivsize = 8;
+ inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
inst->alg.init = crypto_rfc4543_init_tfm;
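
The gcm.c conversion above follows one pattern throughout: every ablkcipher or blkcipher handle becomes a crypto_skcipher, and requests are built with the skcipher_request_* helpers. A hedged, self-contained sketch of that calling convention (not part of the patch; the "ctr(aes)" name, demo_ctr_encrypt and its parameters are illustrative):

/* Sketch of the skcipher request pattern the conversion targets. */
static int demo_ctr_encrypt(struct scatterlist *src, struct scatterlist *dst,
			    unsigned int nbytes, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);
	struct skcipher_request *req;
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_skcipher(tfm);
		return -ENOMEM;
	}

	/* key setup via crypto_skcipher_setkey() elided */
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, iv);

	err = crypto_skcipher_encrypt(req);	/* -EINPROGRESS if async */

	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return err;
}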
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index 597cedd3531c..c4938497eedb 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -87,24 +87,28 @@ void jent_memcpy(void *dest, const void *src, unsigned int n)
memcpy(dest, src, n);
}
+/*
+ * Obtain a high-resolution time stamp value. The time stamp is used to measure
+ * the execution time of a given code path and its variations. Hence, the time
+ * stamp must have a sufficiently high resolution.
+ *
+ * Note, if the function returns zero because a given architecture does not
+ * implement a high-resolution time stamp, the RNG code's runtime test
+ * will detect it and will not produce output.
+ */
void jent_get_nstime(__u64 *out)
{
- struct timespec ts;
__u64 tmp = 0;
tmp = random_get_entropy();
/*
- * If random_get_entropy does not return a value (which is possible on,
- * for example, MIPS), invoke __getnstimeofday
+ * If random_get_entropy does not return a value, i.e. it is not
+ * implemented for a given architecture, use a clock source.
* hoping that there are timers we can work with.
*/
- if ((0 == tmp) &&
- (0 == __getnstimeofday(&ts))) {
- tmp = ts.tv_sec;
- tmp = tmp << 32;
- tmp = tmp | ts.tv_nsec;
- }
+ if (tmp == 0)
+ tmp = ktime_get_ns();
*out = tmp;
}
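
For comparison, the removed fallback packed a struct timespec into one 64-bit word, while ktime_get_ns() already returns monotonic nanoseconds as a single u64, so no packing is needed. A short illustrative sketch of the two layouts (ts here stands for the timespec the old code filled in; it is not in the new code):

/* Old fallback (removed): seconds in the high word, nanoseconds low. */
__u64 packed = ((__u64)ts.tv_sec << 32) | ts.tv_nsec;

/* New fallback: monotonic nanoseconds, already a single u64. */
__u64 ns = ktime_get_ns();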
diff --git a/crypto/kpp.c b/crypto/kpp.c
new file mode 100644
index 000000000000..d36ce05eee43
--- /dev/null
+++ b/crypto/kpp.c
@@ -0,0 +1,123 @@
+/*
+ * Key-agreement Protocol Primitives (KPP)
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <linux/cryptouser.h>
+#include <net/netlink.h>
+#include <crypto/kpp.h>
+#include <crypto/internal/kpp.h>
+#include "internal.h"
+
+#ifdef CONFIG_NET
+static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_report_kpp rkpp;
+
+ strncpy(rkpp.type, "kpp", sizeof(rkpp.type));
+
+ if (nla_put(skb, CRYPTOCFGA_REPORT_KPP,
+ sizeof(struct crypto_report_kpp), &rkpp))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+#else
+static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ return -ENOSYS;
+}
+#endif
+
+static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg)
+ __attribute__ ((unused));
+
+static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg)
+{
+ seq_puts(m, "type : kpp\n");
+}
+
+static void crypto_kpp_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_kpp *kpp = __crypto_kpp_tfm(tfm);
+ struct kpp_alg *alg = crypto_kpp_alg(kpp);
+
+ alg->exit(kpp);
+}
+
+static int crypto_kpp_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_kpp *kpp = __crypto_kpp_tfm(tfm);
+ struct kpp_alg *alg = crypto_kpp_alg(kpp);
+
+ if (alg->exit)
+ kpp->base.exit = crypto_kpp_exit_tfm;
+
+ if (alg->init)
+ return alg->init(kpp);
+
+ return 0;
+}
+
+static const struct crypto_type crypto_kpp_type = {
+ .extsize = crypto_alg_extsize,
+ .init_tfm = crypto_kpp_init_tfm,
+#ifdef CONFIG_PROC_FS
+ .show = crypto_kpp_show,
+#endif
+ .report = crypto_kpp_report,
+ .maskclear = ~CRYPTO_ALG_TYPE_MASK,
+ .maskset = CRYPTO_ALG_TYPE_MASK,
+ .type = CRYPTO_ALG_TYPE_KPP,
+ .tfmsize = offsetof(struct crypto_kpp, base),
+};
+
+struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask)
+{
+ return crypto_alloc_tfm(alg_name, &crypto_kpp_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_kpp);
+
+static void kpp_prepare_alg(struct kpp_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+
+ base->cra_type = &crypto_kpp_type;
+ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+ base->cra_flags |= CRYPTO_ALG_TYPE_KPP;
+}
+
+int crypto_register_kpp(struct kpp_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+
+ kpp_prepare_alg(alg);
+ return crypto_register_alg(base);
+}
+EXPORT_SYMBOL_GPL(crypto_register_kpp);
+
+void crypto_unregister_kpp(struct kpp_alg *alg)
+{
+ crypto_unregister_alg(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_kpp);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Key-agreement Protocol Primitives");
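
kpp.c only registers the algorithm type; the user-facing calls live in include/crypto/kpp.h. A hedged sketch of how a caller might drive a KPP transform such as "dh" (the names follow the API this series introduces; packed_key, the scatterlists and error handling are illustrative and trimmed):

struct crypto_kpp *tfm = crypto_alloc_kpp("dh", 0, 0);
struct kpp_request *req;

if (IS_ERR(tfm))
	return PTR_ERR(tfm);

/* packed_key would be built with crypto_dh_encode_key() */
err = crypto_kpp_set_secret(tfm, packed_key, packed_key_len);

req = kpp_request_alloc(tfm, GFP_KERNEL);
kpp_request_set_input(req, NULL, 0);
kpp_request_set_output(req, &pub_sg, pub_len);
err = crypto_kpp_generate_public_key(req);

/* later: feed the peer's public value back in */
kpp_request_set_input(req, &peer_sg, peer_len);
kpp_request_set_output(req, &secret_sg, secret_len);
err = crypto_kpp_compute_shared_secret(req);

kpp_request_free(req);
crypto_free_kpp(tfm);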
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index c4eb9da49d4f..86fb59b109a9 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -41,7 +41,7 @@ struct mcryptd_flush_list {
static struct mcryptd_flush_list __percpu *mcryptd_flist;
struct hashd_instance_ctx {
- struct crypto_shash_spawn spawn;
+ struct crypto_ahash_spawn spawn;
struct mcryptd_queue *queue;
};
@@ -272,18 +272,18 @@ static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
- struct crypto_shash_spawn *spawn = &ictx->spawn;
+ struct crypto_ahash_spawn *spawn = &ictx->spawn;
struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_shash *hash;
+ struct crypto_ahash *hash;
- hash = crypto_spawn_shash(spawn);
+ hash = crypto_spawn_ahash(spawn);
if (IS_ERR(hash))
return PTR_ERR(hash);
ctx->child = hash;
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct mcryptd_hash_request_ctx) +
- crypto_shash_descsize(hash));
+ crypto_ahash_reqsize(hash));
return 0;
}
@@ -291,21 +291,21 @@ static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_shash(ctx->child);
+ crypto_free_ahash(ctx->child);
}
static int mcryptd_hash_setkey(struct crypto_ahash *parent,
const u8 *key, unsigned int keylen)
{
struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
- struct crypto_shash *child = ctx->child;
+ struct crypto_ahash *child = ctx->child;
int err;
- crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
+ crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_ahash_set_flags(child, crypto_ahash_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_shash_setkey(child, key, keylen);
- crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
+ err = crypto_ahash_setkey(child, key, keylen);
+ crypto_ahash_set_flags(parent, crypto_ahash_get_flags(child) &
CRYPTO_TFM_RES_MASK);
return err;
}
@@ -331,20 +331,20 @@ static int mcryptd_hash_enqueue(struct ahash_request *req,
static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
{
struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_shash *child = ctx->child;
+ struct crypto_ahash *child = ctx->child;
struct ahash_request *req = ahash_request_cast(req_async);
struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct shash_desc *desc = &rctx->desc;
+ struct ahash_request *desc = &rctx->areq;
if (unlikely(err == -EINPROGRESS))
goto out;
- desc->tfm = child;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_tfm(desc, child);
+ ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
+ rctx->complete, req_async);
- err = crypto_shash_init(desc);
-
- req->base.complete = rctx->complete;
+ rctx->out = req->result;
+ err = crypto_ahash_init(desc);
out:
local_bh_disable();
@@ -365,7 +365,8 @@ static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
if (unlikely(err == -EINPROGRESS))
goto out;
- err = shash_ahash_mcryptd_update(req, &rctx->desc);
+ rctx->out = req->result;
+ err = ahash_mcryptd_update(&rctx->areq);
if (err) {
req->base.complete = rctx->complete;
goto out;
@@ -391,7 +392,8 @@ static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
if (unlikely(err == -EINPROGRESS))
goto out;
- err = shash_ahash_mcryptd_final(req, &rctx->desc);
+ rctx->out = req->result;
+ err = ahash_mcryptd_final(&rctx->areq);
if (err) {
req->base.complete = rctx->complete;
goto out;
@@ -416,8 +418,8 @@ static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
if (unlikely(err == -EINPROGRESS))
goto out;
-
- err = shash_ahash_mcryptd_finup(req, &rctx->desc);
+ rctx->out = req->result;
+ err = ahash_mcryptd_finup(&rctx->areq);
if (err) {
req->base.complete = rctx->complete;
@@ -439,25 +441,21 @@ static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_shash *child = ctx->child;
+ struct crypto_ahash *child = ctx->child;
struct ahash_request *req = ahash_request_cast(req_async);
struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct shash_desc *desc = &rctx->desc;
+ struct ahash_request *desc = &rctx->areq;
if (unlikely(err == -EINPROGRESS))
goto out;
- desc->tfm = child;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; /* check this again */
-
- err = shash_ahash_mcryptd_digest(req, desc);
+ ahash_request_set_tfm(desc, child);
+ ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
+ rctx->complete, req_async);
- if (err) {
- req->base.complete = rctx->complete;
- goto out;
- }
+ rctx->out = req->result;
+ err = ahash_mcryptd_digest(desc);
- return;
out:
local_bh_disable();
rctx->complete(&req->base, err);
@@ -473,14 +471,14 @@ static int mcryptd_hash_export(struct ahash_request *req, void *out)
{
struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- return crypto_shash_export(&rctx->desc, out);
+ return crypto_ahash_export(&rctx->areq, out);
}
static int mcryptd_hash_import(struct ahash_request *req, const void *in)
{
struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- return crypto_shash_import(&rctx->desc, in);
+ return crypto_ahash_import(&rctx->areq, in);
}
static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
@@ -488,7 +486,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
{
struct hashd_instance_ctx *ctx;
struct ahash_instance *inst;
- struct shash_alg *salg;
+ struct hash_alg_common *halg;
struct crypto_alg *alg;
u32 type = 0;
u32 mask = 0;
@@ -496,11 +494,11 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
mcryptd_check_internal(tb, &type, &mask);
- salg = shash_attr_alg(tb[1], type, mask);
- if (IS_ERR(salg))
- return PTR_ERR(salg);
+ halg = ahash_attr_alg(tb[1], type, mask);
+ if (IS_ERR(halg))
+ return PTR_ERR(halg);
- alg = &salg->base;
+ alg = &halg->base;
pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
sizeof(*ctx));
@@ -511,7 +509,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
ctx = ahash_instance_ctx(inst);
ctx->queue = queue;
- err = crypto_init_shash_spawn(&ctx->spawn, salg,
+ err = crypto_init_ahash_spawn(&ctx->spawn, halg,
ahash_crypto_instance(inst));
if (err)
goto out_free_inst;
@@ -521,8 +519,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
type |= CRYPTO_ALG_INTERNAL;
inst->alg.halg.base.cra_flags = type;
- inst->alg.halg.digestsize = salg->digestsize;
- inst->alg.halg.statesize = salg->statesize;
+ inst->alg.halg.digestsize = halg->digestsize;
+ inst->alg.halg.statesize = halg->statesize;
inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);
inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
@@ -539,7 +537,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
err = ahash_register_instance(tmpl, inst);
if (err) {
- crypto_drop_shash(&ctx->spawn);
+ crypto_drop_ahash(&ctx->spawn);
out_free_inst:
kfree(inst);
}
@@ -575,7 +573,7 @@ static void mcryptd_free(struct crypto_instance *inst)
switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AHASH:
- crypto_drop_shash(&hctx->spawn);
+ crypto_drop_ahash(&hctx->spawn);
kfree(ahash_instance(inst));
return;
default:
@@ -612,55 +610,38 @@ struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
}
EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
-int shash_ahash_mcryptd_digest(struct ahash_request *req,
- struct shash_desc *desc)
+int ahash_mcryptd_digest(struct ahash_request *desc)
{
int err;
- err = crypto_shash_init(desc) ?:
- shash_ahash_mcryptd_finup(req, desc);
+ err = crypto_ahash_init(desc) ?:
+ ahash_mcryptd_finup(desc);
return err;
}
-EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_digest);
-int shash_ahash_mcryptd_update(struct ahash_request *req,
- struct shash_desc *desc)
+int ahash_mcryptd_update(struct ahash_request *desc)
{
- struct crypto_shash *tfm = desc->tfm;
- struct shash_alg *shash = crypto_shash_alg(tfm);
-
/* alignment is to be done by multi-buffer crypto algorithm if needed */
- return shash->update(desc, NULL, 0);
+ return crypto_ahash_update(desc);
}
-EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_update);
-int shash_ahash_mcryptd_finup(struct ahash_request *req,
- struct shash_desc *desc)
+int ahash_mcryptd_finup(struct ahash_request *desc)
{
- struct crypto_shash *tfm = desc->tfm;
- struct shash_alg *shash = crypto_shash_alg(tfm);
-
/* alignment is to be done by multi-buffer crypto algorithm if needed */
- return shash->finup(desc, NULL, 0, req->result);
+ return crypto_ahash_finup(desc);
}
-EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_finup);
-int shash_ahash_mcryptd_final(struct ahash_request *req,
- struct shash_desc *desc)
+int ahash_mcryptd_final(struct ahash_request *desc)
{
- struct crypto_shash *tfm = desc->tfm;
- struct shash_alg *shash = crypto_shash_alg(tfm);
-
/* alignment is to be done by multi-buffer crypto algorithm if needed */
- return shash->final(desc, req->result);
+ return crypto_ahash_final(desc);
}
-EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_final);
-struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
+struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
{
struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
@@ -668,12 +649,12 @@ struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_child);
-struct shash_desc *mcryptd_shash_desc(struct ahash_request *req)
+struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req)
{
struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- return &rctx->desc;
+ return &rctx->areq;
}
-EXPORT_SYMBOL_GPL(mcryptd_shash_desc);
+EXPORT_SYMBOL_GPL(mcryptd_ahash_desc);
void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
{
@@ -681,7 +662,6 @@ void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
}
EXPORT_SYMBOL_GPL(mcryptd_free_ahash);
-
static int __init mcryptd_init(void)
{
int err, cpu;
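
The mcryptd conversion swaps every shash child call for its asynchronous ahash equivalent, and the per-request context now embeds a full ahash_request instead of a shash_desc. A minimal sketch of the delegation pattern this enables (illustrative only; rctx and ctx stand for the request and tfm contexts shown above):

/* Parent ahash delegates to its child ahash via the embedded request. */
struct ahash_request *subreq = &rctx->areq;

ahash_request_set_tfm(subreq, ctx->child);
ahash_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
			   rctx->complete, req);
err = crypto_ahash_init(subreq) ?:
      crypto_ahash_finup(subreq);	/* what ahash_mcryptd_digest() does */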
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 8ba426635b1b..877019a6d3ea 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -92,19 +92,17 @@ static const struct rsa_asn1_template *rsa_lookup_asn1(const char *name)
struct pkcs1pad_ctx {
struct crypto_akcipher *child;
- const char *hash_name;
unsigned int key_size;
};
struct pkcs1pad_inst_ctx {
struct crypto_akcipher_spawn spawn;
- const char *hash_name;
+ const struct rsa_asn1_template *digest_info;
};
struct pkcs1pad_request {
- struct scatterlist in_sg[3], out_sg[2];
+ struct scatterlist in_sg[2], out_sg[1];
uint8_t *in_buf, *out_buf;
-
struct akcipher_request child_req;
};
@@ -112,40 +110,48 @@ static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
- int err, size;
+ int err;
+
+ ctx->key_size = 0;
err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);
+ if (err)
+ return err;
- if (!err) {
- /* Find out new modulus size from rsa implementation */
- size = crypto_akcipher_maxsize(ctx->child);
+ /* Find out new modulus size from rsa implementation */
+ err = crypto_akcipher_maxsize(ctx->child);
+ if (err < 0)
+ return err;
- ctx->key_size = size > 0 ? size : 0;
- if (size <= 0)
- err = size;
- }
+ if (err > PAGE_SIZE)
+ return -ENOTSUPP;
- return err;
+ ctx->key_size = err;
+ return 0;
}
static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
- int err, size;
+ int err;
+
+ ctx->key_size = 0;
err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);
+ if (err)
+ return err;
- if (!err) {
- /* Find out new modulus size from rsa implementation */
- size = crypto_akcipher_maxsize(ctx->child);
+ /* Find out new modulus size from rsa implementation */
+ err = crypto_akcipher_maxsize(ctx->child);
+ if (err < 0)
+ return err;
- ctx->key_size = size > 0 ? size : 0;
- if (size <= 0)
- err = size;
- }
+ if (err > PAGE_SIZE)
+ return -ENOTSUPP;
- return err;
+ ctx->key_size = err;
+ return 0;
}
static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
@@ -164,19 +170,10 @@ static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
struct scatterlist *next)
{
- int nsegs = next ? 1 : 0;
-
- if (offset_in_page(buf) + len <= PAGE_SIZE) {
- nsegs += 1;
- sg_init_table(sg, nsegs);
- sg_set_buf(sg, buf, len);
- } else {
- nsegs += 2;
- sg_init_table(sg, nsegs);
- sg_set_buf(sg + 0, buf, PAGE_SIZE - offset_in_page(buf));
- sg_set_buf(sg + 1, buf + PAGE_SIZE - offset_in_page(buf),
- offset_in_page(buf) + len - PAGE_SIZE);
- }
+ int nsegs = next ? 2 : 1;
+
+ sg_init_table(sg, nsegs);
+ sg_set_buf(sg, buf, len);
if (next)
sg_chain(sg, nsegs, next);
@@ -187,37 +184,36 @@ static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
- size_t pad_len = ctx->key_size - req_ctx->child_req.dst_len;
- size_t chunk_len, pad_left;
- struct sg_mapping_iter miter;
-
- if (!err) {
- if (pad_len) {
- sg_miter_start(&miter, req->dst,
- sg_nents_for_len(req->dst, pad_len),
- SG_MITER_ATOMIC | SG_MITER_TO_SG);
-
- pad_left = pad_len;
- while (pad_left) {
- sg_miter_next(&miter);
-
- chunk_len = min(miter.length, pad_left);
- memset(miter.addr, 0, chunk_len);
- pad_left -= chunk_len;
- }
-
- sg_miter_stop(&miter);
- }
-
- sg_pcopy_from_buffer(req->dst,
- sg_nents_for_len(req->dst, ctx->key_size),
- req_ctx->out_buf, req_ctx->child_req.dst_len,
- pad_len);
- }
+ unsigned int pad_len;
+ unsigned int len;
+ u8 *out_buf;
+
+ if (err)
+ goto out;
+
+ len = req_ctx->child_req.dst_len;
+ pad_len = ctx->key_size - len;
+
+	/* Four billion to one: a result shorter than the modulus is rare */
+ if (likely(!pad_len))
+ goto out;
+
+ out_buf = kzalloc(ctx->key_size, GFP_ATOMIC);
+ err = -ENOMEM;
+ if (!out_buf)
+ goto out;
+
+ sg_copy_to_buffer(req->dst, sg_nents_for_len(req->dst, len),
+ out_buf + pad_len, len);
+ sg_copy_from_buffer(req->dst,
+ sg_nents_for_len(req->dst, ctx->key_size),
+ out_buf, ctx->key_size);
+ kzfree(out_buf);
+
+out:
req->dst_len = ctx->key_size;
kfree(req_ctx->in_buf);
- kzfree(req_ctx->out_buf);
return err;
}
@@ -257,21 +253,8 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
return -EOVERFLOW;
}
- if (ctx->key_size > PAGE_SIZE)
- return -ENOTSUPP;
-
- /*
- * Replace both input and output to add the padding in the input and
- * the potential missing leading zeros in the output.
- */
- req_ctx->child_req.src = req_ctx->in_sg;
- req_ctx->child_req.src_len = ctx->key_size - 1;
- req_ctx->child_req.dst = req_ctx->out_sg;
- req_ctx->child_req.dst_len = ctx->key_size;
-
req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
- (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC);
+ GFP_KERNEL);
if (!req_ctx->in_buf)
return -ENOMEM;
@@ -284,9 +267,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
ctx->key_size - 1 - req->src_len, req->src);
- req_ctx->out_buf = kmalloc(ctx->key_size,
- (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC);
+ req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
if (!req_ctx->out_buf) {
kfree(req_ctx->in_buf);
return -ENOMEM;
@@ -299,6 +280,10 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
pkcs1pad_encrypt_sign_complete_cb, req);
+ /* Reuse output buffer */
+ akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
+ req->dst, ctx->key_size - 1, req->dst_len);
+
err = crypto_akcipher_encrypt(&req_ctx->child_req);
if (err != -EINPROGRESS &&
(err != -EBUSY ||
@@ -380,18 +365,7 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
if (!ctx->key_size || req->src_len != ctx->key_size)
return -EINVAL;
- if (ctx->key_size > PAGE_SIZE)
- return -ENOTSUPP;
-
- /* Reuse input buffer, output to a new buffer */
- req_ctx->child_req.src = req->src;
- req_ctx->child_req.src_len = req->src_len;
- req_ctx->child_req.dst = req_ctx->out_sg;
- req_ctx->child_req.dst_len = ctx->key_size ;
-
- req_ctx->out_buf = kmalloc(ctx->key_size,
- (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC);
+ req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
if (!req_ctx->out_buf)
return -ENOMEM;
@@ -402,6 +376,11 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
pkcs1pad_decrypt_complete_cb, req);
+ /* Reuse input buffer, output to a new buffer */
+ akcipher_request_set_crypt(&req_ctx->child_req, req->src,
+ req_ctx->out_sg, req->src_len,
+ ctx->key_size);
+
err = crypto_akcipher_decrypt(&req_ctx->child_req);
if (err != -EINPROGRESS &&
(err != -EBUSY ||
@@ -416,20 +395,16 @@ static int pkcs1pad_sign(struct akcipher_request *req)
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
- const struct rsa_asn1_template *digest_info = NULL;
+ struct akcipher_instance *inst = akcipher_alg_instance(tfm);
+ struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
+ const struct rsa_asn1_template *digest_info = ictx->digest_info;
int err;
unsigned int ps_end, digest_size = 0;
if (!ctx->key_size)
return -EINVAL;
- if (ctx->hash_name) {
- digest_info = rsa_lookup_asn1(ctx->hash_name);
- if (!digest_info)
- return -EINVAL;
-
- digest_size = digest_info->size;
- }
+ digest_size = digest_info->size;
if (req->src_len + digest_size > ctx->key_size - 11)
return -EOVERFLOW;
@@ -439,21 +414,8 @@ static int pkcs1pad_sign(struct akcipher_request *req)
return -EOVERFLOW;
}
- if (ctx->key_size > PAGE_SIZE)
- return -ENOTSUPP;
-
- /*
- * Replace both input and output to add the padding in the input and
- * the potential missing leading zeros in the output.
- */
- req_ctx->child_req.src = req_ctx->in_sg;
- req_ctx->child_req.src_len = ctx->key_size - 1;
- req_ctx->child_req.dst = req_ctx->out_sg;
- req_ctx->child_req.dst_len = ctx->key_size;
-
req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
- (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC);
+ GFP_KERNEL);
if (!req_ctx->in_buf)
return -ENOMEM;
@@ -462,29 +424,20 @@ static int pkcs1pad_sign(struct akcipher_request *req)
memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
req_ctx->in_buf[ps_end] = 0x00;
- if (digest_info) {
- memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data,
- digest_info->size);
- }
+ memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data,
+ digest_info->size);
pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
ctx->key_size - 1 - req->src_len, req->src);
- req_ctx->out_buf = kmalloc(ctx->key_size,
- (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC);
- if (!req_ctx->out_buf) {
- kfree(req_ctx->in_buf);
- return -ENOMEM;
- }
-
- pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
- ctx->key_size, NULL);
-
akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
pkcs1pad_encrypt_sign_complete_cb, req);
+ /* Reuse output buffer */
+ akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
+ req->dst, ctx->key_size - 1, req->dst_len);
+
err = crypto_akcipher_sign(&req_ctx->child_req);
if (err != -EINPROGRESS &&
(err != -EBUSY ||
@@ -499,56 +452,58 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
- const struct rsa_asn1_template *digest_info;
+ struct akcipher_instance *inst = akcipher_alg_instance(tfm);
+ struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
+ const struct rsa_asn1_template *digest_info = ictx->digest_info;
+ unsigned int dst_len;
unsigned int pos;
-
- if (err == -EOVERFLOW)
- /* Decrypted value had no leading 0 byte */
- err = -EINVAL;
+ u8 *out_buf;
if (err)
goto done;
- if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
- err = -EINVAL;
+ err = -EINVAL;
+ dst_len = req_ctx->child_req.dst_len;
+ if (dst_len < ctx->key_size - 1)
goto done;
+
+ out_buf = req_ctx->out_buf;
+ if (dst_len == ctx->key_size) {
+ if (out_buf[0] != 0x00)
+ /* Decrypted value had no leading 0 byte */
+ goto done;
+
+ dst_len--;
+ out_buf++;
}
err = -EBADMSG;
- if (req_ctx->out_buf[0] != 0x01)
+ if (out_buf[0] != 0x01)
goto done;
- for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
- if (req_ctx->out_buf[pos] != 0xff)
+ for (pos = 1; pos < dst_len; pos++)
+ if (out_buf[pos] != 0xff)
break;
- if (pos < 9 || pos == req_ctx->child_req.dst_len ||
- req_ctx->out_buf[pos] != 0x00)
+ if (pos < 9 || pos == dst_len || out_buf[pos] != 0x00)
goto done;
pos++;
- if (ctx->hash_name) {
- digest_info = rsa_lookup_asn1(ctx->hash_name);
- if (!digest_info)
- goto done;
-
- if (memcmp(req_ctx->out_buf + pos, digest_info->data,
- digest_info->size))
- goto done;
+ if (memcmp(out_buf + pos, digest_info->data, digest_info->size))
+ goto done;
- pos += digest_info->size;
- }
+ pos += digest_info->size;
err = 0;
- if (req->dst_len < req_ctx->child_req.dst_len - pos)
+ if (req->dst_len < dst_len - pos)
err = -EOVERFLOW;
- req->dst_len = req_ctx->child_req.dst_len - pos;
+ req->dst_len = dst_len - pos;
if (!err)
sg_copy_from_buffer(req->dst,
sg_nents_for_len(req->dst, req->dst_len),
- req_ctx->out_buf + pos, req->dst_len);
+ out_buf + pos, req->dst_len);
done:
kzfree(req_ctx->out_buf);
@@ -588,18 +543,7 @@ static int pkcs1pad_verify(struct akcipher_request *req)
if (!ctx->key_size || req->src_len < ctx->key_size)
return -EINVAL;
- if (ctx->key_size > PAGE_SIZE)
- return -ENOTSUPP;
-
- /* Reuse input buffer, output to a new buffer */
- req_ctx->child_req.src = req->src;
- req_ctx->child_req.src_len = req->src_len;
- req_ctx->child_req.dst = req_ctx->out_sg;
- req_ctx->child_req.dst_len = ctx->key_size;
-
- req_ctx->out_buf = kmalloc(ctx->key_size,
- (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC);
+ req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
if (!req_ctx->out_buf)
return -ENOMEM;
@@ -610,6 +554,11 @@ static int pkcs1pad_verify(struct akcipher_request *req)
akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
pkcs1pad_verify_complete_cb, req);
+ /* Reuse input buffer, output to a new buffer */
+ akcipher_request_set_crypt(&req_ctx->child_req, req->src,
+ req_ctx->out_sg, req->src_len,
+ ctx->key_size);
+
err = crypto_akcipher_verify(&req_ctx->child_req);
if (err != -EINPROGRESS &&
(err != -EBUSY ||
@@ -626,12 +575,11 @@ static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct crypto_akcipher *child_tfm;
- child_tfm = crypto_spawn_akcipher(akcipher_instance_ctx(inst));
+ child_tfm = crypto_spawn_akcipher(&ictx->spawn);
if (IS_ERR(child_tfm))
return PTR_ERR(child_tfm);
ctx->child = child_tfm;
- ctx->hash_name = ictx->hash_name;
return 0;
}
@@ -648,12 +596,12 @@ static void pkcs1pad_free(struct akcipher_instance *inst)
struct crypto_akcipher_spawn *spawn = &ctx->spawn;
crypto_drop_akcipher(spawn);
- kfree(ctx->hash_name);
kfree(inst);
}
static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
{
+ const struct rsa_asn1_template *digest_info;
struct crypto_attr_type *algt;
struct akcipher_instance *inst;
struct pkcs1pad_inst_ctx *ctx;
@@ -676,7 +624,11 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
hash_name = crypto_attr_alg_name(tb[2]);
if (IS_ERR(hash_name))
- hash_name = NULL;
+ return PTR_ERR(hash_name);
+
+ digest_info = rsa_lookup_asn1(hash_name);
+ if (!digest_info)
+ return -EINVAL;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
@@ -684,7 +636,7 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
ctx = akcipher_instance_ctx(inst);
spawn = &ctx->spawn;
- ctx->hash_name = hash_name ? kstrdup(hash_name, GFP_KERNEL) : NULL;
+ ctx->digest_info = digest_info;
crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst));
err = crypto_grab_akcipher(spawn, rsa_alg_name, 0,
@@ -696,27 +648,14 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
err = -ENAMETOOLONG;
- if (!hash_name) {
- if (snprintf(inst->alg.base.cra_name,
- CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
- rsa_alg->base.cra_name) >=
- CRYPTO_MAX_ALG_NAME ||
- snprintf(inst->alg.base.cra_driver_name,
- CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
- rsa_alg->base.cra_driver_name) >=
- CRYPTO_MAX_ALG_NAME)
+ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "pkcs1pad(%s,%s)", rsa_alg->base.cra_name, hash_name) >=
+ CRYPTO_MAX_ALG_NAME ||
+ snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "pkcs1pad(%s,%s)",
+ rsa_alg->base.cra_driver_name, hash_name) >=
+ CRYPTO_MAX_ALG_NAME)
goto out_drop_alg;
- } else {
- if (snprintf(inst->alg.base.cra_name,
- CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)",
- rsa_alg->base.cra_name, hash_name) >=
- CRYPTO_MAX_ALG_NAME ||
- snprintf(inst->alg.base.cra_driver_name,
- CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)",
- rsa_alg->base.cra_driver_name, hash_name) >=
- CRYPTO_MAX_ALG_NAME)
- goto out_free_hash;
- }
inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
@@ -738,12 +677,10 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
err = akcipher_register_instance(tmpl, inst);
if (err)
- goto out_free_hash;
+ goto out_drop_alg;
return 0;
-out_free_hash:
- kfree(ctx->hash_name);
out_drop_alg:
crypto_drop_akcipher(spawn);
out_free_inst:
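
The padding that pkcs1pad_sign constructs is EMSA-PKCS1-v1_5: 0x00, 0x01, at least eight 0xff bytes, 0x00, the ASN.1 DigestInfo, then the hash. A sketch of the buffer layout the code above builds (the leading 0x00 is implicit because in_buf is one byte short of the modulus, and the child RSA op restores it):

/* key_size - 1 bytes handed to the child RSA op */
ps_end = key_size - digest_info->size - req->src_len - 2;
in_buf[0] = 0x01;
memset(in_buf + 1, 0xff, ps_end - 1);		/* PS, >= 8 bytes of 0xff */
in_buf[ps_end] = 0x00;				/* separator */
memcpy(in_buf + ps_end + 1, digest_info->data, digest_info->size);
/* req->src (the hash itself) is chained on via pkcs1pad_sg_set_buf() */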
diff --git a/crypto/rsa.c b/crypto/rsa.c
index 77d737f52147..4c280b6a3ea9 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -10,16 +10,23 @@
*/
#include <linux/module.h>
+#include <linux/mpi.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <crypto/algapi.h>
+struct rsa_mpi_key {
+ MPI n;
+ MPI e;
+ MPI d;
+};
+
/*
* RSAEP function [RFC3447 sec 5.1.1]
* c = m^e mod n;
*/
-static int _rsa_enc(const struct rsa_key *key, MPI c, MPI m)
+static int _rsa_enc(const struct rsa_mpi_key *key, MPI c, MPI m)
{
/* (1) Validate 0 <= m < n */
if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0)
@@ -33,7 +40,7 @@ static int _rsa_enc(const struct rsa_key *key, MPI c, MPI m)
* RSADP function [RFC3447 sec 5.1.2]
* m = c^d mod n;
*/
-static int _rsa_dec(const struct rsa_key *key, MPI m, MPI c)
+static int _rsa_dec(const struct rsa_mpi_key *key, MPI m, MPI c)
{
/* (1) Validate 0 <= c < n */
if (mpi_cmp_ui(c, 0) < 0 || mpi_cmp(c, key->n) >= 0)
@@ -47,7 +54,7 @@ static int _rsa_dec(const struct rsa_key *key, MPI m, MPI c)
* RSASP1 function [RFC3447 sec 5.2.1]
* s = m^d mod n
*/
-static int _rsa_sign(const struct rsa_key *key, MPI s, MPI m)
+static int _rsa_sign(const struct rsa_mpi_key *key, MPI s, MPI m)
{
/* (1) Validate 0 <= m < n */
if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0)
@@ -61,7 +68,7 @@ static int _rsa_sign(const struct rsa_key *key, MPI s, MPI m)
* RSAVP1 function [RFC3447 sec 5.2.2]
* m = s^e mod n;
*/
-static int _rsa_verify(const struct rsa_key *key, MPI m, MPI s)
+static int _rsa_verify(const struct rsa_mpi_key *key, MPI m, MPI s)
{
/* (1) Validate 0 <= s < n */
if (mpi_cmp_ui(s, 0) < 0 || mpi_cmp(s, key->n) >= 0)
@@ -71,7 +78,7 @@ static int _rsa_verify(const struct rsa_key *key, MPI m, MPI s)
return mpi_powm(m, s, key->e, key->n);
}
-static inline struct rsa_key *rsa_get_key(struct crypto_akcipher *tfm)
+static inline struct rsa_mpi_key *rsa_get_key(struct crypto_akcipher *tfm)
{
return akcipher_tfm_ctx(tfm);
}
@@ -79,7 +86,7 @@ static inline struct rsa_key *rsa_get_key(struct crypto_akcipher *tfm)
static int rsa_enc(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- const struct rsa_key *pkey = rsa_get_key(tfm);
+ const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
MPI m, c = mpi_alloc(0);
int ret = 0;
int sign;
@@ -101,7 +108,7 @@ static int rsa_enc(struct akcipher_request *req)
if (ret)
goto err_free_m;
- ret = mpi_write_to_sgl(c, req->dst, &req->dst_len, &sign);
+ ret = mpi_write_to_sgl(c, req->dst, req->dst_len, &sign);
if (ret)
goto err_free_m;
@@ -118,7 +125,7 @@ err_free_c:
static int rsa_dec(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- const struct rsa_key *pkey = rsa_get_key(tfm);
+ const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
MPI c, m = mpi_alloc(0);
int ret = 0;
int sign;
@@ -140,7 +147,7 @@ static int rsa_dec(struct akcipher_request *req)
if (ret)
goto err_free_c;
- ret = mpi_write_to_sgl(m, req->dst, &req->dst_len, &sign);
+ ret = mpi_write_to_sgl(m, req->dst, req->dst_len, &sign);
if (ret)
goto err_free_c;
@@ -156,7 +163,7 @@ err_free_m:
static int rsa_sign(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- const struct rsa_key *pkey = rsa_get_key(tfm);
+ const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
MPI m, s = mpi_alloc(0);
int ret = 0;
int sign;
@@ -178,7 +185,7 @@ static int rsa_sign(struct akcipher_request *req)
if (ret)
goto err_free_m;
- ret = mpi_write_to_sgl(s, req->dst, &req->dst_len, &sign);
+ ret = mpi_write_to_sgl(s, req->dst, req->dst_len, &sign);
if (ret)
goto err_free_m;
@@ -195,7 +202,7 @@ err_free_s:
static int rsa_verify(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- const struct rsa_key *pkey = rsa_get_key(tfm);
+ const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
MPI s, m = mpi_alloc(0);
int ret = 0;
int sign;
@@ -219,7 +226,7 @@ static int rsa_verify(struct akcipher_request *req)
if (ret)
goto err_free_s;
- ret = mpi_write_to_sgl(m, req->dst, &req->dst_len, &sign);
+ ret = mpi_write_to_sgl(m, req->dst, req->dst_len, &sign);
if (ret)
goto err_free_s;
@@ -233,6 +240,16 @@ err_free_m:
return ret;
}
+static void rsa_free_mpi_key(struct rsa_mpi_key *key)
+{
+ mpi_free(key->d);
+ mpi_free(key->e);
+ mpi_free(key->n);
+ key->d = NULL;
+ key->e = NULL;
+ key->n = NULL;
+}
+
static int rsa_check_key_length(unsigned int len)
{
switch (len) {
@@ -251,49 +268,87 @@ static int rsa_check_key_length(unsigned int len)
static int rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
- struct rsa_key *pkey = akcipher_tfm_ctx(tfm);
+ struct rsa_mpi_key *mpi_key = akcipher_tfm_ctx(tfm);
+ struct rsa_key raw_key = {0};
int ret;
- ret = rsa_parse_pub_key(pkey, key, keylen);
+ /* Free the old MPI key if any */
+ rsa_free_mpi_key(mpi_key);
+
+ ret = rsa_parse_pub_key(&raw_key, key, keylen);
if (ret)
return ret;
- if (rsa_check_key_length(mpi_get_size(pkey->n) << 3)) {
- rsa_free_key(pkey);
- ret = -EINVAL;
+ mpi_key->e = mpi_read_raw_data(raw_key.e, raw_key.e_sz);
+ if (!mpi_key->e)
+ goto err;
+
+ mpi_key->n = mpi_read_raw_data(raw_key.n, raw_key.n_sz);
+ if (!mpi_key->n)
+ goto err;
+
+ if (rsa_check_key_length(mpi_get_size(mpi_key->n) << 3)) {
+ rsa_free_mpi_key(mpi_key);
+ return -EINVAL;
}
- return ret;
+
+ return 0;
+
+err:
+ rsa_free_mpi_key(mpi_key);
+ return -ENOMEM;
}
static int rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
- struct rsa_key *pkey = akcipher_tfm_ctx(tfm);
+ struct rsa_mpi_key *mpi_key = akcipher_tfm_ctx(tfm);
+ struct rsa_key raw_key = {0};
int ret;
- ret = rsa_parse_priv_key(pkey, key, keylen);
+ /* Free the old MPI key if any */
+ rsa_free_mpi_key(mpi_key);
+
+ ret = rsa_parse_priv_key(&raw_key, key, keylen);
if (ret)
return ret;
- if (rsa_check_key_length(mpi_get_size(pkey->n) << 3)) {
- rsa_free_key(pkey);
- ret = -EINVAL;
+ mpi_key->d = mpi_read_raw_data(raw_key.d, raw_key.d_sz);
+ if (!mpi_key->d)
+ goto err;
+
+ mpi_key->e = mpi_read_raw_data(raw_key.e, raw_key.e_sz);
+ if (!mpi_key->e)
+ goto err;
+
+ mpi_key->n = mpi_read_raw_data(raw_key.n, raw_key.n_sz);
+ if (!mpi_key->n)
+ goto err;
+
+ if (rsa_check_key_length(mpi_get_size(mpi_key->n) << 3)) {
+ rsa_free_mpi_key(mpi_key);
+ return -EINVAL;
}
- return ret;
+
+ return 0;
+
+err:
+ rsa_free_mpi_key(mpi_key);
+ return -ENOMEM;
}
static int rsa_max_size(struct crypto_akcipher *tfm)
{
- struct rsa_key *pkey = akcipher_tfm_ctx(tfm);
+ struct rsa_mpi_key *pkey = akcipher_tfm_ctx(tfm);
return pkey->n ? mpi_get_size(pkey->n) : -EINVAL;
}
static void rsa_exit_tfm(struct crypto_akcipher *tfm)
{
- struct rsa_key *pkey = akcipher_tfm_ctx(tfm);
+ struct rsa_mpi_key *pkey = akcipher_tfm_ctx(tfm);
- rsa_free_key(pkey);
+ rsa_free_mpi_key(pkey);
}
static struct akcipher_alg rsa = {
@@ -310,7 +365,7 @@ static struct akcipher_alg rsa = {
.cra_driver_name = "rsa-generic",
.cra_priority = 100,
.cra_module = THIS_MODULE,
- .cra_ctxsize = sizeof(struct rsa_key),
+ .cra_ctxsize = sizeof(struct rsa_mpi_key),
},
};
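
With the context now holding MPIs directly, each RSA primitive is a single modular exponentiation. A self-contained sketch of the encrypt path using the kernel MPI library (error handling trimmed; msg/msg_len are assumed inputs, and mpi_read_raw_data/mpi_powm are the same calls rsa.c relies on):

MPI m = mpi_read_raw_data(msg, msg_len);	/* big-endian message bytes */
MPI c = mpi_alloc(0);

/* RSAEP: c = m^e mod n, after checking 0 <= m < n */
if (mpi_cmp_ui(m, 0) >= 0 && mpi_cmp(m, key->n) < 0)
	err = mpi_powm(c, m, key->e, key->n);

mpi_free(m);
mpi_free(c);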
diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c
index d226f48d0907..4df6451e7543 100644
--- a/crypto/rsa_helper.c
+++ b/crypto/rsa_helper.c
@@ -22,20 +22,29 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct rsa_key *key = context;
+ const u8 *ptr = value;
+ size_t n_sz = vlen;
- key->n = mpi_read_raw_data(value, vlen);
-
- if (!key->n)
- return -ENOMEM;
-
- /* In FIPS mode only allow key size 2K & 3K */
- if (fips_enabled && (mpi_get_size(key->n) != 256 &&
- mpi_get_size(key->n) != 384)) {
- pr_err("RSA: key size not allowed in FIPS mode\n");
- mpi_free(key->n);
- key->n = NULL;
+ /* invalid key provided */
+ if (!value || !vlen)
return -EINVAL;
+
+ if (fips_enabled) {
+		while (n_sz && !*ptr) {
+ ptr++;
+ n_sz--;
+ }
+
+ /* In FIPS mode only allow key size 2K & 3K */
+ if (n_sz != 256 && n_sz != 384) {
+ pr_err("RSA: key size not allowed in FIPS mode\n");
+ return -EINVAL;
+ }
}
+
+ key->n = value;
+ key->n_sz = vlen;
+
return 0;
}
@@ -44,10 +53,12 @@ int rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
{
struct rsa_key *key = context;
- key->e = mpi_read_raw_data(value, vlen);
+ /* invalid key provided */
+ if (!value || !key->n_sz || !vlen || vlen > key->n_sz)
+ return -EINVAL;
- if (!key->e)
- return -ENOMEM;
+ key->e = value;
+ key->e_sz = vlen;
return 0;
}
@@ -57,46 +68,95 @@ int rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
{
struct rsa_key *key = context;
- key->d = mpi_read_raw_data(value, vlen);
+ /* invalid key provided */
+ if (!value || !key->n_sz || !vlen || vlen > key->n_sz)
+ return -EINVAL;
- if (!key->d)
- return -ENOMEM;
+ key->d = value;
+ key->d_sz = vlen;
- /* In FIPS mode only allow key size 2K & 3K */
- if (fips_enabled && (mpi_get_size(key->d) != 256 &&
- mpi_get_size(key->d) != 384)) {
- pr_err("RSA: key size not allowed in FIPS mode\n");
- mpi_free(key->d);
- key->d = NULL;
+ return 0;
+}
+
+int rsa_get_p(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct rsa_key *key = context;
+
+ /* invalid key provided */
+ if (!value || !vlen || vlen > key->n_sz)
return -EINVAL;
- }
+
+ key->p = value;
+ key->p_sz = vlen;
+
return 0;
}
-static void free_mpis(struct rsa_key *key)
+int rsa_get_q(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
{
- mpi_free(key->n);
- mpi_free(key->e);
- mpi_free(key->d);
- key->n = NULL;
- key->e = NULL;
- key->d = NULL;
+ struct rsa_key *key = context;
+
+ /* invalid key provided */
+ if (!value || !vlen || vlen > key->n_sz)
+ return -EINVAL;
+
+ key->q = value;
+ key->q_sz = vlen;
+
+ return 0;
}
-/**
- * rsa_free_key() - frees rsa key allocated by rsa_parse_key()
- *
- * @rsa_key: struct rsa_key key representation
- */
-void rsa_free_key(struct rsa_key *key)
+int rsa_get_dp(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct rsa_key *key = context;
+
+ /* invalid key provided */
+ if (!value || !vlen || vlen > key->n_sz)
+ return -EINVAL;
+
+ key->dp = value;
+ key->dp_sz = vlen;
+
+ return 0;
+}
+
+int rsa_get_dq(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
{
- free_mpis(key);
+ struct rsa_key *key = context;
+
+ /* invalid key provided */
+ if (!value || !vlen || vlen > key->n_sz)
+ return -EINVAL;
+
+ key->dq = value;
+ key->dq_sz = vlen;
+
+ return 0;
+}
+
+int rsa_get_qinv(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct rsa_key *key = context;
+
+ /* invalid key provided */
+ if (!value || !vlen || vlen > key->n_sz)
+ return -EINVAL;
+
+ key->qinv = value;
+ key->qinv_sz = vlen;
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(rsa_free_key);
/**
- * rsa_parse_pub_key() - extracts an rsa public key from BER encoded buffer
- * and stores it in the provided struct rsa_key
+ * rsa_parse_pub_key() - decodes the BER encoded buffer and stores pointers
+ *                       to the raw key in the provided struct rsa_key, so
+ *                       that the caller can copy it or parse it into MPIs.
*
* @rsa_key: struct rsa_key key representation
* @key: key in BER format
@@ -107,23 +167,15 @@ EXPORT_SYMBOL_GPL(rsa_free_key);
int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key,
unsigned int key_len)
{
- int ret;
-
- free_mpis(rsa_key);
- ret = asn1_ber_decoder(&rsapubkey_decoder, rsa_key, key, key_len);
- if (ret < 0)
- goto error;
-
- return 0;
-error:
- free_mpis(rsa_key);
- return ret;
+ return asn1_ber_decoder(&rsapubkey_decoder, rsa_key, key, key_len);
}
EXPORT_SYMBOL_GPL(rsa_parse_pub_key);
/**
- * rsa_parse_pub_key() - extracts an rsa private key from BER encoded buffer
- * and stores it in the provided struct rsa_key
+ * rsa_parse_priv_key() - decodes the BER encoded buffer and stores pointers
+ *                        to the raw key in the provided struct rsa_key, so
+ *                        that the caller can copy it or parse it into MPIs.
*
* @rsa_key: struct rsa_key key representation
* @key: key in BER format
@@ -134,16 +186,6 @@ EXPORT_SYMBOL_GPL(rsa_parse_pub_key);
int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key,
unsigned int key_len)
{
- int ret;
-
- free_mpis(rsa_key);
- ret = asn1_ber_decoder(&rsaprivkey_decoder, rsa_key, key, key_len);
- if (ret < 0)
- goto error;
-
- return 0;
-error:
- free_mpis(rsa_key);
- return ret;
+ return asn1_ber_decoder(&rsaprivkey_decoder, rsa_key, key, key_len);
}
EXPORT_SYMBOL_GPL(rsa_parse_priv_key);
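
Because the parsers now store borrowed pointers into the BER buffer rather than allocated MPIs, the buffer must outlive the struct rsa_key, and callers convert the fields themselves. A hedged usage sketch matching what rsa.c now does (ber_buf/ber_len are assumed inputs):

struct rsa_key raw_key = {0};
MPI n;

err = rsa_parse_pub_key(&raw_key, ber_buf, ber_len);
if (err)
	return err;

/* raw_key.n points into ber_buf; keep ber_buf alive until done */
n = mpi_read_raw_data(raw_key.n, raw_key.n_sz);
if (!n)
	return -ENOMEM;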
diff --git a/crypto/rsaprivkey.asn1 b/crypto/rsaprivkey.asn1
index 731aea5edb0c..4ce06758e8af 100644
--- a/crypto/rsaprivkey.asn1
+++ b/crypto/rsaprivkey.asn1
@@ -3,9 +3,9 @@ RsaPrivKey ::= SEQUENCE {
n INTEGER ({ rsa_get_n }),
e INTEGER ({ rsa_get_e }),
d INTEGER ({ rsa_get_d }),
- prime1 INTEGER,
- prime2 INTEGER,
- exponent1 INTEGER,
- exponent2 INTEGER,
- coefficient INTEGER
+ prime1 INTEGER ({ rsa_get_p }),
+ prime2 INTEGER ({ rsa_get_q }),
+ exponent1 INTEGER ({ rsa_get_dp }),
+ exponent2 INTEGER ({ rsa_get_dq }),
+ coefficient INTEGER ({ rsa_get_qinv })
}
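
Each new action callback fills a pointer/length pair, so struct rsa_key in include/crypto/internal/rsa.h now carries all eight private-key components. Roughly the shape implied by the callbacks above (a compact sketch; the header lays each field out on its own line):

struct rsa_key {
	const u8 *n, *e, *d, *p, *q, *dp, *dq, *qinv;
	size_t n_sz, e_sz, d_sz, p_sz, q_sz, dp_sz, dq_sz, qinv_sz;
};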
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index ea5815c5e128..52ce17a3dd63 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -18,8 +18,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/pagemap.h>
-#include <linux/highmem.h>
#include <linux/scatterlist.h>
static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
@@ -30,53 +28,6 @@ static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
memcpy(dst, src, nbytes);
}
-void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
-{
- walk->sg = sg;
-
- BUG_ON(!sg->length);
-
- walk->offset = sg->offset;
-}
-EXPORT_SYMBOL_GPL(scatterwalk_start);
-
-void *scatterwalk_map(struct scatter_walk *walk)
-{
- return kmap_atomic(scatterwalk_page(walk)) +
- offset_in_page(walk->offset);
-}
-EXPORT_SYMBOL_GPL(scatterwalk_map);
-
-static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
- unsigned int more)
-{
- if (out) {
- struct page *page;
-
- page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
- /* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as
- * PageSlab cannot be optimised away per se due to
- * use of volatile pointer.
- */
- if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page))
- flush_dcache_page(page);
- }
-
- if (more) {
- walk->offset += PAGE_SIZE - 1;
- walk->offset &= PAGE_MASK;
- if (walk->offset >= walk->sg->offset + walk->sg->length)
- scatterwalk_start(walk, sg_next(walk->sg));
- }
-}
-
-void scatterwalk_done(struct scatter_walk *walk, int out, int more)
-{
- if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more)
- scatterwalk_pagedone(walk, out, more);
-}
-EXPORT_SYMBOL_GPL(scatterwalk_done);
-
void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
size_t nbytes, int out)
{
@@ -87,9 +38,11 @@ void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
if (len_this_page > nbytes)
len_this_page = nbytes;
- vaddr = scatterwalk_map(walk);
- memcpy_dir(buf, vaddr, len_this_page, out);
- scatterwalk_unmap(vaddr);
+ if (out != 2) {
+ vaddr = scatterwalk_map(walk);
+ memcpy_dir(buf, vaddr, len_this_page, out);
+ scatterwalk_unmap(vaddr);
+ }
scatterwalk_advance(walk, len_this_page);
@@ -99,7 +52,7 @@ void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
buf += len_this_page;
nbytes -= len_this_page;
- scatterwalk_pagedone(walk, out, 1);
+ scatterwalk_pagedone(walk, out & 1, 1);
}
}
EXPORT_SYMBOL_GPL(scatterwalk_copychunks);
@@ -125,28 +78,6 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
}
EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy);
-int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes)
-{
- int offset = 0, n = 0;
-
- /* num_bytes is too small */
- if (num_bytes < sg->length)
- return -1;
-
- do {
- offset += sg->length;
- n++;
- sg = sg_next(sg);
-
- /* num_bytes is too large */
- if (unlikely(!sg && (num_bytes < offset)))
- return -1;
- } while (sg && (num_bytes > offset));
-
- return n;
-}
-EXPORT_SYMBOL_GPL(scatterwalk_bytes_sglen);
-
struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
struct scatterlist *src,
unsigned int len)
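
The out != 2 guard gives scatterwalk_copychunks a third mode: out == 2 advances the walk without touching memory, while scatterwalk_pagedone still sees only the 0/1 direction bit (out & 1). A two-line sketch of the skip idiom this enables for callers:

/* out == 0: copy sg -> buf, 1: copy buf -> sg, 2: just advance the walk */
scatterwalk_copychunks(NULL, &walk, bytes_to_skip, 2);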
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 15a749a5cab7..c7049231861f 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -14,50 +14,17 @@
*/
#include <crypto/internal/geniv.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/rng.h>
#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/spinlock.h>
#include <linux/string.h>
-struct seqiv_ctx {
- spinlock_t lock;
- u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
-};
-
static void seqiv_free(struct crypto_instance *inst);
-static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err)
-{
- struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
- struct crypto_ablkcipher *geniv;
-
- if (err == -EINPROGRESS)
- return;
-
- if (err)
- goto out;
-
- geniv = skcipher_givcrypt_reqtfm(req);
- memcpy(req->creq.info, subreq->info, crypto_ablkcipher_ivsize(geniv));
-
-out:
- kfree(subreq->info);
-}
-
-static void seqiv_complete(struct crypto_async_request *base, int err)
-{
- struct skcipher_givcrypt_request *req = base->data;
-
- seqiv_complete2(req, err);
- skcipher_givcrypt_complete(req, err);
-}
-
static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
{
struct aead_request *subreq = aead_request_ctx(req);
@@ -85,65 +52,6 @@ static void seqiv_aead_encrypt_complete(struct crypto_async_request *base,
aead_request_complete(req, err);
}
-static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq,
- unsigned int ivsize)
-{
- unsigned int len = ivsize;
-
- if (ivsize > sizeof(u64)) {
- memset(info, 0, ivsize - sizeof(u64));
- len = sizeof(u64);
- }
- seq = cpu_to_be64(seq);
- memcpy(info + ivsize - len, &seq, len);
- crypto_xor(info, ctx->salt, ivsize);
-}
-
-static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
-{
- struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
- struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
- struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
- crypto_completion_t compl;
- void *data;
- u8 *info;
- unsigned int ivsize;
- int err;
-
- ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
-
- compl = req->creq.base.complete;
- data = req->creq.base.data;
- info = req->creq.info;
-
- ivsize = crypto_ablkcipher_ivsize(geniv);
-
- if (unlikely(!IS_ALIGNED((unsigned long)info,
- crypto_ablkcipher_alignmask(geniv) + 1))) {
- info = kmalloc(ivsize, req->creq.base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
- GFP_ATOMIC);
- if (!info)
- return -ENOMEM;
-
- compl = seqiv_complete;
- data = req;
- }
-
- ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
- data);
- ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
- req->creq.nbytes, info);
-
- seqiv_geniv(ctx, info, req->seq, ivsize);
- memcpy(req->giv, info, ivsize);
-
- err = crypto_ablkcipher_encrypt(subreq);
- if (unlikely(info != req->creq.info))
- seqiv_complete2(req, err);
- return err;
-}
-
static int seqiv_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *geniv = crypto_aead_reqtfm(req);
@@ -165,12 +73,16 @@ static int seqiv_aead_encrypt(struct aead_request *req)
info = req->iv;
if (req->src != req->dst) {
- struct blkcipher_desc desc = {
- .tfm = ctx->null,
- };
+ SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
+
+ skcipher_request_set_tfm(nreq, ctx->sknull);
+ skcipher_request_set_callback(nreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(nreq, req->src, req->dst,
+ req->assoclen + req->cryptlen,
+ NULL);
- err = crypto_blkcipher_encrypt(&desc, req->dst, req->src,
- req->assoclen + req->cryptlen);
+ err = crypto_skcipher_encrypt(nreq);
if (err)
return err;
}
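
Note: the on-stack request above replaces the old synchronous blkcipher null copy with an skcipher call. A hedged sketch of how a ctx->sknull-style handle can be obtained (the init path is outside this hunk); "ecb(cipher_null)" makes crypto_skcipher_encrypt() degenerate into a plain scatterlist copy:

static struct crypto_skcipher *seqiv_alloc_sknull(void)
{
	/* Hypothetical helper; a real geniv init path would also size
	 * the request context to fit the null transform.
	 */
	return crypto_alloc_skcipher("ecb(cipher_null)", 0, 0);
}
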
@@ -229,62 +141,6 @@ static int seqiv_aead_decrypt(struct aead_request *req)
return crypto_aead_decrypt(subreq);
}
-static int seqiv_init(struct crypto_tfm *tfm)
-{
- struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
- struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
- int err;
-
- spin_lock_init(&ctx->lock);
-
- tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
-
- err = 0;
- if (!crypto_get_default_rng()) {
- crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt;
- err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
- crypto_ablkcipher_ivsize(geniv));
- crypto_put_default_rng();
- }
-
- return err ?: skcipher_geniv_init(tfm);
-}
-
-static int seqiv_ablkcipher_create(struct crypto_template *tmpl,
- struct rtattr **tb)
-{
- struct crypto_instance *inst;
- int err;
-
- inst = skcipher_geniv_alloc(tmpl, tb, 0, 0);
-
- if (IS_ERR(inst))
- return PTR_ERR(inst);
-
- err = -EINVAL;
- if (inst->alg.cra_ablkcipher.ivsize < sizeof(u64))
- goto free_inst;
-
- inst->alg.cra_init = seqiv_init;
- inst->alg.cra_exit = skcipher_geniv_exit;
-
- inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
- inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);
-
- inst->alg.cra_alignmask |= __alignof__(u32) - 1;
-
- err = crypto_register_instance(tmpl, inst);
- if (err)
- goto free_inst;
-
-out:
- return err;
-
-free_inst:
- skcipher_geniv_free(inst);
- goto out;
-}
-
static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct aead_instance *inst;
@@ -330,26 +186,20 @@ free_inst:
static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_attr_type *algt;
- int err;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return PTR_ERR(algt);
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
- err = seqiv_ablkcipher_create(tmpl, tb);
- else
- err = seqiv_aead_create(tmpl, tb);
+ return -EINVAL;
- return err;
+ return seqiv_aead_create(tmpl, tb);
}
static void seqiv_free(struct crypto_instance *inst)
{
- if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
- skcipher_geniv_free(inst);
- else
- aead_geniv_free(aead_instance(inst));
+ aead_geniv_free(aead_instance(inst));
}
static struct crypto_template seqiv_tmpl = {
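
Note: with the givcrypt path deleted, IV generation survives only in seqiv_aead_encrypt(). For reference, a sketch mirroring the removed seqiv_geniv() above: the 64-bit sequence number is stored big-endian at the tail of the IV and XORed with a per-transform salt.

static void seqiv_make_iv(u8 *info, const u8 *salt, u64 seq,
			  unsigned int ivsize)
{
	unsigned int len = ivsize;

	if (ivsize > sizeof(u64)) {
		memset(info, 0, ivsize - sizeof(u64));
		len = sizeof(u64);
	}
	seq = cpu_to_be64(seq);
	memcpy(info + ivsize - len, &seq, len);
	crypto_xor(info, salt, ivsize);
}
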
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
new file mode 100644
index 000000000000..62264397a2d2
--- /dev/null
+++ b/crypto/sha3_generic.c
@@ -0,0 +1,300 @@
+/*
+ * Cryptographic API.
+ *
+ * SHA-3, as specified in
+ * http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
+ *
+ * SHA-3 code by Jeff Garzik <jeff@garzik.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <crypto/sha3.h>
+#include <asm/byteorder.h>
+
+#define KECCAK_ROUNDS 24
+
+#define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y))))
+
+static const u64 keccakf_rndc[24] = {
+ 0x0000000000000001, 0x0000000000008082, 0x800000000000808a,
+ 0x8000000080008000, 0x000000000000808b, 0x0000000080000001,
+ 0x8000000080008081, 0x8000000000008009, 0x000000000000008a,
+ 0x0000000000000088, 0x0000000080008009, 0x000000008000000a,
+ 0x000000008000808b, 0x800000000000008b, 0x8000000000008089,
+ 0x8000000000008003, 0x8000000000008002, 0x8000000000000080,
+ 0x000000000000800a, 0x800000008000000a, 0x8000000080008081,
+ 0x8000000000008080, 0x0000000080000001, 0x8000000080008008
+};
+
+static const int keccakf_rotc[24] = {
+ 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
+ 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
+};
+
+static const int keccakf_piln[24] = {
+ 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
+ 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
+};
+
+/* run the Keccak-f[1600] permutation (KECCAK_ROUNDS rounds) over the state */
+
+static void keccakf(u64 st[25])
+{
+ int i, j, round;
+ u64 t, bc[5];
+
+ for (round = 0; round < KECCAK_ROUNDS; round++) {
+
+ /* Theta */
+ for (i = 0; i < 5; i++)
+ bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15]
+ ^ st[i + 20];
+
+ for (i = 0; i < 5; i++) {
+ t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1);
+ for (j = 0; j < 25; j += 5)
+ st[j + i] ^= t;
+ }
+
+ /* Rho Pi */
+ t = st[1];
+ for (i = 0; i < 24; i++) {
+ j = keccakf_piln[i];
+ bc[0] = st[j];
+ st[j] = ROTL64(t, keccakf_rotc[i]);
+ t = bc[0];
+ }
+
+ /* Chi */
+ for (j = 0; j < 25; j += 5) {
+ for (i = 0; i < 5; i++)
+ bc[i] = st[j + i];
+ for (i = 0; i < 5; i++)
+ st[j + i] ^= (~bc[(i + 1) % 5]) &
+ bc[(i + 2) % 5];
+ }
+
+ /* Iota */
+ st[0] ^= keccakf_rndc[round];
+ }
+}
+
+static void sha3_init(struct sha3_state *sctx, unsigned int digest_sz)
+{
+ memset(sctx, 0, sizeof(*sctx));
+ sctx->md_len = digest_sz;
+ sctx->rsiz = 200 - 2 * digest_sz;
+ sctx->rsizw = sctx->rsiz / 8;
+}
+
+static int sha3_224_init(struct shash_desc *desc)
+{
+ struct sha3_state *sctx = shash_desc_ctx(desc);
+
+ sha3_init(sctx, SHA3_224_DIGEST_SIZE);
+ return 0;
+}
+
+static int sha3_256_init(struct shash_desc *desc)
+{
+ struct sha3_state *sctx = shash_desc_ctx(desc);
+
+ sha3_init(sctx, SHA3_256_DIGEST_SIZE);
+ return 0;
+}
+
+static int sha3_384_init(struct shash_desc *desc)
+{
+ struct sha3_state *sctx = shash_desc_ctx(desc);
+
+ sha3_init(sctx, SHA3_384_DIGEST_SIZE);
+ return 0;
+}
+
+static int sha3_512_init(struct shash_desc *desc)
+{
+ struct sha3_state *sctx = shash_desc_ctx(desc);
+
+ sha3_init(sctx, SHA3_512_DIGEST_SIZE);
+ return 0;
+}
+
+static int sha3_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha3_state *sctx = shash_desc_ctx(desc);
+ unsigned int done;
+ const u8 *src;
+
+ done = 0;
+ src = data;
+
+ if ((sctx->partial + len) > (sctx->rsiz - 1)) {
+ if (sctx->partial) {
+ done = -sctx->partial;
+ memcpy(sctx->buf + sctx->partial, data,
+ done + sctx->rsiz);
+ src = sctx->buf;
+ }
+
+ do {
+ unsigned int i;
+
+ for (i = 0; i < sctx->rsizw; i++)
+ sctx->st[i] ^= ((u64 *) src)[i];
+ keccakf(sctx->st);
+
+ done += sctx->rsiz;
+ src = data + done;
+ } while (done + (sctx->rsiz - 1) < len);
+
+ sctx->partial = 0;
+ }
+ memcpy(sctx->buf + sctx->partial, src, len - done);
+ sctx->partial += (len - done);
+
+ return 0;
+}
+
+static int sha3_final(struct shash_desc *desc, u8 *out)
+{
+ struct sha3_state *sctx = shash_desc_ctx(desc);
+ unsigned int i, inlen = sctx->partial;
+
+ sctx->buf[inlen++] = 0x06;
+ memset(sctx->buf + inlen, 0, sctx->rsiz - inlen);
+ sctx->buf[sctx->rsiz - 1] |= 0x80;
+
+ for (i = 0; i < sctx->rsizw; i++)
+ sctx->st[i] ^= ((u64 *) sctx->buf)[i];
+
+ keccakf(sctx->st);
+
+ for (i = 0; i < sctx->rsizw; i++)
+ sctx->st[i] = cpu_to_le64(sctx->st[i]);
+
+ memcpy(out, sctx->st, sctx->md_len);
+
+ memset(sctx, 0, sizeof(*sctx));
+ return 0;
+}
+
+static struct shash_alg sha3_224 = {
+ .digestsize = SHA3_224_DIGEST_SIZE,
+ .init = sha3_224_init,
+ .update = sha3_update,
+ .final = sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base = {
+ .cra_name = "sha3-224",
+ .cra_driver_name = "sha3-224-generic",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA3_224_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static struct shash_alg sha3_256 = {
+ .digestsize = SHA3_256_DIGEST_SIZE,
+ .init = sha3_256_init,
+ .update = sha3_update,
+ .final = sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base = {
+ .cra_name = "sha3-256",
+ .cra_driver_name = "sha3-256-generic",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA3_256_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static struct shash_alg sha3_384 = {
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .init = sha3_384_init,
+ .update = sha3_update,
+ .final = sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base = {
+ .cra_name = "sha3-384",
+ .cra_driver_name = "sha3-384-generic",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static struct shash_alg sha3_512 = {
+ .digestsize = SHA3_512_DIGEST_SIZE,
+ .init = sha3_512_init,
+ .update = sha3_update,
+ .final = sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base = {
+ .cra_name = "sha3-512",
+ .cra_driver_name = "sha3-512-generic",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA3_512_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static int __init sha3_generic_mod_init(void)
+{
+ int ret;
+
+ ret = crypto_register_shash(&sha3_224);
+ if (ret < 0)
+ goto err_out;
+ ret = crypto_register_shash(&sha3_256);
+ if (ret < 0)
+ goto err_out_224;
+ ret = crypto_register_shash(&sha3_384);
+ if (ret < 0)
+ goto err_out_256;
+ ret = crypto_register_shash(&sha3_512);
+ if (ret < 0)
+ goto err_out_384;
+
+ return 0;
+
+err_out_384:
+ crypto_unregister_shash(&sha3_384);
+err_out_256:
+ crypto_unregister_shash(&sha3_256);
+err_out_224:
+ crypto_unregister_shash(&sha3_224);
+err_out:
+ return ret;
+}
+
+static void __exit sha3_generic_mod_fini(void)
+{
+ crypto_unregister_shash(&sha3_224);
+ crypto_unregister_shash(&sha3_256);
+ crypto_unregister_shash(&sha3_384);
+ crypto_unregister_shash(&sha3_512);
+}
+
+module_init(sha3_generic_mod_init);
+module_exit(sha3_generic_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA-3 Secure Hash Algorithm");
+
+MODULE_ALIAS_CRYPTO("sha3-224");
+MODULE_ALIAS_CRYPTO("sha3-224-generic");
+MODULE_ALIAS_CRYPTO("sha3-256");
+MODULE_ALIAS_CRYPTO("sha3-256-generic");
+MODULE_ALIAS_CRYPTO("sha3-384");
+MODULE_ALIAS_CRYPTO("sha3-384-generic");
+MODULE_ALIAS_CRYPTO("sha3-512");
+MODULE_ALIAS_CRYPTO("sha3-512-generic");
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 69230e9d4ac9..f7d0018dcaee 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -16,7 +16,11 @@
#include <crypto/internal/skcipher.h>
#include <linux/bug.h>
+#include <linux/cryptouser.h>
#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <linux/seq_file.h>
+#include <net/netlink.h>
#include "internal.h"
@@ -25,10 +29,11 @@ static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
if (alg->cra_type == &crypto_blkcipher_type)
return sizeof(struct crypto_blkcipher *);
- BUG_ON(alg->cra_type != &crypto_ablkcipher_type &&
- alg->cra_type != &crypto_givcipher_type);
+ if (alg->cra_type == &crypto_ablkcipher_type ||
+ alg->cra_type == &crypto_givcipher_type)
+ return sizeof(struct crypto_ablkcipher *);
- return sizeof(struct crypto_ablkcipher *);
+ return crypto_alg_extsize(alg);
}
static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
@@ -216,26 +221,118 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
return 0;
}
+static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
+
+ alg->exit(skcipher);
+}
+
static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
+ struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
+
if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
return crypto_init_skcipher_ops_blkcipher(tfm);
- BUG_ON(tfm->__crt_alg->cra_type != &crypto_ablkcipher_type &&
- tfm->__crt_alg->cra_type != &crypto_givcipher_type);
+ if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
+ tfm->__crt_alg->cra_type == &crypto_givcipher_type)
+ return crypto_init_skcipher_ops_ablkcipher(tfm);
+
+ skcipher->setkey = alg->setkey;
+ skcipher->encrypt = alg->encrypt;
+ skcipher->decrypt = alg->decrypt;
+ skcipher->ivsize = alg->ivsize;
+ skcipher->keysize = alg->max_keysize;
+
+ if (alg->exit)
+ skcipher->base.exit = crypto_skcipher_exit_tfm;
- return crypto_init_skcipher_ops_ablkcipher(tfm);
+ if (alg->init)
+ return alg->init(skcipher);
+
+ return 0;
+}
+
+static void crypto_skcipher_free_instance(struct crypto_instance *inst)
+{
+ struct skcipher_instance *skcipher =
+ container_of(inst, struct skcipher_instance, s.base);
+
+ skcipher->free(skcipher);
+}
+
+static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
+ __attribute__ ((unused));
+static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
+{
+ struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
+ base);
+
+ seq_printf(m, "type : skcipher\n");
+ seq_printf(m, "async : %s\n",
+ alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
+ seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
+ seq_printf(m, "min keysize : %u\n", skcipher->min_keysize);
+ seq_printf(m, "max keysize : %u\n", skcipher->max_keysize);
+ seq_printf(m, "ivsize : %u\n", skcipher->ivsize);
+ seq_printf(m, "chunksize : %u\n", skcipher->chunksize);
}
+#ifdef CONFIG_NET
+static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_report_blkcipher rblkcipher;
+ struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
+ base);
+
+ strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
+ strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));
+
+ rblkcipher.blocksize = alg->cra_blocksize;
+ rblkcipher.min_keysize = skcipher->min_keysize;
+ rblkcipher.max_keysize = skcipher->max_keysize;
+ rblkcipher.ivsize = skcipher->ivsize;
+
+ if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
+ sizeof(struct crypto_report_blkcipher), &rblkcipher))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+#else
+static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ return -ENOSYS;
+}
+#endif
+
static const struct crypto_type crypto_skcipher_type2 = {
.extsize = crypto_skcipher_extsize,
.init_tfm = crypto_skcipher_init_tfm,
+ .free = crypto_skcipher_free_instance,
+#ifdef CONFIG_PROC_FS
+ .show = crypto_skcipher_show,
+#endif
+ .report = crypto_skcipher_report,
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
- .type = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
.tfmsize = offsetof(struct crypto_skcipher, base),
};
+int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
+ const char *name, u32 type, u32 mask)
+{
+ spawn->base.frontend = &crypto_skcipher_type2;
+ return crypto_grab_spawn(&spawn->base, name, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
+
struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
u32 type, u32 mask)
{
@@ -243,5 +340,90 @@ struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
+int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
+{
+ return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
+ type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_has_skcipher2);
+
+static int skcipher_prepare_alg(struct skcipher_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+
+ if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8)
+ return -EINVAL;
+
+ if (!alg->chunksize)
+ alg->chunksize = base->cra_blocksize;
+
+ base->cra_type = &crypto_skcipher_type2;
+ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+ base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;
+
+ return 0;
+}
+
+int crypto_register_skcipher(struct skcipher_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+ int err;
+
+ err = skcipher_prepare_alg(alg);
+ if (err)
+ return err;
+
+ return crypto_register_alg(base);
+}
+EXPORT_SYMBOL_GPL(crypto_register_skcipher);
+
+void crypto_unregister_skcipher(struct skcipher_alg *alg)
+{
+ crypto_unregister_alg(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);
+
+int crypto_register_skciphers(struct skcipher_alg *algs, int count)
+{
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ ret = crypto_register_skcipher(&algs[i]);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (--i; i >= 0; --i)
+ crypto_unregister_skcipher(&algs[i]);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_register_skciphers);
+
+void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
+{
+ int i;
+
+ for (i = count - 1; i >= 0; --i)
+ crypto_unregister_skcipher(&algs[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);
+
+int skcipher_register_instance(struct crypto_template *tmpl,
+ struct skcipher_instance *inst)
+{
+ int err;
+
+ err = skcipher_prepare_alg(&inst->alg);
+ if (err)
+ return err;
+
+ return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
+}
+EXPORT_SYMBOL_GPL(skcipher_register_instance);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 579dce071463..ae22f05d5936 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -24,6 +24,7 @@
#include <crypto/aead.h>
#include <crypto/hash.h>
+#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/fips.h>
#include <linux/init.h>
@@ -72,7 +73,8 @@ static char *check[] = {
"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
"camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
- "lzo", "cts", "zlib", NULL
+ "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
+ NULL
};
struct tcrypt_result {
@@ -91,76 +93,6 @@ static void tcrypt_complete(struct crypto_async_request *req, int err)
complete(&res->completion);
}
-static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc,
- struct scatterlist *sg, int blen, int secs)
-{
- unsigned long start, end;
- int bcount;
- int ret;
-
- for (start = jiffies, end = start + secs * HZ, bcount = 0;
- time_before(jiffies, end); bcount++) {
- if (enc)
- ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
- else
- ret = crypto_blkcipher_decrypt(desc, sg, sg, blen);
-
- if (ret)
- return ret;
- }
-
- printk("%d operations in %d seconds (%ld bytes)\n",
- bcount, secs, (long)bcount * blen);
- return 0;
-}
-
-static int test_cipher_cycles(struct blkcipher_desc *desc, int enc,
- struct scatterlist *sg, int blen)
-{
- unsigned long cycles = 0;
- int ret = 0;
- int i;
-
- local_irq_disable();
-
- /* Warm-up run. */
- for (i = 0; i < 4; i++) {
- if (enc)
- ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
- else
- ret = crypto_blkcipher_decrypt(desc, sg, sg, blen);
-
- if (ret)
- goto out;
- }
-
- /* The real thing. */
- for (i = 0; i < 8; i++) {
- cycles_t start, end;
-
- start = get_cycles();
- if (enc)
- ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
- else
- ret = crypto_blkcipher_decrypt(desc, sg, sg, blen);
- end = get_cycles();
-
- if (ret)
- goto out;
-
- cycles += end - start;
- }
-
-out:
- local_irq_enable();
-
- if (ret == 0)
- printk("1 operation in %lu cycles (%d bytes)\n",
- (cycles + 4) / 8, blen);
-
- return ret;
-}
-
static inline int do_one_aead_op(struct aead_request *req, int ret)
{
if (ret == -EINPROGRESS || ret == -EBUSY) {
@@ -454,127 +386,148 @@ out_noxbuf:
return;
}
-static void test_cipher_speed(const char *algo, int enc, unsigned int secs,
- struct cipher_speed_template *template,
- unsigned int tcount, u8 *keysize)
+static void test_hash_sg_init(struct scatterlist *sg)
{
- unsigned int ret, i, j, iv_len;
- const char *key;
- char iv[128];
- struct crypto_blkcipher *tfm;
- struct blkcipher_desc desc;
- const char *e;
- u32 *b_size;
+ int i;
- if (enc == ENCRYPT)
- e = "encryption";
- else
- e = "decryption";
+ sg_init_table(sg, TVMEMSIZE);
+ for (i = 0; i < TVMEMSIZE; i++) {
+ sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
+ memset(tvmem[i], 0xff, PAGE_SIZE);
+ }
+}
- tfm = crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
+static inline int do_one_ahash_op(struct ahash_request *req, int ret)
+{
+ if (ret == -EINPROGRESS || ret == -EBUSY) {
+ struct tcrypt_result *tr = req->base.data;
- if (IS_ERR(tfm)) {
- printk("failed to load transform for %s: %ld\n", algo,
- PTR_ERR(tfm));
+ wait_for_completion(&tr->completion);
+ reinit_completion(&tr->completion);
+ ret = tr->err;
+ }
+ return ret;
+}
+
+struct test_mb_ahash_data {
+ struct scatterlist sg[TVMEMSIZE];
+ char result[64];
+ struct ahash_request *req;
+ struct tcrypt_result tresult;
+ char *xbuf[XBUFSIZE];
+};
+
+static void test_mb_ahash_speed(const char *algo, unsigned int sec,
+ struct hash_speed *speed)
+{
+ struct test_mb_ahash_data *data;
+ struct crypto_ahash *tfm;
+ unsigned long start, end;
+ unsigned long cycles;
+ unsigned int i, j, k;
+ int ret;
+
+ data = kzalloc(sizeof(*data) * 8, GFP_KERNEL);
+ if (!data)
return;
+
+ tfm = crypto_alloc_ahash(algo, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_err("failed to load transform for %s: %ld\n",
+ algo, PTR_ERR(tfm));
+ goto free_data;
}
- desc.tfm = tfm;
- desc.flags = 0;
- printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
- get_driver_name(crypto_blkcipher, tfm), e);
+ for (i = 0; i < 8; ++i) {
+ if (testmgr_alloc_buf(data[i].xbuf))
+ goto out;
- i = 0;
- do {
+ init_completion(&data[i].tresult.completion);
- b_size = block_sizes;
- do {
- struct scatterlist sg[TVMEMSIZE];
+ data[i].req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!data[i].req) {
+ pr_err("alg: hash: Failed to allocate request for %s\n",
+ algo);
+ goto out;
+ }
- if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) {
- printk("template (%u) too big for "
- "tvmem (%lu)\n", *keysize + *b_size,
- TVMEMSIZE * PAGE_SIZE);
- goto out;
- }
+ ahash_request_set_callback(data[i].req, 0,
+ tcrypt_complete, &data[i].tresult);
+ test_hash_sg_init(data[i].sg);
+ }
- printk("test %u (%d bit key, %d byte blocks): ", i,
- *keysize * 8, *b_size);
+ pr_info("\ntesting speed of multibuffer %s (%s)\n", algo,
+ get_driver_name(crypto_ahash, tfm));
- memset(tvmem[0], 0xff, PAGE_SIZE);
+ for (i = 0; speed[i].blen != 0; i++) {
+ /* Only one-shot digests (blen == plen) are exercised here. */
+ if (speed[i].blen != speed[i].plen)
+ continue;
- /* set key, plain text and IV */
- key = tvmem[0];
- for (j = 0; j < tcount; j++) {
- if (template[j].klen == *keysize) {
- key = template[j].key;
- break;
- }
- }
+ if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
+ pr_err("template (%u) too big for tvmem (%lu)\n",
+ speed[i].blen, TVMEMSIZE * PAGE_SIZE);
+ goto out;
+ }
- ret = crypto_blkcipher_setkey(tfm, key, *keysize);
- if (ret) {
- printk("setkey() failed flags=%x\n",
- crypto_blkcipher_get_flags(tfm));
- goto out;
- }
+ if (speed[i].klen)
+ crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen);
- sg_init_table(sg, TVMEMSIZE);
- sg_set_buf(sg, tvmem[0] + *keysize,
- PAGE_SIZE - *keysize);
- for (j = 1; j < TVMEMSIZE; j++) {
- sg_set_buf(sg + j, tvmem[j], PAGE_SIZE);
- memset (tvmem[j], 0xff, PAGE_SIZE);
- }
+ for (k = 0; k < 8; k++)
+ ahash_request_set_crypt(data[k].req, data[k].sg,
+ data[k].result, speed[i].blen);
- iv_len = crypto_blkcipher_ivsize(tfm);
- if (iv_len) {
- memset(&iv, 0xff, iv_len);
- crypto_blkcipher_set_iv(tfm, iv, iv_len);
- }
+ pr_info("test%3u "
+ "(%5u byte blocks,%5u bytes per update,%4u updates): ",
+ i, speed[i].blen, speed[i].plen,
+ speed[i].blen / speed[i].plen);
- if (secs)
- ret = test_cipher_jiffies(&desc, enc, sg,
- *b_size, secs);
- else
- ret = test_cipher_cycles(&desc, enc, sg,
- *b_size);
+ start = get_cycles();
- if (ret) {
- printk("%s() failed flags=%x\n", e, desc.flags);
- break;
+ for (k = 0; k < 8; k++) {
+ ret = crypto_ahash_digest(data[k].req);
+ if (ret == -EINPROGRESS) {
+ ret = 0;
+ continue;
}
- b_size++;
- i++;
- } while (*b_size);
- keysize++;
- } while (*keysize);
-out:
- crypto_free_blkcipher(tfm);
-}
+ if (ret)
+ break;
-static void test_hash_sg_init(struct scatterlist *sg)
-{
- int i;
+ complete(&data[k].tresult.completion);
+ data[k].tresult.err = 0;
+ }
- sg_init_table(sg, TVMEMSIZE);
- for (i = 0; i < TVMEMSIZE; i++) {
- sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
- memset(tvmem[i], 0xff, PAGE_SIZE);
- }
-}
+ for (j = 0; j < k; j++) {
+ struct tcrypt_result *tr = &data[j].tresult;
-static inline int do_one_ahash_op(struct ahash_request *req, int ret)
-{
- if (ret == -EINPROGRESS || ret == -EBUSY) {
- struct tcrypt_result *tr = req->base.data;
+ wait_for_completion(&tr->completion);
+ if (tr->err)
+ ret = tr->err;
+ }
- wait_for_completion(&tr->completion);
- reinit_completion(&tr->completion);
- ret = tr->err;
+ end = get_cycles();
+ cycles = end - start;
+ pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
+ cycles, cycles / (8 * speed[i].blen));
+
+ if (ret) {
+ pr_err("At least one hashing failed ret=%d\n", ret);
+ break;
+ }
}
- return ret;
+
+out:
+ for (k = 0; k < 8; ++k)
+ ahash_request_free(data[k].req);
+
+ for (k = 0; k < 8; ++k)
+ testmgr_free_buf(data[k].xbuf);
+
+ crypto_free_ahash(tfm);
+
+free_data:
+ kfree(data);
}
static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
@@ -812,7 +765,7 @@ static void test_hash_speed(const char *algo, unsigned int secs,
return test_ahash_speed_common(algo, secs, speed, CRYPTO_ALG_ASYNC);
}
-static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
+static inline int do_one_acipher_op(struct skcipher_request *req, int ret)
{
if (ret == -EINPROGRESS || ret == -EBUSY) {
struct tcrypt_result *tr = req->base.data;
@@ -825,7 +778,7 @@ static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
return ret;
}
-static int test_acipher_jiffies(struct ablkcipher_request *req, int enc,
+static int test_acipher_jiffies(struct skcipher_request *req, int enc,
int blen, int secs)
{
unsigned long start, end;
@@ -836,10 +789,10 @@ static int test_acipher_jiffies(struct ablkcipher_request *req, int enc,
time_before(jiffies, end); bcount++) {
if (enc)
ret = do_one_acipher_op(req,
- crypto_ablkcipher_encrypt(req));
+ crypto_skcipher_encrypt(req));
else
ret = do_one_acipher_op(req,
- crypto_ablkcipher_decrypt(req));
+ crypto_skcipher_decrypt(req));
if (ret)
return ret;
@@ -850,7 +803,7 @@ static int test_acipher_jiffies(struct ablkcipher_request *req, int enc,
return 0;
}
-static int test_acipher_cycles(struct ablkcipher_request *req, int enc,
+static int test_acipher_cycles(struct skcipher_request *req, int enc,
int blen)
{
unsigned long cycles = 0;
@@ -861,10 +814,10 @@ static int test_acipher_cycles(struct ablkcipher_request *req, int enc,
for (i = 0; i < 4; i++) {
if (enc)
ret = do_one_acipher_op(req,
- crypto_ablkcipher_encrypt(req));
+ crypto_skcipher_encrypt(req));
else
ret = do_one_acipher_op(req,
- crypto_ablkcipher_decrypt(req));
+ crypto_skcipher_decrypt(req));
if (ret)
goto out;
@@ -877,10 +830,10 @@ static int test_acipher_cycles(struct ablkcipher_request *req, int enc,
start = get_cycles();
if (enc)
ret = do_one_acipher_op(req,
- crypto_ablkcipher_encrypt(req));
+ crypto_skcipher_encrypt(req));
else
ret = do_one_acipher_op(req,
- crypto_ablkcipher_decrypt(req));
+ crypto_skcipher_decrypt(req));
end = get_cycles();
if (ret)
@@ -897,16 +850,16 @@ out:
return ret;
}
-static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
- struct cipher_speed_template *template,
- unsigned int tcount, u8 *keysize)
+static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
+ struct cipher_speed_template *template,
+ unsigned int tcount, u8 *keysize, bool async)
{
unsigned int ret, i, j, k, iv_len;
struct tcrypt_result tresult;
const char *key;
char iv[128];
- struct ablkcipher_request *req;
- struct crypto_ablkcipher *tfm;
+ struct skcipher_request *req;
+ struct crypto_skcipher *tfm;
const char *e;
u32 *b_size;
@@ -917,7 +870,7 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
init_completion(&tresult.completion);
- tfm = crypto_alloc_ablkcipher(algo, 0, 0);
+ tfm = crypto_alloc_skcipher(algo, 0, async ? 0 : CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm)) {
pr_err("failed to load transform for %s: %ld\n", algo,
@@ -926,17 +879,17 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
}
pr_info("\ntesting speed of async %s (%s) %s\n", algo,
- get_driver_name(crypto_ablkcipher, tfm), e);
+ get_driver_name(crypto_skcipher, tfm), e);
- req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+ req = skcipher_request_alloc(tfm, GFP_KERNEL);
if (!req) {
pr_err("tcrypt: skcipher: Failed to allocate request for %s\n",
algo);
goto out;
}
- ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &tresult);
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tcrypt_complete, &tresult);
i = 0;
do {
@@ -966,12 +919,12 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
}
}
- crypto_ablkcipher_clear_flags(tfm, ~0);
+ crypto_skcipher_clear_flags(tfm, ~0);
- ret = crypto_ablkcipher_setkey(tfm, key, *keysize);
+ ret = crypto_skcipher_setkey(tfm, key, *keysize);
if (ret) {
pr_err("setkey() failed flags=%x\n",
- crypto_ablkcipher_get_flags(tfm));
+ crypto_skcipher_get_flags(tfm));
goto out_free_req;
}
@@ -995,11 +948,11 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
sg_set_buf(sg, tvmem[0] + *keysize, *b_size);
}
- iv_len = crypto_ablkcipher_ivsize(tfm);
+ iv_len = crypto_skcipher_ivsize(tfm);
if (iv_len)
memset(&iv, 0xff, iv_len);
- ablkcipher_request_set_crypt(req, sg, sg, *b_size, iv);
+ skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
if (secs)
ret = test_acipher_jiffies(req, enc,
@@ -1010,7 +963,7 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
if (ret) {
pr_err("%s() failed flags=%x\n", e,
- crypto_ablkcipher_get_flags(tfm));
+ crypto_skcipher_get_flags(tfm));
break;
}
b_size++;
@@ -1020,9 +973,25 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
} while (*keysize);
out_free_req:
- ablkcipher_request_free(req);
+ skcipher_request_free(req);
out:
- crypto_free_ablkcipher(tfm);
+ crypto_free_skcipher(tfm);
+}
+
+static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
+ struct cipher_speed_template *template,
+ unsigned int tcount, u8 *keysize)
+{
+ return test_skcipher_speed(algo, enc, secs, template, tcount, keysize,
+ true);
+}
+
+static void test_cipher_speed(const char *algo, int enc, unsigned int secs,
+ struct cipher_speed_template *template,
+ unsigned int tcount, u8 *keysize)
+{
+ return test_skcipher_speed(algo, enc, secs, template, tcount, keysize,
+ false);
}
static void test_available(void)
@@ -1284,6 +1253,22 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
ret += tcrypt_test("crct10dif");
break;
+ case 48:
+ ret += tcrypt_test("sha3-224");
+ break;
+
+ case 49:
+ ret += tcrypt_test("sha3-256");
+ break;
+
+ case 50:
+ ret += tcrypt_test("sha3-384");
+ break;
+
+ case 51:
+ ret += tcrypt_test("sha3-512");
+ break;
+
case 100:
ret += tcrypt_test("hmac(md5)");
break;
@@ -1328,6 +1313,22 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
ret += tcrypt_test("hmac(crc32)");
break;
+ case 111:
+ ret += tcrypt_test("hmac(sha3-224)");
+ break;
+
+ case 112:
+ ret += tcrypt_test("hmac(sha3-256)");
+ break;
+
+ case 113:
+ ret += tcrypt_test("hmac(sha3-384)");
+ break;
+
+ case 114:
+ ret += tcrypt_test("hmac(sha3-512)");
+ break;
+
case 150:
ret += tcrypt_test("ansi_cprng");
break;
@@ -1406,6 +1407,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
speed_template_32_48_64);
test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_48_64);
+ test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32);
+ test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32);
test_cipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
@@ -1691,6 +1696,22 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
test_hash_speed("poly1305", sec, poly1305_speed_template);
if (mode > 300 && mode < 400) break;
+ case 322:
+ test_hash_speed("sha3-224", sec, generic_hash_speed_template);
+ if (mode > 300 && mode < 400) break;
+
+ case 323:
+ test_hash_speed("sha3-256", sec, generic_hash_speed_template);
+ if (mode > 300 && mode < 400) break;
+
+ case 324:
+ test_hash_speed("sha3-384", sec, generic_hash_speed_template);
+ if (mode > 300 && mode < 400) break;
+
+ case 325:
+ test_hash_speed("sha3-512", sec, generic_hash_speed_template);
+ if (mode > 300 && mode < 400) break;
+
case 399:
break;
@@ -1770,6 +1791,35 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
test_ahash_speed("rmd320", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
+ case 418:
+ test_ahash_speed("sha3-224", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 419:
+ test_ahash_speed("sha3-256", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 420:
+ test_ahash_speed("sha3-384", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+
+ case 421:
+ test_ahash_speed("sha3-512", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 422:
+ test_mb_ahash_speed("sha1", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 423:
+ test_mb_ahash_speed("sha256", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 424:
+ test_mb_ahash_speed("sha512", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
case 499:
break;
@@ -1790,6 +1840,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
speed_template_32_48_64);
test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_48_64);
+ test_acipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32);
+ test_acipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32);
test_acipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index c727fb0cb021..5c9d5a5e7b65 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -32,6 +32,7 @@
#include <crypto/rng.h>
#include <crypto/drbg.h>
#include <crypto/akcipher.h>
+#include <crypto/kpp.h>
#include "internal.h"
@@ -120,6 +121,11 @@ struct akcipher_test_suite {
unsigned int count;
};
+struct kpp_test_suite {
+ struct kpp_testvec *vecs;
+ unsigned int count;
+};
+
struct alg_test_desc {
const char *alg;
int (*test)(const struct alg_test_desc *desc, const char *driver,
@@ -134,6 +140,7 @@ struct alg_test_desc {
struct cprng_test_suite cprng;
struct drbg_test_suite drbg;
struct akcipher_test_suite akcipher;
+ struct kpp_test_suite kpp;
} suite;
};
@@ -1777,8 +1784,135 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
}
-static int do_test_rsa(struct crypto_akcipher *tfm,
- struct akcipher_testvec *vecs)
+static int do_test_kpp(struct crypto_kpp *tfm, struct kpp_testvec *vec,
+ const char *alg)
+{
+ struct kpp_request *req;
+ void *input_buf = NULL;
+ void *output_buf = NULL;
+ struct tcrypt_result result;
+ unsigned int out_len_max;
+ int err = -ENOMEM;
+ struct scatterlist src, dst;
+
+ req = kpp_request_alloc(tfm, GFP_KERNEL);
+ if (!req)
+ return err;
+
+ init_completion(&result.completion);
+
+ err = crypto_kpp_set_secret(tfm, vec->secret, vec->secret_size);
+ if (err < 0)
+ goto free_req;
+
+ out_len_max = crypto_kpp_maxsize(tfm);
+ output_buf = kzalloc(out_len_max, GFP_KERNEL);
+ if (!output_buf) {
+ err = -ENOMEM;
+ goto free_req;
+ }
+
+ /* Use appropriate parameter as base */
+ kpp_request_set_input(req, NULL, 0);
+ sg_init_one(&dst, output_buf, out_len_max);
+ kpp_request_set_output(req, &dst, out_len_max);
+ kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tcrypt_complete, &result);
+
+ /* Compute public key */
+ err = wait_async_op(&result, crypto_kpp_generate_public_key(req));
+ if (err) {
+ pr_err("alg: %s: generate public key test failed. err %d\n",
+ alg, err);
+ goto free_output;
+ }
+ /* Verify calculated public key */
+ if (memcmp(vec->expected_a_public, sg_virt(req->dst),
+ vec->expected_a_public_size)) {
+ pr_err("alg: %s: generate public key test failed. Invalid output\n",
+ alg);
+ err = -EINVAL;
+ goto free_output;
+ }
+
+ /* Calculate the shared secret using the counterpart's (b) public key. */
+ input_buf = kzalloc(vec->b_public_size, GFP_KERNEL);
+ if (!input_buf) {
+ err = -ENOMEM;
+ goto free_output;
+ }
+
+ memcpy(input_buf, vec->b_public, vec->b_public_size);
+ sg_init_one(&src, input_buf, vec->b_public_size);
+ sg_init_one(&dst, output_buf, out_len_max);
+ kpp_request_set_input(req, &src, vec->b_public_size);
+ kpp_request_set_output(req, &dst, out_len_max);
+ kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tcrypt_complete, &result);
+ err = wait_async_op(&result, crypto_kpp_compute_shared_secret(req));
+ if (err) {
+ pr_err("alg: %s: compute shard secret test failed. err %d\n",
+ alg, err);
+ goto free_all;
+ }
+ /*
+ * Verify the shared secret; callers derive their actual session key
+ * from it with a hash or KDF of their choosing.
+ */
+ if (memcmp(vec->expected_ss, sg_virt(req->dst),
+ vec->expected_ss_size)) {
+ pr_err("alg: %s: compute shared secret test failed. Invalid output\n",
+ alg);
+ err = -EINVAL;
+ }
+
+free_all:
+ kfree(input_buf);
+free_output:
+ kfree(output_buf);
+free_req:
+ kpp_request_free(req);
+ return err;
+}
+
+static int test_kpp(struct crypto_kpp *tfm, const char *alg,
+ struct kpp_testvec *vecs, unsigned int tcount)
+{
+ int ret, i;
+
+ for (i = 0; i < tcount; i++) {
+ ret = do_test_kpp(tfm, vecs++, alg);
+ if (ret) {
+ pr_err("alg: %s: test failed on vector %d, err=%d\n",
+ alg, i + 1, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
+ u32 type, u32 mask)
+{
+ struct crypto_kpp *tfm;
+ int err = 0;
+
+ tfm = crypto_alloc_kpp(driver, type | CRYPTO_ALG_INTERNAL, mask);
+ if (IS_ERR(tfm)) {
+ pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
+ driver, PTR_ERR(tfm));
+ return PTR_ERR(tfm);
+ }
+ if (desc->suite.kpp.vecs)
+ err = test_kpp(tfm, desc->alg, desc->suite.kpp.vecs,
+ desc->suite.kpp.count);
+
+ crypto_free_kpp(tfm);
+ return err;
+}
+
+static int test_akcipher_one(struct crypto_akcipher *tfm,
+ struct akcipher_testvec *vecs)
{
char *xbuf[XBUFSIZE];
struct akcipher_request *req;
@@ -1807,6 +1941,7 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
if (err)
goto free_req;
+ err = -ENOMEM;
out_len_max = crypto_akcipher_maxsize(tfm);
outbuf_enc = kzalloc(out_len_max, GFP_KERNEL);
if (!outbuf_enc)
@@ -1829,17 +1964,18 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
/* Run RSA encrypt - c = m^e mod n;*/
err = wait_async_op(&result, crypto_akcipher_encrypt(req));
if (err) {
- pr_err("alg: rsa: encrypt test failed. err %d\n", err);
+ pr_err("alg: akcipher: encrypt test failed. err %d\n", err);
goto free_all;
}
if (req->dst_len != vecs->c_size) {
- pr_err("alg: rsa: encrypt test failed. Invalid output len\n");
+ pr_err("alg: akcipher: encrypt test failed. Invalid output len\n");
err = -EINVAL;
goto free_all;
}
/* verify that encrypted message is equal to expected */
if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) {
- pr_err("alg: rsa: encrypt test failed. Invalid output\n");
+ pr_err("alg: akcipher: encrypt test failed. Invalid output\n");
+ hexdump(outbuf_enc, vecs->c_size);
err = -EINVAL;
goto free_all;
}
@@ -1867,18 +2003,22 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
/* Run RSA decrypt - m = c^d mod n;*/
err = wait_async_op(&result, crypto_akcipher_decrypt(req));
if (err) {
- pr_err("alg: rsa: decrypt test failed. err %d\n", err);
+ pr_err("alg: akcipher: decrypt test failed. err %d\n", err);
goto free_all;
}
out_len = req->dst_len;
- if (out_len != vecs->m_size) {
- pr_err("alg: rsa: decrypt test failed. Invalid output len\n");
+ if (out_len < vecs->m_size) {
+ pr_err("alg: akcipher: decrypt test failed. "
+ "Invalid output len %u\n", out_len);
err = -EINVAL;
goto free_all;
}
/* verify that decrypted message is equal to the original msg */
- if (memcmp(vecs->m, outbuf_dec, vecs->m_size)) {
- pr_err("alg: rsa: decrypt test failed. Invalid output\n");
+ if (memchr_inv(outbuf_dec, 0, out_len - vecs->m_size) ||
+ memcmp(vecs->m, outbuf_dec + out_len - vecs->m_size,
+ vecs->m_size)) {
+ pr_err("alg: akcipher: decrypt test failed. Invalid output\n");
+ hexdump(outbuf_dec, out_len);
err = -EINVAL;
}
free_all:
@@ -1891,28 +2031,22 @@ free_xbuf:
return err;
}
-static int test_rsa(struct crypto_akcipher *tfm, struct akcipher_testvec *vecs,
- unsigned int tcount)
+static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
+ struct akcipher_testvec *vecs, unsigned int tcount)
{
+ const char *algo =
+ crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm));
int ret, i;
for (i = 0; i < tcount; i++) {
- ret = do_test_rsa(tfm, vecs++);
- if (ret) {
- pr_err("alg: rsa: test failed on vector %d, err=%d\n",
- i + 1, ret);
- return ret;
- }
- }
- return 0;
-}
-
-static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
- struct akcipher_testvec *vecs, unsigned int tcount)
-{
- if (strncmp(alg, "rsa", 3) == 0)
- return test_rsa(tfm, vecs, tcount);
+ ret = test_akcipher_one(tfm, vecs++);
+ if (!ret)
+ continue;
+ pr_err("alg: akcipher: test %d failed for %s, err=%d\n",
+ i + 1, algo, ret);
+ return ret;
+ }
return 0;
}
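
Note: the decrypt check in the hunk above was also relaxed: raw RSA output may be longer than the message, as long as every extra leading byte is zero. The same predicate restated standalone (hedged sketch, not part of this patch):

static int check_decrypt_output(const u8 *out, unsigned int out_len,
				const u8 *m, unsigned int m_size)
{
	if (out_len < m_size)
		return -EINVAL;
	/* Leading bytes beyond the message must all be zero. */
	if (memchr_inv(out, 0, out_len - m_size) ||
	    memcmp(m, out + out_len - m_size, m_size))
		return -EINVAL;
	return 0;
}
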
@@ -2729,6 +2863,16 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "dh",
+ .test = alg_test_kpp,
+ .fips_allowed = 1,
+ .suite = {
+ .kpp = {
+ .vecs = dh_tv_template,
+ .count = DH_TEST_VECTORS
+ }
+ }
+ }, {
.alg = "digest_null",
.test = alg_test_null,
}, {
@@ -3157,6 +3301,16 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "ecdh",
+ .test = alg_test_kpp,
+ .fips_allowed = 1,
+ .suite = {
+ .kpp = {
+ .vecs = ecdh_tv_template,
+ .count = ECDH_TEST_VECTORS
+ }
+ }
+ }, {
.alg = "gcm(aes)",
.test = alg_test_aead,
.fips_allowed = 1,
@@ -3249,6 +3403,46 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "hmac(sha3-224)",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+ .hash = {
+ .vecs = hmac_sha3_224_tv_template,
+ .count = HMAC_SHA3_224_TEST_VECTORS
+ }
+ }
+ }, {
+ .alg = "hmac(sha3-256)",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+ .hash = {
+ .vecs = hmac_sha3_256_tv_template,
+ .count = HMAC_SHA3_256_TEST_VECTORS
+ }
+ }
+ }, {
+ .alg = "hmac(sha3-384)",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+ .hash = {
+ .vecs = hmac_sha3_384_tv_template,
+ .count = HMAC_SHA3_384_TEST_VECTORS
+ }
+ }
+ }, {
+ .alg = "hmac(sha3-512)",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+ .hash = {
+ .vecs = hmac_sha3_512_tv_template,
+ .count = HMAC_SHA3_512_TEST_VECTORS
+ }
+ }
+ }, {
.alg = "hmac(sha384)",
.test = alg_test_hash,
.fips_allowed = 1,
@@ -3659,6 +3853,46 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "sha3-224",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+ .hash = {
+ .vecs = sha3_224_tv_template,
+ .count = SHA3_224_TEST_VECTORS
+ }
+ }
+ }, {
+ .alg = "sha3-256",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+ .hash = {
+ .vecs = sha3_256_tv_template,
+ .count = SHA3_256_TEST_VECTORS
+ }
+ }
+ }, {
+ .alg = "sha3-384",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+ .hash = {
+ .vecs = sha3_384_tv_template,
+ .count = SHA3_384_TEST_VECTORS
+ }
+ }
+ }, {
+ .alg = "sha3-512",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+ .hash = {
+ .vecs = sha3_512_tv_template,
+ .count = SHA3_512_TEST_VECTORS
+ }
+ }
+ }, {
.alg = "sha384",
.test = alg_test_hash,
.fips_allowed = 1,
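
Note: the calls exercised by do_test_kpp() above are the whole kpp consumer contract. A condensed hedged sketch of the shared-secret side, using only calls visible in this patch; it assumes synchronous completion, where a real user would wire up a callback as do_test_kpp() does:

static int kpp_shared_secret_sketch(struct crypto_kpp *tfm,
				    void *params, unsigned int params_len,
				    struct scatterlist *peer_public,
				    unsigned int peer_len,
				    struct scatterlist *secret_out)
{
	struct kpp_request *req;
	int err;

	err = crypto_kpp_set_secret(tfm, params, params_len);
	if (err)
		return err;

	req = kpp_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	kpp_request_set_input(req, peer_public, peer_len);
	kpp_request_set_output(req, secret_out, crypto_kpp_maxsize(tfm));

	/* Synchronous for brevity; handle -EINPROGRESS/-EBUSY via a
	 * completion callback in real code.
	 */
	err = crypto_kpp_compute_shared_secret(req);

	kpp_request_free(req);
	return err;
}
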
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 487ec880e889..acb6bbff781a 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -133,6 +133,17 @@ struct akcipher_testvec {
bool public_key_vec;
};
+struct kpp_testvec {
+ unsigned char *secret;
+ unsigned char *b_public;
+ unsigned char *expected_a_public;
+ unsigned char *expected_ss;
+ unsigned short secret_size;
+ unsigned short b_public_size;
+ unsigned short expected_a_public_size;
+ unsigned short expected_ss_size;
+};
+
static char zeroed_string[48];
/*
@@ -141,7 +152,7 @@ static char zeroed_string[48];
#ifdef CONFIG_CRYPTO_FIPS
#define RSA_TEST_VECTORS 2
#else
-#define RSA_TEST_VECTORS 4
+#define RSA_TEST_VECTORS 5
#endif
static struct akcipher_testvec rsa_tv_template[] = {
{
@@ -327,6 +338,516 @@ static struct akcipher_testvec rsa_tv_template[] = {
.m_size = 8,
.c_size = 256,
.public_key_vec = true,
+ }, {
+ .key =
+ "\x30\x82\x09\x29" /* sequence of 2345 bytes */
+ "\x02\x01\x00" /* version integer of 1 byte */
+ "\x02\x82\x02\x01" /* modulus - integer of 513 bytes */
+ "\x00\xC3\x8B\x55\x7B\x73\x4D\xFF\xE9\x9B\xC6\xDC\x67\x3C\xB4\x8E"
+ "\xA0\x86\xED\xF2\xB9\x50\x5C\x54\x5C\xBA\xE4\xA1\xB2\xA7\xAE\x2F"
+ "\x1B\x7D\xF1\xFB\xAC\x79\xC5\xDF\x1A\x00\xC9\xB2\xC1\x61\x25\x33"
+ "\xE6\x9C\xE9\xCF\xD6\x27\xC4\x4E\x44\x30\x44\x5E\x08\xA1\x87\x52"
+ "\xCC\x6B\x97\x70\x8C\xBC\xA5\x06\x31\x0C\xD4\x2F\xD5\x7D\x26\x24"
+ "\xA2\xE2\xAC\x78\xF4\x53\x14\xCE\xF7\x19\x2E\xD7\xF7\xE6\x0C\xB9"
+ "\x56\x7F\x0B\xF1\xB1\xE2\x43\x70\xBD\x86\x1D\xA1\xCC\x2B\x19\x08"
+ "\x76\xEF\x91\xAC\xBF\x20\x24\x0D\x38\xC0\x89\xB8\x9A\x70\xB3\x64"
+ "\xD9\x8F\x80\x41\x10\x5B\x9F\xB1\xCB\x76\x43\x00\x21\x25\x36\xD4"
+ "\x19\xFC\x55\x95\x10\xE4\x26\x74\x98\x2C\xD9\xBD\x0B\x2B\x04\xC2"
+ "\xAC\x82\x38\xB4\xDD\x4C\x04\x7E\x51\x36\x40\x1E\x0B\xC4\x7C\x25"
+ "\xDD\x4B\xB2\xE7\x20\x0A\x57\xF9\xB4\x94\xC3\x08\x33\x22\x6F\x8B"
+ "\x48\xDB\x03\x68\x5A\x5B\xBA\xAE\xF3\xAD\xCF\xC3\x6D\xBA\xF1\x28"
+ "\x67\x7E\x6C\x79\x07\xDE\xFC\xED\xE7\x96\xE3\x6C\xE0\x2C\x87\xF8"
+ "\x02\x01\x28\x38\x43\x21\x53\x84\x69\x75\x78\x15\x7E\xEE\xD2\x1B"
+ "\xB9\x23\x40\xA8\x86\x1E\x38\x83\xB2\x73\x1D\x53\xFB\x9E\x2A\x8A"
+ "\xB2\x75\x35\x01\xC3\xC3\xC4\x94\xE8\x84\x86\x64\x81\xF4\x42\xAA"
+ "\x3C\x0E\xD6\x4F\xBC\x0A\x09\x2D\xE7\x1B\xD4\x10\xA8\x54\xEA\x89"
+ "\x84\x8A\xCB\xF7\x5A\x3C\xCA\x76\x08\x29\x62\xB4\x6A\x22\xDF\x14"
+ "\x95\x71\xFD\xB6\x86\x39\xB8\x8B\xF8\x91\x7F\x38\xAA\x14\xCD\xE5"
+ "\xF5\x1D\xC2\x6D\x53\x69\x52\x84\x7F\xA3\x1A\x5E\x26\x04\x83\x06"
+ "\x73\x52\x56\xCF\x76\x26\xC9\xDD\x75\xD7\xFC\xF4\x69\xD8\x7B\x55"
+ "\xB7\x68\x13\x53\xB9\xE7\x89\xC3\xE8\xD6\x6E\xA7\x6D\xEA\x81\xFD"
+ "\xC4\xB7\x05\x5A\xB7\x41\x0A\x23\x8E\x03\x8A\x1C\xAE\xD3\x1E\xCE"
+ "\xE3\x5E\xFC\x19\x4A\xEE\x61\x9B\x8E\xE5\xE5\xDD\x85\xF9\x41\xEC"
+ "\x14\x53\x92\xF7\xDD\x06\x85\x02\x91\xE3\xEB\x6C\x43\x03\xB1\x36"
+ "\x7B\x89\x5A\xA8\xEB\xFC\xD5\xA8\x35\xDC\x81\xD9\x5C\xBD\xCA\xDC"
+ "\x9B\x98\x0B\x06\x5D\x0C\x5B\xEE\xF3\xD5\xCC\x57\xC9\x71\x2F\x90"
+ "\x3B\x3C\xF0\x8E\x4E\x35\x48\xAE\x63\x74\xA9\xFC\x72\x75\x8E\x34"
+ "\xA8\xF2\x1F\xEA\xDF\x3A\x37\x2D\xE5\x39\x39\xF8\x57\x58\x3C\x04"
+ "\xFE\x87\x06\x98\xBC\x7B\xD3\x21\x36\x60\x25\x54\xA7\x3D\xFA\x91"
+ "\xCC\xA8\x0B\x92\x8E\xB4\xF7\x06\xFF\x1E\x95\xCB\x07\x76\x97\x3B"
+ "\x9D"
+ "\x02\x03\x01\x00\x01" /* public key integer of 3 bytes */
+ "\x02\x82\x02\x00" /* private key integer of 512 bytes */
+ "\x74\xA9\xE0\x6A\x32\xB4\xCA\x85\xD9\x86\x9F\x60\x88\x7B\x40\xCC"
+ "\xCD\x33\x91\xA8\xB6\x25\x1F\xBF\xE3\x51\x1C\x97\xB6\x2A\xD9\xB8"
+ "\x11\x40\x19\xE3\x21\x13\xC8\xB3\x7E\xDC\xD7\x65\x40\x4C\x2D\xD6"
+ "\xDC\xAF\x32\x6C\x96\x75\x2C\x2C\xCA\x8F\x3F\x7A\xEE\xC4\x09\xC6"
+ "\x24\x3A\xC9\xCF\x6D\x8D\x17\x50\x94\x52\xD3\xE7\x0F\x2F\x7E\x94"
+ "\x1F\xA0\xBE\xD9\x25\xE8\x38\x42\x7C\x27\xD2\x79\xF8\x2A\x87\x38"
+ "\xEF\xBB\x74\x8B\xA8\x6E\x8C\x08\xC6\xC7\x4F\x0C\xBC\x79\xC6\xEF"
+ "\x0E\xA7\x5E\xE4\xF8\x8C\x09\xC7\x5E\x37\xCC\x87\x77\xCD\xCF\xD1"
+ "\x6D\x28\x1B\xA9\x62\xC0\xB8\x16\xA7\x8B\xF9\xBB\xCC\xB4\x15\x7F"
+ "\x1B\x69\x03\xF2\x7B\xEB\xE5\x8C\x14\xD6\x23\x4F\x52\x6F\x18\xA6"
+ "\x4B\x5B\x01\xAD\x35\xF9\x48\x53\xB3\x86\x35\x66\xD7\xE7\x29\xC0"
+ "\x09\xB5\xC6\xE6\xFA\xC4\xDA\x19\xBE\xD7\x4D\x41\x14\xBE\x6F\xDF"
+ "\x1B\xAB\xC0\xCA\x88\x07\xAC\xF1\x7D\x35\x83\x67\x28\x2D\x50\xE9"
+ "\xCE\x27\x71\x5E\x1C\xCF\xD2\x30\x65\x79\x72\x2F\x9C\xE1\xD2\x39"
+ "\x7F\xEF\x3B\x01\xF2\x14\x1D\xDF\xBD\x51\xD3\xA1\x53\x62\xCF\x5F"
+ "\x79\x84\xCE\x06\x96\x69\x29\x49\x82\x1C\x71\x4A\xA1\x66\xC8\x2F"
+ "\xFD\x7B\x96\x7B\xFC\xC4\x26\x58\xC4\xFC\x7C\xAF\xB5\xE8\x95\x83"
+ "\x87\xCB\x46\xDE\x97\xA7\xB3\xA2\x54\x5B\xD7\xAF\xAB\xEB\xC8\xF3"
+ "\x55\x9D\x48\x2B\x30\x9C\xDC\x26\x4B\xC2\x89\x45\x13\xB2\x01\x9A"
+ "\xA4\x65\xC3\xEC\x24\x2D\x26\x97\xEB\x80\x8A\x9D\x03\xBC\x59\x66"
+ "\x9E\xE2\xBB\xBB\x63\x19\x64\x93\x11\x7B\x25\x65\x30\xCD\x5B\x4B"
+ "\x2C\xFF\xDC\x2D\x30\x87\x1F\x3C\x88\x07\xD0\xFC\x48\xCC\x05\x8A"
+ "\xA2\xC8\x39\x3E\xD5\x51\xBC\x0A\xBE\x6D\xA8\xA0\xF6\x88\x06\x79"
+ "\x13\xFF\x1B\x45\xDA\x54\xC9\x24\x25\x8A\x75\x0A\x26\xD1\x69\x81"
+ "\x14\x14\xD1\x79\x7D\x8E\x76\xF2\xE0\xEB\xDD\x0F\xDE\xC2\xEC\x80"
+ "\xD7\xDC\x16\x99\x92\xBE\xCB\x40\x0C\xCE\x7C\x3B\x46\xA2\x5B\x5D"
+ "\x0C\x45\xEB\xE1\x00\xDE\x72\x50\xB1\xA6\x0B\x76\xC5\x8D\xFC\x82"
+ "\x38\x6D\x99\x14\x1D\x1A\x4A\xD3\x7C\x53\xB8\x12\x46\xA2\x30\x38"
+ "\x82\xF4\x96\x6E\x8C\xCE\x47\x0D\xAF\x0A\x3B\x45\xB7\x43\x95\x43"
+ "\x9E\x02\x2C\x44\x07\x6D\x1F\x3C\x66\x89\x09\xB6\x1F\x06\x30\xCC"
+ "\xAD\xCE\x7D\x9A\xDE\x3E\xFB\x6C\xE4\x58\x43\xD2\x4F\xA5\x9E\x5E"
+ "\xA7\x7B\xAE\x3A\xF6\x7E\xD9\xDB\xD3\xF5\xC5\x41\xAF\xE6\x9C\x91"
+ "\x02\x82\x01\x01" /* prime1 - integer of 257 bytes */
+ "\x00\xE0\xA6\x6C\xF0\xA2\xF8\x81\x85\x36\x43\xD0\x13\x0B\x33\x8B"
+ "\x8F\x78\x3D\xAC\xC7\x5E\x46\x6A\x7F\x05\xAE\x3E\x26\x0A\xA6\xD0"
+ "\x51\xF3\xC8\x61\xF5\x77\x22\x48\x10\x87\x4C\xD5\xA4\xD5\xAE\x2D"
+ "\x4E\x7A\xFE\x1C\x31\xE7\x6B\xFF\xA4\x69\x20\xF9\x2A\x0B\x99\xBE"
+ "\x7C\x32\x68\xAD\xB0\xC6\x94\x81\x41\x75\xDC\x06\x78\x0A\xB4\xCF"
+ "\xCD\x1B\x2D\x31\xE4\x7B\xEA\xA8\x35\x99\x75\x57\xC6\x0E\xF6\x78"
+ "\x4F\xA0\x92\x4A\x00\x1B\xE7\x96\xF2\x5B\xFD\x2C\x0A\x0A\x13\x81"
+ "\xAF\xCB\x59\x87\x31\xD9\x83\x65\xF2\x22\x48\xD0\x03\x67\x39\xF6"
+ "\xFF\xA8\x36\x07\x3A\x68\xE3\x7B\xA9\x64\xFD\x9C\xF7\xB1\x3D\xBF"
+ "\x26\x5C\xCC\x7A\xFC\xA2\x8F\x51\xD1\xE1\xE2\x3C\xEC\x06\x75\x7C"
+ "\x34\xF9\xA9\x33\x70\x11\xAD\x5A\xDC\x5F\xCF\x50\xF6\x23\x2F\x39"
+ "\xAC\x92\x48\x53\x4D\x01\x96\x3C\xD8\xDC\x1F\x23\x23\x78\x80\x34"
+ "\x54\x14\x76\x8B\xB6\xBB\xFB\x88\x78\x31\x59\x28\xD2\xB1\x75\x17"
+ "\x88\x04\x4A\x78\x62\x18\x2E\xF5\xFB\x9B\xEF\x15\xD8\x16\x47\xC6"
+ "\x42\xB1\x02\xDA\x9E\xE3\x84\x90\xB4\x2D\xC3\xCE\x13\xC9\x12\x7D"
+ "\x3E\xCD\x39\x39\xC9\xAD\xA1\x1A\xE6\xD5\xAD\x5A\x09\x4D\x1B\x0C"
+ "\xAB"
+ "\x02\x82\x01\x01" /* prime 2 - integer of 257 bytes */
+ "\x00\xDE\xD5\x1B\xF6\xCD\x83\xB1\xC6\x47\x7E\xB9\xC0\x6B\xA9\xB8"
+ "\x02\xF3\xAE\x40\x5D\xFC\xD3\xE5\x4E\xF1\xE3\x39\x04\x52\x84\x89"
+ "\x40\x37\xBB\xC2\xCD\x7F\x71\x77\x17\xDF\x6A\x4C\x31\x24\x7F\xB9"
+ "\x7E\x7F\xC8\x43\x4A\x3C\xEB\x8D\x1B\x7F\x21\x51\x67\x45\x8F\xA0"
+ "\x36\x29\x3A\x18\x45\xA5\x32\xEC\x74\x88\x3C\x98\x5D\x67\x3B\xD7"
+ "\x51\x1F\xE9\xAE\x09\x01\xDE\xDE\x7C\xFB\x60\xD1\xA5\x6C\xE9\x6A"
+ "\x93\x04\x02\x3A\xBB\x67\x02\xB9\xFD\x23\xF0\x02\x2B\x49\x85\xC9"
+ "\x5B\xE7\x4B\xDF\xA3\xF4\xEE\x59\x4C\x45\xEF\x8B\xC1\x6B\xDE\xDE"
+ "\xBC\x1A\xFC\xD2\x76\x3F\x33\x74\xA9\x8E\xA3\x7E\x0C\xC6\xCE\x70"
+ "\xA1\x5B\xA6\x77\xEA\x76\xEB\x18\xCE\xB9\xD7\x78\x8D\xAE\x06\xBB"
+ "\xD3\x1F\x16\x0D\x05\xAB\x4F\xC6\x52\xC8\x6B\x36\x51\x7D\x1D\x27"
+ "\xAF\x88\x9A\x6F\xCC\x25\x2E\x74\x06\x72\xCE\x9E\xDB\xE0\x9D\x30"
+ "\xEF\x55\xA5\x58\x21\xA7\x42\x12\x2C\x2C\x23\x87\xC1\x0F\xE8\x51"
+ "\xDA\x53\xDA\xFC\x05\x36\xDF\x08\x0E\x08\x36\xBE\x5C\x86\x9E\xCA"
+ "\x68\x90\x33\x12\x0B\x14\x82\xAB\x90\x1A\xD4\x49\x32\x9C\xBD\xAA"
+ "\xAB\x4E\x38\xF1\xEE\xED\x3D\x3F\xE8\xBD\x48\x56\xA6\x64\xEE\xC8"
+ "\xD7"
+ "\x02\x82\x01\x01" /* exponent 1 - integer of 257 bytes */
+ "\x00\x96\x5E\x6F\x8F\x06\xD6\xE6\x03\x1F\x96\x76\x81\x38\xBF\x30"
+ "\xCC\x40\x84\xAF\xD0\xE7\x06\xA5\x24\x0E\xCE\x59\xA5\x26\xFE\x0F"
+ "\x74\xBB\x83\xC6\x26\x02\xAF\x3C\xA3\x6B\x9C\xFF\x68\x0C\xEB\x40"
+ "\x42\x46\xCB\x2E\x5E\x2C\xF4\x3A\x32\x77\x77\xED\xAF\xBA\x02\x17"
+ "\xE1\x93\xF0\x43\x4A\x8F\x31\x39\xEF\x72\x0F\x6B\x79\x10\x59\x84"
+ "\xBA\x5A\x55\x7F\x0E\xDB\xEE\xEE\xD6\xA9\xB8\x44\x9F\x3A\xC6\xB9"
+ "\x33\x3B\x5C\x90\x11\xD0\x9B\xCC\x8A\xBF\x0E\x10\x5B\x4B\xF1\x50"
+ "\x9E\x35\xB3\xE0\x6D\x7A\x95\x9C\x38\x5D\xC0\x75\x13\xC2\x15\xA7"
+ "\x81\xEA\xBA\xF7\x4D\x9E\x85\x9D\xF1\x7D\xBA\xD0\x45\x6F\x2A\xD0"
+ "\x76\xC2\x28\xD0\xAD\xA7\xB5\xDC\xE3\x6A\x99\xFF\x83\x50\xB3\x75"
+ "\x07\x14\x91\xAF\xEF\x74\xB5\x9F\x9A\xE0\xBA\xA9\x0B\x87\xF3\x85"
+ "\x5C\x40\xB2\x0E\xA7\xFD\xC6\xED\x45\x8E\xD9\x7C\xB0\xB2\x68\xC6"
+ "\x1D\xFD\x70\x78\x06\x41\x7F\x95\x12\x36\x9D\xE2\x58\x5D\x15\xEE"
+ "\x41\x49\xF5\xFA\xEC\x56\x19\xA0\xE6\xE0\xB2\x40\xE1\xD9\xD0\x03"
+ "\x22\x02\xCF\xD1\x3C\x07\x38\x65\x8F\x65\x0E\xAA\x32\xCE\x25\x05"
+ "\x16\x73\x51\xB9\x9F\x88\x0B\xCD\x30\xF3\x97\xCC\x2B\x6B\xA4\x0E"
+ "\x6F"
+ "\x02\x82\x01\x00" /* exponent 2 - integer of 256 bytes */
+ "\x2A\x5F\x3F\xB8\x08\x90\x58\x47\xA9\xE4\xB1\x11\xA3\xE7\x5B\xF4"
+ "\x43\xBE\x08\xC3\x56\x86\x3C\x7E\x6C\x84\x96\x9C\xF9\xCB\xF6\x05"
+ "\x5E\x13\xB8\x11\x37\x80\xAD\xF2\xBE\x2B\x0A\x5D\xF5\xE0\xCB\xB7"
+ "\x00\x39\x66\x82\x41\x5F\x51\x2F\xBF\x56\xE8\x91\xC8\xAA\x6C\xFE"
+ "\x9F\x8C\x4A\x7D\x43\xD2\x91\x1F\xFF\x9F\xF6\x21\x1C\xB6\x46\x55"
+ "\x48\xCA\x38\xAB\xC1\xCD\x4D\x65\x5A\xAF\xA8\x6D\xDA\x6D\xF0\x34"
+ "\x10\x79\x14\x0D\xFA\xA2\x8C\x17\x54\xB4\x18\xD5\x7E\x5F\x90\x50"
+ "\x87\x84\xE7\xFB\xD7\x61\x53\x5D\xAB\x96\xC7\x6E\x7A\x42\xA0\xFC"
+ "\x07\xED\xB7\x5F\x80\xD9\x19\xFF\xFB\xFD\x9E\xC4\x73\x31\x62\x3D"
+ "\x6C\x9E\x15\x03\x62\xA5\x85\xCC\x19\x8E\x9D\x7F\xE3\x6D\xA8\x5D"
+ "\x96\xF5\xAC\x78\x3D\x81\x27\xE7\x29\xF1\x29\x1D\x09\xBB\x77\x86"
+ "\x6B\x65\x62\x88\xE1\x31\x1A\x22\xF7\xC5\xCE\x73\x65\x1C\xBE\xE7"
+ "\x63\xD3\xD3\x14\x63\x27\xAF\x28\xF3\x23\xB6\x76\xC1\xBD\x9D\x82"
+ "\xF4\x9B\x19\x7D\x2C\x57\xF0\xC2\x2A\x51\xAE\x95\x0D\x8C\x38\x54"
+ "\xF5\xC6\xA0\x51\xB7\x0E\xB9\xEC\xE7\x0D\x22\xF6\x1A\xD3\xFE\x16"
+ "\x21\x03\xB7\x0D\x85\xD3\x35\xC9\xDD\xE4\x59\x85\xBE\x7F\xA1\x75"
+ "\x02\x82\x01\x01" /* coefficient - integer of 257 bytes */
+ "\x00\xB9\x48\xD2\x54\x2F\x19\x54\x64\xAE\x62\x80\x61\x89\x80\xB4"
+ "\x48\x0B\x8D\x7E\x1B\x0F\x50\x08\x82\x3F\xED\x75\x84\xB7\x13\xE4"
+ "\xF8\x8D\xA8\xBB\x54\x21\x4C\x5A\x54\x07\x16\x4B\xB4\xA4\x9E\x30"
+ "\xBF\x7A\x30\x1B\x39\x60\xA3\x21\x53\xFB\xB0\xDC\x0F\x7C\x2C\xFB"
+ "\xAA\x95\x7D\x51\x39\x28\x33\x1F\x25\x31\x53\xF5\xD2\x64\x2B\xF2"
+ "\x1E\xB3\xC0\x6A\x0B\xC9\xA4\x42\x64\x5C\xFB\x15\xA3\xE8\x4C\x3A"
+ "\x9C\x3C\xBE\xA3\x39\x83\x23\xE3\x6D\x18\xCC\xC2\xDC\x63\x8D\xBA"
+ "\x98\xE0\xE0\x31\x4A\x2B\x37\x9C\x4D\x6B\xF3\x9F\x51\xE4\x43\x5C"
+ "\x83\x5F\xBF\x5C\xFE\x92\x45\x01\xAF\xF5\xC2\xF4\xB7\x56\x93\xA5"
+ "\xF4\xAA\x67\x3C\x48\x37\xBD\x9A\x3C\xFE\xA5\x9A\xB0\xD1\x6B\x85"
+ "\xDD\x81\xD4\xFA\xAD\x31\x83\xA8\x22\x9B\xFD\xB4\x61\xDC\x7A\x51"
+ "\x59\x62\x10\x1B\x7E\x44\xA3\xFE\x90\x51\x5A\x3E\x02\x87\xAD\xFA"
+ "\xDD\x0B\x1F\x3D\x35\xAF\xEE\x13\x85\x51\xA7\x42\xC0\xEE\x9E\x20"
+ "\xE9\xD0\x29\xB2\xE4\x21\xE4\x6D\x62\xB9\xF4\x48\x4A\xD8\x46\x8E"
+ "\x61\xA6\x2C\x5D\xDF\x8F\x97\x2B\x3A\x75\x1D\x83\x17\x6F\xC6\xB0"
+ "\xDE\xFC\x14\x25\x06\x5A\x60\xBB\xB8\x21\x89\xD1\xEF\x57\xF1\x71"
+ "\x3D",
+ .m = "\x54\x85\x9b\x34\x2c\x49\xea\x2a",
+ .c =
+ "\x5c\xce\x9c\xd7\x9a\x9e\xa1\xfe\x7a\x82\x3c\x68\x27\x98\xe3\x5d"
+ "\xd5\xd7\x07\x29\xf5\xfb\xc3\x1a\x7f\x63\x1e\x62\x31\x3b\x19\x87"
+ "\x79\x4f\xec\x7b\xf3\xcb\xea\x9b\x95\x52\x3a\x40\xe5\x87\x7b\x72"
+ "\xd1\x72\xc9\xfb\x54\x63\xd8\xc9\xd7\x2c\xfc\x7b\xc3\x14\x1e\xbc"
+ "\x18\xb4\x34\xa1\xbf\x14\xb1\x37\x31\x6e\xf0\x1b\x35\x19\x54\x07"
+ "\xf7\x99\xec\x3e\x63\xe2\xcd\x61\x28\x65\xc3\xcd\xb1\x38\x36\xa5"
+ "\xb2\xd7\xb0\xdc\x1f\xf5\xef\x19\xc7\x53\x32\x2d\x1c\x26\xda\xe4"
+ "\x0d\xd6\x90\x7e\x28\xd8\xdc\xe4\x61\x05\xd2\x25\x90\x01\xd3\x96"
+ "\x6d\xa6\xcf\x58\x20\xbb\x03\xf4\x01\xbc\x79\xb9\x18\xd8\xb8\xba"
+ "\xbd\x93\xfc\xf2\x62\x5d\x8c\x66\x1e\x0e\x84\x59\x93\xdd\xe2\x93"
+ "\xa2\x62\x7d\x08\x82\x7a\xdd\xfc\xb8\xbc\xc5\x4f\x9c\x4e\xbf\xb4"
+ "\xfc\xf4\xc5\x01\xe8\x00\x70\x4d\x28\x26\xcc\x2e\xfe\x0e\x58\x41"
+ "\x8b\xec\xaf\x7c\x4b\x54\xd0\xa0\x64\xf9\x32\xf4\x2e\x47\x65\x0a"
+ "\x67\x88\x39\x3a\xdb\xb2\xdb\x7b\xb5\xf6\x17\xa8\xd9\xc6\x5e\x28"
+ "\x13\x82\x8a\x99\xdb\x60\x08\xa5\x23\x37\xfa\x88\x90\x31\xc8\x9d"
+ "\x8f\xec\xfb\x85\x9f\xb1\xce\xa6\x24\x50\x46\x44\x47\xcb\x65\xd1"
+ "\xdf\xc0\xb1\x6c\x90\x1f\x99\x8e\x4d\xd5\x9e\x31\x07\x66\x87\xdf"
+ "\x01\xaa\x56\x3c\x71\xe0\x2b\x6f\x67\x3b\x23\xed\xc2\xbd\x03\x30"
+ "\x79\x76\x02\x10\x10\x98\x85\x8a\xff\xfd\x0b\xda\xa5\xd9\x32\x48"
+ "\x02\xa0\x0b\xb9\x2a\x8a\x18\xca\xc6\x8f\x3f\xbb\x16\xb2\xaa\x98"
+ "\x27\xe3\x60\x43\xed\x15\x70\xd4\x57\x15\xfe\x19\xd4\x9b\x13\x78"
+ "\x8a\xf7\x21\xf1\xa2\xa2\x2d\xb3\x09\xcf\x44\x91\x6e\x08\x3a\x30"
+ "\x81\x3e\x90\x93\x8a\x67\x33\x00\x59\x54\x9a\x25\xd3\x49\x8e\x9f"
+ "\xc1\x4b\xe5\x86\xf3\x50\x4c\xbc\xc5\xd3\xf5\x3a\x54\xe1\x36\x3f"
+ "\xe2\x5a\xb4\x37\xc0\xeb\x70\x35\xec\xf6\xb7\xe8\x44\x3b\x7b\xf3"
+ "\xf1\xf2\x1e\xdb\x60\x7d\xd5\xbe\xf0\x71\x34\x90\x4c\xcb\xd4\x35"
+ "\x51\xc7\xdd\xd8\xc9\x81\xf5\x5d\x57\x46\x2c\xb1\x7b\x9b\xaa\xcb"
+ "\xd1\x22\x25\x49\x44\xa3\xd4\x6b\x29\x7b\xd8\xb2\x07\x93\xbf\x3d"
+ "\x52\x49\x84\x79\xef\xb8\xe5\xc4\xad\xca\xa8\xc6\xf6\xa6\x76\x70"
+ "\x5b\x0b\xe5\x83\xc6\x0e\xef\x55\xf2\xe7\xff\x04\xea\xe6\x13\xbe"
+ "\x40\xe1\x40\x45\x48\x66\x75\x31\xae\x35\x64\x91\x11\x6f\xda\xee"
+ "\x26\x86\x45\x6f\x0b\xd5\x9f\x03\xb1\x65\x5b\xdb\xa4\xe4\xf9\x45",
+ .key_len = 2349,
+ .m_size = 8,
+ .c_size = 512,
+ }
+};
+
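The inline comments above follow directly from DER framing: 0x02 is the ASN.1 INTEGER tag and 0x82 announces a long-form length carried in the next two octets, so a header of "\x02\x82\x01\x01" introduces an integer of 0x0101 = 257 bytes (the leading 0x00 that keeps a high-bit value positive is part of that count, which is why some 2048-bit components take 257 bytes and others 256). A minimal decoding sketch, not part of the patch; the helper name is this annotation's own:

    #include <stddef.h>

    /*
     * Length of an INTEGER framed as "\x02\x82\xNN\xNN", the only form
     * used in the vector above; a real DER parser must also handle the
     * short form and other long-form widths.
     */
    static size_t der_integer_len(const unsigned char *hdr)
    {
        /* hdr[0] == 0x02 (INTEGER), hdr[1] == 0x82 (2 length octets) */
        return ((size_t)hdr[2] << 8) | hdr[3];  /* 0x01,0x01 -> 257 */
    }
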
+#define DH_TEST_VECTORS 2
+
+static struct kpp_testvec dh_tv_template[] = {
+ {
+ .secret =
+#ifdef __LITTLE_ENDIAN
+ "\x01\x00" /* type */
+ "\x11\x02" /* len */
+ "\x00\x01\x00\x00" /* key_size */
+ "\x00\x01\x00\x00" /* p_size */
+ "\x01\x00\x00\x00" /* g_size */
+#else
+ "\x00\x01" /* type */
+ "\x02\x11" /* len */
+ "\x00\x00\x01\x00" /* key_size */
+ "\x00\x00\x01\x00" /* p_size */
+ "\x00\x00\x00\x01" /* g_size */
+#endif
+ /* xa */
+ "\x44\xc1\x48\x36\xa7\x2b\x6f\x4e\x43\x03\x68\xad\x31\x00\xda\xf3"
+ "\x2a\x01\xa8\x32\x63\x5f\x89\x32\x1f\xdf\x4c\xa1\x6a\xbc\x10\x15"
+ "\x90\x35\xc9\x26\x41\xdf\x7b\xaa\x56\x56\x3d\x85\x44\xb5\xc0\x8e"
+ "\x37\x83\x06\x50\xb3\x5f\x0e\x28\x2c\xd5\x46\x15\xe3\xda\x7d\x74"
+ "\x87\x13\x91\x4f\xd4\x2d\xf6\xc7\x5e\x14\x2c\x11\xc2\x26\xb4\x3a"
+ "\xe3\xb2\x36\x20\x11\x3b\x22\xf2\x06\x65\x66\xe2\x57\x58\xf8\x22"
+ "\x1a\x94\xbd\x2b\x0e\x8c\x55\xad\x61\x23\x45\x2b\x19\x1e\x63\x3a"
+ "\x13\x61\xe3\xa0\x79\x70\x3e\x6d\x98\x32\xbc\x7f\x82\xc3\x11\xd8"
+ "\xeb\x53\xb5\xfc\xb5\xd5\x3c\x4a\xea\x92\x3e\x01\xce\x15\x65\xd4"
+ "\xaa\x85\xc1\x11\x90\x83\x31\x6e\xfe\xe7\x7f\x7d\xed\xab\xf9\x29"
+ "\xf8\xc7\xf1\x68\xc6\xb7\xe4\x1f\x2f\x28\xa0\xc9\x1a\x50\x64\x29"
+ "\x4b\x01\x6d\x1a\xda\x46\x63\x21\x07\x40\x8c\x8e\x4c\x6f\xb5\xe5"
+ "\x12\xf3\xc2\x1b\x48\x27\x5e\x27\x01\xb1\xaa\xed\x68\x9b\x83\x18"
+ "\x8f\xb1\xeb\x1f\x04\xd1\x3c\x79\xed\x4b\xf7\x0a\x33\xdc\xe0\xc6"
+ "\xd8\x02\x51\x59\x00\x74\x30\x07\x4c\x2d\xac\xe4\x13\xf1\x80\xf0"
+ "\xce\xfa\xff\xa9\xce\x29\x46\xdd\x9d\xad\xd1\xc3\xc6\x58\x1a\x63"
+ /* p */
+ "\xb9\x36\x3a\xf1\x82\x1f\x60\xd3\x22\x47\xb8\xbc\x2d\x22\x6b\x81"
+ "\x7f\xe8\x20\x06\x09\x23\x73\x49\x9a\x59\x8b\x35\x25\xf8\x31\xbc"
+ "\x7d\xa8\x1c\x9d\x56\x0d\x1a\xf7\x4b\x4f\x96\xa4\x35\x77\x6a\x89"
+ "\xab\x42\x00\x49\x21\x71\xed\x28\x16\x1d\x87\x5a\x10\xa7\x9c\x64"
+ "\x94\xd4\x87\x3d\x28\xef\x44\xfe\x4b\xe2\xb4\x15\x8c\x82\xa6\xf3"
+ "\x50\x5f\xa8\xe8\xa2\x60\xe7\x00\x86\x78\x05\xd4\x78\x19\xa1\x98"
+ "\x62\x4e\x4a\x00\x78\x56\x96\xe6\xcf\xd7\x10\x1b\x74\x5d\xd0\x26"
+ "\x61\xdb\x6b\x32\x09\x51\xd8\xa5\xfd\x54\x16\x71\x01\xb3\x39\xe6"
+ "\x4e\x69\xb1\xd7\x06\x8f\xd6\x1e\xdc\x72\x25\x26\x74\xc8\x41\x06"
+ "\x5c\xd1\x26\x5c\xb0\x2f\xf9\x59\x13\xc1\x2a\x0f\x78\xea\x7b\xf7"
+ "\xbd\x59\xa0\x90\x1d\xfc\x33\x5b\x4c\xbf\x05\x9c\x3a\x3f\x69\xa2"
+ "\x45\x61\x4e\x10\x6a\xb3\x17\xc5\x68\x30\xfb\x07\x5f\x34\xc6\xfb"
+ "\x73\x07\x3c\x70\xf6\xae\xe7\x72\x84\xc3\x18\x81\x8f\xe8\x11\x1f"
+ "\x3d\x83\x83\x01\x2a\x14\x73\xbf\x32\x32\x2e\xc9\x4d\xdb\x2a\xca"
+ "\xee\x71\xf9\xda\xad\xe8\x82\x0b\x4d\x0c\x1f\xb6\x1d\xef\x00\x67"
+ "\x74\x3d\x95\xe0\xb7\xc4\x30\x8a\x24\x87\x12\x47\x27\x70\x0d\x73"
+ /* g */
+ "\x02",
+ .b_public =
+ "\x2a\x67\x5c\xfd\x63\x5d\xc0\x97\x0a\x8b\xa2\x1f\xf8\x8a\xcb\x54"
+ "\xca\x2f\xd3\x49\x3f\x01\x8e\x87\xfe\xcc\x94\xa0\x3e\xd4\x26\x79"
+ "\x9a\x94\x3c\x11\x81\x58\x5c\x60\x3d\xf5\x98\x90\x89\x64\x62\x1f"
+ "\xbd\x05\x6d\x2b\xcd\x84\x40\x9b\x4a\x1f\xe0\x19\xf1\xca\x20\xb3"
+ "\x4e\xa0\x4f\x15\xcc\xa5\xfe\xa5\xb4\xf5\x0b\x18\x7a\x5a\x37\xaa"
+ "\x58\x00\x19\x7f\xe2\xa3\xd9\x1c\x44\x57\xcc\xde\x2e\xc1\x38\xea"
+ "\xeb\xe3\x90\x40\xc4\x6c\xf7\xcd\xe9\x22\x50\x71\xf5\x7c\xdb\x37"
+ "\x0e\x80\xc3\xed\x7e\xb1\x2b\x2f\xbe\x71\xa6\x11\xa5\x9d\xf5\x39"
+ "\xf1\xa2\xe5\x85\xbc\x25\x91\x4e\x84\x8d\x26\x9f\x4f\xe6\x0f\xa6"
+ "\x2b\x6b\xf9\x0d\xaf\x6f\xbb\xfa\x2d\x79\x15\x31\x57\xae\x19\x60"
+ "\x22\x0a\xf5\xfd\x98\x0e\xbf\x5d\x49\x75\x58\x37\xbc\x7f\xf5\x21"
+ "\x56\x1e\xd5\xb3\x50\x0b\xca\x96\xf3\xd1\x3f\xb3\x70\xa8\x6d\x63"
+ "\x48\xfb\x3d\xd7\x29\x91\x45\xb5\x48\xcd\xb6\x78\x30\xf2\x3f\x1e"
+ "\xd6\x22\xd6\x35\x9b\xf9\x1f\x85\xae\xab\x4b\xd7\xe0\xc7\x86\x67"
+ "\x3f\x05\x7f\xa6\x0d\x2f\x0d\xbf\x53\x5f\x4d\x2c\x6d\x5e\x57\x40"
+ "\x30\x3a\x23\x98\xf9\xb4\x32\xf5\x32\x83\xdd\x0b\xae\x33\x97\x2f",
+ .expected_a_public =
+ "\x5c\x24\xdf\xeb\x5b\x4b\xf8\xc5\xef\x39\x48\x82\xe0\x1e\x62\xee"
+ "\x8a\xae\xdf\x93\x6c\x2b\x16\x95\x92\x16\x3f\x16\x7b\x75\x03\x85"
+ "\xd9\xf1\x69\xc2\x14\x87\x45\xfc\xa4\x19\xf6\xf0\xa4\xf3\xec\xd4"
+ "\x6c\x5c\x03\x3b\x94\xc2\x2f\x92\xe4\xce\xb3\xe4\x72\xe8\x17\xe6"
+ "\x23\x7e\x00\x01\x09\x59\x13\xbf\xc1\x2f\x99\xa9\x07\xaa\x02\x23"
+ "\x4a\xca\x39\x4f\xbc\xec\x0f\x27\x4f\x19\x93\x6c\xb9\x30\x52\xfd"
+ "\x2b\x9d\x86\xf1\x06\x1e\xb6\x56\x27\x4a\xc9\x8a\xa7\x8a\x48\x5e"
+ "\xb5\x60\xcb\xdf\xff\x03\x26\x10\xbf\x90\x8f\x46\x60\xeb\x9b\x9a"
+ "\xd6\x6f\x44\x91\x03\x92\x18\x2c\x96\x5e\x40\x19\xfb\xf4\x4f\x3a"
+ "\x02\x7b\xaf\xcc\x22\x20\x79\xb9\xf8\x9f\x8f\x85\x6b\xec\x44\xbb"
+ "\xe6\xa8\x8e\xb1\xe8\x2c\xee\x64\xee\xf8\xbd\x00\xf3\xe2\x2b\x93"
+ "\xcd\xe7\xc4\xdf\xc9\x19\x46\xfe\xb6\x07\x73\xc1\x8a\x64\x79\x26"
+ "\xe7\x30\xad\x2a\xdf\xe6\x8f\x59\xf5\x81\xbf\x4a\x29\x91\xe7\xb7"
+ "\xcf\x48\x13\x27\x75\x79\x40\xd9\xd6\x32\x52\x4e\x6a\x86\xae\x6f"
+ "\xc2\xbf\xec\x1f\xc2\x69\xb2\xb6\x59\xe5\xa5\x17\xa4\x77\xb7\x62"
+ "\x46\xde\xe8\xd2\x89\x78\x9a\xef\xa3\xb5\x8f\x26\xec\x80\xda\x39",
+ .expected_ss =
+ "\x8f\xf3\xac\xa2\xea\x22\x11\x5c\x45\x65\x1a\x77\x75\x2e\xcf\x46"
+ "\x23\x14\x1e\x67\x53\x4d\x35\xb0\x38\x1d\x4e\xb9\x41\x9a\x21\x24"
+ "\x6e\x9f\x40\xfe\x90\x51\xb1\x06\xa4\x7b\x87\x17\x2f\xe7\x5e\x22"
+ "\xf0\x7b\x54\x84\x0a\xac\x0a\x90\xd2\xd7\xe8\x7f\xe7\xe3\x30\x75"
+ "\x01\x1f\x24\x75\x56\xbe\xcc\x8d\x1e\x68\x0c\x41\x72\xd3\xfa\xbb"
+ "\xe5\x9c\x60\xc7\x28\x77\x0c\xbe\x89\xab\x08\xd6\x21\xe7\x2e\x1a"
+ "\x58\x7a\xca\x4f\x22\xf3\x2b\x30\xfd\xf4\x98\xc1\xa3\xf8\xf6\xcc"
+ "\xa9\xe4\xdb\x5b\xee\xd5\x5c\x6f\x62\x4c\xd1\x1a\x02\x2a\x23\xe4"
+ "\xb5\x57\xf3\xf9\xec\x04\x83\x54\xfe\x08\x5e\x35\xac\xfb\xa8\x09"
+ "\x82\x32\x60\x11\xb2\x16\x62\x6b\xdf\xda\xde\x9c\xcb\x63\x44\x6c"
+ "\x59\x26\x6a\x8f\xb0\x24\xcb\xa6\x72\x48\x1e\xeb\xe0\xe1\x09\x44"
+ "\xdd\xee\x66\x6d\x84\xcf\xa5\xc1\xb8\x36\x74\xd3\x15\x96\xc3\xe4"
+ "\xc6\x5a\x4d\x23\x97\x0c\x5c\xcb\xa9\xf5\x29\xc2\x0e\xff\x93\x82"
+ "\xd3\x34\x49\xad\x64\xa6\xb1\xc0\x59\x28\x75\x60\xa7\x8a\xb0\x11"
+ "\x56\x89\x42\x74\x11\xf5\xf6\x5e\x6f\x16\x54\x6a\xb1\x76\x4d\x50"
+ "\x8a\x68\xc1\x5b\x82\xb9\x0d\x00\x32\x50\xed\x88\x87\x48\x92\x17",
+ .secret_size = 529,
+ .b_public_size = 256,
+ .expected_a_public_size = 256,
+ .expected_ss_size = 256,
+ },
+ {
+ .secret =
+#ifdef __LITTLE_ENDIAN
+ "\x01\x00" /* type */
+ "\x11\x02" /* len */
+ "\x00\x01\x00\x00" /* key_size */
+ "\x00\x01\x00\x00" /* p_size */
+ "\x01\x00\x00\x00" /* g_size */
+#else
+ "\x00\x01" /* type */
+ "\x02\x11" /* len */
+ "\x00\x00\x01\x00" /* key_size */
+ "\x00\x00\x01\x00" /* p_size */
+ "\x00\x00\x00\x01" /* g_size */
+#endif
+ /* xa */
+ "\x4d\x75\xa8\x6e\xba\x23\x3a\x0c\x63\x56\xc8\xc9\x5a\xa7\xd6\x0e"
+ "\xed\xae\x40\x78\x87\x47\x5f\xe0\xa7\x7b\xba\x84\x88\x67\x4e\xe5"
+ "\x3c\xcc\x5c\x6a\xe7\x4a\x20\xec\xbe\xcb\xf5\x52\x62\x9f\x37\x80"
+ "\x0c\x72\x7b\x83\x66\xa4\xf6\x7f\x95\x97\x1c\x6a\x5c\x7e\xf1\x67"
+ "\x37\xb3\x93\x39\x3d\x0b\x55\x35\xd9\xe5\x22\x04\x9f\xf8\xc1\x04"
+ "\xce\x13\xa5\xac\xe1\x75\x05\xd1\x2b\x53\xa2\x84\xef\xb1\x18\xf4"
+ "\x66\xdd\xea\xe6\x24\x69\x5a\x49\xe0\x7a\xd8\xdf\x1b\xb7\xf1\x6d"
+ "\x9b\x50\x2c\xc8\x1c\x1c\xa3\xb4\x37\xfb\x66\x3f\x67\x71\x73\xa9"
+ "\xff\x5f\xd9\xa2\x25\x6e\x25\x1b\x26\x54\xbf\x0c\xc6\xdb\xea\x0a"
+ "\x52\x6c\x16\x7c\x27\x68\x15\x71\x58\x73\x9d\xe6\xc2\x80\xaa\x97"
+ "\x31\x66\xfb\xa6\xfb\xfd\xd0\x9c\x1d\xbe\x81\x48\xf5\x9a\x32\xf1"
+ "\x69\x62\x18\x78\xae\x72\x36\xe6\x94\x27\xd1\xff\x18\x4f\x28\x6a"
+ "\x16\xbd\x6a\x60\xee\xe5\xf9\x6d\x16\xe4\xb8\xa6\x41\x9b\x23\x7e"
+ "\xf7\x9d\xd1\x1d\x03\x15\x66\x3a\xcf\xb6\x2c\x13\x96\x2c\x52\x21"
+ "\xe4\x2d\x48\x7a\x8a\x5d\xb2\x88\xed\x98\x61\x79\x8b\x6a\x1e\x5f"
+ "\xd0\x8a\x2d\x99\x5a\x2b\x0f\xbc\xef\x53\x8f\x32\xc1\xa2\x99\x26"
+ /* p */
+ "\xb9\x36\x3a\xf1\x82\x1f\x60\xd3\x22\x47\xb8\xbc\x2d\x22\x6b\x81"
+ "\x7f\xe8\x20\x06\x09\x23\x73\x49\x9a\x59\x8b\x35\x25\xf8\x31\xbc"
+ "\x7d\xa8\x1c\x9d\x56\x0d\x1a\xf7\x4b\x4f\x96\xa4\x35\x77\x6a\x89"
+ "\xab\x42\x00\x49\x21\x71\xed\x28\x16\x1d\x87\x5a\x10\xa7\x9c\x64"
+ "\x94\xd4\x87\x3d\x28\xef\x44\xfe\x4b\xe2\xb4\x15\x8c\x82\xa6\xf3"
+ "\x50\x5f\xa8\xe8\xa2\x60\xe7\x00\x86\x78\x05\xd4\x78\x19\xa1\x98"
+ "\x62\x4e\x4a\x00\x78\x56\x96\xe6\xcf\xd7\x10\x1b\x74\x5d\xd0\x26"
+ "\x61\xdb\x6b\x32\x09\x51\xd8\xa5\xfd\x54\x16\x71\x01\xb3\x39\xe6"
+ "\x4e\x69\xb1\xd7\x06\x8f\xd6\x1e\xdc\x72\x25\x26\x74\xc8\x41\x06"
+ "\x5c\xd1\x26\x5c\xb0\x2f\xf9\x59\x13\xc1\x2a\x0f\x78\xea\x7b\xf7"
+ "\xbd\x59\xa0\x90\x1d\xfc\x33\x5b\x4c\xbf\x05\x9c\x3a\x3f\x69\xa2"
+ "\x45\x61\x4e\x10\x6a\xb3\x17\xc5\x68\x30\xfb\x07\x5f\x34\xc6\xfb"
+ "\x73\x07\x3c\x70\xf6\xae\xe7\x72\x84\xc3\x18\x81\x8f\xe8\x11\x1f"
+ "\x3d\x83\x83\x01\x2a\x14\x73\xbf\x32\x32\x2e\xc9\x4d\xdb\x2a\xca"
+ "\xee\x71\xf9\xda\xad\xe8\x82\x0b\x4d\x0c\x1f\xb6\x1d\xef\x00\x67"
+ "\x74\x3d\x95\xe0\xb7\xc4\x30\x8a\x24\x87\x12\x47\x27\x70\x0d\x73"
+ /* g */
+ "\x02",
+ .b_public =
+ "\x99\x4d\xd9\x01\x84\x8e\x4a\x5b\xb8\xa5\x64\x8c\x6c\x00\x5c\x0e"
+ "\x1e\x1b\xee\x5d\x9f\x53\xe3\x16\x70\x01\xed\xbf\x4f\x14\x36\x6e"
+ "\xe4\x43\x45\x43\x49\xcc\xb1\xb0\x2a\xc0\x6f\x22\x55\x42\x17\x94"
+ "\x18\x83\xd7\x2a\x5c\x51\x54\xf8\x4e\x7c\x10\xda\x76\x68\x57\x77"
+ "\x1e\x62\x03\x30\x04\x7b\x4c\x39\x9c\x54\x01\x54\xec\xef\xb3\x55"
+ "\xa4\xc0\x24\x6d\x3d\xbd\xcc\x46\x5b\x00\x96\xc7\xea\x93\xd1\x3f"
+ "\xf2\x6a\x72\xe3\xf2\xc1\x92\x24\x5b\xda\x48\x70\x2c\xa9\x59\x97"
+ "\x19\xb1\xd6\x54\xb3\x9c\x2e\xb0\x63\x07\x9b\x5e\xac\xb5\xf2\xb1"
+ "\x5b\xf8\xf3\xd7\x2d\x37\x9b\x68\x6c\xf8\x90\x07\xbc\x37\x9a\xa5"
+ "\xe2\x91\x12\x25\x47\x77\xe3\x3d\xb2\x95\x69\x44\x0b\x91\x1e\xaf"
+ "\x7c\x8c\x7c\x34\x41\x6a\xab\x60\x6e\xc6\x52\xec\x7e\x94\x0a\x37"
+ "\xec\x98\x90\xdf\x3f\x02\xbd\x23\x52\xdd\xd9\xe5\x31\x80\x74\x25"
+ "\xb6\xd2\xd3\xcc\xd5\xcc\x6d\xf9\x7e\x4d\x78\xab\x77\x51\xfa\x77"
+ "\x19\x94\x49\x8c\x05\xd4\x75\xed\xd2\xb3\x64\x57\xe0\x52\x99\xc0"
+ "\x83\xe3\xbb\x5e\x2b\xf1\xd2\xc0\xb1\x37\x36\x0b\x7c\xb5\x63\x96"
+ "\x8e\xde\x04\x23\x11\x95\x62\x11\x9a\xce\x6f\x63\xc8\xd5\xd1\x8f",
+ .expected_a_public =
+ "\x90\x89\xe4\x82\xd6\x0a\xcf\x1a\xae\xce\x1b\x66\xa7\x19\x71\x18"
+ "\x8f\x95\x4b\x5b\x80\x45\x4a\x5a\x43\x99\x4d\x37\xcf\xa3\xa7\x28"
+ "\x9c\xc7\x73\xf1\xb2\x17\xf6\x99\xe3\x6b\x56\xcb\x3e\x35\x60\x7d"
+ "\x65\xc7\x84\x6b\x3e\x60\xee\xcd\xd2\x70\xe7\xc9\x32\x1c\xf0\xb4"
+ "\xf9\x52\xd9\x88\x75\xfd\x40\x2c\xa7\xbe\x19\x1c\x0a\xae\x93\xe1"
+ "\x71\xc7\xcd\x4f\x33\x5c\x10\x7d\x39\x56\xfc\x73\x84\xb2\x67\xc3"
+ "\x77\x26\x20\x97\x2b\xf8\x13\x43\x93\x9c\x9a\xa4\x08\xc7\x34\x83"
+ "\xe6\x98\x61\xe7\x16\x30\x2c\xb1\xdb\x2a\xb2\xcc\xc3\x02\xa5\x3c"
+ "\x71\x50\x14\x83\xc7\xbb\xa4\xbe\x98\x1b\xfe\xcb\x43\xe9\x97\x62"
+ "\xd6\xf0\x8c\xcb\x1c\xba\x1e\xa8\xa6\xa6\x50\xfc\x85\x7d\x47\xbf"
+ "\xf4\x3e\x23\xd3\x5f\xb2\x71\x3e\x40\x94\xaa\x87\x83\x2c\x6c\x8e"
+ "\x60\xfd\xdd\xf7\xf4\x76\x03\xd3\x1d\xec\x18\x51\xa3\xf2\x44\x1a"
+ "\x3f\xb4\x7c\x18\x0d\x68\x65\x92\x54\x0d\x2d\x81\x16\xf1\x84\x66"
+ "\x89\x92\xd0\x1a\x5e\x1f\x42\x46\x5b\xe5\x83\x86\x80\xd9\xcd\x3a"
+ "\x5a\x2f\xb9\x59\x9b\xe4\x43\x84\x64\xf3\x09\x1a\x0a\xa2\x64\x0f"
+ "\x77\x4e\x8d\x8b\xe6\x88\xd1\xfc\xaf\x8f\xdf\x1d\xbc\x31\xb3\xbd",
+ .expected_ss =
+ "\x34\xc3\x35\x14\x88\x46\x26\x23\x97\xbb\xdd\x28\x5c\x94\xf6\x47"
+ "\xca\xb3\x19\xaf\xca\x44\x9b\xc2\x7d\x89\xfd\x96\x14\xfd\x6d\x58"
+ "\xd8\xc4\x6b\x61\x2a\x0d\xf2\x36\x45\xc8\xe4\xa4\xed\x81\x53\x81"
+ "\x66\x1e\xe0\x5a\xb1\x78\x2d\x0b\x5c\xb4\xd1\xfc\x90\xc6\x9c\xdb"
+ "\x5a\x30\x0b\x14\x7d\xbe\xb3\x7d\xb1\xb2\x76\x3c\x6c\xef\x74\x6b"
+ "\xe7\x1f\x64\x0c\xab\x65\xe1\x76\x5c\x3d\x83\xb5\x8a\xfb\xaf\x0f"
+ "\xf2\x06\x14\x8f\xa0\xf6\xc1\x89\x78\xf2\xba\x72\x73\x3c\xf7\x76"
+ "\x21\x67\xbc\x24\x31\xb8\x09\x65\x0f\x0c\x02\x32\x4a\x98\x14\xfc"
+ "\x72\x2c\x25\x60\x68\x5f\x2f\x30\x1e\x5b\xf0\x3b\xd1\xa2\x87\xa0"
+ "\x54\xdf\xdb\xc0\xee\x0a\x0f\x47\xc9\x90\x20\x2c\xf9\xe3\x52\xad"
+ "\x27\x65\x8d\x54\x8d\xa8\xa1\xf3\xed\x15\xd4\x94\x28\x90\x31\x93"
+ "\x1b\xc0\x51\xbb\x43\x5d\x76\x3b\x1d\x2a\x71\x50\xea\x5d\x48\x94"
+ "\x7f\x6f\xf1\x48\xdb\x30\xe5\xae\x64\x79\xd9\x7a\xdb\xc6\xff\xd8"
+ "\x5e\x5a\x64\xbd\xf6\x85\x04\xe8\x28\x6a\xac\xef\xce\x19\x8e\x9a"
+ "\xfe\x75\xc0\x27\x69\xe3\xb3\x7b\x21\xa7\xb1\x16\xa4\x85\x23\xee"
+ "\xb0\x1b\x04\x6e\xbd\xab\x16\xde\xfd\x86\x6b\xa9\x95\xd7\x0b\xfd",
+ .secret_size = 529,
+ .b_public_size = 256,
+ .expected_a_public_size = 256,
+ .expected_ss_size = 256,
+ }
+};
+
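Each .secret blob above is the packed buffer that crypto_kpp_set_secret() passes to the "dh" implementation: a native-endian header (hence the __LITTLE_ENDIAN/big-endian split) followed by the private key xa, the prime p, and the generator g. The sizes are self-consistent: a 16-byte header plus 256 + 256 + 1 payload bytes gives the 529 that appears both in the len field (0x0211) and in .secret_size. A rough sketch of that layout; the struct name is illustrative and the authoritative packing lives in crypto/dh_helper.c:

    struct dh_secret_layout {        /* illustrative only */
        unsigned short type;         /* 1 == DH */
        unsigned short len;          /* whole blob: 529 here */
        unsigned int   key_size;     /* xa: 256 */
        unsigned int   p_size;       /* p:  256 */
        unsigned int   g_size;       /* g:    1 */
        /* followed by xa[key_size], p[p_size], g[g_size] */
    };  /* 2 + 2 + 4 + 4 + 4 + 256 + 256 + 1 = 529 bytes */
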
+#ifdef CONFIG_CRYPTO_FIPS
+#define ECDH_TEST_VECTORS 1
+#else
+#define ECDH_TEST_VECTORS 2
+#endif
+static struct kpp_testvec ecdh_tv_template[] = {
+ {
+#ifndef CONFIG_CRYPTO_FIPS
+ .secret =
+#ifdef __LITTLE_ENDIAN
+ "\x02\x00" /* type */
+ "\x20\x00" /* len */
+ "\x01\x00" /* curve_id */
+ "\x18\x00" /* key_size */
+#else
+ "\x00\x02" /* type */
+ "\x00\x20" /* len */
+ "\x00\x01" /* curve_id */
+ "\x00\x18" /* key_size */
+#endif
+ "\xb5\x05\xb1\x71\x1e\xbf\x8c\xda"
+ "\x4e\x19\x1e\x62\x1f\x23\x23\x31"
+ "\x36\x1e\xd3\x84\x2f\xcc\x21\x72",
+ .b_public =
+ "\xc3\xba\x67\x4b\x71\xec\xd0\x76"
+ "\x7a\x99\x75\x64\x36\x13\x9a\x94"
+ "\x5d\x8b\xdc\x60\x90\x91\xfd\x3f"
+ "\xb0\x1f\x8a\x0a\x68\xc6\x88\x6e"
+ "\x83\x87\xdd\x67\x09\xf8\x8d\x96"
+ "\x07\xd6\xbd\x1c\xe6\x8d\x9d\x67",
+ .expected_a_public =
+ "\x1a\x04\xdb\xa5\xe1\xdd\x4e\x79"
+ "\xa3\xe6\xef\x0e\x5c\x80\x49\x85"
+ "\xfa\x78\xb4\xef\x49\xbd\x4c\x7c"
+ "\x22\x90\x21\x02\xf9\x1b\x81\x5d"
+ "\x0c\x8a\xa8\x98\xd6\x27\x69\x88"
+ "\x5e\xbc\x94\xd8\x15\x9e\x21\xce",
+ .expected_ss =
+ "\xf4\x57\xcc\x4f\x1f\x4e\x31\xcc"
+ "\xe3\x40\x60\xc8\x06\x93\xc6\x2e"
+ "\x99\x80\x81\x28\xaf\xc5\x51\x74",
+ .secret_size = 32,
+ .b_public_size = 48,
+ .expected_a_public_size = 48,
+ .expected_ss_size = 24,
+ }, {
+#endif
+ .secret =
+#ifdef __LITTLE_ENDIAN
+ "\x02\x00" /* type */
+ "\x28\x00" /* len */
+ "\x02\x00" /* curve_id */
+ "\x20\x00" /* key_size */
+#else
+ "\x00\x02" /* type */
+ "\x00\x28" /* len */
+ "\x00\x02" /* curve_id */
+ "\x00\x20" /* key_size */
+#endif
+ "\x24\xd1\x21\xeb\xe5\xcf\x2d\x83"
+ "\xf6\x62\x1b\x6e\x43\x84\x3a\xa3"
+ "\x8b\xe0\x86\xc3\x20\x19\xda\x92"
+ "\x50\x53\x03\xe1\xc0\xea\xb8\x82",
+ .expected_a_public =
+ "\x1a\x7f\xeb\x52\x00\xbd\x3c\x31"
+ "\x7d\xb6\x70\xc1\x86\xa6\xc7\xc4"
+ "\x3b\xc5\x5f\x6c\x6f\x58\x3c\xf5"
+ "\xb6\x63\x82\x77\x33\x24\xa1\x5f"
+ "\x6a\xca\x43\x6f\xf7\x7e\xff\x02"
+ "\x37\x08\xcc\x40\x5e\x7a\xfd\x6a"
+ "\x6a\x02\x6e\x41\x87\x68\x38\x77"
+ "\xfa\xa9\x44\x43\x2d\xef\x09\xdf",
+ .expected_ss =
+ "\xea\x17\x6f\x7e\x6e\x57\x26\x38"
+ "\x8b\xfb\x41\xeb\xba\xc8\x6d\xa5"
+ "\xa8\x72\xd1\xff\xc9\x47\x3d\xaa"
+ "\x58\x43\x9f\x34\x0f\x8c\xf3\xc9",
+ .b_public =
+ "\xcc\xb4\xda\x74\xb1\x47\x3f\xea"
+ "\x6c\x70\x9e\x38\x2d\xc7\xaa\xb7"
+ "\x29\xb2\x47\x03\x19\xab\xdd\x34"
+ "\xbd\xa8\x2c\x93\xe1\xa4\x74\xd9"
+ "\x64\x63\xf7\x70\x20\x2f\xa4\xe6"
+ "\x9f\x4a\x38\xcc\xc0\x2c\x49\x2f"
+ "\xb1\x32\xbb\xaf\x22\x61\xda\xcb"
+ "\x6f\xdb\xa9\xaa\xfc\x77\x81\xf3",
+ .secret_size = 40,
+ .b_public_size = 64,
+ .expected_a_public_size = 64,
+ .expected_ss_size = 32,
}
};
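The ECDH secrets reuse the same packing scheme with a curve_id in place of the DH parameter sizes: an 8-byte header plus a 24-byte private key yields the 32-byte NIST P-192 blob, and 8 + 32 yields the 40-byte NIST P-256 one. The CONFIG_CRYPTO_FIPS guard above compiles the P-192 vector out, which is why ECDH_TEST_VECTORS drops to 1 in FIPS mode. A layout sketch with illustrative names (crypto/ecdh_helper.c holds the real packing):

    struct ecdh_secret_layout {      /* illustrative only */
        unsigned short type;         /* 2 == ECDH */
        unsigned short len;          /* 32 (P-192) or 40 (P-256) */
        unsigned short curve_id;     /* 1 == P-192, 2 == P-256 */
        unsigned short key_size;     /* 24 or 32 */
        /* followed by key[key_size] */
    };
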
@@ -376,6 +897,131 @@ static struct hash_testvec md4_tv_template [] = {
},
};
+#define SHA3_224_TEST_VECTORS 3
+static struct hash_testvec sha3_224_tv_template[] = {
+ {
+ .plaintext = "",
+ .digest = "\x6b\x4e\x03\x42\x36\x67\xdb\xb7"
+ "\x3b\x6e\x15\x45\x4f\x0e\xb1\xab"
+ "\xd4\x59\x7f\x9a\x1b\x07\x8e\x3f"
+ "\x5b\x5a\x6b\xc7",
+ }, {
+ .plaintext = "a",
+ .psize = 1,
+ .digest = "\x9e\x86\xff\x69\x55\x7c\xa9\x5f"
+ "\x40\x5f\x08\x12\x69\x68\x5b\x38"
+ "\xe3\xa8\x19\xb3\x09\xee\x94\x2f"
+ "\x48\x2b\x6a\x8b",
+ }, {
+ .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkl"
+ "jklmklmnlmnomnopnopq",
+ .psize = 56,
+ .digest = "\x8a\x24\x10\x8b\x15\x4a\xda\x21"
+ "\xc9\xfd\x55\x74\x49\x44\x79\xba"
+ "\x5c\x7e\x7a\xb7\x6e\xf2\x64\xea"
+ "\xd0\xfc\xce\x33",
+ },
+};
+
+#define SHA3_256_TEST_VECTORS 3
+static struct hash_testvec sha3_256_tv_template[] = {
+ {
+ .plaintext = "",
+ .digest = "\xa7\xff\xc6\xf8\xbf\x1e\xd7\x66"
+ "\x51\xc1\x47\x56\xa0\x61\xd6\x62"
+ "\xf5\x80\xff\x4d\xe4\x3b\x49\xfa"
+ "\x82\xd8\x0a\x4b\x80\xf8\x43\x4a",
+ }, {
+ .plaintext = "a",
+ .psize = 1,
+ .digest = "\x80\x08\x4b\xf2\xfb\xa0\x24\x75"
+ "\x72\x6f\xeb\x2c\xab\x2d\x82\x15"
+ "\xea\xb1\x4b\xc6\xbd\xd8\xbf\xb2"
+ "\xc8\x15\x12\x57\x03\x2e\xcd\x8b",
+ }, {
+ .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkl"
+ "jklmklmnlmnomnopnopq",
+ .psize = 56,
+ .digest = "\x41\xc0\xdb\xa2\xa9\xd6\x24\x08"
+ "\x49\x10\x03\x76\xa8\x23\x5e\x2c"
+ "\x82\xe1\xb9\x99\x8a\x99\x9e\x21"
+ "\xdb\x32\xdd\x97\x49\x6d\x33\x76",
+ },
+};
+
+#define SHA3_384_TEST_VECTORS 3
+static struct hash_testvec sha3_384_tv_template[] = {
+ {
+ .plaintext = "",
+ .digest = "\x0c\x63\xa7\x5b\x84\x5e\x4f\x7d"
+ "\x01\x10\x7d\x85\x2e\x4c\x24\x85"
+ "\xc5\x1a\x50\xaa\xaa\x94\xfc\x61"
+ "\x99\x5e\x71\xbb\xee\x98\x3a\x2a"
+ "\xc3\x71\x38\x31\x26\x4a\xdb\x47"
+ "\xfb\x6b\xd1\xe0\x58\xd5\xf0\x04",
+ }, {
+ .plaintext = "a",
+ .psize = 1,
+ .digest = "\x18\x15\xf7\x74\xf3\x20\x49\x1b"
+ "\x48\x56\x9e\xfe\xc7\x94\xd2\x49"
+ "\xee\xb5\x9a\xae\x46\xd2\x2b\xf7"
+ "\x7d\xaf\xe2\x5c\x5e\xdc\x28\xd7"
+ "\xea\x44\xf9\x3e\xe1\x23\x4a\xa8"
+ "\x8f\x61\xc9\x19\x12\xa4\xcc\xd9",
+ }, {
+ .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkl"
+ "jklmklmnlmnomnopnopq",
+ .psize = 56,
+ .digest = "\x99\x1c\x66\x57\x55\xeb\x3a\x4b"
+ "\x6b\xbd\xfb\x75\xc7\x8a\x49\x2e"
+ "\x8c\x56\xa2\x2c\x5c\x4d\x7e\x42"
+ "\x9b\xfd\xbc\x32\xb9\xd4\xad\x5a"
+ "\xa0\x4a\x1f\x07\x6e\x62\xfe\xa1"
+ "\x9e\xef\x51\xac\xd0\x65\x7c\x22",
+ },
+};
+
+#define SHA3_512_TEST_VECTORS 3
+static struct hash_testvec sha3_512_tv_template[] = {
+ {
+ .plaintext = "",
+ .digest = "\xa6\x9f\x73\xcc\xa2\x3a\x9a\xc5"
+ "\xc8\xb5\x67\xdc\x18\x5a\x75\x6e"
+ "\x97\xc9\x82\x16\x4f\xe2\x58\x59"
+ "\xe0\xd1\xdc\xc1\x47\x5c\x80\xa6"
+ "\x15\xb2\x12\x3a\xf1\xf5\xf9\x4c"
+ "\x11\xe3\xe9\x40\x2c\x3a\xc5\x58"
+ "\xf5\x00\x19\x9d\x95\xb6\xd3\xe3"
+ "\x01\x75\x85\x86\x28\x1d\xcd\x26",
+ }, {
+ .plaintext = "a",
+ .psize = 1,
+ .digest = "\x69\x7f\x2d\x85\x61\x72\xcb\x83"
+ "\x09\xd6\xb8\xb9\x7d\xac\x4d\xe3"
+ "\x44\xb5\x49\xd4\xde\xe6\x1e\xdf"
+ "\xb4\x96\x2d\x86\x98\xb7\xfa\x80"
+ "\x3f\x4f\x93\xff\x24\x39\x35\x86"
+ "\xe2\x8b\x5b\x95\x7a\xc3\xd1\xd3"
+ "\x69\x42\x0c\xe5\x33\x32\x71\x2f"
+ "\x99\x7b\xd3\x36\xd0\x9a\xb0\x2a",
+ }, {
+ .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkl"
+ "jklmklmnlmnomnopnopq",
+ .psize = 56,
+ .digest = "\x04\xa3\x71\xe8\x4e\xcf\xb5\xb8"
+ "\xb7\x7c\xb4\x86\x10\xfc\xa8\x18"
+ "\x2d\xd4\x57\xce\x6f\x32\x6a\x0f"
+ "\xd3\xd7\xec\x2f\x1e\x91\x63\x6d"
+ "\xee\x69\x1f\xbe\x0c\x98\x53\x02"
+ "\xba\x1b\x0d\x8d\xc7\x8c\x08\x63"
+ "\x46\xb5\x33\xb4\x9c\x03\x0d\x99"
+ "\xa2\x7d\xaf\x11\x39\xd6\xe7\x5e",
+ },
+};
+
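All four SHA-3 templates exercise the same trio of inputs (the empty message, "a", and the 56-byte "abcdbcde..." string), with digests matching the FIPS 202 reference values. A minimal sketch of checking one such vector against the generic driver through the shash API; the function name and the hard-coded digest size are this annotation's own, not part of the patch:

    #include <crypto/hash.h>
    #include <linux/err.h>
    #include <linux/string.h>

    static int sha3_224_check(const u8 *msg, unsigned int len, const u8 *want)
    {
        struct crypto_shash *tfm = crypto_alloc_shash("sha3-224", 0, 0);
        u8 out[28];    /* SHA3-224 digest size */
        int err;

        if (IS_ERR(tfm))
            return PTR_ERR(tfm);
        {
            SHASH_DESC_ON_STACK(desc, tfm);

            desc->tfm = tfm;
            err = crypto_shash_digest(desc, msg, len, out);
        }
        crypto_free_shash(tfm);
        return err ? err : (memcmp(out, want, sizeof(out)) ? -EINVAL : 0);
    }
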
/*
* MD5 test vectors from RFC1321
*/
@@ -3246,6 +3892,394 @@ static struct hash_testvec hmac_sha512_tv_template[] = {
},
};
+#define HMAC_SHA3_224_TEST_VECTORS 4
+
+static struct hash_testvec hmac_sha3_224_tv_template[] = {
+ {
+ .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b",
+ .ksize = 20,
+ .plaintext = "Hi There",
+ .psize = 8,
+ .digest = "\x3b\x16\x54\x6b\xbc\x7b\xe2\x70"
+ "\x6a\x03\x1d\xca\xfd\x56\x37\x3d"
+ "\x98\x84\x36\x76\x41\xd8\xc5\x9a"
+ "\xf3\xc8\x60\xf7",
+ }, {
+ .key = "Jefe",
+ .ksize = 4,
+ .plaintext = "what do ya want for nothing?",
+ .psize = 28,
+ .digest = "\x7f\xdb\x8d\xd8\x8b\xd2\xf6\x0d"
+ "\x1b\x79\x86\x34\xad\x38\x68\x11"
+ "\xc2\xcf\xc8\x5b\xfa\xf5\xd5\x2b"
+ "\xba\xce\x5e\x66",
+ .np = 4,
+ .tap = { 7, 7, 7, 7 }
+ }, {
+ .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa",
+ .ksize = 131,
+ .plaintext = "Test Using Large"
+ "r Than Block-Siz"
+ "e Key - Hash Key"
+ " First",
+ .psize = 54,
+ .digest = "\xb4\xa1\xf0\x4c\x00\x28\x7a\x9b"
+ "\x7f\x60\x75\xb3\x13\xd2\x79\xb8"
+ "\x33\xbc\x8f\x75\x12\x43\x52\xd0"
+ "\x5f\xb9\x99\x5f",
+ }, {
+ .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa",
+ .ksize = 131,
+ .plaintext =
+ "This is a test u"
+ "sing a larger th"
+ "an block-size ke"
+ "y and a larger t"
+ "han block-size d"
+ "ata. The key nee"
+ "ds to be hashed "
+ "before being use"
+ "d by the HMAC al"
+ "gorithm.",
+ .psize = 152,
+ .digest = "\x05\xd8\xcd\x6d\x00\xfa\xea\x8d"
+ "\x1e\xb6\x8a\xde\x28\x73\x0b\xbd"
+ "\x3c\xba\xb6\x92\x9f\x0a\x08\x6b"
+ "\x29\xcd\x62\xa0",
+ },
+};
+
+#define HMAC_SHA3_256_TEST_VECTORS 4
+
+static struct hash_testvec hmac_sha3_256_tv_template[] = {
+ {
+ .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b",
+ .ksize = 20,
+ .plaintext = "Hi There",
+ .psize = 8,
+ .digest = "\xba\x85\x19\x23\x10\xdf\xfa\x96"
+ "\xe2\xa3\xa4\x0e\x69\x77\x43\x51"
+ "\x14\x0b\xb7\x18\x5e\x12\x02\xcd"
+ "\xcc\x91\x75\x89\xf9\x5e\x16\xbb",
+ }, {
+ .key = "Jefe",
+ .ksize = 4,
+ .plaintext = "what do ya want for nothing?",
+ .psize = 28,
+ .digest = "\xc7\xd4\x07\x2e\x78\x88\x77\xae"
+ "\x35\x96\xbb\xb0\xda\x73\xb8\x87"
+ "\xc9\x17\x1f\x93\x09\x5b\x29\x4a"
+ "\xe8\x57\xfb\xe2\x64\x5e\x1b\xa5",
+ .np = 4,
+ .tap = { 7, 7, 7, 7 }
+ }, {
+ .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa",
+ .ksize = 131,
+ .plaintext = "Test Using Large"
+ "r Than Block-Siz"
+ "e Key - Hash Key"
+ " First",
+ .psize = 54,
+ .digest = "\xed\x73\xa3\x74\xb9\x6c\x00\x52"
+ "\x35\xf9\x48\x03\x2f\x09\x67\x4a"
+ "\x58\xc0\xce\x55\x5c\xfc\x1f\x22"
+ "\x3b\x02\x35\x65\x60\x31\x2c\x3b",
+ }, {
+ .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa",
+ .ksize = 131,
+ .plaintext =
+ "This is a test u"
+ "sing a larger th"
+ "an block-size ke"
+ "y and a larger t"
+ "han block-size d"
+ "ata. The key nee"
+ "ds to be hashed "
+ "before being use"
+ "d by the HMAC al"
+ "gorithm.",
+ .psize = 152,
+ .digest = "\x65\xc5\xb0\x6d\x4c\x3d\xe3\x2a"
+ "\x7a\xef\x87\x63\x26\x1e\x49\xad"
+ "\xb6\xe2\x29\x3e\xc8\xe7\xc6\x1e"
+ "\x8d\xe6\x17\x01\xfc\x63\xe1\x23",
+ },
+};
+
+#define HMAC_SHA3_384_TEST_VECTORS 4
+
+static struct hash_testvec hmac_sha3_384_tv_template[] = {
+ {
+ .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b",
+ .ksize = 20,
+ .plaintext = "Hi There",
+ .psize = 8,
+ .digest = "\x68\xd2\xdc\xf7\xfd\x4d\xdd\x0a"
+ "\x22\x40\xc8\xa4\x37\x30\x5f\x61"
+ "\xfb\x73\x34\xcf\xb5\xd0\x22\x6e"
+ "\x1b\xc2\x7d\xc1\x0a\x2e\x72\x3a"
+ "\x20\xd3\x70\xb4\x77\x43\x13\x0e"
+ "\x26\xac\x7e\x3d\x53\x28\x86\xbd",
+ }, {
+ .key = "Jefe",
+ .ksize = 4,
+ .plaintext = "what do ya want for nothing?",
+ .psize = 28,
+ .digest = "\xf1\x10\x1f\x8c\xbf\x97\x66\xfd"
+ "\x67\x64\xd2\xed\x61\x90\x3f\x21"
+ "\xca\x9b\x18\xf5\x7c\xf3\xe1\xa2"
+ "\x3c\xa1\x35\x08\xa9\x32\x43\xce"
+ "\x48\xc0\x45\xdc\x00\x7f\x26\xa2"
+ "\x1b\x3f\x5e\x0e\x9d\xf4\xc2\x0a",
+ .np = 4,
+ .tap = { 7, 7, 7, 7 }
+ }, {
+ .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa",
+ .ksize = 131,
+ .plaintext = "Test Using Large"
+ "r Than Block-Siz"
+ "e Key - Hash Key"
+ " First",
+ .psize = 54,
+ .digest = "\x0f\xc1\x95\x13\xbf\x6b\xd8\x78"
+ "\x03\x70\x16\x70\x6a\x0e\x57\xbc"
+ "\x52\x81\x39\x83\x6b\x9a\x42\xc3"
+ "\xd4\x19\xe4\x98\xe0\xe1\xfb\x96"
+ "\x16\xfd\x66\x91\x38\xd3\x3a\x11"
+ "\x05\xe0\x7c\x72\xb6\x95\x3b\xcc",
+ }, {
+ .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa",
+ .ksize = 131,
+ .plaintext =
+ "This is a test u"
+ "sing a larger th"
+ "an block-size ke"
+ "y and a larger t"
+ "han block-size d"
+ "ata. The key nee"
+ "ds to be hashed "
+ "before being use"
+ "d by the HMAC al"
+ "gorithm.",
+ .psize = 152,
+ .digest = "\x02\x6f\xdf\x6b\x50\x74\x1e\x37"
+ "\x38\x99\xc9\xf7\xd5\x40\x6d\x4e"
+ "\xb0\x9f\xc6\x66\x56\x36\xfc\x1a"
+ "\x53\x00\x29\xdd\xf5\xcf\x3c\xa5"
+ "\xa9\x00\xed\xce\x01\xf5\xf6\x1e"
+ "\x2f\x40\x8c\xdf\x2f\xd3\xe7\xe8",
+ },
+};
+
+#define HMAC_SHA3_512_TEST_VECTORS 4
+
+static struct hash_testvec hmac_sha3_512_tv_template[] = {
+ {
+ .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b",
+ .ksize = 20,
+ .plaintext = "Hi There",
+ .psize = 8,
+ .digest = "\xeb\x3f\xbd\x4b\x2e\xaa\xb8\xf5"
+ "\xc5\x04\xbd\x3a\x41\x46\x5a\xac"
+ "\xec\x15\x77\x0a\x7c\xab\xac\x53"
+ "\x1e\x48\x2f\x86\x0b\x5e\xc7\xba"
+ "\x47\xcc\xb2\xc6\xf2\xaf\xce\x8f"
+ "\x88\xd2\x2b\x6d\xc6\x13\x80\xf2"
+ "\x3a\x66\x8f\xd3\x88\x8b\xb8\x05"
+ "\x37\xc0\xa0\xb8\x64\x07\x68\x9e",
+ }, {
+ .key = "Jefe",
+ .ksize = 4,
+ .plaintext = "what do ya want for nothing?",
+ .psize = 28,
+ .digest = "\x5a\x4b\xfe\xab\x61\x66\x42\x7c"
+ "\x7a\x36\x47\xb7\x47\x29\x2b\x83"
+ "\x84\x53\x7c\xdb\x89\xaf\xb3\xbf"
+ "\x56\x65\xe4\xc5\xe7\x09\x35\x0b"
+ "\x28\x7b\xae\xc9\x21\xfd\x7c\xa0"
+ "\xee\x7a\x0c\x31\xd0\x22\xa9\x5e"
+ "\x1f\xc9\x2b\xa9\xd7\x7d\xf8\x83"
+ "\x96\x02\x75\xbe\xb4\xe6\x20\x24",
+ .np = 4,
+ .tap = { 7, 7, 7, 7 }
+ }, {
+ .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa",
+ .ksize = 131,
+ .plaintext = "Test Using Large"
+ "r Than Block-Siz"
+ "e Key - Hash Key"
+ " First",
+ .psize = 54,
+ .digest = "\x00\xf7\x51\xa9\xe5\x06\x95\xb0"
+ "\x90\xed\x69\x11\xa4\xb6\x55\x24"
+ "\x95\x1c\xdc\x15\xa7\x3a\x5d\x58"
+ "\xbb\x55\x21\x5e\xa2\xcd\x83\x9a"
+ "\xc7\x9d\x2b\x44\xa3\x9b\xaf\xab"
+ "\x27\xe8\x3f\xde\x9e\x11\xf6\x34"
+ "\x0b\x11\xd9\x91\xb1\xb9\x1b\xf2"
+ "\xee\xe7\xfc\x87\x24\x26\xc3\xa4",
+ }, {
+ .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa",
+ .ksize = 131,
+ .plaintext =
+ "This is a test u"
+ "sing a larger th"
+ "an block-size ke"
+ "y and a larger t"
+ "han block-size d"
+ "ata. The key nee"
+ "ds to be hashed "
+ "before being use"
+ "d by the HMAC al"
+ "gorithm.",
+ .psize = 152,
+ .digest = "\x38\xa4\x56\xa0\x04\xbd\x10\xd3"
+ "\x2c\x9a\xb8\x33\x66\x84\x11\x28"
+ "\x62\xc3\xdb\x61\xad\xcc\xa3\x18"
+ "\x29\x35\x5e\xaf\x46\xfd\x5c\x73"
+ "\xd0\x6a\x1f\x0d\x13\xfe\xc9\xa6"
+ "\x52\xfb\x38\x11\xb5\x77\xb1\xb1"
+ "\xd1\xb9\x78\x9f\x97\xae\x5b\x83"
+ "\xc6\xf4\x4d\xfc\xf1\xd6\x7e\xba",
+ },
+};
+
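Beyond the one-shot cases, the "Jefe" entry in each HMAC template sets .np = 4 and .tap = { 7, 7, 7, 7 }: testmgr feeds the 28-byte message to the hash in four 7-byte scatterlist chunks to exercise partial updates, so the taps must sum to .psize. A sketch of the equivalent splitting through the shash interface (testmgr itself builds a scatterlist for the ahash API; the key is assumed to have been set with crypto_shash_setkey() already, and the helper name is this annotation's own):

    static int hmac_digest_chunked(struct shash_desc *desc,
                                   const struct hash_testvec *tv, u8 *out)
    {
        unsigned int i, off = 0;
        int err = crypto_shash_init(desc);

        for (i = 0; !err && i < tv->np; i++) {
            err = crypto_shash_update(desc,
                                      (const u8 *)tv->plaintext + off,
                                      tv->tap[i]);
            off += tv->tap[i];    /* taps sum to tv->psize */
        }
        return err ? err : crypto_shash_final(desc, out);
    }
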
/*
* Poly1305 test vectors from RFC7539 A.3.
*/