From 652d5b8a8da8f05f7fb301067ffeef78b6f2eb01 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Fri, 23 Oct 2015 14:10:36 +0200 Subject: crypto: algif - Change some variables to size_t Some variables are set as int but store only positive values. Furthermore, they are used in operations/functions that expect unsigned values. This patch sets them as size_t. Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu --- crypto/algif_aead.c | 6 +++--- crypto/algif_skcipher.c | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'crypto') diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 0aa6fdfb448a..f70bcf844975 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -213,7 +213,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) } while (size) { - unsigned long len = size; + size_t len = size; struct scatterlist *sg = NULL; /* use the existing memory in an allocated page */ @@ -247,7 +247,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) /* allocate a new page */ len = min_t(unsigned long, size, aead_sndbuf(sk)); while (len) { - int plen = 0; + size_t plen = 0; if (sgl->cur >= ALG_MAX_PAGES) { aead_put_sgl(sk); @@ -256,7 +256,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) } sg = sgl->sg + sgl->cur; - plen = min_t(int, len, PAGE_SIZE); + plen = min_t(size_t, len, PAGE_SIZE); sg_assign_page(sg, alloc_page(GFP_KERNEL)); err = -ENOMEM; diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index af31a0ee4057..bbb1b66e969c 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -40,7 +40,7 @@ struct skcipher_ctx { struct af_alg_completion completion; atomic_t inflight; - unsigned used; + size_t used; unsigned int len; bool more; @@ -153,7 +153,7 @@ static int skcipher_alloc_sgl(struct sock *sk) return 0; } -static void skcipher_pull_sgl(struct sock *sk, int used, int put) +static void skcipher_pull_sgl(struct sock *sk, size_t used, int put) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; @@ -167,7 +167,7 @@ static void skcipher_pull_sgl(struct sock *sk, int used, int put) sg = sgl->sg; for (i = 0; i < sgl->cur; i++) { - int plen = min_t(int, used, sg[i].length); + size_t plen = min_t(size_t, used, sg[i].length); if (!sg_page(sg + i)) continue; @@ -348,7 +348,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, while (size) { struct scatterlist *sg; unsigned long len = size; - int plen; + size_t plen; if (ctx->merge) { sgl = list_entry(ctx->tsgl.prev, @@ -390,7 +390,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, sg_unmark_end(sg + sgl->cur); do { i = sgl->cur; - plen = min_t(int, len, PAGE_SIZE); + plen = min_t(size_t, len, PAGE_SIZE); sg_assign_page(sg + i, alloc_page(GFP_KERNEL)); err = -ENOMEM; -- cgit v1.2.3 From 457e6f73a12bb713cc3eec2c979d707cb5716a07 Mon Sep 17 00:00:00 2001 From: Andrzej Zaborowski Date: Fri, 13 Nov 2015 12:01:33 +0100 Subject: crypto: rsa - only require output buffers as big as needed. The RSA operations explicitly left-align the integers being written, skipping any leading zero bytes, but still require the output buffers to include just enough space for the integer + the leading zero bytes. Since the size of the integer + the leading zero bytes (i.e. the key modulus size) can now be obtained more easily through crypto_akcipher_maxsize, change the operations to only require as big a buffer as actually needed if the caller has that information. 
The semantics for request->dst_len don't change. Signed-off-by: Andrew Zaborowski Signed-off-by: Herbert Xu --- crypto/rsa.c | 24 ------------------------ 1 file changed, 24 deletions(-) (limited to 'crypto') diff --git a/crypto/rsa.c b/crypto/rsa.c index 1093e041db03..58aad69a490c 100644 --- a/crypto/rsa.c +++ b/crypto/rsa.c @@ -91,12 +91,6 @@ static int rsa_enc(struct akcipher_request *req) goto err_free_c; } - if (req->dst_len < mpi_get_size(pkey->n)) { - req->dst_len = mpi_get_size(pkey->n); - ret = -EOVERFLOW; - goto err_free_c; - } - ret = -ENOMEM; m = mpi_read_raw_from_sgl(req->src, req->src_len); if (!m) @@ -136,12 +130,6 @@ static int rsa_dec(struct akcipher_request *req) goto err_free_m; } - if (req->dst_len < mpi_get_size(pkey->n)) { - req->dst_len = mpi_get_size(pkey->n); - ret = -EOVERFLOW; - goto err_free_m; - } - ret = -ENOMEM; c = mpi_read_raw_from_sgl(req->src, req->src_len); if (!c) @@ -180,12 +168,6 @@ static int rsa_sign(struct akcipher_request *req) goto err_free_s; } - if (req->dst_len < mpi_get_size(pkey->n)) { - req->dst_len = mpi_get_size(pkey->n); - ret = -EOVERFLOW; - goto err_free_s; - } - ret = -ENOMEM; m = mpi_read_raw_from_sgl(req->src, req->src_len); if (!m) @@ -225,12 +207,6 @@ static int rsa_verify(struct akcipher_request *req) goto err_free_m; } - if (req->dst_len < mpi_get_size(pkey->n)) { - req->dst_len = mpi_get_size(pkey->n); - ret = -EOVERFLOW; - goto err_free_m; - } - ret = -ENOMEM; s = mpi_read_raw_from_sgl(req->src, req->src_len); if (!s) { -- cgit v1.2.3 From 304e4818d4a45e83019ea30e4cfcb3ac2a8ce09a Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Mon, 16 Nov 2015 22:37:14 +0800 Subject: crypto: api - use list_first_entry_or_null and list_next_entry Simplify crypto_more_spawns() with list_first_entry_or_null() and list_next_entry(). Signed-off-by: Geliang Tang Signed-off-by: Herbert Xu --- crypto/algapi.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'crypto') diff --git a/crypto/algapi.c b/crypto/algapi.c index 59bf491fe3d8..7be76aa31579 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -93,16 +93,15 @@ static struct list_head *crypto_more_spawns(struct crypto_alg *alg, { struct crypto_spawn *spawn, *n; - if (list_empty(stack)) + spawn = list_first_entry_or_null(stack, struct crypto_spawn, list); + if (!spawn) return NULL; - spawn = list_first_entry(stack, struct crypto_spawn, list); - n = list_entry(spawn->list.next, struct crypto_spawn, list); + n = list_next_entry(spawn, list); if (spawn->alg && &n->list != stack && !n->alg) n->alg = (n->list.next == stack) ? alg : - &list_entry(n->list.next, struct crypto_spawn, - list)->inst->alg; + &list_next_entry(n, list)->inst->alg; list_move(&spawn->list, secondary_spawns); -- cgit v1.2.3 From 08346170d4483d58b8971fe9ff2a1318fd93d121 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Mon, 16 Nov 2015 22:37:15 +0800 Subject: crypto: mcryptd - use list_first_entry_or_null() Simplify mcryptd_opportunistic_flush() with list_first_entry_or_null(). 
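For readers unfamiliar with the helper, a minimal self-contained sketch of the idiom (the struct foo example is hypothetical, not part of the patch):

    #include <linux/list.h>

    struct foo {
            int val;
            struct list_head list;
    };

    /* list_first_entry_or_null() folds the list_empty() check and the
     * list_entry() lookup into a single call that returns NULL when the
     * list is empty, so callers need only one NULL test. */
    static struct foo *foo_peek(struct list_head *head)
    {
            return list_first_entry_or_null(head, struct foo, list);
    }
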
Signed-off-by: Geliang Tang Signed-off-by: Herbert Xu --- crypto/mcryptd.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'crypto') diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c index fe5b495a434d..f78d4fc4e38a 100644 --- a/crypto/mcryptd.c +++ b/crypto/mcryptd.c @@ -128,13 +128,9 @@ static void mcryptd_opportunistic_flush(void) flist = per_cpu_ptr(mcryptd_flist, smp_processor_id()); while (single_task_running()) { mutex_lock(&flist->lock); - if (list_empty(&flist->list)) { - mutex_unlock(&flist->lock); - return; - } - cstate = list_entry(flist->list.next, + cstate = list_first_entry_or_null(&flist->list, struct mcryptd_alg_cstate, flush_list); - if (!cstate->flusher_engaged) { + if (!cstate || !cstate->flusher_engaged) { mutex_unlock(&flist->lock); return; } -- cgit v1.2.3 From f18611da8683da19267e30187a191af7fa670206 Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Tue, 17 Nov 2015 13:37:10 +0100 Subject: crypto: tcrypt - fix keysize argument of test_aead_speed for gcm(aes) The key sizes used by AES in GCM mode should be 128, 192 or 256 bits (16, 24 or 32 bytes). There is no additional 4-byte nonce as for RFC 4106. Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 46a4a757d478..270bc4b82bd9 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1789,7 +1789,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec, NULL, 0, 16, 16, aead_speed_template_20); test_aead_speed("gcm(aes)", ENCRYPT, sec, - NULL, 0, 16, 8, aead_speed_template_20); + NULL, 0, 16, 8, speed_template_16_24_32); break; case 212: -- cgit v1.2.3 From c012a79d0ce95bd8488a5a44cd8c00c275774518 Mon Sep 17 00:00:00 2001 From: Alexander Kuleshov Date: Wed, 25 Nov 2015 23:48:28 +0600 Subject: crypto: cryptd - use crypto_skcipher_type() for getting skcipher type The crypto API provides the inline function crypto_skcipher_type(). Let's use it in cryptd_alloc_ablkcipher() instead of computing the type directly. Signed-off-by: Alexander Kuleshov Signed-off-by: Herbert Xu --- crypto/cryptd.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/cryptd.c b/crypto/cryptd.c index c81861b1350b..c4af8aa1c304 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -887,8 +887,7 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name, if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) return ERR_PTR(-EINVAL); - type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); - type |= CRYPTO_ALG_TYPE_BLKCIPHER; + type = crypto_skcipher_type(type); mask &= ~CRYPTO_ALG_TYPE_MASK; mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK); tfm = crypto_alloc_base(cryptd_alg_name, type, mask); -- cgit v1.2.3 From 1a07834024dfca5c4bed5de8f8714306e0a11836 Mon Sep 17 00:00:00 2001 From: "Wang, Rui Y" Date: Sun, 29 Nov 2015 22:45:34 +0800 Subject: crypto: cryptd - Assign statesize properly cryptd_create_hash() fails by returning -EINVAL. This is because, after commit 8996eafdc ("crypto: ahash - ensure statesize is non-zero"), all ahash drivers must have a non-zero statesize. This patch fixes the problem by properly assigning the statesize. 
Signed-off-by: Rui Wang Signed-off-by: Herbert Xu --- crypto/cryptd.c | 1 + 1 file changed, 1 insertion(+) (limited to 'crypto') diff --git a/crypto/cryptd.c b/crypto/cryptd.c index c4af8aa1c304..7921251cdb13 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -637,6 +637,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.halg.base.cra_flags = type; inst->alg.halg.digestsize = salg->digestsize; + inst->alg.halg.statesize = salg->statesize; inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx); inst->alg.halg.base.cra_init = cryptd_hash_init_tfm; -- cgit v1.2.3 From 28a4618ad14cf17009a87d8b5718132a5d4ef852 Mon Sep 17 00:00:00 2001 From: Andrzej Zaborowski Date: Sat, 5 Dec 2015 17:09:33 +0100 Subject: crypto: akcipher - add akcipher declarations needed by templates. Add a struct akcipher_instance and struct akcipher_spawn similar to how AEAD declares them and the macros for converting to/from crypto_instance/crypto_spawn. Also add register functions to avoid exposing crypto_akcipher_type. Signed-off-by: Andrew Zaborowski Signed-off-by: Herbert Xu --- crypto/akcipher.c | 34 ++++++++++++++++- include/crypto/internal/akcipher.h | 78 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 111 insertions(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/akcipher.c b/crypto/akcipher.c index 120ec042ec9e..def301ed1288 100644 --- a/crypto/akcipher.c +++ b/crypto/akcipher.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "internal.h" #ifdef CONFIG_NET @@ -75,9 +76,17 @@ static int crypto_akcipher_init_tfm(struct crypto_tfm *tfm) return 0; } +static void crypto_akcipher_free_instance(struct crypto_instance *inst) +{ + struct akcipher_instance *akcipher = akcipher_instance(inst); + + akcipher->free(akcipher); +} + static const struct crypto_type crypto_akcipher_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_akcipher_init_tfm, + .free = crypto_akcipher_free_instance, #ifdef CONFIG_PROC_FS .show = crypto_akcipher_show, #endif @@ -88,6 +97,14 @@ static const struct crypto_type crypto_akcipher_type = { .tfmsize = offsetof(struct crypto_akcipher, base), }; +int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name, + u32 type, u32 mask) +{ + spawn->base.frontend = &crypto_akcipher_type; + return crypto_grab_spawn(&spawn->base, name, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_grab_akcipher); + struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type, u32 mask) { @@ -95,13 +112,20 @@ struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type, } EXPORT_SYMBOL_GPL(crypto_alloc_akcipher); -int crypto_register_akcipher(struct akcipher_alg *alg) +static void akcipher_prepare_alg(struct akcipher_alg *alg) { struct crypto_alg *base = &alg->base; base->cra_type = &crypto_akcipher_type; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER; +} + +int crypto_register_akcipher(struct akcipher_alg *alg) +{ + struct crypto_alg *base = &alg->base; + + akcipher_prepare_alg(alg); return crypto_register_alg(base); } EXPORT_SYMBOL_GPL(crypto_register_akcipher); @@ -112,5 +136,13 @@ void crypto_unregister_akcipher(struct akcipher_alg *alg) } EXPORT_SYMBOL_GPL(crypto_unregister_akcipher); +int akcipher_register_instance(struct crypto_template *tmpl, + struct akcipher_instance *inst) +{ + akcipher_prepare_alg(&inst->alg); + return crypto_register_instance(tmpl, akcipher_crypto_instance(inst)); +} +EXPORT_SYMBOL_GPL(akcipher_register_instance); + 
MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Generic public key cipher type"); diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h index 9a2bda15e454..479a0078f0f7 100644 --- a/include/crypto/internal/akcipher.h +++ b/include/crypto/internal/akcipher.h @@ -13,6 +13,22 @@ #ifndef _CRYPTO_AKCIPHER_INT_H #define _CRYPTO_AKCIPHER_INT_H #include +#include + +struct akcipher_instance { + void (*free)(struct akcipher_instance *inst); + union { + struct { + char head[offsetof(struct akcipher_alg, base)]; + struct crypto_instance base; + } s; + struct akcipher_alg alg; + }; +}; + +struct crypto_akcipher_spawn { + struct crypto_spawn base; +}; /* * Transform internal helpers. */ @@ -38,6 +54,56 @@ static inline const char *akcipher_alg_name(struct crypto_akcipher *tfm) { return crypto_akcipher_tfm(tfm)->__crt_alg->cra_name; } +static inline struct crypto_instance *akcipher_crypto_instance( + struct akcipher_instance *inst) +{ + return container_of(&inst->alg.base, struct crypto_instance, alg); +} + +static inline struct akcipher_instance *akcipher_instance( + struct crypto_instance *inst) +{ + return container_of(&inst->alg, struct akcipher_instance, alg.base); +} + +static inline struct akcipher_instance *akcipher_alg_instance( + struct crypto_akcipher *akcipher) +{ + return akcipher_instance(crypto_tfm_alg_instance(&akcipher->base)); +} + +static inline void *akcipher_instance_ctx(struct akcipher_instance *inst) +{ + return crypto_instance_ctx(akcipher_crypto_instance(inst)); +} + +static inline void crypto_set_akcipher_spawn( + struct crypto_akcipher_spawn *spawn, + struct crypto_instance *inst) +{ + crypto_set_spawn(&spawn->base, inst); +} + +int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name, + u32 type, u32 mask); + +static inline struct crypto_akcipher *crypto_spawn_akcipher( + struct crypto_akcipher_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline void crypto_drop_akcipher(struct crypto_akcipher_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct akcipher_alg *crypto_spawn_akcipher_alg( + struct crypto_akcipher_spawn *spawn) +{ + return container_of(spawn->base.alg, struct akcipher_alg, base); +} + /** * crypto_register_akcipher() -- Register public key algorithm * @@ -57,4 +123,16 @@ int crypto_register_akcipher(struct akcipher_alg *alg); * @alg: algorithm definition */ void crypto_unregister_akcipher(struct akcipher_alg *alg); + +/** + * akcipher_register_instance() -- Register public key template instance + * + * Function registers an implementation of an asymmetric key algorithm + * created from a template + * + * @tmpl: the template from which the algorithm was created + * @inst: the template instance + */ +int akcipher_register_instance(struct crypto_template *tmpl, + struct akcipher_instance *inst); #endif -- cgit v1.2.3 From 3d5b1ecdea6fb94f8c61554fcb2ba776a2d3d0e6 Mon Sep 17 00:00:00 2001 From: Andrzej Zaborowski Date: Sat, 5 Dec 2015 17:09:34 +0100 Subject: crypto: rsa - RSA padding algorithm This patch adds PKCS#1 v1.5 standard RSA padding as a separate template. This way an RSA cipher with padding can be obtained by instantiating "pkcs1pad(rsa)". The reason for adding this is that RSA is almost never used without this padding (or OAEP), so it will be needed for certificate work either in the kernel or in userspace. I also hear that it is likely implemented by RSA hardware, in which case hardware implementations of the whole of pkcs1pad(rsa) can be provided. 
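As a usage illustration (a hedged sketch, not part of the patch; error handling trimmed), a kernel user would instantiate the template through the regular akcipher API:

    #include <crypto/akcipher.h>
    #include <linux/err.h>

    static int pkcs1pad_demo(void)
    {
            struct crypto_akcipher *tfm;

            /* "pkcs1pad(rsa)" wraps the raw "rsa" akcipher; encrypt/sign
             * apply the PKCS#1 v1.5 padding transparently. */
            tfm = crypto_alloc_akcipher("pkcs1pad(rsa)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            /* ... set the key and queue an akcipher_request as usual ... */

            crypto_free_akcipher(tfm);
            return 0;
    }
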
Signed-off-by: Andrew Zaborowski Signed-off-by: Herbert Xu --- crypto/Makefile | 1 + crypto/rsa-pkcs1pad.c | 617 ++++++++++++++++++++++++++++++++++++++++++ crypto/rsa.c | 16 +- include/crypto/internal/rsa.h | 2 + 4 files changed, 635 insertions(+), 1 deletion(-) create mode 100644 crypto/rsa-pkcs1pad.c (limited to 'crypto') diff --git a/crypto/Makefile b/crypto/Makefile index f7aba923458d..2acdbbd30475 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -40,6 +40,7 @@ rsa_generic-y := rsapubkey-asn1.o rsa_generic-y += rsaprivkey-asn1.o rsa_generic-y += rsa.o rsa_generic-y += rsa_helper.o +rsa_generic-y += rsa-pkcs1pad.o obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o cryptomgr-y := algboss.o testmgr.o diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c new file mode 100644 index 000000000000..accc67d16686 --- /dev/null +++ b/crypto/rsa-pkcs1pad.c @@ -0,0 +1,617 @@ +/* + * RSA padding templates. + * + * Copyright (c) 2015 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +struct pkcs1pad_ctx { + struct crypto_akcipher *child; + + unsigned int key_size; +}; + +struct pkcs1pad_request { + struct akcipher_request child_req; + + struct scatterlist in_sg[3], out_sg[2]; + uint8_t *in_buf, *out_buf; +}; + +static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) +{ + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + int err, size; + + err = crypto_akcipher_set_pub_key(ctx->child, key, keylen); + + if (!err) { + /* Find out new modulus size from rsa implementation */ + size = crypto_akcipher_maxsize(ctx->child); + + ctx->key_size = size > 0 ? size : 0; + if (size <= 0) + err = size; + } + + return err; +} + +static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) +{ + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + int err, size; + + err = crypto_akcipher_set_priv_key(ctx->child, key, keylen); + + if (!err) { + /* Find out new modulus size from rsa implementation */ + size = crypto_akcipher_maxsize(ctx->child); + + ctx->key_size = size > 0 ? size : 0; + if (size <= 0) + err = size; + } + + return err; +} + +static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm) +{ + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + + /* + * The maximum destination buffer size for the encrypt/sign operations + * will be the same as for RSA, even though it's smaller for + * decrypt/verify. + */ + + return ctx->key_size ?: -EINVAL; +} + +static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len, + struct scatterlist *next) +{ + int nsegs = next ? 
1 : 0; + + if (offset_in_page(buf) + len <= PAGE_SIZE) { + nsegs += 1; + sg_init_table(sg, nsegs); + sg_set_buf(sg, buf, len); + } else { + nsegs += 2; + sg_init_table(sg, nsegs); + sg_set_buf(sg + 0, buf, PAGE_SIZE - offset_in_page(buf)); + sg_set_buf(sg + 1, buf + PAGE_SIZE - offset_in_page(buf), + offset_in_page(buf) + len - PAGE_SIZE); + } + + if (next) + sg_chain(sg, nsegs, next); +} + +static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); + uint8_t zeros[ctx->key_size - req_ctx->child_req.dst_len]; + + if (!err) { + if (req_ctx->child_req.dst_len < ctx->key_size) { + memset(zeros, 0, sizeof(zeros)); + sg_copy_from_buffer(req->dst, + sg_nents_for_len(req->dst, + sizeof(zeros)), + zeros, sizeof(zeros)); + } + + sg_pcopy_from_buffer(req->dst, + sg_nents_for_len(req->dst, ctx->key_size), + req_ctx->out_buf, req_ctx->child_req.dst_len, + sizeof(zeros)); + } + req->dst_len = ctx->key_size; + + kfree(req_ctx->in_buf); + kzfree(req_ctx->out_buf); + + return err; +} + +static void pkcs1pad_encrypt_sign_complete_cb( + struct crypto_async_request *child_async_req, int err) +{ + struct akcipher_request *req = child_async_req->data; + struct crypto_async_request async_req; + + if (err == -EINPROGRESS) + return; + + async_req.data = req->base.data; + async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req)); + async_req.flags = child_async_req->flags; + req->base.complete(&async_req, + pkcs1pad_encrypt_sign_complete(req, err)); +} + +static int pkcs1pad_encrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); + int err; + unsigned int i, ps_end; + + if (!ctx->key_size) + return -EINVAL; + + if (req->src_len > ctx->key_size - 11) + return -EOVERFLOW; + + if (req->dst_len < ctx->key_size) { + req->dst_len = ctx->key_size; + return -EOVERFLOW; + } + + if (ctx->key_size > PAGE_SIZE) + return -ENOTSUPP; + + /* + * Replace both input and output to add the padding in the input and + * the potential missing leading zeros in the output. + */ + req_ctx->child_req.src = req_ctx->in_sg; + req_ctx->child_req.src_len = ctx->key_size - 1; + req_ctx->child_req.dst = req_ctx->out_sg; + req_ctx->child_req.dst_len = ctx->key_size; + + req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len, + (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC); + if (!req_ctx->in_buf) + return -ENOMEM; + + ps_end = ctx->key_size - req->src_len - 2; + req_ctx->in_buf[0] = 0x02; + for (i = 1; i < ps_end; i++) + req_ctx->in_buf[i] = 1 + prandom_u32_max(255); + req_ctx->in_buf[ps_end] = 0x00; + + pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf, + ctx->key_size - 1 - req->src_len, req->src); + + req_ctx->out_buf = kmalloc(ctx->key_size, + (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC); + if (!req_ctx->out_buf) { + kfree(req_ctx->in_buf); + return -ENOMEM; + } + + pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, + ctx->key_size, NULL); + + akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); + akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, + pkcs1pad_encrypt_sign_complete_cb, req); + + err = crypto_akcipher_encrypt(&req_ctx->child_req); + if (err != -EINPROGRESS && + (err != -EBUSY || + !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + return pkcs1pad_encrypt_sign_complete(req, err); + + return err; +} + +static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); + unsigned int pos; + + if (err == -EOVERFLOW) + /* Decrypted value had no leading 0 byte */ + err = -EINVAL; + + if (err) + goto done; + + if (req_ctx->child_req.dst_len != ctx->key_size - 1) { + err = -EINVAL; + goto done; + } + + if (req_ctx->out_buf[0] != 0x02) { + err = -EINVAL; + goto done; + } + for (pos = 1; pos < req_ctx->child_req.dst_len; pos++) + if (req_ctx->out_buf[pos] == 0x00) + break; + if (pos < 9 || pos == req_ctx->child_req.dst_len) { + err = -EINVAL; + goto done; + } + pos++; + + if (req->dst_len < req_ctx->child_req.dst_len - pos) + err = -EOVERFLOW; + req->dst_len = req_ctx->child_req.dst_len - pos; + + if (!err) + sg_copy_from_buffer(req->dst, + sg_nents_for_len(req->dst, req->dst_len), + req_ctx->out_buf + pos, req->dst_len); + +done: + kzfree(req_ctx->out_buf); + + return err; +} + +static void pkcs1pad_decrypt_complete_cb( + struct crypto_async_request *child_async_req, int err) +{ + struct akcipher_request *req = child_async_req->data; + struct crypto_async_request async_req; + + if (err == -EINPROGRESS) + return; + + async_req.data = req->base.data; + async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req)); + async_req.flags = child_async_req->flags; + req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err)); +} + +static int pkcs1pad_decrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); + int err; + + if (!ctx->key_size || req->src_len != ctx->key_size) + return -EINVAL; + + if (ctx->key_size > PAGE_SIZE) + return -ENOTSUPP; + + /* Reuse input buffer, output to a new buffer */ + req_ctx->child_req.src = req->src; + req_ctx->child_req.src_len = req->src_len; + req_ctx->child_req.dst = req_ctx->out_sg; + req_ctx->child_req.dst_len = ctx->key_size - 1; + + req_ctx->out_buf = kmalloc(ctx->key_size - 1, + (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC); + if (!req_ctx->out_buf) + return -ENOMEM; + + pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, + ctx->key_size - 1, NULL); + + akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); + akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, + pkcs1pad_decrypt_complete_cb, req); + + err = crypto_akcipher_decrypt(&req_ctx->child_req); + if (err != -EINPROGRESS && + (err != -EBUSY || + !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + return pkcs1pad_decrypt_complete(req, err); + + return err; +} + +static int pkcs1pad_sign(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); + int err; + unsigned int ps_end; + + if (!ctx->key_size) + return -EINVAL; + + if (req->src_len > ctx->key_size - 11) + return -EOVERFLOW; + + if (req->dst_len < ctx->key_size) { + req->dst_len = ctx->key_size; + return -EOVERFLOW; + } + + if (ctx->key_size > PAGE_SIZE) + return -ENOTSUPP; + + /* + * Replace both input and output to add the padding in the input and + * the potential missing leading zeros in the output. + */ + req_ctx->child_req.src = req_ctx->in_sg; + req_ctx->child_req.src_len = ctx->key_size - 1; + req_ctx->child_req.dst = req_ctx->out_sg; + req_ctx->child_req.dst_len = ctx->key_size; + + req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len, + (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC); + if (!req_ctx->in_buf) + return -ENOMEM; + + ps_end = ctx->key_size - req->src_len - 2; + req_ctx->in_buf[0] = 0x01; + memset(req_ctx->in_buf + 1, 0xff, ps_end - 1); + req_ctx->in_buf[ps_end] = 0x00; + + pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf, + ctx->key_size - 1 - req->src_len, req->src); + + req_ctx->out_buf = kmalloc(ctx->key_size, + (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC); + if (!req_ctx->out_buf) { + kfree(req_ctx->in_buf); + return -ENOMEM; + } + + pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, + ctx->key_size, NULL); + + akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); + akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, + pkcs1pad_encrypt_sign_complete_cb, req); + + err = crypto_akcipher_sign(&req_ctx->child_req); + if (err != -EINPROGRESS && + (err != -EBUSY || + !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + return pkcs1pad_encrypt_sign_complete(req, err); + + return err; +} + +static int pkcs1pad_verify_complete(struct akcipher_request *req, int err) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); + unsigned int pos; + + if (err == -EOVERFLOW) + /* Decrypted value had no leading 0 byte */ + err = -EINVAL; + + if (err) + goto done; + + if (req_ctx->child_req.dst_len != ctx->key_size - 1) { + err = -EINVAL; + goto done; + } + + if (req_ctx->out_buf[0] != 0x01) { + err = -EINVAL; + goto done; + } + for (pos = 1; pos < req_ctx->child_req.dst_len; pos++) + if (req_ctx->out_buf[pos] != 0xff) + break; + if (pos < 9 || pos == req_ctx->child_req.dst_len || + req_ctx->out_buf[pos] != 0x00) { + err = -EINVAL; + goto done; + } + pos++; + + if (req->dst_len < req_ctx->child_req.dst_len - pos) + err = -EOVERFLOW; + req->dst_len = req_ctx->child_req.dst_len - pos; + + if (!err) + sg_copy_from_buffer(req->dst, + sg_nents_for_len(req->dst, req->dst_len), + req_ctx->out_buf + pos, req->dst_len); + +done: + kzfree(req_ctx->out_buf); + + return err; +} + +static void pkcs1pad_verify_complete_cb( + struct crypto_async_request *child_async_req, int err) +{ + struct akcipher_request *req = child_async_req->data; + struct crypto_async_request async_req; + + if (err == -EINPROGRESS) + return; + + async_req.data = req->base.data; + async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req)); + async_req.flags = child_async_req->flags; + req->base.complete(&async_req, pkcs1pad_verify_complete(req, err)); +} + +/* + * The verify operation is here for completeness similar to the verification + * defined in RFC2313 section 10.2 except that block type 0 is not accepted, + * as in RFC2437. RFC2437 section 9.2 doesn't define any operation to + * retrieve the DigestInfo from a signature, instead the user is expected + * to call the sign operation to generate the expected signature and compare + * signatures instead of the message-digests. + */ +static int pkcs1pad_verify(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); + int err; + + if (!ctx->key_size || req->src_len != ctx->key_size) + return -EINVAL; + + if (ctx->key_size > PAGE_SIZE) + return -ENOTSUPP; + + /* Reuse input buffer, output to a new buffer */ + req_ctx->child_req.src = req->src; + req_ctx->child_req.src_len = req->src_len; + req_ctx->child_req.dst = req_ctx->out_sg; + req_ctx->child_req.dst_len = ctx->key_size - 1; + + req_ctx->out_buf = kmalloc(ctx->key_size - 1, + (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC); + if (!req_ctx->out_buf) + return -ENOMEM; + + pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, + ctx->key_size - 1, NULL); + + akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); + akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, + pkcs1pad_verify_complete_cb, req); + + err = crypto_akcipher_verify(&req_ctx->child_req); + if (err != -EINPROGRESS && + (err != -EBUSY || + !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + return pkcs1pad_verify_complete(req, err); + + return err; +} + +static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm) +{ + struct akcipher_instance *inst = akcipher_alg_instance(tfm); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct crypto_akcipher *child_tfm; + + child_tfm = crypto_spawn_akcipher(akcipher_instance_ctx(inst)); + if (IS_ERR(child_tfm)) + return PTR_ERR(child_tfm); + + ctx->child = child_tfm; + + return 0; +} + +static void pkcs1pad_exit_tfm(struct crypto_akcipher *tfm) +{ + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + + crypto_free_akcipher(ctx->child); +} + +static void pkcs1pad_free(struct akcipher_instance *inst) +{ + struct crypto_akcipher_spawn *spawn = akcipher_instance_ctx(inst); + + crypto_drop_akcipher(spawn); + + kfree(inst); +} + +static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct crypto_attr_type *algt; + struct akcipher_instance *inst; + struct crypto_akcipher_spawn *spawn; + struct akcipher_alg *rsa_alg; + const char *rsa_alg_name; + int err; + + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return PTR_ERR(algt); + + if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask) + return -EINVAL; + + rsa_alg_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(rsa_alg_name)) + return PTR_ERR(rsa_alg_name); + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + spawn = akcipher_instance_ctx(inst); + crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst)); + err = crypto_grab_akcipher(spawn, rsa_alg_name, 0, + crypto_requires_sync(algt->type, algt->mask)); + if (err) + goto out_free_inst; + + rsa_alg = crypto_spawn_akcipher_alg(spawn); + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.base.cra_name, + CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", + rsa_alg->base.cra_name) >= + CRYPTO_MAX_ALG_NAME || + snprintf(inst->alg.base.cra_driver_name, + CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", + rsa_alg->base.cra_driver_name) >= + CRYPTO_MAX_ALG_NAME) + goto out_drop_alg; + + inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC; + inst->alg.base.cra_priority = rsa_alg->base.cra_priority; + inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx); + + inst->alg.init = pkcs1pad_init_tfm; + inst->alg.exit = pkcs1pad_exit_tfm; + + inst->alg.encrypt = pkcs1pad_encrypt; + inst->alg.decrypt = pkcs1pad_decrypt; + inst->alg.sign = pkcs1pad_sign; + inst->alg.verify = pkcs1pad_verify; + inst->alg.set_pub_key = pkcs1pad_set_pub_key; + inst->alg.set_priv_key = pkcs1pad_set_priv_key; + inst->alg.max_size = pkcs1pad_get_max_size; + inst->alg.reqsize = sizeof(struct pkcs1pad_request) + rsa_alg->reqsize; + + inst->free = pkcs1pad_free; + + err = akcipher_register_instance(tmpl, inst); + if (err) + goto out_drop_alg; + + return 0; + +out_drop_alg: + crypto_drop_akcipher(spawn); +out_free_inst: + kfree(inst); + return err; +} + +struct crypto_template rsa_pkcs1pad_tmpl = { + .name = "pkcs1pad", + .create = pkcs1pad_create, + .module = THIS_MODULE, +}; diff --git a/crypto/rsa.c b/crypto/rsa.c index 
58aad69a490c..77d737f52147 100644 --- a/crypto/rsa.c +++ b/crypto/rsa.c @@ -13,6 +13,7 @@ #include #include #include +#include /* * RSAEP function [RFC3447 sec 5.1.1] @@ -315,11 +316,24 @@ static struct akcipher_alg rsa = { static int rsa_init(void) { - return crypto_register_akcipher(&rsa); + int err; + + err = crypto_register_akcipher(&rsa); + if (err) + return err; + + err = crypto_register_template(&rsa_pkcs1pad_tmpl); + if (err) { + crypto_unregister_akcipher(&rsa); + return err; + } + + return 0; } static void rsa_exit(void) { + crypto_unregister_template(&rsa_pkcs1pad_tmpl); crypto_unregister_akcipher(&rsa); } diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h index f997e2d29b5a..c7585bdecbc2 100644 --- a/include/crypto/internal/rsa.h +++ b/include/crypto/internal/rsa.h @@ -27,4 +27,6 @@ int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key, unsigned int key_len); void rsa_free_key(struct rsa_key *rsa_key); + +extern struct crypto_template rsa_pkcs1pad_tmpl; #endif -- cgit v1.2.3 From 161151d79ff4f7ed35d4ebb0eb7727a517c34ef2 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Sun, 6 Dec 2015 02:51:38 +0100 Subject: crypto: chacha20poly1305 - Skip encryption/decryption for 0-len If the length of the plaintext is zero, there's no need to waste cycles on encryption and decryption. Using the chacha20poly1305 construction for zero-length plaintexts is a common way of using a shared encryption key for AAD authentication. Signed-off-by: Jason A. Donenfeld Signed-off-by: Herbert Xu --- crypto/chacha20poly1305.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'crypto') diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index 99c3cce01290..7b6b935cef23 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -130,6 +130,9 @@ static int chacha_decrypt(struct aead_request *req) struct scatterlist *src, *dst; int err; + if (rctx->cryptlen == 0) + goto skip; + chacha_iv(creq->iv, req, 1); sg_init_table(rctx->src, 2); @@ -150,6 +153,7 @@ static int chacha_decrypt(struct aead_request *req) if (err) return err; +skip: return poly_verify_tag(req); } @@ -415,6 +419,9 @@ static int chacha_encrypt(struct aead_request *req) struct scatterlist *src, *dst; int err; + if (req->cryptlen == 0) + goto skip; + chacha_iv(creq->iv, req, 1); sg_init_table(rctx->src, 2); @@ -435,6 +442,7 @@ static int chacha_encrypt(struct aead_request *req) if (err) return err; +skip: return poly_genkey(req); } -- cgit v1.2.3 From e4bc02aced3731776c8828d34e13c02ebdec3088 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Mon, 7 Dec 2015 21:36:57 +0100 Subject: crypto: drbg - constify drbg_state_ops structures The drbg_state_ops structures are never modified, so declare them as const. Done with the help of Coccinelle. 
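For illustration, the constification pattern in general form (hypothetical widget example, not from this patch):

    /* An ops table that is only ever read can be declared const; the
     * compiler may then place it in read-only memory, and accidental
     * writes become build or runtime errors. */
    struct widget_ops {
            int (*update)(void *state);
    };

    static int widget_update(void *state)
    {
            return 0;
    }

    static const struct widget_ops widget_default_ops = {
            .update = widget_update,
    };
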
Signed-off-by: Julia Lawall Signed-off-by: Herbert Xu --- crypto/drbg.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'crypto') diff --git a/crypto/drbg.c b/crypto/drbg.c index a7c23146b87f..ab6ef1d08568 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -626,7 +626,7 @@ out: return len; } -static struct drbg_state_ops drbg_ctr_ops = { +static const struct drbg_state_ops drbg_ctr_ops = { .update = drbg_ctr_update, .generate = drbg_ctr_generate, .crypto_init = drbg_init_sym_kernel, @@ -752,7 +752,7 @@ static int drbg_hmac_generate(struct drbg_state *drbg, return len; } -static struct drbg_state_ops drbg_hmac_ops = { +static const struct drbg_state_ops drbg_hmac_ops = { .update = drbg_hmac_update, .generate = drbg_hmac_generate, .crypto_init = drbg_init_hash_kernel, @@ -1032,7 +1032,7 @@ out: * scratchpad usage: as update and generate are used isolated, both * can use the scratchpad */ -static struct drbg_state_ops drbg_hash_ops = { +static const struct drbg_state_ops drbg_hash_ops = { .update = drbg_hash_update, .generate = drbg_hash_generate, .crypto_init = drbg_init_hash_kernel, -- cgit v1.2.3 From 1f6a9ab05ab500a033c1d5490c3a6bd993bfd602 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Wed, 9 Dec 2015 15:05:28 -0500 Subject: crypto: asymmetric_keys - signature.c does not need module.h This file does not contain any module-related function calls. So get rid of module.h since it drags in a lot of other headers and adds to the preprocessing load. It does export some symbols though, so we'll need to ensure it has export.h present instead. Cc: Herbert Xu Cc: "David S. Miller" Cc: linux-crypto@vger.kernel.org Signed-off-by: Paul Gortmaker Signed-off-by: Herbert Xu --- crypto/asymmetric_keys/signature.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c index 9441240f7d2a..004d5fc8e56b 100644 --- a/crypto/asymmetric_keys/signature.c +++ b/crypto/asymmetric_keys/signature.c @@ -13,7 +13,7 @@ #define pr_fmt(fmt) "SIG: "fmt #include -#include +#include #include #include #include "asymmetric_keys.h" -- cgit v1.2.3 From 5319216dcfee14886abb2b7090e8fcf2e2d8a611 Mon Sep 17 00:00:00 2001 From: Andrzej Zaborowski Date: Sat, 12 Dec 2015 00:03:51 -0500 Subject: crypto: rsa-pkcs1pad - don't allocate buffer on stack Avoid the s390 compile "warning: 'pkcs1pad_encrypt_sign_complete' uses dynamic stack allocation" reported by the kbuild test robot. Don't use a flat zero-filled buffer; instead, zero the contents of the SGL. 
Signed-off-by: Andrew Zaborowski Signed-off-by: Herbert Xu --- crypto/rsa-pkcs1pad.c | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) (limited to 'crypto') diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index accc67d16686..50f5c97e1087 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -110,21 +110,32 @@ static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err) struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); - uint8_t zeros[ctx->key_size - req_ctx->child_req.dst_len]; + size_t pad_len = ctx->key_size - req_ctx->child_req.dst_len; + size_t chunk_len, pad_left; + struct sg_mapping_iter miter; if (!err) { - if (req_ctx->child_req.dst_len < ctx->key_size) { - memset(zeros, 0, sizeof(zeros)); - sg_copy_from_buffer(req->dst, - sg_nents_for_len(req->dst, - sizeof(zeros)), - zeros, sizeof(zeros)); + if (pad_len) { + sg_miter_start(&miter, req->dst, + sg_nents_for_len(req->dst, pad_len), + SG_MITER_ATOMIC | SG_MITER_TO_SG); + + pad_left = pad_len; + while (pad_left) { + sg_miter_next(&miter); + + chunk_len = min(miter.length, pad_left); + memset(miter.addr, 0, chunk_len); + pad_left -= chunk_len; + } + + sg_miter_stop(&miter); } sg_pcopy_from_buffer(req->dst, sg_nents_for_len(req->dst, ctx->key_size), req_ctx->out_buf, req_ctx->child_req.dst_len, - sizeof(zeros)); + pad_len); } req->dst_len = ctx->key_size; -- cgit v1.2.3 From 0c4c78de0417ced1da92351a3013e631860ea576 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Thu, 17 Dec 2015 13:45:39 +0100 Subject: crypto: hash - add zero length message hash for shax and md5 Some crypto drivers cannot process an empty data message and must instead return a precalculated hash for md5/sha1/sha224/sha256. This patch adds those precalculated hashes to include/crypto. 
Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu --- crypto/md5.c | 6 ++++++ crypto/sha1_generic.c | 7 +++++++ crypto/sha256_generic.c | 16 ++++++++++++++++ include/crypto/md5.h | 2 ++ include/crypto/sha.h | 6 ++++++ 5 files changed, 37 insertions(+) (limited to 'crypto') diff --git a/crypto/md5.c b/crypto/md5.c index 33d17e9a8702..2355a7c25c45 100644 --- a/crypto/md5.c +++ b/crypto/md5.c @@ -24,6 +24,12 @@ #include #include +const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = { + 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, + 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, +}; +EXPORT_SYMBOL_GPL(md5_zero_message_hash); + /* XXX: this stuff can be optimized */ static inline void le32_to_cpu_array(u32 *buf, unsigned int words) { diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c index 39e3acc438d9..6877cbb9105f 100644 --- a/crypto/sha1_generic.c +++ b/crypto/sha1_generic.c @@ -26,6 +26,13 @@ #include #include +const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE] = { + 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, + 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, + 0xaf, 0xd8, 0x07, 0x09 +}; +EXPORT_SYMBOL_GPL(sha1_zero_message_hash); + static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src, int blocks) { diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c index 78431163ed3c..8f9c47e1a96e 100644 --- a/crypto/sha256_generic.c +++ b/crypto/sha256_generic.c @@ -27,6 +27,22 @@ #include #include +const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = { + 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47, + 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2, + 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4, + 0x2f +}; +EXPORT_SYMBOL_GPL(sha224_zero_message_hash); + +const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = { + 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, + 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, + 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, + 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55 +}; +EXPORT_SYMBOL_GPL(sha256_zero_message_hash); + static inline u32 Ch(u32 x, u32 y, u32 z) { return z ^ (x & (y ^ z)); diff --git a/include/crypto/md5.h b/include/crypto/md5.h index 146af825eedb..327deac963c0 100644 --- a/include/crypto/md5.h +++ b/include/crypto/md5.h @@ -13,6 +13,8 @@ #define MD5_H2 0x98badcfeUL #define MD5_H3 0x10325476UL +extern const u8 md5_zero_message_hash[MD5_DIGEST_SIZE]; + struct md5_state { u32 hash[MD5_HASH_WORDS]; u32 block[MD5_BLOCK_WORDS]; diff --git a/include/crypto/sha.h b/include/crypto/sha.h index dd7905a3c22e..c94d3eb1cefd 100644 --- a/include/crypto/sha.h +++ b/include/crypto/sha.h @@ -64,6 +64,12 @@ #define SHA512_H6 0x1f83d9abfb41bd6bULL #define SHA512_H7 0x5be0cd19137e2179ULL +extern const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE]; + +extern const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE]; + +extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE]; + struct sha1_state { u32 state[SHA1_DIGEST_SIZE / 4]; u64 count; -- cgit v1.2.3