author		Linus Torvalds <torvalds@linux-foundation.org>	2016-12-10 09:47:13 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-10 09:47:13 -0800
commit		045169816b31b10faed984b01c390db1b32ee4c1 (patch)
tree		ecb1d0c763f3d8cb0af749717b66ac47e6981d33 /crypto
parent		cd6628953e4216b65e7d91ab70ff8e5b65c9fde9 (diff)
parent		678b5c6b22fed89a13d5b2267f423069a9b11c80 (diff)
download	linux-045169816b31b10faed984b01c390db1b32ee4c1.tar.bz2
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto fixes from Herbert Xu:
"This fixes the following issues:
- Fix pointer size when caam is used with AArch64 boot loader on
AArch32 kernel.
- Fix ahash state corruption in marvell driver.
 - Fix buggy algif_aead tag handling.
- Prevent mcryptd from being used with incompatible algorithms which
can cause crashes"
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
crypto: algif_aead - fix uninitialized variable warning
crypto: mcryptd - Check mcryptd algorithm compatibility
crypto: algif_aead - fix AEAD tag memory handling
crypto: caam - fix pointer size for AArch64 boot loader, AArch32 kernel
crypto: marvell - Don't corrupt state of an STD req for re-stepped ahash
crypto: marvell - Don't copy hash operation twice into the SRAM
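
The algif_aead changes listed above come down to one sizing rule: the buffer returned on recvmsg() must be the input length plus the tag on encryption, or minus the tag on decryption. Below is a minimal standalone sketch of that rule; the helper name aead_outlen is made up for illustration and simply mirrors the outlen calculation in the patch further down.

#include <stdbool.h>
#include <stddef.h>

/*
 * Illustrative only: mirrors the outlen computation in the algif_aead
 * patch. Encryption appends the authentication tag, decryption consumes
 * it from the input, so the caller sizes its output buffer accordingly.
 */
static size_t aead_outlen(size_t used, size_t authsize, bool enc)
{
	return enc ? used + authsize : used - authsize;
}

For example, with a 16-byte tag, 4096 bytes of plaintext need a 4112-byte output buffer on encryption, while 4112 bytes of ciphertext decrypt into 4096 bytes of plaintext.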
Diffstat (limited to 'crypto')
-rw-r--r--	crypto/algif_aead.c	59
-rw-r--r--	crypto/mcryptd.c	19
2 files changed, 49 insertions(+), 29 deletions(-)
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 80a0f1a78551..e9c0993b131d 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -81,7 +81,11 @@ static inline bool aead_sufficient_data(struct aead_ctx *ctx)
 {
 	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
 
-	return ctx->used >= ctx->aead_assoclen + as;
+	/*
+	 * The minimum amount of memory needed for an AEAD cipher is
+	 * the AAD and in case of decryption the tag.
+	 */
+	return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
 }
 
 static void aead_reset_ctx(struct aead_ctx *ctx)
@@ -416,7 +420,7 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
 	unsigned int i, reqlen = GET_REQ_SIZE(tfm);
 	int err = -ENOMEM;
 	unsigned long used;
-	size_t outlen;
+	size_t outlen = 0;
 	size_t usedpages = 0;
 
 	lock_sock(sk);
@@ -426,12 +430,15 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
 		goto unlock;
 	}
 
-	used = ctx->used;
-	outlen = used;
-
 	if (!aead_sufficient_data(ctx))
 		goto unlock;
 
+	used = ctx->used;
+	if (ctx->enc)
+		outlen = used + as;
+	else
+		outlen = used - as;
+
 	req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
 	if (unlikely(!req))
 		goto unlock;
@@ -445,7 +452,7 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
 	aead_request_set_ad(req, ctx->aead_assoclen);
 	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 				  aead_async_cb, sk);
-	used -= ctx->aead_assoclen + (ctx->enc ? as : 0);
+	used -= ctx->aead_assoclen;
 
 	/* take over all tx sgls from ctx */
 	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur,
@@ -461,7 +468,7 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
 	areq->tsgls = sgl->cur;
 
 	/* create rx sgls */
-	while (iov_iter_count(&msg->msg_iter)) {
+	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
 		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
 				      (outlen - usedpages));
 
@@ -491,16 +498,14 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
 
 		last_rsgl = rsgl;
 
-		/* we do not need more iovecs as we have sufficient memory */
-		if (outlen <= usedpages)
-			break;
-
 		iov_iter_advance(&msg->msg_iter, err);
 	}
-	err = -EINVAL;
+
 	/* ensure output buffer is sufficiently large */
-	if (usedpages < outlen)
-		goto free;
+	if (usedpages < outlen) {
+		err = -EINVAL;
+		goto unlock;
+	}
 
 	aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
 			       areq->iv);
@@ -571,6 +576,7 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
 		goto unlock;
 	}
 
+	/* data length provided by caller via sendmsg/sendpage */
 	used = ctx->used;
 
 	/*
@@ -585,16 +591,27 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
 	if (!aead_sufficient_data(ctx))
 		goto unlock;
 
-	outlen = used;
+	/*
+	 * Calculate the minimum output buffer size holding the result of the
+	 * cipher operation. When encrypting data, the receiving buffer is
+	 * larger by the tag length compared to the input buffer as the
+	 * encryption operation generates the tag. For decryption, the input
+	 * buffer provides the tag which is consumed resulting in only the
+	 * plaintext without a buffer for the tag returned to the caller.
+	 */
+	if (ctx->enc)
+		outlen = used + as;
+	else
+		outlen = used - as;
 
 	/*
 	 * The cipher operation input data is reduced by the associated data
 	 * length as this data is processed separately later on.
 	 */
-	used -= ctx->aead_assoclen + (ctx->enc ? as : 0);
+	used -= ctx->aead_assoclen;
 
 	/* convert iovecs of output buffers into scatterlists */
-	while (iov_iter_count(&msg->msg_iter)) {
+	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
 		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
 				      (outlen - usedpages));
 
@@ -621,16 +638,14 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
 
 		last_rsgl = rsgl;
 
-		/* we do not need more iovecs as we have sufficient memory */
-		if (outlen <= usedpages)
-			break;
 
 		iov_iter_advance(&msg->msg_iter, err);
 	}
-	err = -EINVAL;
 	/* ensure output buffer is sufficiently large */
-	if (usedpages < outlen)
+	if (usedpages < outlen) {
+		err = -EINVAL;
 		goto unlock;
+	}
 
 	sg_mark_end(sgl->sg + sgl->cur - 1);
 	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index 94ee44acd465..c207458d6299 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -254,18 +254,22 @@ out_free_inst:
 	goto out;
 }
 
-static inline void mcryptd_check_internal(struct rtattr **tb, u32 *type,
+static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type,
 					  u32 *mask)
 {
 	struct crypto_attr_type *algt;
 
 	algt = crypto_get_attr_type(tb);
 	if (IS_ERR(algt))
-		return;
-	if ((algt->type & CRYPTO_ALG_INTERNAL))
-		*type |= CRYPTO_ALG_INTERNAL;
-	if ((algt->mask & CRYPTO_ALG_INTERNAL))
-		*mask |= CRYPTO_ALG_INTERNAL;
+		return false;
+
+	*type |= algt->type & CRYPTO_ALG_INTERNAL;
+	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
+
+	if (*type & *mask & CRYPTO_ALG_INTERNAL)
+		return true;
+	else
+		return false;
 }
 
 static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
@@ -492,7 +496,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
 	u32 mask = 0;
 	int err;
 
-	mcryptd_check_internal(tb, &type, &mask);
+	if (!mcryptd_check_internal(tb, &type, &mask))
+		return -EINVAL;
 
 	halg = ahash_attr_alg(tb[1], type, mask);
 	if (IS_ERR(halg))
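
The mcryptd change above turns the internal-flag check into a hard requirement: instance creation now fails with -EINVAL unless the wrapped algorithm is flagged CRYPTO_ALG_INTERNAL in both the requested type and mask. A small illustrative sketch of that rule follows; the helper name is hypothetical and the flag value is only repeated here so the sketch is self-contained (the real definition lives in include/linux/crypto.h).

#include <stdbool.h>
#include <stdint.h>

/* Mirrors the CRYPTO_ALG_INTERNAL flag from include/linux/crypto.h. */
#define CRYPTO_ALG_INTERNAL	0x00002000

/*
 * Illustrative only: mcryptd may only wrap algorithms marked internal;
 * anything else is rejected, matching the new behaviour of
 * mcryptd_check_internal() and mcryptd_create_hash().
 */
static bool mcryptd_compatible(uint32_t type, uint32_t mask)
{
	return (type & mask & CRYPTO_ALG_INTERNAL) != 0;
}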