Diffstat (limited to 'crypto')
 crypto/Makefile                     |   1
 crypto/ablkcipher.c                 |   2
 crypto/akcipher.c                   |  34
 crypto/algapi.c                     |   9
 crypto/algif_aead.c                 |  14
 crypto/algif_skcipher.c             |  81
 crypto/asymmetric_keys/signature.c  |   2
 crypto/async_tx/async_memcpy.c      |   2
 crypto/async_tx/async_pq.c          |   4
 crypto/async_tx/async_raid6_recov.c |   4
 crypto/async_tx/async_xor.c         |   4
 crypto/blkcipher.c                  |   2
 crypto/chacha20poly1305.c           |   8
 crypto/cryptd.c                     |   4
 crypto/drbg.c                       |   6
 crypto/mcryptd.c                    |   8
 crypto/md5.c                        |   6
 crypto/rsa-pkcs1pad.c               | 628
 crypto/rsa.c                        |  40
 crypto/sha1_generic.c               |   7
 crypto/sha256_generic.c             |  16
 crypto/tcrypt.c                     |   2
 22 files changed, 783 insertions(+), 101 deletions(-)
diff --git a/crypto/Makefile b/crypto/Makefile
index f7aba923458d..2acdbbd30475 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -40,6 +40,7 @@ rsa_generic-y := rsapubkey-asn1.o
rsa_generic-y += rsaprivkey-asn1.o
rsa_generic-y += rsa.o
rsa_generic-y += rsa_helper.o
+rsa_generic-y += rsa-pkcs1pad.o
obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
cryptomgr-y := algboss.o testmgr.o
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index b4ffc5be1a93..e5b5721809e2 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -277,12 +277,12 @@ static int ablkcipher_walk_first(struct ablkcipher_request *req,
if (WARN_ON_ONCE(in_irq()))
return -EDEADLK;
+ walk->iv = req->info;
walk->nbytes = walk->total;
if (unlikely(!walk->total))
return 0;
walk->iv_buffer = NULL;
- walk->iv = req->info;
if (unlikely(((unsigned long)walk->iv & alignmask))) {
int err = ablkcipher_copy_iv(walk, tfm, alignmask);
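Note: the reorder above makes walk->iv valid even when the walk exits early
for a zero-byte request. A hedged sketch of the pattern that breaks without
it (toy_encrypt and the saved IV are hypothetical; ivsize <= 16 assumed):

    static int toy_encrypt(struct ablkcipher_request *req)
    {
            struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
            struct ablkcipher_walk walk;
            u8 saved_iv[16];        /* stand-in for driver state */
            int err;

            ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
            err = ablkcipher_walk_phys(req, &walk);

            /* Unconditional IV bookkeeping: with req->nbytes == 0 the old
             * code left walk.iv uninitialized at this point. */
            memcpy(saved_iv, walk.iv, crypto_ablkcipher_ivsize(tfm));
            return err;
    }
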
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index 120ec042ec9e..def301ed1288 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -21,6 +21,7 @@
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <crypto/akcipher.h>
+#include <crypto/internal/akcipher.h>
#include "internal.h"
#ifdef CONFIG_NET
@@ -75,9 +76,17 @@ static int crypto_akcipher_init_tfm(struct crypto_tfm *tfm)
return 0;
}
+static void crypto_akcipher_free_instance(struct crypto_instance *inst)
+{
+ struct akcipher_instance *akcipher = akcipher_instance(inst);
+
+ akcipher->free(akcipher);
+}
+
static const struct crypto_type crypto_akcipher_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_akcipher_init_tfm,
+ .free = crypto_akcipher_free_instance,
#ifdef CONFIG_PROC_FS
.show = crypto_akcipher_show,
#endif
@@ -88,6 +97,14 @@ static const struct crypto_type crypto_akcipher_type = {
.tfmsize = offsetof(struct crypto_akcipher, base),
};
+int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name,
+ u32 type, u32 mask)
+{
+ spawn->base.frontend = &crypto_akcipher_type;
+ return crypto_grab_spawn(&spawn->base, name, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_grab_akcipher);
+
struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type,
u32 mask)
{
@@ -95,13 +112,20 @@ struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type,
}
EXPORT_SYMBOL_GPL(crypto_alloc_akcipher);
-int crypto_register_akcipher(struct akcipher_alg *alg)
+static void akcipher_prepare_alg(struct akcipher_alg *alg)
{
struct crypto_alg *base = &alg->base;
base->cra_type = &crypto_akcipher_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
+}
+
+int crypto_register_akcipher(struct akcipher_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+
+ akcipher_prepare_alg(alg);
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_akcipher);
@@ -112,5 +136,13 @@ void crypto_unregister_akcipher(struct akcipher_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_unregister_akcipher);
+int akcipher_register_instance(struct crypto_template *tmpl,
+ struct akcipher_instance *inst)
+{
+ akcipher_prepare_alg(&inst->alg);
+ return crypto_register_instance(tmpl, akcipher_crypto_instance(inst));
+}
+EXPORT_SYMBOL_GPL(akcipher_register_instance);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic public key cipher type");
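Note: together these hooks give akcipher the same template machinery that
skcipher and aead already have: grab a child algorithm by name, register
the wrapping instance, and let the crypto core call ->free on teardown. A
condensed sketch of the create() flow (the full version is rsa-pkcs1pad.c
below):

    static int toy_create(struct crypto_template *tmpl, struct rtattr **tb)
    {
            struct akcipher_instance *inst;
            struct crypto_akcipher_spawn *spawn;
            int err;

            inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
            if (!inst)
                    return -ENOMEM;

            spawn = akcipher_instance_ctx(inst);
            crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst));
            err = crypto_grab_akcipher(spawn, "rsa", 0, 0);
            if (err)
                    goto err_free;

            /* ... fill in inst->alg and inst->free here ... */

            err = akcipher_register_instance(tmpl, inst);
            if (!err)
                    return 0;

            crypto_drop_akcipher(spawn);
    err_free:
            kfree(inst);
            return err;
    }
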
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 59bf491fe3d8..7be76aa31579 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -93,16 +93,15 @@ static struct list_head *crypto_more_spawns(struct crypto_alg *alg,
{
struct crypto_spawn *spawn, *n;
- if (list_empty(stack))
+ spawn = list_first_entry_or_null(stack, struct crypto_spawn, list);
+ if (!spawn)
return NULL;
- spawn = list_first_entry(stack, struct crypto_spawn, list);
- n = list_entry(spawn->list.next, struct crypto_spawn, list);
+ n = list_next_entry(spawn, list);
if (spawn->alg && &n->list != stack && !n->alg)
n->alg = (n->list.next == stack) ? alg :
- &list_entry(n->list.next, struct crypto_spawn,
- list)->inst->alg;
+ &list_next_entry(n, list)->inst->alg;
list_move(&spawn->list, secondary_spawns);
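Note: no behaviour change here, only the open-coded list walking folded
into the generic helpers. The equivalences, for reference:

    /* list_first_entry_or_null(head, type, member)
     *     == list_empty(head) ? NULL
     *                         : list_first_entry(head, type, member)
     *
     * list_next_entry(pos, member)
     *     == list_entry((pos)->member.next, typeof(*(pos)), member)
     */
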
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 0aa6fdfb448a..147069c9afd0 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -106,7 +106,7 @@ static void aead_wmem_wakeup(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
- if (wq_has_sleeper(wq))
+ if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
POLLRDNORM |
POLLRDBAND);
@@ -125,7 +125,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags)
if (flags & MSG_DONTWAIT)
return -EAGAIN;
- set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
for (;;) {
if (signal_pending(current))
@@ -139,7 +139,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags)
}
finish_wait(sk_sleep(sk), &wait);
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
return err;
}
@@ -157,7 +157,7 @@ static void aead_data_wakeup(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
- if (wq_has_sleeper(wq))
+ if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
POLLRDNORM |
POLLRDBAND);
@@ -213,7 +213,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
}
while (size) {
- unsigned long len = size;
+ size_t len = size;
struct scatterlist *sg = NULL;
/* use the existing memory in an allocated page */
@@ -247,7 +247,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
/* allocate a new page */
len = min_t(unsigned long, size, aead_sndbuf(sk));
while (len) {
- int plen = 0;
+ size_t plen = 0;
if (sgl->cur >= ALG_MAX_PAGES) {
aead_put_sgl(sk);
@@ -256,7 +256,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
}
sg = sgl->sg + sgl->cur;
- plen = min_t(int, len, PAGE_SIZE);
+ plen = min_t(size_t, len, PAGE_SIZE);
sg_assign_page(sg, alloc_page(GFP_KERNEL));
err = -ENOMEM;
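Note: besides the sk_wq wrapper conversion, the int -> size_t changes close
a truncation hole: min_t() casts both operands to the named type before
comparing, so a large size_t count run through min_t(int, ...) is clipped
first. A minimal illustration (64-bit, 4K pages assumed):

    size_t len = 0x100000001UL;                  /* > INT_MAX */
    int bad = min_t(int, len, PAGE_SIZE);        /* (int)len == 1, so bad == 1 */
    size_t ok = min_t(size_t, len, PAGE_SIZE);   /* == PAGE_SIZE, as intended */
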
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index af31a0ee4057..eaa9f9be5b87 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -40,14 +40,14 @@ struct skcipher_ctx {
struct af_alg_completion completion;
atomic_t inflight;
- unsigned used;
+ size_t used;
unsigned int len;
bool more;
bool merge;
bool enc;
- struct ablkcipher_request req;
+ struct skcipher_request req;
};
struct skcipher_async_rsgl {
@@ -64,13 +64,13 @@ struct skcipher_async_req {
};
#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
- crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req)))
+ crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)))
#define GET_REQ_SIZE(ctx) \
- crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req))
+ crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))
#define GET_IV_SIZE(ctx) \
- crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(&ctx->req))
+ crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req))
#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
sizeof(struct scatterlist) - 1)
@@ -153,7 +153,7 @@ static int skcipher_alloc_sgl(struct sock *sk)
return 0;
}
-static void skcipher_pull_sgl(struct sock *sk, int used, int put)
+static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
{
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
@@ -167,7 +167,7 @@ static void skcipher_pull_sgl(struct sock *sk, int used, int put)
sg = sgl->sg;
for (i = 0; i < sgl->cur; i++) {
- int plen = min_t(int, used, sg[i].length);
+ size_t plen = min_t(size_t, used, sg[i].length);
if (!sg_page(sg + i))
continue;
@@ -212,7 +212,7 @@ static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
if (flags & MSG_DONTWAIT)
return -EAGAIN;
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
for (;;) {
if (signal_pending(current))
@@ -238,7 +238,7 @@ static void skcipher_wmem_wakeup(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
- if (wq_has_sleeper(wq))
+ if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
POLLRDNORM |
POLLRDBAND);
@@ -258,7 +258,7 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
return -EAGAIN;
}
- set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
for (;;) {
if (signal_pending(current))
@@ -272,7 +272,7 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
}
finish_wait(sk_sleep(sk), &wait);
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
return err;
}
@@ -288,7 +288,7 @@ static void skcipher_data_wakeup(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
- if (wq_has_sleeper(wq))
+ if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
POLLRDNORM |
POLLRDBAND);
@@ -302,8 +302,8 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
- unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
+ unsigned ivsize = crypto_skcipher_ivsize(tfm);
struct skcipher_sg_list *sgl;
struct af_alg_control con = {};
long copied = 0;
@@ -348,7 +348,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
while (size) {
struct scatterlist *sg;
unsigned long len = size;
- int plen;
+ size_t plen;
if (ctx->merge) {
sgl = list_entry(ctx->tsgl.prev,
@@ -390,7 +390,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
sg_unmark_end(sg + sgl->cur);
do {
i = sgl->cur;
- plen = min_t(int, len, PAGE_SIZE);
+ plen = min_t(size_t, len, PAGE_SIZE);
sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
err = -ENOMEM;
@@ -507,7 +507,7 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
struct skcipher_async_req *sreq;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
struct skcipher_async_rsgl *last_rsgl = NULL;
unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
unsigned int reqlen = sizeof(struct skcipher_async_req) +
@@ -531,9 +531,9 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
}
sg_init_table(sreq->tsg, tx_nents);
memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
- ablkcipher_request_set_tfm(req, crypto_ablkcipher_reqtfm(&ctx->req));
- ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- skcipher_async_cb, sk);
+ skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req));
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ skcipher_async_cb, sk);
while (iov_iter_count(&msg->msg_iter)) {
struct skcipher_async_rsgl *rsgl;
@@ -608,10 +608,10 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
if (mark)
sg_mark_end(sreq->tsg + txbufs - 1);
- ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
- len, sreq->iv);
- err = ctx->enc ? crypto_ablkcipher_encrypt(req) :
- crypto_ablkcipher_decrypt(req);
+ skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
+ len, sreq->iv);
+ err = ctx->enc ? crypto_skcipher_encrypt(req) :
+ crypto_skcipher_decrypt(req);
if (err == -EINPROGRESS) {
atomic_inc(&ctx->inflight);
err = -EIOCBQUEUED;
@@ -632,7 +632,7 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
- unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
+ unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm(
&ctx->req));
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
@@ -669,14 +669,13 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
if (!used)
goto free;
- ablkcipher_request_set_crypt(&ctx->req, sg,
- ctx->rsgl.sg, used,
- ctx->iv);
+ skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
+ ctx->iv);
err = af_alg_wait_for_completion(
ctx->enc ?
- crypto_ablkcipher_encrypt(&ctx->req) :
- crypto_ablkcipher_decrypt(&ctx->req),
+ crypto_skcipher_encrypt(&ctx->req) :
+ crypto_skcipher_decrypt(&ctx->req),
&ctx->completion);
free:
@@ -751,17 +750,17 @@ static struct proto_ops algif_skcipher_ops = {
static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
- return crypto_alloc_ablkcipher(name, type, mask);
+ return crypto_alloc_skcipher(name, type, mask);
}
static void skcipher_release(void *private)
{
- crypto_free_ablkcipher(private);
+ crypto_free_skcipher(private);
}
static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
- return crypto_ablkcipher_setkey(private, key, keylen);
+ return crypto_skcipher_setkey(private, key, keylen);
}
static void skcipher_wait(struct sock *sk)
@@ -778,13 +777,13 @@ static void skcipher_sock_destruct(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
if (atomic_read(&ctx->inflight))
skcipher_wait(sk);
skcipher_free_sgl(sk);
- sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
+ sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
}
@@ -793,20 +792,20 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
{
struct skcipher_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
- unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private);
+ unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(private);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private),
+ ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(private),
GFP_KERNEL);
if (!ctx->iv) {
sock_kfree_s(sk, ctx, len);
return -ENOMEM;
}
- memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private));
+ memset(ctx->iv, 0, crypto_skcipher_ivsize(private));
INIT_LIST_HEAD(&ctx->tsgl);
ctx->len = len;
@@ -819,9 +818,9 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
ask->private = ctx;
- ablkcipher_request_set_tfm(&ctx->req, private);
- ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- af_alg_complete, &ctx->completion);
+ skcipher_request_set_tfm(&ctx->req, private);
+ skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ af_alg_complete, &ctx->completion);
sk->sk_destruct = skcipher_sock_destruct;
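Note: this is a mechanical port from the legacy ablkcipher interface to the
skcipher API; every ablkcipher_* call maps onto a skcipher_* call one for
one. A hedged sketch of the target API in isolation ("cbc(aes)" is only an
example, and a real caller must also handle -EINPROGRESS from an async
implementation):

    static int toy_skcipher_encrypt(struct scatterlist *src,
                                    struct scatterlist *dst,
                                    unsigned int len, const u8 *key,
                                    unsigned int keylen, u8 *iv)
    {
            struct crypto_skcipher *tfm;
            struct skcipher_request *req;
            int err;

            tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            req = skcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    crypto_free_skcipher(tfm);
                    return -ENOMEM;
            }

            err = crypto_skcipher_setkey(tfm, key, keylen);
            if (!err) {
                    skcipher_request_set_crypt(req, src, dst, len, iv);
                    err = crypto_skcipher_encrypt(req);
            }

            skcipher_request_free(req);
            crypto_free_skcipher(tfm);
            return err;
    }
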
diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c
index 9441240f7d2a..004d5fc8e56b 100644
--- a/crypto/asymmetric_keys/signature.c
+++ b/crypto/asymmetric_keys/signature.c
@@ -13,7 +13,7 @@
#define pr_fmt(fmt) "SIG: "fmt
#include <keys/asymmetric-subtype.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/err.h>
#include <crypto/public_key.h>
#include "asymmetric_keys.h"
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index f8c0b8dbeb75..88bc8e6b2a54 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -53,7 +53,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
struct dmaengine_unmap_data *unmap = NULL;
if (device)
- unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
+ unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
unsigned long dma_prep_flags = 0;
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 5d355e0c2633..c0748bbd4c08 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -188,7 +188,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
if (device)
- unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+ unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
/* XORing P/Q is only implemented in software */
if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
@@ -307,7 +307,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
BUG_ON(disks < 4);
if (device)
- unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+ unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
if (unmap && disks <= dma_maxpq(device, 0) &&
is_dma_pq_aligned(device, offset, 0, len)) {
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 934a84981495..8fab6275ea1f 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -41,7 +41,7 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
u8 *a, *b, *c;
if (dma)
- unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+ unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
if (unmap) {
struct device *dev = dma->dev;
@@ -105,7 +105,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
u8 *d, *s;
if (dma)
- unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+ unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
if (unmap) {
dma_addr_t dma_dest[2];
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index e1bce26cd4f9..da75777f2b3f 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -182,7 +182,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
BUG_ON(src_cnt <= 1);
if (device)
- unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO);
+ unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT);
if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
struct dma_async_tx_descriptor *tx;
@@ -278,7 +278,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
BUG_ON(src_cnt <= 1);
if (device)
- unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO);
+ unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOWAIT);
if (unmap && src_cnt <= device->max_xor &&
is_dma_xor_aligned(device, offset, 0, len)) {
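Note: all four async_tx files get the same one-line change. These
allocations happen on the raid I/O completion path, where even GFP_NOIO
may sleep; GFP_NOWAIT fails fast instead, which is harmless because every
caller already falls back to the synchronous CPU path when unmap is NULL:

    unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
    if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
            /* offload the copy to the DMA engine */
    } else {
            /* no descriptor: fall back to a plain CPU memcpy */
    }
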
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 11b981492031..8cc1622b2ee0 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -326,12 +326,12 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc,
if (WARN_ON_ONCE(in_irq()))
return -EDEADLK;
+ walk->iv = desc->info;
walk->nbytes = walk->total;
if (unlikely(!walk->total))
return 0;
walk->buffer = NULL;
- walk->iv = desc->info;
if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
int err = blkcipher_copy_iv(walk);
if (err)
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index 99c3cce01290..7b6b935cef23 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -130,6 +130,9 @@ static int chacha_decrypt(struct aead_request *req)
struct scatterlist *src, *dst;
int err;
+ if (rctx->cryptlen == 0)
+ goto skip;
+
chacha_iv(creq->iv, req, 1);
sg_init_table(rctx->src, 2);
@@ -150,6 +153,7 @@ static int chacha_decrypt(struct aead_request *req)
if (err)
return err;
+skip:
return poly_verify_tag(req);
}
@@ -415,6 +419,9 @@ static int chacha_encrypt(struct aead_request *req)
struct scatterlist *src, *dst;
int err;
+ if (req->cryptlen == 0)
+ goto skip;
+
chacha_iv(creq->iv, req, 1);
sg_init_table(rctx->src, 2);
@@ -435,6 +442,7 @@ static int chacha_encrypt(struct aead_request *req)
if (err)
return err;
+skip:
return poly_genkey(req);
}
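Note: RFC 7539 permits an empty plaintext (authentication of associated
data only). With cryptlen == 0 there is nothing for ChaCha20 to process,
so both paths now jump straight to the Poly1305 step. A hedged sketch of
the request shape this enables (dst must still provide room for the
16-byte tag on encryption):

    aead_request_set_crypt(req, src, dst, 0, iv);   /* no payload */
    aead_request_set_ad(req, assoclen);             /* AAD only */
    err = crypto_aead_encrypt(req);                 /* tag is still produced */
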
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index c81861b1350b..7921251cdb13 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -637,6 +637,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.halg.base.cra_flags = type;
inst->alg.halg.digestsize = salg->digestsize;
+ inst->alg.halg.statesize = salg->statesize;
inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
@@ -887,8 +888,7 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
"cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-EINVAL);
- type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
- type |= CRYPTO_ALG_TYPE_BLKCIPHER;
+ type = crypto_skcipher_type(type);
mask &= ~CRYPTO_ALG_TYPE_MASK;
mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
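Note: the statesize line is the functional fix in this file; without it a
cryptd-wrapped hash advertised a state size of zero, breaking partial-state
export/import. A hedged fragment (tfm, req and req2 assumed set up on the
same algorithm):

    u8 *state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
    if (!state)
            return -ENOMEM;

    err = crypto_ahash_export(req, state);          /* save partial state */
    if (!err)
            err = crypto_ahash_import(req2, state); /* resume elsewhere */
    kfree(state);
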
diff --git a/crypto/drbg.c b/crypto/drbg.c
index a7c23146b87f..ab6ef1d08568 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -626,7 +626,7 @@ out:
return len;
}
-static struct drbg_state_ops drbg_ctr_ops = {
+static const struct drbg_state_ops drbg_ctr_ops = {
.update = drbg_ctr_update,
.generate = drbg_ctr_generate,
.crypto_init = drbg_init_sym_kernel,
@@ -752,7 +752,7 @@ static int drbg_hmac_generate(struct drbg_state *drbg,
return len;
}
-static struct drbg_state_ops drbg_hmac_ops = {
+static const struct drbg_state_ops drbg_hmac_ops = {
.update = drbg_hmac_update,
.generate = drbg_hmac_generate,
.crypto_init = drbg_init_hash_kernel,
@@ -1032,7 +1032,7 @@ out:
* scratchpad usage: as update and generate are used isolated, both
* can use the scratchpad
*/
-static struct drbg_state_ops drbg_hash_ops = {
+static const struct drbg_state_ops drbg_hash_ops = {
.update = drbg_hash_update,
.generate = drbg_hash_generate,
.crypto_init = drbg_init_hash_kernel,
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index fe5b495a434d..f78d4fc4e38a 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -128,13 +128,9 @@ static void mcryptd_opportunistic_flush(void)
flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
while (single_task_running()) {
mutex_lock(&flist->lock);
- if (list_empty(&flist->list)) {
- mutex_unlock(&flist->lock);
- return;
- }
- cstate = list_entry(flist->list.next,
+ cstate = list_first_entry_or_null(&flist->list,
struct mcryptd_alg_cstate, flush_list);
- if (!cstate->flusher_engaged) {
+ if (!cstate || !cstate->flusher_engaged) {
mutex_unlock(&flist->lock);
return;
}
diff --git a/crypto/md5.c b/crypto/md5.c
index 33d17e9a8702..2355a7c25c45 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -24,6 +24,12 @@
#include <linux/cryptohash.h>
#include <asm/byteorder.h>
+const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
+ 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
+ 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
+};
+EXPORT_SYMBOL_GPL(md5_zero_message_hash);
+
/* XXX: this stuff can be optimized */
static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
{
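Note: this exports the well-known digest of the empty message; the sha1 and
sha224/sha256 hunks below do the same. The intended consumers are drivers
whose hardware cannot process zero-length input, along the lines of this
hedged sketch (toy_final and do_hardware_hash are hypothetical):

    static int toy_final(struct ahash_request *req, u8 *out)
    {
            if (!req->nbytes) {
                    memcpy(out, md5_zero_message_hash, MD5_DIGEST_SIZE);
                    return 0;
            }
            return do_hardware_hash(req, out);
    }
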
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
new file mode 100644
index 000000000000..50f5c97e1087
--- /dev/null
+++ b/crypto/rsa-pkcs1pad.c
@@ -0,0 +1,628 @@
+/*
+ * RSA padding templates.
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/akcipher.h>
+#include <crypto/internal/akcipher.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/random.h>
+
+struct pkcs1pad_ctx {
+ struct crypto_akcipher *child;
+
+ unsigned int key_size;
+};
+
+struct pkcs1pad_request {
+ struct akcipher_request child_req;
+
+ struct scatterlist in_sg[3], out_sg[2];
+ uint8_t *in_buf, *out_buf;
+};
+
+static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int err, size;
+
+ err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);
+
+ if (!err) {
+ /* Find out new modulus size from rsa implementation */
+ size = crypto_akcipher_maxsize(ctx->child);
+
+ ctx->key_size = size > 0 ? size : 0;
+ if (size <= 0)
+ err = size;
+ }
+
+ return err;
+}
+
+static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int err, size;
+
+ err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);
+
+ if (!err) {
+ /* Find out new modulus size from rsa implementation */
+ size = crypto_akcipher_maxsize(ctx->child);
+
+ ctx->key_size = size > 0 ? size : 0;
+ if (size <= 0)
+ err = size;
+ }
+
+ return err;
+}
+
+static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
+{
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ /*
+ * The maximum destination buffer size for the encrypt/sign operations
+ * will be the same as for RSA, even though it's smaller for
+ * decrypt/verify.
+ */
+
+ return ctx->key_size ?: -EINVAL;
+}
+
+static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
+ struct scatterlist *next)
+{
+ int nsegs = next ? 1 : 0;
+
+ if (offset_in_page(buf) + len <= PAGE_SIZE) {
+ nsegs += 1;
+ sg_init_table(sg, nsegs);
+ sg_set_buf(sg, buf, len);
+ } else {
+ nsegs += 2;
+ sg_init_table(sg, nsegs);
+ sg_set_buf(sg + 0, buf, PAGE_SIZE - offset_in_page(buf));
+ sg_set_buf(sg + 1, buf + PAGE_SIZE - offset_in_page(buf),
+ offset_in_page(buf) + len - PAGE_SIZE);
+ }
+
+ if (next)
+ sg_chain(sg, nsegs, next);
+}
+
+static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ size_t pad_len = ctx->key_size - req_ctx->child_req.dst_len;
+ size_t chunk_len, pad_left;
+ struct sg_mapping_iter miter;
+
+ if (!err) {
+ if (pad_len) {
+ sg_miter_start(&miter, req->dst,
+ sg_nents_for_len(req->dst, pad_len),
+ SG_MITER_ATOMIC | SG_MITER_TO_SG);
+
+ pad_left = pad_len;
+ while (pad_left) {
+ sg_miter_next(&miter);
+
+ chunk_len = min(miter.length, pad_left);
+ memset(miter.addr, 0, chunk_len);
+ pad_left -= chunk_len;
+ }
+
+ sg_miter_stop(&miter);
+ }
+
+ sg_pcopy_from_buffer(req->dst,
+ sg_nents_for_len(req->dst, ctx->key_size),
+ req_ctx->out_buf, req_ctx->child_req.dst_len,
+ pad_len);
+ }
+ req->dst_len = ctx->key_size;
+
+ kfree(req_ctx->in_buf);
+ kzfree(req_ctx->out_buf);
+
+ return err;
+}
+
+static void pkcs1pad_encrypt_sign_complete_cb(
+ struct crypto_async_request *child_async_req, int err)
+{
+ struct akcipher_request *req = child_async_req->data;
+ struct crypto_async_request async_req;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ async_req.data = req->base.data;
+ async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+ async_req.flags = child_async_req->flags;
+ req->base.complete(&async_req,
+ pkcs1pad_encrypt_sign_complete(req, err));
+}
+
+static int pkcs1pad_encrypt(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ int err;
+ unsigned int i, ps_end;
+
+ if (!ctx->key_size)
+ return -EINVAL;
+
+ if (req->src_len > ctx->key_size - 11)
+ return -EOVERFLOW;
+
+ if (req->dst_len < ctx->key_size) {
+ req->dst_len = ctx->key_size;
+ return -EOVERFLOW;
+ }
+
+ if (ctx->key_size > PAGE_SIZE)
+ return -ENOTSUPP;
+
+ /*
+	 * Replace both input and output so the padding can be added to the
+	 * input and any missing leading zeros restored in the output.
+ */
+ req_ctx->child_req.src = req_ctx->in_sg;
+ req_ctx->child_req.src_len = ctx->key_size - 1;
+ req_ctx->child_req.dst = req_ctx->out_sg;
+ req_ctx->child_req.dst_len = ctx->key_size;
+
+ req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!req_ctx->in_buf)
+ return -ENOMEM;
+
+ ps_end = ctx->key_size - req->src_len - 2;
+ req_ctx->in_buf[0] = 0x02;
+ for (i = 1; i < ps_end; i++)
+ req_ctx->in_buf[i] = 1 + prandom_u32_max(255);
+ req_ctx->in_buf[ps_end] = 0x00;
+
+ pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
+ ctx->key_size - 1 - req->src_len, req->src);
+
+ req_ctx->out_buf = kmalloc(ctx->key_size,
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!req_ctx->out_buf) {
+ kfree(req_ctx->in_buf);
+ return -ENOMEM;
+ }
+
+ pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+ ctx->key_size, NULL);
+
+ akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+ akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+ pkcs1pad_encrypt_sign_complete_cb, req);
+
+ err = crypto_akcipher_encrypt(&req_ctx->child_req);
+ if (err != -EINPROGRESS &&
+ (err != -EBUSY ||
+ !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ return pkcs1pad_encrypt_sign_complete(req, err);
+
+ return err;
+}
+
+static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ unsigned int pos;
+
+ if (err == -EOVERFLOW)
+ /* Decrypted value had no leading 0 byte */
+ err = -EINVAL;
+
+ if (err)
+ goto done;
+
+ if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (req_ctx->out_buf[0] != 0x02) {
+ err = -EINVAL;
+ goto done;
+ }
+ for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
+ if (req_ctx->out_buf[pos] == 0x00)
+ break;
+ if (pos < 9 || pos == req_ctx->child_req.dst_len) {
+ err = -EINVAL;
+ goto done;
+ }
+ pos++;
+
+ if (req->dst_len < req_ctx->child_req.dst_len - pos)
+ err = -EOVERFLOW;
+ req->dst_len = req_ctx->child_req.dst_len - pos;
+
+ if (!err)
+ sg_copy_from_buffer(req->dst,
+ sg_nents_for_len(req->dst, req->dst_len),
+ req_ctx->out_buf + pos, req->dst_len);
+
+done:
+ kzfree(req_ctx->out_buf);
+
+ return err;
+}
+
+static void pkcs1pad_decrypt_complete_cb(
+ struct crypto_async_request *child_async_req, int err)
+{
+ struct akcipher_request *req = child_async_req->data;
+ struct crypto_async_request async_req;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ async_req.data = req->base.data;
+ async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+ async_req.flags = child_async_req->flags;
+ req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err));
+}
+
+static int pkcs1pad_decrypt(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ int err;
+
+ if (!ctx->key_size || req->src_len != ctx->key_size)
+ return -EINVAL;
+
+ if (ctx->key_size > PAGE_SIZE)
+ return -ENOTSUPP;
+
+ /* Reuse input buffer, output to a new buffer */
+ req_ctx->child_req.src = req->src;
+ req_ctx->child_req.src_len = req->src_len;
+ req_ctx->child_req.dst = req_ctx->out_sg;
+ req_ctx->child_req.dst_len = ctx->key_size - 1;
+
+ req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!req_ctx->out_buf)
+ return -ENOMEM;
+
+ pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+ ctx->key_size - 1, NULL);
+
+ akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+ akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+ pkcs1pad_decrypt_complete_cb, req);
+
+ err = crypto_akcipher_decrypt(&req_ctx->child_req);
+ if (err != -EINPROGRESS &&
+ (err != -EBUSY ||
+ !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ return pkcs1pad_decrypt_complete(req, err);
+
+ return err;
+}
+
+static int pkcs1pad_sign(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ int err;
+ unsigned int ps_end;
+
+ if (!ctx->key_size)
+ return -EINVAL;
+
+ if (req->src_len > ctx->key_size - 11)
+ return -EOVERFLOW;
+
+ if (req->dst_len < ctx->key_size) {
+ req->dst_len = ctx->key_size;
+ return -EOVERFLOW;
+ }
+
+ if (ctx->key_size > PAGE_SIZE)
+ return -ENOTSUPP;
+
+ /*
+	 * Replace both input and output so the padding can be added to the
+	 * input and any missing leading zeros restored in the output.
+ */
+ req_ctx->child_req.src = req_ctx->in_sg;
+ req_ctx->child_req.src_len = ctx->key_size - 1;
+ req_ctx->child_req.dst = req_ctx->out_sg;
+ req_ctx->child_req.dst_len = ctx->key_size;
+
+ req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!req_ctx->in_buf)
+ return -ENOMEM;
+
+ ps_end = ctx->key_size - req->src_len - 2;
+ req_ctx->in_buf[0] = 0x01;
+ memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
+ req_ctx->in_buf[ps_end] = 0x00;
+
+ pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
+ ctx->key_size - 1 - req->src_len, req->src);
+
+ req_ctx->out_buf = kmalloc(ctx->key_size,
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!req_ctx->out_buf) {
+ kfree(req_ctx->in_buf);
+ return -ENOMEM;
+ }
+
+ pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+ ctx->key_size, NULL);
+
+ akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+ akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+ pkcs1pad_encrypt_sign_complete_cb, req);
+
+ err = crypto_akcipher_sign(&req_ctx->child_req);
+ if (err != -EINPROGRESS &&
+ (err != -EBUSY ||
+ !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ return pkcs1pad_encrypt_sign_complete(req, err);
+
+ return err;
+}
+
+static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ unsigned int pos;
+
+ if (err == -EOVERFLOW)
+ /* Decrypted value had no leading 0 byte */
+ err = -EINVAL;
+
+ if (err)
+ goto done;
+
+ if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (req_ctx->out_buf[0] != 0x01) {
+ err = -EINVAL;
+ goto done;
+ }
+ for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
+ if (req_ctx->out_buf[pos] != 0xff)
+ break;
+ if (pos < 9 || pos == req_ctx->child_req.dst_len ||
+ req_ctx->out_buf[pos] != 0x00) {
+ err = -EINVAL;
+ goto done;
+ }
+ pos++;
+
+ if (req->dst_len < req_ctx->child_req.dst_len - pos)
+ err = -EOVERFLOW;
+ req->dst_len = req_ctx->child_req.dst_len - pos;
+
+ if (!err)
+ sg_copy_from_buffer(req->dst,
+ sg_nents_for_len(req->dst, req->dst_len),
+ req_ctx->out_buf + pos, req->dst_len);
+
+done:
+ kzfree(req_ctx->out_buf);
+
+ return err;
+}
+
+static void pkcs1pad_verify_complete_cb(
+ struct crypto_async_request *child_async_req, int err)
+{
+ struct akcipher_request *req = child_async_req->data;
+ struct crypto_async_request async_req;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ async_req.data = req->base.data;
+ async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+ async_req.flags = child_async_req->flags;
+ req->base.complete(&async_req, pkcs1pad_verify_complete(req, err));
+}
+
+/*
+ * The verify operation is here for completeness, similar to the
+ * verification defined in RFC 2313 section 10.2, except that block type 0
+ * is not accepted, as in RFC 2437. RFC 2437 section 9.2 doesn't define any
+ * operation for retrieving the DigestInfo from a signature; instead, the
+ * user is expected to call the sign operation to generate the expected
+ * signature and compare signatures rather than message digests.
+ */
+static int pkcs1pad_verify(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ int err;
+
+ if (!ctx->key_size || req->src_len != ctx->key_size)
+ return -EINVAL;
+
+ if (ctx->key_size > PAGE_SIZE)
+ return -ENOTSUPP;
+
+ /* Reuse input buffer, output to a new buffer */
+ req_ctx->child_req.src = req->src;
+ req_ctx->child_req.src_len = req->src_len;
+ req_ctx->child_req.dst = req_ctx->out_sg;
+ req_ctx->child_req.dst_len = ctx->key_size - 1;
+
+ req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!req_ctx->out_buf)
+ return -ENOMEM;
+
+ pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+ ctx->key_size - 1, NULL);
+
+ akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+ akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+ pkcs1pad_verify_complete_cb, req);
+
+ err = crypto_akcipher_verify(&req_ctx->child_req);
+ if (err != -EINPROGRESS &&
+ (err != -EBUSY ||
+ !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ return pkcs1pad_verify_complete(req, err);
+
+ return err;
+}
+
+static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct akcipher_instance *inst = akcipher_alg_instance(tfm);
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct crypto_akcipher *child_tfm;
+
+ child_tfm = crypto_spawn_akcipher(akcipher_instance_ctx(inst));
+ if (IS_ERR(child_tfm))
+ return PTR_ERR(child_tfm);
+
+ ctx->child = child_tfm;
+
+ return 0;
+}
+
+static void pkcs1pad_exit_tfm(struct crypto_akcipher *tfm)
+{
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ crypto_free_akcipher(ctx->child);
+}
+
+static void pkcs1pad_free(struct akcipher_instance *inst)
+{
+ struct crypto_akcipher_spawn *spawn = akcipher_instance_ctx(inst);
+
+ crypto_drop_akcipher(spawn);
+
+ kfree(inst);
+}
+
+static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct crypto_attr_type *algt;
+ struct akcipher_instance *inst;
+ struct crypto_akcipher_spawn *spawn;
+ struct akcipher_alg *rsa_alg;
+ const char *rsa_alg_name;
+ int err;
+
+ algt = crypto_get_attr_type(tb);
+ if (IS_ERR(algt))
+ return PTR_ERR(algt);
+
+ if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask)
+ return -EINVAL;
+
+ rsa_alg_name = crypto_attr_alg_name(tb[1]);
+ if (IS_ERR(rsa_alg_name))
+ return PTR_ERR(rsa_alg_name);
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ spawn = akcipher_instance_ctx(inst);
+ crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst));
+ err = crypto_grab_akcipher(spawn, rsa_alg_name, 0,
+ crypto_requires_sync(algt->type, algt->mask));
+ if (err)
+ goto out_free_inst;
+
+ rsa_alg = crypto_spawn_akcipher_alg(spawn);
+
+ err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.base.cra_name,
+ CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
+ rsa_alg->base.cra_name) >=
+ CRYPTO_MAX_ALG_NAME ||
+ snprintf(inst->alg.base.cra_driver_name,
+ CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
+ rsa_alg->base.cra_driver_name) >=
+ CRYPTO_MAX_ALG_NAME)
+ goto out_drop_alg;
+
+ inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
+ inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
+ inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx);
+
+ inst->alg.init = pkcs1pad_init_tfm;
+ inst->alg.exit = pkcs1pad_exit_tfm;
+
+ inst->alg.encrypt = pkcs1pad_encrypt;
+ inst->alg.decrypt = pkcs1pad_decrypt;
+ inst->alg.sign = pkcs1pad_sign;
+ inst->alg.verify = pkcs1pad_verify;
+ inst->alg.set_pub_key = pkcs1pad_set_pub_key;
+ inst->alg.set_priv_key = pkcs1pad_set_priv_key;
+ inst->alg.max_size = pkcs1pad_get_max_size;
+ inst->alg.reqsize = sizeof(struct pkcs1pad_request) + rsa_alg->reqsize;
+
+ inst->free = pkcs1pad_free;
+
+ err = akcipher_register_instance(tmpl, inst);
+ if (err)
+ goto out_drop_alg;
+
+ return 0;
+
+out_drop_alg:
+ crypto_drop_akcipher(spawn);
+out_free_inst:
+ kfree(inst);
+ return err;
+}
+
+struct crypto_template rsa_pkcs1pad_tmpl = {
+ .name = "pkcs1pad",
+ .create = pkcs1pad_create,
+ .module = THIS_MODULE,
+};
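Note: the template wraps any akcipher (in practice "rsa") with PKCS#1 v1.5
padding: encrypt/sign build the 0x00 || BT || PS || 0x00 || M block, and
decrypt/verify strip and check it. The child request works on key_size - 1
bytes because the leading 0x00 of the padded block is implicit in any value
smaller than the modulus. A hedged usage sketch:

    struct crypto_akcipher *tfm;
    int err;

    tfm = crypto_alloc_akcipher("pkcs1pad(rsa)", 0, 0);
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);

    err = crypto_akcipher_set_pub_key(tfm, key, keylen);
    /* For encrypt/sign, src_len is limited to maxsize - 11: one byte
     * each for the 0x00 and block-type markers, at least eight bytes
     * of padding string, and the 0x00 separator. */
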
diff --git a/crypto/rsa.c b/crypto/rsa.c
index 1093e041db03..77d737f52147 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -13,6 +13,7 @@
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
+#include <crypto/algapi.h>
/*
* RSAEP function [RFC3447 sec 5.1.1]
@@ -91,12 +92,6 @@ static int rsa_enc(struct akcipher_request *req)
goto err_free_c;
}
- if (req->dst_len < mpi_get_size(pkey->n)) {
- req->dst_len = mpi_get_size(pkey->n);
- ret = -EOVERFLOW;
- goto err_free_c;
- }
-
ret = -ENOMEM;
m = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!m)
@@ -136,12 +131,6 @@ static int rsa_dec(struct akcipher_request *req)
goto err_free_m;
}
- if (req->dst_len < mpi_get_size(pkey->n)) {
- req->dst_len = mpi_get_size(pkey->n);
- ret = -EOVERFLOW;
- goto err_free_m;
- }
-
ret = -ENOMEM;
c = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!c)
@@ -180,12 +169,6 @@ static int rsa_sign(struct akcipher_request *req)
goto err_free_s;
}
- if (req->dst_len < mpi_get_size(pkey->n)) {
- req->dst_len = mpi_get_size(pkey->n);
- ret = -EOVERFLOW;
- goto err_free_s;
- }
-
ret = -ENOMEM;
m = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!m)
@@ -225,12 +208,6 @@ static int rsa_verify(struct akcipher_request *req)
goto err_free_m;
}
- if (req->dst_len < mpi_get_size(pkey->n)) {
- req->dst_len = mpi_get_size(pkey->n);
- ret = -EOVERFLOW;
- goto err_free_m;
- }
-
ret = -ENOMEM;
s = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!s) {
@@ -339,11 +316,24 @@ static struct akcipher_alg rsa = {
static int rsa_init(void)
{
- return crypto_register_akcipher(&rsa);
+ int err;
+
+ err = crypto_register_akcipher(&rsa);
+ if (err)
+ return err;
+
+ err = crypto_register_template(&rsa_pkcs1pad_tmpl);
+ if (err) {
+ crypto_unregister_akcipher(&rsa);
+ return err;
+ }
+
+ return 0;
}
static void rsa_exit(void)
{
+ crypto_unregister_template(&rsa_pkcs1pad_tmpl);
crypto_unregister_akcipher(&rsa);
}
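Note: as I read it, the dropped dst_len checks push output-size policy down
a layer; the MPI write helpers already return -EOVERFLOW when the
destination scatterlist is too small, so callers may size dst for the
expected result rather than the full modulus. The worst case is still
available up front:

    int max = crypto_akcipher_maxsize(tfm);   /* modulus size in bytes */
    if (max <= 0)
            return max ?: -EINVAL;            /* no key set yet */
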
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 39e3acc438d9..6877cbb9105f 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -26,6 +26,13 @@
#include <crypto/sha1_base.h>
#include <asm/byteorder.h>
+const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE] = {
+ 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
+ 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
+ 0xaf, 0xd8, 0x07, 0x09
+};
+EXPORT_SYMBOL_GPL(sha1_zero_message_hash);
+
static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src,
int blocks)
{
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 78431163ed3c..8f9c47e1a96e 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -27,6 +27,22 @@
#include <asm/byteorder.h>
#include <asm/unaligned.h>
+const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = {
+ 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
+ 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
+ 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
+ 0x2f
+};
+EXPORT_SYMBOL_GPL(sha224_zero_message_hash);
+
+const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = {
+ 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
+ 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
+ 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
+ 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
+};
+EXPORT_SYMBOL_GPL(sha256_zero_message_hash);
+
static inline u32 Ch(u32 x, u32 y, u32 z)
{
return z ^ (x & (y ^ z));
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 46a4a757d478..270bc4b82bd9 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1789,7 +1789,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec,
NULL, 0, 16, 16, aead_speed_template_20);
test_aead_speed("gcm(aes)", ENCRYPT, sec,
- NULL, 0, 16, 8, aead_speed_template_20);
+ NULL, 0, 16, 8, speed_template_16_24_32);
break;
case 212: