path: root/crypto
author     Linus Torvalds <torvalds@linux-foundation.org>   2010-05-21 14:46:51 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-05-21 14:46:51 -0700
commit     2a8ba8f032160552a3beffab8aae9019ff477504 (patch)
tree       b50f70a3c8f7c2e179e1587d33ea3542d68525f9 /crypto
parent     ec2a7587e0a91d5c1afe23a0a73edfce06c5e4e0 (diff)
parent     e954bc91bdd4bb08b8325478c5004b24a23a3522 (diff)
download   linux-2a8ba8f032160552a3beffab8aae9019ff477504.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (46 commits)
  random: simplify fips mode
  crypto: authenc - Fix cryptlen calculation
  crypto: talitos - add support for sha224
  crypto: talitos - add hash algorithms
  crypto: talitos - second prepare step for adding ahash algorithms
  crypto: talitos - prepare for adding ahash algorithms
  crypto: n2 - Add Niagara2 crypto driver
  crypto: skcipher - Add ablkcipher_walk interfaces
  crypto: testmgr - Add testing for async hashing and update/final
  crypto: tcrypt - Add speed tests for async hashing
  crypto: scatterwalk - Fix scatterwalk_done() test
  crypto: hifn_795x - Rename ablkcipher_walk to hifn_cipher_walk
  padata: Use get_online_cpus/put_online_cpus in padata_free
  padata: Add some code comments
  padata: Flush the padata queues actively
  padata: Use a timer to handle remaining objects in the reorder queues
  crypto: shash - Remove usage of CRYPTO_MINALIGN
  crypto: mv_cesa - Use resource_size
  crypto: omap - OMAP macros corrected
  padata: Use get_online_cpus/put_online_cpus
  ...

Fix up conflicts in arch/arm/mach-omap2/devices.c
Diffstat (limited to 'crypto')
-rw-r--r--   crypto/ablkcipher.c    277
-rw-r--r--   crypto/algapi.c          2
-rw-r--r--   crypto/authenc.c         8
-rw-r--r--   crypto/internal.h        2
-rw-r--r--   crypto/pcrypt.c         11
-rw-r--r--   crypto/scatterwalk.c     2
-rw-r--r--   crypto/shash.c           2
-rw-r--r--   crypto/tcrypt.c        343
-rw-r--r--   crypto/tcrypt.h         29
-rw-r--r--   crypto/testmgr.c        66
-rw-r--r--   crypto/testmgr.h        64
-rw-r--r--   crypto/vmac.c           75
12 files changed, 801 insertions(+), 80 deletions(-)
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index fe980dae1727..98a66103f4f2 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -24,10 +24,287 @@
#include <linux/slab.h>
#include <linux/seq_file.h>
+#include <crypto/scatterwalk.h>
+
#include "internal.h"
static const char *skcipher_default_geniv __read_mostly;
+struct ablkcipher_buffer {
+ struct list_head entry;
+ struct scatter_walk dst;
+ unsigned int len;
+ void *data;
+};
+
+enum {
+ ABLKCIPHER_WALK_SLOW = 1 << 0,
+};
+
+static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
+{
+ scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
+}
+
+void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
+{
+ struct ablkcipher_buffer *p, *tmp;
+
+ list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
+ ablkcipher_buffer_write(p);
+ list_del(&p->entry);
+ kfree(p);
+ }
+}
+EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);
+
+static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
+ struct ablkcipher_buffer *p)
+{
+ p->dst = walk->out;
+ list_add_tail(&p->entry, &walk->buffers);
+}
+
+/* Get a spot of the specified length that does not straddle a page.
+ * The caller needs to ensure that there is enough space for this operation.
+ */
+static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
+{
+ u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
+ return max(start, end_page);
+}
+
+static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
+ unsigned int bsize)
+{
+ unsigned int n = bsize;
+
+ for (;;) {
+ unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
+
+ if (len_this_page > n)
+ len_this_page = n;
+ scatterwalk_advance(&walk->out, n);
+ if (n == len_this_page)
+ break;
+ n -= len_this_page;
+ scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg));
+ }
+
+ return bsize;
+}
+
+static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
+ unsigned int n)
+{
+ scatterwalk_advance(&walk->in, n);
+ scatterwalk_advance(&walk->out, n);
+
+ return n;
+}
+
+static int ablkcipher_walk_next(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk);
+
+int ablkcipher_walk_done(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk, int err)
+{
+ struct crypto_tfm *tfm = req->base.tfm;
+ unsigned int nbytes = 0;
+
+ if (likely(err >= 0)) {
+ unsigned int n = walk->nbytes - err;
+
+ if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
+ n = ablkcipher_done_fast(walk, n);
+ else if (WARN_ON(err)) {
+ err = -EINVAL;
+ goto err;
+ } else
+ n = ablkcipher_done_slow(walk, n);
+
+ nbytes = walk->total - n;
+ err = 0;
+ }
+
+ scatterwalk_done(&walk->in, 0, nbytes);
+ scatterwalk_done(&walk->out, 1, nbytes);
+
+err:
+ walk->total = nbytes;
+ walk->nbytes = nbytes;
+
+ if (nbytes) {
+ crypto_yield(req->base.flags);
+ return ablkcipher_walk_next(req, walk);
+ }
+
+ if (walk->iv != req->info)
+ memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
+ if (walk->iv_buffer)
+ kfree(walk->iv_buffer);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
+
+static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk,
+ unsigned int bsize,
+ unsigned int alignmask,
+ void **src_p, void **dst_p)
+{
+ unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
+ struct ablkcipher_buffer *p;
+ void *src, *dst, *base;
+ unsigned int n;
+
+ n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
+ n += (aligned_bsize * 3 - (alignmask + 1) +
+ (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));
+
+ p = kmalloc(n, GFP_ATOMIC);
+ if (!p)
+ ablkcipher_walk_done(req, walk, -ENOMEM);
+
+ base = p + 1;
+
+ dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
+ src = dst = ablkcipher_get_spot(dst, bsize);
+
+ p->len = bsize;
+ p->data = dst;
+
+ scatterwalk_copychunks(src, &walk->in, bsize, 0);
+
+ ablkcipher_queue_write(walk, p);
+
+ walk->nbytes = bsize;
+ walk->flags |= ABLKCIPHER_WALK_SLOW;
+
+ *src_p = src;
+ *dst_p = dst;
+
+ return 0;
+}
+
+static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
+ struct crypto_tfm *tfm,
+ unsigned int alignmask)
+{
+ unsigned bs = walk->blocksize;
+ unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
+ unsigned aligned_bs = ALIGN(bs, alignmask + 1);
+ unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
+ (alignmask + 1);
+ u8 *iv;
+
+ size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
+ walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
+ if (!walk->iv_buffer)
+ return -ENOMEM;
+
+ iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
+ iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
+ iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
+ iv = ablkcipher_get_spot(iv, ivsize);
+
+ walk->iv = memcpy(iv, walk->iv, ivsize);
+ return 0;
+}
+
+static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk)
+{
+ walk->src.page = scatterwalk_page(&walk->in);
+ walk->src.offset = offset_in_page(walk->in.offset);
+ walk->dst.page = scatterwalk_page(&walk->out);
+ walk->dst.offset = offset_in_page(walk->out.offset);
+
+ return 0;
+}
+
+static int ablkcipher_walk_next(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk)
+{
+ struct crypto_tfm *tfm = req->base.tfm;
+ unsigned int alignmask, bsize, n;
+ void *src, *dst;
+ int err;
+
+ alignmask = crypto_tfm_alg_alignmask(tfm);
+ n = walk->total;
+ if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
+ req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
+ return ablkcipher_walk_done(req, walk, -EINVAL);
+ }
+
+ walk->flags &= ~ABLKCIPHER_WALK_SLOW;
+ src = dst = NULL;
+
+ bsize = min(walk->blocksize, n);
+ n = scatterwalk_clamp(&walk->in, n);
+ n = scatterwalk_clamp(&walk->out, n);
+
+ if (n < bsize ||
+ !scatterwalk_aligned(&walk->in, alignmask) ||
+ !scatterwalk_aligned(&walk->out, alignmask)) {
+ err = ablkcipher_next_slow(req, walk, bsize, alignmask,
+ &src, &dst);
+ goto set_phys_lowmem;
+ }
+
+ walk->nbytes = n;
+
+ return ablkcipher_next_fast(req, walk);
+
+set_phys_lowmem:
+ if (err >= 0) {
+ walk->src.page = virt_to_page(src);
+ walk->dst.page = virt_to_page(dst);
+ walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
+ walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
+ }
+
+ return err;
+}
+
+static int ablkcipher_walk_first(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk)
+{
+ struct crypto_tfm *tfm = req->base.tfm;
+ unsigned int alignmask;
+
+ alignmask = crypto_tfm_alg_alignmask(tfm);
+ if (WARN_ON_ONCE(in_irq()))
+ return -EDEADLK;
+
+ walk->nbytes = walk->total;
+ if (unlikely(!walk->total))
+ return 0;
+
+ walk->iv_buffer = NULL;
+ walk->iv = req->info;
+ if (unlikely(((unsigned long)walk->iv & alignmask))) {
+ int err = ablkcipher_copy_iv(walk, tfm, alignmask);
+ if (err)
+ return err;
+ }
+
+ scatterwalk_start(&walk->in, walk->in.sg);
+ scatterwalk_start(&walk->out, walk->out.sg);
+
+ return ablkcipher_walk_next(req, walk);
+}
+
+int ablkcipher_walk_phys(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk)
+{
+ walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
+ return ablkcipher_walk_first(req, walk);
+}
+EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
+
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
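
The ablkcipher_walk helpers added above are meant to be driven by drivers that need a request's scatterlists broken into contiguous, alignment-respecting chunks; the n2 driver added elsewhere in this merge is the first in-tree user. Below is a hedged sketch of the expected driver-side loop. Note that ablkcipher_walk_init() and ablkcipher_walk_complete() live in include/crypto/algapi.h rather than in this file, and process_chunk() is a hypothetical stand-in for the per-chunk cipher work:

#include <crypto/algapi.h>      /* struct ablkcipher_walk and the walk helpers */

/* Hypothetical per-chunk cipher step; returns the number of bytes it handled. */
static unsigned int process_chunk(struct ablkcipher_walk *walk,
                                  unsigned int nbytes);

static int example_encrypt(struct ablkcipher_request *req)
{
        struct ablkcipher_walk walk;
        int err;

        /* dst/src scatterlists and total byte count come from the request */
        ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
        err = ablkcipher_walk_phys(req, &walk);

        while (!err && walk.nbytes) {
                /*
                 * walk.src.page/offset and walk.dst.page/offset now describe
                 * one chunk that neither crosses a page nor violates the
                 * algorithm's alignment mask; misaligned segments are bounced
                 * through a temporary buffer by ablkcipher_next_slow().
                 */
                unsigned int done = process_chunk(&walk, walk.nbytes);

                /* report how many bytes of this chunk were NOT consumed */
                err = ablkcipher_walk_done(req, &walk, walk.nbytes - done);
        }

        /* write any queued bounce buffers back into the real dst scatterlist */
        ablkcipher_walk_complete(&walk);

        return err;
}
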
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 76fae27ed01c..c3cf1a69a47a 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -544,7 +544,7 @@ int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
{
int err = -EINVAL;
- if (frontend && (alg->cra_flags ^ frontend->type) & frontend->maskset)
+ if ((alg->cra_flags ^ frontend->type) & frontend->maskset)
goto out;
spawn->frontend = frontend;
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 05eb32e0d949..b9884ee0adb6 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -181,6 +181,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+ unsigned int cryptlen = req->cryptlen;
if (err)
goto out;
@@ -196,6 +197,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
goto out;
authsize = crypto_aead_authsize(authenc);
+ cryptlen -= authsize;
ihash = ahreq->result + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
@@ -209,7 +211,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
req->base.complete, req->base.data);
ablkcipher_request_set_crypt(abreq, req->src, req->dst,
- req->cryptlen, req->iv);
+ cryptlen, req->iv);
err = crypto_ablkcipher_decrypt(abreq);
@@ -228,11 +230,13 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+ unsigned int cryptlen = req->cryptlen;
if (err)
goto out;
authsize = crypto_aead_authsize(authenc);
+ cryptlen -= authsize;
ihash = ahreq->result + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
@@ -246,7 +250,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
req->base.complete, req->base.data);
ablkcipher_request_set_crypt(abreq, req->src, req->dst,
- req->cryptlen, req->iv);
+ cryptlen, req->iv);
err = crypto_ablkcipher_decrypt(abreq);
diff --git a/crypto/internal.h b/crypto/internal.h
index 2d226362e594..d4384b08ab29 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -6,7 +6,7 @@
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
+ * Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 80201241b698..247178cb98ec 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -315,16 +315,13 @@ out_free_inst:
goto out;
}
-static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb)
+static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb,
+ u32 type, u32 mask)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
- struct crypto_attr_type *algt;
-
- algt = crypto_get_attr_type(tb);
- alg = crypto_get_attr_alg(tb, algt->type,
- (algt->mask & CRYPTO_ALG_TYPE_MASK));
+ alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK));
if (IS_ERR(alg))
return ERR_CAST(alg);
@@ -365,7 +362,7 @@ static struct crypto_instance *pcrypt_alloc(struct rtattr **tb)
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AEAD:
- return pcrypt_alloc_aead(tb);
+ return pcrypt_alloc_aead(tb, algt->type, algt->mask);
}
return ERR_PTR(-EINVAL);
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 3de89a424401..41e529af0773 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -68,7 +68,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
void scatterwalk_done(struct scatter_walk *walk, int out, int more)
{
- if (!offset_in_page(walk->offset) || !more)
+ if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more)
scatterwalk_pagedone(walk, out, more);
}
EXPORT_SYMBOL_GPL(scatterwalk_done);
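
The one-line change above is subtle: with the old condition, scatterwalk_pagedone() ran only when the walk stopped exactly on a page boundary, so a scatterlist entry that ends in the middle of a page never had its page flushed nor the walk advanced to the next entry. The new condition also fires when the current entry is exhausted, because scatterwalk_pagelen() is clamped to the bytes remaining in that entry. For context, scatterwalk_pagelen() from include/crypto/scatterwalk.h of that era looks approximately like this (reproduced from memory as an aid; not part of this patch):

static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
{
        /* bytes remaining in the current scatterlist entry */
        unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
        /* bytes remaining in the current page */
        unsigned int len_this_page = offset_in_page(~walk->offset) + 1;

        return len_this_page > len ? len : len_this_page;
}

The result is a multiple of PAGE_SIZE exactly when the walk is finished with the page it was working in, which is when scatterwalk_pagedone() needs to run.
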
diff --git a/crypto/shash.c b/crypto/shash.c
index 91f7b9d83881..22fd9433141f 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -37,7 +37,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
u8 *buffer, *alignbuffer;
int err;
- absize = keylen + (alignmask & ~(CRYPTO_MINALIGN - 1));
+ absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
buffer = kmalloc(absize, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index a35159947a26..3ca68f9fc14d 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -394,6 +394,17 @@ out:
return 0;
}
+static void test_hash_sg_init(struct scatterlist *sg)
+{
+ int i;
+
+ sg_init_table(sg, TVMEMSIZE);
+ for (i = 0; i < TVMEMSIZE; i++) {
+ sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
+ memset(tvmem[i], 0xff, PAGE_SIZE);
+ }
+}
+
static void test_hash_speed(const char *algo, unsigned int sec,
struct hash_speed *speed)
{
@@ -423,12 +434,7 @@ static void test_hash_speed(const char *algo, unsigned int sec,
goto out;
}
- sg_init_table(sg, TVMEMSIZE);
- for (i = 0; i < TVMEMSIZE; i++) {
- sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
- memset(tvmem[i], 0xff, PAGE_SIZE);
- }
-
+ test_hash_sg_init(sg);
for (i = 0; speed[i].blen != 0; i++) {
if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
printk(KERN_ERR
@@ -437,6 +443,9 @@ static void test_hash_speed(const char *algo, unsigned int sec,
goto out;
}
+ if (speed[i].klen)
+ crypto_hash_setkey(tfm, tvmem[0], speed[i].klen);
+
printk(KERN_INFO "test%3u "
"(%5u byte blocks,%5u bytes per update,%4u updates): ",
i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
@@ -458,6 +467,250 @@ out:
crypto_free_hash(tfm);
}
+struct tcrypt_result {
+ struct completion completion;
+ int err;
+};
+
+static void tcrypt_complete(struct crypto_async_request *req, int err)
+{
+ struct tcrypt_result *res = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ res->err = err;
+ complete(&res->completion);
+}
+
+static inline int do_one_ahash_op(struct ahash_request *req, int ret)
+{
+ if (ret == -EINPROGRESS || ret == -EBUSY) {
+ struct tcrypt_result *tr = req->base.data;
+
+ ret = wait_for_completion_interruptible(&tr->completion);
+ if (!ret)
+ ret = tr->err;
+ INIT_COMPLETION(tr->completion);
+ }
+ return ret;
+}
+
+static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
+ char *out, int sec)
+{
+ unsigned long start, end;
+ int bcount;
+ int ret;
+
+ for (start = jiffies, end = start + sec * HZ, bcount = 0;
+ time_before(jiffies, end); bcount++) {
+ ret = do_one_ahash_op(req, crypto_ahash_digest(req));
+ if (ret)
+ return ret;
+ }
+
+ printk("%6u opers/sec, %9lu bytes/sec\n",
+ bcount / sec, ((long)bcount * blen) / sec);
+
+ return 0;
+}
+
+static int test_ahash_jiffies(struct ahash_request *req, int blen,
+ int plen, char *out, int sec)
+{
+ unsigned long start, end;
+ int bcount, pcount;
+ int ret;
+
+ if (plen == blen)
+ return test_ahash_jiffies_digest(req, blen, out, sec);
+
+ for (start = jiffies, end = start + sec * HZ, bcount = 0;
+ time_before(jiffies, end); bcount++) {
+ ret = crypto_ahash_init(req);
+ if (ret)
+ return ret;
+ for (pcount = 0; pcount < blen; pcount += plen) {
+ ret = do_one_ahash_op(req, crypto_ahash_update(req));
+ if (ret)
+ return ret;
+ }
+ /* we assume there is enough space in 'out' for the result */
+ ret = do_one_ahash_op(req, crypto_ahash_final(req));
+ if (ret)
+ return ret;
+ }
+
+ pr_cont("%6u opers/sec, %9lu bytes/sec\n",
+ bcount / sec, ((long)bcount * blen) / sec);
+
+ return 0;
+}
+
+static int test_ahash_cycles_digest(struct ahash_request *req, int blen,
+ char *out)
+{
+ unsigned long cycles = 0;
+ int ret, i;
+
+ /* Warm-up run. */
+ for (i = 0; i < 4; i++) {
+ ret = do_one_ahash_op(req, crypto_ahash_digest(req));
+ if (ret)
+ goto out;
+ }
+
+ /* The real thing. */
+ for (i = 0; i < 8; i++) {
+ cycles_t start, end;
+
+ start = get_cycles();
+
+ ret = do_one_ahash_op(req, crypto_ahash_digest(req));
+ if (ret)
+ goto out;
+
+ end = get_cycles();
+
+ cycles += end - start;
+ }
+
+out:
+ if (ret)
+ return ret;
+
+ pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
+ cycles / 8, cycles / (8 * blen));
+
+ return 0;
+}
+
+static int test_ahash_cycles(struct ahash_request *req, int blen,
+ int plen, char *out)
+{
+ unsigned long cycles = 0;
+ int i, pcount, ret;
+
+ if (plen == blen)
+ return test_ahash_cycles_digest(req, blen, out);
+
+ /* Warm-up run. */
+ for (i = 0; i < 4; i++) {
+ ret = crypto_ahash_init(req);
+ if (ret)
+ goto out;
+ for (pcount = 0; pcount < blen; pcount += plen) {
+ ret = do_one_ahash_op(req, crypto_ahash_update(req));
+ if (ret)
+ goto out;
+ }
+ ret = do_one_ahash_op(req, crypto_ahash_final(req));
+ if (ret)
+ goto out;
+ }
+
+ /* The real thing. */
+ for (i = 0; i < 8; i++) {
+ cycles_t start, end;
+
+ start = get_cycles();
+
+ ret = crypto_ahash_init(req);
+ if (ret)
+ goto out;
+ for (pcount = 0; pcount < blen; pcount += plen) {
+ ret = do_one_ahash_op(req, crypto_ahash_update(req));
+ if (ret)
+ goto out;
+ }
+ ret = do_one_ahash_op(req, crypto_ahash_final(req));
+ if (ret)
+ goto out;
+
+ end = get_cycles();
+
+ cycles += end - start;
+ }
+
+out:
+ if (ret)
+ return ret;
+
+ pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
+ cycles / 8, cycles / (8 * blen));
+
+ return 0;
+}
+
+static void test_ahash_speed(const char *algo, unsigned int sec,
+ struct hash_speed *speed)
+{
+ struct scatterlist sg[TVMEMSIZE];
+ struct tcrypt_result tresult;
+ struct ahash_request *req;
+ struct crypto_ahash *tfm;
+ static char output[1024];
+ int i, ret;
+
+ printk(KERN_INFO "\ntesting speed of async %s\n", algo);
+
+ tfm = crypto_alloc_ahash(algo, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_err("failed to load transform for %s: %ld\n",
+ algo, PTR_ERR(tfm));
+ return;
+ }
+
+ if (crypto_ahash_digestsize(tfm) > sizeof(output)) {
+ pr_err("digestsize(%u) > outputbuffer(%zu)\n",
+ crypto_ahash_digestsize(tfm), sizeof(output));
+ goto out;
+ }
+
+ test_hash_sg_init(sg);
+ req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ pr_err("ahash request allocation failure\n");
+ goto out;
+ }
+
+ init_completion(&tresult.completion);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tcrypt_complete, &tresult);
+
+ for (i = 0; speed[i].blen != 0; i++) {
+ if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
+ pr_err("template (%u) too big for tvmem (%lu)\n",
+ speed[i].blen, TVMEMSIZE * PAGE_SIZE);
+ break;
+ }
+
+ pr_info("test%3u "
+ "(%5u byte blocks,%5u bytes per update,%4u updates): ",
+ i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
+
+ ahash_request_set_crypt(req, sg, output, speed[i].plen);
+
+ if (sec)
+ ret = test_ahash_jiffies(req, speed[i].blen,
+ speed[i].plen, output, sec);
+ else
+ ret = test_ahash_cycles(req, speed[i].blen,
+ speed[i].plen, output);
+
+ if (ret) {
+ pr_err("hashing failed ret=%d\n", ret);
+ break;
+ }
+ }
+
+ ahash_request_free(req);
+
+out:
+ crypto_free_ahash(tfm);
+}
+
static void test_available(void)
{
char **name = check;
@@ -881,9 +1134,87 @@ static int do_test(int m)
test_hash_speed("rmd320", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
+ case 318:
+ test_hash_speed("ghash-generic", sec, hash_speed_template_16);
+ if (mode > 300 && mode < 400) break;
+
case 399:
break;
+ case 400:
+ /* fall through */
+
+ case 401:
+ test_ahash_speed("md4", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 402:
+ test_ahash_speed("md5", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 403:
+ test_ahash_speed("sha1", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 404:
+ test_ahash_speed("sha256", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 405:
+ test_ahash_speed("sha384", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 406:
+ test_ahash_speed("sha512", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 407:
+ test_ahash_speed("wp256", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 408:
+ test_ahash_speed("wp384", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 409:
+ test_ahash_speed("wp512", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 410:
+ test_ahash_speed("tgr128", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 411:
+ test_ahash_speed("tgr160", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 412:
+ test_ahash_speed("tgr192", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 413:
+ test_ahash_speed("sha224", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 414:
+ test_ahash_speed("rmd128", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 415:
+ test_ahash_speed("rmd160", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 416:
+ test_ahash_speed("rmd256", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 417:
+ test_ahash_speed("rmd320", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 499:
+ break;
+
case 1000:
test_available();
break;
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 966bbfaf95b1..10cb925132c9 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -25,6 +25,7 @@ struct cipher_speed_template {
struct hash_speed {
unsigned int blen; /* buffer length */
unsigned int plen; /* per-update length */
+ unsigned int klen; /* key length */
};
/*
@@ -83,4 +84,32 @@ static struct hash_speed generic_hash_speed_template[] = {
{ .blen = 0, .plen = 0, }
};
+static struct hash_speed hash_speed_template_16[] = {
+ { .blen = 16, .plen = 16, .klen = 16, },
+ { .blen = 64, .plen = 16, .klen = 16, },
+ { .blen = 64, .plen = 64, .klen = 16, },
+ { .blen = 256, .plen = 16, .klen = 16, },
+ { .blen = 256, .plen = 64, .klen = 16, },
+ { .blen = 256, .plen = 256, .klen = 16, },
+ { .blen = 1024, .plen = 16, .klen = 16, },
+ { .blen = 1024, .plen = 256, .klen = 16, },
+ { .blen = 1024, .plen = 1024, .klen = 16, },
+ { .blen = 2048, .plen = 16, .klen = 16, },
+ { .blen = 2048, .plen = 256, .klen = 16, },
+ { .blen = 2048, .plen = 1024, .klen = 16, },
+ { .blen = 2048, .plen = 2048, .klen = 16, },
+ { .blen = 4096, .plen = 16, .klen = 16, },
+ { .blen = 4096, .plen = 256, .klen = 16, },
+ { .blen = 4096, .plen = 1024, .klen = 16, },
+ { .blen = 4096, .plen = 4096, .klen = 16, },
+ { .blen = 8192, .plen = 16, .klen = 16, },
+ { .blen = 8192, .plen = 256, .klen = 16, },
+ { .blen = 8192, .plen = 1024, .klen = 16, },
+ { .blen = 8192, .plen = 4096, .klen = 16, },
+ { .blen = 8192, .plen = 8192, .klen = 16, },
+
+ /* End marker */
+ { .blen = 0, .plen = 0, .klen = 0, }
+};
+
#endif /* _CRYPTO_TCRYPT_H */
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index c494d7610be1..5c8aaa0cb0b9 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -153,8 +153,21 @@ static void testmgr_free_buf(char *buf[XBUFSIZE])
free_page((unsigned long)buf[i]);
}
+static int do_one_async_hash_op(struct ahash_request *req,
+ struct tcrypt_result *tr,
+ int ret)
+{
+ if (ret == -EINPROGRESS || ret == -EBUSY) {
+ ret = wait_for_completion_interruptible(&tr->completion);
+ if (!ret)
+ ret = tr->err;
+ INIT_COMPLETION(tr->completion);
+ }
+ return ret;
+}
+
static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
- unsigned int tcount)
+ unsigned int tcount, bool use_digest)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
unsigned int i, j, k, temp;
@@ -206,23 +219,36 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
}
ahash_request_set_crypt(req, sg, result, template[i].psize);
- ret = crypto_ahash_digest(req);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- ret = wait_for_completion_interruptible(
- &tresult.completion);
- if (!ret && !(ret = tresult.err)) {
- INIT_COMPLETION(tresult.completion);
- break;
+ if (use_digest) {
+ ret = do_one_async_hash_op(req, &tresult,
+ crypto_ahash_digest(req));
+ if (ret) {
+ pr_err("alg: hash: digest failed on test %d "
+ "for %s: ret=%d\n", j, algo, -ret);
+ goto out;
+ }
+ } else {
+ ret = do_one_async_hash_op(req, &tresult,
+ crypto_ahash_init(req));
+ if (ret) {
+ pr_err("alt: hash: init failed on test %d "
+ "for %s: ret=%d\n", j, algo, -ret);
+ goto out;
+ }
+ ret = do_one_async_hash_op(req, &tresult,
+ crypto_ahash_update(req));
+ if (ret) {
+ pr_err("alt: hash: update failed on test %d "
+ "for %s: ret=%d\n", j, algo, -ret);
+ goto out;
+ }
+ ret = do_one_async_hash_op(req, &tresult,
+ crypto_ahash_final(req));
+ if (ret) {
+ pr_err("alt: hash: final failed on test %d "
+ "for %s: ret=%d\n", j, algo, -ret);
+ goto out;
}
- /* fall through */
- default:
- printk(KERN_ERR "alg: hash: digest failed on test %d "
- "for %s: ret=%d\n", j, algo, -ret);
- goto out;
}
if (memcmp(result, template[i].digest,
@@ -1402,7 +1428,11 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
return PTR_ERR(tfm);
}
- err = test_hash(tfm, desc->suite.hash.vecs, desc->suite.hash.count);
+ err = test_hash(tfm, desc->suite.hash.vecs,
+ desc->suite.hash.count, true);
+ if (!err)
+ err = test_hash(tfm, desc->suite.hash.vecs,
+ desc->suite.hash.count, false);
crypto_free_ahash(tfm);
return err;
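
With the extra use_digest argument, each hash test vector is now run twice: once through the one-shot crypto_ahash_digest() path and once through an explicit init/update/final sequence, with every asynchronous step funnelled through do_one_async_hash_op() so that -EINPROGRESS/-EBUSY results are waited out and the completion re-armed. A hedged distillation of the two paths follows; the helper and struct names reuse what testmgr.c already defines, and the two wrapper functions are illustrative only:

static int hash_one_shot(struct ahash_request *req, struct tcrypt_result *tr)
{
        /* single call computes the digest over the whole scatterlist */
        return do_one_async_hash_op(req, tr, crypto_ahash_digest(req));
}

static int hash_stepwise(struct ahash_request *req, struct tcrypt_result *tr)
{
        int ret;

        /* same data, same request, but driven one phase at a time */
        ret = do_one_async_hash_op(req, tr, crypto_ahash_init(req));
        if (ret)
                return ret;
        ret = do_one_async_hash_op(req, tr, crypto_ahash_update(req));
        if (ret)
                return ret;
        return do_one_async_hash_op(req, tr, crypto_ahash_final(req));
}

Both paths must produce the same digest for every template vector; that equivalence is what the second test_hash() call in alg_test_hash() exercises.
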
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index fb765173d41c..74e35377fd30 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -1669,17 +1669,73 @@ static struct hash_testvec aes_xcbc128_tv_template[] = {
}
};
-#define VMAC_AES_TEST_VECTORS 1
-static char vmac_string[128] = {'\x01', '\x01', '\x01', '\x01',
+#define VMAC_AES_TEST_VECTORS 8
+static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
'\x02', '\x03', '\x02', '\x02',
'\x02', '\x04', '\x01', '\x07',
'\x04', '\x01', '\x04', '\x03',};
+static char vmac_string2[128] = {'a', 'b', 'c',};
+static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ };
+
static struct hash_testvec aes_vmac128_tv_template[] = {
{
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .plaintext = NULL,
+ .digest = "\x07\x58\x80\x35\x77\xa4\x7b\x54",
+ .psize = 0,
+ .ksize = 16,
+ }, {
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .plaintext = vmac_string1,
+ .digest = "\xce\xf5\x3c\xd3\xae\x68\x8c\xa1",
+ .psize = 128,
+ .ksize = 16,
+ }, {
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .plaintext = vmac_string2,
+ .digest = "\xc9\x27\xb0\x73\x81\xbd\x14\x2d",
+ .psize = 128,
+ .ksize = 16,
+ }, {
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .plaintext = vmac_string,
- .digest = "\xcb\xd7\x8a\xfd\xb7\x33\x79\xe7",
+ .plaintext = vmac_string3,
+ .digest = "\x8d\x1a\x95\x8c\x98\x47\x0b\x19",
+ .psize = 128,
+ .ksize = 16,
+ }, {
+ .key = "abcdefghijklmnop",
+ .plaintext = NULL,
+ .digest = "\x3b\x89\xa1\x26\x9e\x55\x8f\x84",
+ .psize = 0,
+ .ksize = 16,
+ }, {
+ .key = "abcdefghijklmnop",
+ .plaintext = vmac_string1,
+ .digest = "\xab\x5e\xab\xb0\xf6\x8d\x74\xc2",
+ .psize = 128,
+ .ksize = 16,
+ }, {
+ .key = "abcdefghijklmnop",
+ .plaintext = vmac_string2,
+ .digest = "\x11\x15\x68\x42\x3d\x7b\x09\xdf",
+ .psize = 128,
+ .ksize = 16,
+ }, {
+ .key = "abcdefghijklmnop",
+ .plaintext = vmac_string3,
+ .digest = "\x8b\x32\x8f\xe1\xed\x8f\xfa\xd4",
.psize = 128,
.ksize = 16,
},
diff --git a/crypto/vmac.c b/crypto/vmac.c
index 0a9468e575de..0999274a27ac 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -43,6 +43,8 @@ const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */
const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */
const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
+#define pe64_to_cpup le64_to_cpup /* Prefer little endian */
+
#ifdef __LITTLE_ENDIAN
#define INDEX_HIGH 1
#define INDEX_LOW 0
@@ -110,8 +112,8 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
int i; u64 th, tl; \
rh = rl = 0; \
for (i = 0; i < nw; i += 2) { \
- MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
- le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
} \
} while (0)
@@ -121,11 +123,11 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
int i; u64 th, tl; \
rh1 = rl1 = rh = rl = 0; \
for (i = 0; i < nw; i += 2) { \
- MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
- le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \
- le64_to_cpup((mp)+i+1)+(kp)[i+3]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
ADD128(rh1, rl1, th, tl); \
} \
} while (0)
@@ -136,17 +138,17 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
int i; u64 th, tl; \
rh = rl = 0; \
for (i = 0; i < nw; i += 8) { \
- MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
- le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \
- le64_to_cpup((mp)+i+3)+(kp)[i+3]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
+ pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \
- le64_to_cpup((mp)+i+5)+(kp)[i+5]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
+ pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \
- le64_to_cpup((mp)+i+7)+(kp)[i+7]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
+ pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
ADD128(rh, rl, th, tl); \
} \
} while (0)
@@ -156,29 +158,29 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
int i; u64 th, tl; \
rh1 = rl1 = rh = rl = 0; \
for (i = 0; i < nw; i += 8) { \
- MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
- le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \
- le64_to_cpup((mp)+i+1)+(kp)[i+3]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
ADD128(rh1, rl1, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \
- le64_to_cpup((mp)+i+3)+(kp)[i+3]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
+ pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+4], \
- le64_to_cpup((mp)+i+3)+(kp)[i+5]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \
+ pe64_to_cpup((mp)+i+3)+(kp)[i+5]); \
ADD128(rh1, rl1, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \
- le64_to_cpup((mp)+i+5)+(kp)[i+5]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
+ pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+6], \
- le64_to_cpup((mp)+i+5)+(kp)[i+7]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \
+ pe64_to_cpup((mp)+i+5)+(kp)[i+7]); \
ADD128(rh1, rl1, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \
- le64_to_cpup((mp)+i+7)+(kp)[i+7]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
+ pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+8], \
- le64_to_cpup((mp)+i+7)+(kp)[i+9]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \
+ pe64_to_cpup((mp)+i+7)+(kp)[i+9]); \
ADD128(rh1, rl1, th, tl); \
} \
} while (0)
@@ -216,8 +218,8 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
int i; \
rh = rl = t = 0; \
for (i = 0; i < nw; i += 2) { \
- t1 = le64_to_cpup(mp+i) + kp[i]; \
- t2 = le64_to_cpup(mp+i+1) + kp[i+1]; \
+ t1 = pe64_to_cpup(mp+i) + kp[i]; \
+ t2 = pe64_to_cpup(mp+i+1) + kp[i+1]; \
m2 = MUL32(t1 >> 32, t2); \
m1 = MUL32(t1, t2 >> 32); \
ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \
@@ -322,8 +324,7 @@ static void vhash_abort(struct vmac_ctx *ctx)
ctx->first_block_processed = 0;
}
-static u64 l3hash(u64 p1, u64 p2,
- u64 k1, u64 k2, u64 len)
+static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
{
u64 rh, rl, t, z = 0;
@@ -474,7 +475,7 @@ static u64 vmac(unsigned char m[], unsigned int mbytes,
}
p = be64_to_cpup(out_p + i);
h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
- return p + h;
+ return le64_to_cpu(p + h);
}
static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
@@ -549,10 +550,6 @@ static int vmac_setkey(struct crypto_shash *parent,
static int vmac_init(struct shash_desc *pdesc)
{
- struct crypto_shash *parent = pdesc->tfm;
- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
-
- memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
return 0;
}