Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig                       |   9
-rw-r--r--  crypto/aead.c                        |   1
-rw-r--r--  crypto/ansi_cprng.c                  |  82
-rw-r--r--  crypto/async_tx/Kconfig              |   5
-rw-r--r--  crypto/async_tx/async_pq.c           |  74
-rw-r--r--  crypto/async_tx/async_raid6_recov.c  | 100
-rw-r--r--  crypto/async_tx/async_xor.c          |  33
-rw-r--r--  crypto/cryptd.c                      |   7
-rw-r--r--  crypto/digest.c                      | 240
-rw-r--r--  crypto/gcm.c                         | 107
-rw-r--r--  crypto/hash.c                        | 183
-rw-r--r--  crypto/proc.c                        |  19
-rw-r--r--  crypto/testmgr.c                     |  11
-rw-r--r--  crypto/testmgr.h                     |  15
14 files changed, 329 insertions(+), 557 deletions(-)
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 26b5dd0cb564..81c185a6971f 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -440,6 +440,15 @@ config CRYPTO_WP512
See also:
<http://planeta.terra.com.br/informatica/paulobarreto/WhirlpoolPage.html>
+config CRYPTO_GHASH_CLMUL_NI_INTEL
+ tristate "GHASH digest algorithm (CLMUL-NI accelerated)"
+ depends on (X86 || UML_X86) && 64BIT
+ select CRYPTO_SHASH
+ select CRYPTO_CRYPTD
+ help
+ GHASH is the message digest algorithm used in GCM (Galois/Counter Mode).
+ This implementation is accelerated by Intel's CLMUL-NI (carry-less
+ multiplication) instructions.
+
comment "Ciphers"
config CRYPTO_AES
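The entry above exposes GHASH through the synchronous shash interface
(hence the "select CRYPTO_SHASH"). A minimal sketch of driving any "ghash"
provider from kernel code (the function name and fixed-size buffers are
illustrative, not part of this patch):

	#include <crypto/hash.h>
	#include <linux/err.h>
	#include <linux/slab.h>

	static int ghash_demo(const u8 key[16], const u8 data[16], u8 out[16])
	{
		struct crypto_shash *tfm;
		struct shash_desc *desc;
		int err;

		tfm = crypto_alloc_shash("ghash", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* GHASH is keyed: the hash subkey H must be set first */
		err = crypto_shash_setkey(tfm, key, 16);
		if (err)
			goto out_free_tfm;

		desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
			       GFP_KERNEL);
		if (!desc) {
			err = -ENOMEM;
			goto out_free_tfm;
		}
		desc->tfm = tfm;
		desc->flags = 0;

		/* one 16-byte block in, 16-byte authenticator out */
		err = crypto_shash_digest(desc, data, 16, out);
		kfree(desc);
	out_free_tfm:
		crypto_free_shash(tfm);
		return err;
	}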
diff --git a/crypto/aead.c b/crypto/aead.c
index d9aa733db164..0a55da70845e 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index 3aa6e3834bfe..2bc332142849 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -85,7 +85,7 @@ static void xor_vectors(unsigned char *in1, unsigned char *in2,
* Returns DEFAULT_BLK_SZ bytes of random data per call;
* returns 0 if generation succeeded, <0 if something went wrong.
*/
-static int _get_more_prng_bytes(struct prng_context *ctx)
+static int _get_more_prng_bytes(struct prng_context *ctx, int cont_test)
{
int i;
unsigned char tmp[DEFAULT_BLK_SZ];
@@ -132,7 +132,7 @@ static int _get_more_prng_bytes(struct prng_context *ctx)
*/
if (!memcmp(ctx->rand_data, ctx->last_rand_data,
DEFAULT_BLK_SZ)) {
- if (fips_enabled) {
+ if (cont_test) {
panic("cprng %p Failed repetition check!\n",
ctx);
}
@@ -185,16 +185,14 @@ static int _get_more_prng_bytes(struct prng_context *ctx)
}
/* Our exported functions */
-static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx)
+static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx,
+ int do_cont_test)
{
unsigned char *ptr = buf;
unsigned int byte_count = (unsigned int)nbytes;
int err;
- if (nbytes < 0)
- return -EINVAL;
-
spin_lock_bh(&ctx->prng_lock);
err = -EINVAL;
@@ -220,7 +218,7 @@ static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx)
remainder:
if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
- if (_get_more_prng_bytes(ctx) < 0) {
+ if (_get_more_prng_bytes(ctx, do_cont_test) < 0) {
memset(buf, 0, nbytes);
err = -EINVAL;
goto done;
@@ -247,7 +245,7 @@ empty_rbuf:
*/
for (; byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) {
if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
- if (_get_more_prng_bytes(ctx) < 0) {
+ if (_get_more_prng_bytes(ctx, do_cont_test) < 0) {
memset(buf, 0, nbytes);
err = -EINVAL;
goto done;
@@ -356,7 +354,7 @@ static int cprng_get_random(struct crypto_rng *tfm, u8 *rdata,
{
struct prng_context *prng = crypto_rng_ctx(tfm);
- return get_prng_bytes(rdata, dlen, prng);
+ return get_prng_bytes(rdata, dlen, prng, 0);
}
/*
@@ -404,19 +402,79 @@ static struct crypto_alg rng_alg = {
}
};
+#ifdef CONFIG_CRYPTO_FIPS
+static int fips_cprng_get_random(struct crypto_rng *tfm, u8 *rdata,
+ unsigned int dlen)
+{
+ struct prng_context *prng = crypto_rng_ctx(tfm);
+
+ return get_prng_bytes(rdata, dlen, prng, 1);
+}
+
+static int fips_cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
+{
+ u8 rdata[DEFAULT_BLK_SZ];
+ int rc;
+
+ struct prng_context *prng = crypto_rng_ctx(tfm);
+
+ rc = cprng_reset(tfm, seed, slen);
+
+ if (rc)
+ goto out;
+
+ /* this primes our continuity test */
+ rc = get_prng_bytes(rdata, DEFAULT_BLK_SZ, prng, 0);
+ if (rc < 0)
+ goto out;
+
+ rc = 0;
+ prng->rand_data_valid = DEFAULT_BLK_SZ;
+
+out:
+ return rc;
+}
+
+static struct crypto_alg fips_rng_alg = {
+ .cra_name = "fips(ansi_cprng)",
+ .cra_driver_name = "fips_ansi_cprng",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_RNG,
+ .cra_ctxsize = sizeof(struct prng_context),
+ .cra_type = &crypto_rng_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(fips_rng_alg.cra_list),
+ .cra_init = cprng_init,
+ .cra_exit = cprng_exit,
+ .cra_u = {
+ .rng = {
+ .rng_make_random = fips_cprng_get_random,
+ .rng_reset = fips_cprng_reset,
+ .seedsize = DEFAULT_PRNG_KSZ + 2*DEFAULT_BLK_SZ,
+ }
+ }
+};
+#endif
/* Module initialization */
static int __init prng_mod_init(void)
{
- if (fips_enabled)
- rng_alg.cra_priority += 200;
+ int rc = 0;
- return crypto_register_alg(&rng_alg);
+ rc = crypto_register_alg(&rng_alg);
+#ifdef CONFIG_CRYPTO_FIPS
+ if (rc)
+ goto out;
+
+ rc = crypto_register_alg(&fips_rng_alg);
+
+out:
+#endif
+ return rc;
}
static void __exit prng_mod_fini(void)
{
crypto_unregister_alg(&rng_alg);
+#ifdef CONFIG_CRYPTO_FIPS
+ crypto_unregister_alg(&fips_rng_alg);
+#endif
return;
}
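
The cont_test/do_cont_test parameter added above wires the FIPS 140-2
continuous self-test into the generation path only for the new
fips(ansi_cprng) instance, instead of keying off the global fips_enabled
flag. The test itself, reduced to a self-contained sketch (not the kernel
code verbatim):

	#include <linux/errno.h>
	#include <linux/string.h>

	/* Reject back-to-back identical blocks, then remember the new
	 * block for the next comparison. */
	static int cont_test_check(const u8 *new_blk, u8 *last_blk,
				   size_t blksz)
	{
		if (!memcmp(new_blk, last_blk, blksz))
			return -EINVAL;	/* same block twice: fail */
		memcpy(last_blk, new_blk, blksz);
		return 0;
	}

This is also why fips_cprng_reset() generates and discards one block right
after seeding: it primes last_rand_data so the first caller-visible block
has a predecessor to compare against.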
diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig
index e5aeb2b79e6f..e28e276ac611 100644
--- a/crypto/async_tx/Kconfig
+++ b/crypto/async_tx/Kconfig
@@ -23,3 +23,8 @@ config ASYNC_RAID6_RECOV
select ASYNC_CORE
select ASYNC_PQ
+config ASYNC_TX_DISABLE_PQ_VAL_DMA
+ bool
+
+config ASYNC_TX_DISABLE_XOR_VAL_DMA
+ bool
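
These two symbols are not user-visible; a platform whose DMA engine
advertises PQ_VAL/XOR_VAL capability but should not use it for validation
selects them from its own Kconfig entry. A hypothetical driver entry (the
driver name is invented for illustration):

	config MY_DMA_DRIVER
		tristate "Example offload driver"
		select ASYNC_TX_DISABLE_PQ_VAL_DMA
		select ASYNC_TX_DISABLE_XOR_VAL_DMA

With either symbol set, the pq_val_chan()/xor_val_chan() helpers added
below compile to an unconditional 'return NULL', so async_syndrome_val()
and async_xor_val() always fall back to the synchronous path.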
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index b88db6d1dc65..ec87f53d5059 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -26,14 +26,10 @@
#include <linux/async_tx.h>
/**
- * scribble - space to hold throwaway P buffer for synchronous gen_syndrome
+ * pq_scribble_page - space to hold throwaway P or Q buffer for
+ * synchronous gen_syndrome
*/
-static struct page *scribble;
-
-static bool is_raid6_zero_block(struct page *p)
-{
- return p == (void *) raid6_empty_zero_page;
-}
+static struct page *pq_scribble_page;
/* the struct page *blocks[] parameter passed to async_gen_syndrome()
* and async_syndrome_val() contains the 'P' destination address at
@@ -83,7 +79,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
* sources and update the coefficients accordingly
*/
for (i = 0, idx = 0; i < src_cnt; i++) {
- if (is_raid6_zero_block(blocks[i]))
+ if (blocks[i] == NULL)
continue;
dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
DMA_TO_DEVICE);
@@ -160,9 +156,9 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
srcs = (void **) blocks;
for (i = 0; i < disks; i++) {
- if (is_raid6_zero_block(blocks[i])) {
+ if (blocks[i] == NULL) {
BUG_ON(i > disks - 3); /* P or Q can't be zero */
- srcs[i] = blocks[i];
+ srcs[i] = (void *) raid6_empty_zero_page;
} else
srcs[i] = page_address(blocks[i]) + offset;
}
@@ -186,10 +182,14 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
* blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <=
* PAGE_SIZE as a temporary buffer of this size is used in the
* synchronous path. 'disks' always accounts for both destination
- * buffers.
+ * buffers. If any source buffers (blocks[i] where i < disks - 2) are
+ * set to NULL, they will be replaced with raid6_empty_zero_page in
+ * the synchronous path and omitted in the hardware-asynchronous
+ * path.
*
* 'blocks' note: if submit->scribble is NULL then the contents of
- * 'blocks' may be overridden
+ * 'blocks' may be overwritten to perform address conversions
+ * (dma_map_page() or page_address()).
*/
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
@@ -227,11 +227,11 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
async_tx_quiesce(&submit->depend_tx);
if (!P(blocks, disks)) {
- P(blocks, disks) = scribble;
+ P(blocks, disks) = pq_scribble_page;
BUG_ON(len + offset > PAGE_SIZE);
}
if (!Q(blocks, disks)) {
- Q(blocks, disks) = scribble;
+ Q(blocks, disks) = pq_scribble_page;
BUG_ON(len + offset > PAGE_SIZE);
}
do_sync_gen_syndrome(blocks, offset, disks, len, submit);
@@ -240,6 +240,16 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);
+static inline struct dma_chan *
+pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
+{
+ #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+ return NULL;
+ #endif
+ return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
+ disks, len);
+}
+
/**
* async_syndrome_val - asynchronously validate a raid6 syndrome
* @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
@@ -260,13 +270,13 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
size_t len, enum sum_check_flags *pqres, struct page *spare,
struct async_submit_ctl *submit)
{
- struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ_VAL,
- NULL, 0, blocks, disks,
- len);
+ struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx;
+ unsigned char coefs[disks-2];
enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
dma_addr_t *dma_src = NULL;
+ int src_cnt = 0;
BUG_ON(disks < 4);
@@ -285,22 +295,32 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
__func__, disks, len);
if (!P(blocks, disks))
dma_flags |= DMA_PREP_PQ_DISABLE_P;
+ else
+ pq[0] = dma_map_page(dev, P(blocks, disks),
+ offset, len,
+ DMA_TO_DEVICE);
if (!Q(blocks, disks))
dma_flags |= DMA_PREP_PQ_DISABLE_Q;
+ else
+ pq[1] = dma_map_page(dev, Q(blocks, disks),
+ offset, len,
+ DMA_TO_DEVICE);
+
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
- for (i = 0; i < disks; i++)
+ for (i = 0; i < disks-2; i++)
if (likely(blocks[i])) {
- BUG_ON(is_raid6_zero_block(blocks[i]));
- dma_src[i] = dma_map_page(dev, blocks[i],
- offset, len,
- DMA_TO_DEVICE);
+ dma_src[src_cnt] = dma_map_page(dev, blocks[i],
+ offset, len,
+ DMA_TO_DEVICE);
+ coefs[src_cnt] = raid6_gfexp[i];
+ src_cnt++;
}
for (;;) {
tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
- disks - 2,
- raid6_gfexp,
+ src_cnt,
+ coefs,
len, pqres,
dma_flags);
if (likely(tx))
@@ -373,9 +393,9 @@ EXPORT_SYMBOL_GPL(async_syndrome_val);
static int __init async_pq_init(void)
{
- scribble = alloc_page(GFP_KERNEL);
+ pq_scribble_page = alloc_page(GFP_KERNEL);
- if (scribble)
+ if (pq_scribble_page)
return 0;
pr_err("%s: failed to allocate required spare page\n", __func__);
@@ -385,7 +405,7 @@ static int __init async_pq_init(void)
static void __exit async_pq_exit(void)
{
- put_page(scribble);
+ put_page(pq_scribble_page);
}
module_init(async_pq_init);
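
The NULL-source convention documented in the async_gen_syndrome() comment
above can be exercised like this (a hypothetical caller; the pages d0, d1,
d3, p, q and the submit context are assumed to be set up elsewhere):

	/* 6-device array: 4 data disks with disk 2 absent, plus P and Q.
	 * A NULL source is fed the zero page on the synchronous path and
	 * skipped entirely on the DMA path. */
	struct page *blocks[6] = { d0, d1, NULL, d3, p, q };
	struct dma_async_tx_descriptor *tx;

	tx = async_gen_syndrome(blocks, 0, 6, PAGE_SIZE, &submit);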
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 6d73dde4786d..943f2abac9b4 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -131,8 +131,8 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
}
static struct dma_async_tx_descriptor *
-__2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
- struct async_submit_ctl *submit)
+__2data_recov_4(int disks, size_t bytes, int faila, int failb,
+ struct page **blocks, struct async_submit_ctl *submit)
{
struct dma_async_tx_descriptor *tx = NULL;
struct page *p, *q, *a, *b;
@@ -143,8 +143,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
void *cb_param = submit->cb_param;
void *scribble = submit->scribble;
- p = blocks[4-2];
- q = blocks[4-1];
+ p = blocks[disks-2];
+ q = blocks[disks-1];
a = blocks[faila];
b = blocks[failb];
@@ -170,8 +170,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
}
static struct dma_async_tx_descriptor *
-__2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
- struct async_submit_ctl *submit)
+__2data_recov_5(int disks, size_t bytes, int faila, int failb,
+ struct page **blocks, struct async_submit_ctl *submit)
{
struct dma_async_tx_descriptor *tx = NULL;
struct page *p, *q, *g, *dp, *dq;
@@ -181,21 +181,22 @@ __2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
dma_async_tx_callback cb_fn = submit->cb_fn;
void *cb_param = submit->cb_param;
void *scribble = submit->scribble;
- int uninitialized_var(good);
- int i;
+ int good_srcs, good, i;
- for (i = 0; i < 3; i++) {
+ good_srcs = 0;
+ good = -1;
+ for (i = 0; i < disks-2; i++) {
+ if (blocks[i] == NULL)
+ continue;
if (i == faila || i == failb)
continue;
- else {
- good = i;
- break;
- }
+ good = i;
+ good_srcs++;
}
- BUG_ON(i >= 3);
+ BUG_ON(good_srcs > 1);
- p = blocks[5-2];
- q = blocks[5-1];
+ p = blocks[disks-2];
+ q = blocks[disks-1];
g = blocks[good];
/* Compute syndrome with zero for the missing data pages
@@ -263,10 +264,10 @@ __2data_recov_n(int disks, size_t bytes, int faila, int failb,
* delta p and delta q
*/
dp = blocks[faila];
- blocks[faila] = (void *)raid6_empty_zero_page;
+ blocks[faila] = NULL;
blocks[disks-2] = dp;
dq = blocks[failb];
- blocks[failb] = (void *)raid6_empty_zero_page;
+ blocks[failb] = NULL;
blocks[disks-1] = dq;
init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
@@ -323,6 +324,8 @@ struct dma_async_tx_descriptor *
async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
struct page **blocks, struct async_submit_ctl *submit)
{
+ int non_zero_srcs, i;
+
BUG_ON(faila == failb);
if (failb < faila)
swap(faila, failb);
@@ -334,11 +337,13 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
*/
if (!submit->scribble) {
void **ptrs = (void **) blocks;
- int i;
async_tx_quiesce(&submit->depend_tx);
for (i = 0; i < disks; i++)
- ptrs[i] = page_address(blocks[i]);
+ if (blocks[i] == NULL)
+ ptrs[i] = (void *) raid6_empty_zero_page;
+ else
+ ptrs[i] = page_address(blocks[i]);
raid6_2data_recov(disks, bytes, faila, failb, ptrs);
@@ -347,19 +352,30 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
return NULL;
}
- switch (disks) {
- case 4:
+ non_zero_srcs = 0;
+ for (i = 0; i < disks-2 && non_zero_srcs < 4; i++)
+ if (blocks[i])
+ non_zero_srcs++;
+ switch (non_zero_srcs) {
+ case 0:
+ case 1:
+ /* There must be at least 2 sources - the failed devices. */
+ BUG();
+
+ case 2:
/* dma devices do not uniformly understand a zero source pq
* operation (in contrast to the synchronous case), so
- * explicitly handle the 4 disk special case
+ * explicitly handle the special case of a 4 disk array with
+ * both data disks missing.
*/
- return __2data_recov_4(bytes, faila, failb, blocks, submit);
- case 5:
+ return __2data_recov_4(disks, bytes, faila, failb, blocks, submit);
+ case 3:
/* dma devices do not uniformly understand a single
* source pq operation (in contrast to the synchronous
- * case), so explicitly handle the 5 disk special case
+ * case), so explicitly handle the special case of a 5 disk
+ * array with 2 of 3 data disks missing.
*/
- return __2data_recov_5(bytes, faila, failb, blocks, submit);
+ return __2data_recov_5(disks, bytes, faila, failb, blocks, submit);
default:
return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);
}
@@ -385,6 +401,7 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
dma_async_tx_callback cb_fn = submit->cb_fn;
void *cb_param = submit->cb_param;
void *scribble = submit->scribble;
+ int good_srcs, good, i;
struct page *srcs[2];
pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
@@ -394,11 +411,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
*/
if (!scribble) {
void **ptrs = (void **) blocks;
- int i;
async_tx_quiesce(&submit->depend_tx);
for (i = 0; i < disks; i++)
- ptrs[i] = page_address(blocks[i]);
+ if (blocks[i] == NULL)
+ ptrs[i] = (void *) raid6_empty_zero_page;
+ else
+ ptrs[i] = page_address(blocks[i]);
raid6_datap_recov(disks, bytes, faila, ptrs);
@@ -407,6 +426,20 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
return NULL;
}
+ good_srcs = 0;
+ good = -1;
+ for (i = 0; i < disks-2; i++) {
+ if (i == faila)
+ continue;
+ if (blocks[i]) {
+ good = i;
+ good_srcs++;
+ if (good_srcs > 1)
+ break;
+ }
+ }
+ BUG_ON(good_srcs == 0);
+
p = blocks[disks-2];
q = blocks[disks-1];
@@ -414,14 +447,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
* Use the dead data page as temporary storage for delta q
*/
dq = blocks[faila];
- blocks[faila] = (void *)raid6_empty_zero_page;
+ blocks[faila] = NULL;
blocks[disks-1] = dq;
- /* in the 4 disk case we only need to perform a single source
- * multiplication
+ /* in the 4-disk case we only need to perform a single source
+ * multiplication with the one good data block.
*/
- if (disks == 4) {
- int good = faila == 0 ? 1 : 0;
+ if (good_srcs == 1) {
struct page *g = blocks[good];
init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
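
After this change the two-disk recovery dispatch keys off the number of
present (non-NULL) data sources rather than the raw disk count. A worked
example (pages and submit context are hypothetical):

	/* 4-disk array, both data disks failed: blocks[] still holds the
	 * dead pages (they are reused as scratch space), so the scan over
	 * i < disks-2 finds non_zero_srcs == 2 and __2data_recov_4() runs.
	 * A larger, sparsely populated array with only two non-NULL data
	 * sources takes exactly the same path. */
	struct page *blocks[4] = { d0, d1, p, q };
	struct dma_async_tx_descriptor *tx;

	tx = async_raid6_2data_recov(4, PAGE_SIZE, 0, 1, blocks, &submit);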
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index b459a9034aac..079ae8ca590b 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -44,20 +44,23 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
void *cb_param_orig = submit->cb_param;
enum async_tx_flags flags_orig = submit->flags;
enum dma_ctrl_flags dma_flags;
- int xor_src_cnt;
+ int xor_src_cnt = 0;
dma_addr_t dma_dest;
/* map the dest bidirectionally in case it is re-used as a source */
dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
for (i = 0; i < src_cnt; i++) {
/* only map the dest once */
+ if (!src_list[i])
+ continue;
if (unlikely(src_list[i] == dest)) {
- dma_src[i] = dma_dest;
+ dma_src[xor_src_cnt++] = dma_dest;
continue;
}
- dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
- len, DMA_TO_DEVICE);
+ dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset,
+ len, DMA_TO_DEVICE);
}
+ src_cnt = xor_src_cnt;
while (src_cnt) {
submit->flags = flags_orig;
@@ -123,7 +126,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
int src_cnt, size_t len, struct async_submit_ctl *submit)
{
int i;
- int xor_src_cnt;
+ int xor_src_cnt = 0;
int src_off = 0;
void *dest_buf;
void **srcs;
@@ -135,8 +138,9 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
/* convert to buffer pointers */
for (i = 0; i < src_cnt; i++)
- srcs[i] = page_address(src_list[i]) + offset;
-
+ if (src_list[i])
+ srcs[xor_src_cnt++] = page_address(src_list[i]) + offset;
+ src_cnt = xor_src_cnt;
/* set destination address */
dest_buf = page_address(dest) + offset;
@@ -230,6 +234,17 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
memcmp(a, a + 4, len - 4) == 0);
}
+static inline struct dma_chan *
+xor_val_chan(struct async_submit_ctl *submit, struct page *dest,
+ struct page **src_list, int src_cnt, size_t len)
+{
+ #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+ return NULL;
+ #endif
+ return async_tx_find_channel(submit, DMA_XOR_VAL, &dest, 1, src_list,
+ src_cnt, len);
+}
+
/**
* async_xor_val - attempt a xor parity check with a dma engine.
* @dest: destination page used if the xor is performed synchronously
@@ -251,9 +266,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
int src_cnt, size_t len, enum sum_check_flags *result,
struct async_submit_ctl *submit)
{
- struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR_VAL,
- &dest, 1, src_list,
- src_cnt, len);
+ struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
dma_addr_t *dma_src = NULL;
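
do_async_xor() and do_sync_xor() now compact NULL entries out of src_list
instead of assuming every slot is populated, mirroring the async_pq
convention. For example (hypothetical pages; only a and b are mapped and
xored, and the effective source count drops to 2):

	struct page *srcs[4] = { a, NULL, b, NULL };

	tx = async_xor(dest, srcs, 0, 4, PAGE_SIZE, &submit);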
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 3d7fe8306e2a..704c14115323 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -711,6 +711,13 @@ struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);
+struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
+{
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+ return &rctx->desc;
+}
+EXPORT_SYMBOL_GPL(cryptd_shash_desc);
+
void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
crypto_free_ahash(&tfm->base);
diff --git a/crypto/digest.c b/crypto/digest.c
deleted file mode 100644
index 5d3f1303da98..000000000000
--- a/crypto/digest.c
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Digest operations.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#include <crypto/internal/hash.h>
-#include <crypto/scatterwalk.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <linux/hardirq.h>
-#include <linux/highmem.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/scatterlist.h>
-
-#include "internal.h"
-
-static int init(struct hash_desc *desc)
-{
- struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
-
- tfm->__crt_alg->cra_digest.dia_init(tfm);
- return 0;
-}
-
-static int update2(struct hash_desc *desc,
- struct scatterlist *sg, unsigned int nbytes)
-{
- struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
- unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
-
- if (!nbytes)
- return 0;
-
- for (;;) {
- struct page *pg = sg_page(sg);
- unsigned int offset = sg->offset;
- unsigned int l = sg->length;
-
- if (unlikely(l > nbytes))
- l = nbytes;
- nbytes -= l;
-
- do {
- unsigned int bytes_from_page = min(l, ((unsigned int)
- (PAGE_SIZE)) -
- offset);
- char *src = crypto_kmap(pg, 0);
- char *p = src + offset;
-
- if (unlikely(offset & alignmask)) {
- unsigned int bytes =
- alignmask + 1 - (offset & alignmask);
- bytes = min(bytes, bytes_from_page);
- tfm->__crt_alg->cra_digest.dia_update(tfm, p,
- bytes);
- p += bytes;
- bytes_from_page -= bytes;
- l -= bytes;
- }
- tfm->__crt_alg->cra_digest.dia_update(tfm, p,
- bytes_from_page);
- crypto_kunmap(src, 0);
- crypto_yield(desc->flags);
- offset = 0;
- pg++;
- l -= bytes_from_page;
- } while (l > 0);
-
- if (!nbytes)
- break;
- sg = scatterwalk_sg_next(sg);
- }
-
- return 0;
-}
-
-static int update(struct hash_desc *desc,
- struct scatterlist *sg, unsigned int nbytes)
-{
- if (WARN_ON_ONCE(in_irq()))
- return -EDEADLK;
- return update2(desc, sg, nbytes);
-}
-
-static int final(struct hash_desc *desc, u8 *out)
-{
- struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
- unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
- struct digest_alg *digest = &tfm->__crt_alg->cra_digest;
-
- if (unlikely((unsigned long)out & alignmask)) {
- unsigned long align = alignmask + 1;
- unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
- u8 *dst = (u8 *)ALIGN(addr, align) +
- ALIGN(tfm->__crt_alg->cra_ctxsize, align);
-
- digest->dia_final(tfm, dst);
- memcpy(out, dst, digest->dia_digestsize);
- } else
- digest->dia_final(tfm, out);
-
- return 0;
-}
-
-static int nosetkey(struct crypto_hash *tfm, const u8 *key, unsigned int keylen)
-{
- crypto_hash_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
- return -ENOSYS;
-}
-
-static int setkey(struct crypto_hash *hash, const u8 *key, unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_hash_tfm(hash);
-
- crypto_hash_clear_flags(hash, CRYPTO_TFM_RES_MASK);
- return tfm->__crt_alg->cra_digest.dia_setkey(tfm, key, keylen);
-}
-
-static int digest(struct hash_desc *desc,
- struct scatterlist *sg, unsigned int nbytes, u8 *out)
-{
- if (WARN_ON_ONCE(in_irq()))
- return -EDEADLK;
-
- init(desc);
- update2(desc, sg, nbytes);
- return final(desc, out);
-}
-
-int crypto_init_digest_ops(struct crypto_tfm *tfm)
-{
- struct hash_tfm *ops = &tfm->crt_hash;
- struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
-
- if (dalg->dia_digestsize > PAGE_SIZE / 8)
- return -EINVAL;
-
- ops->init = init;
- ops->update = update;
- ops->final = final;
- ops->digest = digest;
- ops->setkey = dalg->dia_setkey ? setkey : nosetkey;
- ops->digestsize = dalg->dia_digestsize;
-
- return 0;
-}
-
-void crypto_exit_digest_ops(struct crypto_tfm *tfm)
-{
-}
-
-static int digest_async_nosetkey(struct crypto_ahash *tfm_async, const u8 *key,
- unsigned int keylen)
-{
- crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
- return -ENOSYS;
-}
-
-static int digest_async_setkey(struct crypto_ahash *tfm_async, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async);
- struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
-
- crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
- return dalg->dia_setkey(tfm, key, keylen);
-}
-
-static int digest_async_init(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
-
- dalg->dia_init(tfm);
- return 0;
-}
-
-static int digest_async_update(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct hash_desc desc = {
- .tfm = __crypto_hash_cast(tfm),
- .flags = req->base.flags,
- };
-
- update(&desc, req->src, req->nbytes);
- return 0;
-}
-
-static int digest_async_final(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct hash_desc desc = {
- .tfm = __crypto_hash_cast(tfm),
- .flags = req->base.flags,
- };
-
- final(&desc, req->result);
- return 0;
-}
-
-static int digest_async_digest(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct hash_desc desc = {
- .tfm = __crypto_hash_cast(tfm),
- .flags = req->base.flags,
- };
-
- return digest(&desc, req->src, req->nbytes, req->result);
-}
-
-int crypto_init_digest_ops_async(struct crypto_tfm *tfm)
-{
- struct ahash_tfm *crt = &tfm->crt_ahash;
- struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
-
- if (dalg->dia_digestsize > PAGE_SIZE / 8)
- return -EINVAL;
-
- crt->init = digest_async_init;
- crt->update = digest_async_update;
- crt->final = digest_async_final;
- crt->digest = digest_async_digest;
- crt->setkey = dalg->dia_setkey ? digest_async_setkey :
- digest_async_nosetkey;
- crt->digestsize = dalg->dia_digestsize;
-
- return 0;
-}
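
crypto/digest.c implemented the legacy dia_* digest ops behind crypto_hash;
with it deleted, providers register through the shash interface instead. A
skeleton of the replacement shape (the demo_* names and sizes are
placeholders, not from this patch):

	#include <crypto/internal/hash.h>

	struct demo_ctx {
		u8 state[16];
	};

	static int demo_init(struct shash_desc *desc) { return 0; }
	static int demo_update(struct shash_desc *desc, const u8 *data,
			       unsigned int len) { return 0; }
	static int demo_final(struct shash_desc *desc, u8 *out) { return 0; }

	static struct shash_alg demo_alg = {
		.digestsize	= 16,
		.descsize	= sizeof(struct demo_ctx),
		.init		= demo_init,
		.update		= demo_update,
		.final		= demo_final,
		.base		= {
			.cra_name	 = "demo",
			.cra_driver_name = "demo-generic",
			.cra_blocksize	 = 16,
			.cra_module	 = THIS_MODULE,
		},
	};

	/* registered with crypto_register_shash(&demo_alg) from module
	 * init and removed with crypto_unregister_shash() on exit */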
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 5fc3292483ef..c6547130624c 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -40,7 +40,7 @@ struct crypto_rfc4106_ctx {
struct crypto_gcm_ghash_ctx {
unsigned int cryptlen;
struct scatterlist *src;
- crypto_completion_t complete;
+ void (*complete)(struct aead_request *req, int err);
};
struct crypto_gcm_req_priv_ctx {
@@ -267,23 +267,26 @@ static int gcm_hash_final(struct aead_request *req,
return crypto_ahash_final(ahreq);
}
-static void gcm_hash_final_done(struct crypto_async_request *areq,
- int err)
+static void __gcm_hash_final_done(struct aead_request *req, int err)
{
- struct aead_request *req = areq->data;
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
if (!err)
crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
- gctx->complete(areq, err);
+ gctx->complete(req, err);
}
-static void gcm_hash_len_done(struct crypto_async_request *areq,
- int err)
+static void gcm_hash_final_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
+
+ __gcm_hash_final_done(req, err);
+}
+
+static void __gcm_hash_len_done(struct aead_request *req, int err)
+{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
if (!err) {
@@ -292,13 +295,18 @@ static void gcm_hash_len_done(struct crypto_async_request *areq,
return;
}
- gcm_hash_final_done(areq, err);
+ __gcm_hash_final_done(req, err);
}
-static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
- int err)
+static void gcm_hash_len_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
+
+ __gcm_hash_len_done(req, err);
+}
+
+static void __gcm_hash_crypt_remain_done(struct aead_request *req, int err)
+{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
if (!err) {
@@ -307,13 +315,19 @@ static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
return;
}
- gcm_hash_len_done(areq, err);
+ __gcm_hash_len_done(req, err);
}
-static void gcm_hash_crypt_done(struct crypto_async_request *areq,
- int err)
+static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
+ int err)
{
struct aead_request *req = areq->data;
+
+ __gcm_hash_crypt_remain_done(req, err);
+}
+
+static void __gcm_hash_crypt_done(struct aead_request *req, int err)
+{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
unsigned int remain;
@@ -327,13 +341,18 @@ static void gcm_hash_crypt_done(struct crypto_async_request *areq,
return;
}
- gcm_hash_crypt_remain_done(areq, err);
+ __gcm_hash_crypt_remain_done(req, err);
}
-static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
- int err)
+static void gcm_hash_crypt_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
+
+ __gcm_hash_crypt_done(req, err);
+}
+
+static void __gcm_hash_assoc_remain_done(struct aead_request *req, int err)
+{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
crypto_completion_t complete;
@@ -350,15 +369,21 @@ static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
}
if (remain)
- gcm_hash_crypt_done(areq, err);
+ __gcm_hash_crypt_done(req, err);
else
- gcm_hash_crypt_remain_done(areq, err);
+ __gcm_hash_crypt_remain_done(req, err);
}
-static void gcm_hash_assoc_done(struct crypto_async_request *areq,
- int err)
+static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
+ int err)
{
struct aead_request *req = areq->data;
+
+ __gcm_hash_assoc_remain_done(req, err);
+}
+
+static void __gcm_hash_assoc_done(struct aead_request *req, int err)
+{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
unsigned int remain;
@@ -371,13 +396,18 @@ static void gcm_hash_assoc_done(struct crypto_async_request *areq,
return;
}
- gcm_hash_assoc_remain_done(areq, err);
+ __gcm_hash_assoc_remain_done(req, err);
}
-static void gcm_hash_init_done(struct crypto_async_request *areq,
- int err)
+static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
+
+ __gcm_hash_assoc_done(req, err);
+}
+
+static void __gcm_hash_init_done(struct aead_request *req, int err)
+{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
crypto_completion_t complete;
unsigned int remain = 0;
@@ -393,9 +423,16 @@ static void gcm_hash_init_done(struct crypto_async_request *areq,
}
if (remain)
- gcm_hash_assoc_done(areq, err);
+ __gcm_hash_assoc_done(req, err);
else
- gcm_hash_assoc_remain_done(areq, err);
+ __gcm_hash_assoc_remain_done(req, err);
+}
+
+static void gcm_hash_init_done(struct crypto_async_request *areq, int err)
+{
+ struct aead_request *req = areq->data;
+
+ __gcm_hash_init_done(req, err);
}
static int gcm_hash(struct aead_request *req,
@@ -457,10 +494,8 @@ static void gcm_enc_copy_hash(struct aead_request *req,
crypto_aead_authsize(aead), 1);
}
-static void gcm_enc_hash_done(struct crypto_async_request *areq,
- int err)
+static void gcm_enc_hash_done(struct aead_request *req, int err)
{
- struct aead_request *req = areq->data;
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
if (!err)
@@ -469,8 +504,7 @@ static void gcm_enc_hash_done(struct crypto_async_request *areq,
aead_request_complete(req, err);
}
-static void gcm_encrypt_done(struct crypto_async_request *areq,
- int err)
+static void gcm_encrypt_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
@@ -479,9 +513,13 @@ static void gcm_encrypt_done(struct crypto_async_request *areq,
err = gcm_hash(req, pctx);
if (err == -EINPROGRESS || err == -EBUSY)
return;
+ else if (!err) {
+ crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
+ gcm_enc_copy_hash(req, pctx);
+ }
}
- gcm_enc_hash_done(areq, err);
+ aead_request_complete(req, err);
}
static int crypto_gcm_encrypt(struct aead_request *req)
@@ -538,9 +576,8 @@ static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
aead_request_complete(req, err);
}
-static void gcm_dec_hash_done(struct crypto_async_request *areq, int err)
+static void gcm_dec_hash_done(struct aead_request *req, int err)
{
- struct aead_request *req = areq->data;
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct ablkcipher_request *abreq = &pctx->u.abreq;
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
@@ -552,9 +589,11 @@ static void gcm_dec_hash_done(struct crypto_async_request *areq, int err)
err = crypto_ablkcipher_decrypt(abreq);
if (err == -EINPROGRESS || err == -EBUSY)
return;
+ else if (!err)
+ err = crypto_gcm_verify(req, pctx);
}
- gcm_decrypt_done(areq, err);
+ aead_request_complete(req, err);
}
static int crypto_gcm_decrypt(struct aead_request *req)
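
The gcm.c churn above is one mechanical transformation applied to every
completion handler: each callback is split into a __-prefixed core that
takes the aead_request directly, plus a thin adapter with the
crypto_completion_t signature. Synchronous code paths can then chain the
cores without manufacturing a crypto_async_request. The shape of the
pattern (names are illustrative):

	static void __demo_step_done(struct aead_request *req, int err)
	{
		/* the real completion work lives here; a synchronous
		 * caller may invoke the next __*_done() core directly */
	}

	/* adapter used only when the operation completes asynchronously */
	static void demo_step_done(struct crypto_async_request *areq, int err)
	{
		__demo_step_done(areq->data, err);
	}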
diff --git a/crypto/hash.c b/crypto/hash.c
deleted file mode 100644
index cb86b19fd105..000000000000
--- a/crypto/hash.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Cryptographic Hash operations.
- *
- * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-#include <crypto/internal/hash.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/seq_file.h>
-
-#include "internal.h"
-
-static unsigned int crypto_hash_ctxsize(struct crypto_alg *alg, u32 type,
- u32 mask)
-{
- return alg->cra_ctxsize;
-}
-
-static int hash_setkey_unaligned(struct crypto_hash *crt, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_hash_tfm(crt);
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
- unsigned long alignmask = crypto_hash_alignmask(crt);
- int ret;
- u8 *buffer, *alignbuffer;
- unsigned long absize;
-
- absize = keylen + alignmask;
- buffer = kmalloc(absize, GFP_ATOMIC);
- if (!buffer)
- return -ENOMEM;
-
- alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
- memcpy(alignbuffer, key, keylen);
- ret = alg->setkey(crt, alignbuffer, keylen);
- memset(alignbuffer, 0, keylen);
- kfree(buffer);
- return ret;
-}
-
-static int hash_setkey(struct crypto_hash *crt, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_hash_tfm(crt);
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
- unsigned long alignmask = crypto_hash_alignmask(crt);
-
- if ((unsigned long)key & alignmask)
- return hash_setkey_unaligned(crt, key, keylen);
-
- return alg->setkey(crt, key, keylen);
-}
-
-static int hash_async_setkey(struct crypto_ahash *tfm_async, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async);
- struct crypto_hash *tfm_hash = __crypto_hash_cast(tfm);
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
-
- return alg->setkey(tfm_hash, key, keylen);
-}
-
-static int hash_async_init(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
- struct hash_desc desc = {
- .tfm = __crypto_hash_cast(tfm),
- .flags = req->base.flags,
- };
-
- return alg->init(&desc);
-}
-
-static int hash_async_update(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
- struct hash_desc desc = {
- .tfm = __crypto_hash_cast(tfm),
- .flags = req->base.flags,
- };
-
- return alg->update(&desc, req->src, req->nbytes);
-}
-
-static int hash_async_final(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
- struct hash_desc desc = {
- .tfm = __crypto_hash_cast(tfm),
- .flags = req->base.flags,
- };
-
- return alg->final(&desc, req->result);
-}
-
-static int hash_async_digest(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
- struct hash_desc desc = {
- .tfm = __crypto_hash_cast(tfm),
- .flags = req->base.flags,
- };
-
- return alg->digest(&desc, req->src, req->nbytes, req->result);
-}
-
-static int crypto_init_hash_ops_async(struct crypto_tfm *tfm)
-{
- struct ahash_tfm *crt = &tfm->crt_ahash;
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
-
- crt->init = hash_async_init;
- crt->update = hash_async_update;
- crt->final = hash_async_final;
- crt->digest = hash_async_digest;
- crt->setkey = hash_async_setkey;
- crt->digestsize = alg->digestsize;
-
- return 0;
-}
-
-static int crypto_init_hash_ops_sync(struct crypto_tfm *tfm)
-{
- struct hash_tfm *crt = &tfm->crt_hash;
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
-
- crt->init = alg->init;
- crt->update = alg->update;
- crt->final = alg->final;
- crt->digest = alg->digest;
- crt->setkey = hash_setkey;
- crt->digestsize = alg->digestsize;
-
- return 0;
-}
-
-static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
-{
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
-
- if (alg->digestsize > PAGE_SIZE / 8)
- return -EINVAL;
-
- if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) != CRYPTO_ALG_TYPE_HASH_MASK)
- return crypto_init_hash_ops_async(tfm);
- else
- return crypto_init_hash_ops_sync(tfm);
-}
-
-static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
- __attribute__ ((unused));
-static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
-{
- seq_printf(m, "type : hash\n");
- seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
- seq_printf(m, "digestsize : %u\n", alg->cra_hash.digestsize);
-}
-
-const struct crypto_type crypto_hash_type = {
- .ctxsize = crypto_hash_ctxsize,
- .init = crypto_init_hash_ops,
-#ifdef CONFIG_PROC_FS
- .show = crypto_hash_show,
-#endif
-};
-EXPORT_SYMBOL_GPL(crypto_hash_type);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Generic cryptographic hash type");
diff --git a/crypto/proc.c b/crypto/proc.c
index 5dc07e442fca..58fef67d4f4d 100644
--- a/crypto/proc.c
+++ b/crypto/proc.c
@@ -25,28 +25,22 @@
#ifdef CONFIG_CRYPTO_FIPS
static struct ctl_table crypto_sysctl_table[] = {
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "fips_enabled",
.data = &fips_enabled,
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = 0,
+ .proc_handler = proc_dointvec
},
+ {}
};
static struct ctl_table crypto_dir_table[] = {
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "crypto",
.mode = 0555,
.child = crypto_sysctl_table
},
- {
- .ctl_name = 0,
- },
+ {}
};
static struct ctl_table_header *crypto_sysctls;
@@ -115,13 +109,6 @@ static int c_show(struct seq_file *m, void *p)
seq_printf(m, "max keysize : %u\n",
alg->cra_cipher.cia_max_keysize);
break;
-
- case CRYPTO_ALG_TYPE_DIGEST:
- seq_printf(m, "type : digest\n");
- seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
- seq_printf(m, "digestsize : %u\n",
- alg->cra_digest.dia_digestsize);
- break;
case CRYPTO_ALG_TYPE_COMPRESS:
seq_printf(m, "type : compression\n");
break;
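
The proc.c hunk drops the binary-sysctl ctl_name fields and terminates both
tables with an empty sentinel entry. Any table in this style now looks like
the following generic sketch (names invented, not from this patch):

	static int demo_value;

	static struct ctl_table demo_table[] = {
		{
			.procname	= "demo_value",
			.data		= &demo_value,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{}	/* zeroed sentinel terminates the table */
	};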
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 6d5b746637be..7620bfce92f2 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1201,7 +1201,7 @@ static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
unsigned int tcount)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
- int err, i, j, seedsize;
+ int err = 0, i, j, seedsize;
u8 *seed;
char result[32];
@@ -1943,6 +1943,15 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "ghash",
+ .test = alg_test_hash,
+ .suite = {
+ .hash = {
+ .vecs = ghash_tv_template,
+ .count = GHASH_TEST_VECTORS
+ }
+ }
+ }, {
.alg = "hmac(md5)",
.test = alg_test_hash,
.suite = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 9963b18983ab..fb765173d41c 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -1003,6 +1003,21 @@ static struct hash_testvec tgr128_tv_template[] = {
},
};
+#define GHASH_TEST_VECTORS 1
+
+static struct hash_testvec ghash_tv_template[] =
+{
+ {
+ .key = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03\xff\xca\xff\x95\xf8\x30\xf0\x61",
+ .ksize = 16,
+ .plaintext = "\x95\x2b\x2a\x56\xa5\x60\x4a\xc0\xb3\x2b\x66\x56\xa0\x5b\x40\xb6",
+ .psize = 16,
+ .digest = "\xda\x53\xeb\x0a\xd2\xc5\x5b\xb6"
+ "\x4f\xc4\x80\x2c\xc3\xfe\xda\x60",
+ },
+};
+
/*
* HMAC-MD5 test vectors from RFC2202
* (These need to be fixed to not use strlen).