author     Linus Torvalds <torvalds@linux-foundation.org>  2012-04-02 09:40:24 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-04-02 09:40:24 -0700
commit     8f6b7676ceecc1f40df77d5a4d6a8bae87594a2d (patch)
tree       aaac7c2858dab9817af579f2ad39f2f18ce4dbd8
parent     143418d0c87fda0c587205c45094d8b05222fd49 (diff)
parent     5219a5342ab13650ae0f0c62319407268c48d0ab (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto fixes from Herbert Xu:
 - Fix for CPU hotplug hang in padata.
 - Avoid using cpu_active inappropriately in pcrypt and padata.
 - Fix for user-space algorithm lookup hang with IV generators.
 - Fix for netlink dump of algorithms where stuff went missing due to
   incorrect calculation of message size.

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: user - Fix size of netlink dump message
  crypto: user - Fix lookup of algorithms with IV generator
  crypto: pcrypt - Use the online cpumask as the default
  padata: Fix cpu hotplug
  padata: Use the online cpumask as the default
  padata: Add a reference to the api documentation
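The user-space lookup and dump fixes in this pull are exercised through the crypto configuration interface on a NETLINK_CRYPTO socket. As a point of reference, here is a minimal, hypothetical user-space sketch of a CRYPTO_MSG_GETALG query; the helper name crypto_getalg and the example driver name "aes-generic" are illustrative only, the struct crypto_user_alg layout (cru_driver_name and friends) is assumed from this era's <linux/cryptouser.h>, CAP_NET_ADMIN is required, and error handling is trimmed:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/cryptouser.h>

/* Hypothetical helper: ask the kernel to report one algorithm by driver name. */
static int crypto_getalg(const char *driver_name)
{
	struct {
		struct nlmsghdr n;
		struct crypto_user_alg alg;
	} req;
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	char buf[4096];
	int fd, err;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);
	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.n.nlmsg_len = NLMSG_LENGTH(sizeof(req.alg));
	req.n.nlmsg_type = CRYPTO_MSG_GETALG;
	req.n.nlmsg_flags = NLM_F_REQUEST;
	strncpy(req.alg.cru_driver_name, driver_name,
		sizeof(req.alg.cru_driver_name) - 1);

	err = sendto(fd, &req, req.n.nlmsg_len, 0,
		     (struct sockaddr *)&kernel, sizeof(kernel));
	if (err >= 0)
		/* Reply carries a struct crypto_user_alg plus nested attributes. */
		err = recv(fd, buf, sizeof(buf), 0);

	close(fd);
	return err < 0 ? -1 : 0;	/* e.g. crypto_getalg("aes-generic") */
}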
-rw-r--r--  crypto/ablkcipher.c                   4
-rw-r--r--  crypto/aead.c                         4
-rw-r--r--  crypto/crypto_user.c                 80
-rw-r--r--  crypto/pcrypt.c                       8
-rw-r--r--  include/crypto/internal/aead.h        2
-rw-r--r--  include/crypto/internal/skcipher.h    2
-rw-r--r--  include/linux/cryptouser.h            3
-rw-r--r--  kernel/padata.c                      13
8 files changed, 103 insertions(+), 13 deletions(-)
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index a0f768c1d9aa..8d3a056ebeea 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -613,8 +613,7 @@ out:
return err;
}
-static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
- u32 mask)
+struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask)
{
struct crypto_alg *alg;
@@ -652,6 +651,7 @@ static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
return ERR_PTR(crypto_givcipher_default(alg, type, mask));
}
+EXPORT_SYMBOL_GPL(crypto_lookup_skcipher);
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
u32 type, u32 mask)
diff --git a/crypto/aead.c b/crypto/aead.c
index 04add3dca6fe..e4cb35159be4 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -470,8 +470,7 @@ out:
return err;
}
-static struct crypto_alg *crypto_lookup_aead(const char *name, u32 type,
- u32 mask)
+struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask)
{
struct crypto_alg *alg;
@@ -503,6 +502,7 @@ static struct crypto_alg *crypto_lookup_aead(const char *name, u32 type,
return ERR_PTR(crypto_nivaead_default(alg, type, mask));
}
+EXPORT_SYMBOL_GPL(crypto_lookup_aead);
int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
u32 type, u32 mask)
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index f76e42bcc6e7..f1ea0a064135 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -21,9 +21,13 @@
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptouser.h>
+#include <linux/sched.h>
#include <net/netlink.h>
#include <linux/security.h>
#include <net/net_namespace.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+
#include "internal.h"
DEFINE_MUTEX(crypto_cfg_mutex);
@@ -301,6 +305,60 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
return crypto_unregister_instance(alg);
}
+static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type,
+ u32 mask)
+{
+ int err;
+ struct crypto_alg *alg;
+
+ type = crypto_skcipher_type(type);
+ mask = crypto_skcipher_mask(mask);
+
+ for (;;) {
+ alg = crypto_lookup_skcipher(name, type, mask);
+ if (!IS_ERR(alg))
+ return alg;
+
+ err = PTR_ERR(alg);
+ if (err != -EAGAIN)
+ break;
+ if (signal_pending(current)) {
+ err = -EINTR;
+ break;
+ }
+ }
+
+ return ERR_PTR(err);
+}
+
+static struct crypto_alg *crypto_user_aead_alg(const char *name, u32 type,
+ u32 mask)
+{
+ int err;
+ struct crypto_alg *alg;
+
+ type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+ type |= CRYPTO_ALG_TYPE_AEAD;
+ mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+ mask |= CRYPTO_ALG_TYPE_MASK;
+
+ for (;;) {
+ alg = crypto_lookup_aead(name, type, mask);
+ if (!IS_ERR(alg))
+ return alg;
+
+ err = PTR_ERR(alg);
+ if (err != -EAGAIN)
+ break;
+ if (signal_pending(current)) {
+ err = -EINTR;
+ break;
+ }
+ }
+
+ return ERR_PTR(err);
+}
+
static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
@@ -325,7 +383,19 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
else
name = p->cru_name;
- alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
+ switch (p->cru_type & p->cru_mask & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AEAD:
+ alg = crypto_user_aead_alg(name, p->cru_type, p->cru_mask);
+ break;
+ case CRYPTO_ALG_TYPE_GIVCIPHER:
+ case CRYPTO_ALG_TYPE_BLKCIPHER:
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ alg = crypto_user_skcipher_alg(name, p->cru_type, p->cru_mask);
+ break;
+ default:
+ alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
+ }
+
if (IS_ERR(alg))
return PTR_ERR(alg);
@@ -387,12 +457,20 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
(nlh->nlmsg_flags & NLM_F_DUMP))) {
+ struct crypto_alg *alg;
+ u16 dump_alloc = 0;
+
if (link->dump == NULL)
return -EINVAL;
+
+ list_for_each_entry(alg, &crypto_alg_list, cra_list)
+ dump_alloc += CRYPTO_REPORT_MAXSIZE;
+
{
struct netlink_dump_control c = {
.dump = link->dump,
.done = link->done,
+ .min_dump_alloc = dump_alloc,
};
return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
}
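The min_dump_alloc change above sizes each dump skb so that it can hold one CRYPTO_REPORT_MAXSIZE-sized report for every entry on crypto_alg_list; before this fix, algorithms went missing from the dump because the message size was calculated incorrectly. For completeness, a hypothetical user-space counterpart of that dump request (same headers, struct-layout assumptions and CAP_NET_ADMIN requirement as the crypto_getalg sketch near the top; crypto_dump_algs is an illustrative name):

/* Hypothetical helper: dump all registered algorithms and walk the multipart reply. */
static int crypto_dump_algs(int fd)
{
	struct {
		struct nlmsghdr n;
		struct crypto_user_alg alg;
	} req;
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	char buf[65536];
	struct nlmsghdr *nh;
	ssize_t len;

	memset(&req, 0, sizeof(req));
	req.n.nlmsg_len = NLMSG_LENGTH(sizeof(req.alg));
	req.n.nlmsg_type = CRYPTO_MSG_GETALG;
	req.n.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;

	if (sendto(fd, &req, req.n.nlmsg_len, 0,
		   (struct sockaddr *)&kernel, sizeof(kernel)) < 0)
		return -1;

	for (;;) {
		len = recv(fd, buf, sizeof(buf), 0);
		if (len < 0)
			return -1;
		for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
		     nh = NLMSG_NEXT(nh, len)) {
			if (nh->nlmsg_type == NLMSG_DONE)
				return 0;
			if (nh->nlmsg_type == NLMSG_ERROR)
				return -1;
			/* NLMSG_DATA(nh) begins with one struct crypto_user_alg. */
		}
	}
}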
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 29a89dad68b6..b2c99dc1c5e2 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -280,11 +280,11 @@ static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm)
ictx->tfm_count++;
- cpu_index = ictx->tfm_count % cpumask_weight(cpu_active_mask);
+ cpu_index = ictx->tfm_count % cpumask_weight(cpu_online_mask);
- ctx->cb_cpu = cpumask_first(cpu_active_mask);
+ ctx->cb_cpu = cpumask_first(cpu_online_mask);
for (cpu = 0; cpu < cpu_index; cpu++)
- ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_active_mask);
+ ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);
cipher = crypto_spawn_aead(crypto_instance_ctx(inst));
@@ -472,7 +472,7 @@ static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
goto err_free_padata;
}
- cpumask_and(mask->mask, cpu_possible_mask, cpu_active_mask);
+ cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask);
rcu_assign_pointer(pcrypt->cb_cpumask, mask);
pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index d838c945575a..2eba340230a7 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -31,6 +31,8 @@ static inline void crypto_set_aead_spawn(
crypto_set_spawn(&spawn->base, inst);
}
+struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask);
+
int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
u32 type, u32 mask);
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 3a748a6bf772..06e8b32d541c 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -34,6 +34,8 @@ static inline void crypto_set_skcipher_spawn(
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
u32 type, u32 mask);
+struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask);
+
static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
{
crypto_drop_spawn(&spawn->base);
diff --git a/include/linux/cryptouser.h b/include/linux/cryptouser.h
index 532fb58f16bf..4abf2ea6a887 100644
--- a/include/linux/cryptouser.h
+++ b/include/linux/cryptouser.h
@@ -100,3 +100,6 @@ struct crypto_report_rng {
char type[CRYPTO_MAX_NAME];
unsigned int seedsize;
};
+
+#define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
+ sizeof(struct crypto_report_blkcipher))
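As a rough worked example of what CRYPTO_REPORT_MAXSIZE reserves per algorithm (assuming CRYPTO_MAX_NAME and CRYPTO_MAX_ALG_NAME are both 64 in this era's headers): struct crypto_user_alg is about 3*64 + 4*4 = 208 bytes and struct crypto_report_blkcipher, the largest of the report structures, about 2*64 + 4*4 = 144 bytes, giving roughly 352 bytes per entry. The crypto_user.c hunk above multiplies this by the number of entries on crypto_alg_list to pick min_dump_alloc, with the intent that a single dump buffer can carry a report for every registered algorithm.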
diff --git a/kernel/padata.c b/kernel/padata.c
index 6f10eb285ece..89fe3d1b9efb 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -1,6 +1,8 @@
/*
* padata.c - generic interface to process data streams in parallel
*
+ * See Documentation/padata.txt for an api documentation.
+ *
* Copyright (C) 2008, 2009 secunet Security Networks AG
* Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
*
@@ -354,13 +356,13 @@ static int padata_setup_cpumasks(struct parallel_data *pd,
if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
return -ENOMEM;
- cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_active_mask);
+ cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
free_cpumask_var(pd->cpumask.cbcpu);
return -ENOMEM;
}
- cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_active_mask);
+ cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
return 0;
}
@@ -564,7 +566,7 @@ EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
static bool padata_validate_cpumask(struct padata_instance *pinst,
const struct cpumask *cpumask)
{
- if (!cpumask_intersects(cpumask, cpu_active_mask)) {
+ if (!cpumask_intersects(cpumask, cpu_online_mask)) {
pinst->flags |= PADATA_INVALID;
return false;
}
@@ -678,7 +680,7 @@ static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
struct parallel_data *pd;
- if (cpumask_test_cpu(cpu, cpu_active_mask)) {
+ if (cpumask_test_cpu(cpu, cpu_online_mask)) {
pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
pinst->cpumask.cbcpu);
if (!pd)
@@ -746,6 +748,9 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
return -ENOMEM;
padata_replace(pinst, pd);
+
+ cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
+ cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
}
return 0;