author     Herbert Xu <herbert@gondor.apana.org.au>  2015-06-09 18:19:39 +0800
committer  Herbert Xu <herbert@gondor.apana.org.au>  2015-06-10 19:13:56 +0800
commit     205a525c334295e3cd4cc7755fd2c0398e3a787f
tree       90a62c129458c30fcce01cdde1c4c5f34820fb5a
parent     05dee9c7ebc06719df2dd11bfd3bea96aeaf9707
download   linux-205a525c334295e3cd4cc7755fd2c0398e3a787f.tar.bz2
random: Add callback API for random pool readiness
The get_blocking_random_bytes API is broken because the wait can
be arbitrarily long (potentially forever), so there is no safe way
to call it from within the kernel.

This patch replaces it with a callback API. The callback may be
invoked from interrupt context, so users need to schedule their own
work thread from it if necessary.

Callbacks can also be removed, not just added; otherwise user space
could allocate kernel memory with no bound, by opening algif_rng
descriptors and then closing them.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
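
For illustration only (not part of this patch), the sketch below shows how a
per-context consumer might use the new API. The names (struct example_ctx,
example_*) are hypothetical. It registers a callback when a context is
created, treats -EALREADY as "pool already seeded", and removes the callback
on teardown, which is exactly the removal case the last paragraph describes.

/*
 * Hypothetical usage sketch (not part of this patch).  A per-context
 * consumer embeds the callback in its own state and unregisters it on
 * teardown so that creating and destroying contexts cannot pin
 * unbounded kernel memory.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>

struct example_ctx {
        struct random_ready_callback rdy;
        bool registered;
        bool seeded;
};

static void example_seed_ready(struct random_ready_callback *rdy)
{
        struct example_ctx *ctx = container_of(rdy, struct example_ctx, rdy);

        /* May run in interrupt context: keep this short. */
        WRITE_ONCE(ctx->seeded, true);
}

static struct example_ctx *example_ctx_new(void)
{
        struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        int err;

        if (!ctx)
                return NULL;

        ctx->rdy.owner = THIS_MODULE;
        ctx->rdy.func = example_seed_ready;

        err = add_random_ready_callback(&ctx->rdy);
        if (err == 0) {
                ctx->registered = true;
        } else if (err == -EALREADY) {
                /* Pool was already initialised; no callback will fire. */
                ctx->seeded = true;
        } else {
                kfree(ctx);
                return NULL;
        }
        return ctx;
}

static void example_ctx_free(struct example_ctx *ctx)
{
        /* Safe even if the callback has already run and unlinked itself. */
        if (ctx->registered)
                del_random_ready_callback(&ctx->rdy);
        kfree(ctx);
}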
 drivers/char/random.c  | 78
 include/linux/random.h |  9
 2 files changed, 87 insertions, 0 deletions
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 159d0700f7d8..a1576ed1d88e 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -409,6 +409,9 @@ static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
 static DECLARE_WAIT_QUEUE_HEAD(urandom_init_wait);
 static struct fasync_struct *fasync;
 
+static DEFINE_SPINLOCK(random_ready_list_lock);
+static LIST_HEAD(random_ready_list);
+
 /**********************************************************************
  *
  * OS independent entropy store.  Here are the functions which handle
@@ -589,6 +592,22 @@ static void fast_mix(struct fast_pool *f)
         f->count++;
 }
 
+static void process_random_ready_list(void)
+{
+        unsigned long flags;
+        struct random_ready_callback *rdy, *tmp;
+
+        spin_lock_irqsave(&random_ready_list_lock, flags);
+        list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
+                struct module *owner = rdy->owner;
+
+                list_del_init(&rdy->list);
+                rdy->func(rdy);
+                module_put(owner);
+        }
+        spin_unlock_irqrestore(&random_ready_list_lock, flags);
+}
+
 /*
  * Credit (or debit) the entropy store with n bits of entropy.
  * Use credit_entropy_bits_safe() if the value comes from userspace
@@ -660,6 +679,7 @@ retry:
                 r->entropy_total = 0;
                 if (r == &nonblocking_pool) {
                         prandom_reseed_late();
+                        process_random_ready_list();
                         wake_up_all(&urandom_init_wait);
                         pr_notice("random: %s pool is initialized\n", r->name);
                 }
@@ -1257,6 +1277,64 @@ void get_blocking_random_bytes(void *buf, int nbytes)
 EXPORT_SYMBOL(get_blocking_random_bytes);
 
 /*
+ * Add a callback function that will be invoked when the nonblocking
+ * pool is initialised.
+ *
+ * returns: 0 if callback is successfully added
+ *          -EALREADY if pool is already initialised (callback not called)
+ *          -ENOENT if module for callback is not alive
+ */
+int add_random_ready_callback(struct random_ready_callback *rdy)
+{
+        struct module *owner;
+        unsigned long flags;
+        int err = -EALREADY;
+
+        if (likely(nonblocking_pool.initialized))
+                return err;
+
+        owner = rdy->owner;
+        if (!try_module_get(owner))
+                return -ENOENT;
+
+        spin_lock_irqsave(&random_ready_list_lock, flags);
+        if (nonblocking_pool.initialized)
+                goto out;
+
+        owner = NULL;
+
+        list_add(&rdy->list, &random_ready_list);
+        err = 0;
+
+out:
+        spin_unlock_irqrestore(&random_ready_list_lock, flags);
+
+        module_put(owner);
+
+        return err;
+}
+EXPORT_SYMBOL(add_random_ready_callback);
+
+/*
+ * Delete a previously registered readiness callback function.
+ */
+void del_random_ready_callback(struct random_ready_callback *rdy)
+{
+        unsigned long flags;
+        struct module *owner = NULL;
+
+        spin_lock_irqsave(&random_ready_list_lock, flags);
+        if (!list_empty(&rdy->list)) {
+                list_del_init(&rdy->list);
+                owner = rdy->owner;
+        }
+        spin_unlock_irqrestore(&random_ready_list_lock, flags);
+
+        module_put(owner);
+}
+EXPORT_SYMBOL(del_random_ready_callback);
+
+/*
  * This function will use the architecture-specific hardware random
  * number generator if it is available.  The arch-specific hw RNG will
  * almost certainly be faster than what we can do in software, but it
diff --git a/include/linux/random.h b/include/linux/random.h
index 796267d56901..30e2aca0b16a 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -6,8 +6,15 @@
 #ifndef _LINUX_RANDOM_H
 #define _LINUX_RANDOM_H
 
+#include <linux/list.h>
 #include <uapi/linux/random.h>
 
+struct random_ready_callback {
+        struct list_head list;
+        void (*func)(struct random_ready_callback *rdy);
+        struct module *owner;
+};
+
 extern void add_device_randomness(const void *, unsigned int);
 extern void add_input_randomness(unsigned int type, unsigned int code,
                                  unsigned int value);
@@ -15,6 +22,8 @@ extern void add_interrupt_randomness(int irq, int irq_flags);
 
 extern void get_random_bytes(void *buf, int nbytes);
 extern void get_blocking_random_bytes(void *buf, int nbytes);
+extern int add_random_ready_callback(struct random_ready_callback *rdy);
+extern void del_random_ready_callback(struct random_ready_callback *rdy);
 extern void get_random_bytes_arch(void *buf, int nbytes);
 void generate_random_uuid(unsigned char uuid_out[16]);
 extern int random_int_secret_init(void);
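
As the diff shows, process_random_ready_list() invokes rdy->func() while
holding random_ready_list_lock with interrupts disabled, and the pool can be
credited from interrupt paths, which is why the commit message tells users to
schedule their own work thread. A minimal sketch of that deferral, using
hypothetical example_* names, might look like this:

#include <linux/random.h>
#include <linux/string.h>
#include <linux/workqueue.h>

static void example_reseed_work(struct work_struct *work)
{
        u8 seed[32];

        /* Process context: safe to sleep, take mutexes, etc. */
        get_random_bytes(seed, sizeof(seed));
        /* ... feed 'seed' into the consumer's own generator ... */
        memzero_explicit(seed, sizeof(seed));
}

static DECLARE_WORK(example_work, example_reseed_work);

static void example_random_ready(struct random_ready_callback *rdy)
{
        /*
         * Called under the readiness list spinlock, possibly in IRQ
         * context, so just punt the real work to a workqueue.
         */
        schedule_work(&example_work);
}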