author     Theodore Ts'o <tytso@mit.edu>  2014-06-14 03:06:57 -0400
committer  Theodore Ts'o <tytso@mit.edu>  2014-07-15 04:49:39 -0400
commit     840f95077ffd640df9c74ad9796fa094a5c8075a (patch)
tree       70cd554b7b5d7badbefdffc06067b28ad09fd749  /drivers/char/random.c
parent     cff850312cc7c0e0b9fe8b573687812dea232031 (diff)
download   linux-840f95077ffd640df9c74ad9796fa094a5c8075a.tar.bz2
random: clean up interrupt entropy accounting for archs w/o cycle counters
For architectures that don't have cycle counters, the algorithm for
deciding when to avoid giving entropy credit due to back-to-back timer
interrupts didn't make any sense, since we were checking every 64
interrupts.  Change it so that we only give an entropy credit if the
majority of the interrupts are not based on the timer.

Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Cc: George Spelvin <linux@horizon.com>
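The rule the patch adopts is simple to state on its own: the per-CPU fast
pool is drained after 64 interrupts, and the drain is credited one bit of
entropy only if the CPU has a cycle counter or if at least 32 of the 64
tallied interrupts were non-timer interrupts.  Below is a minimal
user-space C sketch of that accounting; the field names mirror the patch,
but the helper functions and the simulated interrupt stream are
illustrative assumptions, not the kernel implementation.

/*
 * Minimal user-space sketch of the crediting rule introduced by this
 * patch.  The fast_pool fields mirror the patch; everything else
 * (function names, the simulated IRQ stream) is illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

struct fast_pool_sketch {
	unsigned char count;         /* interrupts mixed since the last drain */
	unsigned char notimer_count; /* how many of those were not timer IRQs */
};

/* Tally one interrupt; report whether a full batch of 64 has accumulated. */
static bool tally_interrupt(struct fast_pool_sketch *fp, bool is_timer_irq)
{
	fp->count++;
	if (!is_timer_irq)
		fp->notimer_count++;
	return fp->count >= 64;
}

/*
 * At drain time, credit 1 bit only if the CPU has a cycle counter or
 * if at least half of the 64 collected interrupts were non-timer.
 */
static int entropy_credit(const struct fast_pool_sketch *fp,
			  bool have_cycle_counter)
{
	return (have_cycle_counter || fp->notimer_count >= 32) ? 1 : 0;
}

int main(void)
{
	struct fast_pool_sketch fp = { 0, 0 };

	/* Simulate 64 interrupts, alternating timer and non-timer. */
	for (int i = 0; i < 64; i++)
		tally_interrupt(&fp, i % 2 == 0);

	/* 32 of 64 were non-timer, so even without a cycle counter
	 * the batch earns its single bit of credit. */
	printf("credit = %d bit(s)\n", entropy_credit(&fp, false));
	return 0;
}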
Diffstat (limited to 'drivers/char/random.c')
 drivers/char/random.c | 42 +++++++++++++++++++-------------------
 1 file changed, 23 insertions(+), 19 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 364a8001a2bd..dfe918a21e32 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -548,9 +548,9 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in,
 struct fast_pool {
 	__u32		pool[4];
 	unsigned long	last;
-	unsigned short	count;
+	unsigned char	count;
+	unsigned char	notimer_count;
 	unsigned char	rotate;
-	unsigned char	last_timer_intr;
 };
 
 /*
@@ -850,15 +850,23 @@ void add_interrupt_randomness(int irq, int irq_flags)
 	input[3] = ip >> 32;
 
 	fast_mix(fast_pool, input);
+	if ((irq_flags & __IRQF_TIMER) == 0)
+		fast_pool->notimer_count++;
 
-	if ((fast_pool->count & 63) && !time_after(now, fast_pool->last + HZ))
-		return;
+	if (cycles) {
+		if ((fast_pool->count < 64) &&
+		    !time_after(now, fast_pool->last + HZ))
+			return;
+	} else {
+		/* CPU does not have a cycle counting register :-( */
+		if (fast_pool->count < 64)
+			return;
+	}
 
 	r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
-	if (!spin_trylock(&r->lock)) {
-		fast_pool->count--;
+	if (!spin_trylock(&r->lock))
 		return;
-	}
+
 	fast_pool->last = now;
 	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
@@ -874,19 +882,15 @@ void add_interrupt_randomness(int irq, int irq_flags)
 	spin_unlock(&r->lock);
 
 	/*
-	 * If we don't have a valid cycle counter, and we see
-	 * back-to-back timer interrupts, then skip giving credit for
-	 * any entropy, otherwise credit 1 bit.
+	 * If we have a valid cycle counter or if the majority of
+	 * interrupts collected were non-timer interrupts, then give
+	 * an entropy credit of 1 bit.  Yes, this is being very
+	 * conservative.
 	 */
-	credit++;
-	if (cycles == 0) {
-		if (irq_flags & __IRQF_TIMER) {
-			if (fast_pool->last_timer_intr)
-				credit--;
-			fast_pool->last_timer_intr = 1;
-		} else
-			fast_pool->last_timer_intr = 0;
-	}
+	if (cycles || (fast_pool->notimer_count >= 32))
+		credit++;
+
+	fast_pool->count = fast_pool->notimer_count = 0;
 
 	credit_entropy_bits(r, credit);
 }
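The other behavioral change is when the fast pool may spill into the input
pool at all: with a cycle counter the drain fires on a full batch of 64
events or after roughly one second of jiffies, whichever comes first,
while without one it waits for the full batch.  A stand-alone sketch of
that gating follows; should_flush() and the fixed HZ value are
hypothetical stand-ins for illustration, though time_after() copies the
wrap-safe comparison idiom of the kernel's jiffies helper.

/*
 * Hypothetical stand-alone sketch of the drain gating in the patched
 * add_interrupt_randomness().  should_flush() and the fixed HZ value
 * are inventions for illustration; time_after() copies the kernel's
 * wrap-safe jiffies comparison idiom.
 */
#include <stdbool.h>
#include <stdio.h>

#define HZ 100 /* assumed tick rate for the sketch */

static bool time_after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0; /* true when a is later than b, wrap-safe */
}

/* Decide whether the fast pool should spill into the input pool. */
static bool should_flush(unsigned char count, unsigned long now,
			 unsigned long last, bool have_cycle_counter)
{
	if (have_cycle_counter)
		/* Drain on a full batch of 64, or after a second regardless. */
		return count >= 64 || time_after(now, last + HZ);
	/*
	 * No cycle counter: the per-interrupt timestamps are too coarse
	 * to trust, so insist on the full batch before draining.
	 */
	return count >= 64;
}

int main(void)
{
	/* 40 events, two seconds since the last drain, no cycle counter:
	 * hold the batch. */
	printf("%d\n", should_flush(40, 3 * HZ, 1 * HZ, false)); /* prints 0 */

	/* Same state with a cycle counter: the one-second timeout fires. */
	printf("%d\n", should_flush(40, 3 * HZ, 1 * HZ, true));  /* prints 1 */
	return 0;
}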