diff options
author | Eric Dumazet <edumazet@google.com> | 2017-01-20 06:34:22 -0800 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2017-01-20 10:06:56 -0500 |
commit | aaf0f2fa682861e47a4f6a8762d2b8a9a4a51077 (patch) | |
tree | bc57d4a655a39f065e6a606913be6a25de9ab55a /lib/percpu_counter.c | |
parent | 44b4b461a0fb407507b46ea76a71376d74de7058 (diff) | |
download | linux-aaf0f2fa682861e47a4f6a8762d2b8a9a4a51077.tar.bz2 |
percpu_counter: percpu_counter_hotcpu_callback() cleanup
In commit ebd8fef304f9 ("percpu_counter: make percpu_counters_lock
irq-safe") we disabled irqs in percpu_counter_hotcpu_callback().
We can grab every counter spinlock without having to disable
irqs again.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'lib/percpu_counter.c')
-rw-r--r-- | lib/percpu_counter.c | 5 |
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index c8cebb137076..9c21000df0b5 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c @@ -176,13 +176,12 @@ static int percpu_counter_cpu_dead(unsigned int cpu) spin_lock_irq(&percpu_counters_lock); list_for_each_entry(fbc, &percpu_counters, list) { s32 *pcount; - unsigned long flags; - raw_spin_lock_irqsave(&fbc->lock, flags); + raw_spin_lock(&fbc->lock); pcount = per_cpu_ptr(fbc->counters, cpu); fbc->count += *pcount; *pcount = 0; - raw_spin_unlock_irqrestore(&fbc->lock, flags); + raw_spin_unlock(&fbc->lock); } spin_unlock_irq(&percpu_counters_lock); #endif |