author:    Thomas Gleixner <tglx@linutronix.de>  2009-07-25 16:21:48 +0200
committer: Ingo Molnar <mingo@elte.hu>           2011-09-13 11:11:47 +0200
commit:    f032a450812f6c7edd532772cc7c48091bca9f27
tree:      697632b583437656337cd883b1bb52bca2f6d77f  /lib/percpu_counter.c
parent:    ec484608c5885931c432e99ecfd2772288cd993c
locking, percpu_counter: Annotate ::lock as raw
The percpu_counter::lock can be taken in atomic context and therefore
cannot be preempted on -rt - annotate it.

In mainline this change documents the low-level nature of the lock -
otherwise there's no functional difference. Lockdep and Sparse checking
will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
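The field conversion itself lives in the companion include/linux/percpu_counter.h hunk, which this page does not show; a minimal sketch of the resulting structure, assuming the mainline layout of that era:

/* Sketch (assumption): percpu_counter after the matching header change,
 * with ::lock converted from spinlock_t to raw_spinlock_t. */
struct percpu_counter {
	raw_spinlock_t lock;	/* protects 'count' and delta folding */
	s64 count;		/* global value, updated under 'lock' */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* all counters, for hotplug folding */
#endif
	s32 __percpu *counters;	/* per-CPU deltas, updated locklessly */
};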
Diffstat (limited to 'lib/percpu_counter.c')
-rw-r--r--  lib/percpu_counter.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 28f2c33c6b53..f087105ed914 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -59,13 +59,13 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
int cpu;
- spin_lock(&fbc->lock);
+ raw_spin_lock(&fbc->lock);
for_each_possible_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
*pcount = 0;
}
fbc->count = amount;
- spin_unlock(&fbc->lock);
+ raw_spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);
@@ -76,10 +76,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
preempt_disable();
count = __this_cpu_read(*fbc->counters) + amount;
if (count >= batch || count <= -batch) {
- spin_lock(&fbc->lock);
+ raw_spin_lock(&fbc->lock);
fbc->count += count;
__this_cpu_write(*fbc->counters, 0);
- spin_unlock(&fbc->lock);
+ raw_spin_unlock(&fbc->lock);
} else {
__this_cpu_write(*fbc->counters, count);
}
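The raw lock above sits only on the slow path: each CPU accumulates a local s32 delta and folds it into fbc->count only once the delta crosses the batch threshold. A hypothetical caller-side sketch ('nr_widgets' and 'widgets_setup' are illustrative names; the calls are the standard percpu_counter API):

/* Hypothetical usage; 'nr_widgets' is an illustrative name. */
static struct percpu_counter nr_widgets;

static int widgets_setup(void)
{
	int ret = percpu_counter_init(&nr_widgets, 0);
	if (ret)
		return ret;

	/* Fast path: usually just adds to this CPU's s32 delta, no lock.
	 * Once |delta| >= batch, __percpu_counter_add() folds it into
	 * fbc->count under the raw_spin_lock shown in the hunk above. */
	percpu_counter_add(&nr_widgets, 1);
	return 0;
}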
@@ -96,13 +96,13 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
s64 ret;
int cpu;
- spin_lock(&fbc->lock);
+ raw_spin_lock(&fbc->lock);
ret = fbc->count;
for_each_online_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount;
}
- spin_unlock(&fbc->lock);
+ raw_spin_unlock(&fbc->lock);
return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
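This is the exact read; percpu_counter_read() simply returns fbc->count without touching the per-CPU deltas. A sketch of the trade-off, reusing the illustrative counter from the previous example:

/* Approximate read: lockless, can be off by roughly up to
 * batch * num_online_cpus() worth of unfolded per-CPU deltas. */
s64 approx = percpu_counter_read(&nr_widgets);

/* Exact read: takes fbc->lock (now a raw_spin_lock) and walks the
 * online CPUs, as in __percpu_counter_sum() above. */
s64 exact = percpu_counter_sum(&nr_widgets);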
@@ -110,7 +110,7 @@ EXPORT_SYMBOL(__percpu_counter_sum);
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
struct lock_class_key *key)
{
- spin_lock_init(&fbc->lock);
+ raw_spin_lock_init(&fbc->lock);
lockdep_set_class(&fbc->lock, key);
fbc->count = amount;
fbc->counters = alloc_percpu(s32);
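The lock_class_key argument gives every counter its own lockdep class. The percpu_counter_init() wrapper in the header (not shown here) supplies a static key per call site; roughly, assuming the mainline macro of this period:

/* Assumed sketch of include/linux/percpu_counter.h: a static key per
 * call site keeps lockdep from conflating unrelated counters. */
#define percpu_counter_init(fbc, value)				\
	({							\
		static struct lock_class_key __key;		\
								\
		__percpu_counter_init(fbc, value, &__key);	\
	})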
@@ -173,11 +173,11 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
s32 *pcount;
unsigned long flags;
- spin_lock_irqsave(&fbc->lock, flags);
+ raw_spin_lock_irqsave(&fbc->lock, flags);
pcount = per_cpu_ptr(fbc->counters, cpu);
fbc->count += *pcount;
*pcount = 0;
- spin_unlock_irqrestore(&fbc->lock, flags);
+ raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
mutex_unlock(&percpu_counters_lock);
#endif