author     Vineet Gupta <vgupta@synopsys.com>   2014-11-20 15:42:09 +0530
committer  Vineet Gupta <vgupta@synopsys.com>   2015-06-25 06:00:16 +0530
commit     2576c28e3f623ed401db7e6197241865328620ef (patch)
tree       6bff094ce931e43c18d5887417e07c9defa0af4d /arch/arc/include/asm/bitops.h
parent     d57f727264f1425a94689bafc7e99e502cb135b5 (diff)
download   linux-2576c28e3f623ed401db7e6197241865328620ef.tar.bz2
ARC: add smp barriers around atomics per Documentation/atomic_ops.txt
- arch_spin_lock/unlock were lacking the ACQUIRE/RELEASE barriers.
  Since ARCv2 only provides load/load, store/store and all/all, we need
  the full barrier.

- LLOCK/SCOND based atomics, bitops and cmpxchg, which return modified
  values, were lacking the explicit smp barriers.

- Non LLOCK/SCOND variants don't need the explicit barriers since that
  is implicitly provided by the spin locks used to implement the
  critical section (the spin lock barriers in turn are also fixed in
  this commit as explained above).

Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: stable@vger.kernel.org
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
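The pattern required by Documentation/atomic_ops.txt is that a value-returning atomic read-modify-write must behave as if a full memory barrier sits on both sides of the operation. The following is a minimal, illustrative userspace sketch (not the kernel code in the diff below): it uses GCC __atomic builtins, the compare-and-swap retry loop merely stands in for the LLOCK/SCOND loop, and the name test_and_set_bit_sketch() is hypothetical.

#include <stdio.h>

static int test_and_set_bit_sketch(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old, new;

	/* plays the role of the smp_mb() added before the LLOCK/SCOND loop */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);

	do {
		old = __atomic_load_n(m, __ATOMIC_RELAXED);   /* ~ llock */
		new = old | (1UL << nr);                      /* ~ bset  */
	} while (!__atomic_compare_exchange_n(m, &old, new, 1,   /* ~ scond + retry */
					      __ATOMIC_RELAXED, __ATOMIC_RELAXED));

	/* plays the role of the smp_mb() added after the loop */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);

	return (old & (1UL << nr)) != 0;
}

int main(void)
{
	volatile unsigned long word = 0;

	printf("first call:  %d\n", test_and_set_bit_sketch(3, &word)); /* 0: bit was clear */
	printf("second call: %d\n", test_and_set_bit_sketch(3, &word)); /* 1: bit was already set */
	return 0;
}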
Diffstat (limited to 'arch/arc/include/asm/bitops.h')
-rw-r--r--  arch/arc/include/asm/bitops.h | 19
1 file changed, 19 insertions(+), 0 deletions(-)
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 829a8a2e9704..dd03fd931bb7 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -117,6 +117,12 @@ static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
if (__builtin_constant_p(nr))
nr &= 0x1f;
+ /*
+ * Explicit full memory barrier needed before/after as
+ * LLOCK/SCOND themselves don't provide any such semantics
+ */
+ smp_mb();
+
__asm__ __volatile__(
"1: llock %0, [%2] \n"
" bset %1, %0, %3 \n"
@@ -126,6 +132,8 @@ static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
: "r"(m), "ir"(nr)
: "cc");
+ smp_mb();
+
return (old & (1 << nr)) != 0;
}
@@ -139,6 +147,8 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
if (__builtin_constant_p(nr))
nr &= 0x1f;
+ smp_mb();
+
__asm__ __volatile__(
"1: llock %0, [%2] \n"
" bclr %1, %0, %3 \n"
@@ -148,6 +158,8 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
: "r"(m), "ir"(nr)
: "cc");
+ smp_mb();
+
return (old & (1 << nr)) != 0;
}
@@ -161,6 +173,8 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
if (__builtin_constant_p(nr))
nr &= 0x1f;
+ smp_mb();
+
__asm__ __volatile__(
"1: llock %0, [%2] \n"
" bxor %1, %0, %3 \n"
@@ -170,6 +184,8 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
: "r"(m), "ir"(nr)
: "cc");
+ smp_mb();
+
return (old & (1 << nr)) != 0;
}
@@ -249,6 +265,9 @@ static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
if (__builtin_constant_p(nr))
nr &= 0x1f;
+ /*
+ * spin lock/unlock provide the needed smp_mb() before/after
+ */
bitops_lock(flags);
old = *m;
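The excerpt above is cut off inside the non-LLOCK/SCOND (!CONFIG_ARC_HAS_LLSC) variant of test_and_set_bit(). The comment added there carries the second half of the argument: no explicit smp_mb() is needed because bitops_lock()/bitops_unlock() are spin-lock based, and the lock's ACQUIRE/RELEASE ordering (turned into full barriers on ARCv2 by this same commit) already brackets the read-modify-write. Below is a minimal, illustrative userspace sketch of that shape only, with a pthread mutex standing in for bitops_lock()/bitops_unlock() and a hypothetical locked_test_and_set_bit() name; it is not the continuation of the truncated kernel function.

#include <pthread.h>
#include <stdio.h>

/* stand-in for the spin lock behind bitops_lock()/bitops_unlock() */
static pthread_mutex_t bitops_mutex = PTHREAD_MUTEX_INITIALIZER;

static int locked_test_and_set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old;

	pthread_mutex_lock(&bitops_mutex);    /* ACQUIRE: orders entry to the critical section */
	old = *m;
	*m = old | (1UL << nr);
	pthread_mutex_unlock(&bitops_mutex);  /* RELEASE: orders exit */

	return (old & (1UL << nr)) != 0;
}

int main(void)
{
	volatile unsigned long word = 0;

	printf("first call:  %d\n", locked_test_and_set_bit(5, &word)); /* 0 */
	printf("second call: %d\n", locked_test_and_set_bit(5, &word)); /* 1 */
	return 0;
}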