From e4f9bfb3feaeaca55cf177dadb7e3313836b10f4 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 4 Feb 2014 20:36:01 +0100
Subject: ia64: Fix up smp_mb__{before,after}_clear_bit()

IA64 doesn't actually have acquire/release barriers, it's a lie!

Add a comment explaining this and fix up the bitop barriers.

Signed-off-by: Peter Zijlstra
Acked-by: Paul E. McKenney
Link: http://lkml.kernel.org/n/tip-akevfh136um9dqvb1ohm55ca@git.kernel.org
Cc: Akinobu Mita
Cc: Fenghua Yu
Cc: Linus Torvalds
Cc: Tony Luck
Cc: linux-ia64@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar
---
 arch/ia64/include/asm/bitops.h       | 7 ++-----
 arch/ia64/include/uapi/asm/cmpxchg.h | 9 +++++++++
 2 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index c27eccd33349..feb8117ed06a 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -65,11 +65,8 @@ __set_bit (int nr, volatile void *addr)
 	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
 }
 
-/*
- * clear_bit() has "acquire" semantics.
- */
-#define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	do { /* skip */; } while (0)
+#define smp_mb__before_clear_bit()	barrier();
+#define smp_mb__after_clear_bit()	barrier();
 
 /**
  * clear_bit - Clears a bit in memory
diff --git a/arch/ia64/include/uapi/asm/cmpxchg.h b/arch/ia64/include/uapi/asm/cmpxchg.h
index 4f37dbbb8640..f35109b1d907 100644
--- a/arch/ia64/include/uapi/asm/cmpxchg.h
+++ b/arch/ia64/include/uapi/asm/cmpxchg.h
@@ -118,6 +118,15 @@ extern long ia64_cmpxchg_called_with_bad_pointer(void);
 #define cmpxchg_rel(ptr, o, n)	\
 	ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
 
+/*
+ * Worse still - early processor implementations actually just ignored
+ * the acquire/release and did a full fence all the time. Unfortunately
+ * this meant a lot of badly written code that used .acq when they really
+ * wanted .rel became legacy out in the wild - so when we made a cpu
+ * that strictly did the .acq or .rel ... all that code started breaking - so
+ * we had to back-pedal and keep the "legacy" behavior of a full fence :-(
+ */
+
 /* for compatibility with other platforms: */
 #define cmpxchg(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
 #define cmpxchg64(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
-- 
cgit v1.2.3
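For illustration, a minimal sketch (not part of the patch) of the unlock-style
call site these helpers exist for; the my_bit_unlock() wrapper, MY_LOCK_BIT,
and my_flags are hypothetical names. The point of the commit is that on ia64
clear_bit() is built from a cmpxchg that acts as a full fence on real hardware
(per the comment added to cmpxchg.h above), so the helper only has to be a
compiler barrier:

	/*
	 * Hypothetical call site: release an open-coded lock bit.
	 * Stores made while "holding" the bit must be visible before
	 * the bit is seen clear; on ia64 the fence comes from
	 * clear_bit() itself, so barrier() suffices here.
	 */
	#include <linux/bitops.h>

	#define MY_LOCK_BIT	0		/* hypothetical bit number */

	static unsigned long my_flags;		/* hypothetical flag word */

	static void my_bit_unlock(void)
	{
		smp_mb__before_clear_bit();	/* order prior stores before the clear */
		clear_bit(MY_LOCK_BIT, &my_flags);
	}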
From 0cd64efb61f1e68be26bd5121ccff3c779dc488b Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 13 Mar 2014 19:00:36 +0100
Subject: arch,ia64: Convert smp_mb__*()

ia64 atomic ops are full barriers; implement the new
smp_mb__{before,after}_atomic().

Signed-off-by: Peter Zijlstra
Acked-by: Paul E. McKenney
Link: http://lkml.kernel.org/n/tip-hyp7yj68cmqz1nqbfpr541ca@git.kernel.org
Cc: Akinobu Mita
Cc: Fenghua Yu
Cc: Linus Torvalds
Cc: Tony Luck
Cc: Will Deacon
Cc: linux-ia64@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar
---
 arch/ia64/include/asm/atomic.h  | 7 +------
 arch/ia64/include/asm/barrier.h | 3 +++
 arch/ia64/include/asm/bitops.h  | 6 ++----
 3 files changed, 6 insertions(+), 10 deletions(-)

diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 6e6fe1839f5d..0f8bf48dadf3 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -15,6 +15,7 @@
 #include <linux/types.h>
 
 #include <asm/intrinsics.h>
+#include <asm/barrier.h>
 
 #define ATOMIC_INIT(i)		{ (i) }
 
@@ -208,10 +209,4 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 #define atomic64_inc(v)			atomic64_add(1, (v))
 #define atomic64_dec(v)			atomic64_sub(1, (v))
 
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
 #endif /* _ASM_IA64_ATOMIC_H */
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index d0a69aa35e27..a48957c7b445 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -55,6 +55,9 @@
 
 #endif
 
+#define smp_mb__before_atomic()	barrier()
+#define smp_mb__after_atomic()	barrier()
+
 /*
  * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no
  * need for asm trickery!
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index feb8117ed06a..71e8145243ee 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -16,6 +16,7 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <asm/intrinsics.h>
+#include <asm/barrier.h>
 
 /**
  * set_bit - Atomically set a bit in memory
@@ -65,9 +66,6 @@ __set_bit (int nr, volatile void *addr)
 	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
 }
 
-#define smp_mb__before_clear_bit()	barrier();
-#define smp_mb__after_clear_bit()	barrier();
-
 /**
  * clear_bit - Clears a bit in memory
  * @nr: Bit to clear
@@ -75,7 +73,7 @@ __set_bit (int nr, volatile void *addr)
  *
  * clear_bit() is atomic and may not be reordered. However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static __inline__ void
-- 
cgit v1.2.3
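After this conversion, call sites use the single pair of helpers from
asm/barrier.h for all value-less atomics - bitops and atomic_dec() and
friends alike. A minimal sketch of the resulting pattern; complete_work(),
WORK_PENDING, work_state and nr_pending are hypothetical names, not from
the patch:

	/*
	 * Hypothetical call site: the new generic helpers replace both
	 * the smp_mb__{before,after}_clear_bit() and the
	 * smp_mb__{before,after}_atomic_{inc,dec}() variants.  On ia64
	 * both expand to barrier(), since the atomic ops are already
	 * full barriers.
	 */
	#include <linux/atomic.h>
	#include <linux/bitops.h>

	#define WORK_PENDING	0		/* hypothetical bit number */

	static unsigned long work_state;	/* hypothetical state word */
	static atomic_t nr_pending;		/* hypothetical in-flight count */

	static void complete_work(void)
	{
		smp_mb__before_atomic();	/* order the work's stores first */
		clear_bit(WORK_PENDING, &work_state);

		atomic_dec(&nr_pending);
		smp_mb__after_atomic();		/* order the dec before later loads */
	}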