From 5a1b26d7c629915446222ebe77d16567c98426ff Mon Sep 17 00:00:00 2001
From: Davidlohr Bueso
Date: Thu, 31 Dec 2015 12:09:13 +0200
Subject: lcoking/barriers, arch: Use smp barriers in smp_store_release()

With commit b92b8b35a2e ("locking/arch: Rename set_mb() to smp_store_mb()")
it was made clear that the context of this call (and thus set_mb) is
strictly for CPU ordering, as opposed to IO. As such all archs should use
the smp variant of mb(), respecting the semantics and saving a mandatory
barrier on UP.

Signed-off-by: Davidlohr Bueso
Signed-off-by: Peter Zijlstra (Intel)
Cc:
Cc: Andrew Morton
Cc: Benjamin Herrenschmidt
Cc: Heiko Carstens
Cc: Linus Torvalds
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Tony Luck
Cc: dave@stgolabs.net
Link: http://lkml.kernel.org/r/1445975631-17047-3-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar
Reviewed-by: Paul E. McKenney
---
 arch/ia64/include/asm/barrier.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/ia64/include')

diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index df896a1c41d3..209c4b817c95 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -77,7 +77,7 @@ do { \
 	___p1; \
 })
 
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 /*
  * The group barrier in front of the rsm & ssm are necessary to ensure
--
cgit v1.2.3
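A minimal C sketch (not part of the patch above) of the distinction the change relies on: smp_mb() collapses to a compiler barrier on UP builds, whereas mb() always emits the hardware fence, so ending smp_store_mb() with smp_mb() is what saves the mandatory barrier on UP. The barrier(), mb(), WRITE_ONCE() and CONFIG_SMP definitions below are simplified stand-ins for the kernel's real ones so the snippet is self-contained, not authoritative.

/*
 * Simplified model; the real definitions live in the kernel headers.
 */
#define barrier()		__asm__ __volatile__("" ::: "memory")	/* compiler-only barrier */
#define mb()			__sync_synchronize()			/* stand-in for the arch fence (ia64: mf) */
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

#ifdef CONFIG_SMP
# define smp_mb()		mb()		/* CPU/CPU ordering needs the real fence */
#else
# define smp_mb()		barrier()	/* UP: no other CPU, skip the fence      */
#endif

/* Post-patch form: the barrier strength now tracks CONFIG_SMP automatically. */
#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)

static int flag;

void publish(void)
{
	smp_store_mb(flag, 1);	/* store flag, then order it against later accesses */
}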
From 53a05ac15ee04b56ce02f0f831556e2fcdcce93f Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Mon, 21 Dec 2015 09:22:18 +0200
Subject: ia64: reuse asm-generic/barrier.h

On ia64 smp_rmb, smp_wmb, read_barrier_depends, smp_read_barrier_depends
and smp_store_mb() match the asm-generic variants exactly. Drop the local
definitions and pull in asm-generic/barrier.h instead.

This is in preparation to refactoring this code area.

Signed-off-by: Michael S. Tsirkin
Acked-by: Tony Luck
Acked-by: Arnd Bergmann
Acked-by: Peter Zijlstra (Intel)
---
 arch/ia64/include/asm/barrier.h | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

(limited to 'arch/ia64/include')

diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index 209c4b817c95..2f933480a764 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -48,12 +48,6 @@
 # define smp_mb() barrier()
 #endif
 
-#define smp_rmb() smp_mb()
-#define smp_wmb() smp_mb()
-
-#define read_barrier_depends() do { } while (0)
-#define smp_read_barrier_depends() do { } while (0)
-
 #define smp_mb__before_atomic() barrier()
 #define smp_mb__after_atomic() barrier()
 
@@ -77,12 +71,12 @@ do { \
 	___p1; \
 })
 
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
 /*
  * The group barrier in front of the rsm & ssm are necessary to ensure
  * that none of the previous instructions in the same group are
  * affected by the rsm/ssm.
  */
 
+#include <asm-generic/barrier.h>
+
 #endif /* _ASM_IA64_BARRIER_H */
--
cgit v1.2.3
From eebd1b927822f13429ec09d0a48fe92716b22840 Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Sun, 27 Dec 2015 15:04:42 +0200
Subject: ia64: define __smp_xxx

This defines __smp_xxx barriers for ia64, for use by virtualization.

smp_xxx barriers are removed as they are defined correctly by
asm-generic/barriers.h

This reduces the amount of arch-specific boiler-plate code.

Signed-off-by: Michael S. Tsirkin
Acked-by: Tony Luck
Acked-by: Arnd Bergmann
Acked-by: Peter Zijlstra (Intel)
---
 arch/ia64/include/asm/barrier.h | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

(limited to 'arch/ia64/include')

diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index 2f933480a764..588f1614cafc 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -42,28 +42,24 @@
 #define dma_rmb() mb()
 #define dma_wmb() mb()
 
-#ifdef CONFIG_SMP
-# define smp_mb() mb()
-#else
-# define smp_mb() barrier()
-#endif
+# define __smp_mb() mb()
 
-#define smp_mb__before_atomic() barrier()
-#define smp_mb__after_atomic() barrier()
+#define __smp_mb__before_atomic() barrier()
+#define __smp_mb__after_atomic() barrier()
 
 /*
  * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no
  * need for asm trickery!
  */
 
-#define smp_store_release(p, v) \
+#define __smp_store_release(p, v) \
 do { \
 	compiletime_assert_atomic_type(*p); \
 	barrier(); \
 	WRITE_ONCE(*p, v); \
 } while (0)
 
-#define smp_load_acquire(p) \
+#define __smp_load_acquire(p) \
 ({ \
 	typeof(*p) ___p1 = READ_ONCE(*p); \
 	compiletime_assert_atomic_type(*p); \
--
cgit v1.2.3
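The point of the __smp_xxx split, sketched below under stated assumptions: the arch now provides only the __smp_*() primitives, and the generic header derives both the regular smp_*() wrappers (which weaken to compiler barriers on !CONFIG_SMP) and the virt_*() wrappers used by virtualization code such as virtio, which needs SMP-strength ordering even in a UP guest because the host runs concurrently. This is a simplified model of include/asm-generic/barrier.h after the series, not the verbatim header.

/*
 * Simplified layering sketch, assuming the arch supplies __smp_mb(),
 * __smp_store_release() and __smp_load_acquire() as in the patch above.
 */
#ifdef CONFIG_SMP
# define smp_mb()			__smp_mb()
# define smp_store_release(p, v)	__smp_store_release(p, v)
# define smp_load_acquire(p)		__smp_load_acquire(p)
#else
# define smp_mb()			barrier()
# define smp_store_release(p, v)	do { barrier(); WRITE_ONCE(*(p), v); } while (0)
# define smp_load_acquire(p)		({ __typeof__(*(p)) ___p1 = READ_ONCE(*(p)); barrier(); ___p1; })
#endif

/* The virt_*() forms always map to the SMP-strength implementation. */
#define virt_mb()			__smp_mb()
#define virt_store_release(p, v)	__smp_store_release(p, v)
#define virt_load_acquire(p)		__smp_load_acquire(p)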