From e4f9bfb3feaeaca55cf177dadb7e3313836b10f4 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 4 Feb 2014 20:36:01 +0100 Subject: ia64: Fix up smp_mb__{before,after}_clear_bit() IA64 doesn't actually have acquire/release barriers, it's a lie! Add a comment explaining this and fix up the bitop barriers. Signed-off-by: Peter Zijlstra Acked-by: Paul E. McKenney Link: http://lkml.kernel.org/n/tip-akevfh136um9dqvb1ohm55ca@git.kernel.org Cc: Akinobu Mita Cc: Fenghua Yu Cc: Linus Torvalds Cc: Tony Luck Cc: linux-ia64@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/ia64/include/asm/bitops.h | 7 ++----- arch/ia64/include/uapi/asm/cmpxchg.h | 9 +++++++++ 2 files changed, 11 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h index c27eccd33349..feb8117ed06a 100644 --- a/arch/ia64/include/asm/bitops.h +++ b/arch/ia64/include/asm/bitops.h @@ -65,11 +65,8 @@ __set_bit (int nr, volatile void *addr) *((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31)); } -/* - * clear_bit() has "acquire" semantics. - */ -#define smp_mb__before_clear_bit() smp_mb() -#define smp_mb__after_clear_bit() do { /* skip */; } while (0) +#define smp_mb__before_clear_bit() barrier(); +#define smp_mb__after_clear_bit() barrier(); /** * clear_bit - Clears a bit in memory diff --git a/arch/ia64/include/uapi/asm/cmpxchg.h b/arch/ia64/include/uapi/asm/cmpxchg.h index 4f37dbbb8640..f35109b1d907 100644 --- a/arch/ia64/include/uapi/asm/cmpxchg.h +++ b/arch/ia64/include/uapi/asm/cmpxchg.h @@ -118,6 +118,15 @@ extern long ia64_cmpxchg_called_with_bad_pointer(void); #define cmpxchg_rel(ptr, o, n) \ ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr))) +/* + * Worse still - early processor implementations actually just ignored + * the acquire/release and did a full fence all the time. Unfortunately + * this meant a lot of badly written code that used .acq when they really + * wanted .rel became legacy out in the wild - so when we made a cpu + * that strictly did the .acq or .rel ... all that code started breaking - so + * we had to back-pedal and keep the "legacy" behavior of a full fence :-( + */ + /* for compatibility with other platforms: */ #define cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n)) #define cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n)) -- cgit v1.2.3 From 2ab08ee9f0a4eba27c7c4ce0b6d5118e8a18554b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 6 Feb 2014 14:26:10 +0100 Subject: arc,hexagon: Delete asm/barrier.h Both already use asm-generic/barrier.h as per their include/asm/Kbuild. Remove the stale files. Signed-off-by: Peter Zijlstra Acked-by: Paul E. McKenney Link: http://lkml.kernel.org/n/tip-c7vlkshl3tblim0o8z2p70kt@git.kernel.org Cc: Linus Torvalds Cc: Richard Kuo Cc: Vineet Gupta Cc: linux-hexagon@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/arc/include/asm/barrier.h | 37 ------------------------------------- arch/hexagon/include/asm/barrier.h | 37 ------------------------------------- 2 files changed, 74 deletions(-) delete mode 100644 arch/arc/include/asm/barrier.h delete mode 100644 arch/hexagon/include/asm/barrier.h (limited to 'arch') diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h deleted file mode 100644 index c32245c3d1e9..000000000000 --- a/arch/arc/include/asm/barrier.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc.
(www.synopsys.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef __ASM_BARRIER_H -#define __ASM_BARRIER_H - -#ifndef __ASSEMBLY__ - -/* TODO-vineetg: Need to see what this does, don't we need sync anywhere */ -#define mb() __asm__ __volatile__ ("" : : : "memory") -#define rmb() mb() -#define wmb() mb() -#define set_mb(var, value) do { var = value; mb(); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) -#define read_barrier_depends() mb() - -/* TODO-vineetg verify the correctness of macros here */ -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#endif - -#define smp_read_barrier_depends() do { } while (0) - -#endif - -#endif diff --git a/arch/hexagon/include/asm/barrier.h b/arch/hexagon/include/asm/barrier.h deleted file mode 100644 index 4e863daea25b..000000000000 --- a/arch/hexagon/include/asm/barrier.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Memory barrier definitions for the Hexagon architecture - * - * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - */ - -#ifndef _ASM_BARRIER_H -#define _ASM_BARRIER_H - -#define rmb() barrier() -#define read_barrier_depends() barrier() -#define wmb() barrier() -#define mb() barrier() -#define smp_rmb() barrier() -#define smp_read_barrier_depends() barrier() -#define smp_wmb() barrier() -#define smp_mb() barrier() - -/* Set a value and use a memory barrier. Used by the scheduler somewhere. */ -#define set_mb(var, value) \ - do { var = value; mb(); } while (0) - -#endif /* _ASM_BARRIER_H */ -- cgit v1.2.3 From 6ae11028a62b2f82ee283392a84f33da599a81e7 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 12 Mar 2014 17:11:00 +0100 Subject: arch,alpha: Convert smp_mb__*() to the asm-generic primitives The Alpha ll/sc primitives do not imply any sort of barrier; therefore the smp_mb__{before,after} should be a full barrier. This is the default from asm-generic/barrier.h and therefore just remove the current definitions. Signed-off-by: Peter Zijlstra Acked-by: Paul E. 
McKenney Link: http://lkml.kernel.org/n/tip-iacwfd15lq3ta2v7jut747r7@git.kernel.org Cc: Ivan Kokshaysky Cc: Linus Torvalds Cc: Matt Turner Cc: Richard Henderson Cc: linux-alpha@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/alpha/include/asm/atomic.h | 5 ----- arch/alpha/include/asm/bitops.h | 3 --- 2 files changed, 8 deletions(-) (limited to 'arch') diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h index 78b03ef39f6f..ed60a1ee1ed3 100644 --- a/arch/alpha/include/asm/atomic.h +++ b/arch/alpha/include/asm/atomic.h @@ -292,9 +292,4 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) #define atomic_dec(v) atomic_sub(1,(v)) #define atomic64_dec(v) atomic64_sub(1,(v)) -#define smp_mb__before_atomic_dec() smp_mb() -#define smp_mb__after_atomic_dec() smp_mb() -#define smp_mb__before_atomic_inc() smp_mb() -#define smp_mb__after_atomic_inc() smp_mb() - #endif /* _ALPHA_ATOMIC_H */ diff --git a/arch/alpha/include/asm/bitops.h b/arch/alpha/include/asm/bitops.h index a19ba5efea4c..4bdfbd444e63 100644 --- a/arch/alpha/include/asm/bitops.h +++ b/arch/alpha/include/asm/bitops.h @@ -53,9 +53,6 @@ __set_bit(unsigned long nr, volatile void * addr) *m |= 1 << (nr & 31); } -#define smp_mb__before_clear_bit() smp_mb() -#define smp_mb__after_clear_bit() smp_mb() - static inline void clear_bit(unsigned long nr, volatile void * addr) { -- cgit v1.2.3 From d594ffa94b7efdd8c6da5ee48aea8ae229809b55 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 12 Mar 2014 17:11:00 +0100 Subject: arch,arc: Convert smp_mb__*() The arc mb() implementation is a compiler barrier(), therefore it all doesn't matter one way or the other. Simply remove the existing definitions and use whatever is generated by the defaults. Signed-off-by: Peter Zijlstra Acked-by: Paul E. McKenney Link: http://lkml.kernel.org/n/tip-ua48a59wri3ybz1rz8i7uvbr@git.kernel.org Cc: Linus Torvalds Cc: Vineet Gupta Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/arc/include/asm/atomic.h | 5 ----- arch/arc/include/asm/bitops.h | 5 +---- 2 files changed, 1 insertion(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 03e494f695d1..83f03ca6caf6 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -190,11 +190,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) #endif /* !CONFIG_ARC_HAS_LLSC */ -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - /** * __atomic_add_unless - add unless the number is a given value * @v: pointer of type atomic_t diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h index 647a83a8e756..ebc0cf3164dc 100644 --- a/arch/arc/include/asm/bitops.h +++ b/arch/arc/include/asm/bitops.h @@ -19,6 +19,7 @@ #include #include +#include /* * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns. 
@@ -496,10 +497,6 @@ static inline __attribute__ ((const)) int __ffs(unsigned long word) */ #define ffz(x) __ffs(~(x)) -/* TODO does this affect uni-processor code */ -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() - #include #include #include -- cgit v1.2.3 From 030d0178bdbd237c1f0577f03bbc1d7140a75044 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 12 Mar 2014 17:11:00 +0100 Subject: arch,arm: Convert smp_mb__*() ARM uses ll/sc primitives that do not imply barriers for all regular atomic ops, therefore smp_mb__{before,after} need be a full barrier. Since ARM doesn't use asm-generic/barrier.h include the required definitions in its asm/barrier.h Signed-off-by: Peter Zijlstra Acked-by: Paul E. McKenney Link: http://lkml.kernel.org/n/tip-yijo7sglsl7uusbp13upcuvo@git.kernel.org Cc: Albin Tonnerre Cc: Catalin Marinas Cc: Chen Gang Cc: Linus Torvalds Cc: Nicolas Pitre Cc: Russell King Cc: Victor Kamensky Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/arm/include/asm/atomic.h | 5 ----- arch/arm/include/asm/barrier.h | 3 +++ arch/arm/include/asm/bitops.h | 4 +--- 3 files changed, 4 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index 9a92fd7864a8..3040359094d9 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -241,11 +241,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0) -#define smp_mb__before_atomic_dec() smp_mb() -#define smp_mb__after_atomic_dec() smp_mb() -#define smp_mb__before_atomic_inc() smp_mb() -#define smp_mb__after_atomic_inc() smp_mb() - #ifndef CONFIG_GENERIC_ATOMIC64 typedef struct { long long counter; diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h index 2f59f7443396..c6a3e73a6e24 100644 --- a/arch/arm/include/asm/barrier.h +++ b/arch/arm/include/asm/barrier.h @@ -79,5 +79,8 @@ do { \ #define set_mb(var, value) do { var = value; smp_mb(); } while (0) +#define smp_mb__before_atomic() smp_mb() +#define smp_mb__after_atomic() smp_mb() + #endif /* !__ASSEMBLY__ */ #endif /* __ASM_BARRIER_H */ diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h index b2e298a90d76..56380995f4c3 100644 --- a/arch/arm/include/asm/bitops.h +++ b/arch/arm/include/asm/bitops.h @@ -25,9 +25,7 @@ #include #include - -#define smp_mb__before_clear_bit() smp_mb() -#define smp_mb__after_clear_bit() smp_mb() +#include /* * These functions are the basis of our bit ops. -- cgit v1.2.3 From 8715466b602729061394df18864ea64b97951589 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 13 Mar 2014 19:00:37 +0100 Subject: arch,arm64: Convert smp_mb__*() AARGH64 uses ll/sc primitives that do not imply any barriers for the normal atomics, therefore smp_mb__{before,after} should be a full barrier. Since AARGH64 doesn't use asm-generic/barrier.h, add the required definitions to its asm/barrier.h. Signed-off-by: Peter Zijlstra Acked-by: Paul E. 
McKenney Link: http://lkml.kernel.org/n/tip-8p5iclqgy78al33kck3ht7nr@git.kernel.org Cc: Catalin Marinas Cc: Chen Gang Cc: Linus Torvalds Cc: Russell King Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/arm64/include/asm/atomic.h | 5 ----- arch/arm64/include/asm/barrier.h | 3 +++ arch/arm64/include/asm/bitops.h | 9 --------- 3 files changed, 3 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index 0237f0867e37..57e8cb49824c 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -152,11 +152,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0) -#define smp_mb__before_atomic_dec() smp_mb() -#define smp_mb__after_atomic_dec() smp_mb() -#define smp_mb__before_atomic_inc() smp_mb() -#define smp_mb__after_atomic_inc() smp_mb() - /* * 64-bit atomic operations. */ diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 66eb7648043b..48b9e704af7c 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -98,6 +98,9 @@ do { \ #define set_mb(var, value) do { var = value; smp_mb(); } while (0) #define nop() asm volatile("nop"); +#define smp_mb__before_atomic() smp_mb() +#define smp_mb__after_atomic() smp_mb() + #endif /* __ASSEMBLY__ */ #endif /* __ASM_BARRIER_H */ diff --git a/arch/arm64/include/asm/bitops.h b/arch/arm64/include/asm/bitops.h index aa5b59d6ba43..9c19594ce7cb 100644 --- a/arch/arm64/include/asm/bitops.h +++ b/arch/arm64/include/asm/bitops.h @@ -17,17 +17,8 @@ #define __ASM_BITOPS_H #include - #include -/* - * clear_bit may not imply a memory barrier - */ -#ifndef smp_mb__before_clear_bit -#define smp_mb__before_clear_bit() smp_mb() -#define smp_mb__after_clear_bit() smp_mb() -#endif - #ifndef _LINUX_BITOPS_H #error only can be included directly #endif -- cgit v1.2.3 From 710adaa913169d7183cdf0de41c2a349101ff615 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 13 Mar 2014 19:00:37 +0100 Subject: arch,avr32: Convert smp_mb__*() AVR32's mb() implementation is a compiler barrier(), therefore it all doesn't matter, fully rely on whatever asm-generic/barrier.h generates. Signed-off-by: Peter Zijlstra Acked-by: Paul E. 
McKenney Link: http://lkml.kernel.org/n/tip-8gow97a7mapmnec0pvf729pj@git.kernel.org Cc: Haavard Skinnemoen Cc: Hans-Christian Egtvedt Cc: Linus Torvalds Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/avr32/include/asm/atomic.h | 5 ----- arch/avr32/include/asm/bitops.h | 9 ++------- 2 files changed, 2 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h index 61407279208a..0780f3f2415b 100644 --- a/arch/avr32/include/asm/atomic.h +++ b/arch/avr32/include/asm/atomic.h @@ -183,9 +183,4 @@ static inline int atomic_sub_if_positive(int i, atomic_t *v) #define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v) -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - #endif /* __ASM_AVR32_ATOMIC_H */ diff --git a/arch/avr32/include/asm/bitops.h b/arch/avr32/include/asm/bitops.h index ebe7ad3f490b..910d5374ce59 100644 --- a/arch/avr32/include/asm/bitops.h +++ b/arch/avr32/include/asm/bitops.h @@ -13,12 +13,7 @@ #endif #include - -/* - * clear_bit() doesn't provide any barrier for the compiler - */ -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() +#include /* * set_bit - Atomically set a bit in memory @@ -67,7 +62,7 @@ static inline void set_bit(int nr, volatile void * addr) * * clear_bit() is atomic and may not be reordered. However, it does * not contain a memory barrier, so if it is used for locking purposes, - * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() + * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic() * in order to ensure changes are visible on other processors. */ static inline void clear_bit(int nr, volatile void * addr) -- cgit v1.2.3 From b7bb7d9b28f6278625fbe7892b8c7799bb12a26a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 13 Mar 2014 19:00:37 +0100 Subject: arch,blackfin: Convert smp_mb__*() Blackfin's atomic primitives do not imply a full barrier, as witnessed from its SMP smp_mb__{before,after}_clear_bit() implementations. However, since !SMP smp_mb() reduces to barrier(), remove everything and rely on the asm-generic/barrier.h implementation. Signed-off-by: Peter Zijlstra Acked-by: Paul E.
McKenney Link: http://lkml.kernel.org/n/tip-1widdkdsb3c1titq8jez6g3g@git.kernel.org Cc: Geert Uytterhoeven Cc: Linus Torvalds Cc: Mathieu Desnoyers Cc: Steven Miao Cc: adi-buildroot-devel@lists.sourceforge.net Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/blackfin/include/asm/barrier.h | 3 +++ arch/blackfin/include/asm/bitops.h | 14 ++------------ 2 files changed, 5 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/blackfin/include/asm/barrier.h b/arch/blackfin/include/asm/barrier.h index 19283a16ac08..420006877998 100644 --- a/arch/blackfin/include/asm/barrier.h +++ b/arch/blackfin/include/asm/barrier.h @@ -27,6 +27,9 @@ #endif /* !CONFIG_SMP */ +#define smp_mb__before_atomic() barrier() +#define smp_mb__after_atomic() barrier() + #include #endif /* _BLACKFIN_BARRIER_H */ diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h index 0ca40dd44724..b298b654a26f 100644 --- a/arch/blackfin/include/asm/bitops.h +++ b/arch/blackfin/include/asm/bitops.h @@ -27,21 +27,17 @@ #include +#include + #ifndef CONFIG_SMP #include - /* * clear_bit may not imply a memory barrier */ -#ifndef smp_mb__before_clear_bit -#define smp_mb__before_clear_bit() smp_mb() -#define smp_mb__after_clear_bit() smp_mb() -#endif #include #include #else -#include #include /* swab32 */ #include @@ -101,12 +97,6 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr) return __raw_bit_test_toggle_asm(a, nr & 0x1f); } -/* - * clear_bit() doesn't provide any barrier for the compiler. - */ -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() - #define test_bit __skip_test_bit #include #undef test_bit -- cgit v1.2.3 From a8ec1516a74554992605f230859936d5d79c27ed Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 13 Mar 2014 19:00:37 +0100 Subject: arch,c6x: Convert smp_mb__*() c6x doesn't have a barrier.h and completely relies on asm-generic/barrier.h. Therefore its smp_mb() is barrier() and we can use the default versions that are smp_mb(). Signed-off-by: Peter Zijlstra Acked-by: Mark Salter Acked-by: Paul E. McKenney Link: http://lkml.kernel.org/n/tip-kl53k3pyj0rbd80jq8ralpf3@git.kernel.org Cc: Aurelien Jacquiot Cc: Linus Torvalds Cc: Mark Salter Cc: linux-c6x-dev@linux-c6x.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/c6x/include/asm/bitops.h | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/c6x/include/asm/bitops.h b/arch/c6x/include/asm/bitops.h index 0bec7e5036a8..f0ab012401b6 100644 --- a/arch/c6x/include/asm/bitops.h +++ b/arch/c6x/include/asm/bitops.h @@ -14,14 +14,8 @@ #ifdef __KERNEL__ #include - #include - -/* - * clear_bit() doesn't provide any barrier for the compiler. - */ -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() +#include /* * We are lucky, DSP is perfect for bitops: do it in 3 cycles -- cgit v1.2.3 From 17b40213ab2b091fd5d2a9337eae62312608ad3d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 13 Mar 2014 19:00:37 +0100 Subject: arch,cris: Convert smp_mb__*() Cris fully relies on asm-generic/barrier.h, therefore its smp_mb() is barrier(), thus we can use the default implementation that uses smp_mb(). (Include asm/system.h and asm/barrier.h to avoid header dependency hell.) Signed-off-by: Peter Zijlstra Acked-by: Jesper Nilsson Acked-by: Paul E. 
McKenney Link: http://lkml.kernel.org/n/tip-wvewbe8os3s1e4pt1cdotuee@git.kernel.org Cc: Andrew Morton Cc: Geert Uytterhoeven Cc: Jesper Nilsson Cc: Linus Torvalds Cc: Mikael Starvik Cc: linux-cris-kernel@axis.com Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/cris/include/asm/atomic.h | 8 ++------ arch/cris/include/asm/bitops.h | 9 ++------- 2 files changed, 4 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/cris/include/asm/atomic.h b/arch/cris/include/asm/atomic.h index 1056a5dfe04f..aa429baebaf9 100644 --- a/arch/cris/include/asm/atomic.h +++ b/arch/cris/include/asm/atomic.h @@ -7,6 +7,8 @@ #include #include #include +#include +#include /* * Atomic operations that C can't guarantee us. Useful for @@ -151,10 +153,4 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) return ret; } -/* Atomic operations are already serializing */ -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - #endif diff --git a/arch/cris/include/asm/bitops.h b/arch/cris/include/asm/bitops.h index 053c17b36559..bd49a546f4f5 100644 --- a/arch/cris/include/asm/bitops.h +++ b/arch/cris/include/asm/bitops.h @@ -21,6 +21,7 @@ #include #include #include +#include /* * set_bit - Atomically set a bit in memory @@ -42,7 +43,7 @@ * * clear_bit() is atomic and may not be reordered. However, it does * not contain a memory barrier, so if it is used for locking purposes, - * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() + * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic() * in order to ensure changes are visible on other processors. */ @@ -84,12 +85,6 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr) return retval; } -/* - * clear_bit() doesn't provide any barrier for the compiler. - */ -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() - /** * test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to clear -- cgit v1.2.3 From d038c0e8380fa8cae049bee8d81a5672cd159013 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 13 Mar 2014 19:00:37 +0100 Subject: arch,frv: Convert smp_mb__*() Because: arch/frv/include/asm/smp.h:#error SMP not supported smp_mb() is barrier() and we can use the default implementation that uses smp_mb(). Signed-off-by: Peter Zijlstra Acked-by: Paul E. McKenney Link: http://lkml.kernel.org/n/tip-n296g51yzdu5ru1vp7mccxmf@git.kernel.org Cc: David Howells Cc: Linus Torvalds Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/frv/include/asm/atomic.h | 7 +------ arch/frv/include/asm/bitops.h | 6 ------ 2 files changed, 1 insertion(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h index b86329d0e316..f6c3a1690101 100644 --- a/arch/frv/include/asm/atomic.h +++ b/arch/frv/include/asm/atomic.h @@ -17,6 +17,7 @@ #include #include #include +#include #ifdef CONFIG_SMP #error not SMP safe @@ -29,12 +30,6 @@ * We do not have SMP systems, so we don't have to deal with that. 
*/ -/* Atomic operations are already serializing */ -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - #define ATOMIC_INIT(i) { (i) } #define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_set(v, i) (((v)->counter) = (i)) diff --git a/arch/frv/include/asm/bitops.h b/arch/frv/include/asm/bitops.h index 57bf85db893f..96de220ef131 100644 --- a/arch/frv/include/asm/bitops.h +++ b/arch/frv/include/asm/bitops.h @@ -25,12 +25,6 @@ #include -/* - * clear_bit() doesn't provide any barrier for the compiler. - */ -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() - #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS static inline unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v) -- cgit v1.2.3 From 94cf42f823bc904305b0ee93a09bcd51ba380497 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 13 Mar 2014 19:00:36 +0100 Subject: arch,hexagon: Convert smp_mb__*() Hexagon uses asm-generic/barrier.h and its smp_mb() is barrier(). Therefore we can use the default implementation that uses smp_mb(). Signed-off-by: Peter Zijlstra Acked-by: Paul E. McKenney Link: http://lkml.kernel.org/n/tip-87irqrrbgizeojjfdqhypud3@git.kernel.org Cc: Linus Torvalds Cc: Richard Kuo Cc: Vineet Gupta Cc: linux-hexagon@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/hexagon/include/asm/atomic.h | 6 +----- arch/hexagon/include/asm/bitops.h | 4 +--- 2 files changed, 2 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h index 17dc63780c06..de916b11bff5 100644 --- a/arch/hexagon/include/asm/atomic.h +++ b/arch/hexagon/include/asm/atomic.h @@ -24,6 +24,7 @@ #include #include +#include #define ATOMIC_INIT(i) { (i) } @@ -176,9 +177,4 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) #define atomic_inc_return(v) (atomic_add_return(1, v)) #define atomic_dec_return(v) (atomic_sub_return(1, v)) -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - #endif diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h index 9b1e4afbab3c..5e4a59b3ec1b 100644 --- a/arch/hexagon/include/asm/bitops.h +++ b/arch/hexagon/include/asm/bitops.h @@ -25,12 +25,10 @@ #include #include #include +#include #ifdef __KERNEL__ -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() - /* * The offset calculations for these are based on BITS_PER_LONG == 32 * (i.e. I get to shift by #5-2 (32 bits per long, 4 bytes per access), -- cgit v1.2.3 From 0cd64efb61f1e68be26bd5121ccff3c779dc488b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 13 Mar 2014 19:00:36 +0100 Subject: arch,ia64: Convert smp_mb__*() ia64 atomic ops are full barriers; implement the new smp_mb__{before,after}_atomic(). Signed-off-by: Peter Zijlstra Acked-by: Paul E.
McKenney Link: http://lkml.kernel.org/n/tip-hyp7yj68cmqz1nqbfpr541ca@git.kernel.org Cc: Akinobu Mita Cc: Fenghua Yu Cc: Linus Torvalds Cc: Tony Luck Cc: Will Deacon Cc: linux-ia64@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/ia64/include/asm/atomic.h | 7 +------ arch/ia64/include/asm/barrier.h | 3 +++ arch/ia64/include/asm/bitops.h | 6 ++---- 3 files changed, 6 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h index 6e6fe1839f5d..0f8bf48dadf3 100644 --- a/arch/ia64/include/asm/atomic.h +++ b/arch/ia64/include/asm/atomic.h @@ -15,6 +15,7 @@ #include #include +#include #define ATOMIC_INIT(i) { (i) } @@ -208,10 +209,4 @@ atomic64_add_negative (__s64 i, atomic64_t *v) #define atomic64_inc(v) atomic64_add(1, (v)) #define atomic64_dec(v) atomic64_sub(1, (v)) -/* Atomic operations are already serializing */ -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - #endif /* _ASM_IA64_ATOMIC_H */ diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h index d0a69aa35e27..a48957c7b445 100644 --- a/arch/ia64/include/asm/barrier.h +++ b/arch/ia64/include/asm/barrier.h @@ -55,6 +55,9 @@ #endif +#define smp_mb__before_atomic() barrier() +#define smp_mb__after_atomic() barrier() + /* * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no * need for asm trickery! diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h index feb8117ed06a..71e8145243ee 100644 --- a/arch/ia64/include/asm/bitops.h +++ b/arch/ia64/include/asm/bitops.h @@ -16,6 +16,7 @@ #include #include #include +#include /** * set_bit - Atomically set a bit in memory @@ -65,9 +66,6 @@ __set_bit (int nr, volatile void *addr) *((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31)); } -#define smp_mb__before_clear_bit() barrier(); -#define smp_mb__after_clear_bit() barrier(); - /** * clear_bit - Clears a bit in memory * @nr: Bit to clear @@ -75,7 +73,7 @@ __set_bit (int nr, volatile void *addr) * * clear_bit() is atomic and may not be reordered. However, it does * not contain a memory barrier, so if it is used for locking purposes, - * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() + * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic() * in order to ensure changes are visible on other processors. */ static __inline__ void -- cgit v1.2.3 From 89607d5e2928d49bb64669d6d1e30e933a33f817 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 13 Mar 2014 19:00:36 +0100 Subject: arch,m32r: Convert smp_mb__*() M32r uses asm-generic/barrier.h and its smp_mb() is barrier(); therefore we can use the generic versions which default to smp_mb(). Signed-off-by: Peter Zijlstra Acked-by: Paul E. 
McKenney Link: http://lkml.kernel.org/n/tip-wh6xljltyvmpy9t0bc80k1fy@git.kernel.org Cc: Hirokazu Takata Cc: Linus Torvalds Cc: linux-kernel@vger.kernel.org Cc: linux-m32r-ja@ml.linux-m32r.org Cc: linux-m32r@ml.linux-m32r.org Signed-off-by: Ingo Molnar --- arch/m32r/include/asm/atomic.h | 7 +------ arch/m32r/include/asm/bitops.h | 6 ++---- 2 files changed, 3 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h index 0d81697c326c..8ad0ed4182a5 100644 --- a/arch/m32r/include/asm/atomic.h +++ b/arch/m32r/include/asm/atomic.h @@ -13,6 +13,7 @@ #include #include #include +#include /* * Atomic operations that C can't guarantee us. Useful for @@ -308,10 +309,4 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr) local_irq_restore(flags); } -/* Atomic operations are already serializing on m32r */ -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - #endif /* _ASM_M32R_ATOMIC_H */ diff --git a/arch/m32r/include/asm/bitops.h b/arch/m32r/include/asm/bitops.h index d3dea9ac7d4e..86ba2b42a6cf 100644 --- a/arch/m32r/include/asm/bitops.h +++ b/arch/m32r/include/asm/bitops.h @@ -21,6 +21,7 @@ #include #include #include +#include /* * These have to be done with inline assembly: that way the bit-setting @@ -73,7 +74,7 @@ static __inline__ void set_bit(int nr, volatile void * addr) * * clear_bit() is atomic and may not be reordered. However, it does * not contain a memory barrier, so if it is used for locking purposes, - * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() + * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic() * in order to ensure changes are visible on other processors. */ static __inline__ void clear_bit(int nr, volatile void * addr) @@ -103,9 +104,6 @@ static __inline__ void clear_bit(int nr, volatile void * addr) local_irq_restore(flags); } -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() - /** * change_bit - Toggle a bit in memory * @nr: Bit to clear -- cgit v1.2.3 From 2db56e8606016e33903c64feaed989ffecd66a1b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 13 Mar 2014 19:00:36 +0100 Subject: arch,m68k: Convert smp_mb__*() m68k uses asm-generic/barrier.h and its smp_mb() is barrier(), therefore we can use the generic versions that use smp_mb(). Signed-off-by: Peter Zijlstra Acked-by: Paul E. McKenney Link: http://lkml.kernel.org/n/tip-s5dvosrb7qhvpmtaffwfn0zg@git.kernel.org Cc: Geert Uytterhoeven Cc: Linus Torvalds Cc: linux-kernel@vger.kernel.org Cc: linux-m68k@lists.linux-m68k.org Signed-off-by: Ingo Molnar --- arch/m68k/include/asm/atomic.h | 8 +------- arch/m68k/include/asm/bitops.h | 7 +------ 2 files changed, 2 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h index f4e32de263a7..55695212a2ae 100644 --- a/arch/m68k/include/asm/atomic.h +++ b/arch/m68k/include/asm/atomic.h @@ -4,6 +4,7 @@ #include #include #include +#include /* * Atomic operations that C can't guarantee us. 
Useful for @@ -209,11 +210,4 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) return c; } - -/* Atomic operations are already serializing */ -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - #endif /* __ARCH_M68K_ATOMIC __ */ diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h index c6baa913592a..b4a9b0d5928d 100644 --- a/arch/m68k/include/asm/bitops.h +++ b/arch/m68k/include/asm/bitops.h @@ -13,6 +13,7 @@ #endif #include +#include /* * Bit access functions vary across the ColdFire and 68k families. @@ -67,12 +68,6 @@ static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr) #define __set_bit(nr, vaddr) set_bit(nr, vaddr) -/* - * clear_bit() doesn't provide any barrier for the compiler. - */ -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() - static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr) { char *p = (char *)vaddr + (nr ^ 31) / 8; -- cgit v1.2.3 From 40074dece684fc61ab72cfc1689d564cba1c5f64 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 13 Mar 2014 19:00:36 +0100 Subject: arch,metag: Convert smp_mb__*() Implement the new barriers; as per the old versions the metag atomic imply a full barrier. Signed-off-by: Peter Zijlstra Acked-by: Paul E. McKenney Link: http://lkml.kernel.org/n/tip-dqnyo215kq38wi4xcxnbpjw3@git.kernel.org Cc: James Hogan Cc: Linus Torvalds Cc: Will Deacon Cc: linux-kernel@vger.kernel.org Cc: linux-metag@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/metag/include/asm/atomic.h | 6 +----- arch/metag/include/asm/barrier.h | 3 +++ arch/metag/include/asm/bitops.h | 6 ------ 3 files changed, 4 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/metag/include/asm/atomic.h b/arch/metag/include/asm/atomic.h index 307ecd2bd9a1..470e365f04ea 100644 --- a/arch/metag/include/asm/atomic.h +++ b/arch/metag/include/asm/atomic.h @@ -4,6 +4,7 @@ #include #include #include +#include #if defined(CONFIG_METAG_ATOMICITY_IRQSOFF) /* The simple UP case. */ @@ -39,11 +40,6 @@ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - #endif #define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v) diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h index 5d6b4b407dda..d1768c6f5b18 100644 --- a/arch/metag/include/asm/barrier.h +++ b/arch/metag/include/asm/barrier.h @@ -97,4 +97,7 @@ do { \ ___p1; \ }) +#define smp_mb__before_atomic() barrier() +#define smp_mb__after_atomic() barrier() + #endif /* _ASM_METAG_BARRIER_H */ diff --git a/arch/metag/include/asm/bitops.h b/arch/metag/include/asm/bitops.h index c0d0df0d1378..2671134ee745 100644 --- a/arch/metag/include/asm/bitops.h +++ b/arch/metag/include/asm/bitops.h @@ -5,12 +5,6 @@ #include #include -/* - * clear_bit() doesn't provide any barrier for the compiler. - */ -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() - #ifdef CONFIG_SMP /* * These functions are the basis of our bit ops. 
-- cgit v1.2.3 From 91bbefe6b0fcd2968c34a5a566bda870477afc82 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 13 Mar 2014 19:00:36 +0100 Subject: arch,mips: Convert smp_mb__*() MIPS is interesting and has hardware variants that reorder over ll/sc as well as those that do not. Implement the 2 new barrier functions as per the old barriers. Signed-off-by: Peter Zijlstra Acked-by: Paul E. McKenney Link: http://lkml.kernel.org/n/tip-9ph49jbae3hol9v721sbc2g6@git.kernel.org Cc: Linus Torvalds Cc: "Maciej W. Rozycki" Cc: Ralf Baechle Cc: Will Deacon Cc: linux-kernel@vger.kernel.org Cc: linux-mips@linux-mips.org Signed-off-by: Ingo Molnar --- arch/mips/include/asm/atomic.h | 9 --------- arch/mips/include/asm/barrier.h | 3 +++ arch/mips/include/asm/bitops.h | 11 ++--------- arch/mips/kernel/irq.c | 4 ++-- 4 files changed, 7 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index e8eb3d53a241..37b2befe651a 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -761,13 +761,4 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) #endif /* CONFIG_64BIT */ -/* - * atomic*_return operations are serializing but not the non-*_return - * versions. - */ -#define smp_mb__before_atomic_dec() smp_mb__before_llsc() -#define smp_mb__after_atomic_dec() smp_llsc_mb() -#define smp_mb__before_atomic_inc() smp_mb__before_llsc() -#define smp_mb__after_atomic_inc() smp_llsc_mb() - #endif /* _ASM_ATOMIC_H */ diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h index e1aa4e4c2984..d0101dd0575e 100644 --- a/arch/mips/include/asm/barrier.h +++ b/arch/mips/include/asm/barrier.h @@ -195,4 +195,7 @@ do { \ ___p1; \ }) +#define smp_mb__before_atomic() smp_mb__before_llsc() +#define smp_mb__after_atomic() smp_llsc_mb() + #endif /* __ASM_BARRIER_H */ diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h index 6a65d49e2c0d..7c8816f7b7c4 100644 --- a/arch/mips/include/asm/bitops.h +++ b/arch/mips/include/asm/bitops.h @@ -37,13 +37,6 @@ #define __EXT "dext " #endif -/* - * clear_bit() doesn't provide any barrier for the compiler. - */ -#define smp_mb__before_clear_bit() smp_mb__before_llsc() -#define smp_mb__after_clear_bit() smp_llsc_mb() - - /* * These are the "slower" versions of the functions and are in bitops.c. * These functions call raw_local_irq_{save,restore}(). @@ -120,7 +113,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) * * clear_bit() is atomic and may not be reordered. However, it does * not contain a memory barrier, so if it is used for locking purposes, - * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() + * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic() * in order to ensure changes are visible on other processors.
*/ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) @@ -175,7 +168,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) */ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(nr, addr); } diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index d1fea7a054be..1818da4dbb85 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -62,9 +62,9 @@ void __init alloc_legacy_irqno(void) void free_irqno(unsigned int irq) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(irq, irq_map); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); } /* -- cgit v1.2.3 From 9424cdf0fc7f768f20daef77980da9617a76679b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 13 Mar 2014 19:00:36 +0100 Subject: arch,mn10300: Convert smp_mb__*() mn10300 fully relies on asm-generic/barrier.h and therefore its smp_mb() is barrier(). We can use the default implementation. Signed-off-by: Peter Zijlstra Acked-by: Paul E. McKenney Link: http://lkml.kernel.org/n/tip-wotyeoj99h1dpojjeest2jbk@git.kernel.org Cc: David Howells Cc: Koichi Yasutake Cc: Linus Torvalds Cc: linux-am33-list@redhat.com Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/mn10300/include/asm/atomic.h | 7 +------ arch/mn10300/include/asm/bitops.h | 4 +--- arch/mn10300/mm/tlb-smp.c | 4 ++-- 3 files changed, 4 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h index 975e1841ca64..cadeb1e2cdfc 100644 --- a/arch/mn10300/include/asm/atomic.h +++ b/arch/mn10300/include/asm/atomic.h @@ -13,6 +13,7 @@ #include #include +#include #ifndef CONFIG_SMP #include @@ -234,12 +235,6 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *addr) #endif } -/* Atomic operations are already serializing on MN10300??? */ -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - #endif /* __KERNEL__ */ #endif /* CONFIG_SMP */ #endif /* _ASM_ATOMIC_H */ diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h index 596bb2706d81..fe6f8e2c3617 100644 --- a/arch/mn10300/include/asm/bitops.h +++ b/arch/mn10300/include/asm/bitops.h @@ -18,9 +18,7 @@ #define __ASM_BITOPS_H #include - -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() +#include /* * set bit diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c index 3e57faf04083..e5d0ef722bfa 100644 --- a/arch/mn10300/mm/tlb-smp.c +++ b/arch/mn10300/mm/tlb-smp.c @@ -78,9 +78,9 @@ void smp_flush_tlb(void *unused) else local_flush_tlb_page(flush_mm, flush_va); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); cpumask_clear_cpu(cpu_id, &flush_cpumask); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); out: put_cpu(); } -- cgit v1.2.3 From 0f5c6f9e18e14b4ebd83dd08e81ca8143d43de28 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 13 Mar 2014 19:00:36 +0100 Subject: arch,openrisc: Convert smp_mb__*() Openrisc fully relies on asm-generic/barrier.h and therefore its smp_mb() is barrier(). Signed-off-by: Peter Zijlstra Acked-by: Paul E. 
McKenney Link: http://lkml.kernel.org/n/tip-sxgxgqag9tond4kji07d22oh@git.kernel.org Cc: Jonas Bonn Cc: Linus Torvalds Cc: linux-kernel@vger.kernel.org Cc: linux@lists.openrisc.net Signed-off-by: Ingo Molnar --- arch/openrisc/include/asm/bitops.h | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/openrisc/include/asm/bitops.h b/arch/openrisc/include/asm/bitops.h index 2c64f2228dc7..3003cdad561b 100644 --- a/arch/openrisc/include/asm/bitops.h +++ b/arch/openrisc/include/asm/bitops.h @@ -27,14 +27,7 @@ #include #include - -/* - * clear_bit may not imply a memory barrier - */ -#ifndef smp_mb__before_clear_bit -#define smp_mb__before_clear_bit() smp_mb() -#define smp_mb__after_clear_bit() smp_mb() -#endif +#include #include #include -- cgit v1.2.3 From e4a65e9d395feee195f1219d708e9eb2a2d4b583 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 13 Mar 2014 19:00:36 +0100 Subject: arch,parisc: Convert smp_mb__*() parisc fully relies on asm-generic/barrier.h, therefore its smp_mb() is barrier and the default implementation suffices. Signed-off-by: Peter Zijlstra Acked-by: Paul E. McKenney Link: http://lkml.kernel.org/n/tip-mxs4aubiyesi79v8xx53093q@git.kernel.org Cc: Helge Deller Cc: James E.J. Bottomley Cc: Linus Torvalds Cc: linux-kernel@vger.kernel.org Cc: linux-parisc@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/parisc/include/asm/atomic.h | 6 +----- arch/parisc/include/asm/bitops.h | 4 +--- 2 files changed, 2 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 472886ceab1d..0be2db2c7d44 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -7,6 +7,7 @@ #include #include +#include /* * Atomic operations that C can't guarantee us. Useful for @@ -143,11 +144,6 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) #define ATOMIC_INIT(i) { (i) } -#define smp_mb__before_atomic_dec() smp_mb() -#define smp_mb__after_atomic_dec() smp_mb() -#define smp_mb__before_atomic_inc() smp_mb() -#define smp_mb__after_atomic_inc() smp_mb() - #ifdef CONFIG_64BIT #define ATOMIC64_INIT(i) { (i) } diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h index 8c9b631d2a78..3f9406d9b9d6 100644 --- a/arch/parisc/include/asm/bitops.h +++ b/arch/parisc/include/asm/bitops.h @@ -8,6 +8,7 @@ #include #include /* for BITS_PER_LONG/SHIFT_PER_LONG */ #include +#include #include /* @@ -19,9 +20,6 @@ #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1)) -#define smp_mb__before_clear_bit() smp_mb() -#define smp_mb__after_clear_bit() smp_mb() - /* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion * on use of volatile and __*_bit() (set/clear/change): * *_bit() want use of volatile. -- cgit v1.2.3 From c645073f7e4f073e1ebcd0f0d91652c4468b8364 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 13 Mar 2014 19:00:35 +0100 Subject: arch,powerpc: Convert smp_mb__*() Powerpc allows reordering over its ll/sc implementation. Implement the two new barriers as appropriate. Signed-off-by: Peter Zijlstra Acked-by: Paul E. 
McKenney Link: http://lkml.kernel.org/n/tip-gg2ffgq32sjgy9b8lj6m3hsc@git.kernel.org Cc: Benjamin Herrenschmidt Cc: Linus Torvalds Cc: Mahesh Salgaonkar Cc: Paul Gortmaker Cc: Paul Mackerras Cc: Will Deacon Cc: linux-kernel@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/atomic.h | 6 +----- arch/powerpc/include/asm/barrier.h | 3 +++ arch/powerpc/include/asm/bitops.h | 6 +----- arch/powerpc/kernel/crash.c | 2 +- 4 files changed, 6 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index e3b1d41c89be..28992d012926 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -8,6 +8,7 @@ #ifdef __KERNEL__ #include #include +#include #define ATOMIC_INIT(i) { (i) } @@ -270,11 +271,6 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v) } #define atomic_dec_if_positive atomic_dec_if_positive -#define smp_mb__before_atomic_dec() smp_mb() -#define smp_mb__after_atomic_dec() smp_mb() -#define smp_mb__before_atomic_inc() smp_mb() -#define smp_mb__after_atomic_inc() smp_mb() - #ifdef __powerpc64__ #define ATOMIC64_INIT(i) { (i) } diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index f89da808ce31..bab79a110c7b 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -84,4 +84,7 @@ do { \ ___p1; \ }) +#define smp_mb__before_atomic() smp_mb() +#define smp_mb__after_atomic() smp_mb() + #endif /* _ASM_POWERPC_BARRIER_H */ diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h index a5e9a7d494d8..bd3bd573d0ae 100644 --- a/arch/powerpc/include/asm/bitops.h +++ b/arch/powerpc/include/asm/bitops.h @@ -51,11 +51,7 @@ #define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit)) #define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs)) -/* - * clear_bit doesn't imply a memory barrier - */ -#define smp_mb__before_clear_bit() smp_mb() -#define smp_mb__after_clear_bit() smp_mb() +#include /* Macro for generating the ***_bits() functions */ #define DEFINE_BITOP(fn, op, prefix) \ diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index 18d7c80ddeb9..51dbace3269b 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c @@ -81,7 +81,7 @@ void crash_ipi_callback(struct pt_regs *regs) } atomic_inc(&cpus_in_crash); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); /* * Starting the kdump boot. -- cgit v1.2.3 From 0e530747c69f1e191f101a925bb4051894e5c7b0 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 13 Mar 2014 19:00:35 +0100 Subject: arch,s390: Convert smp_mb__*() As per the existing implementation; implement the new one using smp_mb(). AFAICT the s390 compare-and-swap does imply a barrier, however there are some immediate ops that seem to be singly-copy atomic and do not imply a barrier. One such is the "ni" op (which would be and-immediate) which is used for the constant clear_bit implementation. Therefore s390 needs full barriers for the {before,after} atomic ops. Signed-off-by: Peter Zijlstra Acked-by: Paul E. 
McKenney Link: http://lkml.kernel.org/n/tip-kme5dz5hcobpnufnnkh1ech2@git.kernel.org Cc: Chen Gang Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Will Deacon Cc: linux390@de.ibm.com Cc: linux-kernel@vger.kernel.org Cc: linux-s390@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/s390/include/asm/atomic.h | 5 ----- arch/s390/include/asm/barrier.h | 5 +++-- 2 files changed, 3 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index 1d4706114a45..fa934fe080c1 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -412,9 +412,4 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v) #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) -#define smp_mb__before_atomic_dec() smp_mb() -#define smp_mb__after_atomic_dec() smp_mb() -#define smp_mb__before_atomic_inc() smp_mb() -#define smp_mb__after_atomic_inc() smp_mb() - #endif /* __ARCH_S390_ATOMIC__ */ diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index 578680f6207a..19ff956b752b 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -27,8 +27,9 @@ #define smp_rmb() rmb() #define smp_wmb() wmb() #define smp_read_barrier_depends() read_barrier_depends() -#define smp_mb__before_clear_bit() smp_mb() -#define smp_mb__after_clear_bit() smp_mb() + +#define smp_mb__before_atomic() smp_mb() +#define smp_mb__after_atomic() smp_mb() #define set_mb(var, value) do { var = value; mb(); } while (0) -- cgit v1.2.3 From 57aa6a76864dfbb5450728199c37c02f401586ec Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 13 Mar 2014 19:00:35 +0100 Subject: arch,score: Convert smp_mb__*() score fully relies on asm-generic/barrier.h, so it can use its default implementation. Signed-off-by: Peter Zijlstra Acked-by: Paul E. McKenney Acked-by: Lennox Wu Link: http://lkml.kernel.org/n/tip-4mv9svf28lnotjpfuza8urh8@git.kernel.org Cc: Chen Liqin Cc: Linus Torvalds Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/score/include/asm/bitops.h | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/score/include/asm/bitops.h b/arch/score/include/asm/bitops.h index a304096b1894..c1bf8d6d0fb0 100644 --- a/arch/score/include/asm/bitops.h +++ b/arch/score/include/asm/bitops.h @@ -2,12 +2,7 @@ #define _ASM_SCORE_BITOPS_H #include /* swab32 */ - -/* - * clear_bit() doesn't provide any barrier for the compiler. - */ -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() +#include #include #include -- cgit v1.2.3 From 603228bcb8da90f8f8a5cdd8de74178f6c3e7e13 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 13 Mar 2014 19:00:35 +0100 Subject: arch,sh: Convert smp_mb__*() SH can use the asm-generic/barrier.h implementation since that uses smp_mb(). Signed-off-by: Peter Zijlstra Acked-by: Paul E. 
McKenney Link: http://lkml.kernel.org/n/tip-2z962by2ppzcd984ybw2mwdw@git.kernel.org Cc: Linus Torvalds Cc: linux-kernel@vger.kernel.org Cc: linux-sh@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/sh/include/asm/atomic.h | 6 +----- arch/sh/include/asm/bitops.h | 7 +------ 2 files changed, 2 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h index f4c1c20bcdf6..f57b8a6743b3 100644 --- a/arch/sh/include/asm/atomic.h +++ b/arch/sh/include/asm/atomic.h @@ -10,6 +10,7 @@ #include #include #include +#include #define ATOMIC_INIT(i) { (i) } @@ -62,9 +63,4 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) return c; } -#define smp_mb__before_atomic_dec() smp_mb() -#define smp_mb__after_atomic_dec() smp_mb() -#define smp_mb__before_atomic_inc() smp_mb() -#define smp_mb__after_atomic_inc() smp_mb() - #endif /* __ASM_SH_ATOMIC_H */ diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h index ea8706d94f08..fc8e652cf173 100644 --- a/arch/sh/include/asm/bitops.h +++ b/arch/sh/include/asm/bitops.h @@ -9,6 +9,7 @@ /* For __swab32 */ #include +#include #ifdef CONFIG_GUSA_RB #include @@ -22,12 +23,6 @@ #include #endif -/* - * clear_bit() doesn't provide any barrier for the compiler. - */ -#define smp_mb__before_clear_bit() smp_mb() -#define smp_mb__after_clear_bit() smp_mb() - #ifdef CONFIG_SUPERH32 static inline unsigned long ffz(unsigned long word) { -- cgit v1.2.3 From 56d3648948c202e8b89cd786a004c451a3eb264f Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 13 Mar 2014 19:00:35 +0100 Subject: arch,sparc: Convert smp_mb__*() sparc32: fully relies on asm-generic/barrier.h and thus can use its implementation. sparc64: is strongly ordered and its atomic ops imply a full barrier, implement the new primitives using barrier(). Signed-off-by: Peter Zijlstra Acked-by: Paul E. McKenney Acked-by: David S. 
Miller Link: http://lkml.kernel.org/n/tip-2cla9ubpd8chrntnm7e4zdt4@git.kernel.org Cc: Linus Torvalds Cc: Will Deacon Cc: linux-kernel@vger.kernel.org Cc: sparclinux@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/sparc/include/asm/atomic_32.h | 7 +------ arch/sparc/include/asm/atomic_64.h | 7 +------ arch/sparc/include/asm/barrier_64.h | 3 +++ arch/sparc/include/asm/bitops_32.h | 3 --- arch/sparc/include/asm/bitops_64.h | 4 +--- 5 files changed, 6 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h index 905832aa9e9e..f08fe51b264d 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h @@ -14,6 +14,7 @@ #include #include +#include #include @@ -52,10 +53,4 @@ extern void atomic_set(atomic_t *, int); #define atomic_dec_and_test(v) (atomic_dec_return(v) == 0) #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) -/* Atomic operations are already serializing */ -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - #endif /* !(__ARCH_SPARC_ATOMIC__) */ diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index be56a244c9cf..8b2f1bde2889 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h @@ -9,6 +9,7 @@ #include #include +#include #define ATOMIC_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) } @@ -108,10 +109,4 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u) extern long atomic64_dec_if_positive(atomic64_t *v); -/* Atomic operations are already serializing */ -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - #endif /* !(__ARCH_SPARC64_ATOMIC__) */ diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h index b5aad964558e..305dcc3dc721 100644 --- a/arch/sparc/include/asm/barrier_64.h +++ b/arch/sparc/include/asm/barrier_64.h @@ -68,4 +68,7 @@ do { \ ___p1; \ }) +#define smp_mb__before_atomic() barrier() +#define smp_mb__after_atomic() barrier() + #endif /* !(__SPARC64_BARRIER_H) */ diff --git a/arch/sparc/include/asm/bitops_32.h b/arch/sparc/include/asm/bitops_32.h index 25a676653d45..88c9a962502c 100644 --- a/arch/sparc/include/asm/bitops_32.h +++ b/arch/sparc/include/asm/bitops_32.h @@ -90,9 +90,6 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) #include -#define smp_mb__before_clear_bit() do { } while(0) -#define smp_mb__after_clear_bit() do { } while(0) - #include #include #include diff --git a/arch/sparc/include/asm/bitops_64.h b/arch/sparc/include/asm/bitops_64.h index 29011cc0e4be..f1a051ca301a 100644 --- a/arch/sparc/include/asm/bitops_64.h +++ b/arch/sparc/include/asm/bitops_64.h @@ -13,6 +13,7 @@ #include #include +#include extern int test_and_set_bit(unsigned long nr, volatile unsigned long *addr); extern int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr); @@ -23,9 +24,6 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr); #include -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() - #include #include #include -- cgit v1.2.3 From ce3609f93445846f7b5a5b4bacb236a9bdc35216 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 13 Mar 2014 19:00:35 +0100 
From ce3609f93445846f7b5a5b4bacb236a9bdc35216 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 13 Mar 2014 19:00:35 +0100
Subject: arch,tile: Convert smp_mb__*()

Implement the new smp_mb__* ops as per the old ones.

Signed-off-by: Peter Zijlstra
Acked-by: Paul E. McKenney
Acked-by: Chris Metcalf
Link: http://lkml.kernel.org/n/tip-euuabnf5a3u23fy4fq8m3jcg@git.kernel.org
Cc: Akinobu Mita
Cc: Chen Gang
Cc: Geert Uytterhoeven
Cc: Linus Torvalds
Cc: Mathieu Desnoyers
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar
---
 arch/tile/include/asm/atomic_32.h | 10 ----------
 arch/tile/include/asm/atomic_64.h |  6 ------
 arch/tile/include/asm/barrier.h   | 14 ++++++++++++++
 arch/tile/include/asm/bitops.h    |  1 +
 arch/tile/include/asm/bitops_32.h |  8 ++------
 arch/tile/include/asm/bitops_64.h |  4 ----
 6 files changed, 17 insertions(+), 26 deletions(-)
(limited to 'arch')

diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 1ad4a1f7d42b..1b109fad9fff 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -169,16 +169,6 @@ static inline void atomic64_set(atomic64_t *v, long long n)
 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
 
-/*
- * We need to barrier before modifying the word, since the _atomic_xxx()
- * routines just tns the lock and then read/modify/write of the word.
- * But after the word is updated, the routine issues an "mf" before returning,
- * and since it's a function call, we don't even need a compiler barrier.
- */
-#define smp_mb__before_atomic_dec() smp_mb()
-#define smp_mb__before_atomic_inc() smp_mb()
-#define smp_mb__after_atomic_dec() do { } while (0)
-#define smp_mb__after_atomic_inc() do { } while (0)
-
 #endif /* !__ASSEMBLY__ */

diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index ad220eed05fc..7b11c5fadd42 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -105,12 +105,6 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
-/* Atomic dec and inc don't implement barrier, so provide them if needed. */
-#define smp_mb__before_atomic_dec() smp_mb()
-#define smp_mb__after_atomic_dec() smp_mb()
-#define smp_mb__before_atomic_inc() smp_mb()
-#define smp_mb__after_atomic_inc() smp_mb()
-
 /* Define this to indicate that cmpxchg is an efficient operation. */
 #define __HAVE_ARCH_CMPXCHG

diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h
index b5a05d050a8f..96a42ae79f4d 100644
--- a/arch/tile/include/asm/barrier.h
+++ b/arch/tile/include/asm/barrier.h
@@ -72,6 +72,20 @@ mb_incoherent(void)
 #define mb() fast_mb()
 #define iob() fast_iob()
 
+#ifndef __tilegx__ /* 32 bit */
+/*
+ * We need to barrier before modifying the word, since the _atomic_xxx()
+ * routines just tns the lock and then read/modify/write of the word.
+ * But after the word is updated, the routine issues an "mf" before returning,
+ * and since it's a function call, we don't even need a compiler barrier.
+ */
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic() do { } while (0)
+#else /* 64 bit */
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic() smp_mb()
+#endif
+
 #include <asm-generic/barrier.h>
 
 #endif /* !__ASSEMBLY__ */

diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
index d5a206865036..20caa346ac06 100644
--- a/arch/tile/include/asm/bitops.h
+++ b/arch/tile/include/asm/bitops.h
@@ -17,6 +17,7 @@
 #define _ASM_TILE_BITOPS_H
 
 #include <linux/types.h>
+#include <asm/barrier.h>
 
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly

diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h
index 386865ad2f55..bbf7b666f21d 100644
--- a/arch/tile/include/asm/bitops_32.h
+++ b/arch/tile/include/asm/bitops_32.h
@@ -49,8 +49,8 @@ static inline void set_bit(unsigned nr, volatile unsigned long *addr)
  * restricted to acting on a single-word quantity.
  *
  * clear_bit() may not contain a memory barrier, so if it is used for
- * locking purposes, you should call smp_mb__before_clear_bit() and/or
- * smp_mb__after_clear_bit() to ensure changes are visible on other cpus.
+ * locking purposes, you should call smp_mb__before_atomic() and/or
+ * smp_mb__after_atomic() to ensure changes are visible on other cpus.
  */
 static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
 {
@@ -121,10 +121,6 @@ static inline int test_and_change_bit(unsigned nr,
 	return (_atomic_xor(addr, mask) & mask) != 0;
 }
 
-/* See discussion at smp_mb__before_atomic_dec() in <asm/atomic_32.h>. */
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() do {} while (0)
-
 #include <asm-generic/bitops/non-atomic.h>
 
 #endif /* _ASM_TILE_BITOPS_32_H */

diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h
index ad34cd056085..bb1a29221fcd 100644
--- a/arch/tile/include/asm/bitops_64.h
+++ b/arch/tile/include/asm/bitops_64.h
@@ -32,10 +32,6 @@ static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
 	__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask);
 }
 
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() smp_mb()
-
-
 static inline void change_bit(unsigned nr, volatile unsigned long *addr)
 {
 	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
-- cgit v1.2.3
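
The 32-bit/64-bit asymmetry in the tile barrier.h hunk follows directly from
the shape of its atomics, as the carried-over comment explains. A C-level
sketch of that shape; the two lock helpers are hypothetical stand-ins for
tile's real tns-based routines, not actual kernel functions:

    /* Hypothetical stand-ins for tile's tns-lock primitives: */
    static inline void __tns_lock(int *w)      { /* tns spin-lock, no fence on entry */ }
    static inline void __tns_unlock_mf(int *w) { /* release lock, then issue "mf" */ }

    /* Rough shape of a 32-bit tile atomic op as the comment describes it: */
    static void _atomic_add_sketch(int i, int *word)
    {
    	__tns_lock(word);	/* nothing fences earlier stores here, so
    				 * smp_mb__before_atomic() must be smp_mb() */
    	*word += i;
    	__tns_unlock_mf(word);	/* ends with "mf", so smp_mb__after_atomic()
    				 * can be a no-op */
    }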
From d00a569284b1340c16fe2c148099e077ea09ebc9 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 13 Mar 2014 19:00:35 +0100
Subject: arch,x86: Convert smp_mb__*()

x86 is strongly ordered and all its atomic ops imply a full barrier.
Implement the two new primitives as the old ones were.

Signed-off-by: Peter Zijlstra
Acked-by: Paul E. McKenney
Link: http://lkml.kernel.org/n/tip-knswsr5mldkr0w1lrdxvc81w@git.kernel.org
Cc: Dave Jones
Cc: Jesse Brandeburg
Cc: Linus Torvalds
Cc: Michel Lespinasse
Cc: Will Deacon
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/atomic.h      | 7 +------
 arch/x86/include/asm/barrier.h     | 4 ++++
 arch/x86/include/asm/bitops.h      | 6 ++----
 arch/x86/include/asm/sync_bitops.h | 2 +-
 arch/x86/kernel/apic/hw_nmi.c      | 2 +-
 5 files changed, 9 insertions(+), 12 deletions(-)
(limited to 'arch')

diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index b17f4f48ecd7..6dd1c7dd0473 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -7,6 +7,7 @@
 #include <asm/alternative.h>
 #include <asm/cmpxchg.h>
 #include <asm/rmwcc.h>
+#include <asm/barrier.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -243,12 +244,6 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
 		     : : "r" ((unsigned)(mask)), "m" (*(addr)) \
 		     : "memory")
 
-/* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 #ifdef CONFIG_X86_32
 # include <asm/atomic64_32.h>
 #else

diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 69bbb4845020..5c7198cca5ed 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -137,6 +137,10 @@ do {							\
 
 #endif
 
+/* Atomic operations are already serializing on x86 */
+#define smp_mb__before_atomic() barrier()
+#define smp_mb__after_atomic() barrier()
+
 /*
  * Stop RDTSC speculation. This is needed when you need to use RDTSC
  * (or get_cycles or vread that possibly accesses the TSC) in a defined

diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 9fc1af74dc83..afcd35d331de 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -15,6 +15,7 @@
 #include <linux/compiler.h>
 #include <asm/alternative.h>
 #include <asm/rmwcc.h>
+#include <asm/barrier.h>
 
 #if BITS_PER_LONG == 32
 # define _BITOPS_LONG_SHIFT 5
@@ -102,7 +103,7 @@ static inline void __set_bit(long nr, volatile unsigned long *addr)
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
 static __always_inline void
@@ -156,9 +157,6 @@ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
 	__clear_bit(nr, addr);
 }
 
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
 /**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change

diff --git a/arch/x86/include/asm/sync_bitops.h b/arch/x86/include/asm/sync_bitops.h
index 05af3b31d522..f28a24b51dc7 100644
--- a/arch/x86/include/asm/sync_bitops.h
+++ b/arch/x86/include/asm/sync_bitops.h
@@ -41,7 +41,7 @@ static inline void sync_set_bit(long nr, volatile unsigned long *addr)
 *
 * sync_clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
 static inline void sync_clear_bit(long nr, volatile unsigned long *addr)

diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index a698d7165c96..eab67047dec3 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -57,7 +57,7 @@ void arch_trigger_all_cpu_backtrace(void)
 	}
 
 	clear_bit(0, &backtrace_flag);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 }
 
 static int __kprobes
-- cgit v1.2.3
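
Even on a strongly ordered machine the two primitives cannot simply be empty:
barrier() still constrains the compiler. One plausible failure mode it guards
against, sketched with invented variables — x86's atomic inline asm does not
necessarily clobber all of memory, so without a compiler barrier the store
below could be scheduled ahead of the decrement:

    #include <linux/atomic.h>

    static atomic_t nr_pending;
    static int done;

    static void finish_one(void)
    {
    	atomic_dec(&nr_pending);
    	smp_mb__after_atomic();	/* emits no instruction on x86, but pins
    				 * the ordering at the compiler level */
    	done = 1;
    }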
From 09a01c0ccb1837abb28afcfdd668fa0dfabed928 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 13 Mar 2014 19:00:35 +0100
Subject: arch,xtensa: Convert smp_mb__*()

Xtensa SMP has compare-and-swap, which is fully serializing; therefore
its existing smp_mb__{before,after}_clear_bit() appear unduly heavy.
Implement the new barriers as barrier().

Signed-off-by: Peter Zijlstra
Acked-by: Paul E. McKenney
Link: http://lkml.kernel.org/n/tip-e9rqjxr1m1ejsob9p433kmji@git.kernel.org
Cc: Chris Zankel
Cc: Geert Uytterhoeven
Cc: Linus Torvalds
Cc: Mathieu Desnoyers
Cc: Max Filippov
Cc: linux-kernel@vger.kernel.org
Cc: linux-xtensa@linux-xtensa.org
Signed-off-by: Ingo Molnar
---
 arch/xtensa/include/asm/atomic.h  | 7 +------
 arch/xtensa/include/asm/barrier.h | 3 +++
 arch/xtensa/include/asm/bitops.h  | 4 +---
 3 files changed, 5 insertions(+), 9 deletions(-)
(limited to 'arch')

diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index e7fb447bce8e..e5103b47a8ce 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -19,6 +19,7 @@
 #ifdef __KERNEL__
 #include <asm/processor.h>
 #include <asm/cmpxchg.h>
+#include <asm/barrier.h>
 
 #define ATOMIC_INIT(i) { (i) }
 
@@ -387,12 +388,6 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 #endif
 }
 
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 #endif /* __KERNEL__ */
 
 #endif /* _XTENSA_ATOMIC_H */

diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
index 0a24b04d6b21..5b88774c75ab 100644
--- a/arch/xtensa/include/asm/barrier.h
+++ b/arch/xtensa/include/asm/barrier.h
@@ -13,6 +13,9 @@
 #define rmb() barrier()
 #define wmb() mb()
 
+#define smp_mb__before_atomic() barrier()
+#define smp_mb__after_atomic() barrier()
+
 #include <asm-generic/barrier.h>
 
 #endif /* _XTENSA_SYSTEM_H */

diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index 7b6873ae84c2..3f44fa2a53e9 100644
--- a/arch/xtensa/include/asm/bitops.h
+++ b/arch/xtensa/include/asm/bitops.h
@@ -21,9 +21,7 @@
 
 #include <asm/processor.h>
 #include <asm/byteorder.h>
-
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() smp_mb()
+#include <asm/barrier.h>
 
 #include <asm-generic/bitops/non-atomic.h>
-- cgit v1.2.3
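
The hw_nmi.c hunk in the x86 patch above is one instance of the general
clear-and-wake pattern these barriers exist for; spelled out as a sketch
(the flag word and bit number are invented for illustration):

    #include <linux/bitops.h>
    #include <linux/wait.h>

    static unsigned long flags;

    static void release_bit_and_wake(void)
    {
    	clear_bit(0, &flags);	/* atomic RMW, but no barrier semantics */
    	smp_mb__after_atomic();	/* order the clear before the waker's checks */
    	wake_up_bit(&flags, 0);	/* wake anyone in wait_on_bit(&flags, 0, ...) */
    }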