From c66e45edef51e1f54297ddaf202fc2dd00852734 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 23 Apr 2014 19:57:49 +0200 Subject: m32r: Provide atomic_{or,xor,and} Implement atomic logic ops -- atomic_{or,xor,and}. These will replace the atomic_{set,clear}_mask functions that are available on some archs. Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner --- arch/m32r/include/asm/atomic.h | 44 ++++++++++-------------------------------- 1 file changed, 10 insertions(+), 34 deletions(-) (limited to 'arch/m32r') diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h index 31bb74adba08..7245463c1e98 100644 --- a/arch/m32r/include/asm/atomic.h +++ b/arch/m32r/include/asm/atomic.h @@ -94,6 +94,12 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) +#define CONFIG_ARCH_HAS_ATOMIC_OR + +ATOMIC_OP(and) +ATOMIC_OP(or) +ATOMIC_OP(xor) + #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP @@ -240,44 +246,14 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) } -static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr) +static __inline__ __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) { - unsigned long flags; - unsigned long tmp; - - local_irq_save(flags); - __asm__ __volatile__ ( - "# atomic_clear_mask \n\t" - DCACHE_CLEAR("%0", "r5", "%1") - M32R_LOCK" %0, @%1; \n\t" - "and %0, %2; \n\t" - M32R_UNLOCK" %0, @%1; \n\t" - : "=&r" (tmp) - : "r" (addr), "r" (~mask) - : "memory" - __ATOMIC_CLOBBER - ); - local_irq_restore(flags); + atomic_and(~mask, v); } -static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr) +static __inline__ __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) { - unsigned long flags; - unsigned long tmp; - - local_irq_save(flags); - __asm__ __volatile__ ( - "# atomic_set_mask \n\t" - DCACHE_CLEAR("%0", "r5", "%1") - M32R_LOCK" %0, @%1; \n\t" - "or %0, %2; \n\t" - M32R_UNLOCK" %0, @%1; \n\t" - : "=&r" (tmp) - : "r" (addr), "r" (mask) - : "memory" - __ATOMIC_CLOBBER - ); - local_irq_restore(flags); + atomic_or(mask, v); } #endif /* _ASM_M32R_ATOMIC_H */ -- cgit v1.2.3 From e6942b7de2dfe44ebde9bae57dadece5abca9de8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 23 Apr 2014 19:32:50 +0200 Subject: atomic: Provide atomic_{or,xor,and} Implement atomic logic ops -- atomic_{or,xor,and}. These will replace the atomic_{set,clear}_mask functions that are available on some archs. 
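For background, these helpers are generated rather than hand-written: a single ATOMIC_OP() macro is instantiated once per operation, as the diffs below show. A minimal userspace sketch of the same generation pattern, using C11 atomics (the demo_* names are illustrative, not kernel API):

/* Userspace sketch of the ATOMIC_OP() generation pattern; C11
 * <stdatomic.h> stands in for the kernel's per-arch primitives. */
#include <stdatomic.h>

typedef struct { atomic_int counter; } demo_atomic_t;

#define DEMO_ATOMIC_OP(op)						\
static inline void demo_atomic_##op(int i, demo_atomic_t *v)		\
{									\
	atomic_fetch_##op##_explicit(&v->counter, i,			\
				     memory_order_relaxed);		\
}

DEMO_ATOMIC_OP(and)
DEMO_ATOMIC_OP(or)
DEMO_ATOMIC_OP(xor)

#undef DEMO_ATOMIC_OP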
Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner --- arch/alpha/include/asm/atomic.h | 1 - arch/arc/include/asm/atomic.h | 1 - arch/arm/include/asm/atomic.h | 1 - arch/arm64/include/asm/atomic.h | 1 - arch/avr32/include/asm/atomic.h | 2 -- arch/blackfin/include/asm/atomic.h | 2 -- arch/frv/include/asm/atomic.h | 2 -- arch/h8300/include/asm/atomic.h | 2 -- arch/hexagon/include/asm/atomic.h | 2 -- arch/ia64/include/asm/atomic.h | 2 -- arch/m32r/include/asm/atomic.h | 2 -- arch/m68k/include/asm/atomic.h | 2 -- arch/metag/include/asm/atomic_lnkget.h | 2 -- arch/mips/include/asm/atomic.h | 2 -- arch/mn10300/include/asm/atomic.h | 2 -- arch/parisc/include/asm/atomic.h | 2 -- arch/powerpc/include/asm/atomic.h | 2 -- arch/s390/include/asm/atomic.h | 2 -- arch/sh/include/asm/atomic-grb.h | 2 -- arch/sparc/include/asm/atomic_32.h | 2 -- arch/sparc/include/asm/atomic_64.h | 2 -- arch/tile/include/asm/atomic_32.h | 2 -- arch/tile/include/asm/atomic_64.h | 2 -- arch/x86/include/asm/atomic.h | 2 -- arch/xtensa/include/asm/atomic.h | 2 -- include/asm-generic/atomic.h | 21 ++++++++++++--------- include/asm-generic/atomic64.h | 4 ++++ include/linux/atomic.h | 13 ------------- lib/atomic64.c | 3 +++ 29 files changed, 19 insertions(+), 68 deletions(-) (limited to 'arch/m32r') diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h index 0eff853398d2..e8c956098424 100644 --- a/arch/alpha/include/asm/atomic.h +++ b/arch/alpha/include/asm/atomic.h @@ -110,7 +110,6 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR #define atomic_andnot atomic_andnot #define atomic64_andnot atomic64_andnot diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index e90b701fc6a8..2a847821dee1 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -144,7 +144,6 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add, +=, add) ATOMIC_OPS(sub, -=, sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR #define atomic_andnot atomic_andnot ATOMIC_OP(and, &=, and) diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index ff214bac9cb4..82b75a7cb762 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -194,7 +194,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) ATOMIC_OPS(add, +=, add) ATOMIC_OPS(sub, -=, sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR #define atomic_andnot atomic_andnot ATOMIC_OP(and, &=, and) diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index 2876173397b2..866a71fca9a3 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -85,7 +85,6 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add, add) ATOMIC_OPS(sub, sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR #define atomic_andnot atomic_andnot ATOMIC_OP(and, and) diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h index 115d3005e4bc..97c9bdf83409 100644 --- a/arch/avr32/include/asm/atomic.h +++ b/arch/avr32/include/asm/atomic.h @@ -51,8 +51,6 @@ static inline void atomic_##op(int i, atomic_t *v) \ (void)__atomic_##op##_return(i, v); \ } -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and, and) ATOMIC_OP(or, or) ATOMIC_OP(xor, eor) diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h index eafa55b81a7b..2d6a7a3823c3 100644 --- a/arch/blackfin/include/asm/atomic.h +++ 
b/arch/blackfin/include/asm/atomic.h @@ -28,8 +28,6 @@ asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value); #define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i) #define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i)) -#define CONFIG_ARCH_HAS_ATOMIC_OR - #define atomic_or(i, v) (void)__raw_atomic_or_asm(&(v)->counter, i) #define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i) #define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i) diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h index 74d22454d7c6..fc48bea26b40 100644 --- a/arch/frv/include/asm/atomic.h +++ b/arch/frv/include/asm/atomic.h @@ -192,8 +192,6 @@ static inline void atomic64_##op(long long i, atomic64_t *v) \ (void)__atomic64_fetch_##op(i, &v->counter); \ } -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(or) ATOMIC_OP(and) ATOMIC_OP(xor) diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h index f181f820be33..c4d061f09c44 100644 --- a/arch/h8300/include/asm/atomic.h +++ b/arch/h8300/include/asm/atomic.h @@ -41,8 +41,6 @@ static inline void atomic_##op(int i, atomic_t *v) \ ATOMIC_OP_RETURN(add, +=) ATOMIC_OP_RETURN(sub, -=) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and, &=) ATOMIC_OP(or, |=) ATOMIC_OP(xor, ^=) diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h index 4efe2c7c0dd8..811d61f6422d 100644 --- a/arch/hexagon/include/asm/atomic.h +++ b/arch/hexagon/include/asm/atomic.h @@ -132,8 +132,6 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and) ATOMIC_OP(or) ATOMIC_OP(xor) diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h index 0809ef5d6b9a..be4beeb77d57 100644 --- a/arch/ia64/include/asm/atomic.h +++ b/arch/ia64/include/asm/atomic.h @@ -69,8 +69,6 @@ ATOMIC_OP(sub, -) : ia64_atomic_sub(__ia64_asr_i, v); \ }) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and, &) ATOMIC_OP(or, |) ATOMIC_OP(xor, ^) diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h index 7245463c1e98..b2a13fbd5be0 100644 --- a/arch/m32r/include/asm/atomic.h +++ b/arch/m32r/include/asm/atomic.h @@ -94,8 +94,6 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and) ATOMIC_OP(or) ATOMIC_OP(xor) diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h index c30e43ea49a3..93ebd96aa494 100644 --- a/arch/m68k/include/asm/atomic.h +++ b/arch/m68k/include/asm/atomic.h @@ -77,8 +77,6 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \ ATOMIC_OPS(add, +=, add) ATOMIC_OPS(sub, -=, sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and, &=, and) ATOMIC_OP(or, |=, or) ATOMIC_OP(xor, ^=, eor) diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h index 930c12cb8d37..0642606de901 100644 --- a/arch/metag/include/asm/atomic_lnkget.h +++ b/arch/metag/include/asm/atomic_lnkget.h @@ -74,8 +74,6 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and) ATOMIC_OP(or) ATOMIC_OP(xor) diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 0430ba6ab762..4c42fd9af777 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -137,8 +137,6 @@ static __inline__ int 
atomic_##op##_return(int i, atomic_t * v) \ ATOMIC_OPS(add, +=, addu) ATOMIC_OPS(sub, -=, subu) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and, &=, and) ATOMIC_OP(or, |=, or) ATOMIC_OP(xor, ^=, xor) diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h index 03eea8158cf9..f5a63f0bda46 100644 --- a/arch/mn10300/include/asm/atomic.h +++ b/arch/mn10300/include/asm/atomic.h @@ -89,8 +89,6 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and) ATOMIC_OP(or) ATOMIC_OP(xor) diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index be2c50ddebd6..2536965d00ea 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -126,8 +126,6 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add, +=) ATOMIC_OPS(sub, -=) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and, &=) ATOMIC_OP(or, |=) ATOMIC_OP(xor, ^=) diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index 6ca89e2aca15..55f106ed12bf 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -67,8 +67,6 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \ ATOMIC_OPS(add, add) ATOMIC_OPS(sub, subf) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and, and) ATOMIC_OP(or, or) ATOMIC_OP(xor, xor) diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index b3859d8e001f..d761aeff72da 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -282,8 +282,6 @@ static inline void atomic64_##op(long i, atomic64_t *v) \ __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER); \ } -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC64_OP(and, AND) ATOMIC64_OP(or, OR) ATOMIC64_OP(xor, XOR) diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h index 4b03830d48c7..b94df40e5f2d 100644 --- a/arch/sh/include/asm/atomic-grb.h +++ b/arch/sh/include/asm/atomic-grb.h @@ -48,8 +48,6 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and) ATOMIC_OP(or) ATOMIC_OP(xor) diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h index e19d8880b146..7dcbebbcaec6 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h @@ -17,8 +17,6 @@ #include #include -#define CONFIG_ARCH_HAS_ATOMIC_OR - #define ATOMIC_INIT(i) { (i) } int atomic_add_return(int, atomic_t *); diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index d6af27c93450..917084ace49d 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h @@ -33,8 +33,6 @@ long atomic64_##op##_return(long, atomic64_t *); ATOMIC_OPS(add) ATOMIC_OPS(sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and) ATOMIC_OP(or) ATOMIC_OP(xor) diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h index 94237922f0dd..d320ce253d86 100644 --- a/arch/tile/include/asm/atomic_32.h +++ b/arch/tile/include/asm/atomic_32.h @@ -41,8 +41,6 @@ static inline void atomic_##op(int i, atomic_t *v) \ _atomic_##op((unsigned long *)&v->counter, i); \ } -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and) ATOMIC_OP(or) ATOMIC_OP(xor) diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h index d07d9fc6e2a1..096a56d6ead4 100644 --- 
a/arch/tile/include/asm/atomic_64.h +++ b/arch/tile/include/asm/atomic_64.h @@ -58,8 +58,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) return oldval; } -#define CONFIG_ARCH_HAS_ATOMIC_OR - static inline void atomic_and(int i, atomic_t *v) { __insn_fetchand4((void *)&v->counter, i); diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index f3a3ec040694..b3493023efda 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -191,8 +191,6 @@ static inline void atomic_##op(int i, atomic_t *v) \ : "memory"); \ } -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and) ATOMIC_OP(or) ATOMIC_OP(xor) diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index 4dd2450300a6..31371f43c23b 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h @@ -145,8 +145,6 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) -#define CONFIG_ARCH_HAS_ATOMIC_OR - ATOMIC_OP(and) ATOMIC_OP(or) ATOMIC_OP(xor) diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index 92947e0a532a..a41b0b8f7404 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -102,24 +102,27 @@ ATOMIC_OP_RETURN(sub, -) ATOMIC_OP(and, &) #endif -#ifndef atomic_clear_mask -#define atomic_clear_mask(i, v) atomic_and(~(i), (v)) -#endif - #ifndef atomic_or -#ifndef CONFIG_ARCH_HAS_ATOMIC_OR -#define CONFIG_ARCH_HAS_ATOMIC_OR -#endif ATOMIC_OP(or, |) #endif -#ifndef atomic_set_mask -#define atomic_set_mask(i, v) atomic_or((i), (v)) +#ifndef atomic_xor +ATOMIC_OP(xor, ^) #endif #undef ATOMIC_OP_RETURN #undef ATOMIC_OP +static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) +{ + atomic_and(~mask, v); +} + +static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) +{ + atomic_or(mask, v); +} + /* * Atomic operations that C can't guarantee us. Useful for * resource counting etc.. 
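On SMP configurations, the ATOMIC_OP(or, |) instance added to asm-generic/atomic.h above generates a compare-and-swap retry loop — the same shape as the CONFIG_ARCH_HAS_ATOMIC_OR fallback this patch removes from linux/atomic.h further down. Roughly (a sketch, not the exact expansion):

/* Sketch of the generated generic atomic_or(): retry the OR with
 * cmpxchg until no other CPU modified the counter in between. */
static inline void atomic_or(int i, atomic_t *v)
{
	int old, new;

	do {
		old = atomic_read(v);
		new = old | i;
	} while (atomic_cmpxchg(v, old, new) != old);
}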
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h index 30ad9c86cebb..d48e78ccad3d 100644 --- a/include/asm-generic/atomic64.h +++ b/include/asm-generic/atomic64.h @@ -32,6 +32,10 @@ extern long long atomic64_##op##_return(long long a, atomic64_t *v); ATOMIC64_OPS(add) ATOMIC64_OPS(sub) +ATOMIC64_OP(and) +ATOMIC64_OP(or) +ATOMIC64_OP(xor) + #undef ATOMIC64_OPS #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 5b08a8540ecf..7d6279012a1f 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -111,19 +111,6 @@ static inline int atomic_dec_if_positive(atomic_t *v) } #endif -#ifndef CONFIG_ARCH_HAS_ATOMIC_OR -static inline void atomic_or(int i, atomic_t *v) -{ - int old; - int new; - - do { - old = atomic_read(v); - new = old | i; - } while (atomic_cmpxchg(v, old, new) != old); -} -#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */ - #include #ifdef CONFIG_GENERIC_ATOMIC64 #include diff --git a/lib/atomic64.c b/lib/atomic64.c index 1298c05ef528..2886ebac6567 100644 --- a/lib/atomic64.c +++ b/lib/atomic64.c @@ -102,6 +102,9 @@ EXPORT_SYMBOL(atomic64_##op##_return); ATOMIC64_OPS(add, +=) ATOMIC64_OPS(sub, -=) +ATOMIC64_OP(and, &=) +ATOMIC64_OP(or, |=) +ATOMIC64_OP(xor, ^=) #undef ATOMIC64_OPS #undef ATOMIC64_OP_RETURN -- cgit v1.2.3 From de9e432cb5de1bf2952919dc0b22e4bec0ed8d53 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 24 Apr 2015 01:12:32 +0200 Subject: atomic: Collapse all atomic_{set,clear}_mask definitions Move the now generic definitions of atomic_{set,clear}_mask() into linux/atomic.h to avoid endless and pointless repetition. Also, provide an atomic_andnot() wrapper for those few archs that can implement that. Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner --- arch/arc/include/asm/atomic.h | 10 ---------- arch/blackfin/include/asm/atomic.h | 10 ---------- arch/frv/include/asm/atomic.h | 10 ---------- arch/h8300/include/asm/atomic.h | 10 ---------- arch/m32r/include/asm/atomic.h | 11 ----------- arch/m68k/include/asm/atomic.h | 10 ---------- arch/metag/include/asm/atomic_lnkget.h | 10 ---------- arch/metag/include/asm/atomic_lock1.h | 10 ---------- arch/mn10300/include/asm/atomic.h | 24 ------------------------ arch/powerpc/kernel/misc_32.S | 19 ------------------- arch/s390/include/asm/atomic.h | 10 ---------- arch/sh/include/asm/atomic.h | 10 ---------- arch/x86/include/asm/atomic.h | 10 ---------- arch/xtensa/include/asm/atomic.h | 10 ---------- include/asm-generic/atomic.h | 10 ---------- include/linux/atomic.h | 25 +++++++++++++++++++++++++ 16 files changed, 25 insertions(+), 174 deletions(-) (limited to 'arch/m32r') diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 2a847821dee1..d8a85e706fba 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -155,16 +155,6 @@ ATOMIC_OP(xor, ^=, xor) #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ - atomic_and(~mask, v); -} - -static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) -{ - atomic_or(mask, v); -} - /** * __atomic_add_unless - add unless the number is a given value * @v: pointer of type atomic_t diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h index 2d6a7a3823c3..1c1c42330c99 100644 --- a/arch/blackfin/include/asm/atomic.h +++ b/arch/blackfin/include/asm/atomic.h @@ -32,16 +32,6 @@ asmlinkage int 
__raw_atomic_test_asm(const volatile int *ptr, int value); #define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i) #define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i) -static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ - atomic_and(~mask, v); -} - -static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) -{ - atomic_or(mask, v); -} - #endif #include diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h index fc48bea26b40..0da689def4cc 100644 --- a/arch/frv/include/asm/atomic.h +++ b/arch/frv/include/asm/atomic.h @@ -198,14 +198,4 @@ ATOMIC_OP(xor) #undef ATOMIC_OP -static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ - atomic_and(~mask, v); -} - -static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) -{ - atomic_or(mask, v); -} - #endif /* _ASM_ATOMIC_H */ diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h index c4d061f09c44..702ee539f87d 100644 --- a/arch/h8300/include/asm/atomic.h +++ b/arch/h8300/include/asm/atomic.h @@ -89,14 +89,4 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) return ret; } -static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ - atomic_and(~mask, v); -} - -static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) -{ - atomic_or(mask, v); -} - #endif /* __ARCH_H8300_ATOMIC __ */ diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h index b2a13fbd5be0..025e2a170493 100644 --- a/arch/m32r/include/asm/atomic.h +++ b/arch/m32r/include/asm/atomic.h @@ -243,15 +243,4 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) return c; } - -static __inline__ __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ - atomic_and(~mask, v); -} - -static __inline__ __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) -{ - atomic_or(mask, v); -} - #endif /* _ASM_M32R_ATOMIC_H */ diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h index 93ebd96aa494..039fac120cc0 100644 --- a/arch/m68k/include/asm/atomic.h +++ b/arch/m68k/include/asm/atomic.h @@ -174,16 +174,6 @@ static inline int atomic_add_negative(int i, atomic_t *v) return c != 0; } -static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ - atomic_and(~mask, v); -} - -static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) -{ - atomic_or(mask, v); -} - static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h index 0642606de901..21c4c268b86c 100644 --- a/arch/metag/include/asm/atomic_lnkget.h +++ b/arch/metag/include/asm/atomic_lnkget.h @@ -82,16 +82,6 @@ ATOMIC_OP(xor) #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ - atomic_and(~mask, v); -} - -static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) -{ - atomic_or(mask, v); -} - static inline int atomic_cmpxchg(atomic_t *v, int old, int new) { int result, temp; diff --git a/arch/metag/include/asm/atomic_lock1.h b/arch/metag/include/asm/atomic_lock1.h index 7d88725a85da..f8efe380fe8b 100644 --- a/arch/metag/include/asm/atomic_lock1.h +++ b/arch/metag/include/asm/atomic_lock1.h @@ -76,16 +76,6 @@ ATOMIC_OP(xor, ^=) #undef ATOMIC_OP_RETURN #undef ATOMIC_OP 
-static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ - atomic_and(~mask, v); -} - -static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) -{ - atomic_or(mask, v); -} - static inline int atomic_cmpxchg(atomic_t *v, int old, int new) { int ret; diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h index f5a63f0bda46..375e59140c9c 100644 --- a/arch/mn10300/include/asm/atomic.h +++ b/arch/mn10300/include/asm/atomic.h @@ -131,30 +131,6 @@ static inline void atomic_dec(atomic_t *v) #define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) -/** - * atomic_clear_mask - Atomically clear bits in memory - * @mask: Mask of the bits to be cleared - * @v: pointer to word in memory - * - * Atomically clears the bits set in mask from the memory word specified. - */ -static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ - atomic_and(~mask, v); -} - -/** - * atomic_set_mask - Atomically set bits in memory - * @mask: Mask of the bits to be set - * @v: pointer to word in memory - * - * Atomically sets the bits set in mask from the memory word specified. - */ -static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) -{ - atomic_or(mask, v); -} - #endif /* __KERNEL__ */ #endif /* CONFIG_SMP */ #endif /* _ASM_ATOMIC_H */ diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 7c6bb4b17b49..ed3ab509faca 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -595,25 +595,6 @@ _GLOBAL(copy_page) li r11,4 b 2b -/* - * void atomic_clear_mask(atomic_t mask, atomic_t *addr) - * void atomic_set_mask(atomic_t mask, atomic_t *addr); - */ -_GLOBAL(atomic_clear_mask) -10: lwarx r5,0,r4 - andc r5,r5,r3 - PPC405_ERR77(0,r4) - stwcx. r5,0,r4 - bne- 10b - blr -_GLOBAL(atomic_set_mask) -10: lwarx r5,0,r4 - or r5,r5,r3 - PPC405_ERR77(0,r4) - stwcx. r5,0,r4 - bne- 10b - blr - /* * Extended precision shifts. 
* diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index d761aeff72da..117fa5c921c1 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -132,16 +132,6 @@ ATOMIC_OP(xor, XOR) #undef ATOMIC_OP -static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ - atomic_and(~mask, v); -} - -static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) -{ - atomic_or(mask, v); -} - #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) static inline int atomic_cmpxchg(atomic_t *v, int old, int new) diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h index cee0245257e1..05b9f74ce2d5 100644 --- a/arch/sh/include/asm/atomic.h +++ b/arch/sh/include/asm/atomic.h @@ -25,16 +25,6 @@ #include #endif -static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ - atomic_and(~mask, v); -} - -static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) -{ - atomic_or(mask, v); -} - #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) #define atomic_dec_return(v) atomic_sub_return(1, (v)) #define atomic_inc_return(v) atomic_add_return(1, (v)) diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index b3493023efda..fb52aa644aab 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -234,16 +234,6 @@ static __always_inline short int atomic_inc_short(short int *v) return *v; } -static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ - atomic_and(~mask, v); -} - -static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) -{ - atomic_or(mask, v); -} - #ifdef CONFIG_X86_32 # include #else diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index 31371f43c23b..e0be67936990 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h @@ -153,16 +153,6 @@ ATOMIC_OP(xor) #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) -{ - atomic_or(mask, v); -} - -static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ - atomic_and(~mask, v); -} - /** * atomic_sub_and_test - subtract value from variable and test result * @i: integer value to subtract diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index a41b0b8f7404..d4d7e337fdcb 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -113,16 +113,6 @@ ATOMIC_OP(xor, ^) #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ - atomic_and(~mask, v); -} - -static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) -{ - atomic_or(mask, v); -} - /* * Atomic operations that C can't guarantee us. Useful for * resource counting etc.. 
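Note that the wrappers which survive (centralised in linux/atomic.h in the next hunk) carry the __deprecated attribute, so unconverted callers keep building but warn at every call site. In the kernel, __deprecated expands to the compiler's deprecation attribute; a standalone illustration (old_api() is a hypothetical name):

/* Standalone sketch of the deprecation mechanism: GCC/Clang emit a
 * warning for every call to an attribute((deprecated)) function. */
#define __deprecated __attribute__((deprecated))

static inline __deprecated void old_api(void) { }

int main(void)
{
	old_api();	/* warning: 'old_api' is deprecated */
	return 0;
}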
diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 7d6279012a1f..8b98b423388f 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -28,6 +28,23 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #endif +#ifndef atomic_andnot +static inline void atomic_andnot(int i, atomic_t *v) +{ + atomic_and(~i, v); +} +#endif + +static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) +{ + atomic_andnot(mask, v); +} + +static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) +{ + atomic_or(mask, v); +} + /** * atomic_inc_not_zero_hint - increment if not null * @v: pointer of type atomic_t @@ -115,4 +132,12 @@ static inline int atomic_dec_if_positive(atomic_t *v) #ifdef CONFIG_GENERIC_ATOMIC64 #include <asm-generic/atomic64.h> #endif + +#ifndef atomic64_andnot +static inline void atomic64_andnot(long long i, atomic64_t *v) +{ + atomic64_and(~i, v); +} +#endif + #endif /* _LINUX_ATOMIC_H */ -- cgit v1.2.3 From 805de8f43c20ba8b479bb598b543fa86b20067f6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 24 Apr 2015 01:12:32 +0200 Subject: atomic: Replace atomic_{set,clear}_mask() usage Replace the deprecated atomic_{set,clear}_mask() usage with the now ubiquitous atomic_{or,andnot}() functions. Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner --- arch/blackfin/mach-common/smp.c | 2 +- arch/m32r/kernel/smp.c | 4 +-- arch/mn10300/mm/tlb-smp.c | 2 +- arch/s390/kernel/time.c | 4 +-- arch/s390/kvm/interrupt.c | 30 ++++++++++---------- arch/s390/kvm/kvm-s390.c | 32 ++++++++++----------- drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/i915_irq.c | 4 +-- drivers/s390/scsi/zfcp_aux.c | 2 +- drivers/s390/scsi/zfcp_erp.c | 62 ++++++++++++++++++++--------------------- drivers/s390/scsi/zfcp_fc.c | 8 +++--- drivers/s390/scsi/zfcp_fsf.c | 26 ++++++++--------- drivers/s390/scsi/zfcp_qdio.c | 14 +++++----- 14 files changed, 97 insertions(+), 97 deletions(-) (limited to 'arch/m32r') diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c index 1c7259597395..0030e21cfceb 100644 --- a/arch/blackfin/mach-common/smp.c +++ b/arch/blackfin/mach-common/smp.c @@ -195,7 +195,7 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg) local_irq_save(flags); for_each_cpu(cpu, cpumask) { bfin_ipi_data = &per_cpu(bfin_ipi, cpu); - atomic_set_mask((1 << msg), &bfin_ipi_data->bits); + atomic_or((1 << msg), &bfin_ipi_data->bits); atomic_inc(&bfin_ipi_data->count); } local_irq_restore(flags); diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c index c18ddc74ef9a..62d6961e7f2b 100644 --- a/arch/m32r/kernel/smp.c +++ b/arch/m32r/kernel/smp.c @@ -156,7 +156,7 @@ void smp_flush_cache_all(void) cpumask_clear_cpu(smp_processor_id(), &cpumask); spin_lock(&flushcache_lock); mask=cpumask_bits(&cpumask); - atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask); + atomic_or(*mask, (atomic_t *)&flushcache_cpumask); send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0); _flush_cache_copyback_all(); while (flushcache_cpumask) @@ -407,7 +407,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, flush_vma = vma; flush_va = va; mask=cpumask_bits(&cpumask); - atomic_set_mask(*mask, (atomic_t *)&flush_cpumask); + atomic_or(*mask, (atomic_t *)&flush_cpumask); /* * We have to send the IPI only to diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c index e5d0ef722bfa..9a39ea9031d4
100644 --- a/arch/mn10300/mm/tlb-smp.c +++ b/arch/mn10300/mm/tlb-smp.c @@ -119,7 +119,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, flush_mm = mm; flush_va = va; #if NR_CPUS <= BITS_PER_LONG - atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]); + atomic_or(cpumask.bits[0], (atomic_t *)&flush_cpumask.bits[0]); #else #error Not supported. #endif diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 9e733d965e08..f5a0bd778ace 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -381,7 +381,7 @@ static void disable_sync_clock(void *dummy) * increase the "sequence" counter to avoid the race of an * etr event and the complete recovery against get_sync_clock. */ - atomic_clear_mask(0x80000000, sw_ptr); + atomic_andnot(0x80000000, sw_ptr); atomic_inc(sw_ptr); } @@ -392,7 +392,7 @@ static void disable_sync_clock(void *dummy) static void enable_sync_clock(void) { atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word); - atomic_set_mask(0x80000000, sw_ptr); + atomic_or(0x80000000, sw_ptr); } /* diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index c98d89708e99..57309e9cdd80 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -170,20 +170,20 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu) static void __set_cpu_idle(struct kvm_vcpu *vcpu) { - atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); + atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); } static void __unset_cpu_idle(struct kvm_vcpu *vcpu) { - atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); + atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); } static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) { - atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT, - &vcpu->arch.sie_block->cpuflags); + atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT, + &vcpu->arch.sie_block->cpuflags); vcpu->arch.sie_block->lctl = 0x0000; vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT); @@ -196,7 +196,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) { - atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags); + atomic_or(flag, &vcpu->arch.sie_block->cpuflags); } static void set_intercept_indicators_io(struct kvm_vcpu *vcpu) @@ -919,7 +919,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) spin_unlock(&li->lock); /* clear pending external calls set by sigp interpretation facility */ - atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags); + atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags); vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0; } @@ -1020,7 +1020,7 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) li->irq.ext = irq->u.ext; set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs); - atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); + atomic_or(CPUSTAT_EXT_INT, li->cpuflags); return 0; } @@ -1035,7 +1035,7 @@ static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id) /* another external call is pending */ return -EBUSY; } - atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); + atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); return 0; } @@ -1061,7 +1061,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) if 
(test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs)) return -EBUSY; *extcall = irq->u.extcall; - atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); + atomic_or(CPUSTAT_EXT_INT, li->cpuflags); return 0; } @@ -1133,7 +1133,7 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, set_bit(irq->u.emerg.code, li->sigp_emerg_pending); set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); - atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); + atomic_or(CPUSTAT_EXT_INT, li->cpuflags); return 0; } @@ -1177,7 +1177,7 @@ static int __inject_ckc(struct kvm_vcpu *vcpu) 0, 0, 2); set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); - atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); + atomic_or(CPUSTAT_EXT_INT, li->cpuflags); return 0; } @@ -1190,7 +1190,7 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu) 0, 0, 2); set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); - atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); + atomic_or(CPUSTAT_EXT_INT, li->cpuflags); return 0; } @@ -1369,13 +1369,13 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type) spin_lock(&li->lock); switch (type) { case KVM_S390_MCHK: - atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); + atomic_or(CPUSTAT_STOP_INT, li->cpuflags); break; case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: - atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags); + atomic_or(CPUSTAT_IO_INT, li->cpuflags); break; default: - atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); + atomic_or(CPUSTAT_EXT_INT, li->cpuflags); break; } spin_unlock(&li->lock); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 2078f92d15ac..b73302fb0507 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1215,12 +1215,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) } restore_access_regs(vcpu->run->s.regs.acrs); gmap_enable(vcpu->arch.gmap); - atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); + atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { - atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); + atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); gmap_disable(vcpu->arch.gmap); if (test_kvm_facility(vcpu->kvm, 129)) { save_fp_ctl(&vcpu->run->s.regs.fpc); @@ -1320,9 +1320,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) CPUSTAT_STOPPED); if (test_kvm_facility(vcpu->kvm, 78)) - atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags); + atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags); else if (test_kvm_facility(vcpu->kvm, 8)) - atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags); + atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags); kvm_s390_vcpu_setup_model(vcpu); @@ -1422,24 +1422,24 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu) { - atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); + atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); exit_sie(vcpu); } void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu) { - atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); + atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); } static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu) { - atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20); + atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20); exit_sie(vcpu); } static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu) { - atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20); + atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
} /* @@ -1448,7 +1448,7 @@ static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu) * return immediately. */ void exit_sie(struct kvm_vcpu *vcpu) { - atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags); + atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags); while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) cpu_relax(); } @@ -1672,19 +1672,19 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, if (dbg->control & KVM_GUESTDBG_ENABLE) { vcpu->guest_debug = dbg->control; /* enforce guest PER */ - atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); + atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); if (dbg->control & KVM_GUESTDBG_USE_HW_BP) rc = kvm_s390_import_bp_data(vcpu, dbg); } else { - atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); + atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); vcpu->arch.guestdbg.last_bp = 0; } if (rc) { vcpu->guest_debug = 0; kvm_s390_clear_bp_data(vcpu); - atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); + atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); } return rc; @@ -1771,7 +1771,7 @@ retry: if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) { if (!ibs_enabled(vcpu)) { trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); - atomic_set_mask(CPUSTAT_IBS, + atomic_or(CPUSTAT_IBS, &vcpu->arch.sie_block->cpuflags); } goto retry; @@ -1780,7 +1780,7 @@ retry: if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) { if (ibs_enabled(vcpu)) { trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); - atomic_clear_mask(CPUSTAT_IBS, + atomic_andnot(CPUSTAT_IBS, &vcpu->arch.sie_block->cpuflags); } goto retry; @@ -2280,7 +2280,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) __disable_ibs_on_all_vcpus(vcpu->kvm); } - atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); + atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); /* * Another VCPU might have used IBS while we were offline. * Let's play safe and flush the VCPU at startup. @@ -2306,7 +2306,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ kvm_s390_clear_stop_irq(vcpu); - atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); + atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); __disable_ibs_on_vcpu(vcpu); for (i = 0; i < online_vcpus; i++) { diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 884b4f9b81c4..8917c98ff121 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -748,7 +748,7 @@ static int i915_drm_resume(struct drm_device *dev) mutex_lock(&dev->struct_mutex); if (i915_gem_init_hw(dev)) { DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); - atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter); + atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter); } mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 52b446b27b4d..7a918d1c12ba 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -5091,7 +5091,7 @@ int i915_gem_init(struct drm_device *dev) * for all other failure, such as an allocation failure, bail. 
*/ DRM_ERROR("Failed to initialize GPU, declaring it wedged\n"); - atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter); + atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter); ret = 0; } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 984e2fe6688c..449a95c6c2a1 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2446,7 +2446,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev) kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, reset_done_event); } else { - atomic_set_mask(I915_WEDGED, &error->reset_counter); + atomic_or(I915_WEDGED, &error->reset_counter); } /* @@ -2574,7 +2574,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged, i915_report_and_clear_eir(dev); if (wedged) { - atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, + atomic_or(I915_RESET_IN_PROGRESS_FLAG, &dev_priv->gpu_error.reset_counter); /* diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 01a73395a017..c00ac4650dce 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -529,7 +529,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, list_add_tail(&port->list, &adapter->port_list); write_unlock_irq(&adapter->port_list_lock); - atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status); + atomic_or(status | ZFCP_STATUS_COMMON_RUNNING, &port->status); return port; diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index acde3f5d6e9e..3fb410977014 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -190,7 +190,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, if (!(act_status & ZFCP_STATUS_ERP_NO_REF)) if (scsi_device_get(sdev)) return NULL; - atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, + atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &zfcp_sdev->status); erp_action = &zfcp_sdev->erp_action; memset(erp_action, 0, sizeof(struct zfcp_erp_action)); @@ -206,7 +206,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, if (!get_device(&port->dev)) return NULL; zfcp_erp_action_dismiss_port(port); - atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); + atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); erp_action = &port->erp_action; memset(erp_action, 0, sizeof(struct zfcp_erp_action)); erp_action->port = port; @@ -217,7 +217,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, case ZFCP_ERP_ACTION_REOPEN_ADAPTER: kref_get(&adapter->ref); zfcp_erp_action_dismiss_adapter(adapter); - atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status); + atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status); erp_action = &adapter->erp_action; memset(erp_action, 0, sizeof(struct zfcp_erp_action)); if (!(atomic_read(&adapter->status) & @@ -254,7 +254,7 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev); if (!act) goto out; - atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); + atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); ++adapter->erp_total_count; list_add_tail(&act->list, &adapter->erp_ready_head); wake_up(&adapter->erp_ready_wq); @@ -486,14 +486,14 @@ static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) { if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) zfcp_dbf_rec_run("eraubl1", &adapter->erp_action); - 
atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); + atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); } static void zfcp_erp_port_unblock(struct zfcp_port *port) { if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status)) zfcp_dbf_rec_run("erpubl1", &port->erp_action); - atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); + atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); } static void zfcp_erp_lun_unblock(struct scsi_device *sdev) @@ -502,7 +502,7 @@ static void zfcp_erp_lun_unblock(struct scsi_device *sdev) if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status)) zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action); - atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status); + atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status); } static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) @@ -642,7 +642,7 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter) read_lock_irqsave(&adapter->erp_lock, flags); if (list_empty(&adapter->erp_ready_head) && list_empty(&adapter->erp_running_head)) { - atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, + atomic_andnot(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); wake_up(&adapter->erp_done_wqh); } @@ -665,16 +665,16 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action) int sleep = 1; struct zfcp_adapter *adapter = erp_action->adapter; - atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status); + atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status); for (retries = 7; retries; retries--) { - atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, + atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, &adapter->status); write_lock_irq(&adapter->erp_lock); zfcp_erp_action_to_running(erp_action); write_unlock_irq(&adapter->erp_lock); if (zfcp_fsf_exchange_config_data(erp_action)) { - atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, + atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, &adapter->status); return ZFCP_ERP_FAILED; } @@ -692,7 +692,7 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action) sleep *= 2; } - atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, + atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, &adapter->status); if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK)) @@ -764,7 +764,7 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act) /* all ports and LUNs are closed */ zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN); - atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | + atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); } @@ -773,7 +773,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act) struct zfcp_adapter *adapter = act->adapter; if (zfcp_qdio_open(adapter->qdio)) { - atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | + atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); return ZFCP_ERP_FAILED; @@ -784,7 +784,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act) return ZFCP_ERP_FAILED; } - atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &adapter->status); + atomic_or(ZFCP_STATUS_COMMON_OPEN, &adapter->status); return ZFCP_ERP_SUCCEEDED; } @@ -948,7 +948,7 @@ static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev) { struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); - atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, + 
atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED, &zfcp_sdev->status); } @@ -1187,18 +1187,18 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) switch (erp_action->action) { case ZFCP_ERP_ACTION_REOPEN_LUN: zfcp_sdev = sdev_to_zfcp(erp_action->sdev); - atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, + atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE, &zfcp_sdev->status); break; case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: case ZFCP_ERP_ACTION_REOPEN_PORT: - atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, + atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE, &erp_action->port->status); break; case ZFCP_ERP_ACTION_REOPEN_ADAPTER: - atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, + atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE, &erp_action->adapter->status); break; } @@ -1422,19 +1422,19 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask) unsigned long flags; u32 common_mask = mask & ZFCP_COMMON_FLAGS; - atomic_set_mask(mask, &adapter->status); + atomic_or(mask, &adapter->status); if (!common_mask) return; read_lock_irqsave(&adapter->port_list_lock, flags); list_for_each_entry(port, &adapter->port_list, list) - atomic_set_mask(common_mask, &port->status); + atomic_or(common_mask, &port->status); read_unlock_irqrestore(&adapter->port_list_lock, flags); spin_lock_irqsave(adapter->scsi_host->host_lock, flags); __shost_for_each_device(sdev, adapter->scsi_host) - atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status); + atomic_or(common_mask, &sdev_to_zfcp(sdev)->status); spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags); } @@ -1453,7 +1453,7 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask) u32 common_mask = mask & ZFCP_COMMON_FLAGS; u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; - atomic_clear_mask(mask, &adapter->status); + atomic_andnot(mask, &adapter->status); if (!common_mask) return; @@ -1463,7 +1463,7 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask) read_lock_irqsave(&adapter->port_list_lock, flags); list_for_each_entry(port, &adapter->port_list, list) { - atomic_clear_mask(common_mask, &port->status); + atomic_andnot(common_mask, &port->status); if (clear_counter) atomic_set(&port->erp_counter, 0); } @@ -1471,7 +1471,7 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask) spin_lock_irqsave(adapter->scsi_host->host_lock, flags); __shost_for_each_device(sdev, adapter->scsi_host) { - atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status); + atomic_andnot(common_mask, &sdev_to_zfcp(sdev)->status); if (clear_counter) atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); } @@ -1491,7 +1491,7 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask) u32 common_mask = mask & ZFCP_COMMON_FLAGS; unsigned long flags; - atomic_set_mask(mask, &port->status); + atomic_or(mask, &port->status); if (!common_mask) return; @@ -1499,7 +1499,7 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask) spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags); __shost_for_each_device(sdev, port->adapter->scsi_host) if (sdev_to_zfcp(sdev)->port == port) - atomic_set_mask(common_mask, + atomic_or(common_mask, &sdev_to_zfcp(sdev)->status); spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags); } @@ -1518,7 +1518,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask) u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; unsigned long flags; - atomic_clear_mask(mask, &port->status); + atomic_andnot(mask, &port->status); if 
(!common_mask) return; @@ -1529,7 +1529,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask) spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags); __shost_for_each_device(sdev, port->adapter->scsi_host) if (sdev_to_zfcp(sdev)->port == port) { - atomic_clear_mask(common_mask, + atomic_andnot(common_mask, &sdev_to_zfcp(sdev)->status); if (clear_counter) atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); @@ -1546,7 +1546,7 @@ void zfcp_erp_set_lun_status(struct scsi_device *sdev, u32 mask) { struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); - atomic_set_mask(mask, &zfcp_sdev->status); + atomic_or(mask, &zfcp_sdev->status); } /** @@ -1558,7 +1558,7 @@ void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask) { struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); - atomic_clear_mask(mask, &zfcp_sdev->status); + atomic_andnot(mask, &zfcp_sdev->status); if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) atomic_set(&zfcp_sdev->erp_counter, 0); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 25d49f32ca63..237688af179b 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -508,7 +508,7 @@ static void zfcp_fc_adisc_handler(void *data) /* port is good, unblock rport without going through erp */ zfcp_scsi_schedule_rport_register(port); out: - atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); + atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status); put_device(&port->dev); kmem_cache_free(zfcp_fc_req_cache, fc_req); } @@ -564,14 +564,14 @@ void zfcp_fc_link_test_work(struct work_struct *work) if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST) goto out; - atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); + atomic_or(ZFCP_STATUS_PORT_LINK_TEST, &port->status); retval = zfcp_fc_adisc(port); if (retval == 0) return; /* send of ADISC was not possible */ - atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); + atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status); zfcp_erp_port_forced_reopen(port, 0, "fcltwk1"); out: @@ -640,7 +640,7 @@ static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh) if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC)) return; - atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status); + atomic_andnot(ZFCP_STATUS_COMMON_NOESC, &port->status); if ((port->supported_classes != 0) || !list_empty(&port->unit_list)) diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 21ec5e2f584c..27b976aa1818 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -114,7 +114,7 @@ static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED) return; - atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); + atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); zfcp_scsi_schedule_rports_block(adapter); @@ -345,7 +345,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3"); break; case FSF_PROT_HOST_CONNECTION_INITIALIZING: - atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, + atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, &adapter->status); break; case FSF_PROT_DUPLICATE_REQUEST_ID: @@ -554,7 +554,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1"); return; } - atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, + atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK, 
&adapter->status); break; case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: @@ -567,7 +567,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) /* avoids adapter shutdown to be able to recognize * events such as LINK UP */ - atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, + atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status); zfcp_fsf_link_down_info_eval(req, &qtcb->header.fsf_status_qual.link_down_info); @@ -1394,9 +1394,9 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req) break; case FSF_GOOD: port->handle = header->port_handle; - atomic_set_mask(ZFCP_STATUS_COMMON_OPEN | + atomic_or(ZFCP_STATUS_COMMON_OPEN | ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); - atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED, + atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED, &port->status); /* check whether D_ID has changed during open */ /* @@ -1677,10 +1677,10 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) case FSF_PORT_BOXED: /* can't use generic zfcp_erp_modify_port_status because * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ - atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); + atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); shost_for_each_device(sdev, port->adapter->scsi_host) if (sdev_to_zfcp(sdev)->port == port) - atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, + atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &sdev_to_zfcp(sdev)->status); zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED); zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, @@ -1700,10 +1700,10 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) /* can't use generic zfcp_erp_modify_port_status because * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ - atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); + atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); shost_for_each_device(sdev, port->adapter->scsi_host) if (sdev_to_zfcp(sdev)->port == port) - atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, + atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &sdev_to_zfcp(sdev)->status); break; } @@ -1766,7 +1766,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req) zfcp_sdev = sdev_to_zfcp(sdev); - atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | + atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED | ZFCP_STATUS_COMMON_ACCESS_BOXED, &zfcp_sdev->status); @@ -1822,7 +1822,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req) case FSF_GOOD: zfcp_sdev->lun_handle = header->lun_handle; - atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status); + atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status); break; } } @@ -1913,7 +1913,7 @@ static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req) } break; case FSF_GOOD: - atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status); + atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status); break; } } diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 495e1cb3afa6..dbf2b54703f7 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -349,7 +349,7 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio) /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ spin_lock_irq(&qdio->req_q_lock); - atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); + atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); spin_unlock_irq(&qdio->req_q_lock); wake_up(&qdio->req_q_wq); @@ -384,7 +384,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio) if 
(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP) return -EIO; - atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED, + atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED, &qdio->adapter->status); zfcp_qdio_setup_init_data(&init_data, qdio); @@ -396,14 +396,14 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio) goto failed_qdio; if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED) - atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED, + atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED, &qdio->adapter->status); if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) { - atomic_set_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status); + atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status); qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER; } else { - atomic_clear_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status); + atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status); qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1; } @@ -427,7 +427,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio) /* set index of first available SBALS / number of available SBALS */ qdio->req_q_idx = 0; atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q); - atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status); + atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status); if (adapter->scsi_host) { adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req; @@ -499,6 +499,6 @@ void zfcp_qdio_siosl(struct zfcp_adapter *adapter) rc = ccw_device_siosl(adapter->ccw_device); if (!rc) - atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED, + atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED, &adapter->status); } -- cgit v1.2.3
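Taken together, the series deletes the bespoke mask API and leaves only the plain logic ops: atomic_set_mask(m, v) becomes atomic_or(m, v), and atomic_clear_mask(m, v) becomes atomic_andnot(m, v). A self-contained userspace analogue of that conversion, using C11 atomics in place of the kernel's atomic_t (all names here are illustrative, not kernel API):

/* Demo of the set_mask/clear_mask -> or/andnot conversion using C11
 * atomics; build with: cc -std=c11 demo.c && ./a.out */
#include <assert.h>
#include <stdatomic.h>

static atomic_uint status;

static void set_mask(unsigned int mask)   /* old: atomic_set_mask()   */
{
	atomic_fetch_or(&status, mask);   /* new: atomic_or()         */
}

static void clear_mask(unsigned int mask) /* old: atomic_clear_mask() */
{
	atomic_fetch_and(&status, ~mask); /* new: atomic_andnot()     */
}

int main(void)
{
	set_mask(0x5);
	clear_mask(0x1);
	assert(atomic_load(&status) == 0x4);
	return 0;
}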