diff options
| author | Peter Zijlstra <peterz@infradead.org> | 2014-04-23 19:50:20 +0200 | 
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2015-07-27 14:06:23 +0200 | 
| commit | ddb7573ff68964e7b3b72eeb9cde1384c4c6ba83 (patch) | |
| tree | c71e6b79b6b6e089ee8667340f5f1ca3210ee8d9 /arch/mn10300 | |
| parent | 27782f2752aca65a241f10fb2d4508c71bb2656b (diff) | |
| download | linux-ddb7573ff68964e7b3b72eeb9cde1384c4c6ba83.tar.bz2 | |
mn10300: Provide atomic_{or,xor,and}
Implement atomic logic ops -- atomic_{or,xor,and}.
These will replace the atomic_{set,clear}_mask functions that are
available on some archs.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/mn10300')
| -rw-r--r-- | arch/mn10300/include/asm/atomic.h | 57 | 
1 file changed, 10 insertions(+), 47 deletions(-)
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index 5be655e83e70..03eea8158cf9 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -89,6 +89,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v)		\
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -134,31 +140,9 @@ static inline void atomic_dec(atomic_t *v)
  *
  * Atomically clears the bits set in mask from the memory word specified.
  */
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
-#ifdef CONFIG_SMP
-	int status;
-
-	asm volatile(
-		"1:	mov	%3,(_AAR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"
-		"	and	%4,%0		\n"
-		"	mov	%0,(_ADR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"	/* flush */
-		"	mov	(_ASR,%2),%0	\n"
-		"	or	%0,%0		\n"
-		"	bne	1b		\n"
-		: "=&r"(status), "=m"(*addr)
-		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
-		: "memory", "cc");
-#else
-	unsigned long flags;
-
-	mask = ~mask;
-	flags = arch_local_cli_save();
-	*addr &= mask;
-	arch_local_irq_restore(flags);
-#endif
+	atomic_and(~mask, v);
 }
 
 /**
@@ -168,30 +152,9 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
  *
  * Atomically sets the bits set in mask from the memory word specified.
  */
-static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
-#ifdef CONFIG_SMP
-	int status;
-
-	asm volatile(
-		"1:	mov	%3,(_AAR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"
-		"	or	%4,%0		\n"
-		"	mov	%0,(_ADR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"	/* flush */
-		"	mov	(_ASR,%2),%0	\n"
-		"	or	%0,%0		\n"
-		"	bne	1b		\n"
-		: "=&r"(status), "=m"(*addr)
-		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
-		: "memory", "cc");
-#else
-	unsigned long flags;
-
-	flags = arch_local_cli_save();
-	*addr |= mask;
-	arch_local_irq_restore(flags);
-#endif
+	atomic_or(mask, v);
 }
 
 #endif /* __KERNEL__ */