From ffbf670f5cd50501a34a5187981460da2216071e Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 9 Jan 2006 15:59:17 -0800
Subject: [PATCH] mutex subsystem, add atomic_xchg() to all arches

add atomic_xchg() to all the architectures.  Needed by the new mutex code.

Signed-off-by: Ingo Molnar
Signed-off-by: Arjan van de Ven
---
 include/asm-arm/atomic.h | 2 ++
 1 file changed, 2 insertions(+)
(limited to 'include/asm-arm')

diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
index f72b63309bc5..3d7283d84405 100644
--- a/include/asm-arm/atomic.h
+++ b/include/asm-arm/atomic.h
@@ -175,6 +175,8 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 
 #endif /* __LINUX_ARM_ARCH__ */
 
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
-- 
cgit v1.2.3


From 823d0f4f67252115212eb86caba14d5795bbe643 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Mon, 9 Jan 2006 15:59:18 -0800
Subject: [PATCH] mutex subsystem, add include/asm-arm/mutex.h

add the ARM version of mutex.h, which is optimized in assembly for ARMv6,
and uses the xchg implementation on pre-ARMv6.

Signed-off-by: Ingo Molnar
---
 include/asm-arm/mutex.h | 128 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 128 insertions(+)
 create mode 100644 include/asm-arm/mutex.h
(limited to 'include/asm-arm')

diff --git a/include/asm-arm/mutex.h b/include/asm-arm/mutex.h
new file mode 100644
index 000000000000..6caa59f1f595
--- /dev/null
+++ b/include/asm-arm/mutex.h
@@ -0,0 +1,128 @@
+/*
+ * include/asm-arm/mutex.h
+ *
+ * ARM optimized mutex locking primitives
+ *
+ * Please look into asm-generic/mutex-xchg.h for a formal definition.
+ */
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+#if __LINUX_ARM_ARCH__ < 6
+/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */
+# include <asm-generic/mutex-xchg.h>
+#else
+
+/*
+ * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
+ * atomic decrement (it is not a reliable atomic decrement but it satisfies
+ * the defined semantics for our purpose, while being smaller and faster
+ * than a real atomic decrement or atomic swap).  The idea is to attempt
+ * decrementing the lock value only once.  If, once decremented, it isn't zero,
+ * or if its store-back fails due to a dispute on the exclusive store, we
+ * simply bail out immediately through the slow path where the lock will be
+ * reattempted until it succeeds.
+ */
+#define __mutex_fastpath_lock(count, fail_fn)				\
+do {									\
+	int __ex_flag, __res;						\
+									\
+	typecheck(atomic_t *, count);					\
+	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);		\
+									\
+	__asm__ (							\
+		"ldrex	%0, [%2]	\n"				\
+		"sub	%0, %0, #1	\n"				\
+		"strex	%1, %0, [%2]	\n"				\
+									\
+		: "=&r" (__res), "=&r" (__ex_flag)			\
+		: "r" (&(count)->counter)				\
+		: "cc", "memory" );					\
+									\
+	if (unlikely(__res || __ex_flag))				\
+		fail_fn(count);						\
+} while (0)
+
+#define __mutex_fastpath_lock_retval(count, fail_fn)			\
+({									\
+	int __ex_flag, __res;						\
+									\
+	typecheck(atomic_t *, count);					\
+	typecheck_fn(fastcall int (*)(atomic_t *), fail_fn);		\
+									\
+	__asm__ (							\
+		"ldrex	%0, [%2]	\n"				\
+		"sub	%0, %0, #1	\n"				\
+		"strex	%1, %0, [%2]	\n"				\
+									\
+		: "=&r" (__res), "=&r" (__ex_flag)			\
+		: "r" (&(count)->counter)				\
+		: "cc", "memory" );					\
+									\
+	__res |= __ex_flag;						\
+	if (unlikely(__res != 0))					\
+		__res = fail_fn(count);					\
+	__res;								\
+})
+
+/*
+ * The same trick is used for the unlock fast path.  However the original
+ * value, rather than the result, is used to test for success in order to
+ * have better generated assembly.
+ */
+#define __mutex_fastpath_unlock(count, fail_fn)				\
+do {									\
+	int __ex_flag, __res, __orig;					\
+									\
+	typecheck(atomic_t *, count);					\
+	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);		\
+									\
+	__asm__ (							\
+		"ldrex	%0, [%3]	\n"				\
+		"add	%1, %0, #1	\n"				\
+		"strex	%2, %1, [%3]	\n"				\
+									\
+		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)	\
+		: "r" (&(count)->counter)				\
+		: "cc", "memory" );					\
+									\
+	if (unlikely(__orig || __ex_flag))				\
+		fail_fn(count);						\
+} while (0)
+
+/*
+ * If the unlock was done on a contended lock, or if the unlock simply fails
+ * then the mutex remains locked.
+ */
+#define __mutex_slowpath_needs_to_unlock()	1
+
+/*
+ * For __mutex_fastpath_trylock we use another construct which could be
+ * described as a "single value cmpxchg".
+ *
+ * This provides the needed trylock semantics like cmpxchg would, but it is
+ * lighter and less generic than a true cmpxchg implementation.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+	int __ex_flag, __res, __orig;
+
+	__asm__ (
+
+		"1: ldrex	%0, [%3]	\n"
+		"subs	%1, %0, #1	\n"
+		"strexeq	%2, %1, [%3]	\n"
+		"movlt	%0, #0	\n"
+		"cmpeq	%2, #0	\n"
+		"bgt	1b	\n"
+
+		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
+		: "r" (&count->counter)
+		: "cc", "memory" );
+
+	return __orig;
+}
+
+#endif
+#endif
-- 
cgit v1.2.3
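
[Editor's illustration, not part of the patches above.]  Below is a minimal
user-space sketch of the counter protocol that these fastpath macros
implement.  It is not kernel code: the demo_* names are invented for
illustration, GCC __atomic builtins stand in for the hand-written
ldrex/strex sequences (the builtins retry a failed exclusive store
internally, whereas __mutex_fastpath_lock() deliberately bails out to the
slow path instead), and the slow paths here merely spin rather than putting
the task to sleep.

/*
 * Sketch of the mutex fastpath/slowpath contract, assuming the counter
 * convention of the 2006 mutex code:
 *   1  -> unlocked
 *   0  -> locked, no waiters
 *  <0  -> locked, possible waiters (unlock must take the slow path)
 */
#include <stdio.h>

typedef struct { int counter; } demo_atomic_t;

/* Hypothetical slow paths; the real ones put tasks to sleep and wake them. */
static void demo_lock_slowpath(demo_atomic_t *count)
{
	/* Spin until the lock is observed free (1), then claim it (0). */
	int expected = 1;
	while (!__atomic_compare_exchange_n(&count->counter, &expected, 0, 0,
					    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
		expected = 1;
}

static void demo_unlock_slowpath(demo_atomic_t *count)
{
	/* Mark the mutex free; the real slow path would also wake a waiter. */
	__atomic_store_n(&count->counter, 1, __ATOMIC_RELEASE);
}

/* Fastpath lock: decrement once; anything but a clean 1 -> 0 transition
 * falls back to the slow path, mirroring __mutex_fastpath_lock(). */
static void demo_mutex_lock(demo_atomic_t *count)
{
	if (__atomic_sub_fetch(&count->counter, 1, __ATOMIC_ACQUIRE) != 0)
		demo_lock_slowpath(count);
}

/* Fastpath unlock: increment once; a pre-increment value other than 0 means
 * someone may be waiting, so take the slow path, which is exactly the test
 * on __orig in __mutex_fastpath_unlock(). */
static void demo_mutex_unlock(demo_atomic_t *count)
{
	if (__atomic_fetch_add(&count->counter, 1, __ATOMIC_RELEASE) != 0)
		demo_unlock_slowpath(count);
}

int main(void)
{
	demo_atomic_t m = { .counter = 1 };		/* 1 == unlocked */

	demo_mutex_lock(&m);
	printf("locked, counter=%d\n", m.counter);	/* expect 0 */
	demo_mutex_unlock(&m);
	printf("unlocked, counter=%d\n", m.counter);	/* expect 1 */
	return 0;
}

The point of the hand-written assembly versions above is that a single
ldrex/strex attempt plus a conditional branch to the slow path is smaller
and faster than a full atomic read-modify-write loop, which is the trade-off
described in the comment block before __mutex_fastpath_lock().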