From 79d39e2bab60d18a68a5abc00be4506864397efc Mon Sep 17 00:00:00 2001
From: Babu Moger
Date: Wed, 24 May 2017 17:55:14 -0600
Subject: arch/sparc: Introduce xchg16 for SPARC
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

SPARC currently supports 32 bit and 64 bit xchg. Add support for
16 bit (2 byte) xchg, which is required by the queued spinlock
feature. This is achieved using a 4 byte cas instruction with byte
manipulations.

Also re-arrange the code to call __cmpxchg_u32 inside xchg16.

Signed-off-by: Babu Moger
Reviewed-by: Håkon Bugge
Reviewed-by: Steven Sistare
Reviewed-by: Shannon Nelson
Reviewed-by: Jane Chu
Reviewed-by: Vijay Kumar
Signed-off-by: David S. Miller
---
 arch/sparc/include/asm/cmpxchg_64.h | 49 +++++++++++++++++++++++++++++--------
 1 file changed, 39 insertions(+), 10 deletions(-)

diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
index 000f7d7a2d4a..4028f4f1e561 100644
--- a/arch/sparc/include/asm/cmpxchg_64.h
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -6,6 +6,17 @@
 #ifndef __ARCH_SPARC64_CMPXCHG__
 #define __ARCH_SPARC64_CMPXCHG__
 
+static inline unsigned long
+__cmpxchg_u32(volatile int *m, int old, int new)
+{
+	__asm__ __volatile__("cas [%2], %3, %0"
+			     : "=&r" (new)
+			     : "0" (new), "r" (m), "r" (old)
+			     : "memory");
+
+	return new;
+}
+
 static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
 {
 	unsigned long tmp1, tmp2;
@@ -44,10 +55,38 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
 
 void __xchg_called_with_bad_pointer(void);
 
+/*
+ * Use 4 byte cas instruction to achieve 2 byte xchg. Main logic
+ * here is to get the bit shift of the byte we are interested in.
+ * The XOR is handy for reversing the bits for big-endian byte order.
+ */
+static inline unsigned long
+xchg16(__volatile__ unsigned short *m, unsigned short val)
+{
+	unsigned long maddr = (unsigned long)m;
+	int bit_shift = (((unsigned long)m & 2) ^ 2) << 3;
+	unsigned int mask = 0xffff << bit_shift;
+	unsigned int *ptr = (unsigned int *) (maddr & ~2);
+	unsigned int old32, new32, load32;
+
+	/* Read the old value */
+	load32 = *ptr;
+
+	do {
+		old32 = load32;
+		new32 = (load32 & (~mask)) | val << bit_shift;
+		load32 = __cmpxchg_u32(ptr, old32, new32);
+	} while (load32 != old32);
+
+	return (load32 & mask) >> bit_shift;
+}
+
 static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
 				   int size)
 {
 	switch (size) {
+	case 2:
+		return xchg16(ptr, x);
 	case 4:
 		return xchg32(ptr, x);
 	case 8:
@@ -65,16 +104,6 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
 
 #include <asm-generic/cmpxchg-local.h>
 
-static inline unsigned long
-__cmpxchg_u32(volatile int *m, int old, int new)
-{
-	__asm__ __volatile__("cas [%2], %3, %0"
-			     : "=&r" (new)
-			     : "0" (new), "r" (m), "r" (old)
-			     : "memory");
-
-	return new;
-}
 
 static inline unsigned long
 __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
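
For readers who want to experiment with the technique outside the kernel, here is
a minimal sketch in portable C of the same idea: a 16 bit exchange built from a
32 bit compare-and-swap on the containing word. This is an illustration, not the
kernel code; the GCC/Clang builtin __atomic_compare_exchange_n stands in for
SPARC's cas instruction, and the names xchg16_via_cas32, maddr, and bit_shift
are invented for this sketch. The XOR with 2 is the big-endian adjustment the
patch comment describes; the little-endian branch is included so the sketch
stays correct on other machines.

#include <stdint.h>

/* Exchange a 16 bit value using only a 32 bit CAS on the word that
 * contains it. Assumes m is 2-byte aligned, so (maddr & ~2) is the
 * 4-byte aligned address of the containing word.
 */
static inline uint16_t xchg16_via_cas32(volatile uint16_t *m, uint16_t val)
{
	uintptr_t maddr = (uintptr_t)m;
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	/* Big endian (as on SPARC): the halfword at the lower address
	 * occupies the HIGH 16 bits of the word, hence the XOR:
	 * (m & 2) == 0 -> shift 16, (m & 2) == 2 -> shift 0.
	 */
	int bit_shift = (int)((maddr & 2) ^ 2) << 3;
#else
	/* Little endian: the lower-addressed halfword is the LOW bits. */
	int bit_shift = (int)(maddr & 2) << 3;
#endif
	uint32_t mask = (uint32_t)0xffff << bit_shift;
	volatile uint32_t *ptr = (volatile uint32_t *)(maddr & ~(uintptr_t)2);
	uint32_t old32, new32;

	old32 = *ptr;	/* snapshot of the containing word */
	do {
		/* Splice the new halfword into its slot, leaving the
		 * neighbouring halfword untouched.
		 */
		new32 = (old32 & ~mask) | ((uint32_t)val << bit_shift);
		/* On failure the builtin refreshes old32 with the current
		 * word value, so the loop recomputes and retries; this
		 * mirrors the do/while around __cmpxchg_u32 in the patch.
		 */
	} while (!__atomic_compare_exchange_n(ptr, &old32, new32, 0,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_SEQ_CST));

	return (uint16_t)((old32 & mask) >> bit_shift);
}

On success, old32 holds the word as it was just before the swap, so masking and
shifting extracts the previous 16 bit value, exactly as the patch's xchg16
returns (load32 & mask) >> bit_shift.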