| author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 15:20:36 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 15:20:36 -0700 |
| commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
| tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-arm/atomic.h | |
| download | linux-1da177e4c3f41524e886b7f1b8a0c1fc7321cac2.tar.bz2 | |
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-arm/atomic.h')
-rw-r--r-- | include/asm-arm/atomic.h | 165
1 files changed, 165 insertions, 0 deletions
diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
new file mode 100644
index 000000000000..2885972b0855
--- /dev/null
+++ b/include/asm-arm/atomic.h
@@ -0,0 +1,165 @@
+/*
+ * linux/include/asm-arm/atomic.h
+ *
+ * Copyright (C) 1996 Russell King.
+ * Copyright (C) 2002 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARM_ATOMIC_H
+#define __ASM_ARM_ATOMIC_H
+
+#include <linux/config.h>
+
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i)	{ (i) }
+
+#ifdef __KERNEL__
+
+#define atomic_read(v)	((v)->counter)
+
+#if __LINUX_ARM_ARCH__ >= 6
+
+/*
+ * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
+ * store exclusive to ensure that these are atomic. We may loop
+ * to ensure that the update happens. Writing to 'v->counter'
+ * without using the following operations WILL break the atomic
+ * nature of these ops.
+ */
+static inline void atomic_set(atomic_t *v, int i)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__("@ atomic_set\n"
+"1:	ldrex	%0, [%1]\n"
+"	strex	%0, %2, [%1]\n"
+"	teq	%0, #0\n"
+"	bne	1b"
+	: "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	__asm__ __volatile__("@ atomic_add_return\n"
+"1:	ldrex	%0, [%2]\n"
+"	add	%0, %0, %3\n"
+"	strex	%1, %0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+
+	return result;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	__asm__ __volatile__("@ atomic_sub_return\n"
+"1:	ldrex	%0, [%2]\n"
+"	sub	%0, %0, %3\n"
+"	strex	%1, %0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+
+	return result;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+	unsigned long tmp, tmp2;
+
+	__asm__ __volatile__("@ atomic_clear_mask\n"
+"1:	ldrex	%0, %2\n"
+"	bic	%0, %0, %3\n"
+"	strex	%1, %0, %2\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (tmp), "=&r" (tmp2)
+	: "r" (addr), "Ir" (mask)
+	: "cc");
+}
+
+#else /* ARM_ARCH_6 */
+
+#include <asm/system.h>
+
+#ifdef CONFIG_SMP
+#error SMP not supported on pre-ARMv6 CPUs
+#endif
+
+#define atomic_set(v,i)	(((v)->counter) = (i))
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned long flags;
+	int val;
+
+	local_irq_save(flags);
+	val = v->counter;
+	v->counter = val += i;
+	local_irq_restore(flags);
+
+	return val;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned long flags;
+	int val;
+
+	local_irq_save(flags);
+	val = v->counter;
+	v->counter = val -= i;
+	local_irq_restore(flags);
+
+	return val;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	*addr &= ~mask;
+	local_irq_restore(flags);
+}
+
+#endif /* __LINUX_ARM_ARCH__ */
+
+#define atomic_add(i, v)	(void) atomic_add_return(i, v)
+#define atomic_inc(v)		(void) atomic_add_return(1, v)
+#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
+#define atomic_dec(v)		(void) atomic_sub_return(1, v)
+
+#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v)	(atomic_add_return(1, v))
+#define atomic_dec_return(v)	(atomic_sub_return(1, v))
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+
+/* Atomic operations are already serializing on ARM */
+#define smp_mb__before_atomic_dec()	barrier()
+#define smp_mb__after_atomic_dec()	barrier()
+#define smp_mb__before_atomic_inc()	barrier()
+#define smp_mb__after_atomic_inc()	barrier()
+
+#endif
+#endif
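As context for how this interface is consumed, the following is a minimal usage sketch in the style of kernel code of that era. It is illustrative only and not part of the commit: `struct my_object` and its helper functions are hypothetical, and a kernel build environment (where `<asm/atomic.h>` resolves to the header above) is assumed.

```c
/*
 * Hypothetical consumer of the atomic_t API introduced above
 * (illustrative sketch, not part of this commit).
 */
#include <asm/atomic.h>

struct my_object {
	atomic_t refcount;		/* outstanding references to this object */
};

static void my_object_init(struct my_object *obj)
{
	atomic_set(&obj->refcount, 1);	/* creator holds the first reference */
}

static void my_object_get(struct my_object *obj)
{
	atomic_inc(&obj->refcount);	/* expands to (void) atomic_add_return(1, ...) */
}

/* Returns non-zero only for the caller that drops the last reference. */
static int my_object_put(struct my_object *obj)
{
	return atomic_dec_and_test(&obj->refcount);
}
```

On ARMv6, the ldrex/strex loop in atomic_add_return() and atomic_sub_return() retries until the store-exclusive succeeds, so these calls remain atomic on SMP; on earlier cores the fallback merely disables interrupts around the update, which is sufficient only because the header rejects CONFIG_SMP on pre-ARMv6 CPUs at compile time.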