From c31ae4bb4a9fa4606a74c0a4fb61b74f804e861e Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Wed, 13 May 2009 22:56:25 +0000
Subject: asm-generic: introduce asm/bitsperlong.h

This provides a reliable way for asm-generic/types.h and other files
to find out if it is running on a 32 or 64 bit platform.

We cannot use CONFIG_64BIT for this in headers that are included from
user space because CONFIG symbols are not available there. We also
cannot do it inside of asm/types.h because some headers need the word
size but cannot include types.h.

The solution is to introduce a new header that defines both
__BITS_PER_LONG for user space and BITS_PER_LONG for usage in the
kernel. The asm-generic version falls back to 32 bit unless the
architecture overrides it, which I did for all 64 bit platforms.

Signed-off-by: Remis Lima Baima
Signed-off-by: Arnd Bergmann
---
 arch/xtensa/include/asm/bitsperlong.h | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 arch/xtensa/include/asm/bitsperlong.h

(limited to 'arch/xtensa')

diff --git a/arch/xtensa/include/asm/bitsperlong.h b/arch/xtensa/include/asm/bitsperlong.h
new file mode 100644
index 000000000000..6dc0bb0c13b2
--- /dev/null
+++ b/arch/xtensa/include/asm/bitsperlong.h
@@ -0,0 +1 @@
+#include <asm-generic/bitsperlong.h>
-- cgit v1.2.3
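[Editor's note] Because this view is limited to arch/xtensa, only the xtensa wrapper above is shown; the generic header and the 64-bit overrides mentioned in the changelog belong to the same series but are not visible here. As a rough sketch of the pattern the changelog describes (contents assumed from the description, not quoted from the patch): the asm-generic header defaults __BITS_PER_LONG to 32 and derives the kernel-only BITS_PER_LONG from CONFIG_64BIT, while a 64-bit architecture overrides the user-visible value before pulling the generic header in.

    /* asm-generic/bitsperlong.h -- sketch, not the verbatim file */
    #ifndef __BITS_PER_LONG
    #define __BITS_PER_LONG 32      /* user-space fallback: assume 32 bit */
    #endif

    #ifdef __KERNEL__               /* CONFIG_* symbols are only usable here */
    #ifdef CONFIG_64BIT
    #define BITS_PER_LONG 64
    #else
    #define BITS_PER_LONG 32
    #endif
    #endif

    /* a 64-bit architecture's asm/bitsperlong.h -- sketch of the override */
    #define __BITS_PER_LONG 64
    #include <asm-generic/bitsperlong.h>
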
From 72099ed2719fc5829bd79c6ca9d1783ed026eb37 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Wed, 13 May 2009 22:56:29 +0000
Subject: asm-generic: rename atomic.h to atomic-long.h

The existing asm-generic/atomic.h only defines the atomic_long
type. This renames it to atomic-long.h so we have a place to add a
truly generic atomic.h that can be used on all non-SMP systems.

Signed-off-by: Remis Lima Baima
Signed-off-by: Arnd Bergmann
Acked-by: Ingo Molnar
---
 arch/alpha/include/asm/atomic.h      |   2 +-
 arch/arm/include/asm/atomic.h        |   2 +-
 arch/avr32/include/asm/atomic.h      |   2 +-
 arch/blackfin/include/asm/atomic.h   |   2 +-
 arch/cris/include/asm/atomic.h       |   2 +-
 arch/frv/include/asm/atomic.h        |   2 +-
 arch/h8300/include/asm/atomic.h      |   2 +-
 arch/ia64/include/asm/atomic.h       |   2 +-
 arch/m32r/include/asm/atomic.h       |   2 +-
 arch/m68k/include/asm/atomic_mm.h    |   2 +-
 arch/m68k/include/asm/atomic_no.h    |   2 +-
 arch/microblaze/include/asm/atomic.h |   2 +-
 arch/mips/include/asm/atomic.h       |   2 +-
 arch/mn10300/include/asm/atomic.h    |   2 +-
 arch/parisc/include/asm/atomic.h     |   2 +-
 arch/powerpc/include/asm/atomic.h    |   2 +-
 arch/s390/include/asm/atomic.h       |   2 +-
 arch/sh/include/asm/atomic.h         |   2 +-
 arch/sparc/include/asm/atomic_32.h   |   2 +-
 arch/sparc/include/asm/atomic_64.h   |   2 +-
 arch/x86/include/asm/atomic_32.h     |   2 +-
 arch/x86/include/asm/atomic_64.h     |   2 +-
 arch/xtensa/include/asm/atomic.h     |   2 +-
 include/asm-generic/atomic-long.h    | 258 +++++++++++++++++++++++++++++++++++
 include/asm-generic/atomic.h         | 258 -----------------------------------
 include/asm-generic/bitops/atomic.h  |   1 +
 26 files changed, 282 insertions(+), 281 deletions(-)
 create mode 100644 include/asm-generic/atomic-long.h
 delete mode 100644 include/asm-generic/atomic.h

(limited to 'arch/xtensa')

diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 62b363584b2b..610dff44d94b 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -256,5 +256,5 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 #define smp_mb__before_atomic_inc() smp_mb()
 #define smp_mb__after_atomic_inc() smp_mb()

-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
 #endif /* _ALPHA_ATOMIC_H */
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 16b52f397983..9e07fe507029 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -249,6 +249,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define smp_mb__before_atomic_inc() smp_mb()
 #define smp_mb__after_atomic_inc() smp_mb()

-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
 #endif
 #endif
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h
index 318815107748..b131c27ddf57 100644
--- a/arch/avr32/include/asm/atomic.h
+++ b/arch/avr32/include/asm/atomic.h
@@ -196,6 +196,6 @@ static inline int atomic_sub_if_positive(int i, atomic_t *v)
 #define smp_mb__before_atomic_inc() barrier()
 #define smp_mb__after_atomic_inc() barrier()

-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>

 #endif /* __ASM_AVR32_ATOMIC_H */
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index 94b2a9b19451..7bbf44e4ddf9 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -208,6 +208,6 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>

 #endif /* __ARCH_BLACKFIN_ATOMIC __ */
diff --git a/arch/cris/include/asm/atomic.h b/arch/cris/include/asm/atomic.h
index 5718dd8902a1..a6aca819e9f3 100644
--- a/arch/cris/include/asm/atomic.h
+++ b/arch/cris/include/asm/atomic.h
@@ -158,5 +158,5 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define smp_mb__before_atomic_inc() barrier()
 #define smp_mb__after_atomic_inc() barrier()

-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
 #endif
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 296c35cfb207..0409d981fd39 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -194,5 +194,5 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
 #endif /* _ASM_ATOMIC_H */
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
index 833186c8dc3b..33c8c0fa9583 100644
--- a/arch/h8300/include/asm/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
@@ -141,5 +141,5 @@ static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
 #define smp_mb__before_atomic_inc() barrier()
 #define smp_mb__after_atomic_inc() barrier()

-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
 #endif /* __ARCH_H8300_ATOMIC __ */
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index d37292bd9875..88405cb0832a 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -216,5 +216,5 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 #define smp_mb__before_atomic_inc() barrier()
 #define smp_mb__after_atomic_inc() barrier()

-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
 #endif /* _ASM_IA64_ATOMIC_H */
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index 2eed30f84080..63f0cf0f50dd 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -314,5 +314,5 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
 #define smp_mb__before_atomic_inc() barrier()
 #define smp_mb__after_atomic_inc() barrier()

-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
 #endif /* _ASM_M32R_ATOMIC_H */
diff --git a/arch/m68k/include/asm/atomic_mm.h b/arch/m68k/include/asm/atomic_mm.h
index eb0ab9d4ee77..88b7af20a996 100644
--- a/arch/m68k/include/asm/atomic_mm.h
+++ b/arch/m68k/include/asm/atomic_mm.h
@@ -192,5 +192,5 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 #define smp_mb__before_atomic_inc() barrier()
 #define smp_mb__after_atomic_inc() barrier()

-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
 #endif /* __ARCH_M68K_ATOMIC __ */
diff --git a/arch/m68k/include/asm/atomic_no.h b/arch/m68k/include/asm/atomic_no.h
index 6bb674855a3f..5674cb9449bd 100644
--- a/arch/m68k/include/asm/atomic_no.h
+++ b/arch/m68k/include/asm/atomic_no.h
@@ -151,5 +151,5 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
 #define atomic_inc_return(v) atomic_add_return(1,(v))

-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
 #endif /* __ARCH_M68KNOMMU_ATOMIC __ */
diff --git a/arch/microblaze/include/asm/atomic.h b/arch/microblaze/include/asm/atomic.h
index a448d94ab721..0de612ad7cb2 100644
--- a/arch/microblaze/include/asm/atomic.h
+++ b/arch/microblaze/include/asm/atomic.h
@@ -118,6 +118,6 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 #define smp_mb__before_atomic_inc() barrier()
 #define smp_mb__after_atomic_inc() barrier()

-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>

 #endif /* _ASM_MICROBLAZE_ATOMIC_H */
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 1b332e15ab52..eb7f01cfd1ac 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -793,6 +793,6 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 #define smp_mb__before_atomic_inc() smp_llsc_mb()
 #define smp_mb__after_atomic_inc() smp_llsc_mb()

-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>

 #endif /* _ASM_ATOMIC_H */
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index bc064825f9b1..5bf5be9566de 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -151,7 +151,7 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 #define smp_mb__before_atomic_inc() barrier()
 #define smp_mb__after_atomic_inc() barrier()

-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>

 #endif /* __KERNEL__ */
 #endif /* _ASM_ATOMIC_H */
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index ada3e5364d82..7eeaff944360 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -338,6 +338,6 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 #endif /* CONFIG_64BIT */

-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>

 #endif /* _ASM_PARISC_ATOMIC_H_ */
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index b401950f5259..b7d2d07b6f96 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -472,6 +472,6 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 #endif /* __powerpc64__ */

-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_ATOMIC_H_ */
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index de432f2de2d2..fca9dffcc669 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -275,6 +275,6 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
 #define smp_mb__before_atomic_inc() smp_mb()
 #define smp_mb__after_atomic_inc() smp_mb()

-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
 #endif /* __KERNEL__ */
 #endif /* __ARCH_S390_ATOMIC__ */
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index 6327ffbb1992..a5647d0cd179 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -84,5 +84,5 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define smp_mb__before_atomic_inc() barrier()
 #define smp_mb__after_atomic_inc() barrier()

-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
 #endif /* __ASM_SH_ATOMIC_H */
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index bb91b1248cd1..f0d343c3b956 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -161,5 +161,5 @@ static inline int __atomic24_sub(int i, atomic24_t *v)
 #endif /* !(__KERNEL__) */

-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
 #endif /* !(__ARCH_SPARC_ATOMIC__) */
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index a0a706492696..f2e48009989e 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -114,5 +114,5 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
 #define smp_mb__before_atomic_inc() barrier()
 #define smp_mb__after_atomic_inc() barrier()

-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
 #endif /* !(__ARCH_SPARC64_ATOMIC__) */
diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
index 85b46fba4229..c83d31486081 100644
--- a/arch/x86/include/asm/atomic_32.h
+++ b/arch/x86/include/asm/atomic_32.h
@@ -247,5 +247,5 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define smp_mb__before_atomic_inc() barrier()
 #define smp_mb__after_atomic_inc() barrier()

-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
 #endif /* _ASM_X86_ATOMIC_32_H */
diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
index 8c21731984da..0d6360220007 100644
--- a/arch/x86/include/asm/atomic_64.h
+++ b/arch/x86/include/asm/atomic_64.h
@@ -455,5 +455,5 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
 #define smp_mb__before_atomic_inc() barrier()
 #define smp_mb__after_atomic_inc() barrier()

-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
 #endif /* _ASM_X86_ATOMIC_64_H */
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 67ad67bed8c1..22d6dde42619 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -292,7 +292,7 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 #define smp_mb__before_atomic_inc() barrier()
 #define smp_mb__after_atomic_inc() barrier()

-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>

 #endif /* __KERNEL__ */
 #endif /* _XTENSA_ATOMIC_H */
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
new file mode 100644
index 000000000000..76e27d66c055
--- /dev/null
+++ b/include/asm-generic/atomic-long.h
@@ -0,0 +1,258 @@
+#ifndef _ASM_GENERIC_ATOMIC_LONG_H
+#define _ASM_GENERIC_ATOMIC_LONG_H
+/*
+ * Copyright (C) 2005 Silicon Graphics, Inc.
+ *	Christoph Lameter
+ *
+ * Allows to provide arch independent atomic definitions without the need to
+ * edit all arch specific atomic.h files.
+ */
+
+#include <asm/types.h>
+
+/*
+ * Suppport for atomic_long_t
+ *
+ * Casts for parameters are avoided for existing atomic functions in order to
+ * avoid issues with cast-as-lval under gcc 4.x and other limitations that the
+ * macros of a platform may have.
+ */
+
+#if BITS_PER_LONG == 64
+
+typedef atomic64_t atomic_long_t;
+
+#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
+
+static inline long atomic_long_read(atomic_long_t *l)
+{
+        atomic64_t *v = (atomic64_t *)l;
+
+        return (long)atomic64_read(v);
+}
+
+static inline void atomic_long_set(atomic_long_t *l, long i)
+{
+        atomic64_t *v = (atomic64_t *)l;
+
+        atomic64_set(v, i);
+}
+
+static inline void atomic_long_inc(atomic_long_t *l)
+{
+        atomic64_t *v = (atomic64_t *)l;
+
+        atomic64_inc(v);
+}
+
+static inline void atomic_long_dec(atomic_long_t *l)
+{
+        atomic64_t *v = (atomic64_t *)l;
+
+        atomic64_dec(v);
+}
+
+static inline void atomic_long_add(long i, atomic_long_t *l)
+{
+        atomic64_t *v = (atomic64_t *)l;
+
+        atomic64_add(i, v);
+}
+
+static inline void atomic_long_sub(long i, atomic_long_t *l)
+{
+        atomic64_t *v = (atomic64_t *)l;
+
+        atomic64_sub(i, v);
+}
+
+static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
+{
+        atomic64_t *v = (atomic64_t *)l;
+
+        return atomic64_sub_and_test(i, v);
+}
+
+static inline int atomic_long_dec_and_test(atomic_long_t *l)
+{
+        atomic64_t *v = (atomic64_t *)l;
+
+        return atomic64_dec_and_test(v);
+}
+
+static inline int atomic_long_inc_and_test(atomic_long_t *l)
+{
+        atomic64_t *v = (atomic64_t *)l;
+
+        return atomic64_inc_and_test(v);
+}
+
+static inline int atomic_long_add_negative(long i, atomic_long_t *l)
+{
+        atomic64_t *v = (atomic64_t *)l;
+
+        return atomic64_add_negative(i, v);
+}
+
+static inline long atomic_long_add_return(long i, atomic_long_t *l)
+{
+        atomic64_t *v = (atomic64_t *)l;
+
+        return (long)atomic64_add_return(i, v);
+}
+
+static inline long atomic_long_sub_return(long i, atomic_long_t *l)
+{
+        atomic64_t *v = (atomic64_t *)l;
+
+        return (long)atomic64_sub_return(i, v);
+}
+
+static inline long atomic_long_inc_return(atomic_long_t *l)
+{
+        atomic64_t *v = (atomic64_t *)l;
+
+        return (long)atomic64_inc_return(v);
+}
+
+static inline long atomic_long_dec_return(atomic_long_t *l)
+{
+        atomic64_t *v = (atomic64_t *)l;
+
+        return (long)atomic64_dec_return(v);
+}
+
+static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+{
+        atomic64_t *v = (atomic64_t *)l;
+
+        return (long)atomic64_add_unless(v, a, u);
+}
+
+#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
+
+#define atomic_long_cmpxchg(l, old, new) \
+        (atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
+#define atomic_long_xchg(v, new) \
+        (atomic64_xchg((atomic64_t *)(l), (new)))
+
+#else /* BITS_PER_LONG == 64 */
+
+typedef atomic_t atomic_long_t;
+
+#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
+static inline long atomic_long_read(atomic_long_t *l)
+{
+        atomic_t *v = (atomic_t *)l;
+
+        return (long)atomic_read(v);
+}
+
+static inline void atomic_long_set(atomic_long_t *l, long i)
+{
+        atomic_t *v = (atomic_t *)l;
+
+        atomic_set(v, i);
+}
+
+static inline void atomic_long_inc(atomic_long_t *l)
+{
+        atomic_t *v = (atomic_t *)l;
+
+        atomic_inc(v);
+}
+
+static inline void atomic_long_dec(atomic_long_t *l)
+{
+        atomic_t *v = (atomic_t *)l;
+
+        atomic_dec(v);
+}
+
+static inline void atomic_long_add(long i, atomic_long_t *l)
+{
+        atomic_t *v = (atomic_t *)l;
+
+        atomic_add(i, v);
+}
+
+static inline void atomic_long_sub(long i, atomic_long_t *l)
+{
+        atomic_t *v = (atomic_t *)l;
+
+        atomic_sub(i, v);
+}
+
+static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
+{
+        atomic_t *v = (atomic_t *)l;
+
+        return atomic_sub_and_test(i, v);
+}
+
+static inline int atomic_long_dec_and_test(atomic_long_t *l)
+{
+        atomic_t *v = (atomic_t *)l;
+
+        return atomic_dec_and_test(v);
+}
+
+static inline int atomic_long_inc_and_test(atomic_long_t *l)
+{
+        atomic_t *v = (atomic_t *)l;
+
+        return atomic_inc_and_test(v);
+}
+
+static inline int atomic_long_add_negative(long i, atomic_long_t *l)
+{
+        atomic_t *v = (atomic_t *)l;
+
+        return atomic_add_negative(i, v);
+}
+
+static inline long atomic_long_add_return(long i, atomic_long_t *l)
+{
+        atomic_t *v = (atomic_t *)l;
+
+        return (long)atomic_add_return(i, v);
+}
+
+static inline long atomic_long_sub_return(long i, atomic_long_t *l)
+{
+        atomic_t *v = (atomic_t *)l;
+
+        return (long)atomic_sub_return(i, v);
+}
+
+static inline long atomic_long_inc_return(atomic_long_t *l)
+{
+        atomic_t *v = (atomic_t *)l;
+
+        return (long)atomic_inc_return(v);
+}
+
+static inline long atomic_long_dec_return(atomic_long_t *l)
+{
+        atomic_t *v = (atomic_t *)l;
+
+        return (long)atomic_dec_return(v);
+}
+
+static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+{
+        atomic_t *v = (atomic_t *)l;
+
+        return (long)atomic_add_unless(v, a, u);
+}
+
+#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
+
+#define atomic_long_cmpxchg(l, old, new) \
+        (atomic_cmpxchg((atomic_t *)(l), (old), (new)))
+#define atomic_long_xchg(v, new) \
+        (atomic_xchg((atomic_t *)(v), (new)))
+
+#endif /* BITS_PER_LONG == 64 */
+
+#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
deleted file mode 100644
index 3673a13b6703..000000000000
--- a/include/asm-generic/atomic.h
+++ /dev/null
@@ -1,258 +0,0 @@
-#ifndef _ASM_GENERIC_ATOMIC_H
-#define _ASM_GENERIC_ATOMIC_H
-/*
- * Copyright (C) 2005 Silicon Graphics, Inc.
- *	Christoph Lameter
- *
- * Allows to provide arch independent atomic definitions without the need to
- * edit all arch specific atomic.h files.
- */
-
-#include <asm/types.h>
-
-/*
- * Suppport for atomic_long_t
- *
- * Casts for parameters are avoided for existing atomic functions in order to
- * avoid issues with cast-as-lval under gcc 4.x and other limitations that the
- * macros of a platform may have.
- */
-
-#if BITS_PER_LONG == 64
-
-typedef atomic64_t atomic_long_t;
-
-#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
-
-static inline long atomic_long_read(atomic_long_t *l)
-{
-        atomic64_t *v = (atomic64_t *)l;
-
-        return (long)atomic64_read(v);
-}
-
-static inline void atomic_long_set(atomic_long_t *l, long i)
-{
-        atomic64_t *v = (atomic64_t *)l;
-
-        atomic64_set(v, i);
-}
-
-static inline void atomic_long_inc(atomic_long_t *l)
-{
-        atomic64_t *v = (atomic64_t *)l;
-
-        atomic64_inc(v);
-}
-
-static inline void atomic_long_dec(atomic_long_t *l)
-{
-        atomic64_t *v = (atomic64_t *)l;
-
-        atomic64_dec(v);
-}
-
-static inline void atomic_long_add(long i, atomic_long_t *l)
-{
-        atomic64_t *v = (atomic64_t *)l;
-
-        atomic64_add(i, v);
-}
-
-static inline void atomic_long_sub(long i, atomic_long_t *l)
-{
-        atomic64_t *v = (atomic64_t *)l;
-
-        atomic64_sub(i, v);
-}
-
-static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
-{
-        atomic64_t *v = (atomic64_t *)l;
-
-        return atomic64_sub_and_test(i, v);
-}
-
-static inline int atomic_long_dec_and_test(atomic_long_t *l)
-{
-        atomic64_t *v = (atomic64_t *)l;
-
-        return atomic64_dec_and_test(v);
-}
-
-static inline int atomic_long_inc_and_test(atomic_long_t *l)
-{
-        atomic64_t *v = (atomic64_t *)l;
-
-        return atomic64_inc_and_test(v);
-}
-
-static inline int atomic_long_add_negative(long i, atomic_long_t *l)
-{
-        atomic64_t *v = (atomic64_t *)l;
-
-        return atomic64_add_negative(i, v);
-}
-
-static inline long atomic_long_add_return(long i, atomic_long_t *l)
-{
-        atomic64_t *v = (atomic64_t *)l;
-
-        return (long)atomic64_add_return(i, v);
-}
-
-static inline long atomic_long_sub_return(long i, atomic_long_t *l)
-{
-        atomic64_t *v = (atomic64_t *)l;
-
-        return (long)atomic64_sub_return(i, v);
-}
-
-static inline long atomic_long_inc_return(atomic_long_t *l)
-{
-        atomic64_t *v = (atomic64_t *)l;
-
-        return (long)atomic64_inc_return(v);
-}
-
-static inline long atomic_long_dec_return(atomic_long_t *l)
-{
-        atomic64_t *v = (atomic64_t *)l;
-
-        return (long)atomic64_dec_return(v);
-}
-
-static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
-{
-        atomic64_t *v = (atomic64_t *)l;
-
-        return (long)atomic64_add_unless(v, a, u);
-}
-
-#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
-
-#define atomic_long_cmpxchg(l, old, new) \
-        (atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
-#define atomic_long_xchg(v, new) \
-        (atomic64_xchg((atomic64_t *)(l), (new)))
-
-#else /* BITS_PER_LONG == 64 */
-
-typedef atomic_t atomic_long_t;
-
-#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
-static inline long atomic_long_read(atomic_long_t *l)
-{
-        atomic_t *v = (atomic_t *)l;
-
-        return (long)atomic_read(v);
-}
-
-static inline void atomic_long_set(atomic_long_t *l, long i)
-{
-        atomic_t *v = (atomic_t *)l;
-
-        atomic_set(v, i);
-}
-
-static inline void atomic_long_inc(atomic_long_t *l)
-{
-        atomic_t *v = (atomic_t *)l;
-
-        atomic_inc(v);
-}
-
-static inline void atomic_long_dec(atomic_long_t *l)
-{
-        atomic_t *v = (atomic_t *)l;
-
-        atomic_dec(v);
-}
-
-static inline void atomic_long_add(long i, atomic_long_t *l)
-{
-        atomic_t *v = (atomic_t *)l;
-
-        atomic_add(i, v);
-}
-
-static inline void atomic_long_sub(long i, atomic_long_t *l)
-{
-        atomic_t *v = (atomic_t *)l;
-
-        atomic_sub(i, v);
-}
-
-static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
-{
-        atomic_t *v = (atomic_t *)l;
-
-        return atomic_sub_and_test(i, v);
-}
-
-static inline int atomic_long_dec_and_test(atomic_long_t *l)
-{
-        atomic_t *v = (atomic_t *)l;
-
-        return atomic_dec_and_test(v);
-}
-
-static inline int atomic_long_inc_and_test(atomic_long_t *l)
-{
-        atomic_t *v = (atomic_t *)l;
-
-        return atomic_inc_and_test(v);
-}
-
-static inline int atomic_long_add_negative(long i, atomic_long_t *l)
-{
-        atomic_t *v = (atomic_t *)l;
-
-        return atomic_add_negative(i, v);
-}
-
-static inline long atomic_long_add_return(long i, atomic_long_t *l)
-{
-        atomic_t *v = (atomic_t *)l;
-
-        return (long)atomic_add_return(i, v);
-}
-
-static inline long atomic_long_sub_return(long i, atomic_long_t *l)
-{
-        atomic_t *v = (atomic_t *)l;
-
-        return (long)atomic_sub_return(i, v);
-}
-
-static inline long atomic_long_inc_return(atomic_long_t *l)
-{
-        atomic_t *v = (atomic_t *)l;
-
-        return (long)atomic_inc_return(v);
-}
-
-static inline long atomic_long_dec_return(atomic_long_t *l)
-{
-        atomic_t *v = (atomic_t *)l;
-
-        return (long)atomic_dec_return(v);
-}
-
-static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
-{
-        atomic_t *v = (atomic_t *)l;
-
-        return (long)atomic_add_unless(v, a, u);
-}
-
-#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
-
-#define atomic_long_cmpxchg(l, old, new) \
-        (atomic_cmpxchg((atomic_t *)(l), (old), (new)))
-#define atomic_long_xchg(v, new) \
-        (atomic_xchg((atomic_t *)(v), (new)))
-
-#endif /* BITS_PER_LONG == 64 */
-
-#endif /* _ASM_GENERIC_ATOMIC_H */
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index 4657f3e410fc..c8946465e63a 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -2,6 +2,7 @@
 #define _ASM_GENERIC_BITOPS_ATOMIC_H_

 #include <asm/types.h>
+#include <linux/irqflags.h>

 #ifdef CONFIG_SMP
 #include <asm/spinlock.h>
-- cgit v1.2.3
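[Editor's note] The rename above is purely mechanical: atomic_long_t remains atomic64_t on 64-bit kernels and atomic_t on 32-bit kernels, and every wrapper simply forwards to the matching atomic_*/atomic64_* primitive. As a quick illustration of the interface (a hypothetical counter, not part of the patch; in-kernel code normally gets these definitions via <asm/atomic.h> rather than by including atomic-long.h directly):

    /* sketch: counting objects with the atomic_long_t API defined above */
    static atomic_long_t nr_widgets = ATOMIC_LONG_INIT(0);

    static void widget_create(void)
    {
            atomic_long_inc(&nr_widgets);
    }

    static int widget_destroy(void)
    {
            /* atomic_long_dec_and_test() returns nonzero when the count hits zero */
            return atomic_long_dec_and_test(&nr_widgets);
    }

    static long widgets_outstanding(void)
    {
            return atomic_long_read(&nr_widgets);
    }
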
From 5b17e1cd8928ae65932758ce6478ac6d3e9a86b2 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Wed, 13 May 2009 22:56:30 +0000
Subject: asm-generic: rename page.h and uaccess.h

The current asm-generic/page.h only contains the get_order function,
and asm-generic/uaccess.h only implements unaligned accesses. This
renames the files to getorder.h and uaccess-unaligned.h to make room
for new page.h and uaccess.h files that will be usable by all simple
(e.g. nommu) architectures.

Signed-off-by: Remis Lima Baima
Signed-off-by: Arnd Bergmann
---
 arch/alpha/include/asm/page.h           |  2 +-
 arch/arm/include/asm/page.h             |  2 +-
 arch/blackfin/include/asm/page.h        |  2 +-
 arch/cris/include/asm/page.h            |  2 +-
 arch/frv/include/asm/page.h             |  2 +-
 arch/h8300/include/asm/page.h           |  2 +-
 arch/m32r/include/asm/page.h            |  2 +-
 arch/m68k/include/asm/page_mm.h         |  2 +-
 arch/m68k/include/asm/page_no.h         |  2 +-
 arch/microblaze/include/asm/page.h      |  2 +-
 arch/mips/include/asm/page.h            |  2 +-
 arch/parisc/include/asm/page.h          |  2 +-
 arch/parisc/include/asm/uaccess.h       |  2 +-
 arch/powerpc/include/asm/page_32.h      |  2 +-
 arch/powerpc/include/asm/page_64.h      |  2 +-
 arch/s390/include/asm/page.h            |  2 +-
 arch/sh/include/asm/page.h              |  2 +-
 arch/sparc/include/asm/page_32.h        |  2 +-
 arch/sparc/include/asm/page_64.h        |  2 +-
 arch/sparc/include/asm/uaccess_64.h     |  2 +-
 arch/um/include/asm/page.h              |  2 +-
 arch/x86/include/asm/page.h             |  2 +-
 arch/xtensa/include/asm/page.h          |  2 +-
 include/asm-generic/getorder.h          | 24 ++++++++++++++++++++++++
 include/asm-generic/page.h              | 24 ------------------------
 include/asm-generic/uaccess-unaligned.h | 26 ++++++++++++++++++++++++++
 include/asm-generic/uaccess.h           | 26 --------------------------
 27 files changed, 73 insertions(+), 73 deletions(-)
 create mode 100644 include/asm-generic/getorder.h
 delete mode 100644 include/asm-generic/page.h
 create mode 100644 include/asm-generic/uaccess-unaligned.h
 delete mode 100644 include/asm-generic/uaccess.h

(limited to 'arch/xtensa')

diff --git a/arch/alpha/include/asm/page.h b/arch/alpha/include/asm/page.h
index 0995f9d13417..07af062544fb 100644
--- a/arch/alpha/include/asm/page.h
+++ b/arch/alpha/include/asm/page.h
@@ -93,6 +93,6 @@ typedef struct page *pgtable_t;
	 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

 #include <asm-generic/memory_model.h>
-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>

 #endif /* _ALPHA_PAGE_H */
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 7b522770f29d..be962c1349c4 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -202,6 +202,6 @@ typedef struct page *pgtable_t;
	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>

 #endif
diff --git a/arch/blackfin/include/asm/page.h b/arch/blackfin/include/asm/page.h
index 344f6a8c1f22..3ea2016a1d4a 100644
--- a/arch/blackfin/include/asm/page.h
+++ b/arch/blackfin/include/asm/page.h
@@ -81,7 +81,7 @@ extern unsigned long memory_end;
 #define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
				((void *)(kaddr) < (void *)memory_end))

-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>

 #endif /* __ASSEMBLY__ */
diff --git a/arch/cris/include/asm/page.h b/arch/cris/include/asm/page.h
index f3fdbd09c34c..be45ee366be9 100644
--- a/arch/cris/include/asm/page.h
+++ b/arch/cris/include/asm/page.h
@@ -68,7 +68,7 @@ typedef struct page *pgtable_t;
	 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

 #include <asm-generic/memory_model.h>
-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>

 #endif /* _CRIS_PAGE_H */
diff --git a/arch/frv/include/asm/page.h b/arch/frv/include/asm/page.h
index bd9c220094c7..25c6a5002355 100644
--- a/arch/frv/include/asm/page.h
+++ b/arch/frv/include/asm/page.h
@@ -73,6 +73,6 @@ extern unsigned long max_pfn;
 #endif /* __ASSEMBLY__ */

 #include <asm-generic/memory_model.h>
-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>

 #endif /* _ASM_PAGE_H */
diff --git a/arch/h8300/include/asm/page.h b/arch/h8300/include/asm/page.h
index 0b6acf0b03aa..837381a2df46 100644
--- a/arch/h8300/include/asm/page.h
+++ b/arch/h8300/include/asm/page.h
@@ -73,6 +73,6 @@ extern unsigned long memory_end;
 #endif /* __ASSEMBLY__ */

 #include <asm-generic/memory_model.h>
-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>

 #endif /* _H8300_PAGE_H */
diff --git a/arch/m32r/include/asm/page.h b/arch/m32r/include/asm/page.h
index c9333089fe11..11777f7a5628 100644
--- a/arch/m32r/include/asm/page.h
+++ b/arch/m32r/include/asm/page.h
@@ -82,6 +82,6 @@ typedef struct page *pgtable_t;
 #define devmem_is_allowed(x) 1

 #include <asm-generic/memory_model.h>
-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>

 #endif /* _ASM_M32R_PAGE_H */
diff --git a/arch/m68k/include/asm/page_mm.h b/arch/m68k/include/asm/page_mm.h
index a34b8bad7847..d009f3ea39ab 100644
--- a/arch/m68k/include/asm/page_mm.h
+++ b/arch/m68k/include/asm/page_mm.h
@@ -223,6 +223,6 @@ static inline __attribute_const__ int __virt_to_node_shift(void)
 #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>

 #endif /* _M68K_PAGE_H */
diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h
index 3a1ede4544cb..9aa3f90f4855 100644
--- a/arch/m68k/include/asm/page_no.h
+++ b/arch/m68k/include/asm/page_no.h
@@ -72,6 +72,6 @@ extern unsigned long memory_end;

 #endif /* __ASSEMBLY__ */

-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>

 #endif /* _M68KNOMMU_PAGE_H */
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index 7238dcfcc517..962c210e5b9a 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -135,6 +135,6 @@ extern unsigned int memory_size;
 #endif /* __KERNEL__ */

 #include <asm-generic/memory_model.h>
-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>

 #endif /* _ASM_MICROBLAZE_PAGE_H */
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index 9f946e4ca057..72c80d2034c2 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -189,6 +189,6 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 #define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET)

 #include <asm-generic/memory_model.h>
-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>

 #endif /* _ASM_PAGE_H */
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index 7bc5125d7d4c..a84cc1f925f6 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -159,6 +159,6 @@ extern int npmem_ranges;
	 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

 #include <asm-generic/memory_model.h>
-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>

 #endif /* _PARISC_PAGE_H */
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index cd4c0b2a8e70..7cf799d70b4c 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -7,7 +7,7 @@
 #include
 #include
 #include
-#include <asm-generic/uaccess.h>
+#include <asm-generic/uaccess-unaligned.h>

 #define VERIFY_READ 0
 #define VERIFY_WRITE 1
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index a0e3f6e6b4ee..bd0849dbcaaa 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -41,7 +41,7 @@ extern void clear_pages(void *page, int order);
 static inline void clear_page(void *page) { clear_pages(page, 0); }
 extern void copy_page(void *to, void *from);

-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>

 #define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1)
 #define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1)
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index 043bfdfe4f73..5817a3b747e5 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -180,6 +180,6 @@ do { \
	(test_thread_flag(TIF_32BIT) ? \
	 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)

-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>

 #endif /* _ASM_POWERPC_PAGE_64_H */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 32e8f6aa4384..3e3594d01f83 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -150,7 +150,7 @@ void arch_alloc_page(struct page *page, int order);
	 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

 #include <asm-generic/memory_model.h>
-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>

 #define __HAVE_ARCH_GATE_AREA 1
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 9c6d21ec0240..49592c780a6e 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -163,7 +163,7 @@ typedef struct page *pgtable_t;
	 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

 #include <asm-generic/memory_model.h>
-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>

 /* vDSO support */
 #ifdef CONFIG_VSYSCALL
diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
index d1806edc0958..f72080bdda94 100644
--- a/arch/sparc/include/asm/page_32.h
+++ b/arch/sparc/include/asm/page_32.h
@@ -152,6 +152,6 @@ extern unsigned long pfn_base;
	 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

 #include <asm-generic/memory_model.h>
-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>

 #endif /* _SPARC_PAGE_H */
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index 4274ed13ddb2..f0d09b401036 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -132,6 +132,6 @@ typedef struct page *pgtable_t;
 #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>

 #endif /* _SPARC64_PAGE_H */
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index c64e767a3e4b..a38c03238918 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -12,7 +12,7 @@
 #include
 #include
 #include
-#include <asm-generic/uaccess.h>
+#include <asm-generic/uaccess-unaligned.h>
 #endif

 #ifndef __ASSEMBLY__
diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
index 55f28a0bae6d..4cc9b6cf480a 100644
--- a/arch/um/include/asm/page.h
+++ b/arch/um/include/asm/page.h
@@ -116,7 +116,7 @@ extern unsigned long uml_physmem;
 #define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))

 #include <asm-generic/memory_model.h>
-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>

 #endif /* __ASSEMBLY__ */
 #endif /* __UM_PAGE_H */
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 89ed9d70b0aa..625c3f0e741a 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -56,7 +56,7 @@ extern bool __virt_addr_valid(unsigned long kaddr);
 #endif /* __ASSEMBLY__ */

 #include <asm-generic/memory_model.h>
-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>

 #define __HAVE_ARCH_GATE_AREA 1
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 17e0c5383b10..161bb89e98c8 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -129,7 +129,7 @@ static inline __attribute_const__ int get_order(unsigned long size)

 #else

-# include <asm-generic/page.h>
+# include <asm-generic/getorder.h>

 #endif
diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
new file mode 100644
index 000000000000..67e7245dc9b3
--- /dev/null
+++ b/include/asm-generic/getorder.h
@@ -0,0 +1,24 @@
+#ifndef __ASM_GENERIC_GETORDER_H
+#define __ASM_GENERIC_GETORDER_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+
+/* Pure 2^n version of get_order */
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+        int order;
+
+        size = (size - 1) >> (PAGE_SHIFT - 1);
+        order = -1;
+        do {
+                size >>= 1;
+                order++;
+        } while (size);
+        return order;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_GENERIC_GETORDER_H */
diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h
deleted file mode 100644
index 14db733b8e68..000000000000
--- a/include/asm-generic/page.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef _ASM_GENERIC_PAGE_H
-#define _ASM_GENERIC_PAGE_H
-
-#ifndef __ASSEMBLY__
-
-#include <linux/compiler.h>
-
-/* Pure 2^n version of get_order */
-static __inline__ __attribute_const__ int get_order(unsigned long size)
-{
-        int order;
-
-        size = (size - 1) >> (PAGE_SHIFT - 1);
-        order = -1;
-        do {
-                size >>= 1;
-                order++;
-        } while (size);
-        return order;
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_GENERIC_PAGE_H */
diff --git a/include/asm-generic/uaccess-unaligned.h b/include/asm-generic/uaccess-unaligned.h
new file mode 100644
index 000000000000..67deb898f0c5
--- /dev/null
+++ b/include/asm-generic/uaccess-unaligned.h
@@ -0,0 +1,26 @@
+#ifndef __ASM_GENERIC_UACCESS_UNALIGNED_H
+#define __ASM_GENERIC_UACCESS_UNALIGNED_H
+
+/*
+ * This macro should be used instead of __get_user() when accessing
+ * values at locations that are not known to be aligned.
+ */
+#define __get_user_unaligned(x, ptr) \
+({ \
+        __typeof__ (*(ptr)) __x; \
+        __copy_from_user(&__x, (ptr), sizeof(*(ptr))) ? -EFAULT : 0; \
+        (x) = __x; \
+})
+
+
+/*
+ * This macro should be used instead of __put_user() when accessing
+ * values at locations that are not known to be aligned.
+ */
+#define __put_user_unaligned(x, ptr) \
+({ \
+        __typeof__ (*(ptr)) __x = (x); \
+        __copy_to_user((ptr), &__x, sizeof(*(ptr))) ? -EFAULT : 0; \
+})
+
+#endif /* __ASM_GENERIC_UACCESS_UNALIGNED_H */
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
deleted file mode 100644
index 549cb3a1640a..000000000000
--- a/include/asm-generic/uaccess.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef _ASM_GENERIC_UACCESS_H_
-#define _ASM_GENERIC_UACCESS_H_
-
-/*
- * This macro should be used instead of __get_user() when accessing
- * values at locations that are not known to be aligned.
- */
-#define __get_user_unaligned(x, ptr) \
-({ \
-        __typeof__ (*(ptr)) __x; \
-        __copy_from_user(&__x, (ptr), sizeof(*(ptr))) ? -EFAULT : 0; \
-        (x) = __x; \
-})
-
-
-/*
- * This macro should be used instead of __put_user() when accessing
- * values at locations that are not known to be aligned.
- */
-#define __put_user_unaligned(x, ptr) \
-({ \
-        __typeof__ (*(ptr)) __x = (x); \
-        __copy_to_user((ptr), &__x, sizeof(*(ptr))) ? -EFAULT : 0; \
-})
-
-#endif /* _ASM_GENERIC_UACCESS_H */
-- cgit v1.2.3
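[Editor's note] get_order(), moved verbatim into getorder.h above, returns the smallest page-allocation order whose block (PAGE_SIZE << order) covers the requested size; it is the value typically passed to __get_free_pages() or alloc_pages(). A short worked example, assuming the common 4 KiB page size (PAGE_SHIFT == 12); the results follow directly from the loop shown above:

    /* sketch: what get_order() evaluates to with 4 KiB pages */
    get_order(1);       /* 0 -> one 4 KiB page        */
    get_order(4096);    /* 0 -> still a single page   */
    get_order(4097);    /* 1 -> needs an 8 KiB block  */
    get_order(8192);    /* 1                          */
    get_order(65536);   /* 4 -> sixteen pages         */
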