diff options
| field | value | detail |
|---|---|---|
| author | Chris Zankel <chris@zankel.net> | 2014-10-20 21:26:01 -0700 |
| committer | Chris Zankel <chris@zankel.net> | 2014-10-20 21:26:01 -0700 |
| commit | a13926db3e5ea5bbac297f59e9f35061f52892d3 (patch) | |
| tree | b794f25550b7dbbe9cc8eda81633df9023b4821c /arch/xtensa | |
| parent | f2589bff1ce8b94cebc044e5dfeac4d4e8701cbc (diff) | |
| parent | f114040e3ea6e07372334ade75d1ee0775c355e1 (diff) | |
| download | linux-a13926db3e5ea5bbac297f59e9f35061f52892d3.tar.bz2 | |
Merge tag 'v3.18-rc1' into for_next
Linux 3.18-rc1
Diffstat (limited to 'arch/xtensa')
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | arch/xtensa/include/asm/Kbuild | 1 |
| -rw-r--r-- | arch/xtensa/include/asm/atomic.h | 235 |
| -rw-r--r-- | arch/xtensa/kernel/ptrace.c | 2 |
| -rw-r--r-- | arch/xtensa/kernel/smp.c | 1 |
4 files changed, 86 insertions, 153 deletions
| diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index c3d20ba6eb86..105d38922c44 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -12,6 +12,7 @@ generic-y += hardirq.h  generic-y += hash.h  generic-y += ioctl.h  generic-y += irq_regs.h +generic-y += irq_work.h  generic-y += kdebug.h  generic-y += kmap_types.h  generic-y += kvm_para.h diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index e5103b47a8ce..00b7d46b35b8 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h @@ -47,7 +47,7 @@   *   * Atomically reads the value of @v.   */ -#define atomic_read(v)		(*(volatile int *)&(v)->counter) +#define atomic_read(v)		ACCESS_ONCE((v)->counter)  /**   * atomic_set - set atomic variable @@ -58,165 +58,96 @@   */  #define atomic_set(v,i)		((v)->counter = (i)) -/** - * atomic_add - add integer to atomic variable - * @i: integer value to add - * @v: pointer of type atomic_t - * - * Atomically adds @i to @v. - */ -static inline void atomic_add(int i, atomic_t * v) -{  #if XCHAL_HAVE_S32C1I -	unsigned long tmp; -	int result; - -	__asm__ __volatile__( -			"1:     l32i    %1, %3, 0\n" -			"       wsr     %1, scompare1\n" -			"       add     %0, %1, %2\n" -			"       s32c1i  %0, %3, 0\n" -			"       bne     %0, %1, 1b\n" -			: "=&a" (result), "=&a" (tmp) -			: "a" (i), "a" (v) -			: "memory" -			); -#else -	unsigned int vval; - -	__asm__ __volatile__( -			"       rsil    a15, "__stringify(LOCKLEVEL)"\n" -			"       l32i    %0, %2, 0\n" -			"       add     %0, %0, %1\n" -			"       s32i    %0, %2, 0\n" -			"       wsr     a15, ps\n" -			"       rsync\n" -			: "=&a" (vval) -			: "a" (i), "a" (v) -			: "a15", "memory" -			); -#endif -} - -/** - * atomic_sub - subtract the atomic variable - * @i: integer value to subtract - * @v: pointer of type atomic_t - * - * Atomically subtracts @i from @v. 
- */ -static inline void atomic_sub(int i, atomic_t *v) -{ -#if XCHAL_HAVE_S32C1I -	unsigned long tmp; -	int result; - -	__asm__ __volatile__( -			"1:     l32i    %1, %3, 0\n" -			"       wsr     %1, scompare1\n" -			"       sub     %0, %1, %2\n" -			"       s32c1i  %0, %3, 0\n" -			"       bne     %0, %1, 1b\n" -			: "=&a" (result), "=&a" (tmp) -			: "a" (i), "a" (v) -			: "memory" -			); -#else -	unsigned int vval; - -	__asm__ __volatile__( -			"       rsil    a15, "__stringify(LOCKLEVEL)"\n" -			"       l32i    %0, %2, 0\n" -			"       sub     %0, %0, %1\n" -			"       s32i    %0, %2, 0\n" -			"       wsr     a15, ps\n" -			"       rsync\n" -			: "=&a" (vval) -			: "a" (i), "a" (v) -			: "a15", "memory" -			); -#endif +#define ATOMIC_OP(op)							\ +static inline void atomic_##op(int i, atomic_t * v)			\ +{									\ +	unsigned long tmp;						\ +	int result;							\ +									\ +	__asm__ __volatile__(						\ +			"1:     l32i    %1, %3, 0\n"			\ +			"       wsr     %1, scompare1\n"		\ +			"       " #op " %0, %1, %2\n"			\ +			"       s32c1i  %0, %3, 0\n"			\ +			"       bne     %0, %1, 1b\n"			\ +			: "=&a" (result), "=&a" (tmp)			\ +			: "a" (i), "a" (v)				\ +			: "memory"					\ +			);						\ +}									\ + +#define ATOMIC_OP_RETURN(op)						\ +static inline int atomic_##op##_return(int i, atomic_t * v)		\ +{									\ +	unsigned long tmp;						\ +	int result;							\ +									\ +	__asm__ __volatile__(						\ +			"1:     l32i    %1, %3, 0\n"			\ +			"       wsr     %1, scompare1\n"		\ +			"       " #op " %0, %1, %2\n"			\ +			"       s32c1i  %0, %3, 0\n"			\ +			"       bne     %0, %1, 1b\n"			\ +			"       " #op " %0, %0, %2\n"			\ +			: "=&a" (result), "=&a" (tmp)			\ +			: "a" (i), "a" (v)				\ +			: "memory"					\ +			);						\ +									\ +	return result;							\  } -/* - * We use atomic_{add|sub}_return to define other functions. 
- */ - -static inline int atomic_add_return(int i, atomic_t * v) -{ -#if XCHAL_HAVE_S32C1I -	unsigned long tmp; -	int result; - -	__asm__ __volatile__( -			"1:     l32i    %1, %3, 0\n" -			"       wsr     %1, scompare1\n" -			"       add     %0, %1, %2\n" -			"       s32c1i  %0, %3, 0\n" -			"       bne     %0, %1, 1b\n" -			"       add     %0, %0, %2\n" -			: "=&a" (result), "=&a" (tmp) -			: "a" (i), "a" (v) -			: "memory" -			); - -	return result; -#else -	unsigned int vval; - -	__asm__ __volatile__( -			"       rsil    a15,"__stringify(LOCKLEVEL)"\n" -			"       l32i    %0, %2, 0\n" -			"       add     %0, %0, %1\n" -			"       s32i    %0, %2, 0\n" -			"       wsr     a15, ps\n" -			"       rsync\n" -			: "=&a" (vval) -			: "a" (i), "a" (v) -			: "a15", "memory" -			); - -	return vval; -#endif +#else /* XCHAL_HAVE_S32C1I */ + +#define ATOMIC_OP(op)							\ +static inline void atomic_##op(int i, atomic_t * v)			\ +{									\ +	unsigned int vval;						\ +									\ +	__asm__ __volatile__(						\ +			"       rsil    a15, "__stringify(LOCKLEVEL)"\n"\ +			"       l32i    %0, %2, 0\n"			\ +			"       " #op " %0, %0, %1\n"			\ +			"       s32i    %0, %2, 0\n"			\ +			"       wsr     a15, ps\n"			\ +			"       rsync\n"				\ +			: "=&a" (vval)					\ +			: "a" (i), "a" (v)				\ +			: "a15", "memory"				\ +			);						\ +}									\ + +#define ATOMIC_OP_RETURN(op)						\ +static inline int atomic_##op##_return(int i, atomic_t * v)		\ +{									\ +	unsigned int vval;						\ +									\ +	__asm__ __volatile__(						\ +			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"	\ +			"       l32i    %0, %2, 0\n"			\ +			"       " #op " %0, %0, %1\n"			\ +			"       s32i    %0, %2, 0\n"			\ +			"       wsr     a15, ps\n"			\ +			"       rsync\n"				\ +			: "=&a" (vval)					\ +			: "a" (i), "a" (v)				\ +			: "a15", "memory"				\ +			);						\ +									\ +	return vval;							\  } -static inline int atomic_sub_return(int i, atomic_t * v) -{ -#if XCHAL_HAVE_S32C1I -	unsigned long tmp; 
-	int result; +#endif /* XCHAL_HAVE_S32C1I */ -	__asm__ __volatile__( -			"1:     l32i    %1, %3, 0\n" -			"       wsr     %1, scompare1\n" -			"       sub     %0, %1, %2\n" -			"       s32c1i  %0, %3, 0\n" -			"       bne     %0, %1, 1b\n" -			"       sub     %0, %0, %2\n" -			: "=&a" (result), "=&a" (tmp) -			: "a" (i), "a" (v) -			: "memory" -			); +#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) -	return result; -#else -	unsigned int vval; - -	__asm__ __volatile__( -			"       rsil    a15,"__stringify(LOCKLEVEL)"\n" -			"       l32i    %0, %2, 0\n" -			"       sub     %0, %0, %1\n" -			"       s32i    %0, %2, 0\n" -			"       wsr     a15, ps\n" -			"       rsync\n" -			: "=&a" (vval) -			: "a" (i), "a" (v) -			: "a15", "memory" -			); +ATOMIC_OPS(add) +ATOMIC_OPS(sub) -	return vval; -#endif -} +#undef ATOMIC_OPS +#undef ATOMIC_OP_RETURN +#undef ATOMIC_OP  /**   * atomic_sub_and_test - subtract value from variable and test result diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c index 562fac664751..4d54b481123b 100644 --- a/arch/xtensa/kernel/ptrace.c +++ b/arch/xtensa/kernel/ptrace.c @@ -342,7 +342,7 @@ void do_syscall_trace_enter(struct pt_regs *regs)  		do_syscall_trace();  #if 0 -	audit_syscall_entry(current, AUDIT_ARCH_XTENSA..); +	audit_syscall_entry(...);  #endif  } diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c index 40b5a3771fb0..4d02e38514f5 100644 --- a/arch/xtensa/kernel/smp.c +++ b/arch/xtensa/kernel/smp.c @@ -571,6 +571,7 @@ void flush_icache_range(unsigned long start, unsigned long end)  	};  	on_each_cpu(ipi_flush_icache_range, &fd, 1);  } +EXPORT_SYMBOL(flush_icache_range);  /* ------------------------------------------------------------------------- */ |