From f5967101e9de12addcda4510dfbac66d7c5779c3 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Mon, 30 May 2016 12:56:27 +0200 Subject: x86/hweight: Get rid of the special calling convention People complained about ARCH_HWEIGHT_CFLAGS and how it throws a wrench into kcov, LTO, and other experimentation. Add asm versions for __sw_hweight{32,64}() and do explicit saving and restoring of clobbered registers. This gets rid of the special calling convention. We get to call those functions on !X86_FEATURE_POPCNT CPUs. We still need to hardcode POPCNT and register operands, as some old gas versions we support do not know about POPCNT. Btw, remove the redundant REX prefix from 32-bit POPCNT because alternatives can do padding now. Suggested-by: H. Peter Anvin Signed-off-by: Borislav Petkov Acked-by: Peter Zijlstra (Intel) Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1464605787-20603-1-git-send-email-bp@alien8.de Signed-off-by: Ingo Molnar --- lib/Makefile | 5 ----- lib/hweight.c | 4 ++++ 2 files changed, 4 insertions(+), 5 deletions(-) (limited to 'lib') diff --git a/lib/Makefile b/lib/Makefile index ff6a7a6c6395..07d06a8b9788 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -15,9 +15,6 @@ KCOV_INSTRUMENT_rbtree.o := n KCOV_INSTRUMENT_list_debug.o := n KCOV_INSTRUMENT_debugobjects.o := n KCOV_INSTRUMENT_dynamic_debug.o := n -# Kernel does not boot if we instrument this file as it uses custom calling -# convention (see CONFIG_ARCH_HWEIGHT_CFLAGS). -KCOV_INSTRUMENT_hweight.o := n lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o dump_stack.o timerqueue.o\ @@ -74,8 +71,6 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o -GCOV_PROFILE_hweight.o := n -CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o obj-$(CONFIG_BTREE) += btree.o diff --git a/lib/hweight.c b/lib/hweight.c index 9a5c1f221558..43273a7d83cf 100644 --- a/lib/hweight.c +++ b/lib/hweight.c @@ -9,6 +9,7 @@ * The Hamming Weight of a number is the total number of bits set in it. */ +#ifndef __HAVE_ARCH_SW_HWEIGHT unsigned int __sw_hweight32(unsigned int w) { #ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER @@ -25,6 +26,7 @@ unsigned int __sw_hweight32(unsigned int w) #endif } EXPORT_SYMBOL(__sw_hweight32); +#endif unsigned int __sw_hweight16(unsigned int w) { @@ -43,6 +45,7 @@ unsigned int __sw_hweight8(unsigned int w) } EXPORT_SYMBOL(__sw_hweight8); +#ifndef __HAVE_ARCH_SW_HWEIGHT unsigned long __sw_hweight64(__u64 w) { #if BITS_PER_LONG == 32 @@ -65,3 +68,4 @@ unsigned long __sw_hweight64(__u64 w) #endif } EXPORT_SYMBOL(__sw_hweight64); +#endif -- cgit v1.2.3
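For context, the software fallback that this patch makes directly callable on !X86_FEATURE_POPCNT CPUs is the classic SWAR bit count. A minimal sketch of the CONFIG_ARCH_HAS_FAST_MULTIPLIER variant that lib/hweight.c implements (illustrative only, not code from the patch):

    /*
     * Sum adjacent bit pairs, then nibbles, then bytes; the final
     * multiply accumulates all per-byte counts into the top byte.
     */
    static unsigned int sw_hweight32_sketch(unsigned int w)
    {
        w -= (w >> 1) & 0x55555555;                      /* 2-bit partial sums */
        w  = (w & 0x33333333) + ((w >> 2) & 0x33333333); /* 4-bit partial sums */
        w  = (w + (w >> 4)) & 0x0f0f0f0f;                /* per-byte counts */
        return (w * 0x01010101) >> 24;                   /* total in bits 31:24 */
    }

On CPUs that do have X86_FEATURE_POPCNT, the alternatives machinery patches the call site so a single POPCNT instruction replaces this routine.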
From f8cbdee99b161cb08c3fb55200941028c5fe25c8 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 19 Apr 2016 13:03:18 -0700 Subject: torture: Simplify code, eliminate RCU_PERF_TEST_RUNNABLE This commit applies the infamous IS_ENABLED() macro to eliminate an #ifdef. It also eliminates the RCU_PERF_TEST_RUNNABLE Kconfig option in favor of the already-existing rcuperf.perf_runnable kernel boot parameter. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcuperf.c | 7 +------ lib/Kconfig.debug | 16 ---------------- 2 files changed, 1 insertion(+), 22 deletions(-) (limited to 'lib') diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c index 3cee0d8393ed..afd174e901c3 100644 --- a/kernel/rcu/rcuperf.c +++ b/kernel/rcu/rcuperf.c @@ -96,12 +96,7 @@ static int rcu_perf_writer_state; #define MAX_MEAS 10000 #define MIN_MEAS 100 -#if defined(MODULE) || defined(CONFIG_RCU_PERF_TEST_RUNNABLE) -#define RCUPERF_RUNNABLE_INIT 1 -#else -#define RCUPERF_RUNNABLE_INIT 0 -#endif -static int perf_runnable = RCUPERF_RUNNABLE_INIT; +static int perf_runnable = IS_ENABLED(MODULE); module_param(perf_runnable, int, 0444); MODULE_PARM_DESC(perf_runnable, "Start rcuperf at boot"); diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index b9cfdbfae9aa..cf6ddcd8f70c 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1307,22 +1307,6 @@ config RCU_PERF_TEST Say M if you want the RCU performance tests to build as a module. Say N if you are unsure. -config RCU_PERF_TEST_RUNNABLE - bool "performance tests for RCU runnable by default" - depends on RCU_PERF_TEST = y - default n - help - This option provides a way to build the RCU performance tests - directly into the kernel without them starting up at boot time. - You can use /sys/module to manually override this setting. - This /proc file is available only when the RCU performance - tests have been built into the kernel. - - Say Y here if you want the RCU performance tests to start during - boot (you probably don't). - Say N here if you want the RCU performance tests to start only - after being manually enabled via /sys/module. - config RCU_TORTURE_TEST tristate "torture tests for RCU" depends on DEBUG_KERNEL -- cgit v1.2.3
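Both this patch and the next one lean on the same trick, so a hedged sketch may help: IS_ENABLED() turns a Kconfig test into an ordinary C constant expression, letting the compiler type-check and then discard the dead branch that an #ifdef would have hidden. CONFIG_FOO_DEBUG below is a made-up option used purely for illustration:

    #include <linux/kconfig.h>
    #include <linux/printk.h>

    static void foo_announce(void)
    {
        /* Evaluates to 1 if CONFIG_FOO_DEBUG=y or =m, else 0. */
        if (IS_ENABLED(CONFIG_FOO_DEBUG))
            pr_info("foo: debug instrumentation enabled\n");
    }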
From 4e9a073f60367157fd64b65490654c39d4c44321 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 19 Apr 2016 14:42:34 -0700 Subject: torture: Remove CONFIG_RCU_TORTURE_TEST_RUNNABLE, simplify code This commit removes CONFIG_RCU_TORTURE_TEST_RUNNABLE in favor of the already-existing rcutorture.torture_runnable kernel boot parameter. It also converts an #ifdef into IS_ENABLED(), saving a few lines of code. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 7 +------ kernel/rcu/tree_plugin.h | 2 -- lib/Kconfig.debug | 17 ----------------- 3 files changed, 1 insertion(+), 25 deletions(-) (limited to 'lib') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 084a28a732eb..01cb57ff106f 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -182,12 +182,7 @@ static const char *rcu_torture_writer_state_getname(void) return rcu_torture_writer_state_names[i]; } -#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE) -#define RCUTORTURE_RUNNABLE_INIT 1 -#else -#define RCUTORTURE_RUNNABLE_INIT 0 -#endif -static int torture_runnable = RCUTORTURE_RUNNABLE_INIT; +static int torture_runnable = IS_ENABLED(MODULE); module_param(torture_runnable, int, 0444); MODULE_PARM_DESC(torture_runnable, "Start rcutorture at boot"); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index ff1cd4e1188d..5b2d723e6568 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -79,8 +79,6 @@ static void __init rcu_bootup_announce_oddness(void) pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n"); if (IS_ENABLED(CONFIG_PROVE_RCU)) pr_info("\tRCU lockdep checking is enabled.\n"); - if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_RUNNABLE)) - pr_info("\tRCU torture testing starts during boot.\n"); if (RCU_NUM_LVLS >= 4) pr_info("\tFour(or more)-level hierarchy is enabled.\n"); if (RCU_FANOUT_LEAF != 16) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index cf6ddcd8f70c..805b7048a1bd 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1324,23 +1324,6 @@ config RCU_TORTURE_TEST Say M if you want the RCU torture tests to build as a module. Say N if you are unsure. -config RCU_TORTURE_TEST_RUNNABLE - bool "torture tests for RCU runnable by default" - depends on RCU_TORTURE_TEST = y - default n - help - This option provides a way to build the RCU torture tests - directly into the kernel without them starting up at boot - time. You can use /proc/sys/kernel/rcutorture_runnable - to manually override this setting. This /proc file is - available only when the RCU torture tests have been built - into the kernel. - - Say Y here if you want the RCU torture tests to start during - boot (you probably don't). - Say N here if you want the RCU torture tests to start only - after being manually enabled via /proc. - config RCU_TORTURE_TEST_SLOW_PREINIT bool "Slow down RCU grace-period pre-initialization to expose races" depends on RCU_TORTURE_TEST -- cgit v1.2.3 From 28aa2bda2211f4327d83b44a4f917b4a061b1c56 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 18 Apr 2016 00:54:38 +0200 Subject: locking/atomic: Implement atomic{,64,_long}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}() Now that all the architectures have implemented support for these new atomic primitives, add the generic infrastructure to expose and use them. Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Arnd Bergmann Cc: Boqun Feng Cc: Borislav Petkov Cc: Davidlohr Bueso Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Paul E.
McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Cc: linux-arch@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- include/asm-generic/atomic-long.h | 36 +++- include/asm-generic/atomic.h | 49 ++++++ include/asm-generic/atomic64.h | 15 +- include/linux/atomic.h | 336 ++++++++++++++++++++++++++++++++++++++ lib/atomic64.c | 32 +++- lib/atomic64_test.c | 34 ++++ 6 files changed, 493 insertions(+), 9 deletions(-) (limited to 'lib') diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h index 5e1f345b58dd..2d0d3cf791ab 100644 --- a/include/asm-generic/atomic-long.h +++ b/include/asm-generic/atomic-long.h @@ -112,6 +112,40 @@ static __always_inline void atomic_long_dec(atomic_long_t *l) ATOMIC_LONG_PFX(_dec)(v); } +#define ATOMIC_LONG_FETCH_OP(op, mo) \ +static inline long \ +atomic_long_fetch_##op##mo(long i, atomic_long_t *l) \ +{ \ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ + \ + return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(i, v); \ +} + +ATOMIC_LONG_FETCH_OP(add, ) +ATOMIC_LONG_FETCH_OP(add, _relaxed) +ATOMIC_LONG_FETCH_OP(add, _acquire) +ATOMIC_LONG_FETCH_OP(add, _release) +ATOMIC_LONG_FETCH_OP(sub, ) +ATOMIC_LONG_FETCH_OP(sub, _relaxed) +ATOMIC_LONG_FETCH_OP(sub, _acquire) +ATOMIC_LONG_FETCH_OP(sub, _release) +ATOMIC_LONG_FETCH_OP(and, ) +ATOMIC_LONG_FETCH_OP(and, _relaxed) +ATOMIC_LONG_FETCH_OP(and, _acquire) +ATOMIC_LONG_FETCH_OP(and, _release) +ATOMIC_LONG_FETCH_OP(andnot, ) +ATOMIC_LONG_FETCH_OP(andnot, _relaxed) +ATOMIC_LONG_FETCH_OP(andnot, _acquire) +ATOMIC_LONG_FETCH_OP(andnot, _release) +ATOMIC_LONG_FETCH_OP(or, ) +ATOMIC_LONG_FETCH_OP(or, _relaxed) +ATOMIC_LONG_FETCH_OP(or, _acquire) +ATOMIC_LONG_FETCH_OP(or, _release) +ATOMIC_LONG_FETCH_OP(xor, ) +ATOMIC_LONG_FETCH_OP(xor, _relaxed) +ATOMIC_LONG_FETCH_OP(xor, _acquire) +ATOMIC_LONG_FETCH_OP(xor, _release) + #define ATOMIC_LONG_OP(op) \ static __always_inline void \ atomic_long_##op(long i, atomic_long_t *l) \ @@ -124,9 +158,9 @@ atomic_long_##op(long i, atomic_long_t *l) \ ATOMIC_LONG_OP(add) ATOMIC_LONG_OP(sub) ATOMIC_LONG_OP(and) +ATOMIC_LONG_OP(andnot) ATOMIC_LONG_OP(or) ATOMIC_LONG_OP(xor) -ATOMIC_LONG_OP(andnot) #undef ATOMIC_LONG_OP diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index 74f1a3704d7a..a2304ccf4ed0 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -61,6 +61,18 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ return c c_op i; \ } +#define ATOMIC_FETCH_OP(op, c_op) \ +static inline int atomic_fetch_##op(int i, atomic_t *v) \ +{ \ + int c, old; \ + \ + c = v->counter; \ + while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ + c = old; \ + \ + return c; \ +} + #else #include @@ -88,6 +100,20 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ return ret; \ } +#define ATOMIC_FETCH_OP(op, c_op) \ +static inline int atomic_fetch_##op(int i, atomic_t *v) \ +{ \ + unsigned long flags; \ + int ret; \ + \ + raw_local_irq_save(flags); \ + ret = v->counter; \ + v->counter = v->counter c_op i; \ + raw_local_irq_restore(flags); \ + \ + return ret; \ +} + #endif /* CONFIG_SMP */ #ifndef atomic_add_return @@ -98,6 +124,28 @@ ATOMIC_OP_RETURN(add, +) ATOMIC_OP_RETURN(sub, -) #endif +#ifndef atomic_fetch_add +ATOMIC_FETCH_OP(add, +) +#endif + +#ifndef atomic_fetch_sub +ATOMIC_FETCH_OP(sub, -) +#endif + +#ifndef atomic_fetch_and +ATOMIC_FETCH_OP(and, &) +#endif + +#ifndef atomic_fetch_or +#define atomic_fetch_or atomic_fetch_or + +ATOMIC_FETCH_OP(or, |) +#endif 
+ +#ifndef atomic_fetch_xor +ATOMIC_FETCH_OP(xor, ^) +#endif + #ifndef atomic_and ATOMIC_OP(and, &) #endif @@ -110,6 +158,7 @@ ATOMIC_OP(or, |) ATOMIC_OP(xor, ^) #endif +#undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN #undef ATOMIC_OP diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h index d48e78ccad3d..dad68bf46c77 100644 --- a/include/asm-generic/atomic64.h +++ b/include/asm-generic/atomic64.h @@ -27,16 +27,23 @@ extern void atomic64_##op(long long a, atomic64_t *v); #define ATOMIC64_OP_RETURN(op) \ extern long long atomic64_##op##_return(long long a, atomic64_t *v); -#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) +#define ATOMIC64_FETCH_OP(op) \ +extern long long atomic64_fetch_##op(long long a, atomic64_t *v); + +#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op) ATOMIC64_OPS(add) ATOMIC64_OPS(sub) -ATOMIC64_OP(and) -ATOMIC64_OP(or) -ATOMIC64_OP(xor) +#undef ATOMIC64_OPS +#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_FETCH_OP(op) + +ATOMIC64_OPS(and) +ATOMIC64_OPS(or) +ATOMIC64_OPS(xor) #undef ATOMIC64_OPS +#undef ATOMIC64_FETCH_OP #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 351f89e1d15c..2e6c013ac5a4 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -163,6 +163,154 @@ #endif #endif /* atomic_dec_return_relaxed */ + +/* atomic_fetch_add_relaxed */ +#ifndef atomic_fetch_add_relaxed +#define atomic_fetch_add_relaxed atomic_fetch_add +#define atomic_fetch_add_acquire atomic_fetch_add +#define atomic_fetch_add_release atomic_fetch_add + +#else /* atomic_fetch_add_relaxed */ + +#ifndef atomic_fetch_add_acquire +#define atomic_fetch_add_acquire(...) \ + __atomic_op_acquire(atomic_fetch_add, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_add_release +#define atomic_fetch_add_release(...) \ + __atomic_op_release(atomic_fetch_add, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_add +#define atomic_fetch_add(...) \ + __atomic_op_fence(atomic_fetch_add, __VA_ARGS__) +#endif +#endif /* atomic_fetch_add_relaxed */ + +/* atomic_fetch_sub_relaxed */ +#ifndef atomic_fetch_sub_relaxed +#define atomic_fetch_sub_relaxed atomic_fetch_sub +#define atomic_fetch_sub_acquire atomic_fetch_sub +#define atomic_fetch_sub_release atomic_fetch_sub + +#else /* atomic_fetch_sub_relaxed */ + +#ifndef atomic_fetch_sub_acquire +#define atomic_fetch_sub_acquire(...) \ + __atomic_op_acquire(atomic_fetch_sub, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_sub_release +#define atomic_fetch_sub_release(...) \ + __atomic_op_release(atomic_fetch_sub, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_sub +#define atomic_fetch_sub(...) \ + __atomic_op_fence(atomic_fetch_sub, __VA_ARGS__) +#endif +#endif /* atomic_fetch_sub_relaxed */ + +/* atomic_fetch_or_relaxed */ +#ifndef atomic_fetch_or_relaxed +#define atomic_fetch_or_relaxed atomic_fetch_or +#define atomic_fetch_or_acquire atomic_fetch_or +#define atomic_fetch_or_release atomic_fetch_or + +#else /* atomic_fetch_or_relaxed */ + +#ifndef atomic_fetch_or_acquire +#define atomic_fetch_or_acquire(...) \ + __atomic_op_acquire(atomic_fetch_or, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_or_release +#define atomic_fetch_or_release(...) \ + __atomic_op_release(atomic_fetch_or, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_or +#define atomic_fetch_or(...) 
\ + __atomic_op_fence(atomic_fetch_or, __VA_ARGS__) +#endif +#endif /* atomic_fetch_or_relaxed */ + +/* atomic_fetch_and_relaxed */ +#ifndef atomic_fetch_and_relaxed +#define atomic_fetch_and_relaxed atomic_fetch_and +#define atomic_fetch_and_acquire atomic_fetch_and +#define atomic_fetch_and_release atomic_fetch_and + +#else /* atomic_fetch_and_relaxed */ + +#ifndef atomic_fetch_and_acquire +#define atomic_fetch_and_acquire(...) \ + __atomic_op_acquire(atomic_fetch_and, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_and_release +#define atomic_fetch_and_release(...) \ + __atomic_op_release(atomic_fetch_and, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_and +#define atomic_fetch_and(...) \ + __atomic_op_fence(atomic_fetch_and, __VA_ARGS__) +#endif +#endif /* atomic_fetch_and_relaxed */ + +#ifdef atomic_andnot +/* atomic_fetch_andnot_relaxed */ +#ifndef atomic_fetch_andnot_relaxed +#define atomic_fetch_andnot_relaxed atomic_fetch_andnot +#define atomic_fetch_andnot_acquire atomic_fetch_andnot +#define atomic_fetch_andnot_release atomic_fetch_andnot + +#else /* atomic_fetch_andnot_relaxed */ + +#ifndef atomic_fetch_andnot_acquire +#define atomic_fetch_andnot_acquire(...) \ + __atomic_op_acquire(atomic_fetch_andnot, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_andnot_release +#define atomic_fetch_andnot_release(...) \ + __atomic_op_release(atomic_fetch_andnot, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_andnot +#define atomic_fetch_andnot(...) \ + __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__) +#endif +#endif /* atomic_fetch_andnot_relaxed */ +#endif /* atomic_andnot */ + +/* atomic_fetch_xor_relaxed */ +#ifndef atomic_fetch_xor_relaxed +#define atomic_fetch_xor_relaxed atomic_fetch_xor +#define atomic_fetch_xor_acquire atomic_fetch_xor +#define atomic_fetch_xor_release atomic_fetch_xor + +#else /* atomic_fetch_xor_relaxed */ + +#ifndef atomic_fetch_xor_acquire +#define atomic_fetch_xor_acquire(...) \ + __atomic_op_acquire(atomic_fetch_xor, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_xor_release +#define atomic_fetch_xor_release(...) \ + __atomic_op_release(atomic_fetch_xor, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_xor +#define atomic_fetch_xor(...) \ + __atomic_op_fence(atomic_fetch_xor, __VA_ARGS__) +#endif +#endif /* atomic_fetch_xor_relaxed */ + + /* atomic_xchg_relaxed */ #ifndef atomic_xchg_relaxed #define atomic_xchg_relaxed atomic_xchg @@ -310,6 +458,26 @@ static inline void atomic_andnot(int i, atomic_t *v) { atomic_and(~i, v); } + +static inline int atomic_fetch_andnot(int i, atomic_t *v) +{ + return atomic_fetch_and(~i, v); +} + +static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) +{ + return atomic_fetch_and_relaxed(~i, v); +} + +static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) +{ + return atomic_fetch_and_acquire(~i, v); +} + +static inline int atomic_fetch_andnot_release(int i, atomic_t *v) +{ + return atomic_fetch_and_release(~i, v); +} #endif static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) @@ -535,6 +703,154 @@ static inline int atomic_fetch_or(int mask, atomic_t *p) #endif #endif /* atomic64_dec_return_relaxed */ + +/* atomic64_fetch_add_relaxed */ +#ifndef atomic64_fetch_add_relaxed +#define atomic64_fetch_add_relaxed atomic64_fetch_add +#define atomic64_fetch_add_acquire atomic64_fetch_add +#define atomic64_fetch_add_release atomic64_fetch_add + +#else /* atomic64_fetch_add_relaxed */ + +#ifndef atomic64_fetch_add_acquire +#define atomic64_fetch_add_acquire(...) 
\ + __atomic_op_acquire(atomic64_fetch_add, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_add_release +#define atomic64_fetch_add_release(...) \ + __atomic_op_release(atomic64_fetch_add, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_add +#define atomic64_fetch_add(...) \ + __atomic_op_fence(atomic64_fetch_add, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_add_relaxed */ + +/* atomic64_fetch_sub_relaxed */ +#ifndef atomic64_fetch_sub_relaxed +#define atomic64_fetch_sub_relaxed atomic64_fetch_sub +#define atomic64_fetch_sub_acquire atomic64_fetch_sub +#define atomic64_fetch_sub_release atomic64_fetch_sub + +#else /* atomic64_fetch_sub_relaxed */ + +#ifndef atomic64_fetch_sub_acquire +#define atomic64_fetch_sub_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_sub, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_sub_release +#define atomic64_fetch_sub_release(...) \ + __atomic_op_release(atomic64_fetch_sub, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_sub +#define atomic64_fetch_sub(...) \ + __atomic_op_fence(atomic64_fetch_sub, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_sub_relaxed */ + +/* atomic64_fetch_or_relaxed */ +#ifndef atomic64_fetch_or_relaxed +#define atomic64_fetch_or_relaxed atomic64_fetch_or +#define atomic64_fetch_or_acquire atomic64_fetch_or +#define atomic64_fetch_or_release atomic64_fetch_or + +#else /* atomic64_fetch_or_relaxed */ + +#ifndef atomic64_fetch_or_acquire +#define atomic64_fetch_or_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_or, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_or_release +#define atomic64_fetch_or_release(...) \ + __atomic_op_release(atomic64_fetch_or, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_or +#define atomic64_fetch_or(...) \ + __atomic_op_fence(atomic64_fetch_or, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_or_relaxed */ + +/* atomic64_fetch_and_relaxed */ +#ifndef atomic64_fetch_and_relaxed +#define atomic64_fetch_and_relaxed atomic64_fetch_and +#define atomic64_fetch_and_acquire atomic64_fetch_and +#define atomic64_fetch_and_release atomic64_fetch_and + +#else /* atomic64_fetch_and_relaxed */ + +#ifndef atomic64_fetch_and_acquire +#define atomic64_fetch_and_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_and, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_and_release +#define atomic64_fetch_and_release(...) \ + __atomic_op_release(atomic64_fetch_and, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_and +#define atomic64_fetch_and(...) \ + __atomic_op_fence(atomic64_fetch_and, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_and_relaxed */ + +#ifdef atomic64_andnot +/* atomic64_fetch_andnot_relaxed */ +#ifndef atomic64_fetch_andnot_relaxed +#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot +#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot +#define atomic64_fetch_andnot_release atomic64_fetch_andnot + +#else /* atomic64_fetch_andnot_relaxed */ + +#ifndef atomic64_fetch_andnot_acquire +#define atomic64_fetch_andnot_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_andnot, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_andnot_release +#define atomic64_fetch_andnot_release(...) \ + __atomic_op_release(atomic64_fetch_andnot, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_andnot +#define atomic64_fetch_andnot(...) 
\ + __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_andnot_relaxed */ +#endif /* atomic64_andnot */ + +/* atomic64_fetch_xor_relaxed */ +#ifndef atomic64_fetch_xor_relaxed +#define atomic64_fetch_xor_relaxed atomic64_fetch_xor +#define atomic64_fetch_xor_acquire atomic64_fetch_xor +#define atomic64_fetch_xor_release atomic64_fetch_xor + +#else /* atomic64_fetch_xor_relaxed */ + +#ifndef atomic64_fetch_xor_acquire +#define atomic64_fetch_xor_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_xor, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_xor_release +#define atomic64_fetch_xor_release(...) \ + __atomic_op_release(atomic64_fetch_xor, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_xor +#define atomic64_fetch_xor(...) \ + __atomic_op_fence(atomic64_fetch_xor, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_xor_relaxed */ + + /* atomic64_xchg_relaxed */ #ifndef atomic64_xchg_relaxed #define atomic64_xchg_relaxed atomic64_xchg @@ -588,6 +904,26 @@ static inline void atomic64_andnot(long long i, atomic64_t *v) { atomic64_and(~i, v); } + +static inline long long atomic64_fetch_andnot(long long i, atomic64_t *v) +{ + return atomic64_fetch_and(~i, v); +} + +static inline long long atomic64_fetch_andnot_relaxed(long long i, atomic64_t *v) +{ + return atomic64_fetch_and_relaxed(~i, v); +} + +static inline long long atomic64_fetch_andnot_acquire(long long i, atomic64_t *v) +{ + return atomic64_fetch_and_acquire(~i, v); +} + +static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v) +{ + return atomic64_fetch_and_release(~i, v); +} #endif #include diff --git a/lib/atomic64.c b/lib/atomic64.c index 2886ebac6567..53c2d5edc826 100644 --- a/lib/atomic64.c +++ b/lib/atomic64.c @@ -96,17 +96,41 @@ long long atomic64_##op##_return(long long a, atomic64_t *v) \ } \ EXPORT_SYMBOL(atomic64_##op##_return); +#define ATOMIC64_FETCH_OP(op, c_op) \ +long long atomic64_fetch_##op(long long a, atomic64_t *v) \ +{ \ + unsigned long flags; \ + raw_spinlock_t *lock = lock_addr(v); \ + long long val; \ + \ + raw_spin_lock_irqsave(lock, flags); \ + val = v->counter; \ + v->counter c_op a; \ + raw_spin_unlock_irqrestore(lock, flags); \ + return val; \ +} \ +EXPORT_SYMBOL(atomic64_fetch_##op); + #define ATOMIC64_OPS(op, c_op) \ ATOMIC64_OP(op, c_op) \ - ATOMIC64_OP_RETURN(op, c_op) + ATOMIC64_OP_RETURN(op, c_op) \ + ATOMIC64_FETCH_OP(op, c_op) ATOMIC64_OPS(add, +=) ATOMIC64_OPS(sub, -=) -ATOMIC64_OP(and, &=) -ATOMIC64_OP(or, |=) -ATOMIC64_OP(xor, ^=) #undef ATOMIC64_OPS +#define ATOMIC64_OPS(op, c_op) \ + ATOMIC64_OP(op, c_op) \ + ATOMIC64_OP_RETURN(op, c_op) \ + ATOMIC64_FETCH_OP(op, c_op) + +ATOMIC64_OPS(and, &=) +ATOMIC64_OPS(or, |=) +ATOMIC64_OPS(xor, ^=) + +#undef ATOMIC64_OPS +#undef ATOMIC64_FETCH_OP #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c index 123481814320..dbb369145dda 100644 --- a/lib/atomic64_test.c +++ b/lib/atomic64_test.c @@ -53,11 +53,25 @@ do { \ BUG_ON(atomic##bit##_read(&v) != r); \ } while (0) +#define TEST_FETCH(bit, op, c_op, val) \ +do { \ + atomic##bit##_set(&v, v0); \ + r = v0; \ + r c_op val; \ + BUG_ON(atomic##bit##_##op(val, &v) != v0); \ + BUG_ON(atomic##bit##_read(&v) != r); \ +} while (0) + #define RETURN_FAMILY_TEST(bit, op, c_op, val) \ do { \ FAMILY_TEST(TEST_RETURN, bit, op, c_op, val); \ } while (0) +#define FETCH_FAMILY_TEST(bit, op, c_op, val) \ +do { \ + FAMILY_TEST(TEST_FETCH, bit, op, c_op, val); \ +} while (0) + #define TEST_ARGS(bit, op, init, ret, expect, 
args...) \ do { \ atomic##bit##_set(&v, init); \ @@ -114,6 +128,16 @@ static __init void test_atomic(void) RETURN_FAMILY_TEST(, sub_return, -=, onestwos); RETURN_FAMILY_TEST(, sub_return, -=, -one); + FETCH_FAMILY_TEST(, fetch_add, +=, onestwos); + FETCH_FAMILY_TEST(, fetch_add, +=, -one); + FETCH_FAMILY_TEST(, fetch_sub, -=, onestwos); + FETCH_FAMILY_TEST(, fetch_sub, -=, -one); + + FETCH_FAMILY_TEST(, fetch_or, |=, v1); + FETCH_FAMILY_TEST(, fetch_and, &=, v1); + FETCH_FAMILY_TEST(, fetch_andnot, &= ~, v1); + FETCH_FAMILY_TEST(, fetch_xor, ^=, v1); + INC_RETURN_FAMILY_TEST(, v0); DEC_RETURN_FAMILY_TEST(, v0); @@ -154,6 +178,16 @@ static __init void test_atomic64(void) RETURN_FAMILY_TEST(64, sub_return, -=, onestwos); RETURN_FAMILY_TEST(64, sub_return, -=, -one); + FETCH_FAMILY_TEST(64, fetch_add, +=, onestwos); + FETCH_FAMILY_TEST(64, fetch_add, +=, -one); + FETCH_FAMILY_TEST(64, fetch_sub, -=, onestwos); + FETCH_FAMILY_TEST(64, fetch_sub, -=, -one); + + FETCH_FAMILY_TEST(64, fetch_or, |=, v1); + FETCH_FAMILY_TEST(64, fetch_and, &=, v1); + FETCH_FAMILY_TEST(64, fetch_andnot, &= ~, v1); + FETCH_FAMILY_TEST(64, fetch_xor, ^=, v1); + INIT(v0); atomic64_inc(&v); r += one; -- cgit v1.2.3
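Before moving on, the new API's return convention is worth pinning down with a small invented example: every atomic{,64,_long}_fetch_##op() returns the value the variable held before the operation (which is what the TEST_FETCH macro above asserts), whereas the older ##op##_return() forms return the value after it:

    #include <linux/atomic.h>
    #include <linux/printk.h>

    static void fetch_vs_return_demo(void)
    {
        atomic_t v = ATOMIC_INIT(1);
        int old, new;

        old = atomic_fetch_add(2, &v);  /* returns 1; v is now 3 */
        new = atomic_add_return(2, &v); /* returns 5; v is now 5 */
        pr_info("old=%d new=%d\n", old, new);
    }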
From 53bf837b78d155b8e1110b3c25b4d0d6391b8ff3 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 4 Jul 2016 09:50:31 +0000 Subject: timers: Remove set_timer_slack() leftovers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We now have implicit batching in the timer wheel. The slack API is no longer used, so remove it. Signed-off-by: Thomas Gleixner Cc: Alan Stern Cc: Andrew F. Davis Cc: Arjan van de Ven Cc: Chris Mason Cc: David S. Miller Cc: David Woodhouse Cc: Dmitry Eremin-Solenikov Cc: Eric Dumazet Cc: Frederic Weisbecker Cc: George Spelvin Cc: Greg Kroah-Hartman Cc: Jaehoon Chung Cc: Jens Axboe Cc: John Stultz Cc: Josh Triplett Cc: Len Brown Cc: Linus Torvalds Cc: Mathias Nyman Cc: Pali Rohár Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Rik van Riel Cc: Sebastian Reichel Cc: Ulf Hansson Cc: linux-block@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: linux-mmc@vger.kernel.org Cc: linux-pm@vger.kernel.org Cc: linux-usb@vger.kernel.org Cc: netdev@vger.kernel.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160704094342.189813118@linutronix.de Signed-off-by: Ingo Molnar --- block/genhd.c | 5 ----- drivers/mmc/host/jz4740_mmc.c | 2 -- drivers/power/bq27xxx_battery.c | 5 +---- drivers/usb/host/ohci-hcd.c | 1 - drivers/usb/host/xhci.c | 2 -- include/linux/timer.h | 4 ---- kernel/time/timer.c | 19 ------------------- lib/random32.c | 1 - 8 files changed, 1 insertion(+), 38 deletions(-) (limited to 'lib') diff --git a/block/genhd.c b/block/genhd.c index 9f42526b4d62..f06d7f3b075b 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1523,12 +1523,7 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now) if (--ev->block) goto out_unlock; - /* - * Not exactly a latency critical operation, set poll timer - * slack to 25% and kick event check. - */ intv = disk_events_poll_jiffies(disk); - set_timer_slack(&ev->dwork.timer, intv / 4); if (check_now) queue_delayed_work(system_freezable_power_efficient_wq, &ev->dwork, 0); diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index 03ddf0ecf402..684087db170b 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -1068,8 +1068,6 @@ static int jz4740_mmc_probe(struct platform_device* pdev) jz4740_mmc_clock_disable(host); setup_timer(&host->timeout_timer, jz4740_mmc_timeout, (unsigned long)host); - /* It is not important when it times out, it just needs to timeout. */ - set_timer_slack(&host->timeout_timer, HZ); host->use_dma = true; if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0) diff --git a/drivers/power/bq27xxx_battery.c b/drivers/power/bq27xxx_battery.c index 45f6ebf88df6..e90b3f307e0f 100644 --- a/drivers/power/bq27xxx_battery.c +++ b/drivers/power/bq27xxx_battery.c @@ -735,11 +735,8 @@ static void bq27xxx_battery_poll(struct work_struct *work) bq27xxx_battery_update(di); - if (poll_interval > 0) { - /* The timer does not have to be accurate. */ - set_timer_slack(&di->work.timer, poll_interval * HZ / 4); + if (poll_interval > 0) schedule_delayed_work(&di->work, poll_interval * HZ); - } } /* diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 0449235d4f22..1700908b84ef 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -500,7 +500,6 @@ static int ohci_init (struct ohci_hcd *ohci) setup_timer(&ohci->io_watchdog, io_watchdog_func, (unsigned long) ohci); - set_timer_slack(&ohci->io_watchdog, msecs_to_jiffies(20)); ohci->hcca = dma_alloc_coherent (hcd->self.controller, sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index f2f9518c53ab..a986fe763d87 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -490,8 +490,6 @@ static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci) xhci->comp_mode_recovery_timer.expires = jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS); - set_timer_slack(&xhci->comp_mode_recovery_timer, - msecs_to_jiffies(COMP_MODE_RCVRY_MSECS)); add_timer(&xhci->comp_mode_recovery_timer); xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "Compliance mode recovery timer initialized"); diff --git a/include/linux/timer.h b/include/linux/timer.h index 5869ab9848fe..4419506b564e 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -19,7 +19,6 @@ struct timer_list { void (*function)(unsigned long); unsigned long data; u32 flags; - int slack; #ifdef CONFIG_TIMER_STATS int start_pid; @@ -73,7 +72,6 @@ struct timer_list { .expires = (_expires), \ .data = (_data), \ .flags = (_flags), \ - .slack = -1, \ __TIMER_LOCKDEP_MAP_INITIALIZER( \ __FILE__ ":" __stringify(__LINE__)) \ } @@ -193,8 +191,6 @@ extern int del_timer(struct timer_list * timer); extern int mod_timer(struct timer_list *timer, unsigned long expires); extern int mod_timer_pending(struct timer_list *timer, unsigned long expires); -extern void set_timer_slack(struct timer_list *time, int slack_hz); - /* * The jiffies value which is added to now, when there is no timer * in the timer wheel: diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 86e95b72665d..a83e23d0bc25 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -447,24 +447,6 @@ unsigned long round_jiffies_up_relative(unsigned long j) } EXPORT_SYMBOL_GPL(round_jiffies_up_relative); -/** - * set_timer_slack - set the allowed slack
for a timer - * @timer: the timer to be modified - * @slack_hz: the amount of time (in jiffies) allowed for rounding - * - * Set the amount of time, in jiffies, that a certain timer has - * in terms of slack. By setting this value, the timer subsystem - * will schedule the actual timer somewhere between - * the time mod_timer() asks for, and that time plus the slack. - * - * By setting the slack to -1, a percentage of the delay is used - * instead. - */ -void set_timer_slack(struct timer_list *timer, int slack_hz) -{ - timer->slack = slack_hz; -} -EXPORT_SYMBOL_GPL(set_timer_slack); static inline unsigned int timer_get_idx(struct timer_list *timer) { @@ -775,7 +757,6 @@ static void do_init_timer(struct timer_list *timer, unsigned int flags, { timer->entry.pprev = NULL; timer->flags = flags | raw_smp_processor_id(); - timer->slack = -1; #ifdef CONFIG_TIMER_STATS timer->start_site = NULL; timer->start_pid = -1; diff --git a/lib/random32.c b/lib/random32.c index 510d1ce7d4d2..69ed593aab07 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -233,7 +233,6 @@ static void __prandom_timer(unsigned long dontcare) static void __init __prandom_start_seed_timer(void) { - set_timer_slack(&seed_timer, HZ); seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC); add_timer(&seed_timer); } -- cgit v1.2.3 From 13d4ea097d18b419ad2a2b696063d44bf59acec0 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 14 Jul 2016 13:22:57 -0700 Subject: x86/uaccess: Move thread_info::addr_limit to thread_struct struct thread_info is a legacy mess. To prepare for its partial removal, move thread_info::addr_limit out. As an added benefit, this way is simpler. Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/15bee834d09402b47ac86f2feccdf6529f9bc5b0.1468527351.git.luto@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/include/asm/checksum_32.h | 3 +-- arch/x86/include/asm/processor.h | 17 ++++++++++------- arch/x86/include/asm/thread_info.h | 7 ------- arch/x86/include/asm/uaccess.h | 6 +++--- arch/x86/kernel/asm-offsets.c | 4 +++- arch/x86/lib/copy_user_64.S | 8 ++++---- arch/x86/lib/csum-wrappers_64.c | 1 + arch/x86/lib/getuser.S | 20 ++++++++++---------- arch/x86/lib/putuser.S | 10 +++++----- arch/x86/lib/usercopy_64.c | 2 +- drivers/pnp/isapnp/proc.c | 2 +- lib/bitmap.c | 2 +- 12 files changed, 40 insertions(+), 42 deletions(-) (limited to 'lib') diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h index 532f85e6651f..7b53743ed267 100644 --- a/arch/x86/include/asm/checksum_32.h +++ b/arch/x86/include/asm/checksum_32.h @@ -2,8 +2,7 @@ #define _ASM_X86_CHECKSUM_32_H #include - -#include +#include /* * computes the checksum of a memory block at buff, length len, diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index cbdfe5f76347..89314ed74fee 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -371,6 +371,10 @@ extern unsigned int xstate_size; struct perf_event; +typedef struct { + unsigned long seg; +} mm_segment_t; + struct thread_struct { /* Cached TLS descriptors: */ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; @@ -419,6 +423,8 @@ struct thread_struct { /* Max allowed port in the bitmap, in bytes: */ unsigned io_bitmap_max; + mm_segment_t addr_limit; + unsigned int sig_on_uaccess_err:1; unsigned int uaccess_err:1; /* uaccess failed */ @@ 
-493,11 +499,6 @@ static inline void load_sp0(struct tss_struct *tss, #define set_iopl_mask native_set_iopl_mask #endif /* CONFIG_PARAVIRT */ -typedef struct { - unsigned long seg; -} mm_segment_t; - - /* Free all resources held by a thread. */ extern void release_thread(struct task_struct *); @@ -719,6 +720,7 @@ static inline void spin_lock_prefetch(const void *x) .sp0 = TOP_OF_INIT_STACK, \ .sysenter_cs = __KERNEL_CS, \ .io_bitmap_ptr = NULL, \ + .addr_limit = KERNEL_DS, \ } extern unsigned long thread_saved_pc(struct task_struct *tsk); @@ -768,8 +770,9 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); #define STACK_TOP TASK_SIZE #define STACK_TOP_MAX TASK_SIZE_MAX -#define INIT_THREAD { \ - .sp0 = TOP_OF_INIT_STACK \ +#define INIT_THREAD { \ + .sp0 = TOP_OF_INIT_STACK, \ + .addr_limit = KERNEL_DS, \ } /* diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 7c47bb659ecd..89bff044a6f5 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -57,7 +57,6 @@ struct thread_info { __u32 flags; /* low level flags */ __u32 status; /* thread synchronous flags */ __u32 cpu; /* current CPU */ - mm_segment_t addr_limit; }; #define INIT_THREAD_INFO(tsk) \ @@ -65,7 +64,6 @@ struct thread_info { .task = &tsk, \ .flags = 0, \ .cpu = 0, \ - .addr_limit = KERNEL_DS, \ } #define init_thread_info (init_thread_union.thread_info) @@ -184,11 +182,6 @@ static inline unsigned long current_stack_pointer(void) # define cpu_current_top_of_stack (cpu_tss + TSS_sp0) #endif -/* Load thread_info address into "reg" */ -#define GET_THREAD_INFO(reg) \ - _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \ - _ASM_SUB $(THREAD_SIZE),reg ; - /* * ASM operand which evaluates to a 'thread_info' address of * the current task, if it is known that "reg" is exactly "off" diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 8f66e5655c23..c03bfb68c503 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -29,12 +29,12 @@ #define USER_DS MAKE_MM_SEG(TASK_SIZE_MAX) #define get_ds() (KERNEL_DS) -#define get_fs() (current_thread_info()->addr_limit) -#define set_fs(x) (current_thread_info()->addr_limit = (x)) +#define get_fs() (current->thread.addr_limit) +#define set_fs(x) (current->thread.addr_limit = (x)) #define segment_eq(a, b) ((a).seg == (b).seg) -#define user_addr_max() (current_thread_info()->addr_limit.seg) +#define user_addr_max() (current->thread.addr_limit.seg) #define __addr_ok(addr) \ ((unsigned long __force)(addr) < user_addr_max()) diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index 674134e9f5e5..2bd5c6ff7ee7 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -31,7 +31,9 @@ void common(void) { BLANK(); OFFSET(TI_flags, thread_info, flags); OFFSET(TI_status, thread_info, status); - OFFSET(TI_addr_limit, thread_info, addr_limit); + + BLANK(); + OFFSET(TASK_addr_limit, task_struct, thread.addr_limit); BLANK(); OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index 2b0ef26da0bd..bf603ebbfd8e 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -17,11 +17,11 @@ /* Standard copy_to_user with segment limit checking */ ENTRY(_copy_to_user) - GET_THREAD_INFO(%rax) + mov PER_CPU_VAR(current_task), %rax movq %rdi,%rcx addq %rdx,%rcx jc bad_to_user - cmpq TI_addr_limit(%rax),%rcx + cmpq TASK_addr_limit(%rax),%rcx ja 
bad_to_user ALTERNATIVE_2 "jmp copy_user_generic_unrolled", \ "jmp copy_user_generic_string", \ @@ -32,11 +32,11 @@ ENDPROC(_copy_to_user) /* Standard copy_from_user with segment limit checking */ ENTRY(_copy_from_user) - GET_THREAD_INFO(%rax) + mov PER_CPU_VAR(current_task), %rax movq %rsi,%rcx addq %rdx,%rcx jc bad_from_user - cmpq TI_addr_limit(%rax),%rcx + cmpq TASK_addr_limit(%rax),%rcx ja bad_from_user ALTERNATIVE_2 "jmp copy_user_generic_unrolled", \ "jmp copy_user_generic_string", \ diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c index 28a6654f0d08..b6fcb9a9ddbc 100644 --- a/arch/x86/lib/csum-wrappers_64.c +++ b/arch/x86/lib/csum-wrappers_64.c @@ -6,6 +6,7 @@ */ #include #include +#include #include /** diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S index 46668cda4ffd..0ef5128c2de8 100644 --- a/arch/x86/lib/getuser.S +++ b/arch/x86/lib/getuser.S @@ -35,8 +35,8 @@ .text ENTRY(__get_user_1) - GET_THREAD_INFO(%_ASM_DX) - cmp TI_addr_limit(%_ASM_DX),%_ASM_AX + mov PER_CPU_VAR(current_task), %_ASM_DX + cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user ASM_STAC 1: movzbl (%_ASM_AX),%edx @@ -48,8 +48,8 @@ ENDPROC(__get_user_1) ENTRY(__get_user_2) add $1,%_ASM_AX jc bad_get_user - GET_THREAD_INFO(%_ASM_DX) - cmp TI_addr_limit(%_ASM_DX),%_ASM_AX + mov PER_CPU_VAR(current_task), %_ASM_DX + cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user ASM_STAC 2: movzwl -1(%_ASM_AX),%edx @@ -61,8 +61,8 @@ ENDPROC(__get_user_2) ENTRY(__get_user_4) add $3,%_ASM_AX jc bad_get_user - GET_THREAD_INFO(%_ASM_DX) - cmp TI_addr_limit(%_ASM_DX),%_ASM_AX + mov PER_CPU_VAR(current_task), %_ASM_DX + cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user ASM_STAC 3: movl -3(%_ASM_AX),%edx @@ -75,8 +75,8 @@ ENTRY(__get_user_8) #ifdef CONFIG_X86_64 add $7,%_ASM_AX jc bad_get_user - GET_THREAD_INFO(%_ASM_DX) - cmp TI_addr_limit(%_ASM_DX),%_ASM_AX + mov PER_CPU_VAR(current_task), %_ASM_DX + cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user ASM_STAC 4: movq -7(%_ASM_AX),%rdx @@ -86,8 +86,8 @@ ENTRY(__get_user_8) #else add $7,%_ASM_AX jc bad_get_user_8 - GET_THREAD_INFO(%_ASM_DX) - cmp TI_addr_limit(%_ASM_DX),%_ASM_AX + mov PER_CPU_VAR(current_task), %_ASM_DX + cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user_8 ASM_STAC 4: movl -7(%_ASM_AX),%edx diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S index e0817a12d323..c891ece81e5b 100644 --- a/arch/x86/lib/putuser.S +++ b/arch/x86/lib/putuser.S @@ -29,14 +29,14 @@ * as they get called from within inline assembly. 
*/ -#define ENTER GET_THREAD_INFO(%_ASM_BX) +#define ENTER mov PER_CPU_VAR(current_task), %_ASM_BX #define EXIT ASM_CLAC ; \ ret .text ENTRY(__put_user_1) ENTER - cmp TI_addr_limit(%_ASM_BX),%_ASM_CX + cmp TASK_addr_limit(%_ASM_BX),%_ASM_CX jae bad_put_user ASM_STAC 1: movb %al,(%_ASM_CX) @@ -46,7 +46,7 @@ ENDPROC(__put_user_1) ENTRY(__put_user_2) ENTER - mov TI_addr_limit(%_ASM_BX),%_ASM_BX + mov TASK_addr_limit(%_ASM_BX),%_ASM_BX sub $1,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae bad_put_user @@ -58,7 +58,7 @@ ENDPROC(__put_user_2) ENTRY(__put_user_4) ENTER - mov TI_addr_limit(%_ASM_BX),%_ASM_BX + mov TASK_addr_limit(%_ASM_BX),%_ASM_BX sub $3,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae bad_put_user @@ -70,7 +70,7 @@ ENDPROC(__put_user_4) ENTRY(__put_user_8) ENTER - mov TI_addr_limit(%_ASM_BX),%_ASM_BX + mov TASK_addr_limit(%_ASM_BX),%_ASM_BX sub $7,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae bad_put_user diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index 0a42327a59d7..9f760cdcaf40 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -6,7 +6,7 @@ * Copyright 2002 Andi Kleen */ #include -#include +#include /* * Zero Userspace diff --git a/drivers/pnp/isapnp/proc.c b/drivers/pnp/isapnp/proc.c index 5edee645d890..262285e48a09 100644 --- a/drivers/pnp/isapnp/proc.c +++ b/drivers/pnp/isapnp/proc.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include extern struct pnp_protocol isapnp_protocol; diff --git a/lib/bitmap.c b/lib/bitmap.c index c66da508cbf7..eca88087fa8a 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -14,9 +14,9 @@ #include #include #include +#include #include -#include /* * bitmaps provide an array of bits, implemented using an an -- cgit v1.2.3
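Finally, a sketch of the idiom whose backing store this last patch moves: get_fs()/set_fs() now read and write current->thread.addr_limit instead of current_thread_info()->addr_limit, while call sites keep the familiar save/override/restore shape. The function below is hypothetical; only the macros come from the patch:

    #include <linux/uaccess.h>

    static void addr_limit_demo(void)
    {
        mm_segment_t old_fs = get_fs(); /* current->thread.addr_limit */

        /* Widen the limit so uaccess routines accept kernel addresses. */
        set_fs(KERNEL_DS);
        /* ... copy_to_user()/copy_from_user() on a kernel buffer ... */
        set_fs(old_fs);                 /* always restore the saved limit */
    }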