author    | Peter Zijlstra <peterz@infradead.org> | 2013-11-28 14:26:41 +0100
committer | Ingo Molnar <mingo@kernel.org> | 2013-12-11 15:52:32 +0100
commit    | ba1f14fbe70965ae0fb1655a5275a62723f65b77 (patch)
tree      | 412534b605bf33e5e930172686dd4f31d157fc51 /include/asm-generic
parent    | 8e8339a3a1069141985daaa2521ba304509ddecd (diff)
download  | linux-ba1f14fbe70965ae0fb1655a5275a62723f65b77.tar.bz2
sched: Remove PREEMPT_NEED_RESCHED from generic code
While hunting a preemption issue with Alexander, Ben noticed that the
current generic PREEMPT_NEED_RESCHED code is horribly broken for
load-store architectures.
We currently rely on the IPI to fold TIF_NEED_RESCHED into
PREEMPT_NEED_RESCHED, but when this IPI lands after the preempt-count
has already been loaded but before it is stored back, the store erases
the PREEMPT_NEED_RESCHED change.
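
As an illustration of that window (a sketch with hypothetical stand-in names, not kernel code), assume the old scheme in which the IPI folds the flag by clearing the inverted PREEMPT_NEED_RESCHED bit inside the thread_info preempt count:

#define PREEMPT_NEED_RESCHED	0x80000000	/* inverted bit: set means "no resched pending" */

static int preempt_count_word = PREEMPT_NEED_RESCHED;	/* stand-in for ti->preempt_count */

static void preempt_count_add(int val)
{
	int old = preempt_count_word;	/* load */
	/*
	 * If the IPI lands here and folds TIF_NEED_RESCHED by clearing
	 * PREEMPT_NEED_RESCHED in preempt_count_word ...
	 */
	preempt_count_word = old + val;	/* ... this store writes back the stale
					 * 'old' value with the bit still set,
					 * and the fold is lost. */
}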
The preempt-count itself only works on load-store archs because
interrupts are assumed to be completely balanced with respect to their
preempt_count fiddling: the preempt_count value loaded before the
interrupt still matches the state after the interrupt, so nothing gets lost.
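
For contrast, a sketch of why that same interleaving is harmless for the bare counter (again hypothetical names, not kernel code): an interrupt's increments and decrements cancel out, so the value the interrupted task loaded is still the value in memory when it stores.

static int preempt_count_word;		/* stand-in for ti->preempt_count */

static void nested_interrupt(void)
{
	preempt_count_word += 1;	/* entry: bump the count */
	/* ... handler runs, any further fiddling is paired ... */
	preempt_count_word -= 1;	/* exit: the count is back to the value the
					 * interrupted load observed, so the
					 * interrupted store remains correct */
}

The one-way fold of PREEMPT_NEED_RESCHED has no such matching undo, so it is exactly the kind of change the interrupted store can destroy.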
This patch removes the PREEMPT_NEED_RESCHED usage from generic code and
pushes it into x86 arch code; the generic code goes back to relying on
TIF_NEED_RESCHED.
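
The x86 side can keep the folded bit because its per-cpu count is updated with single read-modify-write instructions, leaving no load/store window for the IPI to hit. A rough sketch of that idea (approximate, not the exact arch/x86 source; the generic side is shown in the diff below):

#include <linux/percpu.h>	/* DECLARE_PER_CPU(), __this_cpu_and() */

#define PREEMPT_NEED_RESCHED	0x80000000

DECLARE_PER_CPU(int, __preempt_count);

static __always_inline void set_preempt_need_resched(void)
{
	/* a single "andl" on the per-cpu word folds the flag without
	 * a separate load/store pair an interrupt could split */
	__this_cpu_and(__preempt_count, ~PREEMPT_NEED_RESCHED);
}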
Boot tested on x86_64 and compile tested on ppc64.
Reported-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Reported-and-Tested-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20131128132641.GP10022@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/asm-generic')
-rw-r--r-- | include/asm-generic/preempt.h | 35
1 file changed, 11 insertions(+), 24 deletions(-)
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index ddf2b420ac8f..1cd3f5d767a8 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -3,13 +3,11 @@
 
 #include <linux/thread_info.h>
 
-/*
- * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
- * that think a non-zero value indicates we cannot preempt.
- */
+#define PREEMPT_ENABLED	(0)
+
 static __always_inline int preempt_count(void)
 {
-	return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED;
+	return current_thread_info()->preempt_count;
 }
 
 static __always_inline int *preempt_count_ptr(void)
@@ -17,11 +15,6 @@ static __always_inline int *preempt_count_ptr(void)
 	return &current_thread_info()->preempt_count;
 }
 
-/*
- * We now loose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the
- * alternative is loosing a reschedule. Better schedule too often -- also this
- * should be a very rare operation.
- */
 static __always_inline void preempt_count_set(int pc)
 {
 	*preempt_count_ptr() = pc;
@@ -41,28 +34,17 @@ static __always_inline void preempt_count_set(int pc)
 	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
 } while (0)
 
-/*
- * We fold the NEED_RESCHED bit into the preempt count such that
- * preempt_enable() can decrement and test for needing to reschedule with a
- * single instruction.
- *
- * We invert the actual bit, so that when the decrement hits 0 we know we both
- * need to resched (the bit is cleared) and can resched (no preempt count).
- */
-
 static __always_inline void set_preempt_need_resched(void)
 {
-	*preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
 }
 
 static __always_inline void clear_preempt_need_resched(void)
 {
-	*preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
 }
 
 static __always_inline bool test_preempt_need_resched(void)
 {
-	return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
+	return false;
 }
 
 /*
@@ -81,7 +63,12 @@ static __always_inline void __preempt_count_sub(int val)
 
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
-	return !--*preempt_count_ptr();
+	/*
+	 * Because of load-store architectures cannot do per-cpu atomic
+	 * operations; we cannot use PREEMPT_NEED_RESCHED because it might get
+	 * lost.
+	 */
+	return !--*preempt_count_ptr() && tif_need_resched();
 }
 
 /*
@@ -89,7 +76,7 @@ static __always_inline bool __preempt_count_dec_and_test(void)
  */
 static __always_inline bool should_resched(void)
 {
-	return unlikely(!*preempt_count_ptr());
+	return unlikely(!preempt_count() && tif_need_resched());
 }
 
 #ifdef CONFIG_PREEMPT