author     Peter Zijlstra <peterz@infradead.org>  2015-09-28 18:05:34 +0200
committer  Ingo Molnar <mingo@kernel.org>  2015-10-06 17:08:15 +0200
commit     fc13aebab7d8f0d19d557c721a0f25cdf7ae905c (patch)
tree       c744211cc93f060f5ddb732288d6aef10b033baa /kernel/sched
parent     609ca066386b2e64d4c0b0f55da327654962a0c9 (diff)
download   linux-fc13aebab7d8f0d19d557c721a0f25cdf7ae905c.tar.bz2
sched/core: Add preempt argument to __schedule()
There is only a single PREEMPT_ACTIVE use in the regular __schedule() path and that is to circumvent the task->state check. Since the code setting PREEMPT_ACTIVE is the immediate caller of __schedule(), we can replace this with a function argument.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
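The pattern, reduced to a standalone sketch (hypothetical names, not the kernel code itself): instead of the callee peeking at a flag word its caller set, the caller passes what it already knows as an explicit argument.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for prev->state: non-zero means "wants to block". */
static int prev_state = 1;

/*
 * The caller states explicitly whether this is a preemption; the callee
 * no longer inspects PREEMPT_ACTIVE-style shared state set by its caller.
 */
static void sketch_schedule(bool preempt)
{
	if (!preempt && prev_state)
		printf("voluntary switch: task blocks\n");
	else
		printf("involuntary preemption: task stays runnable\n");
}

int main(void)
{
	sketch_schedule(false);	/* the schedule() path    */
	sketch_schedule(true);	/* preempt_schedule*() paths */
	return 0;
}

The patch below is this transformation applied to __schedule() and its four call sites.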
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8d8722b84dee..0a71f89fb3fc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3056,7 +3056,7 @@ again:
  *
  * WARNING: must be called with preemption disabled!
  */
-static void __sched __schedule(void)
+static void __sched __schedule(bool preempt)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
@@ -3096,7 +3096,7 @@ static void __sched __schedule(void)
 	rq->clock_skip_update <<= 1; /* promote REQ to ACT */
 
 	switch_count = &prev->nivcsw;
-	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+	if (!preempt && prev->state) {
 		if (unlikely(signal_pending_state(prev->state, prev))) {
 			prev->state = TASK_RUNNING;
 		} else {
@@ -3161,7 +3161,7 @@ asmlinkage __visible void __sched schedule(void)
 	sched_submit_work(tsk);
 	do {
 		preempt_disable();
-		__schedule();
+		__schedule(false);
 		sched_preempt_enable_no_resched();
 	} while (need_resched());
 }
@@ -3202,7 +3202,7 @@ static void __sched notrace preempt_schedule_common(void)
 {
 	do {
 		preempt_active_enter();
-		__schedule();
+		__schedule(true);
 		preempt_active_exit();
 
 		/*
@@ -3267,7 +3267,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
 		 * an infinite recursion.
 		 */
 		prev_ctx = exception_enter();
-		__schedule();
+		__schedule(true);
 		exception_exit(prev_ctx);
 
 		barrier();
@@ -3296,7 +3296,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
 	do {
 		preempt_active_enter();
 		local_irq_enable();
-		__schedule();
+		__schedule(true);
 		local_irq_disable();
 		preempt_active_exit();
 	} while (need_resched());