author		Ingo Molnar <mingo@elte.hu>	2008-01-25 21:08:33 +0100
committer	Ingo Molnar <mingo@elte.hu>	2008-01-25 21:08:33 +0100
commit		6478d8800b75253b2a934ddcb734e13ade023ad0 (patch)
tree		df4017269b8755735578445c0a8a9e8b3b2615e9 /kernel
parent		58b8a73ab8becfcaea84abc2a06038281efa4c8a (diff)
download	linux-6478d8800b75253b2a934ddcb734e13ade023ad0.tar.bz2
sched: remove the !PREEMPT_BKL code

remove the !PREEMPT_BKL code. this removes 160 lines of legacy code.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
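The pattern the patch makes unconditional is the save/restore of current->lock_depth around schedule(): ->lock_depth counts BKL (Big Kernel Lock) nesting, with -1 meaning "not held", and hiding it across schedule() keeps the scheduler from auto-releasing the BKL semaphore. A minimal sketch of that pattern as a hypothetical stand-alone helper (simplified from the diff below, not a drop-in kernel function):

#include <linux/sched.h>

/*
 * Hypothetical helper (not part of the patch): the lock_depth
 * save/restore that this commit compiles in unconditionally.
 * task->lock_depth counts BKL nesting; -1 means "BKL not held".
 * Hiding the depth across schedule() prevents schedule() from
 * auto-releasing the BKL semaphore on the task's behalf.
 */
static void preempt_bkl_sketch(struct task_struct *task)
{
        int saved_lock_depth;

        saved_lock_depth = task->lock_depth;    /* remember BKL nesting */
        task->lock_depth = -1;                  /* hide the BKL from schedule() */
        schedule();                             /* may switch to another task */
        task->lock_depth = saved_lock_depth;    /* restore nesting level */
}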
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/Kconfig.preempt	 4
-rw-r--r--	kernel/sched.c		19
2 files changed, 3 insertions(+), 20 deletions(-)
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 4420ef427f83..0669b70fa6a3 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -52,10 +52,6 @@ config PREEMPT
 
 endchoice
 
-config PREEMPT_BKL
-        def_bool y
-        depends on SMP || PREEMPT
-
 config RCU_TRACE
         bool "Enable tracing for RCU - currently stats in debugfs"
         select DEBUG_FS
diff --git a/kernel/sched.c b/kernel/sched.c
index 22712b2e058a..629614ad0358 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3955,10 +3955,9 @@ EXPORT_SYMBOL(schedule);
 asmlinkage void __sched preempt_schedule(void)
 {
         struct thread_info *ti = current_thread_info();
-#ifdef CONFIG_PREEMPT_BKL
         struct task_struct *task = current;
         int saved_lock_depth;
-#endif
+
         /*
          * If there is a non-zero preempt_count or interrupts are disabled,
          * we do not want to preempt the current task. Just return..
@@ -3974,14 +3973,10 @@ asmlinkage void __sched preempt_schedule(void)
          * clear ->lock_depth so that schedule() doesnt
          * auto-release the semaphore:
          */
-#ifdef CONFIG_PREEMPT_BKL
         saved_lock_depth = task->lock_depth;
         task->lock_depth = -1;
-#endif
         schedule();
-#ifdef CONFIG_PREEMPT_BKL
         task->lock_depth = saved_lock_depth;
-#endif
         sub_preempt_count(PREEMPT_ACTIVE);
 
         /*
@@ -4002,10 +3997,9 @@ EXPORT_SYMBOL(preempt_schedule);
 asmlinkage void __sched preempt_schedule_irq(void)
 {
         struct thread_info *ti = current_thread_info();
-#ifdef CONFIG_PREEMPT_BKL
         struct task_struct *task = current;
         int saved_lock_depth;
-#endif
+
         /* Catch callers which need to be fixed */
         BUG_ON(ti->preempt_count || !irqs_disabled());
 
@@ -4017,16 +4011,12 @@ asmlinkage void __sched preempt_schedule_irq(void)
          * clear ->lock_depth so that schedule() doesnt
          * auto-release the semaphore:
          */
-#ifdef CONFIG_PREEMPT_BKL
         saved_lock_depth = task->lock_depth;
         task->lock_depth = -1;
-#endif
         local_irq_enable();
         schedule();
         local_irq_disable();
-#ifdef CONFIG_PREEMPT_BKL
         task->lock_depth = saved_lock_depth;
-#endif
         sub_preempt_count(PREEMPT_ACTIVE);
 
         /*
@@ -5241,11 +5231,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
         spin_unlock_irqrestore(&rq->lock, flags);
 
         /* Set the preempt count _outside_ the spinlocks! */
-#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
-        task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
-#else
         task_thread_info(idle)->preempt_count = 0;
-#endif
+
         /*
          * The idle tasks have their own, simple scheduling class:
          */
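
The init_idle() hunk drops the last !PREEMPT_BKL special case: once the spinlock-based BKL is gone, a freshly initialized idle task can always start with a preempt_count of 0. A hypothetical helper, for illustration only, showing what the removed #if branch computed:

#include <linux/sched.h>

/*
 * Hypothetical helper, for illustration only: the initial
 * preempt_count that init_idle() assigned before this commit.
 * Under PREEMPT without PREEMPT_BKL, an idle task already
 * holding the BKL (lock_depth >= 0) had to start with
 * preempt_count = 1 so it could not be preempted while the
 * spinlock-based BKL was held; with PREEMPT_BKL it is always 0.
 */
static int idle_initial_preempt_count(const struct task_struct *idle)
{
#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
        return idle->lock_depth >= 0;   /* 1 if the idle task holds the BKL */
#else
        return 0;                       /* PREEMPT_BKL: always start at zero */
#endif
}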