author		Arnd Bergmann <arnd@arndb.de>	2010-07-04 00:02:26 +0200
committer	Frederic Weisbecker <fweisbec@gmail.com>	2010-07-09 15:40:32 +0200
commit		5e3d20a68f63fc5a310687d81956c3b96e488b84 (patch)
tree		576da563442772fe7a492589c1e08c422ff02b1c
parent		815c4163b6c8ebf8152f42b0a5fd015cfdcedc78 (diff)
download	linux-5e3d20a68f63fc5a310687d81956c3b96e488b84.tar.bz2
init: Remove the BKL from startup code
I have shown by code review that no driver takes the BKL at init time any more, so whatever the init code was locking against is no longer there and it is now safe to remove the BKL there.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
-rw-r--r--	init/main.c	5
-rw-r--r--	kernel/trace/trace.c	8
2 files changed, 0 insertions(+), 13 deletions(-)
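For context, the big kernel lock was taken and released with lock_kernel()/unlock_kernel(), and before this patch the boot path bracketed early setup and the init thread with those calls; that bracketing is what the hunks below remove. A rough sketch of the pre-patch shape, reconstructed from the diff and heavily simplified (illustrative only, not the literal kernel source):

	/* Illustrative sketch: pre-patch BKL bracketing in the boot path,
	 * reconstructed from the hunks below.
	 */
	asmlinkage void __init start_kernel(void)
	{
		lock_kernel();		/* taken before the bulk of early setup */
		tick_init();
		boot_cpu_init();
		/* ... remaining early init; rest_init() later drops the BKL
		 * with unlock_kernel() before the boot CPU goes idle ... */
	}

	static int __init kernel_init(void *unused)
	{
		wait_for_completion(&kthreadd_done);
		lock_kernel();		/* re-taken by the init thread ... */
		/* ... and dropped again in init_post() before freeing
		 * the __init memory and exec'ing userspace init */
		return 0;
	}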
diff --git a/init/main.c b/init/main.c
index a42fdf4aeba9..9b34c1b8d76c 100644
--- a/init/main.c
+++ b/init/main.c
@@ -444,7 +444,6 @@ static noinline void __init_refok rest_init(void)
kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
rcu_read_unlock();
complete(&kthreadd_done);
- unlock_kernel();
/*
* The boot idle thread must execute schedule()
@@ -565,7 +564,6 @@ asmlinkage void __init start_kernel(void)
* Interrupts are still disabled. Do necessary setups, then
* enable them
*/
- lock_kernel();
tick_init();
boot_cpu_init();
page_address_init();
@@ -829,7 +827,6 @@ static noinline int init_post(void)
/* need to finish all async __init code before freeing the memory */
async_synchronize_full();
free_initmem();
- unlock_kernel();
mark_rodata_ro();
system_state = SYSTEM_RUNNING;
numa_default_policy();
@@ -869,8 +866,6 @@ static int __init kernel_init(void * unused)
* Wait until kthreadd is all set-up.
*/
wait_for_completion(&kthreadd_done);
- lock_kernel();
-
/*
* init can allocate pages on any node
*/
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 086d36316805..8047ca5a8237 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -734,13 +734,6 @@ __acquires(kernel_lock)
return -1;
}
- /*
- * When this gets called we hold the BKL which means that
- * preemption is disabled. Various trace selftests however
- * need to disable and enable preemption for successful tests.
- * So we drop the BKL here and grab it after the tests again.
- */
- unlock_kernel();
mutex_lock(&trace_types_lock);
tracing_selftest_running = true;
@@ -822,7 +815,6 @@ __acquires(kernel_lock)
#endif
out_unlock:
- lock_kernel();
return ret;
}
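The trace.c hunks remove a workaround that only made sense while register_tracer() could be called with the BKL held: holding the BKL implies preemption is disabled, and the tracer selftests need to enable and disable preemption, so the function dropped the lock around the selftests and re-acquired it before returning. A simplified sketch of that pre-patch pattern (illustrative only; lock_kernel()/unlock_kernel() are the old BKL primitives this series removes):

	/* Illustrative sketch of the pre-patch register_tracer() locking dance. */
	int register_tracer(struct tracer *type)
	{
		int ret = 0;

		unlock_kernel();	/* BKL on entry disables preemption;
					 * the selftests need to toggle it */
		mutex_lock(&trace_types_lock);
		tracing_selftest_running = true;
		/* ... add the tracer, run its selftest with preemption available ... */
		tracing_selftest_running = false;
		mutex_unlock(&trace_types_lock);
		lock_kernel();		/* hand the BKL back to the caller */
		return ret;
	}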