path: root/arch/x86/kernel/init_task_64.c
author    Thomas Gleixner <tglx@linutronix.de>    2007-10-11 11:17:24 +0200
committer Thomas Gleixner <tglx@linutronix.de>    2007-10-11 11:17:24 +0200
commit    250c22777fe1ccd7ac588579a6c16db4c0161cc5 (patch)
tree      55c317efb7d792ec6fdae1d1937c67a502c48dec /arch/x86/kernel/init_task_64.c
parent    2db55d344e529492545cb3b755c7e9ba8e4fa94e (diff)
download  linux-250c22777fe1ccd7ac588579a6c16db4c0161cc5.tar.bz2
x86_64: move kernel
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/init_task_64.c')
-rw-r--r--  arch/x86/kernel/init_task_64.c  54
1 file changed, 54 insertions, 0 deletions
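
Note for context: the file added below statically allocates the boot CPU's thread_info and kernel stack as one object, and its alignment comment refers to the layout sketched here. In this era, <linux/sched.h> defined the union roughly as follows (reproduced for illustration; this definition is not part of the diff):

    /* Sketch of union thread_union as defined in <linux/sched.h> of this
     * era (assumed for illustration; not part of this commit). The
     * thread_info sits at the bottom of the stack, and the two share one
     * THREAD_SIZE block (8192 bytes on x86_64). */
    union thread_union {
            struct thread_info thread_info;
            unsigned long stack[THREAD_SIZE/sizeof(long)];
    };

Because every kernel stack, including this statically allocated one, must be THREAD_SIZE-aligned, the current thread_info can be recovered by masking the low bits of a stack address. That is why the file below places init_thread_union in the dedicated ".data.init_task" section, which the linker script aligns to THREAD_SIZE.
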
diff --git a/arch/x86/kernel/init_task_64.c b/arch/x86/kernel/init_task_64.c
new file mode 100644
index 000000000000..4ff33d4f8551
--- /dev/null
+++ b/arch/x86/kernel/init_task_64.c
@@ -0,0 +1,54 @@
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/fs.h>
+#include <linux/mqueue.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/desc.h>
+
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct mm_struct init_mm = INIT_MM(init_mm);
+
+EXPORT_SYMBOL(init_mm);
+
+/*
+ * Initial thread structure.
+ *
+ * We need to make sure that this is 8192-byte aligned due to the
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry.
+ */
+union thread_union init_thread_union
+ __attribute__((__section__(".data.init_task"))) =
+ { INIT_THREAD_INFO(init_task) };
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+struct task_struct init_task = INIT_TASK(init_task);
+
+EXPORT_SYMBOL(init_task);
+/*
+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
+ * no more per-task TSS's. The TSS is kept cacheline-aligned
+ * so that it is allowed to end up in the .data.cacheline_aligned
+ * section. Since TSS's are completely CPU-local, we want them
+ * on exact cacheline boundaries, to eliminate cacheline ping-pong.
+ */
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
+
+/* Copies of the original ist values from the tss are only accessed
+ * during debugging; no special alignment is required.
+ */
+DEFINE_PER_CPU(struct orig_ist, orig_ist);
+
+#define ALIGN_TO_4K __attribute__((section(".data.init_task")))
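
Note for context: the DEFINE_PER_CPU_SHARED_ALIGNED line above is what keeps each CPU's TSS on its own cacheline. In this era the macro expanded roughly as follows (a simplified sketch based on include/linux/percpu.h; treat the exact spelling as an assumption):

    /* Simplified sketch of what
     *   DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss)
     * expands to (assumed for illustration; not part of this diff).
     * The variable lands in a per-CPU section reserved for shared data
     * and is padded to a cacheline boundary on SMP builds. */
    __attribute__((__section__(".data.percpu.shared_aligned")))
    struct tss_struct per_cpu__init_tss ____cacheline_aligned_in_smp = INIT_TSS;

At boot, setup_per_cpu_areas() hands each CPU its own copy of the per-CPU data, so one CPU's TSS updates never dirty a cacheline that another CPU has cached; that is the ping-pong the comment above refers to.
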