path: root/kernel/fork.c
author     Suresh Siddha <suresh.b.siddha@intel.com>  2008-03-10 15:28:04 -0700
committer  Ingo Molnar <mingo@elte.hu>                2008-04-19 19:19:55 +0200
commit     61c4628b538608c1a85211ed8438136adfeb9a95 (patch)
tree       290a695299a363153bc692e6d705ac680d64359e /kernel/fork.c
parent     fa5c4639419668cbb18ca3d20c1253559a3b43ae (diff)
download   linux-61c4628b538608c1a85211ed8438136adfeb9a95.tar.bz2
x86, fpu: split FPU state from task struct - v5
Split the FPU save area from the task struct. This allows easy migration
of FPU context, and it's generally cleaner. It also allows the following
two optimizations:

1) only allocate the save area when the application actually uses the FPU,
   i.e. in the first lazy FPU trap. This could save memory for non-FPU-using
   apps. The next patch does this lazy allocation.

2) allocate the right size for the actual CPU rather than 512 bytes always.
   Patches enabling xsave/xrstor support (coming shortly) will take
   advantage of this.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
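To make the intent of the split concrete, here is a minimal user-space sketch of the end state the series works toward: the save area lives behind a pointer in the task structure and is allocated only on first FPU use. The names (struct fpu_state, struct task, task_use_fpu) are illustrative only, not the kernel's identifiers, and the lazy allocation itself lands in the next patch, not in this one.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the FPU save area that used to be embedded
 * in task_struct; once it is a separate object its size can also match
 * what the running CPU actually needs. */
struct fpu_state {
        unsigned char regs[512];
};

struct task {
        const char *name;
        struct fpu_state *fpu;  /* NULL until the task first uses the FPU */
};

/* Models the "first lazy FPU trap": allocate the save area on demand. */
static int task_use_fpu(struct task *t)
{
        if (!t->fpu) {
                t->fpu = calloc(1, sizeof(*t->fpu));
                if (!t->fpu)
                        return -1;
                printf("%s: allocated FPU state on first use\n", t->name);
        }
        /* ... save/restore of the real registers would happen here ... */
        return 0;
}

int main(void)
{
        struct task a = { .name = "fpu-user" };
        struct task b = { .name = "integer-only" };

        task_use_fpu(&a);       /* pays for the save area */
        /* b never traps into the FPU path, so b.fpu stays NULL */
        printf("a.fpu=%p b.fpu=%p\n", (void *)a.fpu, (void *)b.fpu);
        free(a.fpu);
        return 0;
}

A task that never touches the FPU keeps a NULL pointer and never pays for the save area, which is the memory saving the changelog describes.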
Diffstat (limited to 'kernel/fork.c')
-rw-r--r--   kernel/fork.c | 31
1 files changed, 25 insertions(+), 6 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 9c042f901570..44a18192c420 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -132,6 +132,10 @@ void __put_task_struct(struct task_struct *tsk)
                 free_task(tsk);
 }
+void __attribute__((weak)) arch_task_cache_init(void)
+{
+}
+
 void __init fork_init(unsigned long mempages)
 {
 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
@@ -144,6 +148,9 @@ void __init fork_init(unsigned long mempages)
                         ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
 #endif
+        /* do the arch specific task caches init */
+        arch_task_cache_init();
+
         /*
          * The default maximum number of threads is set to a safe
          * value: the thread structures can take up at most half
@@ -163,6 +170,13 @@ void __init fork_init(unsigned long mempages)
                 init_task.signal->rlim[RLIMIT_NPROC];
 }
+int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
+                                               struct task_struct *src)
+{
+        *dst = *src;
+        return 0;
+}
+
 static struct task_struct *dup_task_struct(struct task_struct *orig)
 {
         struct task_struct *tsk;
@@ -181,15 +195,15 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
                 return NULL;
         }
-        *tsk = *orig;
+        err = arch_dup_task_struct(tsk, orig);
+        if (err)
+                goto out;
+
         tsk->stack = ti;
         err = prop_local_init_single(&tsk->dirties);
-        if (err) {
-                free_thread_info(ti);
-                free_task_struct(tsk);
-                return NULL;
-        }
+        if (err)
+                goto out;
         setup_thread_stack(tsk, orig);
@@ -205,6 +219,11 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 #endif
         tsk->splice_pipe = NULL;
         return tsk;
+
+out:
+        free_thread_info(ti);
+        free_task_struct(tsk);
+        return NULL;
 }
 #ifdef CONFIG_MMU
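The two hooks added above, arch_task_cache_init() and arch_dup_task_struct(), are deliberately weak symbols: generic code keeps working with the plain structure copy, while an architecture can ship a stronger definition that also sets up and duplicates its separately allocated FPU area. The stand-alone sketch below shows only the weak-default mechanism, using a toy task_struct; the real x86 override lives in arch code outside this diff and is not reproduced here.

#include <stdio.h>
#include <string.h>

/* Toy stand-in for the real structure; only what the demo needs. */
struct task_struct {
        int pid;
        char comm[16];
};

/* Generic default, mirroring the hunk above: a plain structure copy.
 * Because the symbol is weak, an architecture that defines a non-weak
 * arch_dup_task_struct() in its own object file wins at link time and
 * can duplicate extra per-arch state (such as a separately allocated
 * FPU save area) at the same point. */
int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
                                               struct task_struct *src)
{
        *dst = *src;
        return 0;
}

int main(void)
{
        struct task_struct parent = { .pid = 1 }, child;

        strcpy(parent.comm, "swapper");
        if (arch_dup_task_struct(&child, &parent) == 0)
                printf("duplicated pid %d (%s)\n", child.pid, child.comm);
        return 0;
}

Linking in a second object file that provides a non-weak arch_dup_task_struct() makes the linker prefer that definition, which is how an architecture overrides the fork.c default without any #ifdef in generic code.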