From a9241ea5fd709fc935dade130f4e3b2612bbe9e3 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 6 Feb 2015 15:01:58 -0500 Subject: x86/fpu: Don't reset thread.fpu_counter The "else" branch clears ->fpu_counter as a remnant of the lazy FPU usage counting: e07e23e1fd30 ("[PATCH] non lazy "sleazy" fpu implementation"). However, switch_fpu_prepare() does this now, so the "else" branch is superfluous. If we do use_eager_fpu(), then this has no effect. Otherwise, if we actually wanted to prevent FPU preload after the context switch, we would need to reset it unconditionally, even if __thread_has_fpu(). Signed-off-by: Oleg Nesterov Signed-off-by: Rik van Riel Cc: Linus Torvalds Link: http://lkml.kernel.org/r/1423252925-14451-2-git-send-email-riel@redhat.com Signed-off-by: Borislav Petkov --- arch/x86/kernel/i387.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index a9a4229f6161..4d0db9ed58e0 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c @@ -108,8 +108,7 @@ void unlazy_fpu(struct task_struct *tsk) if (__thread_has_fpu(tsk)) { __save_init_fpu(tsk); __thread_fpu_end(tsk); - } else - tsk->thread.fpu_counter = 0; + } preempt_enable(); } EXPORT_SYMBOL(unlazy_fpu); -- cgit v1.2.3 From 1a2a7f4ec8e3a7ac582dac4d01fcc7e8acd3bb30 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 6 Feb 2015 15:01:59 -0500 Subject: x86/fpu: Don't do __thread_fpu_end() if use_eager_fpu() unlazy_fpu()->__thread_fpu_end() doesn't look right if use_eager_fpu(). Unconditional __thread_fpu_end() is only correct if we know that this thread can't return to user mode and use the FPU. Fortunately, it has only two callers. fpu_copy() checks use_eager_fpu(), and init_fpu(current) can only be called by the coredumping thread via regset->get(). But it is exported to modules, and IMO this should be fixed anyway. And if we check use_eager_fpu() we can use __save_fpu() like fpu_copy() and save_init_fpu() do. - It seems that even the !use_eager_fpu() case doesn't need the unconditional __thread_fpu_end(); we only need it if __save_init_fpu() returns 0. - It is still not clear to me whether __save_init_fpu() can safely nest with another save + restore from __kernel_fpu_begin(). If not, we can use kernel_fpu_disable() to fix the race. Signed-off-by: Oleg Nesterov Signed-off-by: Rik van Riel Cc: Linus Torvalds Link: http://lkml.kernel.org/r/1423252925-14451-3-git-send-email-riel@redhat.com Signed-off-by: Borislav Petkov --- arch/x86/kernel/i387.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index 4d0db9ed58e0..f3ced6f4b2b6 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c @@ -106,8 +106,12 @@ void unlazy_fpu(struct task_struct *tsk) { preempt_disable(); if (__thread_has_fpu(tsk)) { - __save_init_fpu(tsk); - __thread_fpu_end(tsk); + if (use_eager_fpu()) { + __save_fpu(tsk); + } else { + __save_init_fpu(tsk); + __thread_fpu_end(tsk); + } } preempt_enable(); } -- cgit v1.2.3 From 08a744c6bfded3d5fa66f94263f81773226113d1 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 6 Feb 2015 15:02:00 -0500 Subject: x86/fpu: Change math_error() to use unlazy_fpu(), kill (now) unused save_init_fpu() math_error() calls save_init_fpu() after conditional_sti(), which means that the caller can be preempted. If !use_eager_fpu() we can hit the WARN_ON_ONCE(!__thread_has_fpu(tsk)) and/or save the wrong FPU state.
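For reference, unlazy_fpu() now reads as follows, reconstructed by combining the two hunks above (an illustrative composite, not a verbatim quote of the tree):

void unlazy_fpu(struct task_struct *tsk)
{
	preempt_disable();
	if (__thread_has_fpu(tsk)) {
		if (use_eager_fpu()) {
			__save_fpu(tsk);
		} else {
			__save_init_fpu(tsk);
			__thread_fpu_end(tsk);
		}
	}
	preempt_enable();
}
EXPORT_SYMBOL(unlazy_fpu);

Unlike save_init_fpu(), it disables preemption around the save and quietly does nothing when the task no longer owns the FPU, which is exactly what the preemptible math_error() path needs.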
Change math_error() to use unlazy_fpu() and kill save_init_fpu(). Signed-off-by: Oleg Nesterov Signed-off-by: Rik van Riel Cc: Linus Torvalds Link: http://lkml.kernel.org/r/1423252925-14451-4-git-send-email-riel@redhat.com Signed-off-by: Borislav Petkov --- arch/x86/include/asm/fpu-internal.h | 18 ------------------ arch/x86/kernel/traps.c | 2 +- 2 files changed, 1 insertion(+), 19 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index e97622f57722..02f2e0817918 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h @@ -517,24 +517,6 @@ static inline void __save_fpu(struct task_struct *tsk) fpu_fxsave(&tsk->thread.fpu); } -/* - * These disable preemption on their own and are safe - */ -static inline void save_init_fpu(struct task_struct *tsk) -{ - WARN_ON_ONCE(!__thread_has_fpu(tsk)); - - if (use_eager_fpu()) { - __save_fpu(tsk); - return; - } - - preempt_disable(); - __save_init_fpu(tsk); - __thread_fpu_end(tsk); - preempt_enable(); -} - /* * i387 state interaction */ diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 88900e288021..9d889f74e806 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -663,7 +663,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr) /* * Save the info for the exception handler and clear the error. */ - save_init_fpu(task); + unlazy_fpu(task); task->thread.trap_nr = trapnr; task->thread.error_code = error_code; info.si_signo = SIGFPE; -- cgit v1.2.3 From 1c927eea4cad83c439cb51e9c96ad19cb005157d Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Fri, 6 Feb 2015 15:02:01 -0500 Subject: x86/fpu: Move lazy restore functions up a few lines We need another lazy-restore-related function that will be called from a function above where the lazy-restore functions are now. It would be nice to keep all three functions grouped together. Signed-off-by: Rik van Riel Cc: Linus Torvalds Cc: Oleg Nesterov Link: http://lkml.kernel.org/r/1423252925-14451-5-git-send-email-riel@redhat.com Signed-off-by: Borislav Petkov --- arch/x86/include/asm/fpu-internal.h | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index 02f2e0817918..217d6d7b9cb0 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h @@ -67,6 +67,24 @@ extern void finit_soft_fpu(struct i387_soft_struct *soft); static inline void finit_soft_fpu(struct i387_soft_struct *soft) {} #endif +/* + * Must be run with preemption disabled: this clears the fpu_owner_task, + * on this CPU. + * + * This will disable any lazy FPU state restore of the current FPU state, + * but if the current thread owns the FPU, it will still be saved by. + */ +static inline void __cpu_disable_lazy_restore(unsigned int cpu) +{ + per_cpu(fpu_owner_task, cpu) = NULL; +} + +static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu) +{ + return new == this_cpu_read_stable(fpu_owner_task) && + cpu == new->thread.fpu.last_cpu; +} + static inline int is_ia32_compat_frame(void) { return config_enabled(CONFIG_IA32_EMULATION) && @@ -398,24 +416,6 @@ static inline void drop_init_fpu(struct task_struct *tsk) */ typedef struct { int preload; } fpu_switch_t; -/* - * Must be run with preemption disabled: this clears the fpu_owner_task, * on this CPU.
- * - * This will disable any lazy FPU state restore of the current FPU state, - * but if the current thread owns the FPU, it will still be saved by. - */ -static inline void __cpu_disable_lazy_restore(unsigned int cpu) -{ - per_cpu(fpu_owner_task, cpu) = NULL; -} - -static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu) -{ - return new == this_cpu_read_stable(fpu_owner_task) && - cpu == new->thread.fpu.last_cpu; -} - static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu) { fpu_switch_t fpu; -- cgit v1.2.3 From 33e03dedd759cc9396252d9641b25d01909a26bb Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Fri, 6 Feb 2015 15:02:02 -0500 Subject: x86/fpu: Introduce task_disable_lazy_fpu_restore() helper Currently there are a few magic assignments sprinkled through the code that disable lazy FPU state restoring, some more effective than others, and all equally mystifying. It would be easier to have a helper to explicitly disable lazy FPU state restoring for a task. Signed-off-by: Rik van Riel Cc: Linus Torvalds Cc: Oleg Nesterov Link: http://lkml.kernel.org/r/1423252925-14451-6-git-send-email-riel@redhat.com Signed-off-by: Borislav Petkov --- arch/x86/include/asm/fpu-internal.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index 217d6d7b9cb0..9c27f44e1c5c 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h @@ -79,6 +79,16 @@ static inline void __cpu_disable_lazy_restore(unsigned int cpu) per_cpu(fpu_owner_task, cpu) = NULL; } +/* + * Used to indicate that the FPU state in memory is newer than the FPU + * state in registers, and the FPU state should be reloaded next time the + * task is run. Only safe on the current task, or non-running tasks. + */ +static inline void task_disable_lazy_fpu_restore(struct task_struct *tsk) +{ + tsk->thread.fpu.last_cpu = ~0; +} + static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu) { return new == this_cpu_read_stable(fpu_owner_task) && -- cgit v1.2.3 From 1361ef29c7e49ae7cf37220c25fac1904b77f71a Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Fri, 6 Feb 2015 15:02:03 -0500 Subject: x86/fpu: Use an explicit if/else in switch_fpu_prepare() Use an explicit if/else branch after __save_init_fpu(old) in switch_fpu_prepare(). This makes substituting the assignment with a call in task_disable_lazy_fpu_restore() in the next patch easier to review. Signed-off-by: Rik van Riel Cc: Linus Torvalds Cc: Oleg Nesterov Link: http://lkml.kernel.org/r/1423252925-14451-7-git-send-email-riel@redhat.com [ Space out stuff for more readability. ] Signed-off-by: Borislav Petkov --- arch/x86/include/asm/fpu-internal.h | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index 9c27f44e1c5c..04c2807aab66 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h @@ -434,13 +434,17 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta * If the task has used the math, pre-load the FPU on xsave processors * or if the past 5 consecutive context-switches used math. 
*/ - fpu.preload = tsk_used_math(new) && (use_eager_fpu() || - new->thread.fpu_counter > 5); + fpu.preload = tsk_used_math(new) && + (use_eager_fpu() || new->thread.fpu_counter > 5); + if (__thread_has_fpu(old)) { if (!__save_init_fpu(old)) - cpu = ~0; - old->thread.fpu.last_cpu = cpu; - old->thread.fpu.has_fpu = 0; /* But leave fpu_owner_task! */ + old->thread.fpu.last_cpu = ~0; + else + old->thread.fpu.last_cpu = cpu; + + /* But leave fpu_owner_task! */ + old->thread.fpu.has_fpu = 0; /* Don't change CR0.TS if we just switch! */ if (fpu.preload) { -- cgit v1.2.3 From 6a5fe8952bd676baf382d14df21e7b32b5d8943e Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Fri, 6 Feb 2015 15:02:04 -0500 Subject: x86/fpu: Use task_disable_lazy_fpu_restore() helper Replace magic assignments of fpu.last_cpu = ~0 with more explicit task_disable_lazy_fpu_restore() calls. Signed-off-by: Rik van Riel Cc: Oleg Nesterov Cc: Linus Torvalds Link: http://lkml.kernel.org/r/1423252925-14451-8-git-send-email-riel@redhat.com Signed-off-by: Borislav Petkov --- arch/x86/include/asm/fpu-internal.h | 4 ++-- arch/x86/kernel/i387.c | 2 +- arch/x86/kernel/process.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index 04c2807aab66..e5f8f8eaf225 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h @@ -439,7 +439,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta if (__thread_has_fpu(old)) { if (!__save_init_fpu(old)) - old->thread.fpu.last_cpu = ~0; + task_disable_lazy_fpu_restore(old); else old->thread.fpu.last_cpu = cpu; @@ -455,7 +455,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta stts(); } else { old->thread.fpu_counter = 0; - old->thread.fpu.last_cpu = ~0; + task_disable_lazy_fpu_restore(old); if (fpu.preload) { new->thread.fpu_counter++; if (!use_eager_fpu() && fpu_lazy_restore(new, cpu)) diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index f3ced6f4b2b6..5722ab6c7c36 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c @@ -236,7 +236,7 @@ int init_fpu(struct task_struct *tsk) if (tsk_used_math(tsk)) { if (cpu_has_fpu && tsk == current) unlazy_fpu(tsk); - tsk->thread.fpu.last_cpu = ~0; + task_disable_lazy_fpu_restore(tsk); return 0; } diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index e127ddaa2d5a..ce8b10351e28 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -68,8 +68,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) dst->thread.fpu_counter = 0; dst->thread.fpu.has_fpu = 0; - dst->thread.fpu.last_cpu = ~0; dst->thread.fpu.state = NULL; + task_disable_lazy_fpu_restore(dst); if (tsk_used_math(src)) { int err = fpu_alloc(&dst->thread.fpu); if (err) -- cgit v1.2.3 From 728e53fef429a0f3c9dda3587c3ccc57ad268b70 Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Fri, 6 Feb 2015 15:02:05 -0500 Subject: x86/fpu: Also check fpu_lazy_restore() when use_eager_fpu() With Oleg's patch: 33a3ebdc077f ("x86, fpu: Don't abuse has_fpu in __kernel_fpu_begin/end()") kernel threads no longer have an FPU state even on systems with use_eager_fpu(). That in turn means that a task may still have its FPU state loaded in the FPU registers, if the task only got interrupted by kernel threads from when it went to sleep, to when it woke up again. 
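For reference, the lazy-restore test being extended here, quoted from the fpu-internal.h hunk earlier in this series; it is true when this CPU was the last to run the task and nothing else has claimed the FPU since:

static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
	return new == this_cpu_read_stable(fpu_owner_task) &&
	       cpu == new->thread.fpu.last_cpu;
}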
In that case, there is no need to restore the FPU state for this task, since it is still in the registers. The kernel can simply use the same logic to determine this as is used for !use_eager_fpu() systems. Signed-off-by: Rik van Riel Cc: Linus Torvalds Cc: Oleg Nesterov Link: http://lkml.kernel.org/r/1423252925-14451-9-git-send-email-riel@redhat.com Signed-off-by: Borislav Petkov --- arch/x86/include/asm/fpu-internal.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index e5f8f8eaf225..19fb41cc4755 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h @@ -458,7 +458,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta task_disable_lazy_fpu_restore(old); if (fpu.preload) { new->thread.fpu_counter++; - if (!use_eager_fpu() && fpu_lazy_restore(new, cpu)) + if (fpu_lazy_restore(new, cpu)) fpu.preload = 0; else prefetch(new->thread.fpu.state); -- cgit v1.2.3 From 7aeccb83e76316b365e4b44a1dd982ee22a7d8b2 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 19 Jan 2015 19:51:32 +0100 Subject: x86/fpu: __kernel_fpu_begin() should clear fpu_owner_task even if use_eager_fpu() __kernel_fpu_begin() does nothing if !__thread_has_fpu() && use_eager_fpu(); perhaps it assumes that this case is simply impossible. This is certainly not possible if in_interrupt() == T; interrupted_user_mode() should have FPU, and interrupted_kernel_fpu_idle() should fail if !__thread_has_fpu(). However, even if use_eager_fpu() == T a task can do drop_fpu(), then switch to another thread which becomes fpu_owner_task, then resume and call some function which does kernel_fpu_begin(). Say, an exiting task does a lot of things after exit_thread(); it is not safe to assume that it can't use the FPU in these paths. Signed-off-by: Oleg Nesterov Reviewed-by: Rik van Riel Cc: Linus Torvalds Cc: Suresh Siddha Cc: Andy Lutomirski Cc: Pekka Riikonen Link: http://lkml.kernel.org/r/20150119185132.GB16427@redhat.com Signed-off-by: Borislav Petkov --- arch/x86/kernel/i387.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index f59d80622e60..ad3a2a23f248 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c @@ -93,9 +93,10 @@ void __kernel_fpu_begin(void) if (__thread_has_fpu(me)) { __save_init_fpu(me); - } else if (!use_eager_fpu()) { + } else { this_cpu_write(fpu_owner_task, NULL); - clts(); + if (!use_eager_fpu()) + clts(); } } EXPORT_SYMBOL(__kernel_fpu_begin); -- cgit v1.2.3 From 4b2e762e2e53c721458a83d547b222178bb72a34 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 19 Jan 2015 19:51:51 +0100 Subject: x86/fpu: Always allow FPU in interrupt if use_eager_fpu() The __thread_has_fpu() check in interrupted_kernel_fpu_idle() was needed to prevent the nested kernel_fpu_begin(). Now that we have in_kernel_fpu, and the !__thread_has_fpu() case in __kernel_fpu_begin() does not depend on use_eager_fpu() (except for clts()), we can remove it. __thread_has_fpu() can be false even if use_eager_fpu(), but this case does not differ from the !use_eager_fpu() case, except that we should not worry about X86_CR0_TS: __kernel_fpu_begin()/end() will not touch this bit.
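For reference, __kernel_fpu_begin() after the patch above reads roughly as follows (reconstructed from the hunk; the in_kernel_fpu marking at the top is inferred from this series rather than shown in the diff, so treat it as an assumption):

void __kernel_fpu_begin(void)
{
	struct task_struct *me = current;

	this_cpu_write(in_kernel_fpu, true);	/* assumed from this series */

	if (__thread_has_fpu(me)) {
		__save_init_fpu(me);
	} else {
		/* Clear the lazy-restore owner even in the eager case... */
		this_cpu_write(fpu_owner_task, NULL);
		if (!use_eager_fpu())
			/* ...but only touch CR0.TS in the lazy case. */
			clts();
	}
}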
Note: I think we can kill all irq_fpu_usable() checks except in_kernel_fpu; we just need to record the state of X86_CR0_TS in __kernel_fpu_begin() and conditionalize stts() in __kernel_fpu_end(), but this needs another patch. Signed-off-by: Oleg Nesterov Reviewed-by: Rik van Riel Acked-by: Andy Lutomirski Cc: Linus Torvalds Cc: Suresh Siddha Link: http://lkml.kernel.org/r/20150119185151.GC16427@redhat.com Signed-off-by: Borislav Petkov --- arch/x86/kernel/i387.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index ad3a2a23f248..8416b5f85806 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c @@ -41,8 +41,8 @@ void kernel_fpu_enable(void) * be set (so that the clts/stts pair does nothing that is * visible in the interrupted kernel thread). * - * Except for the eagerfpu case when we return 1 unless we've already - * been eager and saved the state in kernel_fpu_begin(). + * Except for the eagerfpu case when we return true; in the likely case + * the thread has FPU but we are not going to set/clear TS. */ static inline bool interrupted_kernel_fpu_idle(void) { @@ -50,7 +50,7 @@ static inline bool interrupted_kernel_fpu_idle(void) return false; if (use_eager_fpu()) - return __thread_has_fpu(current); + return true; return !__thread_has_fpu(current) && (read_cr0() & X86_CR0_TS); -- cgit v1.2.3 From 110d7f7513bbb916b8654da9e2973ac5bed929a9 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 19 Jan 2015 19:52:12 +0100 Subject: x86/fpu: Don't abuse FPU in kernel threads if use_eager_fpu() AFAICS, there is no reason why kernel threads should have FPU context even if use_eager_fpu() == T. Now that interrupted_kernel_fpu_idle() does not check __thread_has_fpu() in the use_eager_fpu() case, we can remove the init_fpu() code from eager_fpu_init() and change flush_thread(), called by do_execve(), to initialize the FPU. Note: of course, the change in flush_thread() is horrible and must be cleaned up. We need the new helper, and flush_thread() should return the error if init_fpu() fails. Signed-off-by: Oleg Nesterov Reviewed-by: Rik van Riel Cc: Linus Torvalds Cc: Suresh Siddha Cc: Andy Lutomirski Link: http://lkml.kernel.org/r/20150119185212.GD16427@redhat.com Signed-off-by: Borislav Petkov --- arch/x86/kernel/process.c | 7 +++++++ arch/x86/kernel/xsave.c | 13 +------------ 2 files changed, 8 insertions(+), 12 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index ce8b10351e28..83480373a642 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -130,6 +130,7 @@ void flush_thread(void) flush_ptrace_hw_breakpoint(tsk); memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); + drop_init_fpu(tsk); /* * Free the FPU state for non xsave platforms. They get reallocated * lazily at the first use. */ if (!use_eager_fpu()) free_thread_xstate(tsk); + else if (!used_math()) { /* kthread execs. TODO: cleanup this horror.
*/ + if (WARN_ON(init_fpu(current))) + force_sig(SIGKILL, current); + math_state_restore(); + } } static void hard_disable_TSC(void) diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index 0de1fae2bdf0..de9dcf89a302 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c @@ -688,7 +688,7 @@ void eager_fpu_init(void) { static __refdata void (*boot_func)(void) = eager_fpu_init_bp; - clear_used_math(); + WARN_ON(used_math()); current_thread_info()->status = 0; if (eagerfpu == ENABLE) @@ -703,17 +703,6 @@ void eager_fpu_init(void) boot_func(); boot_func = NULL; } - - /* - * This is same as math_state_restore(). But use_xsave() is - * not yet patched to use math_state_restore(). - */ - init_fpu(current); - __thread_fpu_begin(current); - if (cpu_has_xsave) - xrstor_state(init_xstate_buf, -1); - else - fxrstor_checking(&init_xstate_buf->i387); } /* -- cgit v1.2.3 From e7f180dcd8ab48f18b20d7e8a7e9b39192bdf8e0 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 10 Mar 2015 07:06:24 +0100 Subject: x86/fpu: Change xstateregs_get()/set() to use ->xsave.i387 rather than ->fxsave This is a cosmetic change: xstateregs_get() and xstateregs_set() abuse ->fxsave to access xsave->i387.sw_reserved. This practice is correct, ->fxsave and xsave->i387 share the same memory, but IMHO this looks confusing. And we can make this code more readable if we add a "struct xsave_struct *" local variable as well. Signed-off-by: Oleg Nesterov Signed-off-by: Borislav Petkov Reviewed-by: Rik van Riel Cc: Andy Lutomirski Cc: Borislav Petkov Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Tavis Ormandy Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1425967585-4725-1-git-send-email-bp@alien8.de Link: http://lkml.kernel.org/r/20150302183237.GB23085@redhat.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/i387.c | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index 8416b5f85806..03cc0add8694 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c @@ -339,6 +339,7 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { + struct xsave_struct *xsave = &target->thread.fpu.state->xsave; int ret; if (!cpu_has_xsave) @@ -353,14 +354,12 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset, * memory layout in the thread struct, so that we can copy the entire * xstateregs to the user using one user_regset_copyout(). */ - memcpy(&target->thread.fpu.state->fxsave.sw_reserved, - xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes)); - + memcpy(&xsave->i387.sw_reserved, + xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes)); /* * Copy the xstate memory layout. 
*/ - ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &target->thread.fpu.state->xsave, 0, -1); + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1); return ret; } @@ -368,8 +367,8 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { + struct xsave_struct *xsave = &target->thread.fpu.state->xsave; int ret; - struct xsave_hdr_struct *xsave_hdr; if (!cpu_has_xsave) return -ENODEV; @@ -378,22 +377,16 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset, if (ret) return ret; - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &target->thread.fpu.state->xsave, 0, -1); - + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1); /* * mxcsr reserved bits must be masked to zero for security reasons. */ - target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask; - - xsave_hdr = &target->thread.fpu.state->xsave.xsave_hdr; - - xsave_hdr->xstate_bv &= pcntxt_mask; + xsave->i387.mxcsr &= mxcsr_feature_mask; + xsave->xsave_hdr.xstate_bv &= pcntxt_mask; /* * These bits must be zero. */ - memset(xsave_hdr->reserved, 0, 48); - + memset(&xsave->xsave_hdr.reserved, 0, 48); return ret; } -- cgit v1.2.3 From 1d23c4518b1f3a03c278f23333149245c178d2a6 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 10 Mar 2015 07:06:25 +0100 Subject: x86/fpu: Factor out memset(xstate, 0) in fpu_finit() paths fx_finit() has two users but only fpu_finit() needs to clear xstate; alloc_bootmem_align() in setup_init_fpu_buf() returns zero-filled memory. And note that both memset()s look confusing. Yes, offsetof() is 0 for ->fxsave or ->fsave, but it would be cleaner to turn them into a single memset() which zeroes fpu->state. Signed-off-by: Oleg Nesterov Signed-off-by: Borislav Petkov Acked-by: Rik van Riel Cc: Andy Lutomirski Cc: Borislav Petkov Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Tavis Ormandy Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1425967585-4725-2-git-send-email-bp@alien8.de Link: http://lkml.kernel.org/r/20150302183257.GC23085@redhat.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/fpu-internal.h | 1 - arch/x86/kernel/i387.c | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index 61609b963eab..5fa1be21ac2a 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h @@ -135,7 +135,6 @@ static __always_inline __pure bool use_fxsr(void) static inline void fx_finit(struct i387_fxsave_struct *fx) { - memset(fx, 0, xstate_size); fx->cwd = 0x37f; fx->mxcsr = MXCSR_DEFAULT; } diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index 03cc0add8694..0f3de6674ae3 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c @@ -224,11 +224,12 @@ void fpu_finit(struct fpu *fpu) return; } + memset(fpu->state, 0, xstate_size); + if (cpu_has_fxsr) { fx_finit(&fpu->state->fxsave); } else { struct i387_fsave_struct *fp = &fpu->state->fsave; - memset(fp, 0, xstate_size); fp->cwd = 0xffff037fu; fp->swd = 0xffff0000u; fp->twd = 0xffffffffu; -- cgit v1.2.3 From fb14b4eadf73500d3b2104f031472a268562c047 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 11 Mar 2015 18:34:09 +0100 Subject: x86/fpu: Document user_fpu_begin() Currently, user_fpu_begin() has a single caller and it is not clear why we actually need it and why we should not worry about preemption right after preempt_enable(). Signed-off-by: Oleg Nesterov Signed-off-by: Borislav Petkov Cc: Andy Lutomirski Cc: Dave Hansen Cc: Fenghua Yu Cc: Linus Torvalds Cc: Pekka Riikonen Cc: Quentin Casasnovas Cc: Rik van Riel Cc: Suresh Siddha Link: http://lkml.kernel.org/r/20150311173409.GC5032@redhat.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/fpu-internal.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index 810f20fd4e4e..c58c9302152b 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h @@ -508,10 +508,12 @@ static inline int restore_xstate_sig(void __user *buf, int ia32_frame) } /* - * Need to be preemption-safe. + * Needs to be preemption-safe. * * NOTE! user_fpu_begin() must be used only immediately before restoring - * it. This function does not do any save/restore on their own. + * the save state. It does not do any saving/restoring on its own. In + * lazy FPU mode, it is just an optimization to avoid a #NM exception, + * the task can lose the FPU right after preempt_enable(). */ static inline void user_fpu_begin(void) { -- cgit v1.2.3 From 8f4d81863ba4e8dfee93bd50840f1099a296251f Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 11 Mar 2015 18:34:29 +0100 Subject: x86/fpu: Introduce restore_init_xstate() Extract the "use_eager_fpu()" code from drop_init_fpu() into a new, simple helper restore_init_xstate(). The next patch adds another user. - It is not clear why we do not check use_fxsr() like fpu_restore_checking() does. eager_fpu_init_bp() calls setup_init_fpu_buf() too, and we have the "eagerfpu=on" kernel option. - Ignoring the fact that init_xstate_buf is "struct xsave_struct *", not "union thread_xstate *", it is not clear why we can not simply use fpu_restore_checking() and avoid the code duplication.
- It is not clear why we can't call setup_init_fpu_buf() unconditionally to always create init_xstate_buf(). Then the do_device_not_available() path (at least) could use restore_init_xstate() too. It doesn't need to init fpu->state; its content doesn't matter until unlazy_fpu()/__switch_to()/etc., which overwrite this memory anyway. Signed-off-by: Oleg Nesterov Signed-off-by: Borislav Petkov Cc: Andy Lutomirski Cc: Dave Hansen Cc: Fenghua Yu Cc: Linus Torvalds Cc: Pekka Riikonen Cc: Quentin Casasnovas Cc: Rik van Riel Cc: Suresh Siddha Link: http://lkml.kernel.org/r/20150311173429.GD5032@redhat.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/fpu-internal.h | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index c58c9302152b..7d2f7fa6b2dd 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h @@ -401,16 +401,20 @@ static inline void drop_fpu(struct task_struct *tsk) preempt_enable(); } +static inline void restore_init_xstate(void) +{ + if (use_xsave()) + xrstor_state(init_xstate_buf, -1); + else + fxrstor_checking(&init_xstate_buf->i387); +} + static inline void drop_init_fpu(struct task_struct *tsk) { if (!use_eager_fpu()) drop_fpu(tsk); - else { - if (use_xsave()) - xrstor_state(init_xstate_buf, -1); - else - fxrstor_checking(&init_xstate_buf->i387); - } + else + restore_init_xstate(); } /* -- cgit v1.2.3 From 9cb6ce823bbd1adbe15e30bd1435c84c2e271767 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 11 Mar 2015 18:34:49 +0100 Subject: x86/fpu: Use restore_init_xstate() instead of math_state_restore() on kthread exec Change flush_thread() to do user_fpu_begin() and restore_init_xstate() instead of math_state_restore(). Note: "TODO: cleanup this horror" is still valid. We do not need init_fpu() at all; we only need fpu_alloc() and memset(0). But this needs other changes; in particular, user_fpu_begin() should set used_math(). Signed-off-by: Oleg Nesterov Signed-off-by: Borislav Petkov Cc: Andy Lutomirski Cc: Dave Hansen Cc: Fenghua Yu Cc: Linus Torvalds Cc: Pekka Riikonen Cc: Quentin Casasnovas Cc: Rik van Riel Cc: Suresh Siddha Link: http://lkml.kernel.org/r/20150311173449.GE5032@redhat.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/process.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index dcaf4b00d0b4..6b058296a456 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -143,7 +143,8 @@ void flush_thread(void) /* kthread execs. TODO: cleanup this horror. */ if (WARN_ON(init_fpu(current))) force_sig(SIGKILL, current); - math_state_restore(); + user_fpu_begin(); + restore_init_xstate(); } } -- cgit v1.2.3 From f893959b0898bd876673adbeb6798bdf25c034d7 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 13 Mar 2015 18:30:30 +0100 Subject: x86/fpu: Don't abuse drop_init_fpu() in flush_thread() flush_thread() -> drop_init_fpu() is suboptimal and confusing. It does drop_fpu() or restore_init_xstate() depending on !use_eager_fpu(). But flush_thread() too checks eagerfpu right after that, and if it is true then restore_init_xstate() just burns CPU for no reason. We are going to load init_xstate_buf again after we set used_math()/user_has_fpu(); until then the FPU state can't survive switch_to(). Remove it, and change the "if (!use_eager_fpu())" to call drop_fpu().
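For reference, the helper flush_thread() keeps using in the eager case, quoted from the restore_init_xstate() hunk above:

static inline void restore_init_xstate(void)
{
	if (use_xsave())
		xrstor_state(init_xstate_buf, -1);
	else
		fxrstor_checking(&init_xstate_buf->i387);
}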
While at it, clean up the tsk/current usage. Signed-off-by: Oleg Nesterov Signed-off-by: Borislav Petkov Cc: Andy Lutomirski Cc: Dave Hansen Cc: Fenghua Yu Cc: Linus Torvalds Cc: Pekka Riikonen Cc: Quentin Casasnovas Cc: Rik van Riel Cc: Suresh Siddha Link: http://lkml.kernel.org/r/20150313173030.GA31217@redhat.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/process.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 6b058296a456..1d2ebadba7ac 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -132,17 +132,14 @@ void flush_thread(void) flush_ptrace_hw_breakpoint(tsk); memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); - drop_init_fpu(tsk); - /* - * Free the FPU state for non xsave platforms. They get reallocated - * lazily at the first use. - */ - if (!use_eager_fpu()) + if (!use_eager_fpu()) { + /* FPU state will be reallocated lazily at the first use. */ + drop_fpu(tsk); free_thread_xstate(tsk); - else if (!used_math()) { + } else if (!used_math()) { /* kthread execs. TODO: cleanup this horror. */ - if (WARN_ON(init_fpu(current))) - force_sig(SIGKILL, current); + if (WARN_ON(init_fpu(tsk))) + force_sig(SIGKILL, tsk); user_fpu_begin(); restore_init_xstate(); } -- cgit v1.2.3 From d2d0ac9a4644e00120bb9b7427a512a99d2cacc5 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Sat, 14 Mar 2015 11:52:12 +0100 Subject: x86/fpu: Fold __drop_fpu() into its sole user Fold it into drop_fpu(). Phew, one less FPU function to pay attention to. No functionality change. Signed-off-by: Borislav Petkov Acked-by: Oleg Nesterov Cc: Andy Lutomirski Cc: Dave Hansen Cc: Fenghua Yu Cc: Linus Torvalds Cc: Pekka Riikonen Cc: Quentin Casasnovas Cc: Rik van Riel Cc: Suresh Siddha Signed-off-by: Ingo Molnar --- arch/x86/include/asm/fpu-internal.h | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index 7d2f7fa6b2dd..2d4adff428ac 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h @@ -378,8 +378,14 @@ static inline void __thread_fpu_begin(struct task_struct *tsk) __thread_set_has_fpu(tsk); } -static inline void __drop_fpu(struct task_struct *tsk) +static inline void drop_fpu(struct task_struct *tsk) { + /* + * Forget coprocessor state.. + */ + preempt_disable(); + tsk->thread.fpu_counter = 0; + if (__thread_has_fpu(tsk)) { /* Ignore delayed exceptions from user space */ asm volatile("1: fwait\n" @@ -387,16 +393,7 @@ static inline void __drop_fpu(struct task_struct *tsk) _ASM_EXTABLE(1b, 2b)); __thread_fpu_end(tsk); } -} -static inline void drop_fpu(struct task_struct *tsk) -{ - /* - * Forget coprocessor state.. - */ - preempt_disable(); - tsk->thread.fpu_counter = 0; - __drop_fpu(tsk); clear_stopped_child_used_math(tsk); preempt_enable(); } -- cgit v1.2.3 From b85e67d1483c72b77d1bdc265aa8ba91590794c1 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Mon, 16 Mar 2015 10:21:55 +0100 Subject: x86/fpu: Rename drop_init_fpu() to fpu_reset_state() Call it what it does and in accordance with the context where it is used: we reset the FPU state either because we were unable to restore it from the one saved in the task or because we simply want to reset it. 
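After the rename, the helper reads as follows (reconstructed from the diff below):

/*
 * Reset the FPU state in the eager case and drop it in the lazy case
 * (later use will reinit it).
 */
static inline void fpu_reset_state(struct task_struct *tsk)
{
	if (!use_eager_fpu())
		drop_fpu(tsk);
	else
		restore_init_xstate();
}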
Signed-off-by: Borislav Petkov Acked-by: Oleg Nesterov Cc: Andy Lutomirski Cc: Linus Torvalds Cc: Rik van Riel Signed-off-by: Ingo Molnar --- arch/x86/include/asm/fpu-internal.h | 8 ++++++-- arch/x86/kernel/i387.c | 2 +- arch/x86/kernel/signal.c | 2 +- arch/x86/kernel/traps.c | 2 +- arch/x86/kernel/xsave.c | 4 ++-- 5 files changed, 11 insertions(+), 7 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index 2d4adff428ac..da5e96756570 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h @@ -406,7 +406,11 @@ static inline void restore_init_xstate(void) fxrstor_checking(&init_xstate_buf->i387); } -static inline void drop_init_fpu(struct task_struct *tsk) +/* + * Reset the FPU state in the eager case and drop it in the lazy case (later use + * will reinit it). + */ +static inline void fpu_reset_state(struct task_struct *tsk) { if (!use_eager_fpu()) drop_fpu(tsk); @@ -480,7 +484,7 @@ static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu) { if (fpu.preload) { if (unlikely(restore_fpu_checking(new))) - drop_init_fpu(new); + fpu_reset_state(new); } } diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index 29e982ada854..41575b9b1021 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c @@ -108,7 +108,7 @@ void __kernel_fpu_end(void) if (__thread_has_fpu(me)) { if (WARN_ON(restore_fpu_checking(me))) - drop_init_fpu(me); + fpu_reset_state(me); } else if (!use_eager_fpu()) { stts(); } diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index e5042463c1bc..59eaae6185e2 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -679,7 +679,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs) * Ensure the signal handler starts with the new fpu state. */ if (used_math()) - drop_init_fpu(current); + fpu_reset_state(current); } signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP)); } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 7ee7369d5aec..edf66c066da9 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -863,7 +863,7 @@ void math_state_restore(void) kernel_fpu_disable(); __thread_fpu_begin(tsk); if (unlikely(restore_fpu_checking(tsk))) { - drop_init_fpu(tsk); + fpu_reset_state(tsk); force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk); } else { tsk->thread.fpu_counter++; diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index 0bf82c5ac529..65c29b070e09 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c @@ -342,7 +342,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size) config_enabled(CONFIG_IA32_EMULATION)); if (!buf) { - drop_init_fpu(tsk); + fpu_reset_state(tsk); return 0; } @@ -416,7 +416,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size) */ user_fpu_begin(); if (restore_user_xstate(buf_fx, xstate_bv, fx_only)) { - drop_init_fpu(tsk); + fpu_reset_state(tsk); return -1; } } -- cgit v1.2.3 From 4bd5bf8c85e6bca5be9e7c4b3d7ad1942ae323f3 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 13 Mar 2015 19:27:16 +0100 Subject: x86/fpu: Don't allocate fpu->state for swapper/0 Now that kthreads do not use FPU until they get executed, swapper/0 doesn't need to allocate fpu->state. 
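For context, after this change the state buffer is only ever allocated lazily, on first FPU use, via fpu_alloc(); the following sketch of that helper is quoted from memory of the contemporaneous tree (arch/x86/kernel/process.c), so treat the details as illustrative rather than part of this series:

int fpu_alloc(struct fpu *fpu)
{
	if (fpu->state)
		return 0;
	/* task_xstate_cachep is a kmem cache sized to xstate_size at boot */
	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;
	WARN_ON((unsigned long)fpu->state & 15);
	return 0;
}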
Signed-off-by: Oleg Nesterov Signed-off-by: Borislav Petkov Cc: Andy Lutomirski Cc: Dave Hansen Cc: Fenghua Yu Cc: Linus Torvalds Cc: Pekka Riikonen Cc: Quentin Casasnovas Cc: Rik van Riel Cc: Suresh Siddha Link: http://lkml.kernel.org/r/20150313182716.GB8249@redhat.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/xsave.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index 65c29b070e09..ada8df7b89c0 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c @@ -680,8 +680,6 @@ void xsave_init(void) static inline void __init eager_fpu_init_bp(void) { - current->thread.fpu.state = - alloc_bootmem_align(xstate_size, __alignof__(struct xsave_struct)); if (!init_xstate_buf) setup_init_fpu_buf(); } -- cgit v1.2.3 From 7fc253e277ecf1ea57c2d670bdbcda3dffd19453 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sat, 14 Mar 2015 16:13:34 +0100 Subject: x86/fpu: Kill eager_fpu_init_bp() Now that eager_fpu_init_bp() does setup_init_fpu_buf() only and nothing else, we can remove it and move this code into its "caller", eager_fpu_init(). This avoids the confusing games with "static __refdata void (*boot_func)": init_xstate_buf can be NULL only during boot, so it is safe to call the __init-annotated setup_init_fpu_buf() function in eager_fpu_init(), we just need to mark it as __init_refok. Signed-off-by: Oleg Nesterov Signed-off-by: Borislav Petkov Cc: Andy Lutomirski Cc: Dave Hansen Cc: Fenghua Yu Cc: Linus Torvalds Cc: Pekka Riikonen Cc: Quentin Casasnovas Cc: Rik van Riel Cc: Suresh Siddha Link: http://lkml.kernel.org/r/20150314151334.GC13029@redhat.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/xsave.c | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index ada8df7b89c0..87a815b85f3e 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c @@ -678,16 +678,12 @@ void xsave_init(void) this_func(); } -static inline void __init eager_fpu_init_bp(void) -{ - if (!init_xstate_buf) - setup_init_fpu_buf(); -} - -void eager_fpu_init(void) +/* + * setup_init_fpu_buf() is __init and it is OK to call it here because + * init_xstate_buf will be unset only once during boot. + */ +void __init_refok eager_fpu_init(void) { - static __refdata void (*boot_func)(void) = eager_fpu_init_bp; - WARN_ON(used_math()); current_thread_info()->status = 0; @@ -699,10 +695,8 @@ void eager_fpu_init(void) return; } - if (boot_func) { - boot_func(); - boot_func = NULL; - } + if (!init_xstate_buf) + setup_init_fpu_buf(); } /* -- cgit v1.2.3
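With the series applied, eager_fpu_init() ends up roughly as follows (a composite of the hunks above; the cpu_has_eager_fpu/stts() middle section is not shown in the diffs and is reconstructed from memory, so treat it as illustrative):

void __init_refok eager_fpu_init(void)
{
	WARN_ON(used_math());
	current_thread_info()->status = 0;

	if (eagerfpu == ENABLE)
		setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);

	if (!cpu_has_eager_fpu) {
		stts();
		return;
	}

	/* Calling the __init setup_init_fpu_buf() here is safe:
	   init_xstate_buf can only be NULL during boot. */
	if (!init_xstate_buf)
		setup_init_fpu_buf();
}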