From 6ca7a8a15035add0a4f9b2fd658118d41dbeb20c Mon Sep 17 00:00:00 2001
From: Borislav Petkov
Date: Sun, 21 Dec 2014 15:02:23 +0100
Subject: x86/fpu: Use a symbolic name for asm operand

Fix up the else-case in fpu_fxsave(), which seems to have been
overlooked. Correct the comment style in restore_fpu_checking() while
at it.

Signed-off-by: Borislav Petkov
Cc: Linus Torvalds
Link: http://lkml.kernel.org/r/1419170543-11393-1-git-send-email-bp@alien8.de
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/fpu-internal.h | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index e97622f57722..0dbc08282291 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -207,7 +207,7 @@ static inline void fpu_fxsave(struct fpu *fpu)
 	if (config_enabled(CONFIG_X86_32))
 		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
 	else if (config_enabled(CONFIG_AS_FXSAVEQ))
-		asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave));
+		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state->fxsave));
 	else {
 		/* Using "rex64; fxsave %0" is broken because, if the memory
 		 * operand uses any extended registers for addressing, a second
@@ -290,9 +290,11 @@ static inline int fpu_restore_checking(struct fpu *fpu)
 
 static inline int restore_fpu_checking(struct task_struct *tsk)
 {
-	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
-	   is pending. Clear the x87 state here by setting it to fixed
-	   values. "m" is a random variable that should be in L1 */
+	/*
+	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
+	 * pending. Clear the x87 state here by setting it to fixed values.
+	 * "m" is a random variable that should be in L1.
+	 */
 	if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
 		asm volatile(
 			"fnclex\n\t"
--
cgit v1.2.3
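Background on the change above: GCC inline asm can refer to an operand either
positionally ("%0") or by a symbolic name ("%[fx]", bound with the
[fx] "=m" (...) syntax). Both forms generate the same code; the symbolic form
survives operand-list reordering and matches the other branches of
fpu_fxsave(). A minimal standalone sketch, assuming an x86-64 GCC/Clang
build; struct fxregs_buf is a made-up stand-in for the kernel's fxsave area:

/*
 * Illustrative only: positional vs. symbolic asm operand names, mirroring
 * the fxsaveq change above. The fxsave area must be 512 bytes and
 * 16-byte aligned.
 */
struct fxregs_buf {
        unsigned char data[512];
} __attribute__((aligned(16)));

static inline void fxsave_positional(struct fxregs_buf *buf)
{
        /* "%0" names the output operand by its position in the list */
        asm volatile("fxsaveq %0" : "=m" (*buf));
}

static inline void fxsave_symbolic(struct fxregs_buf *buf)
{
        /* "%[fx]" names the same operand symbolically */
        asm volatile("fxsaveq %[fx]" : [fx] "=m" (*buf));
}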
From 14e153ef75eecae8fd0738ffb42120f4962a00cd Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Thu, 15 Jan 2015 20:19:43 +0100
Subject: x86, fpu: Introduce per-cpu in_kernel_fpu state

interrupted_kernel_fpu_idle() tries to detect whether kernel_fpu_begin()
is safe or not. In particular, it should obviously deny a nested
kernel_fpu_begin(), and this logic looks very confusing.

If use_eager_fpu() == T, we rely on a) the __thread_has_fpu() check in
interrupted_kernel_fpu_idle(), and b) the fact that _begin() does
__thread_clear_has_fpu().

Otherwise we demand that the interrupted task has no FPU if it is in
kernel mode; this works because __kernel_fpu_begin() does clts() and
interrupted_kernel_fpu_idle() checks X86_CR0_TS.

Add the per-cpu "bool in_kernel_fpu" variable, and change this code to
check/set/clear it. This allows us to do more cleanups and fixes; see
the next changes.

The patch also moves WARN_ON_ONCE() under preempt_disable(), just to
make this_cpu_read() look better; this is not really needed. In fact, I
think we should move it into __kernel_fpu_begin().

Signed-off-by: Oleg Nesterov
Reviewed-by: Rik van Riel
Cc: matt.fleming@intel.com
Cc: bp@suse.de
Cc: pbonzini@redhat.com
Cc: luto@amacapital.net
Cc: Linus Torvalds
Cc: Suresh Siddha
Link: http://lkml.kernel.org/r/20150115191943.GB27332@redhat.com
Signed-off-by: Thomas Gleixner
---
 arch/x86/include/asm/i387.h | 2 +-
 arch/x86/kernel/i387.c      | 9 +++++++++
 2 files changed, 10 insertions(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index ed8089d69094..5e275d31802e 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -40,8 +40,8 @@ extern void __kernel_fpu_end(void);
 
 static inline void kernel_fpu_begin(void)
 {
-	WARN_ON_ONCE(!irq_fpu_usable());
 	preempt_disable();
+	WARN_ON_ONCE(!irq_fpu_usable());
 	__kernel_fpu_begin();
 }

diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index a9a4229f6161..a81572338243 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -19,6 +19,8 @@
 #include
 #include
 
+static DEFINE_PER_CPU(bool, in_kernel_fpu);
+
 /*
  * Were we in an interrupt that interrupted kernel mode?
  *
@@ -33,6 +35,9 @@
  */
 static inline bool interrupted_kernel_fpu_idle(void)
 {
+	if (this_cpu_read(in_kernel_fpu))
+		return false;
+
 	if (use_eager_fpu())
 		return __thread_has_fpu(current);
 
@@ -73,6 +78,8 @@ void __kernel_fpu_begin(void)
 {
 	struct task_struct *me = current;
 
+	this_cpu_write(in_kernel_fpu, true);
+
 	if (__thread_has_fpu(me)) {
 		__thread_clear_has_fpu(me);
 		__save_init_fpu(me);
@@ -99,6 +106,8 @@ void __kernel_fpu_end(void)
 	} else {
 		stts();
 	}
+
+	this_cpu_write(in_kernel_fpu, false);
 }
 EXPORT_SYMBOL(__kernel_fpu_end);
--
cgit v1.2.3
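To see why a single per-cpu flag is enough to deny a nested
kernel_fpu_begin(), here is a hedged userspace analogue, not kernel code: the
per-cpu variable is modeled with a thread-local bool (each CPU, like each
thread here, sees exactly one instance of the flag), and the three functions
are simplified stand-ins for the real ones:

#include <assert.h>
#include <stdbool.h>

/* Models the per-cpu in_kernel_fpu introduced by the patch above. */
static _Thread_local bool in_kernel_fpu;

static bool irq_fpu_usable(void)
{
        /* The flag alone denies nested begin sections */
        return !in_kernel_fpu;
}

static void kernel_fpu_begin(void)
{
        assert(irq_fpu_usable());
        in_kernel_fpu = true;
        /* ... save user FPU state, use the FPU for kernel work ... */
}

static void kernel_fpu_end(void)
{
        /* ... restore user FPU state ... */
        in_kernel_fpu = false;
}

int main(void)
{
        kernel_fpu_begin();
        assert(!irq_fpu_usable());  /* an interrupt arriving now must back off */
        kernel_fpu_end();
        assert(irq_fpu_usable());
        return 0;
}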
From 33a3ebdc077fd85f1bf4d4586eea579b297461ae Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Thu, 15 Jan 2015 20:20:05 +0100
Subject: x86, fpu: Don't abuse has_fpu in __kernel_fpu_begin/end()

Now that we have in_kernel_fpu, we can remove __thread_clear_has_fpu()
in __kernel_fpu_begin(). This allows us to replace the asymmetrical and
nontrivial use_eager_fpu + tsk_used_math check in kernel_fpu_end() with
the same __thread_has_fpu() check.

The logic becomes really simple: if _begin() does save(), then _end()
needs restore(); this is controlled by __thread_has_fpu(). Otherwise
they do clts/stts unless use_eager_fpu().

Not only does this make begin/end symmetrical and, IMO, more
understandable, it potentially allows us to change irq_fpu_usable() to
avoid all checks other than "in_kernel_fpu".

Also, with this patch __kernel_fpu_end() does restore_fpu_checking()
and WARNs if it fails, instead of calling math_state_restore(). I think
this looks better because we no longer need __thread_fpu_begin(), and
it is better to report the failure in this case.

Signed-off-by: Oleg Nesterov
Acked-by: Rik van Riel
Cc: matt.fleming@intel.com
Cc: bp@suse.de
Cc: pbonzini@redhat.com
Cc: luto@amacapital.net
Cc: Linus Torvalds
Cc: Suresh Siddha
Link: http://lkml.kernel.org/r/20150115192005.GC27332@redhat.com
Signed-off-by: Thomas Gleixner
---
 arch/x86/kernel/i387.c | 19 ++++++-------------
 1 file changed, 6 insertions(+), 13 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index a81572338243..12088a3f459f 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -81,9 +81,7 @@ void __kernel_fpu_begin(void)
 	this_cpu_write(in_kernel_fpu, true);
 
 	if (__thread_has_fpu(me)) {
-		__thread_clear_has_fpu(me);
 		__save_init_fpu(me);
-		/* We do 'stts()' in __kernel_fpu_end() */
 	} else if (!use_eager_fpu()) {
 		this_cpu_write(fpu_owner_task, NULL);
 		clts();
@@ -93,17 +91,12 @@ EXPORT_SYMBOL(__kernel_fpu_begin);
 
 void __kernel_fpu_end(void)
 {
-	if (use_eager_fpu()) {
-		/*
-		 * For eager fpu, most the time, tsk_used_math() is true.
-		 * Restore the user math as we are done with the kernel usage.
-		 * At few instances during thread exit, signal handling etc,
-		 * tsk_used_math() is false. Those few places will take proper
-		 * actions, so we don't need to restore the math here.
-		 */
-		if (likely(tsk_used_math(current)))
-			math_state_restore();
-	} else {
+	struct task_struct *me = current;
+
+	if (__thread_has_fpu(me)) {
+		if (WARN_ON(restore_fpu_checking(me)))
+			drop_init_fpu(me);
+	} else if (!use_eager_fpu()) {
 		stts();
 	}
--
cgit v1.2.3
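The symmetry argument in the changelog can be restated as a tiny model: _end()
re-derives what _begin() did from the same predicates, so nothing has to be
smuggled between them. A standalone illustration only; struct fpu_model,
model_begin() and model_end() are invented names, and puts() stands in for
the real save/restore/clts/stts work:

#include <stdbool.h>
#include <stdio.h>

struct fpu_model {
        bool task_owns_fpu;     /* models __thread_has_fpu(current) */
        bool eager_fpu;         /* models use_eager_fpu() */
};

static void model_begin(const struct fpu_model *m)
{
        if (m->task_owns_fpu)
                puts("begin: save the task's FPU state");
        else if (!m->eager_fpu)
                puts("begin: clts");
}

static void model_end(const struct fpu_model *m)
{
        /* Same predicates as _begin(), so each action gets its exact undo */
        if (m->task_owns_fpu)
                puts("end:   restore the task's FPU state");
        else if (!m->eager_fpu)
                puts("end:   stts");
}

int main(void)
{
        const struct fpu_model cases[] = {
                { .task_owns_fpu = true,  .eager_fpu = false },
                { .task_owns_fpu = false, .eager_fpu = false },
                { .task_owns_fpu = false, .eager_fpu = true  }, /* no-op pair */
        };

        for (unsigned i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
                model_begin(&cases[i]);
                model_end(&cases[i]);
        }
        return 0;
}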
From 7575637ab293861a799f3bbafe0d8c597389f4e9 Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Thu, 15 Jan 2015 20:20:28 +0100
Subject: x86, fpu: Fix math_state_restore() race with kernel_fpu_begin()

math_state_restore() can race with kernel_fpu_begin(): if an irq comes
right after __thread_fpu_begin(), __save_init_fpu() will overwrite the
fpu->state we are going to restore.

Add two simple helpers, kernel_fpu_disable() and kernel_fpu_enable(),
which simply set/clear in_kernel_fpu, and change math_state_restore()
to exclude kernel_fpu_begin() in between.

Alternatively we could use local_irq_save/restore, but probably these
new helpers can have more users. Perhaps they should disable/enable
preemption themselves; in that case we could remove preempt_disable()
in __restore_xstate_sig().

Signed-off-by: Oleg Nesterov
Reviewed-by: Rik van Riel
Cc: matt.fleming@intel.com
Cc: bp@suse.de
Cc: pbonzini@redhat.com
Cc: luto@amacapital.net
Cc: Linus Torvalds
Cc: Suresh Siddha
Link: http://lkml.kernel.org/r/20150115192028.GD27332@redhat.com
Signed-off-by: Thomas Gleixner
---
 arch/x86/include/asm/i387.h |  4 ++++
 arch/x86/kernel/i387.c      | 11 +++++++++++
 arch/x86/kernel/traps.c     | 12 +++++-------
 3 files changed, 20 insertions(+), 7 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 5e275d31802e..6eb6fcb83f63 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -51,6 +51,10 @@ static inline void kernel_fpu_end(void)
 	preempt_enable();
 }
 
+/* Must be called with preempt disabled */
+extern void kernel_fpu_disable(void);
+extern void kernel_fpu_enable(void);
+
 /*
  * Some instructions like VIA's padlock instructions generate a spurious
  * DNA fault but don't modify SSE registers. And these instructions

diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 12088a3f459f..81049ffab2d6 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -21,6 +21,17 @@
 
 static DEFINE_PER_CPU(bool, in_kernel_fpu);
 
+void kernel_fpu_disable(void)
+{
+	WARN_ON(this_cpu_read(in_kernel_fpu));
+	this_cpu_write(in_kernel_fpu, true);
+}
+
+void kernel_fpu_enable(void)
+{
+	this_cpu_write(in_kernel_fpu, false);
+}
+
 /*
  * Were we in an interrupt that interrupted kernel mode?
  *

diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 88900e288021..fb4cb6adf225 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -788,18 +788,16 @@ void math_state_restore(void)
 		local_irq_disable();
 	}
 
+	/* Avoid __kernel_fpu_begin() right after __thread_fpu_begin() */
+	kernel_fpu_disable();
 	__thread_fpu_begin(tsk);
-
-	/*
-	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
-	 */
 	if (unlikely(restore_fpu_checking(tsk))) {
 		drop_init_fpu(tsk);
 		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
-		return;
+	} else {
+		tsk->thread.fpu_counter++;
 	}
-
-	tsk->thread.fpu_counter++;
+	kernel_fpu_enable();
 }
 EXPORT_SYMBOL_GPL(math_state_restore);
--
cgit v1.2.3
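The shape of the fix, bracketing the restore with a flag that the interrupting
path already checks, is a reusable pattern. A hedged standalone sketch, not
kernel code: the thread-local bool models the per-cpu flag,
math_state_restore_model() is an invented name, and the real
save/restore work is elided:

#include <assert.h>
#include <stdbool.h>

static _Thread_local bool in_kernel_fpu;

static void kernel_fpu_disable(void)
{
        assert(!in_kernel_fpu);     /* mirrors the patch's WARN_ON() */
        in_kernel_fpu = true;
}

static void kernel_fpu_enable(void)
{
        in_kernel_fpu = false;
}

static bool irq_fpu_usable(void)
{
        return !in_kernel_fpu;      /* interrupts check this before FPU use */
}

static void math_state_restore_model(void)
{
        kernel_fpu_disable();
        /* __thread_fpu_begin(); restore_fpu_checking(); ... elided ... */
        assert(!irq_fpu_usable()); /* kernel_fpu_begin() cannot intrude here */
        kernel_fpu_enable();
}

int main(void)
{
        math_state_restore_model();
        return 0;
}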