author     Ingo Molnar <mingo@kernel.org>  2015-05-05 11:34:49 +0200
committer  Ingo Molnar <mingo@kernel.org>  2015-05-19 15:48:12 +0200
commit     e97131a8391e9fce5126ed54dc66c9d8965d3b4e (patch)
tree       8e1db79ae5c956db9a329ed1d76891d7c12535fd /arch/x86/kernel/fpu
parent     d364a7656c1855c940dfa4baf4ebcc3c6a9e6fd2 (diff)
download   linux-e97131a8391e9fce5126ed54dc66c9d8965d3b4e.tar.bz2
x86/fpu: Add CONFIG_X86_DEBUG_FPU=y FPU debugging code
There are various internal FPU state debugging checks that never trigger in practice, but which are useful for FPU code development.

Separate these out into CONFIG_X86_DEBUG_FPU=y, and also add a couple of new ones.

The size difference is about 0.5K of code on defconfig:

    text      data     bss      filename
    15028906  2578816  1638400  vmlinux
    15029430  2578816  1638400  vmlinux

( Keep this enabled by default until the new FPU code is debugged. )

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
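The WARN_ON_FPU() macro that the hunks below switch to is defined in the FPU headers, which fall outside this diffstat (it is limited to arch/x86/kernel/fpu). As an illustrative sketch only, not the verbatim header change: the intent is a one-shot warning when CONFIG_X86_DEBUG_FPU=y and no warning otherwise, while still evaluating the condition, since callers such as WARN_ON_FPU(copy_fpstate_to_fpregs(fpu)) rely on its side effects.

#ifdef CONFIG_X86_DEBUG_FPU
# define WARN_ON_FPU(x)	WARN_ON_ONCE(x)
#else
/* Still evaluate 'x': callers rely on its side effects. */
# define WARN_ON_FPU(x)	({ (void)(x); 0; })
#endif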
Diffstat (limited to 'arch/x86/kernel/fpu')
-rw-r--r--  arch/x86/kernel/fpu/core.c    18
-rw-r--r--  arch/x86/kernel/fpu/init.c    12
-rw-r--r--  arch/x86/kernel/fpu/xstate.c  11
3 files changed, 30 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index c0661604a258..01a15503c3be 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -38,13 +38,13 @@ DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
static void kernel_fpu_disable(void)
{
- WARN_ON(this_cpu_read(in_kernel_fpu));
+ WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
this_cpu_write(in_kernel_fpu, true);
}
static void kernel_fpu_enable(void)
{
- WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
+ WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
this_cpu_write(in_kernel_fpu, false);
}
@@ -109,7 +109,7 @@ void __kernel_fpu_begin(void)
{
struct fpu *fpu = &current->thread.fpu;
- WARN_ON_ONCE(!irq_fpu_usable());
+ WARN_ON_FPU(!irq_fpu_usable());
kernel_fpu_disable();
@@ -127,7 +127,7 @@ void __kernel_fpu_end(void)
struct fpu *fpu = &current->thread.fpu;
if (fpu->fpregs_active) {
- if (WARN_ON(copy_fpstate_to_fpregs(fpu)))
+ if (WARN_ON_FPU(copy_fpstate_to_fpregs(fpu)))
fpu__clear(fpu);
} else {
__fpregs_deactivate_hw();
@@ -187,7 +187,7 @@ EXPORT_SYMBOL_GPL(irq_ts_restore);
*/
void fpu__save(struct fpu *fpu)
{
- WARN_ON(fpu != &current->thread.fpu);
+ WARN_ON_FPU(fpu != &current->thread.fpu);
preempt_disable();
if (fpu->fpregs_active) {
@@ -233,7 +233,7 @@ EXPORT_SYMBOL_GPL(fpstate_init);
*/
static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
- WARN_ON(src_fpu != &current->thread.fpu);
+ WARN_ON_FPU(src_fpu != &current->thread.fpu);
/*
* Don't let 'init optimized' areas of the XSAVE area
@@ -284,7 +284,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
*/
void fpu__activate_curr(struct fpu *fpu)
{
- WARN_ON_ONCE(fpu != &current->thread.fpu);
+ WARN_ON_FPU(fpu != &current->thread.fpu);
if (!fpu->fpstate_active) {
fpstate_init(&fpu->state);
@@ -321,7 +321,7 @@ EXPORT_SYMBOL_GPL(fpu__activate_curr);
*/
void fpu__activate_stopped(struct fpu *child_fpu)
{
- WARN_ON_ONCE(child_fpu == &current->thread.fpu);
+ WARN_ON_FPU(child_fpu == &current->thread.fpu);
if (child_fpu->fpstate_active) {
child_fpu->last_cpu = -1;
@@ -407,7 +407,7 @@ static inline void copy_init_fpstate_to_fpregs(void)
*/
void fpu__clear(struct fpu *fpu)
{
- WARN_ON_ONCE(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
+ WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
if (!use_eager_fpu()) {
/* FPU state will be reallocated lazily at the first use. */
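The init.c and xstate.c changes that follow also add a one-shot "runs once, on the boot CPU" guard to several __init routines. As a standalone illustration of that pattern (hypothetical function name, using the WARN_ON_FPU() sketch above):

static void __init fpu__init_example_once(void)
{
	static int on_boot_cpu = 1;

	/* Under CONFIG_X86_DEBUG_FPU=y, warn if this ever runs a second time. */
	WARN_ON_FPU(!on_boot_cpu);
	on_boot_cpu = 0;

	/* ... the actual one-time boot-CPU setup would follow here ... */
}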
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index a9e506a99a83..e9f1d6e62146 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -143,6 +143,11 @@ EXPORT_SYMBOL_GPL(xstate_size);
*/
static void __init fpu__init_system_xstate_size_legacy(void)
{
+ static int on_boot_cpu = 1;
+
+ WARN_ON_FPU(!on_boot_cpu);
+ on_boot_cpu = 0;
+
/*
* Note that xstate_size might be overwriten later during
* fpu__init_system_xstate().
@@ -214,7 +219,12 @@ __setup("eagerfpu=", eager_fpu_setup);
*/
static void __init fpu__init_system_ctx_switch(void)
{
- WARN_ON(current->thread.fpu.fpstate_active);
+ static bool on_boot_cpu = 1;
+
+ WARN_ON_FPU(!on_boot_cpu);
+ on_boot_cpu = 0;
+
+ WARN_ON_FPU(current->thread.fpu.fpstate_active);
current_thread_info()->status = 0;
/* Auto enable eagerfpu for xsaveopt */
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 201f08feb259..5724098adf1b 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -262,6 +262,11 @@ static void __init setup_xstate_comp(void)
*/
static void __init setup_init_fpu_buf(void)
{
+ static int on_boot_cpu = 1;
+
+ WARN_ON_FPU(!on_boot_cpu);
+ on_boot_cpu = 0;
+
if (!cpu_has_xsave)
return;
@@ -317,6 +322,10 @@ static void __init init_xstate_size(void)
void __init fpu__init_system_xstate(void)
{
unsigned int eax, ebx, ecx, edx;
+ static int on_boot_cpu = 1;
+
+ WARN_ON_FPU(!on_boot_cpu);
+ on_boot_cpu = 0;
if (!cpu_has_xsave) {
pr_info("x86/fpu: Legacy x87 FPU detected.\n");
@@ -324,7 +333,7 @@ void __init fpu__init_system_xstate(void)
}
if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
- WARN(1, "x86/fpu: XSTATE_CPUID missing!\n");
+ WARN_ON_FPU(1);
return;
}