From 8f86a7373a1c8ee52d3cc64adf7f2ace13fd24ed Mon Sep 17 00:00:00 2001
From: Borislav Petkov
Date: Sun, 9 Mar 2014 18:05:24 +0100
Subject: x86, AMD: Convert to the new bit access MSR accessors

... and save us a bunch of code.

Signed-off-by: Borislav Petkov
Link: http://lkml.kernel.org/r/1394384725-10796-3-git-send-email-bp@alien8.de
Signed-off-by: H. Peter Anvin
---
 arch/x86/kernel/cpu/amd.c | 48 ++++++++++++-----------------------------------
 1 file changed, 12 insertions(+), 36 deletions(-)

diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c67ffa686064..b85e43a5a462 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -233,9 +233,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
 	if (c->x86_model >= 6 && c->x86_model <= 10) {
 		if (!cpu_has(c, X86_FEATURE_XMM)) {
 			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
-			rdmsr(MSR_K7_HWCR, l, h);
-			l &= ~0x00008000;
-			wrmsr(MSR_K7_HWCR, l, h);
+			msr_clear_bit(MSR_K7_HWCR, 15);
 			set_cpu_cap(c, X86_FEATURE_XMM);
 		}
 	}
@@ -509,14 +507,8 @@ static void early_init_amd(struct cpuinfo_x86 *c)
 #endif

 	/* F16h erratum 793, CVE-2013-6885 */
-	if (c->x86 == 0x16 && c->x86_model <= 0xf) {
-		u64 val;
-
-		rdmsrl(MSR_AMD64_LS_CFG, val);
-		if (!(val & BIT(15)))
-			wrmsrl(MSR_AMD64_LS_CFG, val | BIT(15));
-	}
-
+	if (c->x86 == 0x16 && c->x86_model <= 0xf)
+		msr_set_bit(MSR_AMD64_LS_CFG, 15);
 }

 static const int amd_erratum_383[];
@@ -536,11 +528,8 @@ static void init_amd(struct cpuinfo_x86 *c)
 	 * Errata 63 for SH-B3 steppings
 	 * Errata 122 for all steppings (F+ have it disabled by default)
 	 */
-	if (c->x86 == 0xf) {
-		rdmsrl(MSR_K7_HWCR, value);
-		value |= 1 << 6;
-		wrmsrl(MSR_K7_HWCR, value);
-	}
+	if (c->x86 == 0xf)
+		msr_set_bit(MSR_K7_HWCR, 6);
 #endif

 	early_init_amd(c);
@@ -623,14 +612,11 @@ static void init_amd(struct cpuinfo_x86 *c)
 	    (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
 	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {

-		if (!rdmsrl_safe(0xc0011005, &value)) {
-			value |= 1ULL << 54;
-			wrmsrl_safe(0xc0011005, value);
+		if (msr_set_bit(0xc0011005, 54) > 0) {
 			rdmsrl(0xc0011005, value);
-			if (value & (1ULL << 54)) {
+			if (value & BIT_64(54)) {
 				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
-				printk(KERN_INFO FW_INFO "CPU: Re-enabling "
-				       "disabled Topology Extensions Support\n");
+				pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
 			}
 		}
 	}
@@ -709,19 +695,12 @@ static void init_amd(struct cpuinfo_x86 *c)
 		 * Disable GART TLB Walk Errors on Fam10h. We do this here
 		 * because this is always needed when GART is enabled, even in a
 		 * kernel which has no MCE support built in.
-		 * BIOS should disable GartTlbWlk Errors themself. If
-		 * it doesn't do it here as suggested by the BKDG.
+		 * BIOS should disable GartTlbWlk Errors already. If
+		 * it doesn't, do it here as suggested by the BKDG.
 		 *
 		 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
 		 */
-		u64 mask;
-		int err;
-
-		err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
-		if (err == 0) {
-			mask |= (1 << 10);
-			wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask);
-		}
+		msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

 		/*
 		 * On family 10h BIOS may not have properly enabled WC+ support,
@@ -733,10 +712,7 @@ static void init_amd(struct cpuinfo_x86 *c)
 		 * NOTE: we want to use the _safe accessors so as not to #GP kvm
 		 * guests on older kvm hosts.
 		 */
-
-		rdmsrl_safe(MSR_AMD64_BU_CFG2, &value);
-		value &= ~(1ULL << 24);
-		wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
+		msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

 	if (cpu_has_amd_erratum(c, amd_erratum_383))
 		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
-- cgit v1.2.3
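Note: the msr_set_bit()/msr_clear_bit() accessors used above were added by an
earlier patch in this same series (not included in this range). Their contract
is what makes the conversions below work: a negative return means the MSR could
not be accessed, 0 means the bit already had the requested value (no write is
performed), and a positive return means the MSR was actually written. A
condensed sketch of that logic, assuming the struct msr and the internal
msr_read()/msr_write() helpers of arch/x86/lib/msr.c; details may differ from
the actual implementation:

	/* Condensed sketch of the bit accessors (arch/x86/lib/msr.c). */
	static int __flip_bit(u32 msr, u8 bit, bool set)
	{
		struct msr m, m1;
		int err = -EINVAL;

		if (bit > 63)
			return err;

		err = msr_read(msr, &m);	/* rdmsrl_safe() underneath */
		if (err)
			return err;

		m1 = m;
		if (set)
			m1.q |= BIT_64(bit);
		else
			m1.q &= ~BIT_64(bit);

		if (m1.q == m.q)
			return 0;		/* already in requested state */

		err = msr_write(msr, &m1);	/* wrmsrl_safe() underneath */
		if (err)
			return err;

		return 1;			/* MSR was actually updated */
	}

	int msr_set_bit(u32 msr, u8 bit)   { return __flip_bit(msr, bit, true); }
	int msr_clear_bit(u32 msr, u8 bit) { return __flip_bit(msr, bit, false); }

Because the helpers use the _safe accessor variants internally, the open-coded
rdmsrl_safe()/wrmsrl_safe() sequences above collapse into a single call without
losing the protection against #GP on MSRs that a hypervisor does not emulate.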
From c0a639ad0bc6b178b46996bd1f821a04643e2bde Mon Sep 17 00:00:00 2001
From: Borislav Petkov
Date: Sun, 9 Mar 2014 18:05:25 +0100
Subject: x86, Intel: Convert to the new bit access MSR accessors

... and save some lines of code.

Signed-off-by: Borislav Petkov
Link: http://lkml.kernel.org/r/1394384725-10796-4-git-send-email-bp@alien8.de
Signed-off-by: H. Peter Anvin
---
 arch/x86/include/uapi/asm/msr-index.h |  9 ++++++---
 arch/x86/kernel/cpu/intel.c           | 30 +++++++-----------------------
 2 files changed, 13 insertions(+), 26 deletions(-)

diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index c19fc60ff062..045e6db6f58a 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -368,14 +368,16 @@
 #define THERM_LOG_THRESHOLD1		(1 << 9)

 /* MISC_ENABLE bits: architectural */
-#define MSR_IA32_MISC_ENABLE_FAST_STRING	(1ULL << 0)
+#define MSR_BIT_FAST_STRING		0
+#define MSR_IA32_MISC_ENABLE_FAST_STRING	(1ULL << MSR_BIT_FAST_STRING)
 #define MSR_IA32_MISC_ENABLE_TCC		(1ULL << 1)
 #define MSR_IA32_MISC_ENABLE_EMON		(1ULL << 7)
 #define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL	(1ULL << 11)
 #define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL	(1ULL << 12)
 #define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP (1ULL << 16)
 #define MSR_IA32_MISC_ENABLE_MWAIT		(1ULL << 18)
-#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID	(1ULL << 22)
+#define MSR_BIT_LIMIT_CPUID		22
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID	(1ULL << MSR_BIT_LIMIT_CPUID)
 #define MSR_IA32_MISC_ENABLE_XTPR_DISABLE	(1ULL << 23)
 #define MSR_IA32_MISC_ENABLE_XD_DISABLE		(1ULL << 34)

@@ -385,7 +387,8 @@
 #define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE	(1ULL << 4)
 #define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE	(1ULL << 6)
 #define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK	(1ULL << 8)
-#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE	(1ULL << 9)
+#define MSR_BIT_PRF_DIS			9
+#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE	(1ULL << MSR_BIT_PRF_DIS)
 #define MSR_IA32_MISC_ENABLE_FERR		(1ULL << 10)
 #define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX	(1ULL << 10)
 #define MSR_IA32_MISC_ENABLE_TM2		(1ULL << 13)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 5cd9bfabd645..44ca6317af43 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -31,11 +31,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)

 	/* Unmask CPUID levels if masked: */
 	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
-		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
-
-		if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
-			misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
-			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+		if (msr_clear_bit(MSR_IA32_MISC_ENABLE, MSR_BIT_LIMIT_CPUID) > 0) {
 			c->cpuid_level = cpuid_eax(0);
 			get_cpu_cap(c);
 		}
@@ -129,16 +125,9 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
 	 * (model 2) with the same problem.
 	 */
-	if (c->x86 == 15) {
-		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
-
-		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
-			printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");
-
-			misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
-			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
-		}
-	}
+	if (c->x86 == 15)
+		if (msr_clear_bit(MSR_IA32_MISC_ENABLE, MSR_BIT_FAST_STRING) > 0)
+			pr_info("kmemcheck: Disabling fast string operations\n");
 #endif

 	/*
@@ -197,8 +186,6 @@ static void intel_smp_check(struct cpuinfo_x86 *c)

 static void intel_workarounds(struct cpuinfo_x86 *c)
 {
-	unsigned long lo, hi;
-
 #ifdef CONFIG_X86_F00F_BUG
 	/*
 	 * All current models of Pentium and Pentium with MMX technology CPUs
@@ -229,12 +216,9 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
 	 * Hardware prefetcher may cause stale data to be loaded into the cache.
 	 */
 	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
-		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
-		if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
-			printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
-			printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
-			lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
-			wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
+		if (msr_set_bit(MSR_IA32_MISC_ENABLE, MSR_BIT_PRF_DIS) > 0) {
+			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
+			pr_info("CPU: Disabling hardware prefetching (Errata 037)\n");
 		}
 	}
-- cgit v1.2.3
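Note: the "> 0" comparisons in the converted code are deliberate. The accessors
can also return a negative error code, so a bare if (msr_clear_bit(...)) would
mistake an inaccessible MSR for a successfully cleared bit. A hypothetical
caller (function name made up for illustration) following the same pattern as
the CPUID-unmasking hunk above:

	/* Hypothetical caller: act only when the bit really changed. */
	static void example_unmask_cpuid(struct cpuinfo_x86 *c)
	{
		int ret = msr_clear_bit(MSR_IA32_MISC_ENABLE,
					MSR_BIT_LIMIT_CPUID);

		if (ret < 0)
			return;	/* MSR not accessible (e.g. #GP trapped) */
		if (ret == 0)
			return;	/* bit was already clear, nothing changed */

		/* ret > 0: the write went through, CPUID output changed */
		c->cpuid_level = cpuid_eax(0);
		get_cpu_cap(c);
	}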
From 0b131be8d4125b32eb5b94e84f4f9dee6a0ef797 Mon Sep 17 00:00:00 2001
From: "H. Peter Anvin"
Date: Thu, 13 Mar 2014 15:40:52 -0700
Subject: x86, intel: Make MSR_IA32_MISC_ENABLE bit constants systematic

Replace somewhat arbitrary constants for bits in MSR_IA32_MISC_ENABLE
with verbose but systematic ones. Add _BIT defines for all the rest
of them, too.

Signed-off-by: H. Peter Anvin
---
 arch/x86/include/uapi/asm/msr-index.h | 78 ++++++++++++++++++++++-------------
 arch/x86/kernel/cpu/intel.c           | 10 +++--
 2 files changed, 57 insertions(+), 31 deletions(-)

diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 045e6db6f58a..4924f4be2b99 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -368,36 +368,58 @@
 #define THERM_LOG_THRESHOLD1		(1 << 9)

 /* MISC_ENABLE bits: architectural */
-#define MSR_BIT_FAST_STRING		0
-#define MSR_IA32_MISC_ENABLE_FAST_STRING	(1ULL << MSR_BIT_FAST_STRING)
-#define MSR_IA32_MISC_ENABLE_TCC		(1ULL << 1)
-#define MSR_IA32_MISC_ENABLE_EMON		(1ULL << 7)
-#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL	(1ULL << 11)
-#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL	(1ULL << 12)
-#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP (1ULL << 16)
-#define MSR_IA32_MISC_ENABLE_MWAIT		(1ULL << 18)
-#define MSR_BIT_LIMIT_CPUID		22
-#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID	(1ULL << MSR_BIT_LIMIT_CPUID)
-#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE	(1ULL << 23)
-#define MSR_IA32_MISC_ENABLE_XD_DISABLE		(1ULL << 34)
+#define MSR_IA32_MISC_ENABLE_FAST_STRING_BIT		0
+#define MSR_IA32_MISC_ENABLE_FAST_STRING		(1ULL << MSR_IA32_MISC_ENABLE_FAST_STRING_BIT)
+#define MSR_IA32_MISC_ENABLE_TCC_BIT			1
+#define MSR_IA32_MISC_ENABLE_TCC			(1ULL << MSR_IA32_MISC_ENABLE_TCC_BIT)
+#define MSR_IA32_MISC_ENABLE_EMON_BIT			7
+#define MSR_IA32_MISC_ENABLE_EMON			(1ULL << MSR_IA32_MISC_ENABLE_EMON_BIT)
+#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT		11
+#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL		(1ULL << MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT)
+#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT		12
+#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL		(1ULL << MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT)
+#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT	16
+#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP	(1ULL << MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT)
+#define MSR_IA32_MISC_ENABLE_MWAIT_BIT			18
+#define MSR_IA32_MISC_ENABLE_MWAIT			(1ULL << MSR_IA32_MISC_ENABLE_MWAIT_BIT)
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT		22
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID		(1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT)
+#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT		23
+#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT		34
+#define MSR_IA32_MISC_ENABLE_XD_DISABLE			(1ULL << MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT)

 /* MISC_ENABLE bits: model-specific, meaning may vary from core to core */
-#define MSR_IA32_MISC_ENABLE_X87_COMPAT		(1ULL << 2)
-#define MSR_IA32_MISC_ENABLE_TM1		(1ULL << 3)
-#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE	(1ULL << 4)
-#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE	(1ULL << 6)
-#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK	(1ULL << 8)
-#define MSR_BIT_PRF_DIS			9
-#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE	(1ULL << MSR_BIT_PRF_DIS)
-#define MSR_IA32_MISC_ENABLE_FERR		(1ULL << 10)
-#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX	(1ULL << 10)
-#define MSR_IA32_MISC_ENABLE_TM2		(1ULL << 13)
-#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE	(1ULL << 19)
-#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK	(1ULL << 20)
-#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT	(1ULL << 24)
-#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE	(1ULL << 37)
-#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE	(1ULL << 38)
-#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE	(1ULL << 39)
+#define MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT		2
+#define MSR_IA32_MISC_ENABLE_X87_COMPAT			(1ULL << MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT)
+#define MSR_IA32_MISC_ENABLE_TM1_BIT			3
+#define MSR_IA32_MISC_ENABLE_TM1			(1ULL << MSR_IA32_MISC_ENABLE_TM1_BIT)
+#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT	4
+#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT	6
+#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT		8
+#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK		(1ULL << MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT)
+#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT	9
+#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_FERR_BIT			10
+#define MSR_IA32_MISC_ENABLE_FERR			(1ULL << MSR_IA32_MISC_ENABLE_FERR_BIT)
+#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT		10
+#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX		(1ULL << MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT)
+#define MSR_IA32_MISC_ENABLE_TM2_BIT			13
+#define MSR_IA32_MISC_ENABLE_TM2			(1ULL << MSR_IA32_MISC_ENABLE_TM2_BIT)
+#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT	19
+#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT		20
+#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK		(1ULL << MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT)
+#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT		24
+#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT		(1ULL << MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT)
+#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT	37
+#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT		38
+#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT	39
+#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT)

 #define MSR_IA32_TSC_DEADLINE		0x000006E0
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 44ca6317af43..34bbb555e269 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -31,7 +31,8 @@ static void early_init_intel(struct cpuinfo_x86 *c)

 	/* Unmask CPUID levels if masked: */
 	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
-		if (msr_clear_bit(MSR_IA32_MISC_ENABLE, MSR_BIT_LIMIT_CPUID) > 0) {
+		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
+				  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
 			c->cpuid_level = cpuid_eax(0);
 			get_cpu_cap(c);
 		}
@@ -126,7 +127,8 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 	 * (model 2) with the same problem.
 	 */
 	if (c->x86 == 15)
-		if (msr_clear_bit(MSR_IA32_MISC_ENABLE, MSR_BIT_FAST_STRING) > 0)
+		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
+				  MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) > 0)
 			pr_info("kmemcheck: Disabling fast string operations\n");
 #endif

@@ -216,7 +218,9 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
 	 * Hardware prefetcher may cause stale data to be loaded into the cache.
 	 */
 	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
-		if (msr_set_bit(MSR_IA32_MISC_ENABLE, MSR_BIT_PRF_DIS) > 0) {
+		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
+				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT)
+		    > 0) {
 			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
 			pr_info("CPU: Disabling hardware prefetching (Errata 037)\n");
 		}
 	}
-- cgit v1.2.3
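Note: after this change every MISC_ENABLE flag comes in two forms: the _BIT
constant is a bit number for the msr_set_bit()/msr_clear_bit() accessors, while
the mask form is for testing a value that has already been read as a whole. A
hypothetical fragment showing both forms side by side; the function name is
made up for illustration:

	/* Hypothetical illustration of the two constant forms. */
	static void example_misc_enable_usage(void)
	{
		u64 misc;

		/* Mask form: test a value read with rdmsrl(). */
		rdmsrl(MSR_IA32_MISC_ENABLE, misc);
		if (misc & MSR_IA32_MISC_ENABLE_TURBO_DISABLE)
			pr_info("turbo disabled by firmware\n");

		/* Bit-number form: feed the _BIT constant to the accessors. */
		msr_clear_bit(MSR_IA32_MISC_ENABLE,
			      MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT);
	}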
From 8c90487cdc64847b4fdd812ab3047f426fec4d13 Mon Sep 17 00:00:00 2001
From: Dave Jones
Date: Wed, 26 Feb 2014 10:49:49 -0500
Subject: Rename TAINT_UNSAFE_SMP to TAINT_CPU_OUT_OF_SPEC

Rename TAINT_UNSAFE_SMP to TAINT_CPU_OUT_OF_SPEC, so we can repurpose
the flag to encompass a wider range of pushing the CPU beyond its
warranty.

Signed-off-by: Dave Jones
Link: http://lkml.kernel.org/r/20140226154949.GA770@redhat.com
Signed-off-by: H. Peter Anvin
---
 arch/x86/kernel/cpu/amd.c | 2 +-
 include/linux/kernel.h    | 2 +-
 kernel/module.c           | 2 +-
 kernel/panic.c            | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index b85e43a5a462..ce8b8ff0e0ef 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -218,7 +218,7 @@ static void amd_k7_smp_check(struct cpuinfo_x86 *c)
 	 */
 	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
-	add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE);
+	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
 }

 static void init_amd_k7(struct cpuinfo_x86 *c)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 196d1ea86df0..08fb02477641 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -458,7 +458,7 @@ extern enum system_states {
 #define TAINT_PROPRIETARY_MODULE	0
 #define TAINT_FORCED_MODULE		1
-#define TAINT_UNSAFE_SMP		2
+#define TAINT_CPU_OUT_OF_SPEC		2
 #define TAINT_FORCED_RMMOD		3
 #define TAINT_MACHINE_CHECK		4
 #define TAINT_BAD_PAGE			5
diff --git a/kernel/module.c b/kernel/module.c
index d24fcf29cb64..ca2c1aded7ee 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1015,7 +1015,7 @@ static size_t module_flags_taint(struct module *mod, char *buf)
 		buf[l++] = 'C';
 	/*
 	 * TAINT_FORCED_RMMOD: could be added.
-	 * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
+	 * TAINT_CPU_OUT_OF_SPEC, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
 	 * apply to modules.
 	 */
 	return l;
diff --git a/kernel/panic.c b/kernel/panic.c
index 6d6300375090..2270cfd1d6be 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -199,7 +199,7 @@ struct tnt {
 static const struct tnt tnts[] = {
 	{ TAINT_PROPRIETARY_MODULE,	'P', 'G' },
 	{ TAINT_FORCED_MODULE,		'F', ' ' },
-	{ TAINT_UNSAFE_SMP,		'S', ' ' },
+	{ TAINT_CPU_OUT_OF_SPEC,	'S', ' ' },
 	{ TAINT_FORCED_RMMOD,		'R', ' ' },
 	{ TAINT_MACHINE_CHECK,		'M', ' ' },
 	{ TAINT_BAD_PAGE,		'B', ' ' },
-- cgit v1.2.3
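Note: the rename keeps both the bit position (2) and the taint letter ('S'), so
/proc/sys/kernel/tainted and existing decoders are unaffected. A hypothetical
user-space check for the repurposed flag, reading the decimal taint mask the
kernel exposes:

	/* Hypothetical user-space check for the out-of-spec taint bit. */
	#include <stdio.h>

	#define TAINT_CPU_OUT_OF_SPEC 2	/* bit 2, letter 'S' */

	int main(void)
	{
		unsigned long tainted;
		FILE *f = fopen("/proc/sys/kernel/tainted", "r");

		if (!f || fscanf(f, "%lu", &tainted) != 1)
			return 1;
		fclose(f);

		if (tainted & (1UL << TAINT_CPU_OUT_OF_SPEC))
			printf("kernel tainted: CPU run out of spec ('S')\n");
		return 0;
	}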
From 69f2366c9456d0ce784cf5aba87ee77eeadc1d5e Mon Sep 17 00:00:00 2001
From: Chris Bainbridge
Date: Fri, 7 Mar 2014 18:40:42 +0700
Subject: x86, cpu: Add forcepae parameter for booting PAE kernels on PAE-disabled Pentium M

Many Pentium M systems disable PAE but may have a functionally usable PAE
implementation. This adds the "forcepae" parameter which bypasses the boot
check for PAE, and sets the CPU as being PAE capable. Using this parameter
will taint the kernel with TAINT_CPU_OUT_OF_SPEC.

Signed-off-by: Chris Bainbridge
Link: http://lkml.kernel.org/r/20140307114040.GA4997@localhost
Acked-by: Borislav Petkov
Signed-off-by: H. Peter Anvin
---
 Documentation/kernel-parameters.txt |  7 +++++++
 arch/x86/boot/cpucheck.c            | 20 ++++++++++++++++++++
 arch/x86/kernel/cpu/intel.c         | 19 +++++++++++++++++++
 3 files changed, 46 insertions(+)

diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 7116fda7077f..06600cc9a26c 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1011,6 +1011,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			parameter will force ia64_sal_cache_flush to call
			ia64_pal_cache_flush instead of SAL_CACHE_FLUSH.

+	forcepae	[X86-32]
+			Forcefully enable Physical Address Extension (PAE).
+			Many Pentium M systems disable PAE but may have a
+			functionally usable PAE implementation.
+			Warning: use of this parameter will taint the kernel
+			and may cause unknown problems.
+
	ftrace=[tracer]
			[FTRACE] will set and start the specified tracer
			as early as possible in order to facilitate early
diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
index 100a9a10076a..f0d0b20fe149 100644
--- a/arch/x86/boot/cpucheck.c
+++ b/arch/x86/boot/cpucheck.c
@@ -67,6 +67,13 @@ static int is_transmeta(void)
	       cpu_vendor[2] == A32('M', 'x', '8', '6');
 }

+static int is_intel(void)
+{
+	return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
+	       cpu_vendor[1] == A32('i', 'n', 'e', 'I') &&
+	       cpu_vendor[2] == A32('n', 't', 'e', 'l');
+}
+
 /* Returns a bitmask of which words we have error bits in */
 static int check_cpuflags(void)
 {
@@ -153,6 +160,19 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));

		err = check_cpuflags();
+	} else if (err == 0x01 &&
+		   !(err_flags[0] & ~(1 << X86_FEATURE_PAE)) &&
+		   is_intel() && cpu.level == 6 &&
+		   (cpu.model == 9 || cpu.model == 13)) {
+		/* PAE is disabled on this Pentium M but can be forced */
+		if (cmdline_find_option_bool("forcepae")) {
+			puts("WARNING: Forcing PAE in CPU flags\n");
+			set_bit(X86_FEATURE_PAE, cpu.flags);
+			err = check_cpuflags();
+		}
+		else {
+			puts("WARNING: PAE disabled. Use parameter 'forcepae' to enable at your own risk!\n");
+		}
	}

	if (err_flags_ptr)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 34bbb555e269..897d6201ef10 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -186,6 +186,14 @@ static void intel_smp_check(struct cpuinfo_x86 *c)
	}
 }

+static int forcepae;
+static int __init forcepae_setup(char *__unused)
+{
+	forcepae = 1;
+	return 1;
+}
+__setup("forcepae", forcepae_setup);
+
 static void intel_workarounds(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_F00F_BUG
@@ -213,6 +221,17 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

+	/*
+	 * PAE CPUID issue: many Pentium M report no PAE but may have a
+	 * functionally usable PAE implementation.
+	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
+	 */
+	if (forcepae) {
+		printk(KERN_WARNING "PAE forced!\n");
+		set_cpu_cap(c, X86_FEATURE_PAE);
+		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
+	}
+
	/*
	 * P4 Xeon errata 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
-- cgit v1.2.3
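Note: the parameter is wired up twice because the boot stub in arch/x86/boot
runs long before the kernel's parameter parser exists: cpucheck.c scans the raw
command line with cmdline_find_option_bool(), while intel.c registers a normal
__setup() handler for the kernel proper. A minimal sketch of such a boolean
__setup() handler, with made-up names, modeled on the forcepae hook above:

	/* Minimal sketch of a boolean early parameter, modeled on forcepae. */
	static int example_flag;

	static int __init example_flag_setup(char *unused)
	{
		example_flag = 1;
		return 1;	/* non-zero: option consumed, not passed to init */
	}
	__setup("exampleflag", example_flag_setup);

The handler only records the flag; acting on it is deferred to later CPU setup
code (here intel_workarounds()), which runs once per CPU and can safely call
set_cpu_cap() and add_taint().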