From 87d6021b814353d7b353afcc3698ffe49de7d4ec Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Tue, 1 Oct 2019 16:23:35 +0200
Subject: x86/math-emu: Limit MATH_EMULATION to 486SX compatibles
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The FPU emulation code is old and fragile in places; try to limit its
use to builds for CPUs that actually use it. As far as I can tell, this
is only true for i486sx compatibles, including the Cyrix 486SLC, AMD
Am486SX and ÉLAN SC410, UMC U5S and DM&P VortexSX86, all of which were
relatively short-lived and got replaced with i486DX compatible
processors soon after introduction, though some of the embedded
versions remained available much longer.

Signed-off-by: Arnd Bergmann
Signed-off-by: Borislav Petkov
Reviewed-by: Kees Cook
Cc: "H. Peter Anvin"
Cc: Andrew Morton
Cc: Bill Metzenthen
Cc: Ingo Molnar
Cc: Masahiro Yamada
Cc: Thomas Gleixner
Cc: x86-ml
Link: https://lkml.kernel.org/r/20191001142344.1274185-2-arnd@arndb.de
---
 arch/x86/include/asm/module.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/x86/include/asm')

diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
index 7948a17febb4..c215d2762488 100644
--- a/arch/x86/include/asm/module.h
+++ b/arch/x86/include/asm/module.h
@@ -15,6 +15,8 @@ struct mod_arch_specific {
 
 #ifdef CONFIG_X86_64
 /* X86_64 does not define MODULE_PROC_FAMILY */
+#elif defined CONFIG_M486SX
+#define MODULE_PROC_FAMILY "486SX "
 #elif defined CONFIG_M486
 #define MODULE_PROC_FAMILY "486 "
 #elif defined CONFIG_M586
--
cgit v1.2.3
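A minimal user-space sketch (not the kernel's actual code) of why the new
"486SX " family string matters: MODULE_PROC_FAMILY is normally pasted into
the module vermagic string compared at module load time, so modules built
with CONFIG_M486SX only load on a kernel built the same way. The CONFIG
define and the vermagic prefix below are simplified assumptions for
illustration only.

#include <stdio.h>

/* Stand-in for the Kconfig choice; a real build defines exactly one
 * CONFIG_M* processor-family symbol. */
#define CONFIG_M486SX 1

#if defined(CONFIG_M486SX)
# define MODULE_PROC_FAMILY "486SX "
#elif defined(CONFIG_M486)
# define MODULE_PROC_FAMILY "486 "
#else
# define MODULE_PROC_FAMILY ""
#endif

/* Illustrative only: the kernel composes vermagic from several pieces
 * (release, SMP, preempt, ...); the family string is just one of them. */
#define VERMAGIC_STRING "5.4.0 SMP mod_unload " MODULE_PROC_FAMILY

int main(void)
{
	printf("vermagic: %s\n", VERMAGIC_STRING);
	return 0;
}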
Peter Anvin" Cc: Ingo Molnar Cc: Josh Poimboeuf Cc: "Peter Zijlstra (Intel)" Cc: robert.hu@linux.intel.com Cc: Thomas Gleixner Cc: Thomas Hellstrom Cc: x86-ml Link: https://lkml.kernel.org/r/20191007204839.5727.10803.stgit@localhost.localdomain --- arch/x86/include/asm/cpufeatures.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86/include/asm') diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 0652d3eed9bd..1db10a112537 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -292,6 +292,7 @@ #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ #define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ #define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */ +#define X86_FEATURE_RDPRU (13*32+ 4) /* Read processor register at user level */ #define X86_FEATURE_WBNOINVD (13*32+ 9) /* WBNOINVD instruction */ #define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */ #define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */ -- cgit v1.2.3 From b971880fe79f4042aaaf426744a5b19521bf77b3 Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Tue, 5 Nov 2019 21:25:32 +0000 Subject: x86/Kconfig: Rename UMIP config parameter AMD 2nd generation EPYC processors support the UMIP (User-Mode Instruction Prevention) feature. So, rename X86_INTEL_UMIP to generic X86_UMIP and modify the text to cover both Intel and AMD. [ bp: take of the disabled-features.h copy in tools/ too. ] Signed-off-by: Babu Moger Signed-off-by: Borislav Petkov Cc: Andy Lutomirski Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Ricardo Neri Cc: Thomas Gleixner Cc: "x86@kernel.org" Link: https://lkml.kernel.org/r/157298912544.17462.2018334793891409521.stgit@naples-babu.amd.com --- arch/x86/Kconfig | 16 ++++++++-------- arch/x86/include/asm/disabled-features.h | 2 +- arch/x86/include/asm/umip.h | 4 ++-- arch/x86/kernel/Makefile | 2 +- tools/arch/x86/include/asm/disabled-features.h | 2 +- 5 files changed, 13 insertions(+), 13 deletions(-) (limited to 'arch/x86/include/asm') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 896f840ade2d..434fae95279c 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1880,16 +1880,16 @@ config X86_SMAP If unsure, say Y. -config X86_INTEL_UMIP +config X86_UMIP def_bool y - depends on CPU_SUP_INTEL - prompt "Intel User Mode Instruction Prevention" if EXPERT + depends on CPU_SUP_INTEL || CPU_SUP_AMD + prompt "User Mode Instruction Prevention" if EXPERT ---help--- - The User Mode Instruction Prevention (UMIP) is a security - feature in newer Intel processors. If enabled, a general - protection fault is issued if the SGDT, SLDT, SIDT, SMSW - or STR instructions are executed in user mode. These instructions - unnecessarily expose information about the hardware state. + User Mode Instruction Prevention (UMIP) is a security feature in + some x86 processors. If enabled, a general protection fault is + issued if the SGDT, SLDT, SIDT, SMSW or STR instructions are + executed in user mode. These instructions unnecessarily expose + information about the hardware state. The vast majority of applications do not use these instructions. 
From b971880fe79f4042aaaf426744a5b19521bf77b3 Mon Sep 17 00:00:00 2001
From: Babu Moger
Date: Tue, 5 Nov 2019 21:25:32 +0000
Subject: x86/Kconfig: Rename UMIP config parameter

AMD 2nd generation EPYC processors support the UMIP (User-Mode
Instruction Prevention) feature. So, rename X86_INTEL_UMIP to generic
X86_UMIP and modify the text to cover both Intel and AMD.

[ bp: take care of the disabled-features.h copy in tools/ too. ]

Signed-off-by: Babu Moger
Signed-off-by: Borislav Petkov
Cc: Andy Lutomirski
Cc: "H. Peter Anvin"
Cc: Ingo Molnar
Cc: Ricardo Neri
Cc: Thomas Gleixner
Cc: "x86@kernel.org"
Link: https://lkml.kernel.org/r/157298912544.17462.2018334793891409521.stgit@naples-babu.amd.com
---
 arch/x86/Kconfig                               | 16 ++++++++--------
 arch/x86/include/asm/disabled-features.h       |  2 +-
 arch/x86/include/asm/umip.h                    |  4 ++--
 arch/x86/kernel/Makefile                       |  2 +-
 tools/arch/x86/include/asm/disabled-features.h |  2 +-
 5 files changed, 13 insertions(+), 13 deletions(-)

(limited to 'arch/x86/include/asm')

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 896f840ade2d..434fae95279c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1880,16 +1880,16 @@ config X86_SMAP
 
 	  If unsure, say Y.
 
-config X86_INTEL_UMIP
+config X86_UMIP
 	def_bool y
-	depends on CPU_SUP_INTEL
-	prompt "Intel User Mode Instruction Prevention" if EXPERT
+	depends on CPU_SUP_INTEL || CPU_SUP_AMD
+	prompt "User Mode Instruction Prevention" if EXPERT
 	---help---
-	  The User Mode Instruction Prevention (UMIP) is a security
-	  feature in newer Intel processors. If enabled, a general
-	  protection fault is issued if the SGDT, SLDT, SIDT, SMSW
-	  or STR instructions are executed in user mode. These instructions
-	  unnecessarily expose information about the hardware state.
+	  User Mode Instruction Prevention (UMIP) is a security feature in
+	  some x86 processors. If enabled, a general protection fault is
+	  issued if the SGDT, SLDT, SIDT, SMSW or STR instructions are
+	  executed in user mode. These instructions unnecessarily expose
+	  information about the hardware state.
 
 	  The vast majority of applications do not use these instructions.
 	  For the very few that do, software emulation is provided in
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index a5ea841cc6d2..8e1d0bb46361 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -22,7 +22,7 @@
 # define DISABLE_SMAP	(1<<(X86_FEATURE_SMAP & 31))
 #endif
 
-#ifdef CONFIG_X86_INTEL_UMIP
+#ifdef CONFIG_X86_UMIP
 # define DISABLE_UMIP	0
 #else
 # define DISABLE_UMIP	(1<<(X86_FEATURE_UMIP & 31))
diff --git a/arch/x86/include/asm/umip.h b/arch/x86/include/asm/umip.h
index db43f2a0d92c..aeed98c3c9e1 100644
--- a/arch/x86/include/asm/umip.h
+++ b/arch/x86/include/asm/umip.h
@@ -4,9 +4,9 @@
 #include <linux/types.h>
 #include <asm/ptrace.h>
 
-#ifdef CONFIG_X86_INTEL_UMIP
+#ifdef CONFIG_X86_UMIP
 bool fixup_umip_exception(struct pt_regs *regs);
 #else
 static inline bool fixup_umip_exception(struct pt_regs *regs) { return false; }
-#endif /* CONFIG_X86_INTEL_UMIP */
+#endif /* CONFIG_X86_UMIP */
 #endif /* _ASM_X86_UMIP_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 3578ad248bc9..52ce1e239525 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -134,7 +134,7 @@ obj-$(CONFIG_EFI)			+= sysfb_efi.o
 obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o
 obj-$(CONFIG_TRACING)			+= tracepoint.o
 obj-$(CONFIG_SCHED_MC_PRIO)		+= itmt.o
-obj-$(CONFIG_X86_INTEL_UMIP)		+= umip.o
+obj-$(CONFIG_X86_UMIP)			+= umip.o
 
 obj-$(CONFIG_UNWINDER_ORC)		+= unwind_orc.o
 obj-$(CONFIG_UNWINDER_FRAME_POINTER)	+= unwind_frame.o
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index a5ea841cc6d2..8e1d0bb46361 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -22,7 +22,7 @@
 # define DISABLE_SMAP	(1<<(X86_FEATURE_SMAP & 31))
 #endif
 
-#ifdef CONFIG_X86_INTEL_UMIP
+#ifdef CONFIG_X86_UMIP
 # define DISABLE_UMIP	0
 #else
 # define DISABLE_UMIP	(1<<(X86_FEATURE_UMIP & 31))
--
cgit v1.2.3
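To see the class of accesses UMIP restricts, take SMSW, one of the five
instructions named in the help text. The sketch below is ordinary
user-space code, not kernel code: without UMIP it prints the low bits of
CR0; with UMIP enforced the instruction raises a general protection fault
and, depending on the instruction and the kernel's emulation coverage, the
kernel either emulates the read or the process receives SIGSEGV.

#include <stdio.h>

int main(void)
{
	unsigned long msw = 0;

	/* SMSW stores the Machine Status Word (the low word of CR0).
	 * Reading it from CPL 3 is exactly the kind of hardware-state
	 * exposure UMIP is meant to block. */
	asm volatile("smsw %0" : "=r" (msw));
	printf("SMSW: %#lx\n", msw);
	return 0;
}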
From db8c33f8b5bea59d00ca12dcd6b65d01b1ea98ef Mon Sep 17 00:00:00 2001
From: Fenghua Yu
Date: Mon, 16 Sep 2019 15:39:58 -0700
Subject: x86/cpu: Align the x86_capability array to size of unsigned long

The x86_capability array in cpuinfo_x86 is of type u32 and thus is
naturally aligned to 4 bytes. But, set_bit() and clear_bit() require the
array to be aligned to size of unsigned long (i.e. 8 bytes on 64-bit
systems).

The array pointer is handed into atomic bit operations. If the access is
not aligned to unsigned long then the atomic bit operations can end up
crossing a cache line boundary, which causes the CPU to do a full bus
lock as it can't lock both cache lines at once. The bus lock operation
is heavy weight and can cause severe performance degradation.

The upcoming #AC split lock detection mechanism will issue warnings for
this kind of access.

Force the alignment of the array to unsigned long. This avoids the
massive code changes which would be required when converting the array
data type to unsigned long.

[ tglx: Rewrote changelog so it contains information WHY this is
  required ]

Suggested-by: David Laight
Suggested-by: Thomas Gleixner
Signed-off-by: Fenghua Yu
Signed-off-by: Tony Luck
Signed-off-by: Thomas Gleixner
Link: https://lkml.kernel.org/r/20190916223958.27048-4-tony.luck@intel.com
---
 arch/x86/include/asm/processor.h | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

(limited to 'arch/x86/include/asm')

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 6e0a3b43d027..c073534ca485 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -93,7 +93,15 @@ struct cpuinfo_x86 {
 	__u32			extended_cpuid_level;
 	/* Maximum supported CPUID level, -1=no CPUID: */
 	int			cpuid_level;
-	__u32			x86_capability[NCAPINTS + NBUGINTS];
+	/*
+	 * Align to size of unsigned long because the x86_capability array
+	 * is passed to bitops which require the alignment. Use unnamed
+	 * union to enforce the array is aligned to size of unsigned long.
+	 */
+	union {
+		__u32		x86_capability[NCAPINTS + NBUGINTS];
+		unsigned long	x86_capability_alignment;
+	};
 	char			x86_vendor_id[16];
 	char			x86_model_id[64];
 	/* in KB - valid for CPUS which support this call: */
--
cgit v1.2.3
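A standalone sketch of the anonymous-union trick in the hunk above: the
unsigned long member is never accessed; it only raises the alignment of
the union, so the u32 array sharing its storage always starts on an
unsigned long boundary and an atomic bitop on it cannot straddle a cache
line. Struct and member names here are illustrative, not the kernel's,
and the assertions assume a typical LP64 or ILP32 ABI.

#include <stdint.h>
#include <stddef.h>

struct caps_plain {
	int		cpuid_level;
	uint32_t	caps[19];	/* only 4-byte aligned */
};

struct caps_aligned {
	int		cpuid_level;
	union {
		uint32_t	caps[19];
		unsigned long	caps_alignment;	/* exists only to force alignment */
	};
};

/* The bare array is placed right after the int, i.e. at a 4-byte offset... */
_Static_assert(offsetof(struct caps_plain, caps) == 4,
	       "u32 array follows the int immediately");
/* ...while the union inherits unsigned long alignment, so bitops that cast
 * the array to unsigned long * stay naturally aligned. */
_Static_assert(offsetof(struct caps_aligned, caps) % sizeof(unsigned long) == 0,
	       "union forces unsigned long alignment");

int main(void)
{
	return 0;
}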