author    Linus Torvalds <torvalds@linux-foundation.org>	2019-07-11 13:54:00 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>	2019-07-11 13:54:00 -0700
commit    753c8d9b7d81206bb5d011b28abe829d364b028e
tree      dacd394bc1fa375be9f214934b1ab3daadfd7360
parent    d7fe42a64a19a4140fb94bcf996035319cd3e6b9
parent    cbf5b73d162b22e044fe0b7d51dcaa33be065253
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Thomas Gleixner:
 "A collection of assorted fixes:

   - Fix for the pinned cr0/4 fallout which escaped all testing efforts
     because the kvm-intel module was never loaded when the kernel was
     compiled with CONFIG_PARAVIRT=n. The cr0/4 accessors are moved out
     of line and the static key is now solely used in the core code and
     therefore can stay in the RO-after-init section. So kvm-intel and
     other modules no longer reference the (read-only) static key which
     the module loader tried to update.

   - Prevent an infinite loop in arch_stack_walk_user() by breaking out
     of the loop once the return address is detected to be 0.

   - Prevent the int3_emulate_call() selftest from corrupting the stack
     when KASAN is enabled. KASAN clobbers more registers than covered
     by the emulated call implementation. Convert the int3_magic()
     selftest to an ASM function so the compiler cannot KASANify it.

   - Unbreak the build with old GCC versions and with the Gold linker
     by reverting the move of _etext to the actual end of .text. In
     both cases the build fails with 'Invalid absolute R_X86_64_32S
     relocation: _etext'.

   - Initialize the context lock for init_mm, which was never an issue
     until the alternatives code started to use a temporary mm for
     patching.

   - Fix a build warning vs. the LOWMEM_PAGES constant where clang
     rightfully complains about a signed integer overflow in the shift
     operation, by converting the operand to an ULL.

   - Adjust the misnamed ENDPROC() of common_spurious in the 32-bit
     entry code"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/stacktrace: Prevent infinite loop in arch_stack_walk_user()
  x86/asm: Move native_write_cr0/4() out of line
  x86/pgtable/32: Fix LOWMEM_PAGES constant
  x86/alternatives: Fix int3_emulate_call() selftest stack corruption
  x86/entry/32: Fix ENDPROC of common_spurious
  Revert "x86/build: Move _etext to actual end of .text"
  x86/ldt: Initialize the context lock for init_mm
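For the arch_stack_walk_user() item, the fix amounts to one extra termination check in the frame-pointer unwind loop. A simplified, hypothetical sketch of the pattern (illustrative names, not the exact kernel diff):

#include <linux/uaccess.h>

/* Illustrative user-space frame layout for a frame-pointer unwind. */
struct user_frame {
	struct user_frame __user *next_fp;
	unsigned long ret_addr;
};

static void walk_user_stack(struct user_frame __user *fp,
			    unsigned long *entries, unsigned int max)
{
	unsigned int n = 0;

	while (fp && n < max) {
		struct user_frame frame;

		if (copy_from_user(&frame, fp, sizeof(frame)))
			break;
		/* The fix described above: a return address of 0 means
		 * the chain has ended (or points at itself), so stop
		 * instead of iterating forever. */
		if (!frame.ret_addr)
			break;
		entries[n++] = frame.ret_addr;
		fp = frame.next_fp;
	}
}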
Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/mmu.h            |  1 +
-rw-r--r--  arch/x86/include/asm/pgtable_32.h     |  2 +-
-rw-r--r--  arch/x86/include/asm/processor.h      |  1 +
-rw-r--r--  arch/x86/include/asm/special_insns.h  | 41 ++----------------------
4 files changed, 5 insertions(+), 40 deletions(-)
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 5ff3e8af2c20..e78c7db87801 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -59,6 +59,7 @@ typedef struct {
 #define INIT_MM_CONTEXT(mm)						\
 	.context = {							\
 		.ctx_id = 1,						\
+		.lock = __MUTEX_INITIALIZER(mm.context.lock),		\
 	}
 
 void leave_mm(int cpu);
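For context, __MUTEX_INITIALIZER() is the standard helper for mutexes embedded in statically initialized structures, which is exactly what the hunk above applies to init_mm.context.lock so the lock is valid before any runtime init code runs. A minimal sketch of the pattern (demo_context/demo_ctx are hypothetical names):

#include <linux/mutex.h>

struct demo_context {
	int ctx_id;
	struct mutex lock;
};

/* Statically initialized instance: the embedded mutex is usable at
 * boot, before any code could have called mutex_init() on it. */
static struct demo_context demo_ctx = {
	.ctx_id	= 1,
	.lock	= __MUTEX_INITIALIZER(demo_ctx.lock),
};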
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 4fe9e7fc74d3..c78da8eda8f2 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -106,6 +106,6 @@ do { \
  * with only a host target support using a 32-bit type for internal
  * representation.
  */
-#define LOWMEM_PAGES ((((2<<31) - __PAGE_OFFSET) >> PAGE_SHIFT))
+#define LOWMEM_PAGES ((((_ULL(2)<<31) - __PAGE_OFFSET) >> PAGE_SHIFT))
 
 #endif /* _ASM_X86_PGTABLE_32_H */
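Why the change matters: 2 is a signed 32-bit int, so 2<<31 overflows, which is undefined behaviour and what clang warns about; widening the operand first, as _ULL(2) does, produces the intended 4 GiB value. A standalone userspace illustration (hypothetical demo, not kernel code):

#include <stdio.h>

int main(void)
{
	/* 2 << 31 would overflow a signed int (undefined behaviour);
	 * 2ULL << 31 mirrors _ULL(2) << 31 from the patched macro. */
	unsigned long long lowmem_top = 2ULL << 31;

	printf("0x%llx\n", lowmem_top);	/* prints 0x100000000 (4 GiB) */
	return 0;
}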
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 3eab6ece52b4..6e0a3b43d027 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -741,6 +741,7 @@ extern void load_direct_gdt(int);
 extern void load_fixmap_gdt(int);
 extern void load_percpu_segment(int);
 extern void cpu_init(void);
+extern void cr4_init(void);
 
 static inline unsigned long get_debugctlmsr(void)
 {
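The new cr4_init() declaration pairs with a definition the merged out-of-line commit adds to arch/x86/kernel/cpu/common.c; per the commit, it re-establishes the pinned CR4 bits when a CPU comes up. A hedged sketch of its likely shape (the exact body may differ from the tree):

void cr4_init(void)
{
	unsigned long cr4 = __read_cr4();

	/* If pinning is already engaged, force the pinned bits on so a
	 * starting CPU cannot come online with them cleared. */
	if (static_branch_likely(&cr_pinning))
		cr4 |= cr4_pinned_bits;

	__write_cr4(cr4);

	/* Initialize this CPU's CR4 shadow. */
	this_cpu_write(cpu_tlbstate.cr4, cr4);
}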
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index b2e84d113f2a..219be88a59d2 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -18,9 +18,7 @@
  */
 extern unsigned long __force_order;
 
-/* Starts false and gets enabled once CPU feature detection is done. */
-DECLARE_STATIC_KEY_FALSE(cr_pinning);
-extern unsigned long cr4_pinned_bits;
+void native_write_cr0(unsigned long val);
 
 static inline unsigned long native_read_cr0(void)
 {
@@ -29,24 +27,6 @@ static inline unsigned long native_read_cr0(void)
 	return val;
 }
 
-static inline void native_write_cr0(unsigned long val)
-{
-	unsigned long bits_missing = 0;
-
-set_register:
-	asm volatile("mov %0,%%cr0": "+r" (val), "+m" (__force_order));
-
-	if (static_branch_likely(&cr_pinning)) {
-		if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
-			bits_missing = X86_CR0_WP;
-			val |= bits_missing;
-			goto set_register;
-		}
-		/* Warn after we've set the missing bits. */
-		WARN_ONCE(bits_missing, "CR0 WP bit went missing!?\n");
-	}
-}
-
 static inline unsigned long native_read_cr2(void)
 {
 	unsigned long val;
@@ -91,24 +71,7 @@ static inline unsigned long native_read_cr4(void)
 	return val;
 }
 
-static inline void native_write_cr4(unsigned long val)
-{
-	unsigned long bits_missing = 0;
-
-set_register:
-	asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));
-
-	if (static_branch_likely(&cr_pinning)) {
-		if (unlikely((val & cr4_pinned_bits) != cr4_pinned_bits)) {
-			bits_missing = ~val & cr4_pinned_bits;
-			val |= bits_missing;
-			goto set_register;
-		}
-		/* Warn after we've set the missing bits. */
-		WARN_ONCE(bits_missing, "CR4 bits went missing: %lx!?\n",
-			  bits_missing);
-	}
-}
+void native_write_cr4(unsigned long val);
 
 #ifdef CONFIG_X86_64
 static inline unsigned long native_read_cr8(void)
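For completeness: the inline bodies removed above move, essentially unchanged, to arch/x86/kernel/cpu/common.c, so the cr_pinning static key is referenced only by built-in core code while modules call the plain exported functions. A sketch of the relocated CR0 writer, reconstructed from the hunk removed above (the EXPORT_SYMBOL line is an assumption based on the commit description, since modules such as kvm-intel need to reach it):

void native_write_cr0(unsigned long val)
{
	unsigned long bits_missing = 0;

set_register:
	asm volatile("mov %0,%%cr0" : "+r" (val), "+m" (__force_order));

	if (static_branch_likely(&cr_pinning)) {
		if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
			bits_missing = X86_CR0_WP;
			val |= bits_missing;
			goto set_register;
		}
		/* Warn after we've set the missing bits. */
		WARN_ONCE(bits_missing, "CR0 WP bit went missing!?\n");
	}
}
EXPORT_SYMBOL(native_write_cr0);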