From 4907c68abd3f60f650f98d5a69d4ec77c0bde44f Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 11 Oct 2018 12:38:26 +0200
Subject: x86/tsc: Force inlining of cyc2ns bits

Looking at the asm for native_sched_clock() I noticed we don't inline
enough. Mostly caused by sharing code with cyc2ns_read_begin(), which we
didn't use to do. So mark all that __always_inline to make it DTRT.

Fixes: 59eaef78bfea ("x86/tsc: Remodel cyc2ns to use seqcount_latch()")
Reported-by: Eric Dumazet
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Thomas Gleixner
Cc: hpa@zytor.com
Cc: eric.dumazet@gmail.com
Cc: bp@alien8.de
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20181011104019.695196158@infradead.org
---
 arch/x86/kernel/tsc.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index b52bd2b6cdb4..6d5dc5dabfd7 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -58,7 +58,7 @@ struct cyc2ns {
 
 static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
 
-void cyc2ns_read_begin(struct cyc2ns_data *data)
+void __always_inline cyc2ns_read_begin(struct cyc2ns_data *data)
 {
 	int seq, idx;
 
@@ -75,7 +75,7 @@ void cyc2ns_read_begin(struct cyc2ns_data *data)
 	} while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence)));
 }
 
-void cyc2ns_read_end(void)
+void __always_inline cyc2ns_read_end(void)
 {
 	preempt_enable_notrace();
 }
@@ -104,7 +104,7 @@ void cyc2ns_read_end(void)
  * -johnstul@us.ibm.com "math is hard, lets go shopping!"
  */
 
-static inline unsigned long long cycles_2_ns(unsigned long long cyc)
+static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc)
 {
 	struct cyc2ns_data data;
 	unsigned long long ns;
-- cgit v1.2.3
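For readers following along: __always_inline is the kernel's wrapper around
GCC's always_inline attribute, which turns the inline hint into a hard
requirement. A minimal userspace sketch of the difference (function names
invented for illustration, not taken from the patch above):

  #include <stdio.h>

  /* Plain "inline" is only a hint; the compiler is free to emit an
   * out-of-line call, e.g. at -O0 or when code is shared across
   * callers, which is what happened to the cyc2ns helpers. */
  static inline unsigned long long scale_hint(unsigned long long cyc)
  {
          return (cyc * 3) >> 1;
  }

  /* always_inline forces inlining (or fails the build if it cannot),
   * keeping hot paths like native_sched_clock() call-free. */
  static inline __attribute__((always_inline))
  unsigned long long scale_forced(unsigned long long cyc)
  {
          return (cyc * 3) >> 1;
  }

  int main(void)
  {
          printf("%llu %llu\n", scale_hint(100), scale_forced(100));
          return 0;
  }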
From b59167ac7bafd804c91e49ad53c6d33a7394d4c8 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 11 Oct 2018 12:38:27 +0200
Subject: x86/percpu: Fix this_cpu_read()

Eric reported that a sequence count loop using this_cpu_read() got
optimized out. This is wrong, this_cpu_read() must imply READ_ONCE()
because the interface is IRQ-safe, therefore an interrupt can have
changed the per-cpu value.

Fixes: 7c3576d261ce ("[PATCH] i386: Convert PDA into the percpu section")
Reported-by: Eric Dumazet
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Thomas Gleixner
Acked-by: Eric Dumazet
Cc: hpa@zytor.com
Cc: eric.dumazet@gmail.com
Cc: bp@alien8.de
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20181011104019.748208519@infradead.org
---
 arch/x86/include/asm/percpu.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index e9202a0de8f0..1a19d11cfbbd 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -185,22 +185,22 @@ do { \
 	typeof(var) pfo_ret__;				\
 	switch (sizeof(var)) {				\
 	case 1:						\
-		asm(op "b "__percpu_arg(1)",%0"		\
+		asm volatile(op "b "__percpu_arg(1)",%0"\
 		    : "=q" (pfo_ret__)			\
 		    : "m" (var));			\
 		break;					\
 	case 2:						\
-		asm(op "w "__percpu_arg(1)",%0"		\
+		asm volatile(op "w "__percpu_arg(1)",%0"\
 		    : "=r" (pfo_ret__)			\
 		    : "m" (var));			\
 		break;					\
 	case 4:						\
-		asm(op "l "__percpu_arg(1)",%0"		\
+		asm volatile(op "l "__percpu_arg(1)",%0"\
 		    : "=r" (pfo_ret__)			\
 		    : "m" (var));			\
 		break;					\
 	case 8:						\
-		asm(op "q "__percpu_arg(1)",%0"		\
+		asm volatile(op "q "__percpu_arg(1)",%0"\
 		    : "=r" (pfo_ret__)			\
 		    : "m" (var));			\
 		break;					\
-- cgit v1.2.3
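The bug class fixed here is easy to reproduce outside the kernel. Without
volatile, GCC may treat an asm statement as a pure function of its operands
and merge identical instances, so a sequence-count retry loop never re-reads
the value an interrupt changed. A userspace sketch (x86 assumed, identifiers
invented, not the kernel macro itself):

  static unsigned int seq; /* updated from a signal/interrupt context */

  static unsigned int read_seq_broken(void)
  {
          unsigned int ret;
          /* No "volatile": identical asm statements with the same "m"
           * input may be CSE'd, eliminating the re-read. */
          asm("movl %1, %0" : "=r" (ret) : "m" (seq));
          return ret;
  }

  static unsigned int read_seq_fixed(void)
  {
          unsigned int ret;
          /* "volatile" forces the load to be issued every time --
           * the READ_ONCE() semantics the changelog asks for. */
          asm volatile("movl %1, %0" : "=r" (ret) : "m" (seq));
          return ret;
  }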
From 16561f27f94e6193ee8f5b9b74801e1668c86efc Mon Sep 17 00:00:00 2001
From: Dave Hansen
Date: Fri, 12 Oct 2018 16:21:18 -0700
Subject: x86/entry: Add some paranoid entry/exit CR3 handling comments

Andi Kleen was just asking me about the NMI CR3 handling and why we
restore it unconditionally. I was *sure* we had documented it well.
We did not.

Add some documentation. We have common entry code where the CR3 value
is stashed, but three places in two big code paths where we restore it.
I put the bulk of the comments in this common path and then refer to it
from the other spots.

Signed-off-by: Dave Hansen
Signed-off-by: Thomas Gleixner
Cc: luto@kernel.org
Cc: bp@alien8.de
Cc: "H. Peter Anvin"
Cc: Borislav Petkov
Link: https://lkml.kernel.org/r/20181012232118.3EAAE77B@viggo.jf.intel.com
---
 arch/x86/entry/entry_64.S | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 957dfb693ecc..1d9b4a300c8c 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1187,6 +1187,18 @@ ENTRY(paranoid_entry)
 	xorl	%ebx, %ebx
 
 1:
+	/*
+	 * Always stash CR3 in %r14.  This value will be restored,
+	 * verbatim, at exit.  Needed if kernel is interrupted
+	 * after switching to the user CR3 value but before
+	 * returning to userspace.
+	 *
+	 * This is also why CS (stashed in the "iret frame" by the
+	 * hardware at entry) can not be used: this may be a return
+	 * to kernel code, but with a user CR3 value.  The %ebx flag
+	 * for SWAPGS is also unusable for CR3 because there is a
+	 * window with a user GS and a kernel CR3.
+	 */
 	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 
 	ret
@@ -1211,11 +1223,13 @@ ENTRY(paranoid_exit)
 	testl	%ebx, %ebx			/* swapgs needed? */
 	jnz	.Lparanoid_exit_no_swapgs
 	TRACE_IRQS_IRETQ
+	/* Always restore stashed CR3 value (see paranoid_entry) */
 	RESTORE_CR3	scratch_reg=%rbx save_reg=%r14
 	SWAPGS_UNSAFE_STACK
 	jmp	.Lparanoid_exit_restore
 .Lparanoid_exit_no_swapgs:
 	TRACE_IRQS_IRETQ_DEBUG
+	/* Always restore stashed CR3 value (see paranoid_entry) */
 	RESTORE_CR3	scratch_reg=%rbx save_reg=%r14
 .Lparanoid_exit_restore:
 	jmp	restore_regs_and_return_to_kernel
@@ -1626,6 +1640,7 @@ end_repeat_nmi:
 	movq	$-1, %rsi
 	call	do_nmi
 
+	/* Always restore stashed CR3 value (see paranoid_entry) */
 	RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
 
 	testl	%ebx, %ebx			/* swapgs needed? */
-- cgit v1.2.3

From 53c13ba8ed39e89f21a0b98f4c8a241bb44e483d Mon Sep 17 00:00:00 2001
From: Nathan Chancellor
Date: Fri, 12 Oct 2018 17:53:12 -0700
Subject: x86/time: Correct the attribute on jiffies' definition

Clang warns that the declaration of jiffies in include/linux/jiffies.h
doesn't match the definition in arch/x86/kernel/time.c:

arch/x86/kernel/time.c:29:42: warning: section does not match previous declaration [-Wsection]
__visible volatile unsigned long jiffies __cacheline_aligned = INITIAL_JIFFIES;
                                         ^
./include/linux/cache.h:49:4: note: expanded from macro '__cacheline_aligned'
                __section__(".data..cacheline_aligned")))
                ^
./include/linux/jiffies.h:81:31: note: previous attribute is here
extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies;
                              ^
./arch/x86/include/asm/cache.h:20:2: note: expanded from macro '__cacheline_aligned_in_smp'
        __page_aligned_data
        ^
./include/linux/linkage.h:39:29: note: expanded from macro '__page_aligned_data'
#define __page_aligned_data	__section(.data..page_aligned) __aligned(PAGE_SIZE)
                            ^
./include/linux/compiler_attributes.h:233:56: note: expanded from macro '__section'
#define __section(S) __attribute__((__section__(#S)))
                                                     ^
1 warning generated.

The declaration was changed in commit 7c30f352c852 ("jiffies.h: declare
jiffies and jiffies_64 with ____cacheline_aligned_in_smp") but wasn't
updated here. Make them match so Clang no longer warns.

Fixes: 7c30f352c852 ("jiffies.h: declare jiffies and jiffies_64 with ____cacheline_aligned_in_smp")
Signed-off-by: Nathan Chancellor
Signed-off-by: Thomas Gleixner
Cc: Borislav Petkov
Cc: "H. Peter Anvin"
Cc: Nick Desaulniers
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20181013005311.28617-1-natechancellor@gmail.com
---
 arch/x86/kernel/time.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index be01328eb755..fddaefc51fb6 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -25,7 +25,7 @@
 #include
 
 #ifdef CONFIG_X86_64
-__visible volatile unsigned long jiffies __cacheline_aligned = INITIAL_JIFFIES;
+__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES;
 #endif
 
-- cgit v1.2.3
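The -Wsection warning class above is reproducible in two lines. A standalone
sketch (identifiers and section names invented) of why a declaration and its
definition must carry the same section attribute:

  /* declaration, as a header would provide it */
  extern int counter __attribute__((section(".data..aligned_smp")));

  /* definition with a different section attribute: Clang emits
   * "section does not match previous declaration" [-Wsection] */
  int counter __attribute__((section(".data..aligned"))) = 0;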
From dca5203e3fe2f5d8bc8157b205d766142d3a6b77 Mon Sep 17 00:00:00 2001
From: Nathan Chancellor
Date: Fri, 12 Oct 2018 18:07:14 -0700
Subject: x86/boot: Add -Wno-pointer-sign to KBUILD_CFLAGS

When compiling the kernel with Clang, this warning appears even though
it is disabled for the whole kernel because this folder has its own set
of KBUILD_CFLAGS. It was disabled before the beginning of git history.

In file included from arch/x86/boot/compressed/kaslr.c:29:
In file included from arch/x86/boot/compressed/misc.h:21:
In file included from ./include/linux/elf.h:5:
In file included from ./arch/x86/include/asm/elf.h:77:
In file included from ./arch/x86/include/asm/vdso.h:11:
In file included from ./include/linux/mm_types.h:9:
In file included from ./include/linux/spinlock.h:88:
In file included from ./arch/x86/include/asm/spinlock.h:43:
In file included from ./arch/x86/include/asm/qrwlock.h:6:
./include/asm-generic/qrwlock.h:101:53: warning: passing 'u32 *' (aka 'unsigned int *') to parameter of type 'int *' converts between pointers to integer types with different sign [-Wpointer-sign]
	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
	                                                   ^~~~~
./include/linux/compiler.h:76:40: note: expanded from macro 'likely'
# define likely(x)	__builtin_expect(!!(x), 1)
                                           ^
./include/asm-generic/atomic-instrumented.h:69:66: note: passing argument to parameter 'old' here
static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
                                                                 ^

Signed-off-by: Nathan Chancellor
Signed-off-by: Thomas Gleixner
Cc: Borislav Petkov
Cc: "H. Peter Anvin"
Cc: Nick Desaulniers
Link: https://lkml.kernel.org/r/20181013010713.6999-1-natechancellor@gmail.com
---
 arch/x86/boot/compressed/Makefile | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 28764dacf018..466f66c8a7f8 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -37,6 +37,7 @@ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
 KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
 KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
+KBUILD_CFLAGS += -Wno-pointer-sign
 
 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n
-- cgit v1.2.3
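For reference, the warning the new flag silences comes down to the signedness
of the pointee types. A minimal sketch mirroring the atomic_try_cmpxchg()
case quoted above (function names invented, body irrelevant):

  typedef unsigned int u32;

  static int try_cmpxchg_sketch(int *old, int new)
  {
          /* stand-in for atomic_try_cmpxchg(); only the signature matters */
          return *old == new;
  }

  int qrwlock_sketch(void)
  {
          u32 cnts = 0;
          /* -Wpointer-sign: passing 'u32 *' (aka 'unsigned int *') to
           * parameter of type 'int *' */
          return try_cmpxchg_sketch(&cnts, 1);
  }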
From 04f4f954b69526d7af8ffb8e5780f08b8a6cda2d Mon Sep 17 00:00:00 2001
From: Jan Kiszka
Date: Mon, 15 Oct 2018 16:09:29 +0200
Subject: x86/entry/32: Clear the CS high bits

Even if not on an entry stack, the CS's high bits must be initialized
because they are unconditionally evaluated in
PARANOID_EXIT_TO_KERNEL_MODE.

Failing to do so broke the boot on Galileo Gen2 and IOT2000 boards.

[ bp: Make the commit message tone passive and impartial. ]

Fixes: b92a165df17e ("x86/entry/32: Handle Entry from Kernel-Mode on Entry-Stack")
Signed-off-by: Jan Kiszka
Signed-off-by: Borislav Petkov
Reviewed-by: Joerg Roedel
Acked-by: Joerg Roedel
CC: "H. Peter Anvin"
CC: Andrea Arcangeli
CC: Andy Lutomirski
CC: Boris Ostrovsky
CC: Brian Gerst
CC: Dave Hansen
CC: David Laight
CC: Denys Vlasenko
CC: Eduardo Valentin
CC: Greg KH
CC: Ingo Molnar
CC: Jiri Kosina
CC: Josh Poimboeuf
CC: Juergen Gross
CC: Linus Torvalds
CC: Peter Zijlstra
CC: Thomas Gleixner
CC: Will Deacon
CC: aliguori@amazon.com
CC: daniel.gruss@iaik.tugraz.at
CC: hughd@google.com
CC: keescook@google.com
CC: linux-mm
CC: x86-ml
Link: http://lkml.kernel.org/r/f271c747-1714-5a5b-a71f-ae189a093b8d@siemens.com
Signed-off-by: Ingo Molnar
---
 arch/x86/entry/entry_32.S | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 2767c625a52c..fbbf1ba57ec6 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -389,6 +389,13 @@
 	 * that register for the time this macro runs
 	 */
 
+	/*
+	 * The high bits of the CS dword (__csh) are used for
+	 * CS_FROM_ENTRY_STACK and CS_FROM_USER_CR3. Clear them in case
+	 * hardware didn't do this for us.
+	 */
+	andl	$(0x0000ffff), PT_CS(%esp)
+
 	/* Are we on the entry stack? Bail out if not! */
 	movl	PER_CPU_VAR(cpu_entry_area), %ecx
 	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
@@ -407,12 +414,6 @@
 	/* Load top of task-stack into %edi */
 	movl	TSS_entry2task_stack(%edi), %edi
 
-	/*
-	 * Clear unused upper bits of the dword containing the word-sized CS
-	 * slot in pt_regs in case hardware didn't clear it for us.
-	 */
-	andl	$(0x0000ffff), PT_CS(%esp)
-
 	/* Special case - entry from kernel mode via entry stack */
 #ifdef CONFIG_VM86
 	movl	PT_EFLAGS(%esp), %ecx		# mix EFLAGS and CS
-- cgit v1.2.3

From ae852495be365f6be433dde6629d3f0316f8efde Mon Sep 17 00:00:00 2001
From: Andy Lutomirski
Date: Sun, 14 Oct 2018 11:38:18 -0700
Subject: x86/entry/64: Further improve paranoid_entry comments

Commit:

  16561f27f94e ("x86/entry: Add some paranoid entry/exit CR3 handling comments")

... added some comments. This improves them a bit:

 - When I first read the new comments, it was unclear to me whether
   they were referring to the case where paranoid_entry interrupted
   other entry code or where paranoid_entry was itself interrupted.
   Clarify it.

 - Remove the EBX comment. We no longer use EBX as a SWAPGS indicator.

Signed-off-by: Andy Lutomirski
Acked-by: Thomas Gleixner
Cc: Borislav Petkov
Cc: Dave Hansen
Cc: Linus Torvalds
Cc: Peter Zijlstra
Link: http://lkml.kernel.org/r/c47daa1888dc2298e7e1d3f82bd76b776ea33393.1539542111.git.luto@kernel.org
Signed-off-by: Ingo Molnar
---
 arch/x86/entry/entry_64.S | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 1d9b4a300c8c..f95dcb209fdf 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1189,15 +1189,13 @@ ENTRY(paranoid_entry)
 1:
 	/*
 	 * Always stash CR3 in %r14.  This value will be restored,
-	 * verbatim, at exit.  Needed if kernel is interrupted
-	 * after switching to the user CR3 value but before
-	 * returning to userspace.
+	 * verbatim, at exit.  Needed if paranoid_entry interrupted
+	 * another entry that already switched to the user CR3 value
+	 * but has not yet returned to userspace.
 	 *
 	 * This is also why CS (stashed in the "iret frame" by the
 	 * hardware at entry) can not be used: this may be a return
-	 * to kernel code, but with a user CR3 value.  The %ebx flag
-	 * for SWAPGS is also unusable for CR3 because there is a
-	 * window with a user GS and a kernel CR3.
+	 * to kernel code, but with a user CR3 value.
 	 */
 	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
-- cgit v1.2.3
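Both entry-code patches above rely on the same trick: pt_regs stores the
16-bit CS selector in a 32-bit slot, so the upper 16 bits are free for
software flags and must be masked off before the selector is used. A C
rendering of the andl in the 32-bit patch (a sketch; the flag values and
names are stand-ins, not the kernel's definitions):

  #include <stdint.h>

  #define CS_FROM_ENTRY_STACK (1u << 16) /* software flags in high bits */
  #define CS_FROM_USER_CR3    (1u << 17)

  static uint32_t pt_regs_cs; /* the dword that PT_CS(%esp) points at */

  static uint16_t cs_selector(void)
  {
          /* equivalent of: andl $(0x0000ffff), PT_CS(%esp) -- hardware
           * defines only the low 16 bits, so the flags must not leak */
          return (uint16_t)(pt_regs_cs & 0x0000ffff);
  }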
From 6aa676761d4c1acfa31320e55fa1f83f3fcbbc7a Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Tue, 16 Oct 2018 22:25:24 +0200
Subject: x86/fpu: Remove second definition of fpu in __fpu__restore_sig()

Commit:

  c5bedc6847c3b ("x86/fpu: Get rid of PF_USED_MATH usage, convert it to fpu->fpstate_active")

introduced the 'fpu' variable at the top of __restore_xstate_sig(),
which now shadows the other definition:

  arch/x86/kernel/fpu/signal.c:318:28: warning: symbol 'fpu' shadows an earlier one
  arch/x86/kernel/fpu/signal.c:271:20: originally declared here

Remove the shadowed definition of 'fpu', as the two definitions are the
same.

Signed-off-by: Sebastian Andrzej Siewior
Reviewed-by: Andy Lutomirski
Cc: Borislav Petkov
Cc: Dave Hansen
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Fixes: c5bedc6847c3b ("x86/fpu: Get rid of PF_USED_MATH usage, convert it to fpu->fpstate_active")
Link: http://lkml.kernel.org/r/20181016202525.29437-3-bigeasy@linutronix.de
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/fpu/signal.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 23f1691670b6..61a949d84dfa 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -314,7 +314,6 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 	 * thread's fpu state, reconstruct fxstate from the fsave
 	 * header. Validate and sanitize the copied state.
 	 */
-	struct fpu *fpu = &tsk->thread.fpu;
 	struct user_i387_ia32_struct env;
 	int err = 0;
-- cgit v1.2.3

From 2224d616528194b02424c91c2ee254b3d29942c3 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Tue, 16 Oct 2018 22:25:25 +0200
Subject: x86/fpu: Fix i486 + no387 boot crash by only saving FPU registers on
 context switch if there is an FPU

Booting an i486 with "no387 nofxsr" ends with the following crash:

  math_emulate: 0060:c101987d
  Kernel panic - not syncing: Math emulation needed in kernel

on the first context switch in user land. The reason is that
copy_fpregs_to_fpstate() tries FNSAVE which does not work as the FPU is
turned off.

This bug was introduced in:

  f1c8cd0176078 ("x86/fpu: Change fpu->fpregs_active users to fpu->fpstate_active")

Add a check for X86_FEATURE_FPU before trying to save FPU registers (we
have such a check in switch_fpu_finish() already).

Signed-off-by: Sebastian Andrzej Siewior
Reviewed-by: Andy Lutomirski
Cc: Borislav Petkov
Cc: Dave Hansen
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: stable@vger.kernel.org
Fixes: f1c8cd0176078 ("x86/fpu: Change fpu->fpregs_active users to fpu->fpstate_active")
Link: http://lkml.kernel.org/r/20181016202525.29437-4-bigeasy@linutronix.de
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/fpu/internal.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index a38bf5a1e37a..69dcdf195b61 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -528,7 +528,7 @@ static inline void fpregs_activate(struct fpu *fpu)
 static inline void
 switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 {
-	if (old_fpu->initialized) {
+	if (static_cpu_has(X86_FEATURE_FPU) && old_fpu->initialized) {
 		if (!copy_fpregs_to_fpstate(old_fpu))
 			old_fpu->last_cpu = -1;
 		else
-- cgit v1.2.3
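The fix is the usual feature-guard pattern: never touch FPU state unless the
CPU actually has an FPU. A reduced C sketch with plain stand-ins for the
kernel helpers (cpu_has_fpu replaces static_cpu_has(X86_FEATURE_FPU), and
the save function is a dummy):

  #include <stdbool.h>

  struct fpu { bool initialized; int last_cpu; };

  static bool cpu_has_fpu; /* stand-in for static_cpu_has(X86_FEATURE_FPU) */

  static bool copy_fpregs_sketch(struct fpu *f) { (void)f; return true; }

  static void switch_fpu_prepare_sketch(struct fpu *old_fpu, int cpu)
  {
          /* Without the feature check, an FPU-less i486 reaches FNSAVE
           * inside the real copy_fpregs_to_fpstate() and traps. */
          if (cpu_has_fpu && old_fpu->initialized) {
                  if (!copy_fpregs_sketch(old_fpu))
                          old_fpu->last_cpu = -1;
                  else
                          old_fpu->last_cpu = cpu;
          }
  }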
From 485734f3fc77c1eb77ffe138c027b9a4bf0178f3 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Sun, 14 Oct 2018 09:52:08 +0200
Subject: x86/swiotlb: Enable swiotlb for > 4GiB RAM on 32-bit kernels

We already build the swiotlb code for 32-bit kernels with PAE support,
but the code to actually use swiotlb has only been enabled for 64-bit
kernels for an unknown reason.

Before Linux v4.18 we papered over this fact because the networking
code, the SCSI layer and some random block drivers implemented their
own bounce buffering scheme.

[ mingo: Changelog fixes. ]

Fixes: 21e07dba9fb1 ("scsi: reduce use of block bounce buffers")
Fixes: ab74cfebafa3 ("net: remove the PCI_DMA_BUS_IS_PHYS check in illegal_highdma")
Reported-by: Matthew Whitehead
Signed-off-by: Christoph Hellwig
Signed-off-by: Thomas Gleixner
Tested-by: Matthew Whitehead
Cc: konrad.wilk@oracle.com
Cc: iommu@lists.linux-foundation.org
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20181014075208.2715-1-hch@lst.de
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/pci-swiotlb.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 661583662430..71c0b01d93b1 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -42,10 +42,8 @@ IOMMU_INIT_FINISH(pci_swiotlb_detect_override,
 int __init pci_swiotlb_detect_4gb(void)
 {
 	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
-#ifdef CONFIG_X86_64
 	if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
 		swiotlb = 1;
-#endif
 
 	/*
 	 * If SME is active then swiotlb will be set to 1 so that bounce
-- cgit v1.2.3
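The check being enabled here is just a page-frame comparison against the
4 GiB line. An illustrative sketch of the arithmetic (4 KiB pages assumed,
as on x86; the function is a made-up stand-in for pci_swiotlb_detect_4gb()):

  #include <stdbool.h>
  #include <stdint.h>

  #define PAGE_SHIFT    12
  /* first pfn above the 32-bit DMA limit -- the role MAX_DMA32_PFN plays */
  #define MAX_DMA32_PFN (1ULL << (32 - PAGE_SHIFT))

  static bool want_swiotlb(bool no_iommu, uint64_t max_possible_pfn)
  {
          /* skip if iommu=off (no_iommu=1); otherwise any memory above
           * 4 GiB needs bounce buffering for 32-bit-DMA-only devices,
           * on PAE 32-bit kernels just as on 64-bit ones */
          return !no_iommu && max_possible_pfn > MAX_DMA32_PFN;
  }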