From a9cfccee6604854aebc70215610b9788667f4fec Mon Sep 17 00:00:00 2001
From: "H. Peter Anvin"
Date: Wed, 4 Jun 2014 13:16:48 -0700
Subject: x86, build: Change code16gcc.h from a C header to an assembly header

By changing code16gcc.h from a C header to an assembly header and using
the -Wa,... option to gcc to force it to be added to the assembly input,
we can avoid the problems with gcc reordering code bits on us.

If we have -m16, we still use it, of course.

Suggested-by: Kevin O'Connor
Signed-off-by: H. Peter Anvin
Link: http://lkml.kernel.org/n/tip-xw8ibgdemucl9fz3i1bymu6w@git.kernel.org
---
 arch/x86/Makefile         |  9 +++------
 arch/x86/boot/code16gcc.h | 24 ++++++++++--------------
 2 files changed, 13 insertions(+), 20 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 602f57e590b5..a98cc9064d8b 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -15,12 +15,9 @@ endif
 # that way we can complain to the user if the CPU is insufficient.
 #
 # The -m16 option is supported by GCC >= 4.9 and clang >= 3.5. For
-# older versions of GCC, we need to play evil and unreliable tricks to
-# attempt to ensure that our asm(".code16gcc") is first in the asm
-# output.
-CODE16GCC_CFLAGS := -m32 -include $(srctree)/arch/x86/boot/code16gcc.h \
-		   $(call cc-option, -fno-toplevel-reorder,\
-		     $(call cc-option, -fno-unit-at-a-time))
+# older versions of GCC, include an *assembly* header to make sure that
+# gcc doesn't play any games behind our back.
+CODE16GCC_CFLAGS := -m32 -Wa,$(srctree)/arch/x86/boot/code16gcc.h
 M16_CFLAGS	 := $(call cc-option, -m16, $(CODE16GCC_CFLAGS))

 REALMODE_CFLAGS	:= $(M16_CFLAGS) -g -Os -D__KERNEL__ \

diff --git a/arch/x86/boot/code16gcc.h b/arch/x86/boot/code16gcc.h
index d93e48010b61..5ff426535397 100644
--- a/arch/x86/boot/code16gcc.h
+++ b/arch/x86/boot/code16gcc.h
@@ -1,15 +1,11 @@
-/*
- * code16gcc.h
- *
- * This file is -include'd when compiling 16-bit C code.
- * Note: this asm() needs to be emitted before gcc emits any code.
- * Depending on gcc version, this requires -fno-unit-at-a-time or
- * -fno-toplevel-reorder.
- *
- * Hopefully gcc will eventually have a real -m16 option so we can
- * drop this hack long term.
- */
+#
+# code16gcc.h
+#
+# This file is added to the assembler via -Wa when compiling 16-bit C code.
+# This is done this way instead via asm() to make sure gcc does not reorder
+# things around us.
+#
+# gcc 4.9+ has a real -m16 option so we can drop this hack long term.
+#

-#ifndef __ASSEMBLY__
-asm(".code16gcc");
-#endif
+	.code16gcc
--
cgit v1.2.3
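
To make the reordering hazard concrete, here is a hypothetical 16-bit C
file written against the *old* -include scheme. This is an illustrative
sketch only, not a file from the kernel tree; the function name and the
BIOS call are invented:

	/* With the old -include hack, this top-level asm() had to be the
	 * very first thing the assembler saw, so that everything gcc
	 * emitted afterwards would be encoded for 16-bit mode. */
	asm(".code16gcc");

	void set_video_mode(int mode)
	{
		/* Without -fno-toplevel-reorder (or -fno-unit-at-a-time on
		 * older gcc), gcc was free to emit this function *before*
		 * the top-level asm() above, silently producing 32-bit
		 * code that crashes at boot.  Feeding .code16gcc straight
		 * to the assembler via -Wa,... closes that window. */
		asm volatile("int $0x10" : : "a" (mode) : "memory");
	}
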
From 891715793f0451e5114d200be932ac14ce8521a3 Mon Sep 17 00:00:00 2001
From: Michal Nazarewicz
Date: Thu, 19 Jun 2014 03:58:36 +0200
Subject: x86/tsc: Get rid of custom DIV_ROUND() macro

When invoked for positive values, the DIV_ROUND macro defined in
arch/x86/kernel/tsc.c behaves exactly like DIV_ROUND_CLOSEST from
include/linux/kernel.h, so remove the custom macro in favour of the
shared one.

[ hpa: changed line breaks ]

Signed-off-by: Michal Nazarewicz
Link: http://lkml.kernel.org/r/1403143116-21755-1-git-send-email-mina86@mina86.com
Acked-by: David Rientjes
Signed-off-by: H. Peter Anvin
---
 arch/x86/kernel/tsc.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 57e5ce126d5a..8764232bf0f1 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -234,9 +234,6 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
 	return ns;
 }

-/* XXX surely we already have this someplace in the kernel?! */
-#define DIV_ROUND(n, d) (((n) + ((d) / 2)) / (d))
-
 static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 {
 	unsigned long long tsc_now, ns_now;
@@ -259,7 +256,9 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 	 * time function is continuous; see the comment near struct
	 * cyc2ns_data.
 	 */
-	data->cyc2ns_mul = DIV_ROUND(NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR, cpu_khz);
+	data->cyc2ns_mul =
+		DIV_ROUND_CLOSEST(NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR,
+				  cpu_khz);
 	data->cyc2ns_shift = CYC2NS_SCALE_FACTOR;
 	data->cyc2ns_offset = ns_now -
 		mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR);
--
cgit v1.2.3
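
As a sanity check of the equivalence claim above, here is a standalone
userspace C sketch. It is not kernel code: DIV_ROUND_CLOSEST is re-derived
here in a simplified, positive-operands-only form rather than copied from
include/linux/kernel.h (the real macro also handles signed operands):

	#include <assert.h>
	#include <stdio.h>

	/* The custom macro removed from tsc.c: add half the divisor
	 * before dividing, i.e. round to nearest. */
	#define DIV_ROUND(n, d) (((n) + ((d) / 2)) / (d))

	/* Simplified stand-in for the shared kernel macro, valid for
	 * positive operands only. */
	#define DIV_ROUND_CLOSEST(x, divisor) ({		\
		typeof(x) __x = (x);				\
		typeof(divisor) __d = (divisor);		\
		(__x + (__d / 2)) / __d;			\
	})

	int main(void)
	{
		/* For positive n and d the two expand to the same
		 * arithmetic, so they agree everywhere we test. */
		for (unsigned long long n = 0; n < 1000; n++)
			for (unsigned long long d = 1; d < 50; d++)
				assert(DIV_ROUND(n, d) ==
				       DIV_ROUND_CLOSEST(n, d));

		/* e.g. 7/2 rounds up to 4, 5/4 rounds down to 1 */
		printf("%llu %llu\n", DIV_ROUND_CLOSEST(7ULL, 2ULL),
		       DIV_ROUND_CLOSEST(5ULL, 4ULL));
		return 0;
	}
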
From b08ee5f7e4135d64b8edd769367f8964a725122e Mon Sep 17 00:00:00 2001
From: Borislav Petkov
Date: Fri, 11 Jul 2014 12:43:38 +0200
Subject: x86: Simplify __HAVE_ARCH_CMPXCHG tests

Both the 32-bit and 64-bit cmpxchg.h headers define __HAVE_ARCH_CMPXCHG,
and there's ifdeffery which checks it. But since both variants define it,
we can just as well move it up to the main cmpxchg header and simplify a
bit of code in doing that.

Signed-off-by: Borislav Petkov
Link: http://lkml.kernel.org/r/20140711104338.GB17083@pd.tnic
Signed-off-by: H. Peter Anvin
---
 arch/x86/include/asm/cmpxchg.h     |  4 ++--
 arch/x86/include/asm/cmpxchg_32.h  |  2 --
 arch/x86/include/asm/cmpxchg_64.h  |  2 --
 arch/x86/include/asm/mc146818rtc.h |  2 +-
 arch/x86/include/asm/mutex_32.h    | 16 ++--------------
 arch/x86/kernel/acpi/boot.c        |  4 ----
 6 files changed, 5 insertions(+), 25 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index d47786acb016..99c105d78b7e 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -4,6 +4,8 @@
 #include <linux/compiler.h>
 #include <asm/alternative.h> /* Provides LOCK_PREFIX */

+#define __HAVE_ARCH_CMPXCHG 1
+
 /*
  * Non-existant functions to indicate usage errors at link time
  * (or compile-time if the compiler implements __compiletime_error().
@@ -143,7 +145,6 @@ extern void __add_wrong_size(void)
 # include <asm/cmpxchg_64.h>
 #endif

-#ifdef __HAVE_ARCH_CMPXCHG
 #define cmpxchg(ptr, old, new)						\
 	__cmpxchg(ptr, old, new, sizeof(*(ptr)))

@@ -152,7 +153,6 @@ extern void __add_wrong_size(void)
 #define cmpxchg_local(ptr, old, new)					\
 	__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
-#endif

 /*
  * xadd() adds "inc" to "*ptr" and atomically returns the previous

diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index f8bf2eecab86..f7e142926481 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -34,8 +34,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 value)
 		     : "memory");
 }

-#define __HAVE_ARCH_CMPXCHG 1
-
 #ifdef CONFIG_X86_CMPXCHG64
 #define cmpxchg64(ptr, o, n)						\
 	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \

diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 614be87f1a9b..1af94697aae5 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -6,8 +6,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
 	*ptr = val;
 }

-#define __HAVE_ARCH_CMPXCHG 1
-
 #define cmpxchg64(ptr, o, n)						\
 ({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\

diff --git a/arch/x86/include/asm/mc146818rtc.h b/arch/x86/include/asm/mc146818rtc.h
index a55c7efcc4ed..0f555cc31984 100644
--- a/arch/x86/include/asm/mc146818rtc.h
+++ b/arch/x86/include/asm/mc146818rtc.h
@@ -13,7 +13,7 @@
 #define RTC_ALWAYS_BCD	1	/* RTC operates in binary mode */
 #endif

-#if defined(CONFIG_X86_32) && defined(__HAVE_ARCH_CMPXCHG)
+#if defined(CONFIG_X86_32)
 /*
  * This lock provides nmi access to the CMOS/RTC registers. It has some
  * special properties. It is owned by a CPU and stores the index register

diff --git a/arch/x86/include/asm/mutex_32.h b/arch/x86/include/asm/mutex_32.h
index 0208c3c2cbc6..85e6cda45a02 100644
--- a/arch/x86/include/asm/mutex_32.h
+++ b/arch/x86/include/asm/mutex_32.h
@@ -100,23 +100,11 @@ do {								\
 static inline int __mutex_fastpath_trylock(atomic_t *count,
 					   int (*fail_fn)(atomic_t *))
 {
-	/*
-	 * We have two variants here. The cmpxchg based one is the best one
-	 * because it never induce a false contention state. It is included
-	 * here because architectures using the inc/dec algorithms over the
-	 * xchg ones are much more likely to support cmpxchg natively.
-	 *
-	 * If not we fall back to the spinlock based variant - that is
-	 * just as efficient (and simpler) as a 'destructive' probing of
-	 * the mutex state would be.
-	 */
-#ifdef __HAVE_ARCH_CMPXCHG
+	/* cmpxchg because it never induces a false contention state. */
 	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
 		return 1;
+
 	return 0;
-#else
-	return fail_fn(count);
-#endif
 }

 #endif /* _ASM_X86_MUTEX_32_H */

diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 86281ffb96d6..a531f6564ed0 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -74,10 +74,6 @@ int acpi_fix_pin2_polarity __initdata;
 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
 #endif

-#ifndef __HAVE_ARCH_CMPXCHG
-#warning ACPI uses CMPXCHG, i486 and later hardware
-#endif
-
 /* --------------------------------------------------------------------------
                               Boot-time Configuration
    -------------------------------------------------------------------------- */
--
cgit v1.2.3
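
For readers unfamiliar with the trylock fastpath that survives in
mutex_32.h above, the following standalone C sketch mirrors its logic
using gcc's __sync builtin. The function name and the plain-int counter
are illustrative stand-ins, not the kernel's atomic_t API:

	/* Mutex convention used by the x86 fastpath: 1 = unlocked,
	 * 0 = locked, negative = locked with waiters. */
	static int demo_mutex_trylock(int *count)
	{
		/* Atomically: if *count == 1, set it to 0 and return the
		 * old value.  Unlike a destructive decrement-and-test, a
		 * failed compare-exchange leaves *count untouched, so it
		 * can never create a false "contended" state - the
		 * property the surviving comment refers to. */
		if (__sync_val_compare_and_swap(count, 1, 0) == 1)
			return 1;	/* lock acquired */

		return 0;		/* someone else holds it */
	}
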
From 3bab13b015a255b4b812c02670384d7d99a9ca34 Mon Sep 17 00:00:00 2001
From: Jan Beulich
Date: Wed, 25 Jun 2014 14:11:22 +0100
Subject: x86/debug: Drop several unnecessary CFI annotations

With the conversion of the register saving code from macros to functions,
and with those functions not clobbering most of the registers they spill,
there's no need to annotate most of the spill operations; the only
exceptions are %rbx (always modified) and %rcx (modified on the
error_kernelspace: path).

Also remove a bogus commented-out annotation - there's no register
%orig_rax after all.

Signed-off-by: Jan Beulich
Link: http://lkml.kernel.org/r/53AAE69A020000780001D3C7@mail.emea.novell.com
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/entry_64.S | 52 +++++++++++++++++++++++-----------------------
 1 file changed, 26 insertions(+), 26 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index b25ca969edd2..f72c03a1ac54 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -207,7 +207,6 @@ ENDPROC(native_usergs_sysret64)
  */
 .macro XCPT_FRAME start=1 offset=0
 	INTR_FRAME \start, RIP+\offset-ORIG_RAX
-	/*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
 .endm

 /*
@@ -287,21 +286,21 @@ ENDPROC(native_usergs_sysret64)
 ENTRY(save_paranoid)
 	XCPT_FRAME 1 RDI+8
 	cld
-	movq_cfi rdi, RDI+8
-	movq_cfi rsi, RSI+8
+	movq %rdi, RDI+8(%rsp)
+	movq %rsi, RSI+8(%rsp)
 	movq_cfi rdx, RDX+8
 	movq_cfi rcx, RCX+8
 	movq_cfi rax, RAX+8
-	movq_cfi r8, R8+8
-	movq_cfi r9, R9+8
-	movq_cfi r10, R10+8
-	movq_cfi r11, R11+8
+	movq %r8, R8+8(%rsp)
+	movq %r9, R9+8(%rsp)
+	movq %r10, R10+8(%rsp)
+	movq %r11, R11+8(%rsp)
 	movq_cfi rbx, RBX+8
-	movq_cfi rbp, RBP+8
-	movq_cfi r12, R12+8
-	movq_cfi r13, R13+8
-	movq_cfi r14, R14+8
-	movq_cfi r15, R15+8
+	movq %rbp, RBP+8(%rsp)
+	movq %r12, R12+8(%rsp)
+	movq %r13, R13+8(%rsp)
+	movq %r14, R14+8(%rsp)
+	movq %r15, R15+8(%rsp)
 	movl $1,%ebx
 	movl $MSR_GS_BASE,%ecx
 	rdmsr
@@ -1395,21 +1394,21 @@ ENTRY(error_entry)
 	CFI_ADJUST_CFA_OFFSET 15*8
 	/* oldrax contains error code */
 	cld
-	movq_cfi rdi, RDI+8
-	movq_cfi rsi, RSI+8
-	movq_cfi rdx, RDX+8
-	movq_cfi rcx, RCX+8
-	movq_cfi rax, RAX+8
-	movq_cfi r8, R8+8
-	movq_cfi r9, R9+8
-	movq_cfi r10, R10+8
-	movq_cfi r11, R11+8
+	movq %rdi, RDI+8(%rsp)
+	movq %rsi, RSI+8(%rsp)
+	movq %rdx, RDX+8(%rsp)
+	movq %rcx, RCX+8(%rsp)
+	movq %rax, RAX+8(%rsp)
+	movq %r8, R8+8(%rsp)
+	movq %r9, R9+8(%rsp)
+	movq %r10, R10+8(%rsp)
+	movq %r11, R11+8(%rsp)
 	movq_cfi rbx, RBX+8
-	movq_cfi rbp, RBP+8
-	movq_cfi r12, R12+8
-	movq_cfi r13, R13+8
-	movq_cfi r14, R14+8
-	movq_cfi r15, R15+8
+	movq %rbp, RBP+8(%rsp)
+	movq %r12, R12+8(%rsp)
+	movq %r13, R13+8(%rsp)
+	movq %r14, R14+8(%rsp)
+	movq %r15, R15+8(%rsp)
 	xorl %ebx,%ebx
 	testl $3,CS+8(%rsp)
 	je error_kernelspace
@@ -1427,6 +1426,7 @@ error_sti:
  * compat mode. Check for these here too.
  */
 error_kernelspace:
+	CFI_REL_OFFSET rcx, RCX+8
 	incl %ebx
 	leaq irq_return_iret(%rip),%rcx
 	cmpq %rcx,RIP+8(%rsp)
--
cgit v1.2.3