Diffstat (limited to 'arch/x86/include')
25 files changed, 564 insertions(+), 259 deletions(-)
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index 722aa3b04624..da31c8b8a92d 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -6,6 +6,7 @@ #include <asm/processor.h> #include <asm/alternative.h> #include <asm/cmpxchg.h> +#include <asm/rmwcc.h> /* * Atomic operations that C can't guarantee us. Useful for @@ -76,12 +77,7 @@ static inline void atomic_sub(int i, atomic_t *v) */ static inline int atomic_sub_and_test(int i, atomic_t *v) { - unsigned char c; - - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" - : "+m" (v->counter), "=qm" (c) - : "ir" (i) : "memory"); - return c; + GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, i, "%0", "e"); } /** @@ -118,12 +114,7 @@ static inline void atomic_dec(atomic_t *v) */ static inline int atomic_dec_and_test(atomic_t *v) { - unsigned char c; - - asm volatile(LOCK_PREFIX "decl %0; sete %1" - : "+m" (v->counter), "=qm" (c) - : : "memory"); - return c != 0; + GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e"); } /** @@ -136,12 +127,7 @@ static inline int atomic_dec_and_test(atomic_t *v) */ static inline int atomic_inc_and_test(atomic_t *v) { - unsigned char c; - - asm volatile(LOCK_PREFIX "incl %0; sete %1" - : "+m" (v->counter), "=qm" (c) - : : "memory"); - return c != 0; + GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e"); } /** @@ -155,12 +141,7 @@ static inline int atomic_inc_and_test(atomic_t *v) */ static inline int atomic_add_negative(int i, atomic_t *v) { - unsigned char c; - - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" - : "+m" (v->counter), "=qm" (c) - : "ir" (i) : "memory"); - return c; + GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, i, "%0", "s"); } /** diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h index 0e1cbfc8ee06..3f065c985aee 100644 --- a/arch/x86/include/asm/atomic64_64.h +++ b/arch/x86/include/asm/atomic64_64.h @@ -72,12 +72,7 @@ static inline void atomic64_sub(long i, atomic64_t *v) */ static inline int atomic64_sub_and_test(long i, atomic64_t *v) { - unsigned char c; - - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1" - : "=m" (v->counter), "=qm" (c) - : "er" (i), "m" (v->counter) : "memory"); - return c; + GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, i, "%0", "e"); } /** @@ -116,12 +111,7 @@ static inline void atomic64_dec(atomic64_t *v) */ static inline int atomic64_dec_and_test(atomic64_t *v) { - unsigned char c; - - asm volatile(LOCK_PREFIX "decq %0; sete %1" - : "=m" (v->counter), "=qm" (c) - : "m" (v->counter) : "memory"); - return c != 0; + GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e"); } /** @@ -134,12 +124,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v) */ static inline int atomic64_inc_and_test(atomic64_t *v) { - unsigned char c; - - asm volatile(LOCK_PREFIX "incq %0; sete %1" - : "=m" (v->counter), "=qm" (c) - : "m" (v->counter) : "memory"); - return c != 0; + GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e"); } /** @@ -153,12 +138,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v) */ static inline int atomic64_add_negative(long i, atomic64_t *v) { - unsigned char c; - - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1" - : "=m" (v->counter), "=qm" (c) - : "er" (i), "m" (v->counter) : "memory"); - return c; + GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, i, "%0", "s"); } /** diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 41639ce8fd63..6d76d0935989 100644 --- a/arch/x86/include/asm/bitops.h +++ 
b/arch/x86/include/asm/bitops.h @@ -14,6 +14,7 @@ #include <linux/compiler.h> #include <asm/alternative.h> +#include <asm/rmwcc.h> #if BITS_PER_LONG == 32 # define _BITOPS_LONG_SHIFT 5 @@ -204,12 +205,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr) */ static inline int test_and_set_bit(long nr, volatile unsigned long *addr) { - int oldbit; - - asm volatile(LOCK_PREFIX "bts %2,%1\n\t" - "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); - - return oldbit; + GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, nr, "%0", "c"); } /** @@ -255,13 +251,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr) */ static inline int test_and_clear_bit(long nr, volatile unsigned long *addr) { - int oldbit; - - asm volatile(LOCK_PREFIX "btr %2,%1\n\t" - "sbb %0,%0" - : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); - - return oldbit; + GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, nr, "%0", "c"); } /** @@ -314,13 +304,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr) */ static inline int test_and_change_bit(long nr, volatile unsigned long *addr) { - int oldbit; - - asm volatile(LOCK_PREFIX "btc %2,%1\n\t" - "sbb %0,%0" - : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); - - return oldbit; + GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, nr, "%0", "c"); } static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr) diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h index 0fa675033912..cb4c73bfeb48 100644 --- a/arch/x86/include/asm/calling.h +++ b/arch/x86/include/asm/calling.h @@ -48,6 +48,8 @@ For 32-bit we have the following conventions - kernel is built with #include <asm/dwarf2.h> +#ifdef CONFIG_X86_64 + /* * 64-bit system call stack frame layout defines and helpers, * for assembly code: @@ -192,3 +194,51 @@ For 32-bit we have the following conventions - kernel is built with .macro icebp .byte 0xf1 .endm + +#else /* CONFIG_X86_64 */ + +/* + * For 32bit only simplified versions of SAVE_ALL/RESTORE_ALL. These + * are different from the entry_32.S versions in not changing the segment + * registers. So only suitable for in kernel use, not when transitioning + * from or to user space. The resulting stack frame is not a standard + * pt_regs frame. The main use case is calling C code from assembler + * when all the registers need to be preserved. + */ + + .macro SAVE_ALL + pushl_cfi %eax + CFI_REL_OFFSET eax, 0 + pushl_cfi %ebp + CFI_REL_OFFSET ebp, 0 + pushl_cfi %edi + CFI_REL_OFFSET edi, 0 + pushl_cfi %esi + CFI_REL_OFFSET esi, 0 + pushl_cfi %edx + CFI_REL_OFFSET edx, 0 + pushl_cfi %ecx + CFI_REL_OFFSET ecx, 0 + pushl_cfi %ebx + CFI_REL_OFFSET ebx, 0 + .endm + + .macro RESTORE_ALL + popl_cfi %ebx + CFI_RESTORE ebx + popl_cfi %ecx + CFI_RESTORE ecx + popl_cfi %edx + CFI_RESTORE edx + popl_cfi %esi + CFI_RESTORE esi + popl_cfi %edi + CFI_RESTORE edi + popl_cfi %ebp + CFI_RESTORE ebp + popl_cfi %eax + CFI_RESTORE eax + .endm + +#endif /* CONFIG_X86_64 */ + diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 0062a0125041..65c6e6e3a552 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -109,6 +109,8 @@ static inline bool efi_is_native(void) return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT); } +extern struct console early_efi_console; + #else /* * IF EFI is not configured, have the EFI calls return -ENOSYS. 
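The GEN_UNARY_RMWcc()/GEN_BINARY_RMWcc() conversions above (macro bodies in the new asm/rmwcc.h further down) drop the old pattern of materializing the condition code with set<cc> and returning it. With a compiler that supports asm goto, the branch is taken directly on the flags left by the locked read-modify-write instruction. A minimal standalone sketch of the technique, assuming a gcc with asm goto and writing the lock prefix literally (the kernel uses LOCK_PREFIX so UP kernels can patch it out):

#include <stdbool.h>

/* Not the kernel's macro: a hand-expanded equivalent of what
 * GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e")
 * generates on the asm-goto path of asm/rmwcc.h. */
static inline bool sketch_dec_and_test(int *counter)
{
	asm goto("lock decl %0\n\t"
		 "je %l[cc_label]"
		 : /* asm goto allows no outputs */
		 : "m" (*counter)
		 : "memory"
		 : cc_label);
	return false;	/* fell through: counter did not hit zero */
cc_label:
	return true;	/* ZF was set: counter hit zero */
}

A caller such as if (sketch_dec_and_test(&obj->refcount)) free_obj(obj); compiles to a decrement plus a single conditional jump; the !CC_HAVE_ASM_GOTO fallback in rmwcc.h keeps the old set<cc> sequence, so callers are source-compatible either way.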
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h new file mode 100644 index 000000000000..459769d39263 --- /dev/null +++ b/arch/x86/include/asm/intel-mid.h @@ -0,0 +1,113 @@ +/* + * intel-mid.h: Intel MID specific setup code + * + * (C) Copyright 2009 Intel Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ +#ifndef _ASM_X86_INTEL_MID_H +#define _ASM_X86_INTEL_MID_H + +#include <linux/sfi.h> +#include <linux/platform_device.h> + +extern int intel_mid_pci_init(void); +extern int get_gpio_by_name(const char *name); +extern void intel_scu_device_register(struct platform_device *pdev); +extern int __init sfi_parse_mrtc(struct sfi_table_header *table); +extern int __init sfi_parse_mtmr(struct sfi_table_header *table); +extern int sfi_mrtc_num; +extern struct sfi_rtc_table_entry sfi_mrtc_array[]; + +/* + * Here defines the array of devices platform data that IAFW would export + * through SFI "DEVS" table, we use name and type to match the device and + * its platform data. + */ +struct devs_id { + char name[SFI_NAME_LEN + 1]; + u8 type; + u8 delay; + void *(*get_platform_data)(void *info); + /* Custom handler for devices */ + void (*device_handler)(struct sfi_device_table_entry *pentry, + struct devs_id *dev); +}; + +#define sfi_device(i) \ + static const struct devs_id *const __intel_mid_sfi_##i##_dev __used \ + __attribute__((__section__(".x86_intel_mid_dev.init"))) = &i + +/* + * Medfield is the follow-up of Moorestown, it combines two chip solution into + * one. Other than that it also added always-on and constant tsc and lapic + * timers. Medfield is the platform name, and the chip name is called Penwell + * we treat Medfield/Penwell as a variant of Moorestown. Penwell can be + * identified via MSRs. + */ +enum intel_mid_cpu_type { + /* 1 was Moorestown */ + INTEL_MID_CPU_CHIP_PENWELL = 2, +}; + +extern enum intel_mid_cpu_type __intel_mid_cpu_chip; + +#ifdef CONFIG_X86_INTEL_MID + +static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void) +{ + return __intel_mid_cpu_chip; +} + +static inline bool intel_mid_has_msic(void) +{ + return (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL); +} + +#else /* !CONFIG_X86_INTEL_MID */ + +#define intel_mid_identify_cpu() (0) +#define intel_mid_has_msic() (0) + +#endif /* !CONFIG_X86_INTEL_MID */ + +enum intel_mid_timer_options { + INTEL_MID_TIMER_DEFAULT, + INTEL_MID_TIMER_APBT_ONLY, + INTEL_MID_TIMER_LAPIC_APBT, +}; + +extern enum intel_mid_timer_options intel_mid_timer_options; + +/* + * Penwell uses spread spectrum clock, so the freq number is not exactly + * the same as reported by MSR based on SDM. 
+ */ +#define PENWELL_FSB_FREQ_83SKU 83200 +#define PENWELL_FSB_FREQ_100SKU 99840 + +#define SFI_MTMR_MAX_NUM 8 +#define SFI_MRTC_MAX 8 + +extern struct console early_mrst_console; +extern void mrst_early_console_init(void); + +extern struct console early_hsu_console; +extern void hsu_early_console_init(const char *); + +extern void intel_scu_devices_create(void); +extern void intel_scu_devices_destroy(void); + +/* VRTC timer */ +#define MRST_VRTC_MAP_SZ (1024) +/*#define MRST_VRTC_PGOFFSET (0xc00) */ + +extern void intel_mid_rtc_init(void); + +/* the offset for the mapping of global gpio pin to irq */ +#define INTEL_MID_IRQ_OFFSET 0x100 + +#endif /* _ASM_X86_INTEL_MID_H */ diff --git a/arch/x86/include/asm/mrst-vrtc.h b/arch/x86/include/asm/intel_mid_vrtc.h index 1e69a75412a4..86ff4685c409 100644 --- a/arch/x86/include/asm/mrst-vrtc.h +++ b/arch/x86/include/asm/intel_mid_vrtc.h @@ -1,5 +1,5 @@ -#ifndef _MRST_VRTC_H -#define _MRST_VRTC_H +#ifndef _INTEL_MID_VRTC_H +#define _INTEL_MID_VRTC_H extern unsigned char vrtc_cmos_read(unsigned char reg); extern void vrtc_cmos_write(unsigned char val, unsigned char reg); diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h index 2d89e3980cbd..5b23e605e707 100644 --- a/arch/x86/include/asm/local.h +++ b/arch/x86/include/asm/local.h @@ -52,12 +52,7 @@ static inline void local_sub(long i, local_t *l) */ static inline int local_sub_and_test(long i, local_t *l) { - unsigned char c; - - asm volatile(_ASM_SUB "%2,%0; sete %1" - : "+m" (l->a.counter), "=qm" (c) - : "ir" (i) : "memory"); - return c; + GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, i, "%0", "e"); } /** @@ -70,12 +65,7 @@ static inline int local_sub_and_test(long i, local_t *l) */ static inline int local_dec_and_test(local_t *l) { - unsigned char c; - - asm volatile(_ASM_DEC "%0; sete %1" - : "+m" (l->a.counter), "=qm" (c) - : : "memory"); - return c != 0; + GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e"); } /** @@ -88,12 +78,7 @@ static inline int local_dec_and_test(local_t *l) */ static inline int local_inc_and_test(local_t *l) { - unsigned char c; - - asm volatile(_ASM_INC "%0; sete %1" - : "+m" (l->a.counter), "=qm" (c) - : : "memory"); - return c != 0; + GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e"); } /** @@ -107,12 +92,7 @@ static inline int local_inc_and_test(local_t *l) */ static inline int local_add_negative(long i, local_t *l) { - unsigned char c; - - asm volatile(_ASM_ADD "%2,%0; sets %1" - : "+m" (l->a.counter), "=qm" (c) - : "ir" (i) : "memory"); - return c; + GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, i, "%0", "s"); } /** diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index cbe6b9e404ce..c696a8687567 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -16,6 +16,7 @@ #define MCG_EXT_CNT_SHIFT 16 #define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT) #define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ +#define MCG_ELOG_P (1ULL<<26) /* Extended error log supported */ /* MCG_STATUS register defines */ #define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */ diff --git a/arch/x86/include/asm/misc.h b/arch/x86/include/asm/misc.h new file mode 100644 index 000000000000..475f5bbc7f53 --- /dev/null +++ b/arch/x86/include/asm/misc.h @@ -0,0 +1,6 @@ +#ifndef _ASM_X86_MISC_H +#define _ASM_X86_MISC_H + +int num_digits(int val); + +#endif /* _ASM_X86_MISC_H */ diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h deleted file mode 100644 index fc18bf3ce7c8..000000000000 --- 
a/arch/x86/include/asm/mrst.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * mrst.h: Intel Moorestown platform specific setup code - * - * (C) Copyright 2009 Intel Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; version 2 - * of the License. - */ -#ifndef _ASM_X86_MRST_H -#define _ASM_X86_MRST_H - -#include <linux/sfi.h> - -extern int pci_mrst_init(void); -extern int __init sfi_parse_mrtc(struct sfi_table_header *table); -extern int sfi_mrtc_num; -extern struct sfi_rtc_table_entry sfi_mrtc_array[]; - -/* - * Medfield is the follow-up of Moorestown, it combines two chip solution into - * one. Other than that it also added always-on and constant tsc and lapic - * timers. Medfield is the platform name, and the chip name is called Penwell - * we treat Medfield/Penwell as a variant of Moorestown. Penwell can be - * identified via MSRs. - */ -enum mrst_cpu_type { - /* 1 was Moorestown */ - MRST_CPU_CHIP_PENWELL = 2, -}; - -extern enum mrst_cpu_type __mrst_cpu_chip; - -#ifdef CONFIG_X86_INTEL_MID - -static inline enum mrst_cpu_type mrst_identify_cpu(void) -{ - return __mrst_cpu_chip; -} - -#else /* !CONFIG_X86_INTEL_MID */ - -#define mrst_identify_cpu() (0) - -#endif /* !CONFIG_X86_INTEL_MID */ - -enum mrst_timer_options { - MRST_TIMER_DEFAULT, - MRST_TIMER_APBT_ONLY, - MRST_TIMER_LAPIC_APBT, -}; - -extern enum mrst_timer_options mrst_timer_options; - -/* - * Penwell uses spread spectrum clock, so the freq number is not exactly - * the same as reported by MSR based on SDM. - */ -#define PENWELL_FSB_FREQ_83SKU 83200 -#define PENWELL_FSB_FREQ_100SKU 99840 - -#define SFI_MTMR_MAX_NUM 8 -#define SFI_MRTC_MAX 8 - -extern struct console early_mrst_console; -extern void mrst_early_console_init(void); - -extern struct console early_hsu_console; -extern void hsu_early_console_init(const char *); - -extern void intel_scu_devices_create(void); -extern void intel_scu_devices_destroy(void); - -/* VRTC timer */ -#define MRST_VRTC_MAP_SZ (1024) -/*#define MRST_VRTC_PGOFFSET (0xc00) */ - -extern void mrst_rtc_init(void); - -#endif /* _ASM_X86_MRST_H */ diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h new file mode 100644 index 000000000000..8729723636fd --- /dev/null +++ b/arch/x86/include/asm/preempt.h @@ -0,0 +1,100 @@ +#ifndef __ASM_PREEMPT_H +#define __ASM_PREEMPT_H + +#include <asm/rmwcc.h> +#include <asm/percpu.h> +#include <linux/thread_info.h> + +DECLARE_PER_CPU(int, __preempt_count); + +/* + * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users + * that think a non-zero value indicates we cannot preempt. 
+ */ +static __always_inline int preempt_count(void) +{ + return __this_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED; +} + +static __always_inline void preempt_count_set(int pc) +{ + __this_cpu_write_4(__preempt_count, pc); +} + +/* + * must be macros to avoid header recursion hell + */ +#define task_preempt_count(p) \ + (task_thread_info(p)->saved_preempt_count & ~PREEMPT_NEED_RESCHED) + +#define init_task_preempt_count(p) do { \ + task_thread_info(p)->saved_preempt_count = PREEMPT_DISABLED; \ +} while (0) + +#define init_idle_preempt_count(p, cpu) do { \ + task_thread_info(p)->saved_preempt_count = PREEMPT_ENABLED; \ + per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \ +} while (0) + +/* + * We fold the NEED_RESCHED bit into the preempt count such that + * preempt_enable() can decrement and test for needing to reschedule with a + * single instruction. + * + * We invert the actual bit, so that when the decrement hits 0 we know we both + * need to resched (the bit is cleared) and can resched (no preempt count). + */ + +static __always_inline void set_preempt_need_resched(void) +{ + __this_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED); +} + +static __always_inline void clear_preempt_need_resched(void) +{ + __this_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED); +} + +static __always_inline bool test_preempt_need_resched(void) +{ + return !(__this_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED); +} + +/* + * The various preempt_count add/sub methods + */ + +static __always_inline void __preempt_count_add(int val) +{ + __this_cpu_add_4(__preempt_count, val); +} + +static __always_inline void __preempt_count_sub(int val) +{ + __this_cpu_add_4(__preempt_count, -val); +} + +static __always_inline bool __preempt_count_dec_and_test(void) +{ + GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e"); +} + +/* + * Returns true when we need to resched and can (barring IRQ state). + */ +static __always_inline bool should_resched(void) +{ + return unlikely(!__this_cpu_read_4(__preempt_count)); +} + +#ifdef CONFIG_PREEMPT + extern asmlinkage void ___preempt_schedule(void); +# define __preempt_schedule() asm ("call ___preempt_schedule") + extern asmlinkage void preempt_schedule(void); +# ifdef CONFIG_CONTEXT_TRACKING + extern asmlinkage void ___preempt_schedule_context(void); +# define __preempt_schedule_context() asm ("call ___preempt_schedule_context") +# endif +#endif + +#endif /* __ASM_PREEMPT_H */ diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h index bade6ac3b14f..fbeb06ed0eaa 100644 --- a/arch/x86/include/asm/prom.h +++ b/arch/x86/include/asm/prom.h @@ -39,10 +39,5 @@ static inline void x86_dtb_init(void) { } extern char cmd_line[COMMAND_LINE_SIZE]; -#define pci_address_to_pio pci_address_to_pio -unsigned long pci_address_to_pio(phys_addr_t addr); - -#define HAVE_ARCH_DEVTREE_FIXUPS - #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h new file mode 100644 index 000000000000..1ff990f1de8e --- /dev/null +++ b/arch/x86/include/asm/rmwcc.h @@ -0,0 +1,41 @@ +#ifndef _ASM_X86_RMWcc +#define _ASM_X86_RMWcc + +#ifdef CC_HAVE_ASM_GOTO + +#define __GEN_RMWcc(fullop, var, cc, ...) 
\ +do { \ + asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \ + : : "m" (var), ## __VA_ARGS__ \ + : "memory" : cc_label); \ + return 0; \ +cc_label: \ + return 1; \ +} while (0) + +#define GEN_UNARY_RMWcc(op, var, arg0, cc) \ + __GEN_RMWcc(op " " arg0, var, cc) + +#define GEN_BINARY_RMWcc(op, var, val, arg0, cc) \ + __GEN_RMWcc(op " %1, " arg0, var, cc, "er" (val)) + +#else /* !CC_HAVE_ASM_GOTO */ + +#define __GEN_RMWcc(fullop, var, cc, ...) \ +do { \ + char c; \ + asm volatile (fullop "; set" cc " %1" \ + : "+m" (var), "=qm" (c) \ + : __VA_ARGS__ : "memory"); \ + return c != 0; \ +} while (0) + +#define GEN_UNARY_RMWcc(op, var, arg0, cc) \ + __GEN_RMWcc(op " " arg0, var, cc) + +#define GEN_BINARY_RMWcc(op, var, val, arg0, cc) \ + __GEN_RMWcc(op " %2, " arg0, var, cc, "er" (val)) + +#endif /* CC_HAVE_ASM_GOTO */ + +#endif /* _ASM_X86_RMWcc */ diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 347555492dad..59bcf4e22418 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -51,9 +51,9 @@ extern void i386_reserve_resources(void); extern void setup_default_timer_irq(void); #ifdef CONFIG_X86_INTEL_MID -extern void x86_mrst_early_setup(void); +extern void x86_intel_mid_early_setup(void); #else -static inline void x86_mrst_early_setup(void) { } +static inline void x86_intel_mid_early_setup(void) { } #endif #ifdef CONFIG_X86_INTEL_CE diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 27811190cbd7..c46a46be1ec6 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -28,8 +28,7 @@ struct thread_info { __u32 flags; /* low level flags */ __u32 status; /* thread synchronous flags */ __u32 cpu; /* current CPU */ - int preempt_count; /* 0 => preemptable, - <0 => BUG */ + int saved_preempt_count; mm_segment_t addr_limit; struct restart_block restart_block; void __user *sysenter_return; @@ -49,7 +48,7 @@ struct thread_info { .exec_domain = &default_exec_domain, \ .flags = 0, \ .cpu = 0, \ - .preempt_count = INIT_PREEMPT_COUNT, \ + .saved_preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ .restart_block = { \ .fn = do_no_restart_syscall, \ diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 5838fa911aa0..8ec57c07b125 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -542,5 +542,103 @@ extern struct movsl_mask { # include <asm/uaccess_64.h> #endif +unsigned long __must_check _copy_from_user(void *to, const void __user *from, + unsigned n); +unsigned long __must_check _copy_to_user(void __user *to, const void *from, + unsigned n); + +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS +# define copy_user_diag __compiletime_error +#else +# define copy_user_diag __compiletime_warning +#endif + +extern void copy_user_diag("copy_from_user() buffer size is too small") +copy_from_user_overflow(void); +extern void copy_user_diag("copy_to_user() buffer size is too small") +copy_to_user_overflow(void) __asm__("copy_from_user_overflow"); + +#undef copy_user_diag + +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS + +extern void +__compiletime_warning("copy_from_user() buffer size is not provably correct") +__copy_from_user_overflow(void) __asm__("copy_from_user_overflow"); +#define __copy_from_user_overflow(size, count) __copy_from_user_overflow() + +extern void +__compiletime_warning("copy_to_user() buffer size is not provably correct") +__copy_to_user_overflow(void) __asm__("copy_from_user_overflow"); +#define 
__copy_to_user_overflow(size, count) __copy_to_user_overflow() + +#else + +static inline void +__copy_from_user_overflow(int size, unsigned long count) +{ + WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); +} + +#define __copy_to_user_overflow __copy_from_user_overflow + +#endif + +static inline unsigned long __must_check +copy_from_user(void *to, const void __user *from, unsigned long n) +{ + int sz = __compiletime_object_size(to); + + might_fault(); + + /* + * While we would like to have the compiler do the checking for us + * even in the non-constant size case, any false positives there are + * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even + * without - the [hopefully] dangerous looking nature of the warning + * would make people go look at the respective call sites over and + * over again just to find that there's no problem). + * + * And there are cases where it's just not realistic for the compiler + * to prove the count to be in range. For example when multiple call + * sites of a helper function - perhaps in different source files - + * all doing proper range checking, yet the helper function not doing + * so again. + * + * Therefore limit the compile time checking to the constant size + * case, and do only runtime checking for non-constant sizes. + */ + + if (likely(sz < 0 || sz >= n)) + n = _copy_from_user(to, from, n); + else if (__builtin_constant_p(n)) + copy_from_user_overflow(); + else + __copy_from_user_overflow(sz, n); + + return n; +} + +static inline unsigned long __must_check +copy_to_user(void __user *to, const void *from, unsigned long n) +{ + int sz = __compiletime_object_size(from); + + might_fault(); + + /* See the comment in copy_from_user() above. */ + if (likely(sz < 0 || sz >= n)) + n = _copy_to_user(to, from, n); + else if (__builtin_constant_p(n)) + copy_to_user_overflow(); + else + __copy_to_user_overflow(sz, n); + + return n; +} + +#undef __copy_from_user_overflow +#undef __copy_to_user_overflow + #endif /* _ASM_X86_UACCESS_H */ diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index 7f760a9f1f61..3c03a5de64d3 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h @@ -184,33 +184,4 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from, return __copy_from_user_ll_nocache_nozero(to, from, n); } -unsigned long __must_check copy_to_user(void __user *to, - const void *from, unsigned long n); -unsigned long __must_check _copy_from_user(void *to, - const void __user *from, - unsigned long n); - - -extern void copy_from_user_overflow(void) -#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS - __compiletime_error("copy_from_user() buffer size is not provably correct") -#else - __compiletime_warning("copy_from_user() buffer size is not provably correct") -#endif -; - -static inline unsigned long __must_check copy_from_user(void *to, - const void __user *from, - unsigned long n) -{ - int sz = __compiletime_object_size(to); - - if (likely(sz == -1 || sz >= n)) - n = _copy_from_user(to, from, n); - else - copy_from_user_overflow(); - - return n; -} - #endif /* _ASM_X86_UACCESS_32_H */ diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 4f7923dd0007..190413d0de57 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -46,42 +46,13 @@ copy_user_generic(void *to, const void *from, unsigned len) } __must_check unsigned long -_copy_to_user(void __user *to, const void *from, unsigned len);
-__must_check unsigned long -_copy_from_user(void *to, const void __user *from, unsigned len); -__must_check unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len); -static inline unsigned long __must_check copy_from_user(void *to, - const void __user *from, - unsigned long n) -{ - int sz = __compiletime_object_size(to); - - might_fault(); - if (likely(sz == -1 || sz >= n)) - n = _copy_from_user(to, from, n); -#ifdef CONFIG_DEBUG_VM - else - WARN(1, "Buffer overflow detected!\n"); -#endif - return n; -} - static __always_inline __must_check -int copy_to_user(void __user *dst, const void *src, unsigned size) -{ - might_fault(); - - return _copy_to_user(dst, src, size); -} - -static __always_inline __must_check -int __copy_from_user(void *dst, const void __user *src, unsigned size) +int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size) { int ret = 0; - might_fault(); if (!__builtin_constant_p(size)) return copy_user_generic(dst, (__force void *)src, size); switch (size) { @@ -121,11 +92,17 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size) } static __always_inline __must_check -int __copy_to_user(void __user *dst, const void *src, unsigned size) +int __copy_from_user(void *dst, const void __user *src, unsigned size) +{ + might_fault(); + return __copy_from_user_nocheck(dst, src, size); +} + +static __always_inline __must_check +int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size) { int ret = 0; - might_fault(); if (!__builtin_constant_p(size)) return copy_user_generic((__force void *)dst, src, size); switch (size) { @@ -165,6 +142,13 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size) } static __always_inline __must_check +int __copy_to_user(void __user *dst, const void *src, unsigned size) +{ + might_fault(); + return __copy_to_user_nocheck(dst, src, size); +} + +static __always_inline __must_check int __copy_in_user(void __user *dst, const void __user *src, unsigned size) { int ret = 0; @@ -220,13 +204,13 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) static __must_check __always_inline int __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) { - return copy_user_generic(dst, (__force const void *)src, size); + return __copy_from_user_nocheck(dst, (__force const void *)src, size); } static __must_check __always_inline int __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) { - return copy_user_generic((__force void *)dst, src, size); + return __copy_to_user_nocheck((__force void *)dst, src, size); } extern long __copy_user_nocache(void *dst, const void __user *src, diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h index 6e5197910fd8..3087ea9c5f2e 100644 --- a/arch/x86/include/asm/uprobes.h +++ b/arch/x86/include/asm/uprobes.h @@ -35,7 +35,10 @@ typedef u8 uprobe_opcode_t; struct arch_uprobe { u16 fixups; - u8 insn[MAX_UINSN_BYTES]; + union { + u8 insn[MAX_UINSN_BYTES]; + u8 ixol[MAX_UINSN_BYTES]; + }; #ifdef CONFIG_X86_64 unsigned long rip_rela_target_address; #endif @@ -49,11 +52,4 @@ struct arch_uprobe_task { unsigned int saved_tf; }; -extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr); -extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs); -extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs); -extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk); 
-extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data); -extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs); -extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs); #endif /* _ASM_UPROBES_H */ diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h index 062921ef34e9..6b964a0b86d1 100644 --- a/arch/x86/include/asm/uv/uv.h +++ b/arch/x86/include/asm/uv/uv.h @@ -12,6 +12,7 @@ extern enum uv_system_type get_uv_system_type(void); extern int is_uv_system(void); extern void uv_cpu_init(void); extern void uv_nmi_init(void); +extern void uv_register_nmi_notifier(void); extern void uv_system_init(void); extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, @@ -25,6 +26,7 @@ static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; } static inline int is_uv_system(void) { return 0; } static inline void uv_cpu_init(void) { } static inline void uv_system_init(void) { } +static inline void uv_register_nmi_notifier(void) { } static inline const struct cpumask * uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long start, unsigned long end, unsigned int cpu) diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h index 2c32df95bb78..a30836c8ac4d 100644 --- a/arch/x86/include/asm/uv/uv_hub.h +++ b/arch/x86/include/asm/uv/uv_hub.h @@ -502,8 +502,8 @@ struct uv_blade_info { unsigned short nr_online_cpus; unsigned short pnode; short memory_nid; - spinlock_t nmi_lock; - unsigned long nmi_count; + spinlock_t nmi_lock; /* obsolete, see uv_hub_nmi */ + unsigned long nmi_count; /* obsolete, see uv_hub_nmi */ }; extern struct uv_blade_info *uv_blade_info; extern short *uv_node_to_blade; @@ -576,6 +576,59 @@ static inline int uv_num_possible_blades(void) return uv_possible_blades; } +/* Per Hub NMI support */ +extern void uv_nmi_setup(void); + +/* BMC sets a bit this MMR non-zero before sending an NMI */ +#define UVH_NMI_MMR UVH_SCRATCH5 +#define UVH_NMI_MMR_CLEAR UVH_SCRATCH5_ALIAS +#define UVH_NMI_MMR_SHIFT 63 +#define UVH_NMI_MMR_TYPE "SCRATCH5" + +/* Newer SMM NMI handler, not present in all systems */ +#define UVH_NMI_MMRX UVH_EVENT_OCCURRED0 +#define UVH_NMI_MMRX_CLEAR UVH_EVENT_OCCURRED0_ALIAS +#define UVH_NMI_MMRX_SHIFT (is_uv1_hub() ? 
\ + UV1H_EVENT_OCCURRED0_EXTIO_INT0_SHFT :\ + UVXH_EVENT_OCCURRED0_EXTIO_INT0_SHFT) +#define UVH_NMI_MMRX_TYPE "EXTIO_INT0" + +/* Non-zero indicates newer SMM NMI handler present */ +#define UVH_NMI_MMRX_SUPPORTED UVH_EXTIO_INT0_BROADCAST + +/* Indicates to BIOS that we want to use the newer SMM NMI handler */ +#define UVH_NMI_MMRX_REQ UVH_SCRATCH5_ALIAS_2 +#define UVH_NMI_MMRX_REQ_SHIFT 62 + +struct uv_hub_nmi_s { + raw_spinlock_t nmi_lock; + atomic_t in_nmi; /* flag this node in UV NMI IRQ */ + atomic_t cpu_owner; /* last locker of this struct */ + atomic_t read_mmr_count; /* count of MMR reads */ + atomic_t nmi_count; /* count of true UV NMIs */ + unsigned long nmi_value; /* last value read from NMI MMR */ +}; + +struct uv_cpu_nmi_s { + struct uv_hub_nmi_s *hub; + atomic_t state; + atomic_t pinging; + int queries; + int pings; +}; + +DECLARE_PER_CPU(struct uv_cpu_nmi_s, __uv_cpu_nmi); +#define uv_cpu_nmi (__get_cpu_var(__uv_cpu_nmi)) +#define uv_hub_nmi (uv_cpu_nmi.hub) +#define uv_cpu_nmi_per(cpu) (per_cpu(__uv_cpu_nmi, cpu)) +#define uv_hub_nmi_per(cpu) (uv_cpu_nmi_per(cpu).hub) + +/* uv_cpu_nmi_states */ +#define UV_NMI_STATE_OUT 0 +#define UV_NMI_STATE_IN 1 +#define UV_NMI_STATE_DUMP 2 +#define UV_NMI_STATE_DUMP_DONE 3 + /* Update SCIR state */ static inline void uv_set_scir_bits(unsigned char value) { diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h index bd5f80e58a23..e42249bcf7e1 100644 --- a/arch/x86/include/asm/uv/uv_mmrs.h +++ b/arch/x86/include/asm/uv/uv_mmrs.h @@ -461,6 +461,23 @@ union uvh_event_occurred0_u { /* ========================================================================= */ +/* UVH_EXTIO_INT0_BROADCAST */ +/* ========================================================================= */ +#define UVH_EXTIO_INT0_BROADCAST 0x61448UL +#define UVH_EXTIO_INT0_BROADCAST_32 0x3f0 + +#define UVH_EXTIO_INT0_BROADCAST_ENABLE_SHFT 0 +#define UVH_EXTIO_INT0_BROADCAST_ENABLE_MASK 0x0000000000000001UL + +union uvh_extio_int0_broadcast_u { + unsigned long v; + struct uvh_extio_int0_broadcast_s { + unsigned long enable:1; /* RW */ + unsigned long rsvd_1_63:63; + } s; +}; + +/* ========================================================================= */ /* UVH_GR0_TLB_INT0_CONFIG */ /* ========================================================================= */ #define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL @@ -2606,6 +2623,20 @@ union uvh_scratch5_u { }; /* ========================================================================= */ +/* UVH_SCRATCH5_ALIAS */ +/* ========================================================================= */ +#define UVH_SCRATCH5_ALIAS 0x2d0208UL +#define UVH_SCRATCH5_ALIAS_32 0x780 + + +/* ========================================================================= */ +/* UVH_SCRATCH5_ALIAS_2 */ +/* ========================================================================= */ +#define UVH_SCRATCH5_ALIAS_2 0x2d0210UL +#define UVH_SCRATCH5_ALIAS_2_32 0x788 + + +/* ========================================================================= */ /* UVXH_EVENT_OCCURRED2 */ /* ========================================================================= */ #define UVXH_EVENT_OCCURRED2 0x70100UL diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index c15ddaf90710..9c3733c5f8f7 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h @@ -158,7 +158,7 @@ enum { X86_SUBARCH_PC = 0, X86_SUBARCH_LGUEST, X86_SUBARCH_XEN, - X86_SUBARCH_MRST, + X86_SUBARCH_INTEL_MID, 
X86_SUBARCH_CE4100, X86_NR_SUBARCHS, }; diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h index b80420bcd09d..b8f1c0176cbc 100644 --- a/arch/x86/include/uapi/asm/hyperv.h +++ b/arch/x86/include/uapi/asm/hyperv.h @@ -27,6 +27,19 @@ #define HV_X64_MSR_VP_RUNTIME_AVAILABLE (1 << 0) /* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/ #define HV_X64_MSR_TIME_REF_COUNT_AVAILABLE (1 << 1) + +/* + * There is a single feature flag that signifies the presence of the MSR + * that can be used to retrieve both the local APIC Timer frequency as + * well as the TSC frequency. + */ + +/* Local APIC timer frequency MSR (HV_X64_MSR_APIC_FREQUENCY) is available */ +#define HV_X64_MSR_APIC_FREQUENCY_AVAILABLE (1 << 11) + +/* TSC frequency MSR (HV_X64_MSR_TSC_FREQUENCY) is available */ +#define HV_X64_MSR_TSC_FREQUENCY_AVAILABLE (1 << 11) + /* * Basic SynIC MSRs (HV_X64_MSR_SCONTROL through HV_X64_MSR_EOM * and HV_X64_MSR_SINT0 through HV_X64_MSR_SINT15) available @@ -136,6 +149,12 @@ /* MSR used to read the per-partition time reference counter */ #define HV_X64_MSR_TIME_REF_COUNT 0x40000020 +/* MSR used to retrieve the TSC frequency */ +#define HV_X64_MSR_TSC_FREQUENCY 0x40000022 + +/* MSR used to retrieve the local APIC timer frequency */ +#define HV_X64_MSR_APIC_FREQUENCY 0x40000023 + /* Define the virtual APIC registers */ #define HV_X64_MSR_EOI 0x40000070 #define HV_X64_MSR_ICR 0x40000071 |
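The comment in the new asm/preempt.h above carries the key idea of the preempt_count rework: the NEED_RESCHED flag is folded into the per-cpu count with its sense inverted, so preempt_enable() can decrement and test for zero in a single instruction. A small userspace sketch of that arithmetic, with the bit value assumed here for illustration (the real constants live in linux/preempt.h):

#include <stdio.h>

#define PREEMPT_NEED_RESCHED	0x80000000u	/* assumed: bit 31, stored inverted */
#define PREEMPT_ENABLED		PREEMPT_NEED_RESCHED	/* count 0, no resched pending */

int main(void)
{
	unsigned int pc = PREEMPT_ENABLED;

	pc += 1;			/* preempt_disable() */
	pc &= ~PREEMPT_NEED_RESCHED;	/* set_preempt_need_resched(): clear the inverted bit */
	pc -= 1;			/* the decrement in preempt_enable()... */

	/* ...after which one test for zero means both "no preempt count
	 * held" and "reschedule requested", matching
	 * __preempt_count_dec_and_test() above. */
	printf("resched now? %s\n", pc == 0 ? "yes" : "no");
	return 0;
}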
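The copy_from_user()/copy_to_user() rework in asm/uaccess.h above keys its diagnostics off __compiletime_object_size(), a thin kernel wrapper around gcc's __builtin_object_size(). A hedged userspace sketch of the same check, with memcpy() standing in for the real user-copy primitive and the overflow path reduced to a refusal:

#include <string.h>

/* Sketch only. As in the kernel code, a result of -1 means the
 * destination size is unknown at compile time, so only runtime
 * checking applies; the builtin gives useful answers only when
 * optimization is enabled. */
static inline unsigned long
checked_copy(void *to, const void *from, unsigned long n)
{
	unsigned long sz = __builtin_object_size(to, 0);

	if (sz != (unsigned long)-1 && sz < n)
		return n;		/* provable overflow of 'to': copy nothing */
	memcpy(to, from, n);		/* stand-in for _copy_from_user() */
	return 0;			/* 0 bytes left uncopied */
}

In the kernel version a constant-size failure is routed to copy_from_user_overflow()/copy_to_user_overflow(), declared through the copy_user_diag() macro with __compiletime_warning() or __compiletime_error(), so a provable overflow is reported at build time instead of at run time.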