Diffstat (limited to 'arch/x86/include')
-rw-r--r--   arch/x86/include/asm/atomic.h      |  16
-rw-r--r--   arch/x86/include/asm/cmpxchg_32.h  |  55
-rw-r--r--   arch/x86/include/asm/cpufeature.h  |   6
-rw-r--r--   arch/x86/include/asm/futex.h       |  12
-rw-r--r--   arch/x86/include/asm/local.h       |  18
-rw-r--r--   arch/x86/include/asm/module.h      |   2
-rw-r--r--   arch/x86/include/asm/percpu.h      |   3
-rw-r--r--   arch/x86/include/asm/processor.h   |  33
-rw-r--r--   arch/x86/include/asm/swab.h        |  29
-rw-r--r--   arch/x86/include/asm/tlbflush.h    |   3
-rw-r--r--   arch/x86/include/asm/uaccess.h     |  42
11 files changed, 25 insertions(+), 194 deletions(-)
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index b6c3b821acf6..722aa3b04624 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -172,23 +172,7 @@ static inline int atomic_add_negative(int i, atomic_t *v)
  */
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-#ifdef CONFIG_M386
-	int __i;
-	unsigned long flags;
-	if (unlikely(boot_cpu_data.x86 <= 3))
-		goto no_xadd;
-#endif
-	/* Modern 486+ processor */
 	return i + xadd(&v->counter, i);
-
-#ifdef CONFIG_M386
-no_xadd: /* Legacy 386 processor */
-	raw_local_irq_save(flags);
-	__i = atomic_read(v);
-	atomic_set(v, i + __i);
-	raw_local_irq_restore(flags);
-	return i + __i;
-#endif
 }
 
 /**
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index 53f4b219336b..f8bf2eecab86 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -34,9 +34,7 @@ static inline void set_64bit(volatile u64 *ptr, u64 value)
 		     : "memory");
 }
 
-#ifdef CONFIG_X86_CMPXCHG
 #define __HAVE_ARCH_CMPXCHG 1
-#endif
 
 #ifdef CONFIG_X86_CMPXCHG64
 #define cmpxchg64(ptr, o, n)						\
@@ -73,59 +71,6 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
 	return prev;
 }
 
-#ifndef CONFIG_X86_CMPXCHG
-/*
- * Building a kernel capable running on 80386. It may be necessary to
- * simulate the cmpxchg on the 80386 CPU. For that purpose we define
- * a function for each of the sizes we support.
- */
-
-extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
-extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
-extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
-
-static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
-					unsigned long new, int size)
-{
-	switch (size) {
-	case 1:
-		return cmpxchg_386_u8(ptr, old, new);
-	case 2:
-		return cmpxchg_386_u16(ptr, old, new);
-	case 4:
-		return cmpxchg_386_u32(ptr, old, new);
-	}
-	return old;
-}
-
-#define cmpxchg(ptr, o, n)						\
-({									\
-	__typeof__(*(ptr)) __ret;					\
-	if (likely(boot_cpu_data.x86 > 3))				\
-		__ret = (__typeof__(*(ptr)))__cmpxchg((ptr),		\
-				(unsigned long)(o), (unsigned long)(n),	\
-				sizeof(*(ptr)));			\
-	else								\
-		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
-				(unsigned long)(o), (unsigned long)(n),	\
-				sizeof(*(ptr)));			\
-	__ret;								\
-})
-#define cmpxchg_local(ptr, o, n)					\
-({									\
-	__typeof__(*(ptr)) __ret;					\
-	if (likely(boot_cpu_data.x86 > 3))				\
-		__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),	\
-				(unsigned long)(o), (unsigned long)(n),	\
-				sizeof(*(ptr)));			\
-	else								\
-		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
-				(unsigned long)(o), (unsigned long)(n),	\
-				sizeof(*(ptr)));			\
-	__ret;								\
-})
-#endif
-
 #ifndef CONFIG_X86_CMPXCHG64
 /*
  * Building a kernel capable running on 80386 and 80486. It may be necessary
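[Annotation] The block removed above existed because the 80386 lacks the cmpxchg instruction entirely; the cmpxchg_386_*() helpers emulated it with interrupts disabled. For reference, a minimal user-space sketch (plain C with GCC inline assembly; the function name is invented, this is not kernel code) of the lock-prefixed compare-and-exchange that __cmpxchg() relies on from the 486 onward:

#include <stdio.h>

/* Illustration only: the 4-byte case of the cmpxchg protocol.
 * "lock; cmpxchgl %2, %1" compares EAX (tied to 'old' via "0"/"=a")
 * with *ptr; on a match it stores 'new' into *ptr, otherwise it loads
 * the current *ptr into EAX.  Either way EAX ends up holding the value
 * that was in memory, which is what callers test against. */
static inline unsigned int cmpxchg_u32_sketch(volatile unsigned int *ptr,
                                              unsigned int old,
                                              unsigned int new)
{
        unsigned int prev;

        asm volatile("lock; cmpxchgl %2, %1"
                     : "=a" (prev), "+m" (*ptr)
                     : "r" (new), "0" (old)
                     : "memory");
        return prev;            /* equals 'old' iff the store happened */
}

int main(void)
{
        volatile unsigned int val = 1;
        unsigned int prev;

        prev = cmpxchg_u32_sketch(&val, 1, 2);  /* succeeds: 1 -> 2 */
        printf("prev=%u val=%u\n", prev, val);
        prev = cmpxchg_u32_sketch(&val, 7, 9);  /* fails: val stays 2 */
        printf("prev=%u val=%u\n", prev, val);
        return 0;
}

Callers typically loop until the returned previous value matches the expected one; emulating that atomicity on a 386 required the IRQ-disabling fallback being deleted here.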
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index c22a492daf57..da40b1e2228e 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -313,12 +313,6 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_eager_fpu	boot_cpu_has(X86_FEATURE_EAGER_FPU)
 #define cpu_has_topoext		boot_cpu_has(X86_FEATURE_TOPOEXT)
 
-#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
-# define cpu_has_invlpg		1
-#else
-# define cpu_has_invlpg		(boot_cpu_data.x86 > 3)
-#endif
-
 #ifdef CONFIG_X86_64
 
 #undef cpu_has_vme
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index f373046e63ec..be27ba1e947a 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -55,12 +55,6 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
-	/* Real i386 machines can only support FUTEX_OP_SET */
-	if (op != FUTEX_OP_SET && boot_cpu_data.x86 == 3)
-		return -ENOSYS;
-#endif
-
 	pagefault_disable();
 
 	switch (op) {
@@ -118,12 +112,6 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 {
 	int ret = 0;
 
-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
-	/* Real i386 machines have no cmpxchg instruction */
-	if (boot_cpu_data.x86 == 3)
-		return -ENOSYS;
-#endif
-
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index c8bed0da434a..2d89e3980cbd 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -124,27 +124,11 @@ static inline int local_add_negative(long i, local_t *l)
  */
 static inline long local_add_return(long i, local_t *l)
 {
-	long __i;
-#ifdef CONFIG_M386
-	unsigned long flags;
-	if (unlikely(boot_cpu_data.x86 <= 3))
-		goto no_xadd;
-#endif
-	/* Modern 486+ processor */
-	__i = i;
+	long __i = i;
 	asm volatile(_ASM_XADD "%0, %1;"
 		     : "+r" (i), "+m" (l->a.counter)
 		     : : "memory");
 	return i + __i;
-
-#ifdef CONFIG_M386
-no_xadd: /* Legacy 386 processor */
-	local_irq_save(flags);
-	__i = local_read(l);
-	local_set(l, i + __i);
-	local_irq_restore(flags);
-	return i + __i;
-#endif
 }
 
 static inline long local_sub_return(long i, local_t *l)
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
index 9eae7752ae9b..e3b7819caeef 100644
--- a/arch/x86/include/asm/module.h
+++ b/arch/x86/include/asm/module.h
@@ -5,8 +5,6 @@
 
 #ifdef CONFIG_X86_64
 /* X86_64 does not define MODULE_PROC_FAMILY */
-#elif defined CONFIG_M386
-#define MODULE_PROC_FAMILY "386 "
 #elif defined CONFIG_M486
 #define MODULE_PROC_FAMILY "486 "
 #elif defined CONFIG_M586
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 1104afaba52b..0da5200ee79d 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -406,7 +406,6 @@ do {									\
 #define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
 
-#ifndef CONFIG_M386
 #define __this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
 #define __this_cpu_add_return_2(pcp, val)	percpu_add_return_op(pcp, val)
 #define __this_cpu_add_return_4(pcp, val)	percpu_add_return_op(pcp, val)
@@ -421,8 +420,6 @@ do {									\
 #define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 #define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
-#endif /* !CONFIG_M386 */
-
 #ifdef CONFIG_X86_CMPXCHG64
 #define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2)		\
 ({									\
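[Annotation] The atomic.h and local.h hunks above drop their IRQ-disabling fallbacks and rely on xadd unconditionally; xadd first appeared on the 80486. A user-space sketch of the surviving fast path (illustration only, with an invented name rather than the kernel's _ASM_XADD macro):

#include <stdio.h>

/* Illustration only: xadd atomically exchanges the register operand
 * with the memory operand and then adds them into memory, so the
 * register comes back holding the counter's old value.  The lock
 * prefix makes the read-modify-write atomic against other CPUs. */
static inline long add_return_sketch(long i, volatile long *counter)
{
        long inc = i;

        asm volatile("lock; xadd %0, %1"
                     : "+r" (i), "+m" (*counter)
                     : : "memory");
        return i + inc;         /* old value + increment = new value */
}

int main(void)
{
        volatile long n = 40;

        printf("%ld\n", add_return_sketch(2, &n));      /* 42 */
        printf("%ld\n", n);                             /* 42 */
        return 0;
}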
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index db0d8c32090c..e101b38912de 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -672,18 +672,29 @@ static inline void sync_core(void)
 {
 	int tmp;
 
-#if defined(CONFIG_M386) || defined(CONFIG_M486)
-	if (boot_cpu_data.x86 < 5)
-		/* There is no speculative execution.
-		 * jmp is a barrier to prefetching. */
-		asm volatile("jmp 1f\n1:\n" ::: "memory");
-	else
+#ifdef CONFIG_M486
+	/*
+	 * Do a CPUID if available, otherwise do a jump.  The jump
+	 * can conveniently enough be the jump around CPUID.
+	 */
+	asm volatile("cmpl %2,%1\n\t"
+		     "jl 1f\n\t"
+		     "cpuid\n"
+		     "1:"
+		     : "=a" (tmp)
+		     : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
+		     : "ebx", "ecx", "edx", "memory");
+#else
+	/*
+	 * CPUID is a barrier to speculative execution.
+	 * Prefetched instructions are automatically
+	 * invalidated when modified.
+	 */
+	asm volatile("cpuid"
+		     : "=a" (tmp)
+		     : "0" (1)
+		     : "ebx", "ecx", "edx", "memory");
 #endif
-	/* cpuid is a barrier to speculative execution.
-	 * Prefetched instructions are automatically
-	 * invalidated when modified. */
-	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
-		     : "ebx", "ecx", "edx", "memory");
 }
 
 static inline void __monitor(const void *eax, unsigned long ecx,
diff --git a/arch/x86/include/asm/swab.h b/arch/x86/include/asm/swab.h
index 557cd9f00661..7f235c7105c1 100644
--- a/arch/x86/include/asm/swab.h
+++ b/arch/x86/include/asm/swab.h
@@ -6,22 +6,7 @@
 
 static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
 {
-#ifdef __i386__
-# ifdef CONFIG_X86_BSWAP
-	asm("bswap %0" : "=r" (val) : "0" (val));
-# else
-	asm("xchgb %b0,%h0\n\t"	/* swap lower bytes	*/
-	    "rorl $16,%0\n\t"	/* swap words		*/
-	    "xchgb %b0,%h0"	/* swap higher bytes	*/
-	    : "=q" (val)
-	    : "0" (val));
-# endif
-
-#else /* __i386__ */
-	asm("bswapl %0"
-	    : "=r" (val)
-	    : "0" (val));
-#endif
+	asm("bswapl %0" : "=r" (val) : "0" (val));
 	return val;
 }
 #define __arch_swab32 __arch_swab32
@@ -37,22 +22,12 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 val)
 		__u64 u;
 	} v;
 	v.u = val;
-# ifdef CONFIG_X86_BSWAP
 	asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
 	    : "=r" (v.s.a), "=r" (v.s.b)
 	    : "0" (v.s.a), "1" (v.s.b));
-# else
-	v.s.a = __arch_swab32(v.s.a);
-	v.s.b = __arch_swab32(v.s.b);
-	asm("xchgl %0,%1"
-	    : "=r" (v.s.a), "=r" (v.s.b)
-	    : "0" (v.s.a), "1" (v.s.b));
-# endif
 	return v.u;
 #else /* __i386__ */
-	asm("bswapq %0"
-	    : "=r" (val)
-	    : "0" (val));
+	asm("bswapq %0" : "=r" (val) : "0" (val));
 	return val;
 #endif
 }
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 74a44333545a..0fee48e279cc 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -56,10 +56,7 @@ static inline void __flush_tlb_all(void)
 
 static inline void __flush_tlb_one(unsigned long addr)
 {
-	if (cpu_has_invlpg)
-		__flush_tlb_single(addr);
-	else
-		__flush_tlb();
+	__flush_tlb_single(addr);
 }
 
 #define TLB_FLUSH_ALL	-1UL
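[Annotation] swab.h now keeps only the bswap path, since bswap is likewise a 486 addition; the deleted xchgb/rorl sequence was the 80386 workaround. Both strategies side by side, as a self-contained user-space sketch (GCC inline asm, x86 only; function names are invented):

#include <stdio.h>
#include <stdint.h>

/* 486+ path: a single instruction reverses all four bytes. */
static inline uint32_t swab32_bswap(uint32_t val)
{
        asm("bswap %0" : "+r" (val));
        return val;
}

/* The removed 386 fallback: swap the low two bytes, rotate the
 * 16-bit halves, swap the low two bytes again.  The "Q" constraint
 * asks for a register with addressable low/high byte halves
 * (eax..edx), which the %b0/%h0 operand modifiers require. */
static inline uint32_t swab32_386(uint32_t val)
{
        asm("xchgb %b0,%h0\n\t"     /* swap lower bytes  */
            "rorl $16,%0\n\t"       /* swap words        */
            "xchgb %b0,%h0"         /* swap higher bytes */
            : "+Q" (val));
        return val;
}

int main(void)
{
        printf("%08x\n", swab32_bswap(0x12345678));     /* 78563412 */
        printf("%08x\n", swab32_386(0x12345678));       /* 78563412 */
        return 0;
}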
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 7ccf8d131535..1709801d18ec 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -237,8 +237,6 @@ extern void __put_user_2(void);
 extern void __put_user_4(void);
 extern void __put_user_8(void);
 
-#ifdef CONFIG_X86_WP_WORKS_OK
-
 /**
  * put_user: - Write a simple value into user space.
  * @x:   Value to copy to user space.
@@ -326,29 +324,6 @@ do {									\
 	}								\
 } while (0)
 
-#else
-
-#define __put_user_size(x, ptr, size, retval, errret)			\
-do {									\
-	__typeof__(*(ptr))__pus_tmp = x;				\
-	retval = 0;							\
-									\
-	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0))	\
-		retval = errret;					\
-} while (0)
-
-#define put_user(x, ptr)						\
-({									\
-	int __ret_pu;							\
-	__typeof__(*(ptr))__pus_tmp = x;				\
-	__ret_pu = 0;							\
-	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp,			\
-				sizeof(*(ptr))) != 0))			\
-		__ret_pu = -EFAULT;					\
-	__ret_pu;							\
-})
-#endif
-
 #ifdef CONFIG_X86_32
 #define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
 #define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
@@ -543,29 +518,12 @@ struct __large_struct { unsigned long buf[100]; };
 	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
 } while (0)
 
-#ifdef CONFIG_X86_WP_WORKS_OK
-
 #define put_user_try		uaccess_try
 #define put_user_catch(err)	uaccess_catch(err)
 
 #define put_user_ex(x, ptr)	\
 	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
 
-#else /* !CONFIG_X86_WP_WORKS_OK */
-
-#define put_user_try		do {		\
-	int __uaccess_err = 0;
-
-#define put_user_catch(err)			\
-	(err) |= __uaccess_err;			\
-} while (0)
-
-#define put_user_ex(x, ptr)	do {		\
-	__uaccess_err |= __put_user(x, ptr);	\
-} while (0)
-
-#endif /* CONFIG_X86_WP_WORKS_OK */
-
 extern unsigned long
 copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
 extern __must_check long
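[Annotation] The fallback removed above served 386s whose WP bit is not honored in supervisor mode, bouncing every put_user() through __copy_to_user_ll(). The pattern worth noting is the __typeof__ temporary, which evaluates x exactly once and lets sizeof(*(ptr)) select the store width. A user-space sketch of that dispatch (invented names; plain memcpy stands in for the user-copy helper, so no fault handling is modeled):

#include <stdio.h>
#include <string.h>

/* Illustration only: materialize x into a temporary of the pointee's
 * type, then dispatch on its size.  In the kernel the memcpy below
 * was __copy_to_user_ll(), which performed its own access checks
 * instead of relying on the WP bit trapping bad stores. */
#define put_user_sketch(x, ptr)                                 \
({                                                              \
        __typeof__(*(ptr)) __pus_tmp = (x);                     \
        int __ret_pu = 0;                                       \
        if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 &&       \
            sizeof(*(ptr)) != 4 && sizeof(*(ptr)) != 8)         \
                __ret_pu = -1;  /* unsupported size */          \
        else                                                    \
                memcpy((void *)(ptr), &__pus_tmp,               \
                       sizeof(*(ptr)));                         \
        __ret_pu;                                               \
})

int main(void)
{
        short s = 0;
        long l = 0;

        /* The same macro stores 2 or 8 bytes depending on the pointer. */
        printf("%d %hd\n", put_user_sketch(7, &s), s);  /* 0 7 */
        printf("%d %ld\n", put_user_sketch(7, &l), l);  /* 0 7 */
        return 0;
}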