Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/mach-msm/include/mach/uncompress.h |   1 +
-rw-r--r--  arch/avr32/mach-at32ap/include/mach/board.h |   2 +-
-rw-r--r--  arch/blackfin/include/asm/cmpxchg.h         |   3 ++-
-rw-r--r--  arch/frv/mb93090-mb00/pci-dma.c             |   1 +
-rw-r--r--  arch/parisc/include/asm/atomic.h            | 107 +------------------
-rw-r--r--  arch/parisc/include/asm/cmpxchg.h           | 116 ++++++++++++++++++
-rw-r--r--  arch/x86/kvm/pmu.c                          |   2 +-
-rw-r--r--  arch/x86/kvm/vmx.c                          |   2 ++
-rw-r--r--  arch/x86/net/bpf_jit_comp.c                 |   2 +-
9 files changed, 126 insertions(+), 110 deletions(-)
diff --git a/arch/arm/mach-msm/include/mach/uncompress.h b/arch/arm/mach-msm/include/mach/uncompress.h
index 169a84007456..c14011fe832d 100644
--- a/arch/arm/mach-msm/include/mach/uncompress.h
+++ b/arch/arm/mach-msm/include/mach/uncompress.h
@@ -16,6 +16,7 @@
 #ifndef __ASM_ARCH_MSM_UNCOMPRESS_H
 #define __ASM_ARCH_MSM_UNCOMPRESS_H
 
+#include <asm/barrier.h>
 #include <asm/processor.h>
 #include <mach/msm_iomap.h>
 
diff --git a/arch/avr32/mach-at32ap/include/mach/board.h b/arch/avr32/mach-at32ap/include/mach/board.h
index 71733866cb4f..70742ec997f8 100644
--- a/arch/avr32/mach-at32ap/include/mach/board.h
+++ b/arch/avr32/mach-at32ap/include/mach/board.h
@@ -7,7 +7,7 @@
 #include <linux/types.h>
 #include <linux/serial.h>
 #include <linux/platform_data/macb.h>
-#include <linux/platform_data/atmel_nand.h>
+#include <linux/platform_data/atmel.h>
 
 #define GPIO_PIN_NONE	(-1)
 
diff --git a/arch/blackfin/include/asm/cmpxchg.h b/arch/blackfin/include/asm/cmpxchg.h
index ba2484f4cb2a..c05868cc61c1 100644
--- a/arch/blackfin/include/asm/cmpxchg.h
+++ b/arch/blackfin/include/asm/cmpxchg.h
@@ -122,7 +122,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 	 (unsigned long)(n), sizeof(*(ptr))))
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
 
-#include <asm-generic/cmpxchg.h>
+#define cmpxchg(ptr, o, n)	cmpxchg_local((ptr), (o), (n))
+#define cmpxchg64(ptr, o, n)	cmpxchg64_local((ptr), (o), (n))
 
 #endif /* !CONFIG_SMP */
 
diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c
index 41098a3803a2..4f8d8bcdc7de 100644
--- a/arch/frv/mb93090-mb00/pci-dma.c
+++ b/arch/frv/mb93090-mb00/pci-dma.c
@@ -13,6 +13,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/list.h>
 #include <linux/pci.h>
+#include <linux/export.h>
 #include <linux/highmem.h>
 #include <linux/scatterlist.h>
 #include <asm/io.h>
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 3ae56073cc3d..6c6defc24619 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -6,6 +6,7 @@
 #define _ASM_PARISC_ATOMIC_H_
 
 #include <linux/types.h>
+#include <asm/cmpxchg.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -48,112 +49,6 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 #  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
 #endif
 
-/* This should get optimized out since it's never called.
-** Or get a link error if xchg is used "wrong".
-*/
-extern void __xchg_called_with_bad_pointer(void);
-
-
-/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
-extern unsigned long __xchg8(char, char *);
-extern unsigned long __xchg32(int, int *);
-#ifdef CONFIG_64BIT
-extern unsigned long __xchg64(unsigned long, unsigned long *);
-#endif
-
-/* optimizer better get rid of switch since size is a constant */
-static __inline__ unsigned long
-__xchg(unsigned long x, __volatile__ void * ptr, int size)
-{
-	switch(size) {
-#ifdef CONFIG_64BIT
-	case 8: return __xchg64(x,(unsigned long *) ptr);
-#endif
-	case 4: return __xchg32((int) x, (int *) ptr);
-	case 1: return __xchg8((char) x, (char *) ptr);
-	}
-	__xchg_called_with_bad_pointer();
-	return x;
-}
-
-
-/*
-** REVISIT - Abandoned use of LDCW in xchg() for now:
-** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
-** o and while we are at it, could CONFIG_64BIT code use LDCD too?
-**
-** if (__builtin_constant_p(x) && (x == NULL))
-** if (((unsigned long)p & 0xf) == 0)
-** return __ldcw(p);
-*/
-#define xchg(ptr,x) \
-	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-/* bug catcher for when unsupported size is used - won't link */
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
-extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
-extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);
-
-/* don't worry...optimizer will get rid of most of this */
-static __inline__ unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
-{
-	switch(size) {
-#ifdef CONFIG_64BIT
-	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
-#endif
-	case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
-	}
-	__cmpxchg_called_with_bad_pointer();
-	return old;
-}
-
-#define cmpxchg(ptr,o,n)						\
-	({								\
-		__typeof__(*(ptr)) _o_ = (o);				\
-		__typeof__(*(ptr)) _n_ = (n);				\
-		(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
-				(unsigned long)_n_, sizeof(*(ptr)));	\
-	})
-
-#include <asm-generic/cmpxchg-local.h>
-
-static inline unsigned long __cmpxchg_local(volatile void *ptr,
-				unsigned long old,
-				unsigned long new_, int size)
-{
-	switch (size) {
-#ifdef CONFIG_64BIT
-	case 8:	return __cmpxchg_u64((unsigned long *)ptr, old, new_);
-#endif
-	case 4:	return __cmpxchg_u32(ptr, old, new_);
-	default:
-		return __cmpxchg_local_generic(ptr, old, new_, size);
-	}
-}
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n)					\
-	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
-			(unsigned long)(n), sizeof(*(ptr))))
-#ifdef CONFIG_64BIT
-#define cmpxchg64_local(ptr, o, n)					\
-  ({									\
-	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
-	cmpxchg_local((ptr), (o), (n));					\
-  })
-#else
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-#endif
-
 /*
  * Note that we need not lock read accesses - aligned word writes/reads
  * are atomic, so a reader never sees inconsistent values.
diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..dbd13354ec41
--- /dev/null
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -0,0 +1,116 @@
+/*
+ * forked from parisc asm/atomic.h which was:
+ *	Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
+ *	Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
+ */
+
+#ifndef _ASM_PARISC_CMPXCHG_H_
+#define _ASM_PARISC_CMPXCHG_H_
+
+/* This should get optimized out since it's never called.
+** Or get a link error if xchg is used "wrong".
+*/
+extern void __xchg_called_with_bad_pointer(void);
+
+/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
+extern unsigned long __xchg8(char, char *);
+extern unsigned long __xchg32(int, int *);
+#ifdef CONFIG_64BIT
+extern unsigned long __xchg64(unsigned long, unsigned long *);
+#endif
+
+/* optimizer better get rid of switch since size is a constant */
+static inline unsigned long
+__xchg(unsigned long x, __volatile__ void *ptr, int size)
+{
+	switch (size) {
+#ifdef CONFIG_64BIT
+	case 8: return __xchg64(x, (unsigned long *) ptr);
+#endif
+	case 4: return __xchg32((int) x, (int *) ptr);
+	case 1: return __xchg8((char) x, (char *) ptr);
+	}
+	__xchg_called_with_bad_pointer();
+	return x;
+}
+
+/*
+** REVISIT - Abandoned use of LDCW in xchg() for now:
+** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
+** o and while we are at it, could CONFIG_64BIT code use LDCD too?
+**
+** if (__builtin_constant_p(x) && (x == NULL))
+** if (((unsigned long)p & 0xf) == 0)
+** return __ldcw(p);
+*/
+#define xchg(ptr, x) \
+	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+/* bug catcher for when unsupported size is used - won't link */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
+extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
+				   unsigned int new_);
+extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr,
+				   unsigned long old, unsigned long new_);
+
+/* don't worry...optimizer will get rid of most of this */
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
+{
+	switch (size) {
+#ifdef CONFIG_64BIT
+	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
+#endif
+	case 4: return __cmpxchg_u32((unsigned int *)ptr,
+				     (unsigned int)old, (unsigned int)new_);
+	}
+	__cmpxchg_called_with_bad_pointer();
+	return old;
+}
+
+#define cmpxchg(ptr, o, n)						\
+({									\
+	__typeof__(*(ptr)) _o_ = (o);					\
+	__typeof__(*(ptr)) _n_ = (n);					\
+	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
+				       (unsigned long)_n_, sizeof(*(ptr))); \
+})
+
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+					    unsigned long old,
+					    unsigned long new_, int size)
+{
+	switch (size) {
+#ifdef CONFIG_64BIT
+	case 8:	return __cmpxchg_u64((unsigned long *)ptr, old, new_);
+#endif
+	case 4:	return __cmpxchg_u32(ptr, old, new_);
+	default:
+		return __cmpxchg_local_generic(ptr, old, new_, size);
+	}
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n)					\
+	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
+			(unsigned long)(n), sizeof(*(ptr))))
+#ifdef CONFIG_64BIT
+#define cmpxchg64_local(ptr, o, n)					\
+({									\
+	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
+	cmpxchg_local((ptr), (o), (n));					\
+})
+#else
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#endif
+
+#endif /* _ASM_PARISC_CMPXCHG_H_ */
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index a73f0c104813..173df38dbda5 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -369,7 +369,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
 		if (pmu->fixed_ctr_ctrl == data)
 			return 0;
-		if (!(data & 0xfffffffffffff444)) {
+		if (!(data & 0xfffffffffffff444ull)) {
 			reprogram_fixed_counters(pmu, data);
 			return 0;
 		}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 280751c84724..ad85adfef843 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3906,7 +3906,9 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
 
 	vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
+	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 	vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
+	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 	vmx_set_cr4(&vmx->vcpu, 0);
 	vmx_set_efer(&vmx->vcpu, 0);
 	vmx_fpu_activate(&vmx->vcpu);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 5671752f8d9c..5a5b6e4dd738 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -289,7 +289,7 @@ void bpf_jit_compile(struct sk_filter *fp)
 				EMIT2(0x24, K & 0xFF);	/* and imm8,%al */
 			} else if (K >= 0xFFFF0000) {
 				EMIT2(0x66, 0x25);	/* and imm16,%ax */
-				EMIT2(K, 2);
+				EMIT(K, 2);
 			} else {
 				EMIT1_off32(0x25, K);	/* and imm32,%eax */
 			}
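
A note on the one-byte bpf_jit_comp.c fix above, since it is easy to misread. In this generation of the x86 BPF JIT the emit macros look roughly like the following (paraphrased for illustration; not part of this patch):

	#define EMIT(bytes, len) \
		do { prog = emit_code(prog, bytes, len); } while (0)
	#define EMIT1(b1)	EMIT(b1, 1)			/* one literal byte */
	#define EMIT2(b1, b2)	EMIT((b1) + ((b2) << 8), 2)	/* two literal bytes */

So EMIT2(K, 2) encoded the byte pair (K & 0xff, 0x02) — truncating K to one byte and appending a stray 0x02 — while EMIT(K, 2) emits the low 16 bits of K as the imm16 operand that the preceding 0x66 0x25 opcode expects.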
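The vmx.c hunk takes the kvm->srcu read lock around vmx_set_cr0() because entering real mode on reset appears to dereference the SRCU-protected memslots. The generic SRCU read-side pattern it follows is sketched below (minimal and illustrative; my_srcu is a hypothetical srcu_struct, not from the patch):

	int idx;

	idx = srcu_read_lock(&my_srcu);		/* begin read-side section */
	/* ... dereference SRCU-protected data here ... */
	srcu_read_unlock(&my_srcu, idx);	/* must pass back the same idx */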
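Finally, the new arch/parisc/include/asm/cmpxchg.h is a pure code move: it exports the same xchg()/cmpxchg() API that asm/atomic.h previously defined inline. For reference, a minimal sketch of the classic cmpxchg() retry loop that API supports (illustrative only; counter and add_delta are made-up names, not from the commit):

	static unsigned long counter;

	static void add_delta(unsigned long delta)
	{
		unsigned long old, new;

		do {
			old = counter;		/* snapshot the current value */
			new = old + delta;	/* value we want to install */
			/* the store succeeds only if counter still equals old;
			   cmpxchg() returns the value it actually observed */
		} while (cmpxchg(&counter, old, new) != old);
	}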