| author    | David S. Miller <davem@davemloft.net> | 2017-07-21 03:38:43 +0100 |
| committer | David S. Miller <davem@davemloft.net> | 2017-07-21 03:38:43 +0100 |
| commit    | 7a68ada6ec7d88c68057d3a4c2a517eb94289976 (patch) |
| tree      | 51cd586e74fc92bfbdf382fa1544a235d908b25c /arch/mips/include/asm |
| parent    | 760446f967678e14ee1b6464ee1bb8562f299fa6 (diff) |
| parent    | 96080f697786e0a30006fcbcc5b53f350fcb3e9f (diff) |
| download  | linux-7a68ada6ec7d88c68057d3a4c2a517eb94289976.tar.bz2 |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'arch/mips/include/asm')
37 files changed, 350 insertions, 914 deletions
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 2535c7b4c482..7c8aab23bce8 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -12,6 +12,8 @@ generic-y += mm-arch-hooks.h
 generic-y += parport.h
 generic-y += percpu.h
 generic-y += preempt.h
+generic-y += qrwlock.h
+generic-y += qspinlock.h
 generic-y += sections.h
 generic-y += segment.h
 generic-y += serial.h
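Aside (not part of the patch): generic-y entries ask Kbuild to emit trivial wrapper headers instead of requiring hand-written MIPS ones. Assuming the standard asm-generic wrapper mechanism of this era (scripts/Makefile.asm-generic), the generated file is nothing more than a redirect:

	/* arch/mips/include/generated/asm/qspinlock.h, generated by Kbuild
	 * (illustrative; the qrwlock.h wrapper is identical in form) */
	#include <asm-generic/qspinlock.h>

diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h
index de781cf54bc7..da80878f2c0d 100644
--- a/arch/mips/include/asm/branch.h
+++ b/arch/mips/include/asm/branch.h
@@ -74,10 +74,7 @@ static inline int compute_return_epc(struct pt_regs *regs)
 			return __microMIPS_compute_return_epc(regs);
 		if (cpu_has_mips16)
 			return __MIPS16e_compute_return_epc(regs);
-		return regs->cp0_epc;
-	}
-
-	if (!delay_slot(regs)) {
+	} else if (!delay_slot(regs)) {
 		regs->cp0_epc += 4;
 		return 0;
 	}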
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index b71ab4a5fd50..903f3bf48419 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -13,153 +13,107 @@
 #include <asm/compiler.h>
 #include <asm/war.h>
 
-static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
-{
-	__u32 retval;
-
-	smp_mb__before_llsc();
-
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {
-		unsigned long dummy;
-
-		__asm__ __volatile__(
-		"	.set arch=r4000	\n"
-		"1:	ll %0, %3	# xchg_u32	\n"
-		"	.set mips0	\n"
-		"	move %2, %z4	\n"
-		"	.set arch=r4000	\n"
-		"	sc %2, %1	\n"
-		"	beqzl %2, 1b	\n"
-		"	.set mips0	\n"
-		: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
-		: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
-		: "memory");
-	} else if (kernel_uses_llsc) {
-		unsigned long dummy;
-
-		do {
-			__asm__ __volatile__(
-			"	.set "MIPS_ISA_ARCH_LEVEL"	\n"
-			"	ll %0, %3	# xchg_u32	\n"
-			"	.set mips0	\n"
-			"	move %2, %z4	\n"
-			"	.set "MIPS_ISA_ARCH_LEVEL"	\n"
-			"	sc %2, %1	\n"
-			"	.set mips0	\n"
-			: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
-			  "=&r" (dummy)
-			: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
-			: "memory");
-		} while (unlikely(!dummy));
-	} else {
-		unsigned long flags;
-
-		raw_local_irq_save(flags);
-		retval = *m;
-		*m = val;
-		raw_local_irq_restore(flags);	/* implies memory barrier */
-	}
-
-	smp_llsc_mb();
-
-	return retval;
-}
+/*
+ * Using a branch-likely instruction to check the result of an sc instruction
+ * works around a bug present in R10000 CPUs prior to revision 3.0 that could
+ * cause ll-sc sequences to execute non-atomically.
+ */
+#if R10000_LLSC_WAR
+# define __scbeqz "beqzl"
+#else
+# define __scbeqz "beqz"
+#endif
 
-#ifdef CONFIG_64BIT
-static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
-{
-	__u64 retval;
-
-	smp_mb__before_llsc();
-
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {
-		unsigned long dummy;
-
-		__asm__ __volatile__(
-		"	.set arch=r4000	\n"
-		"1:	lld %0, %3	# xchg_u64	\n"
-		"	move %2, %z4	\n"
-		"	scd %2, %1	\n"
-		"	beqzl %2, 1b	\n"
-		"	.set mips0	\n"
-		: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
-		: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
-		: "memory");
-	} else if (kernel_uses_llsc) {
-		unsigned long dummy;
-
-		do {
-			__asm__ __volatile__(
-			"	.set "MIPS_ISA_ARCH_LEVEL"	\n"
-			"	lld %0, %3	# xchg_u64	\n"
-			"	move %2, %z4	\n"
-			"	scd %2, %1	\n"
-			"	.set mips0	\n"
-			: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
-			  "=&r" (dummy)
-			: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
-			: "memory");
-		} while (unlikely(!dummy));
-	} else {
-		unsigned long flags;
-
-		raw_local_irq_save(flags);
-		retval = *m;
-		*m = val;
-		raw_local_irq_restore(flags);	/* implies memory barrier */
-	}
+/*
+ * These functions don't exist, so if they are called you'll either:
+ *
+ * - Get an error at compile-time due to __compiletime_error, if supported by
+ *   your compiler.
+ *
+ * or:
+ *
+ * - Get an error at link-time due to the call to the missing function.
+ */
+extern unsigned long __cmpxchg_called_with_bad_pointer(void)
+	__compiletime_error("Bad argument size for cmpxchg");
+extern unsigned long __xchg_called_with_bad_pointer(void)
+	__compiletime_error("Bad argument size for xchg");
 
-	smp_llsc_mb();
+#define __xchg_asm(ld, st, m, val) \
+({ \
+	__typeof(*(m)) __ret; \
+	\
+	if (kernel_uses_llsc) { \
+		__asm__ __volatile__( \
+		"	.set push	\n" \
+		"	.set noat	\n" \
+		"	.set " MIPS_ISA_ARCH_LEVEL "	\n" \
+		"1:	" ld " %0, %2	# __xchg_asm	\n" \
+		"	.set mips0	\n" \
+		"	move $1, %z3	\n" \
+		"	.set " MIPS_ISA_ARCH_LEVEL "	\n" \
+		"	" st " $1, %1	\n" \
+		"\t" __scbeqz "	$1, 1b	\n" \
+		"	.set pop	\n" \
+		: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
+		: GCC_OFF_SMALL_ASM() (*m), "Jr" (val) \
+		: "memory"); \
+	} else { \
+		unsigned long __flags; \
+		\
+		raw_local_irq_save(__flags); \
+		__ret = *m; \
+		*m = val; \
+		raw_local_irq_restore(__flags); \
+	} \
+	\
+	__ret; \
+})
 
-	return retval;
-}
-#else
-extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
-#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
-#endif
+extern unsigned long __xchg_small(volatile void *ptr, unsigned long val,
+				  unsigned int size);
 
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
+				   int size)
 {
 	switch (size) {
+	case 1:
+	case 2:
+		return __xchg_small(ptr, x, size);
+
 	case 4:
-		return __xchg_u32(ptr, x);
+		return __xchg_asm("ll", "sc", (volatile u32 *)ptr, x);
+
 	case 8:
-		return __xchg_u64(ptr, x);
-	}
+		if (!IS_ENABLED(CONFIG_64BIT))
+			return __xchg_called_with_bad_pointer();
+
+		return __xchg_asm("lld", "scd", (volatile u64 *)ptr, x);
 
-	return x;
+	default:
+		return __xchg_called_with_bad_pointer();
+	}
 }
 
 #define xchg(ptr, x) \
 ({ \
-	BUILD_BUG_ON(sizeof(*(ptr)) & ~0xc); \
+	__typeof__(*(ptr)) __res; \
 	\
-	((__typeof__(*(ptr))) \
-		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
+	smp_mb__before_llsc(); \
+	\
+	__res = (__typeof__(*(ptr))) \
+		__xchg((ptr), (unsigned long)(x), sizeof(*(ptr))); \
+	\
+	smp_llsc_mb(); \
+	\
+	__res; \
 })
 
 #define __cmpxchg_asm(ld, st, m, old, new) \
 ({ \
 	__typeof(*(m)) __ret; \
 	\
-	if (kernel_uses_llsc && R10000_LLSC_WAR) { \
-		__asm__ __volatile__( \
-		"	.set push	\n" \
-		"	.set noat	\n" \
-		"	.set arch=r4000	\n" \
-		"1:	" ld " %0, %2	# __cmpxchg_asm	\n" \
-		"	bne %0, %z3, 2f	\n" \
-		"	.set mips0	\n" \
-		"	move $1, %z4	\n" \
-		"	.set arch=r4000	\n" \
-		"	" st " $1, %1	\n" \
-		"	beqzl $1, 1b	\n" \
-		"2:	\n" \
-		"	.set pop	\n" \
-		: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
-		: GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \
-		: "memory"); \
-	} else if (kernel_uses_llsc) { \
+	if (kernel_uses_llsc) { \
 		__asm__ __volatile__( \
 		"	.set push	\n" \
 		"	.set noat	\n" \
@@ -170,7 +124,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 		"	move $1, %z4	\n" \
 		"	.set "MIPS_ISA_ARCH_LEVEL"	\n" \
 		"	" st " $1, %1	\n" \
-		"	beqz $1, 1b	\n" \
+		"\t" __scbeqz "	$1, 1b	\n" \
 		"	.set pop	\n" \
 		"2:	\n" \
 		: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
@@ -189,44 +143,50 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 	__ret; \
 })
 
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid cmpxchg().
- */
-extern void __cmpxchg_called_with_bad_pointer(void);
+extern unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
+				     unsigned long new, unsigned int size);
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+				      unsigned long new, unsigned int size)
+{
+	switch (size) {
+	case 1:
+	case 2:
+		return __cmpxchg_small(ptr, old, new, size);
+
+	case 4:
+		return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr, old, new);
+
+	case 8:
+		/* lld/scd are only available for MIPS64 */
+		if (!IS_ENABLED(CONFIG_64BIT))
+			return __cmpxchg_called_with_bad_pointer();
+
+		return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr, old, new);
 
-#define __cmpxchg(ptr, old, new, pre_barrier, post_barrier) \
+	default:
+		return __cmpxchg_called_with_bad_pointer();
+	}
+}
+
+#define cmpxchg_local(ptr, old, new) \
+	((__typeof__(*(ptr))) \
+	 __cmpxchg((ptr), \
+		   (unsigned long)(__typeof__(*(ptr)))(old), \
+		   (unsigned long)(__typeof__(*(ptr)))(new), \
+		   sizeof(*(ptr))))
+
+#define cmpxchg(ptr, old, new) \
 ({ \
-	__typeof__(ptr) __ptr = (ptr); \
-	__typeof__(*(ptr)) __old = (old); \
-	__typeof__(*(ptr)) __new = (new); \
-	__typeof__(*(ptr)) __res = 0; \
+	__typeof__(*(ptr)) __res; \
 	\
-	pre_barrier; \
-	\
-	switch (sizeof(*(__ptr))) { \
-	case 4: \
-		__res = __cmpxchg_asm("ll", "sc", __ptr, __old, __new); \
-		break; \
-	case 8: \
-		if (sizeof(long) == 8) { \
-			__res = __cmpxchg_asm("lld", "scd", __ptr, \
-					      __old, __new); \
-			break; \
-		} \
-	default: \
-		__cmpxchg_called_with_bad_pointer(); \
-		break; \
-	} \
-	\
-	post_barrier; \
+	smp_mb__before_llsc(); \
+	__res = cmpxchg_local((ptr), (old), (new)); \
+	smp_llsc_mb(); \
 	\
 	__res; \
 })
 
-#define cmpxchg(ptr, old, new) __cmpxchg(ptr, old, new, smp_mb__before_llsc(), smp_llsc_mb())
-#define cmpxchg_local(ptr, old, new) __cmpxchg(ptr, old, new, , )
-
 #ifdef CONFIG_64BIT
 #define cmpxchg64_local(ptr, o, n) \
 ({ \
@@ -245,4 +205,6 @@ extern void __cmpxchg_called_with_bad_pointer(void);
 #define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
 #endif
 
+#undef __scbeqz
+
 #endif /* __ASM_CMPXCHG_H */
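Aside (not part of the patch): the rework keeps the usual kernel calling convention, where cmpxchg() returns the value it found at the pointer, so callers retry until that matches the expected old value. A minimal sketch of the pattern, with hypothetical names (READ_ONCE() and cmpxchg() come from <linux/compiler.h> and <linux/atomic.h>):

	/* Hypothetical caller; counter and add_if_below() are illustrative. */
	static int counter;

	static void add_if_below(int limit)
	{
		int old;

		do {
			old = READ_ONCE(counter);
			if (old >= limit)
				return;
		/* retry if another CPU changed counter under us */
		} while (cmpxchg(&counter, old, old + 1) != old);
	}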
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 494d38274142..8baa9033b181 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -138,6 +138,9 @@
 #ifndef cpu_has_mips16
 #define cpu_has_mips16		(cpu_data[0].ases & MIPS_ASE_MIPS16)
 #endif
+#ifndef cpu_has_mips16e2
+#define cpu_has_mips16e2	(cpu_data[0].ases & MIPS_ASE_MIPS16E2)
+#endif
 #ifndef cpu_has_mdmx
 #define cpu_has_mdmx		(cpu_data[0].ases & MIPS_ASE_MDMX)
 #endif
@@ -487,6 +490,47 @@
 # define cpu_has_perf		(cpu_data[0].options & MIPS_CPU_PERF)
 #endif
 
+#if defined(CONFIG_SMP) && defined(__mips_isa_rev) && (__mips_isa_rev >= 6)
+/*
+ * Some systems share FTLB RAMs between threads within a core (siblings in
+ * kernel parlance). This means that FTLB entries may become invalid at almost
+ * any point when an entry is evicted due to a sibling thread writing an entry
+ * to the shared FTLB RAM.
+ *
+ * This is only relevant to SMP systems, and the only systems that exhibit this
+ * property implement MIPSr6 or higher so we constrain support for this to
+ * kernels that will run on such systems.
+ */
+# ifndef cpu_has_shared_ftlb_ram
+#  define cpu_has_shared_ftlb_ram \
+	(current_cpu_data.options & MIPS_CPU_SHARED_FTLB_RAM)
+# endif
+
+/*
+ * Some systems take this a step further & share FTLB entries between siblings.
+ * This is implemented as TLB writes happening as usual, but if an entry
+ * written by a sibling exists in the shared FTLB for a translation which would
+ * otherwise cause a TLB refill exception then the CPU will use the entry
+ * written by its sibling rather than triggering a refill & writing a matching
+ * TLB entry for itself.
+ *
+ * This is naturally only valid if a TLB entry is known to be suitable for use
+ * on all siblings in a CPU, and so it only takes effect when MMIDs are in use
+ * rather than ASIDs or when a TLB entry is marked global.
+ */
+# ifndef cpu_has_shared_ftlb_entries
+#  define cpu_has_shared_ftlb_entries \
+	(current_cpu_data.options & MIPS_CPU_SHARED_FTLB_ENTRIES)
+# endif
+#endif /* SMP && __mips_isa_rev >= 6 */
+
+#ifndef cpu_has_shared_ftlb_ram
+# define cpu_has_shared_ftlb_ram 0
+#endif
+#ifndef cpu_has_shared_ftlb_entries
+# define cpu_has_shared_ftlb_entries 0
+#endif
+
 /*
  * Guest capabilities
  */
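Aside (not part of the patch): each cpu_has_* macro can be pinned to a constant by a platform's cpu-feature-overrides.h, as the many per-platform hunks later in this diff do for cpu_has_mips16e2; when pinned, the compiler folds the test away entirely, and otherwise it reads cpu_data at runtime. A hedged sketch with a hypothetical consumer:

	/* Hypothetical consumer of the new flag; not from the patch. */
	static void report_ases(void)
	{
		/* constant-folds to nothing on platforms whose override
		 * header pins cpu_has_mips16e2 to 0; otherwise this tests
		 * cpu_data[0].ases at runtime */
		if (cpu_has_mips16e2)
			pr_info("MIPS16e2 ASE present\n");
	}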
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index bdd6dc18e65c..175fe565f4e1 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -84,6 +84,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
 
 #ifdef CONFIG_SYS_HAS_CPU_MIPS64_R6
 	case CPU_I6400:
+	case CPU_I6500:
 	case CPU_P6600:
 #endif
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 98f59307e6a3..d0c152b989f8 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -124,6 +124,7 @@
 #define PRID_IMP_P5600		0xa800
 #define PRID_IMP_I6400		0xa900
 #define PRID_IMP_M6250		0xab00
+#define PRID_IMP_I6500		0xb000
 
 /*
  * These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
@@ -247,6 +248,7 @@
 #define PRID_REV_LOONGSON3B_R1	0x0006
 #define PRID_REV_LOONGSON3B_R2	0x0007
 #define PRID_REV_LOONGSON3A_R2	0x0008
+#define PRID_REV_LOONGSON3A_R3	0x0009
 
 /*
  * Older processors used to encode processor version and revision in two
@@ -322,7 +324,7 @@ enum cpu_type_enum {
 	 */
 	CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
 	CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
-	CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP,
+	CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP, CPU_I6500,
 
 	CPU_QEMU_GENERIC,
 
@@ -416,6 +418,10 @@ enum cpu_type_enum {
 #define MIPS_CPU_GUESTID	MBIT_ULL(51)	/* CPU uses VZ ASE GuestID feature */
 #define MIPS_CPU_DRG		MBIT_ULL(52)	/* CPU has VZ Direct Root to Guest (DRG) */
 #define MIPS_CPU_UFR		MBIT_ULL(53)	/* CPU supports User mode FR switching */
+#define MIPS_CPU_SHARED_FTLB_RAM \
+				MBIT_ULL(54)	/* CPU shares FTLB RAM with another */
+#define MIPS_CPU_SHARED_FTLB_ENTRIES \
+				MBIT_ULL(55)	/* CPU shares FTLB entries with another */
 
 /*
  * CPU ASE encodings
@@ -430,5 +436,6 @@ enum cpu_type_enum {
 #define MIPS_ASE_VZ		0x00000080 /* Virtualization ASE */
 #define MIPS_ASE_MSA		0x00000100 /* MIPS SIMD Architecture */
 #define MIPS_ASE_DSP3		0x00000200 /* Signal Processing ASE Rev 3*/
+#define MIPS_ASE_MIPS16E2	0x00000400 /* MIPS16e2 */
 
 #endif /* _ASM_CPU_H */
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index ddd1c918103b..c5d351786416 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -18,7 +18,7 @@
 #include <irq.h>
 
 #define IRQ_STACK_SIZE	THREAD_SIZE
-#define IRQ_STACK_START	(IRQ_STACK_SIZE - sizeof(unsigned long))
+#define IRQ_STACK_START	(IRQ_STACK_SIZE - 16)
 
 extern void *irq_stack[NR_CPUS];
diff --git a/arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h
index ade0356df257..e6a8108cde4e 100644
--- a/arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h
@@ -40,6 +40,7 @@
 #endif
 
 #define cpu_has_mips16		0
+#define cpu_has_mips16e2	0
 #define cpu_has_mdmx		0
 #define cpu_has_mips3d		0
 #define cpu_has_smartmips	0
diff --git a/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h b/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h
index c5b6eef0efa7..bace5b9ae4df 100644
--- a/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h
@@ -31,6 +31,7 @@
 #define cpu_has_ejtag		1
 #define cpu_has_llsc		1
 #define cpu_has_mips16		0
+#define cpu_has_mips16e2	0
 #define cpu_has_mdmx		0
 #define cpu_has_mips3d		0
 #define cpu_has_smartmips	0
diff --git a/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h b/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h
index bc1167dbd4e3..b56cf10b91d3 100644
--- a/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h
@@ -19,6 +19,7 @@
 #define cpu_has_ejtag		1
 #define cpu_has_llsc		1
 #define cpu_has_mips16		0
+#define cpu_has_mips16e2	0
 #define cpu_has_mdmx		0
 #define cpu_has_mips3d		0
 #define cpu_has_smartmips	0
diff --git a/arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h
index 30c5cd9fd973..291fe90aafa5 100644
--- a/arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h
@@ -37,6 +37,7 @@
 #endif
 
 #define cpu_has_mips16		0
+#define cpu_has_mips16e2	0
 #define cpu_has_mdmx		0
 #define cpu_has_mips3d		0
 #define cpu_has_smartmips	0
diff --git a/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h b/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h
index 21eae03d752a..2ec10237688c 100644
--- a/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h
@@ -27,6 +27,7 @@
 #define cpu_has_mcheck		0
 #define cpu_has_ejtag		0
 #define cpu_has_mips16		0
+#define cpu_has_mips16e2	0
 #define cpu_has_mdmx		0
 #define cpu_has_mips3d		0
 #define cpu_has_smartmips	0
diff --git a/arch/mips/include/asm/mach-generic/mc146818rtc.h b/arch/mips/include/asm/mach-generic/mc146818rtc.h
index 0b9a942f079d..9c72e540ff56 100644
--- a/arch/mips/include/asm/mach-generic/mc146818rtc.h
+++ b/arch/mips/include/asm/mach-generic/mc146818rtc.h
@@ -27,7 +27,7 @@ static inline void CMOS_WRITE(unsigned char data, unsigned long addr)
 	outb_p(data, RTC_PORT(1));
 }
 
-#define RTC_ALWAYS_BCD	1
+#define RTC_ALWAYS_BCD	0
 
 #ifndef mc146818_decode_year
 #define mc146818_decode_year(year) ((year) < 70 ? (year) + 2000 : (year) + 1900)
diff --git a/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
index 9b19b72dba56..b80d5eafc9db 100644
--- a/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
@@ -19,6 +19,7 @@
 #define cpu_has_32fpr		1
 #define cpu_has_counter		1
 #define cpu_has_mips16		0
+#define cpu_has_mips16e2	0
 #define cpu_has_divec		0
 #define cpu_has_cache_cdex_p	1
 #define cpu_has_prefetch	0
diff --git a/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h
index 7449794eade6..136d6d464e32 100644
--- a/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h
@@ -43,6 +43,7 @@
 #define cpu_has_ejtag		0
 #define cpu_has_llsc		1
 #define cpu_has_mips16		0
+#define cpu_has_mips16e2	0
 #define cpu_has_mdmx		0
 #define cpu_has_mips3d		0
 #define cpu_has_smartmips	0
diff --git a/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h
index 4cec06d133db..ba8b4e30b3e2 100644
--- a/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h
@@ -16,6 +16,7 @@
  */
 #define cpu_has_watch		1
 #define cpu_has_mips16		0
+#define cpu_has_mips16e2	0
 #define cpu_has_divec		0
 #define cpu_has_vce		0
 #define cpu_has_cache_cdex_p	0
diff --git a/arch/mips/include/asm/mach-ip32/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip32/cpu-feature-overrides.h
index 241409b78ff1..63b4c889094b 100644
--- a/arch/mips/include/asm/mach-ip32/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ip32/cpu-feature-overrides.h
@@ -29,6 +29,7 @@
 #define cpu_has_32fpr		1
 #define cpu_has_counter		1
 #define cpu_has_mips16		0
+#define cpu_has_mips16e2	0
 #define cpu_has_vce		0
 #define cpu_has_cache_cdex_s	0
 #define cpu_has_mcheck		0
diff --git a/arch/mips/include/asm/mach-jz4740/cpu-feature-overrides.h b/arch/mips/include/asm/mach-jz4740/cpu-feature-overrides.h
index 0933f94a1e69..7c5e576f9d96 100644
--- a/arch/mips/include/asm/mach-jz4740/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-jz4740/cpu-feature-overrides.h
@@ -23,6 +23,7 @@
 #define cpu_has_ejtag		1
 #define cpu_has_llsc		1
 #define cpu_has_mips16		0
+#define cpu_has_mips16e2	0
 #define cpu_has_mdmx		0
 #define cpu_has_mips3d		0
 #define cpu_has_smartmips	0
diff --git a/arch/mips/include/asm/mach-loongson64/boot_param.h b/arch/mips/include/asm/mach-loongson64/boot_param.h
index d3f3258b7cd4..9f9bb9c53785 100644
--- a/arch/mips/include/asm/mach-loongson64/boot_param.h
+++ b/arch/mips/include/asm/mach-loongson64/boot_param.h
@@ -27,12 +27,22 @@ struct efi_memory_map_loongson {
 } __packed;
 
 enum loongson_cpu_type {
-	Loongson_2E = 0,
-	Loongson_2F = 1,
-	Loongson_3A = 2,
-	Loongson_3B = 3,
-	Loongson_1A = 4,
-	Loongson_1B = 5
+	Legacy_2E = 0x0,
+	Legacy_2F = 0x1,
+	Legacy_3A = 0x2,
+	Legacy_3B = 0x3,
+	Legacy_1A = 0x4,
+	Legacy_1B = 0x5,
+	Legacy_2G = 0x6,
+	Legacy_2H = 0x7,
+	Loongson_1A = 0x100,
+	Loongson_1B = 0x101,
+	Loongson_2E = 0x200,
+	Loongson_2F = 0x201,
+	Loongson_2G = 0x202,
+	Loongson_2H = 0x203,
+	Loongson_3A = 0x300,
+	Loongson_3B = 0x301
 };
 
 /*
diff --git a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
index 89328a3d44d8..581915ce231c 100644
--- a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
@@ -32,6 +32,7 @@
 #define cpu_has_mcheck		0
 #define cpu_has_mdmx		0
 #define cpu_has_mips16		0
+#define cpu_has_mips16e2	0
 #define cpu_has_mips3d		0
 #define cpu_has_mipsmt		0
 #define cpu_has_smartmips	0
diff --git a/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h b/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h
index 091deb1700e5..0c29ff820bb9 100644
--- a/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h
@@ -13,6 +13,7 @@
 #define cpu_has_4k_cache	1
 #define cpu_has_watch		1
 #define cpu_has_mips16		0
+#define cpu_has_mips16e2	0
 #define cpu_has_counter		1
 #define cpu_has_divec		1
 #define cpu_has_vce		0
diff --git a/arch/mips/include/asm/mach-rc32434/cpu-feature-overrides.h b/arch/mips/include/asm/mach-rc32434/cpu-feature-overrides.h
index b15307597ee3..6a1087ee8c6e 100644
--- a/arch/mips/include/asm/mach-rc32434/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-rc32434/cpu-feature-overrides.h
@@ -48,6 +48,7 @@
 #define cpu_has_llsc		1
 
 #define cpu_has_mips16		0
+#define cpu_has_mips16e2	0
 #define cpu_has_mdmx		0
 #define cpu_has_mips3d		0
 #define cpu_has_smartmips	0
diff --git a/arch/mips/include/asm/mach-rm/cpu-feature-overrides.h b/arch/mips/include/asm/mach-rm/cpu-feature-overrides.h
index d38be668e338..e1e182300fea 100644
--- a/arch/mips/include/asm/mach-rm/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-rm/cpu-feature-overrides.h
@@ -17,6 +17,7 @@
 #define cpu_has_counter		1
 #define cpu_has_watch		0
 #define cpu_has_mips16		0
+#define cpu_has_mips16e2	0
 #define cpu_has_divec		0
 #define cpu_has_cache_cdex_p	1
 #define cpu_has_prefetch	0
diff --git a/arch/mips/include/asm/mach-sibyte/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sibyte/cpu-feature-overrides.h
index 92927b62b5a0..7022358057fd 100644
--- a/arch/mips/include/asm/mach-sibyte/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-sibyte/cpu-feature-overrides.h
@@ -13,6 +13,7 @@
  */
 #define cpu_has_watch		1
 #define cpu_has_mips16		0
+#define cpu_has_mips16e2	0
 #define cpu_has_divec		1
 #define cpu_has_vce		0
 #define cpu_has_cache_cdex_p	0
diff --git a/arch/mips/include/asm/mach-tx49xx/cpu-feature-overrides.h b/arch/mips/include/asm/mach-tx49xx/cpu-feature-overrides.h
index 7f5144c6ce2d..b9d39dc45420 100644
--- a/arch/mips/include/asm/mach-tx49xx/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-tx49xx/cpu-feature-overrides.h
@@ -6,6 +6,7 @@
 #define cpu_has_inclusive_pcaches	0
 
 #define cpu_has_mips16		0
+#define cpu_has_mips16e2	0
 #define cpu_has_mdmx		0
 #define cpu_has_mips3d		0
 #define cpu_has_smartmips	0
diff --git a/arch/mips/include/asm/machine.h b/arch/mips/include/asm/machine.h
index 6b444cd9526f..ecb6c7335484 100644
--- a/arch/mips/include/asm/machine.h
+++ b/arch/mips/include/asm/machine.h
@@ -60,4 +60,35 @@ mips_machine_is_compatible(const struct mips_machine *mach, const void *fdt)
 	return NULL;
 }
 
+/**
+ * struct mips_fdt_fixup - Describe a fixup to apply to an FDT
+ * @apply: applies the fixup to @fdt, returns zero on success else -errno
+ * @description: a short description of the fixup
+ *
+ * Describes a fixup applied to an FDT blob by the @apply function. The
+ * @description field provides a short description of the fixup intended for
+ * use in error messages if the @apply function returns non-zero.
+ */
+struct mips_fdt_fixup {
+	int (*apply)(void *fdt);
+	const char *description;
+};
+
+/**
+ * apply_mips_fdt_fixups() - apply fixups to an FDT blob
+ * @fdt_out: buffer in which to place the fixed-up FDT
+ * @fdt_out_size: the size of the @fdt_out buffer
+ * @fdt_in: the FDT blob
+ * @fixups: pointer to an array of fixups to be applied
+ *
+ * Loop through the array of fixups pointed to by @fixups, calling the apply
+ * function on each until either one returns an error or we reach the end of
+ * the list as indicated by an entry with a NULL apply field.
+ *
+ * Return: zero on success, else -errno
+ */
+extern int __init apply_mips_fdt_fixups(void *fdt_out, size_t fdt_out_size,
+					const void *fdt_in,
+					const struct mips_fdt_fixup *fixups);
+
 #endif /* __MIPS_ASM_MACHINE_H__ */
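Aside (not part of the patch): the kernel-doc above implies a NULL-apply-terminated array contract. A hedged sketch of how a platform might use it; all names below are hypothetical, and fdt_setprop_string() is the ordinary libfdt call:

	static unsigned char fdt_buf[16 << 10] __initdata;

	static int __init fixup_model(void *fdt)
	{
		/* node offset 0 is the FDT root node */
		return fdt_setprop_string(fdt, 0, "model", "Example Board");
	}

	static const struct mips_fdt_fixup example_fixups[] __initconst = {
		{ .apply = fixup_model, .description = "set model" },
		{ },	/* sentinel: NULL apply field ends the list */
	};

	static void *__init plat_fixup_fdt(const void *fdt_in)
	{
		if (apply_mips_fdt_fixups(fdt_buf, sizeof(fdt_buf),
					  fdt_in, example_fixups))
			return NULL;
		return fdt_buf;
	}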
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 6875b69f59f7..dbb0eceda2c6 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -652,6 +652,7 @@
 #define MIPS_CONF5_SBRI		(_ULCAST_(1) << 6)
 #define MIPS_CONF5_FRE		(_ULCAST_(1) << 8)
 #define MIPS_CONF5_UFE		(_ULCAST_(1) << 9)
+#define MIPS_CONF5_CA2		(_ULCAST_(1) << 14)
 #define MIPS_CONF5_MSAEN	(_ULCAST_(1) << 27)
 #define MIPS_CONF5_EVA		(_ULCAST_(1) << 28)
 #define MIPS_CONF5_CV		(_ULCAST_(1) << 29)
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index 702c273e67a9..e51add184717 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -47,8 +47,8 @@ typedef struct {
 #define Elf_Mips_Rel Elf32_Rel
 #define Elf_Mips_Rela Elf32_Rela
 
-#define ELF_MIPS_R_SYM(rel) ELF32_R_SYM(rel.r_info)
-#define ELF_MIPS_R_TYPE(rel) ELF32_R_TYPE(rel.r_info)
+#define ELF_MIPS_R_SYM(rel) ELF32_R_SYM((rel).r_info)
+#define ELF_MIPS_R_TYPE(rel) ELF32_R_TYPE((rel).r_info)
 
 #endif
 
@@ -65,8 +65,8 @@ typedef struct {
 #define Elf_Mips_Rel Elf64_Mips_Rel
 #define Elf_Mips_Rela Elf64_Mips_Rela
 
-#define ELF_MIPS_R_SYM(rel) (rel.r_sym)
-#define ELF_MIPS_R_TYPE(rel) (rel.r_type)
+#define ELF_MIPS_R_SYM(rel) ((rel).r_sym)
+#define ELF_MIPS_R_TYPE(rel) ((rel).r_type)
 
 #endif
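Aside (not part of the patch): the module.h hunks above are a macro-hygiene fix. Without parentheses around the argument, an invocation on anything other than a plain identifier parses wrongly; for instance, with a hypothetical Elf_Mips_Rel pointer rel_ptr:

	ELF_MIPS_R_SYM(*rel_ptr)
	/* old expansion: (*rel_ptr.r_sym)   -- '.' binds tighter than '*',
	 *                                      so this fails to compile    */
	/* new expansion: ((*rel_ptr).r_sym) -- dereferences first, as intended */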
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index a1bdb1ea5234..39b9f311c4ef 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -116,7 +116,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	pud_t *pud;
 
-	pud = (pud_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PUD_ORDER);
+	pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_ORDER);
 	if (pud)
 		pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table);
 	return pud;
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 98a117a05fbc..bab3d41e5987 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -47,7 +47,7 @@ extern int __cpu_logical_map[NR_CPUS];
 /* Mask of CPUs which are currently definitely operating coherently */
 extern cpumask_t cpu_coherent_mask;
 
-extern void asmlinkage smp_bootstrap(void);
+extern asmlinkage void smp_bootstrap(void);
 
 extern void calculate_cpu_foreign_map(void);
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index a8df44d60607..a7d21da16b6a 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -9,431 +9,9 @@
 #ifndef _ASM_SPINLOCK_H
 #define _ASM_SPINLOCK_H
 
-#include <linux/compiler.h>
-
-#include <asm/barrier.h>
 #include <asm/processor.h>
-#include <asm/compiler.h>
-#include <asm/war.h>
-
-/*
- * Your basic SMP spinlocks, allowing only a single CPU anywhere
- *
- * Simple spin lock operations.  There are two variants, one clears IRQ's
- * on the local processor, one does not.
- *
- * These are fair FIFO ticket locks
- *
- * (the type definitions are in asm/spinlock_types.h)
- */
-
-
-/*
- * Ticket locks are conceptually two parts, one indicating the current head of
- * the queue, and the other indicating the current tail. The lock is acquired
- * by atomically noting the tail and incrementing it by one (thus adding
- * ourself to the queue and noting our position), then waiting until the head
- * becomes equal to the initial value of the tail.
- */
-
-static inline int arch_spin_is_locked(arch_spinlock_t *lock)
-{
-	u32 counters = ACCESS_ONCE(lock->lock);
-
-	return ((counters >> 16) ^ counters) & 0xffff;
-}
-
-static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
-{
-	return lock.h.serving_now == lock.h.ticket;
-}
-
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	u16 owner = READ_ONCE(lock->h.serving_now);
-	smp_rmb();
-	for (;;) {
-		arch_spinlock_t tmp = READ_ONCE(*lock);
-
-		if (tmp.h.serving_now == tmp.h.ticket ||
-		    tmp.h.serving_now != owner)
-			break;
-
-		cpu_relax();
-	}
-	smp_acquire__after_ctrl_dep();
-}
-
-static inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
-	u32 counters = ACCESS_ONCE(lock->lock);
-
-	return (((counters >> 16) - counters) & 0xffff) > 1;
-}
-#define arch_spin_is_contended	arch_spin_is_contended
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	int my_ticket;
-	int tmp;
-	int inc = 0x10000;
-
-	if (R10000_LLSC_WAR) {
-		__asm__ __volatile__ (
-		"	.set push	# arch_spin_lock	\n"
-		"	.set noreorder	\n"
-		"	\n"
-		"1:	ll %[ticket], %[ticket_ptr]	\n"
-		"	addu %[my_ticket], %[ticket], %[inc]	\n"
-		"	sc %[my_ticket], %[ticket_ptr]	\n"
-		"	beqzl %[my_ticket], 1b	\n"
-		"	nop	\n"
-		"	srl %[my_ticket], %[ticket], 16	\n"
-		"	andi %[ticket], %[ticket], 0xffff	\n"
-		"	bne %[ticket], %[my_ticket], 4f	\n"
-		"	subu %[ticket], %[my_ticket], %[ticket]	\n"
-		"2:	\n"
-		"	.subsection 2	\n"
-		"4:	andi %[ticket], %[ticket], 0xffff	\n"
-		"	sll %[ticket], 5	\n"
-		"	\n"
-		"6:	bnez %[ticket], 6b	\n"
-		"	subu %[ticket], 1	\n"
-		"	\n"
-		"	lhu %[ticket], %[serving_now_ptr]	\n"
-		"	beq %[ticket], %[my_ticket], 2b	\n"
-		"	subu %[ticket], %[my_ticket], %[ticket]	\n"
-		"	b 4b	\n"
-		"	subu %[ticket], %[ticket], 1	\n"
-		"	.previous	\n"
-		"	.set pop	\n"
-		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
-		  [serving_now_ptr] "+m" (lock->h.serving_now),
-		  [ticket] "=&r" (tmp),
-		  [my_ticket] "=&r" (my_ticket)
-		: [inc] "r" (inc));
-	} else {
-		__asm__ __volatile__ (
-		"	.set push	# arch_spin_lock	\n"
-		"	.set noreorder	\n"
-		"	\n"
-		"1:	ll %[ticket], %[ticket_ptr]	\n"
-		"	addu %[my_ticket], %[ticket], %[inc]	\n"
-		"	sc %[my_ticket], %[ticket_ptr]	\n"
-		"	beqz %[my_ticket], 1b	\n"
-		"	srl %[my_ticket], %[ticket], 16	\n"
-		"	andi %[ticket], %[ticket], 0xffff	\n"
-		"	bne %[ticket], %[my_ticket], 4f	\n"
-		"	subu %[ticket], %[my_ticket], %[ticket]	\n"
-		"2:	.insn	\n"
-		"	.subsection 2	\n"
-		"4:	andi %[ticket], %[ticket], 0xffff	\n"
-		"	sll %[ticket], 5	\n"
-		"	\n"
-		"6:	bnez %[ticket], 6b	\n"
-		"	subu %[ticket], 1	\n"
-		"	\n"
-		"	lhu %[ticket], %[serving_now_ptr]	\n"
-		"	beq %[ticket], %[my_ticket], 2b	\n"
-		"	subu %[ticket], %[my_ticket], %[ticket]	\n"
-		"	b 4b	\n"
-		"	subu %[ticket], %[ticket], 1	\n"
-		"	.previous	\n"
-		"	.set pop	\n"
-		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
-		  [serving_now_ptr] "+m" (lock->h.serving_now),
-		  [ticket] "=&r" (tmp),
-		  [my_ticket] "=&r" (my_ticket)
-		: [inc] "r" (inc));
-	}
-
-	smp_llsc_mb();
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	unsigned int serving_now = lock->h.serving_now + 1;
-	wmb();
-	lock->h.serving_now = (u16)serving_now;
-	nudge_writes();
-}
-
-static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	int tmp, tmp2, tmp3;
-	int inc = 0x10000;
-
-	if (R10000_LLSC_WAR) {
-		__asm__ __volatile__ (
-		"	.set push	# arch_spin_trylock	\n"
-		"	.set noreorder	\n"
-		"	\n"
-		"1:	ll %[ticket], %[ticket_ptr]	\n"
-		"	srl %[my_ticket], %[ticket], 16	\n"
-		"	andi %[now_serving], %[ticket], 0xffff	\n"
-		"	bne %[my_ticket], %[now_serving], 3f	\n"
-		"	addu %[ticket], %[ticket], %[inc]	\n"
-		"	sc %[ticket], %[ticket_ptr]	\n"
-		"	beqzl %[ticket], 1b	\n"
-		"	li %[ticket], 1	\n"
-		"2:	\n"
-		"	.subsection 2	\n"
-		"3:	b 2b	\n"
-		"	li %[ticket], 0	\n"
-		"	.previous	\n"
-		"	.set pop	\n"
-		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
-		  [ticket] "=&r" (tmp),
-		  [my_ticket] "=&r" (tmp2),
-		  [now_serving] "=&r" (tmp3)
-		: [inc] "r" (inc));
-	} else {
-		__asm__ __volatile__ (
-		"	.set push	# arch_spin_trylock	\n"
-		"	.set noreorder	\n"
-		"	\n"
-		"1:	ll %[ticket], %[ticket_ptr]	\n"
-		"	srl %[my_ticket], %[ticket], 16	\n"
-		"	andi %[now_serving], %[ticket], 0xffff	\n"
-		"	bne %[my_ticket], %[now_serving], 3f	\n"
-		"	addu %[ticket], %[ticket], %[inc]	\n"
-		"	sc %[ticket], %[ticket_ptr]	\n"
-		"	beqz %[ticket], 1b	\n"
-		"	li %[ticket], 1	\n"
-		"2:	.insn	\n"
-		"	.subsection 2	\n"
-		"3:	b 2b	\n"
-		"	li %[ticket], 0	\n"
-		"	.previous	\n"
-		"	.set pop	\n"
-		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
-		  [ticket] "=&r" (tmp),
-		  [my_ticket] "=&r" (tmp2),
-		  [now_serving] "=&r" (tmp3)
-		: [inc] "r" (inc));
-	}
-
-	smp_llsc_mb();
-
-	return tmp;
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers but only one writer.
- *
- * NOTE! it is quite common to have readers in interrupts but no interrupt
- * writers. For those circumstances we can "mix" irq-safe locks - any writer
- * needs to get a irq-safe write-lock, but readers can get non-irqsafe
- * read-locks.
- */
-
-/*
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(rw)	((rw)->lock >= 0)
-
-/*
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(rw) (!(rw)->lock)
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-	unsigned int tmp;
-
-	if (R10000_LLSC_WAR) {
-		__asm__ __volatile__(
-		"	.set noreorder	# arch_read_lock	\n"
-		"1:	ll %1, %2	\n"
-		"	bltz %1, 1b	\n"
-		"	addu %1, 1	\n"
-		"	sc %1, %0	\n"
-		"	beqzl %1, 1b	\n"
-		"	nop	\n"
-		"	.set reorder	\n"
-		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
-		: GCC_OFF_SMALL_ASM() (rw->lock)
-		: "memory");
-	} else {
-		do {
-			__asm__ __volatile__(
-			"1:	ll %1, %2	# arch_read_lock	\n"
-			"	bltz %1, 1b	\n"
-			"	addu %1, 1	\n"
-			"2:	sc %1, %0	\n"
-			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
-			: GCC_OFF_SMALL_ASM() (rw->lock)
-			: "memory");
-		} while (unlikely(!tmp));
-	}
-
-	smp_llsc_mb();
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned int tmp;
-
-	smp_mb__before_llsc();
-
-	if (R10000_LLSC_WAR) {
-		__asm__ __volatile__(
-		"1:	ll %1, %2	# arch_read_unlock	\n"
-		"	addiu %1, -1	\n"
-		"	sc %1, %0	\n"
-		"	beqzl %1, 1b	\n"
-		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
-		: GCC_OFF_SMALL_ASM() (rw->lock)
-		: "memory");
-	} else {
-		do {
-			__asm__ __volatile__(
-			"1:	ll %1, %2	# arch_read_unlock	\n"
-			"	addiu %1, -1	\n"
-			"	sc %1, %0	\n"
-			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
-			: GCC_OFF_SMALL_ASM() (rw->lock)
-			: "memory");
-		} while (unlikely(!tmp));
-	}
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-	unsigned int tmp;
-
-	if (R10000_LLSC_WAR) {
-		__asm__ __volatile__(
-		"	.set noreorder	# arch_write_lock	\n"
-		"1:	ll %1, %2	\n"
-		"	bnez %1, 1b	\n"
-		"	lui %1, 0x8000	\n"
-		"	sc %1, %0	\n"
-		"	beqzl %1, 1b	\n"
-		"	nop	\n"
-		"	.set reorder	\n"
-		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
-		: GCC_OFF_SMALL_ASM() (rw->lock)
-		: "memory");
-	} else {
-		do {
-			__asm__ __volatile__(
-			"1:	ll %1, %2	# arch_write_lock	\n"
-			"	bnez %1, 1b	\n"
-			"	lui %1, 0x8000	\n"
-			"2:	sc %1, %0	\n"
-			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
-			: GCC_OFF_SMALL_ASM() (rw->lock)
-			: "memory");
-		} while (unlikely(!tmp));
-	}
-
-	smp_llsc_mb();
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-	smp_mb__before_llsc();
-
-	__asm__ __volatile__(
-	"	# arch_write_unlock	\n"
-	"	sw $0, %0	\n"
-	: "=m" (rw->lock)
-	: "m" (rw->lock)
-	: "memory");
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
-	unsigned int tmp;
-	int ret;
-
-	if (R10000_LLSC_WAR) {
-		__asm__ __volatile__(
-		"	.set noreorder	# arch_read_trylock	\n"
-		"	li %2, 0	\n"
-		"1:	ll %1, %3	\n"
-		"	bltz %1, 2f	\n"
-		"	addu %1, 1	\n"
-		"	sc %1, %0	\n"
-		"	.set reorder	\n"
-		"	beqzl %1, 1b	\n"
-		"	nop	\n"
-		__WEAK_LLSC_MB
-		"	li %2, 1	\n"
-		"2:	\n"
-		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
-		: GCC_OFF_SMALL_ASM() (rw->lock)
-		: "memory");
-	} else {
-		__asm__ __volatile__(
-		"	.set noreorder	# arch_read_trylock	\n"
-		"	li %2, 0	\n"
-		"1:	ll %1, %3	\n"
-		"	bltz %1, 2f	\n"
-		"	addu %1, 1	\n"
-		"	sc %1, %0	\n"
-		"	beqz %1, 1b	\n"
-		"	nop	\n"
-		"	.set reorder	\n"
-		__WEAK_LLSC_MB
-		"	li %2, 1	\n"
-		"2:	.insn	\n"
-		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
-		: GCC_OFF_SMALL_ASM() (rw->lock)
-		: "memory");
-	}
-
-	return ret;
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
-	unsigned int tmp;
-	int ret;
-
-	if (R10000_LLSC_WAR) {
-		__asm__ __volatile__(
-		"	.set noreorder	# arch_write_trylock	\n"
-		"	li %2, 0	\n"
-		"1:	ll %1, %3	\n"
-		"	bnez %1, 2f	\n"
-		"	lui %1, 0x8000	\n"
-		"	sc %1, %0	\n"
-		"	beqzl %1, 1b	\n"
-		"	nop	\n"
-		__WEAK_LLSC_MB
-		"	li %2, 1	\n"
-		"	.set reorder	\n"
-		"2:	\n"
-		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
-		: GCC_OFF_SMALL_ASM() (rw->lock)
-		: "memory");
-	} else {
-		do {
-			__asm__ __volatile__(
-			"	ll %1, %3	# arch_write_trylock	\n"
-			"	li %2, 0	\n"
-			"	bnez %1, 2f	\n"
-			"	lui %1, 0x8000	\n"
-			"	sc %1, %0	\n"
-			"	li %2, 1	\n"
-			"2:	.insn	\n"
-			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
-			  "=&r" (ret)
-			: GCC_OFF_SMALL_ASM() (rw->lock)
-			: "memory");
-		} while (unlikely(!tmp));
-
-		smp_llsc_mb();
-	}
-
-	return ret;
-}
+#include <asm/qrwlock.h>
+#include <asm/qspinlock.h>
 
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
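Aside (not part of the patch): the deleted block comment describes the ticket-lock algorithm the removed assembly implemented. Rendered as conceptual C, with GCC __atomic builtins standing in for the ll/sc loops and the kernel's barriers (a sketch only, not kernel code):

	/* Conceptual model of the removed ticket lock. */
	struct ticket_lock {
		unsigned short serving_now;	/* head: ticket being served */
		unsigned short ticket;		/* tail: next free ticket */
	};

	static void ticket_lock(struct ticket_lock *l)
	{
		/* atomically take the next ticket (the ll/sc increment loop) */
		unsigned short mine =
			__atomic_fetch_add(&l->ticket, 1, __ATOMIC_ACQUIRE);

		/* spin until our number comes up */
		while (__atomic_load_n(&l->serving_now, __ATOMIC_ACQUIRE) != mine)
			;
	}

	static void ticket_unlock(struct ticket_lock *l)
	{
		/* hand the lock to the next waiter in FIFO order */
		__atomic_store_n(&l->serving_now, l->serving_now + 1,
				 __ATOMIC_RELEASE);
	}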
diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h
index 9b2528e612c0..177e722eb96c 100644
--- a/arch/mips/include/asm/spinlock_types.h
+++ b/arch/mips/include/asm/spinlock_types.h
@@ -1,37 +1,7 @@
 #ifndef _ASM_SPINLOCK_TYPES_H
 #define _ASM_SPINLOCK_TYPES_H
 
-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
-
-#include <linux/types.h>
-
-#include <asm/byteorder.h>
-
-typedef union {
-	/*
-	 * bits  0..15 : serving_now
-	 * bits 16..31 : ticket
-	 */
-	u32 lock;
-	struct {
-#ifdef __BIG_ENDIAN
-		u16 ticket;
-		u16 serving_now;
-#else
-		u16 serving_now;
-		u16 ticket;
-#endif
-	} h;
-} arch_spinlock_t;
-
-#define __ARCH_SPIN_LOCK_UNLOCKED	{ .lock = 0 }
-
-typedef struct {
-	volatile unsigned int lock;
-} arch_rwlock_t;
-
-#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qrwlock_types.h>
 
 #endif
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index d87882513ee3..7c713025b23f 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -85,7 +85,7 @@ static inline void syscall_set_return_value(struct task_struct *task,
 {
 	if (error) {
 		regs->regs[2] = -error;
-		regs->regs[7] = -1;
+		regs->regs[7] = 1;
 	} else {
 		regs->regs[2] = val;
 		regs->regs[7] = 0;
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 9700251159b1..b71306947290 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -497,283 +497,6 @@ do {									\
 extern void __put_user_unknown(void);
 
 /*
- * ul{b,h,w} are macros and there are no equivalent macros for EVA.
- * EVA unaligned access is handled in the ADE exception handler.
- */
-#ifndef CONFIG_EVA
-/*
- * put_user_unaligned: - Write a simple value into user space.
- * @x:	 Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space.  It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
- *
- * Returns zero on success, or -EFAULT on error.
- */
-#define put_user_unaligned(x,ptr)	\
-	__put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
-
-/*
- * get_user_unaligned: - Get a simple variable from user space.
- * @x:	 Variable to store result.
- * @ptr: Source address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space.  It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and the result of
- * dereferencing @ptr must be assignable to @x without a cast.
- *
- * Returns zero on success, or -EFAULT on error.
- * On error, the variable @x is set to zero.
- */
-#define get_user_unaligned(x,ptr) \
-	__get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
-
-/*
- * __put_user_unaligned: - Write a simple value into user space, with less checking.
- * @x:	 Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space.  It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
- *
- * Caller must check the pointer with access_ok() before calling this
- * function.
- *
- * Returns zero on success, or -EFAULT on error.
- */
-#define __put_user_unaligned(x,ptr) \
-	__put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
-
-/*
- * __get_user_unaligned: - Get a simple variable from user space, with less checking.
- * @x:	 Variable to store result.
- * @ptr: Source address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space.  It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and the result of
- * dereferencing @ptr must be assignable to @x without a cast.
- *
- * Caller must check the pointer with access_ok() before calling this
- * function.
- *
- * Returns zero on success, or -EFAULT on error.
- * On error, the variable @x is set to zero.
- */
-#define __get_user_unaligned(x,ptr) \
-	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
-
-/*
- * Yuck.  We need two variants, one for 64bit operation and one
- * for 32 bit mode and old iron.
- */
-#ifdef CONFIG_32BIT
-#define __GET_USER_UNALIGNED_DW(val, ptr)				\
-	__get_user_unaligned_asm_ll32(val, ptr)
-#endif
-#ifdef CONFIG_64BIT
-#define __GET_USER_UNALIGNED_DW(val, ptr)				\
-	__get_user_unaligned_asm(val, "uld", ptr)
-#endif
-
-extern void __get_user_unaligned_unknown(void);
-
-#define __get_user_unaligned_common(val, size, ptr)			\
-do {									\
-	switch (size) {							\
-	case 1: __get_data_asm(val, "lb", ptr); break;			\
-	case 2: __get_data_unaligned_asm(val, "ulh", ptr); break;	\
-	case 4: __get_data_unaligned_asm(val, "ulw", ptr); break;	\
-	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;		\
-	default: __get_user_unaligned_unknown(); break;			\
-	}								\
-} while (0)
-
-#define __get_user_unaligned_nocheck(x,ptr,size)			\
-({									\
-	int __gu_err;							\
-									\
-	__get_user_unaligned_common((x), size, ptr);			\
-	__gu_err;							\
-})
-
-#define __get_user_unaligned_check(x,ptr,size)				\
-({									\
-	int __gu_err = -EFAULT;						\
-	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
-									\
-	if (likely(access_ok(VERIFY_READ, __gu_ptr, size)))		\
-		__get_user_unaligned_common((x), size, __gu_ptr);	\
-									\
-	__gu_err;							\
-})
-
-#define __get_data_unaligned_asm(val, insn, addr)			\
-{									\
-	long __gu_tmp;							\
-									\
-	__asm__ __volatile__(						\
-	"1:	" insn "	%1, %3				\n"	\
-	"2:							\n"	\
-	"	.insn						\n"	\
-	"	.section .fixup,\"ax\"				\n"	\
-	"3:	li	%0, %4					\n"	\
-	"	move	%1, $0					\n"	\
-	"	j	2b					\n"	\
-	"	.previous					\n"	\
-	"	.section __ex_table,\"a\"			\n"	\
-	"	"__UA_ADDR "\t1b, 3b				\n"	\
-	"	"__UA_ADDR "\t1b + 4, 3b			\n"	\
-	"	.previous					\n"	\
-	: "=r" (__gu_err), "=r" (__gu_tmp)				\
-	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
-									\
-	(val) = (__typeof__(*(addr))) __gu_tmp;				\
-}
-
-/*
- * Get a long long 64 using 32 bit registers.
- */
-#define __get_user_unaligned_asm_ll32(val, addr)			\
-{									\
-	unsigned long long __gu_tmp;					\
-									\
-	__asm__ __volatile__(						\
-	"1:	ulw	%1, (%3)				\n"	\
-	"2:	ulw	%D1, 4(%3)				\n"	\
-	"	move	%0, $0					\n"	\
-	"3:							\n"	\
-	"	.insn						\n"	\
-	"	.section	.fixup,\"ax\"			\n"	\
-	"4:	li	%0, %4					\n"	\
-	"	move	%1, $0					\n"	\
-	"	move	%D1, $0					\n"	\
-	"	j	3b					\n"	\
-	"	.previous					\n"	\
-	"	.section	__ex_table,\"a\"		\n"	\
-	"	" __UA_ADDR "	1b, 4b				\n"	\
-	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
-	"	" __UA_ADDR "	2b, 4b				\n"	\
-	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
-	"	.previous					\n"	\
-	: "=r" (__gu_err), "=&r" (__gu_tmp)				\
-	: "0" (0), "r" (addr), "i" (-EFAULT));				\
-	(val) = (__typeof__(*(addr))) __gu_tmp;				\
-}
-
-/*
- * Yuck.  We need two variants, one for 64bit operation and one
- * for 32 bit mode and old iron.
- */
-#ifdef CONFIG_32BIT
-#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
-#endif
-#ifdef CONFIG_64BIT
-#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
-#endif
-
-#define __put_user_unaligned_common(ptr, size)				\
-do {									\
-	switch (size) {							\
-	case 1: __put_data_asm("sb", ptr); break;			\
-	case 2: __put_user_unaligned_asm("ush", ptr); break;		\
-	case 4: __put_user_unaligned_asm("usw", ptr); break;		\
-	case 8: __PUT_USER_UNALIGNED_DW(ptr); break;			\
-	default: __put_user_unaligned_unknown(); break;			\
-	}								\
-} while (0)
-
-#define __put_user_unaligned_nocheck(x,ptr,size)			\
-({									\
-	__typeof__(*(ptr)) __pu_val;					\
-	int __pu_err = 0;						\
-									\
-	__pu_val = (x);							\
-	__put_user_unaligned_common(ptr, size);				\
-	__pu_err;							\
-})
-
-#define __put_user_unaligned_check(x,ptr,size)				\
-({									\
-	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
-	__typeof__(*(ptr)) __pu_val = (x);				\
-	int __pu_err = -EFAULT;						\
-									\
-	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size)))		\
-		__put_user_unaligned_common(__pu_addr, size);		\
-									\
-	__pu_err;							\
-})
-
-#define __put_user_unaligned_asm(insn, ptr)				\
-{									\
-	__asm__ __volatile__(						\
-	"1:	" insn "	%z2, %3	# __put_user_unaligned_asm\n"	\
-	"2:							\n"	\
-	"	.insn						\n"	\
-	"	.section	.fixup,\"ax\"			\n"	\
-	"3:	li	%0, %4					\n"	\
-	"	j	2b					\n"	\
-	"	.previous					\n"	\
-	"	.section	__ex_table,\"a\"		\n"	\
-	"	" __UA_ADDR "	1b, 3b				\n"	\
-	"	.previous					\n"	\
-	: "=r" (__pu_err)						\
-	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
-	  "i" (-EFAULT));						\
-}
-
-#define __put_user_unaligned_asm_ll32(ptr)				\
-{									\
-	__asm__ __volatile__(						\
-	"1:	sw	%2, (%3)	# __put_user_unaligned_asm_ll32	\n" \
-	"2:	sw	%D2, 4(%3)				\n"	\
-	"3:							\n"	\
-	"	.insn						\n"	\
-	"	.section	.fixup,\"ax\"			\n"	\
-	"4:	li	%0, %4					\n"	\
-	"	j	3b					\n"	\
-	"	.previous					\n"	\
-	"	.section	__ex_table,\"a\"		\n"	\
-	"	" __UA_ADDR "	1b, 4b				\n"	\
-	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
-	"	" __UA_ADDR "	2b, 4b				\n"	\
-	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
-	"	.previous"						\
-	: "=r" (__pu_err)						\
-	: "0" (0), "r" (__pu_val), "r" (ptr),				\
-	  "i" (-EFAULT));						\
-}
-
-extern void __put_user_unaligned_unknown(void);
-#endif
-
-/*
  * We're generating jump to subroutines which will be outside the range of
  * jump instructions
  */
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index 3748f4d120a5..59dae37f6b8d 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -72,9 +72,12 @@ Ip_u1u2s3(_beq);
 Ip_u1u2s3(_beql);
 Ip_u1s2(_bgez);
 Ip_u1s2(_bgezl);
+Ip_u1s2(_bgtz);
+Ip_u1s2(_blez);
 Ip_u1s2(_bltz);
 Ip_u1s2(_bltzl);
 Ip_u1u2s3(_bne);
+Ip_u1(_break);
 Ip_u2s3u1(_cache);
 Ip_u1u2(_cfc1);
 Ip_u2u1(_cfcmsa);
@@ -82,19 +85,28 @@ Ip_u1u2(_ctc1);
 Ip_u2u1(_ctcmsa);
 Ip_u2u1s3(_daddiu);
 Ip_u3u1u2(_daddu);
+Ip_u1u2(_ddivu);
 Ip_u1(_di);
 Ip_u2u1msbu3(_dins);
 Ip_u2u1msbu3(_dinsm);
+Ip_u2u1msbu3(_dinsu);
 Ip_u1u2(_divu);
 Ip_u1u2u3(_dmfc0);
 Ip_u1u2u3(_dmtc0);
+Ip_u1u2(_dmultu);
 Ip_u2u1u3(_drotr);
 Ip_u2u1u3(_drotr32);
+Ip_u2u1(_dsbh);
+Ip_u2u1(_dshd);
 Ip_u2u1u3(_dsll);
 Ip_u2u1u3(_dsll32);
+Ip_u3u2u1(_dsllv);
 Ip_u2u1u3(_dsra);
+Ip_u2u1u3(_dsra32);
+Ip_u3u2u1(_dsrav);
 Ip_u2u1u3(_dsrl);
 Ip_u2u1u3(_dsrl32);
+Ip_u3u2u1(_dsrlv);
 Ip_u3u1u2(_dsubu);
 Ip_0(_eret);
 Ip_u2u1msbu3(_ext);
@@ -104,6 +116,7 @@ Ip_u1(_jal);
 Ip_u2u1(_jalr);
 Ip_u1(_jr);
 Ip_u2s3u1(_lb);
+Ip_u2s3u1(_lbu);
 Ip_u2s3u1(_ld);
 Ip_u3u1u2(_ldx);
 Ip_u2s3u1(_lh);
@@ -112,27 +125,35 @@ Ip_u2s3u1(_ll);
 Ip_u2s3u1(_lld);
 Ip_u1s2(_lui);
 Ip_u2s3u1(_lw);
+Ip_u2s3u1(_lwu);
 Ip_u3u1u2(_lwx);
 Ip_u1u2u3(_mfc0);
 Ip_u1u2u3(_mfhc0);
 Ip_u1(_mfhi);
 Ip_u1(_mflo);
+Ip_u3u1u2(_movn);
+Ip_u3u1u2(_movz);
 Ip_u1u2u3(_mtc0);
 Ip_u1u2u3(_mthc0);
 Ip_u1(_mthi);
 Ip_u1(_mtlo);
 Ip_u3u1u2(_mul);
+Ip_u1u2(_multu);
+Ip_u3u1u2(_nor);
 Ip_u3u1u2(_or);
 Ip_u2u1u3(_ori);
 Ip_u2s3u1(_pref);
 Ip_0(_rfe);
 Ip_u2u1u3(_rotr);
+Ip_u2s3u1(_sb);
 Ip_u2s3u1(_sc);
 Ip_u2s3u1(_scd);
 Ip_u2s3u1(_sd);
+Ip_u2s3u1(_sh);
 Ip_u2u1u3(_sll);
 Ip_u3u2u1(_sllv);
 Ip_s3s1s2(_slt);
+Ip_u2u1s3(_slti);
 Ip_u2u1s3(_sltiu);
 Ip_u3u1u2(_sltu);
 Ip_u2u1u3(_sra);
@@ -248,6 +269,15 @@ static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1,
 		uasm_i_dsrl32(p, a1, a2, a3 - 32);
 }
 
+static inline void uasm_i_dsra_safe(u32 **p, unsigned int a1,
+				    unsigned int a2, unsigned int a3)
+{
+	if (a3 < 32)
+		uasm_i_dsra(p, a1, a2, a3);
+	else
+		uasm_i_dsra32(p, a1, a2, a3 - 32);
+}
+
 /* Handle relocations. */
 struct uasm_reloc {
 	u32 *addr;
diff --git a/arch/mips/include/asm/vdso.h b/arch/mips/include/asm/vdso.h
index 8f4ca5dd992b..b7cd6cf77b83 100644
--- a/arch/mips/include/asm/vdso.h
+++ b/arch/mips/include/asm/vdso.h
@@ -79,8 +79,8 @@ union mips_vdso_data {
 		struct {
 			u64 xtime_sec;
 			u64 xtime_nsec;
-			u32 wall_to_mono_sec;
-			u32 wall_to_mono_nsec;
+			u64 wall_to_mono_sec;
+			u64 wall_to_mono_nsec;
 			u32 seq_count;
 			u32 cs_shift;
 			u8 clock_mode;
diff --git a/arch/mips/include/asm/yamon-dt.h b/arch/mips/include/asm/yamon-dt.h
new file mode 100644
index 000000000000..485cfe3e45e1
--- /dev/null
+++ b/arch/mips/include/asm/yamon-dt.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MIPS_ASM_YAMON_DT_H__
+#define __MIPS_ASM_YAMON_DT_H__
+
+#include <linux/types.h>
+
+/**
+ * struct yamon_mem_region - Represents a contiguous range of physical RAM.
+ * @start:	Start physical address.
+ * @size:	Maximum size of region.
+ * @discard:	Length of additional memory to discard after the region.
+ */
+struct yamon_mem_region {
+	phys_addr_t	start;
+	phys_addr_t	size;
+	phys_addr_t	discard;
+};
+
+/**
+ * yamon_dt_append_cmdline() - Append YAMON-provided command line to /chosen
+ * @fdt: the FDT blob
+ *
+ * Write the YAMON-provided command line to the bootargs property of the
+ * /chosen node in @fdt.
+ *
+ * Return: 0 on success, else -errno
+ */
+extern __init int yamon_dt_append_cmdline(void *fdt);
+
+/**
+ * yamon_dt_append_memory() - Append YAMON-provided memory info to /memory
+ * @fdt: the FDT blob
+ * @regions: zero size terminated array of physical memory regions
+ *
+ * Generate a /memory node in @fdt based upon memory size information provided
+ * by YAMON in its environment and the @regions array.
+ *
+ * Return: 0 on success, else -errno
+ */
+extern __init int yamon_dt_append_memory(void *fdt,
+					const struct yamon_mem_region *regions);
+
+/**
+ * yamon_dt_serial_config() - Append YAMON-provided serial config to /chosen
+ * @fdt: the FDT blob
+ *
+ * Generate a stdout-path property in the /chosen node of @fdt, based upon
+ * information provided in the YAMON environment about the UART configuration
+ * of the system.
+ *
+ * Return: 0 on success, else -errno
+ */
+extern __init int yamon_dt_serial_config(void *fdt);
+
+#endif /* __MIPS_ASM_YAMON_DT_H__ */
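Aside (not part of the patch): per the kernel-doc above, the regions array passed to yamon_dt_append_memory() is terminated by a zero-size entry. A hedged sketch of a board file wiring these helpers up; the names and addresses below are invented:

	/* Hypothetical platform code using the new yamon-dt helpers. */
	static const struct yamon_mem_region mem_regions[] __initconst = {
		{ .start = 0x00000000, .size = 0x10000000 },	/* first 256 MiB */
		{ },	/* sentinel: zero size terminates the array */
	};

	static int __init fill_fdt_from_yamon(void *fdt)
	{
		int err;

		err = yamon_dt_append_cmdline(fdt);
		if (err)
			return err;

		err = yamon_dt_append_memory(fdt, mem_regions);
		if (err)
			return err;

		return yamon_dt_serial_config(fdt);
	}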