Diffstat (limited to 'arch/mips/include/asm')
-rw-r--r--  arch/mips/include/asm/Kbuild | 5
-rw-r--r--  arch/mips/include/asm/asm-eva.h | 135
-rw-r--r--  arch/mips/include/asm/asm.h | 13
-rw-r--r--  arch/mips/include/asm/asmmacro-32.h | 128
-rw-r--r--  arch/mips/include/asm/asmmacro.h | 345
-rw-r--r--  arch/mips/include/asm/atomic.h | 40
-rw-r--r--  arch/mips/include/asm/bitops.h | 28
-rw-r--r--  arch/mips/include/asm/bootinfo.h | 26
-rw-r--r--  arch/mips/include/asm/checksum.h | 44
-rw-r--r--  arch/mips/include/asm/cmpxchg.h | 20
-rw-r--r--  arch/mips/include/asm/cpu-features.h | 10
-rw-r--r--  arch/mips/include/asm/cpu-info.h | 28
-rw-r--r--  arch/mips/include/asm/cpu-type.h | 6
-rw-r--r--  arch/mips/include/asm/cpu.h | 15
-rw-r--r--  arch/mips/include/asm/dma-mapping.h | 5
-rw-r--r--  arch/mips/include/asm/fpu.h | 4
-rw-r--r--  arch/mips/include/asm/ftrace.h | 20
-rw-r--r--  arch/mips/include/asm/futex.h | 25
-rw-r--r--  arch/mips/include/asm/fw/fw.h | 2
-rw-r--r--  arch/mips/include/asm/gcmpregs.h | 125
-rw-r--r--  arch/mips/include/asm/gic.h | 3
-rw-r--r--  arch/mips/include/asm/io.h | 8
-rw-r--r--  arch/mips/include/asm/kvm_host.h | 417
-rw-r--r--  arch/mips/include/asm/local.h | 8
-rw-r--r--  arch/mips/include/asm/mach-au1x00/au1000.h | 12
-rw-r--r--  arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h | 8
-rw-r--r--  arch/mips/include/asm/mach-db1x00/db1200.h | 91
-rw-r--r--  arch/mips/include/asm/mach-db1x00/db1300.h | 40
-rw-r--r--  arch/mips/include/asm/mach-loongson/boot_param.h | 163
-rw-r--r--  arch/mips/include/asm/mach-loongson/dma-coherence.h | 22
-rw-r--r--  arch/mips/include/asm/mach-loongson/irq.h | 44
-rw-r--r--  arch/mips/include/asm/mach-loongson/loongson.h | 28
-rw-r--r--  arch/mips/include/asm/mach-loongson/machine.h | 6
-rw-r--r--  arch/mips/include/asm/mach-loongson/pci.h | 5
-rw-r--r--  arch/mips/include/asm/mach-loongson/spaces.h | 9
-rw-r--r--  arch/mips/include/asm/mach-malta/kernel-entry-init.h | 115
-rw-r--r--  arch/mips/include/asm/mach-malta/spaces.h | 46
-rw-r--r--  arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h | 12
-rw-r--r--  arch/mips/include/asm/mips-boards/malta.h | 5
-rw-r--r--  arch/mips/include/asm/mips-boards/piix4.h | 5
-rw-r--r--  arch/mips/include/asm/mips-cm.h | 322
-rw-r--r--  arch/mips/include/asm/mips-cpc.h | 150
-rw-r--r--  arch/mips/include/asm/mips_mt.h | 5
-rw-r--r--  arch/mips/include/asm/mipsmtregs.h | 11
-rw-r--r--  arch/mips/include/asm/mipsregs.h | 22
-rw-r--r--  arch/mips/include/asm/module.h | 2
-rw-r--r--  arch/mips/include/asm/msa.h | 203
-rw-r--r--  arch/mips/include/asm/page.h | 2
-rw-r--r--  arch/mips/include/asm/pgtable-bits.h | 9
-rw-r--r--  arch/mips/include/asm/processor.h | 45
-rw-r--r--  arch/mips/include/asm/ptrace.h | 2
-rw-r--r--  arch/mips/include/asm/r4kcache.h | 175
-rw-r--r--  arch/mips/include/asm/sigcontext.h | 2
-rw-r--r--  arch/mips/include/asm/smp-cps.h | 33
-rw-r--r--  arch/mips/include/asm/smp-ops.h | 17
-rw-r--r--  arch/mips/include/asm/smp.h | 1
-rw-r--r--  arch/mips/include/asm/stackframe.h | 2
-rw-r--r--  arch/mips/include/asm/switch_to.h | 22
-rw-r--r--  arch/mips/include/asm/syscall.h | 42
-rw-r--r--  arch/mips/include/asm/thread_info.h | 7
-rw-r--r--  arch/mips/include/asm/topology.h | 4
-rw-r--r--  arch/mips/include/asm/uaccess.h | 559
-rw-r--r--  arch/mips/include/asm/unistd.h | 1
63 files changed, 2831 insertions, 878 deletions
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 2d7f65052c1f..05439187891d 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -2,16 +2,17 @@
generic-y += cputime.h
generic-y += current.h
generic-y += emergency-restart.h
+generic-y += hash.h
generic-y += local64.h
+generic-y += mcs_spinlock.h
generic-y += mutex.h
generic-y += parport.h
generic-y += percpu.h
+generic-y += preempt.h
generic-y += scatterlist.h
generic-y += sections.h
generic-y += segment.h
generic-y += serial.h
generic-y += trace_clock.h
-generic-y += preempt.h
generic-y += ucontext.h
generic-y += xor.h
-generic-y += hash.h
diff --git a/arch/mips/include/asm/asm-eva.h b/arch/mips/include/asm/asm-eva.h
new file mode 100644
index 000000000000..e41c56e375b1
--- /dev/null
+++ b/arch/mips/include/asm/asm-eva.h
@@ -0,0 +1,135 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ *
+ */
+
+#ifndef __ASM_ASM_EVA_H
+#define __ASM_ASM_EVA_H
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_EVA
+
+#define __BUILD_EVA_INSN(insn, reg, addr) \
+ " .set push\n" \
+ " .set mips0\n" \
+ " .set eva\n" \
+ " "insn" "reg", "addr "\n" \
+ " .set pop\n"
+
+#define user_cache(op, base) __BUILD_EVA_INSN("cachee", op, base)
+#define user_ll(reg, addr) __BUILD_EVA_INSN("lle", reg, addr)
+#define user_sc(reg, addr) __BUILD_EVA_INSN("sce", reg, addr)
+#define user_lw(reg, addr) __BUILD_EVA_INSN("lwe", reg, addr)
+#define user_lwl(reg, addr) __BUILD_EVA_INSN("lwle", reg, addr)
+#define user_lwr(reg, addr) __BUILD_EVA_INSN("lwre", reg, addr)
+#define user_lh(reg, addr) __BUILD_EVA_INSN("lhe", reg, addr)
+#define user_lb(reg, addr) __BUILD_EVA_INSN("lbe", reg, addr)
+#define user_lbu(reg, addr) __BUILD_EVA_INSN("lbue", reg, addr)
+/* No 64-bit EVA instruction for loading double words */
+#define user_ld(reg, addr) user_lw(reg, addr)
+#define user_sw(reg, addr) __BUILD_EVA_INSN("swe", reg, addr)
+#define user_swl(reg, addr) __BUILD_EVA_INSN("swle", reg, addr)
+#define user_swr(reg, addr) __BUILD_EVA_INSN("swre", reg, addr)
+#define user_sh(reg, addr) __BUILD_EVA_INSN("she", reg, addr)
+#define user_sb(reg, addr) __BUILD_EVA_INSN("sbe", reg, addr)
+/* No 64-bit EVA instruction for storing double words */
+#define user_sd(reg, addr) user_sw(reg, addr)
+
+#else
+
+#define user_cache(op, base) "cache " op ", " base "\n"
+#define user_ll(reg, addr) "ll " reg ", " addr "\n"
+#define user_sc(reg, addr) "sc " reg ", " addr "\n"
+#define user_lw(reg, addr) "lw " reg ", " addr "\n"
+#define user_lwl(reg, addr) "lwl " reg ", " addr "\n"
+#define user_lwr(reg, addr) "lwr " reg ", " addr "\n"
+#define user_lh(reg, addr) "lh " reg ", " addr "\n"
+#define user_lb(reg, addr) "lb " reg ", " addr "\n"
+#define user_lbu(reg, addr) "lbu " reg ", " addr "\n"
+#define user_sw(reg, addr) "sw " reg ", " addr "\n"
+#define user_swl(reg, addr) "swl " reg ", " addr "\n"
+#define user_swr(reg, addr) "swr " reg ", " addr "\n"
+#define user_sh(reg, addr) "sh " reg ", " addr "\n"
+#define user_sb(reg, addr) "sb " reg ", " addr "\n"
+
+#ifdef CONFIG_32BIT
+/*
+ * There are no 'sd' or 'ld' instructions in 32-bit mode, but
+ * the code will still do the correct thing
+ */
+#define user_sd(reg, addr) user_sw(reg, addr)
+#define user_ld(reg, addr) user_lw(reg, addr)
+#else
+#define user_sd(reg, addr) "sd " reg", " addr "\n"
+#define user_ld(reg, addr) "ld " reg", " addr "\n"
+#endif /* CONFIG_32BIT */
+
+#endif /* CONFIG_EVA */
+
+#else /* __ASSEMBLY__ */
+
+#ifdef CONFIG_EVA
+
+#define __BUILD_EVA_INSN(insn, reg, addr) \
+ .set push; \
+ .set mips0; \
+ .set eva; \
+ insn reg, addr; \
+ .set pop;
+
+#define user_cache(op, base) __BUILD_EVA_INSN(cachee, op, base)
+#define user_ll(reg, addr) __BUILD_EVA_INSN(lle, reg, addr)
+#define user_sc(reg, addr) __BUILD_EVA_INSN(sce, reg, addr)
+#define user_lw(reg, addr) __BUILD_EVA_INSN(lwe, reg, addr)
+#define user_lwl(reg, addr) __BUILD_EVA_INSN(lwle, reg, addr)
+#define user_lwr(reg, addr) __BUILD_EVA_INSN(lwre, reg, addr)
+#define user_lh(reg, addr) __BUILD_EVA_INSN(lhe, reg, addr)
+#define user_lb(reg, addr) __BUILD_EVA_INSN(lbe, reg, addr)
+#define user_lbu(reg, addr) __BUILD_EVA_INSN(lbue, reg, addr)
+/* No 64-bit EVA instruction for loading double words */
+#define user_ld(reg, addr) user_lw(reg, addr)
+#define user_sw(reg, addr) __BUILD_EVA_INSN(swe, reg, addr)
+#define user_swl(reg, addr) __BUILD_EVA_INSN(swle, reg, addr)
+#define user_swr(reg, addr) __BUILD_EVA_INSN(swre, reg, addr)
+#define user_sh(reg, addr) __BUILD_EVA_INSN(she, reg, addr)
+#define user_sb(reg, addr) __BUILD_EVA_INSN(sbe, reg, addr)
+/* No 64-bit EVA instruction for storing double words */
+#define user_sd(reg, addr) user_sw(reg, addr)
+#else
+
+#define user_cache(op, base) cache op, base
+#define user_ll(reg, addr) ll reg, addr
+#define user_sc(reg, addr) sc reg, addr
+#define user_lw(reg, addr) lw reg, addr
+#define user_lwl(reg, addr) lwl reg, addr
+#define user_lwr(reg, addr) lwr reg, addr
+#define user_lh(reg, addr) lh reg, addr
+#define user_lb(reg, addr) lb reg, addr
+#define user_lbu(reg, addr) lbu reg, addr
+#define user_sw(reg, addr) sw reg, addr
+#define user_swl(reg, addr) swl reg, addr
+#define user_swr(reg, addr) swr reg, addr
+#define user_sh(reg, addr) sh reg, addr
+#define user_sb(reg, addr) sb reg, addr
+
+#ifdef CONFIG_32BIT
+/*
+ * There are no 'sd' or 'ld' instructions in 32-bit mode, but
+ * the code will still do the correct thing
+ */
+#define user_sd(reg, addr) user_sw(reg, addr)
+#define user_ld(reg, addr) user_lw(reg, addr)
+#else
+#define user_sd(reg, addr) sd reg, addr
+#define user_ld(reg, addr) ld reg, addr
+#endif /* CONFIG_32BIT */
+
+#endif /* CONFIG_EVA */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ASM_EVA_H */
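
Note: in the C (non-__ASSEMBLY__) half of this new header, each accessor expands to an instruction string that callers paste into inline asm, so a single call site transparently becomes lwe/swe under CONFIG_EVA and a plain lw/sw otherwise. A minimal sketch of such a call site (hypothetical helper, not part of this patch; the .fixup/exception-table plumbing and addressing-mode constraints of real uaccess code are simplified here):

    #include <asm/asm-eva.h>

    /* Hypothetical: load one word from a user pointer using the
     * EVA-aware accessor string.  Expands to "lwe" when CONFIG_EVA
     * is set, plain "lw" otherwise. */
    static inline unsigned int load_user_word(const unsigned int __user *p)
    {
            unsigned int val;

            __asm__ __volatile__(
                    user_lw("%0", "%1")
                    : "=r" (val)
                    : "m" (*p));

            return val;
    }
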
diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
index 879691d194af..7c26b28bf252 100644
--- a/arch/mips/include/asm/asm.h
+++ b/arch/mips/include/asm/asm.h
@@ -18,6 +18,7 @@
#define __ASM_ASM_H
#include <asm/sgidefs.h>
+#include <asm/asm-eva.h>
#ifndef CAT
#ifdef __STDC__
@@ -145,19 +146,27 @@ symbol = value
#define PREF(hint,addr) \
.set push; \
- .set mips4; \
+ .set arch=r5000; \
pref hint, addr; \
.set pop
+#define PREFE(hint, addr) \
+ .set push; \
+ .set mips0; \
+ .set eva; \
+ prefe hint, addr; \
+ .set pop
+
#define PREFX(hint,addr) \
.set push; \
- .set mips4; \
+ .set arch=r5000; \
prefx hint, addr; \
.set pop
#else /* !CONFIG_CPU_HAS_PREFETCH */
#define PREF(hint, addr)
+#define PREFE(hint, addr)
#define PREFX(hint, addr)
#endif /* !CONFIG_CPU_HAS_PREFETCH */
diff --git a/arch/mips/include/asm/asmmacro-32.h b/arch/mips/include/asm/asmmacro-32.h
index 70e1f176f123..e38c2811d4e2 100644
--- a/arch/mips/include/asm/asmmacro-32.h
+++ b/arch/mips/include/asm/asmmacro-32.h
@@ -14,75 +14,75 @@
.macro fpu_save_single thread tmp=t0
cfc1 \tmp, fcr31
- swc1 $f0, THREAD_FPR0(\thread)
- swc1 $f1, THREAD_FPR1(\thread)
- swc1 $f2, THREAD_FPR2(\thread)
- swc1 $f3, THREAD_FPR3(\thread)
- swc1 $f4, THREAD_FPR4(\thread)
- swc1 $f5, THREAD_FPR5(\thread)
- swc1 $f6, THREAD_FPR6(\thread)
- swc1 $f7, THREAD_FPR7(\thread)
- swc1 $f8, THREAD_FPR8(\thread)
- swc1 $f9, THREAD_FPR9(\thread)
- swc1 $f10, THREAD_FPR10(\thread)
- swc1 $f11, THREAD_FPR11(\thread)
- swc1 $f12, THREAD_FPR12(\thread)
- swc1 $f13, THREAD_FPR13(\thread)
- swc1 $f14, THREAD_FPR14(\thread)
- swc1 $f15, THREAD_FPR15(\thread)
- swc1 $f16, THREAD_FPR16(\thread)
- swc1 $f17, THREAD_FPR17(\thread)
- swc1 $f18, THREAD_FPR18(\thread)
- swc1 $f19, THREAD_FPR19(\thread)
- swc1 $f20, THREAD_FPR20(\thread)
- swc1 $f21, THREAD_FPR21(\thread)
- swc1 $f22, THREAD_FPR22(\thread)
- swc1 $f23, THREAD_FPR23(\thread)
- swc1 $f24, THREAD_FPR24(\thread)
- swc1 $f25, THREAD_FPR25(\thread)
- swc1 $f26, THREAD_FPR26(\thread)
- swc1 $f27, THREAD_FPR27(\thread)
- swc1 $f28, THREAD_FPR28(\thread)
- swc1 $f29, THREAD_FPR29(\thread)
- swc1 $f30, THREAD_FPR30(\thread)
- swc1 $f31, THREAD_FPR31(\thread)
+ swc1 $f0, THREAD_FPR0_LS64(\thread)
+ swc1 $f1, THREAD_FPR1_LS64(\thread)
+ swc1 $f2, THREAD_FPR2_LS64(\thread)
+ swc1 $f3, THREAD_FPR3_LS64(\thread)
+ swc1 $f4, THREAD_FPR4_LS64(\thread)
+ swc1 $f5, THREAD_FPR5_LS64(\thread)
+ swc1 $f6, THREAD_FPR6_LS64(\thread)
+ swc1 $f7, THREAD_FPR7_LS64(\thread)
+ swc1 $f8, THREAD_FPR8_LS64(\thread)
+ swc1 $f9, THREAD_FPR9_LS64(\thread)
+ swc1 $f10, THREAD_FPR10_LS64(\thread)
+ swc1 $f11, THREAD_FPR11_LS64(\thread)
+ swc1 $f12, THREAD_FPR12_LS64(\thread)
+ swc1 $f13, THREAD_FPR13_LS64(\thread)
+ swc1 $f14, THREAD_FPR14_LS64(\thread)
+ swc1 $f15, THREAD_FPR15_LS64(\thread)
+ swc1 $f16, THREAD_FPR16_LS64(\thread)
+ swc1 $f17, THREAD_FPR17_LS64(\thread)
+ swc1 $f18, THREAD_FPR18_LS64(\thread)
+ swc1 $f19, THREAD_FPR19_LS64(\thread)
+ swc1 $f20, THREAD_FPR20_LS64(\thread)
+ swc1 $f21, THREAD_FPR21_LS64(\thread)
+ swc1 $f22, THREAD_FPR22_LS64(\thread)
+ swc1 $f23, THREAD_FPR23_LS64(\thread)
+ swc1 $f24, THREAD_FPR24_LS64(\thread)
+ swc1 $f25, THREAD_FPR25_LS64(\thread)
+ swc1 $f26, THREAD_FPR26_LS64(\thread)
+ swc1 $f27, THREAD_FPR27_LS64(\thread)
+ swc1 $f28, THREAD_FPR28_LS64(\thread)
+ swc1 $f29, THREAD_FPR29_LS64(\thread)
+ swc1 $f30, THREAD_FPR30_LS64(\thread)
+ swc1 $f31, THREAD_FPR31_LS64(\thread)
sw \tmp, THREAD_FCR31(\thread)
.endm
.macro fpu_restore_single thread tmp=t0
lw \tmp, THREAD_FCR31(\thread)
- lwc1 $f0, THREAD_FPR0(\thread)
- lwc1 $f1, THREAD_FPR1(\thread)
- lwc1 $f2, THREAD_FPR2(\thread)
- lwc1 $f3, THREAD_FPR3(\thread)
- lwc1 $f4, THREAD_FPR4(\thread)
- lwc1 $f5, THREAD_FPR5(\thread)
- lwc1 $f6, THREAD_FPR6(\thread)
- lwc1 $f7, THREAD_FPR7(\thread)
- lwc1 $f8, THREAD_FPR8(\thread)
- lwc1 $f9, THREAD_FPR9(\thread)
- lwc1 $f10, THREAD_FPR10(\thread)
- lwc1 $f11, THREAD_FPR11(\thread)
- lwc1 $f12, THREAD_FPR12(\thread)
- lwc1 $f13, THREAD_FPR13(\thread)
- lwc1 $f14, THREAD_FPR14(\thread)
- lwc1 $f15, THREAD_FPR15(\thread)
- lwc1 $f16, THREAD_FPR16(\thread)
- lwc1 $f17, THREAD_FPR17(\thread)
- lwc1 $f18, THREAD_FPR18(\thread)
- lwc1 $f19, THREAD_FPR19(\thread)
- lwc1 $f20, THREAD_FPR20(\thread)
- lwc1 $f21, THREAD_FPR21(\thread)
- lwc1 $f22, THREAD_FPR22(\thread)
- lwc1 $f23, THREAD_FPR23(\thread)
- lwc1 $f24, THREAD_FPR24(\thread)
- lwc1 $f25, THREAD_FPR25(\thread)
- lwc1 $f26, THREAD_FPR26(\thread)
- lwc1 $f27, THREAD_FPR27(\thread)
- lwc1 $f28, THREAD_FPR28(\thread)
- lwc1 $f29, THREAD_FPR29(\thread)
- lwc1 $f30, THREAD_FPR30(\thread)
- lwc1 $f31, THREAD_FPR31(\thread)
+ lwc1 $f0, THREAD_FPR0_LS64(\thread)
+ lwc1 $f1, THREAD_FPR1_LS64(\thread)
+ lwc1 $f2, THREAD_FPR2_LS64(\thread)
+ lwc1 $f3, THREAD_FPR3_LS64(\thread)
+ lwc1 $f4, THREAD_FPR4_LS64(\thread)
+ lwc1 $f5, THREAD_FPR5_LS64(\thread)
+ lwc1 $f6, THREAD_FPR6_LS64(\thread)
+ lwc1 $f7, THREAD_FPR7_LS64(\thread)
+ lwc1 $f8, THREAD_FPR8_LS64(\thread)
+ lwc1 $f9, THREAD_FPR9_LS64(\thread)
+ lwc1 $f10, THREAD_FPR10_LS64(\thread)
+ lwc1 $f11, THREAD_FPR11_LS64(\thread)
+ lwc1 $f12, THREAD_FPR12_LS64(\thread)
+ lwc1 $f13, THREAD_FPR13_LS64(\thread)
+ lwc1 $f14, THREAD_FPR14_LS64(\thread)
+ lwc1 $f15, THREAD_FPR15_LS64(\thread)
+ lwc1 $f16, THREAD_FPR16_LS64(\thread)
+ lwc1 $f17, THREAD_FPR17_LS64(\thread)
+ lwc1 $f18, THREAD_FPR18_LS64(\thread)
+ lwc1 $f19, THREAD_FPR19_LS64(\thread)
+ lwc1 $f20, THREAD_FPR20_LS64(\thread)
+ lwc1 $f21, THREAD_FPR21_LS64(\thread)
+ lwc1 $f22, THREAD_FPR22_LS64(\thread)
+ lwc1 $f23, THREAD_FPR23_LS64(\thread)
+ lwc1 $f24, THREAD_FPR24_LS64(\thread)
+ lwc1 $f25, THREAD_FPR25_LS64(\thread)
+ lwc1 $f26, THREAD_FPR26_LS64(\thread)
+ lwc1 $f27, THREAD_FPR27_LS64(\thread)
+ lwc1 $f28, THREAD_FPR28_LS64(\thread)
+ lwc1 $f29, THREAD_FPR29_LS64(\thread)
+ lwc1 $f30, THREAD_FPR30_LS64(\thread)
+ lwc1 $f31, THREAD_FPR31_LS64(\thread)
ctc1 \tmp, fcr31
.endm
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 3220c93ea981..b464b8b1147a 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -9,6 +9,7 @@
#define _ASM_ASMMACRO_H
#include <asm/hazards.h>
+#include <asm/asm-offsets.h>
#ifdef CONFIG_32BIT
#include <asm/asmmacro-32.h>
@@ -54,59 +55,69 @@
.endm
.macro local_irq_disable reg=t0
+#ifdef CONFIG_PREEMPT
+ lw \reg, TI_PRE_COUNT($28)
+ addi \reg, \reg, 1
+ sw \reg, TI_PRE_COUNT($28)
+#endif
mfc0 \reg, CP0_STATUS
ori \reg, \reg, 1
xori \reg, \reg, 1
mtc0 \reg, CP0_STATUS
irq_disable_hazard
+#ifdef CONFIG_PREEMPT
+ lw \reg, TI_PRE_COUNT($28)
+ addi \reg, \reg, -1
+ sw \reg, TI_PRE_COUNT($28)
+#endif
.endm
#endif /* CONFIG_MIPS_MT_SMTC */
.macro fpu_save_16even thread tmp=t0
cfc1 \tmp, fcr31
- sdc1 $f0, THREAD_FPR0(\thread)
- sdc1 $f2, THREAD_FPR2(\thread)
- sdc1 $f4, THREAD_FPR4(\thread)
- sdc1 $f6, THREAD_FPR6(\thread)
- sdc1 $f8, THREAD_FPR8(\thread)
- sdc1 $f10, THREAD_FPR10(\thread)
- sdc1 $f12, THREAD_FPR12(\thread)
- sdc1 $f14, THREAD_FPR14(\thread)
- sdc1 $f16, THREAD_FPR16(\thread)
- sdc1 $f18, THREAD_FPR18(\thread)
- sdc1 $f20, THREAD_FPR20(\thread)
- sdc1 $f22, THREAD_FPR22(\thread)
- sdc1 $f24, THREAD_FPR24(\thread)
- sdc1 $f26, THREAD_FPR26(\thread)
- sdc1 $f28, THREAD_FPR28(\thread)
- sdc1 $f30, THREAD_FPR30(\thread)
+ sdc1 $f0, THREAD_FPR0_LS64(\thread)
+ sdc1 $f2, THREAD_FPR2_LS64(\thread)
+ sdc1 $f4, THREAD_FPR4_LS64(\thread)
+ sdc1 $f6, THREAD_FPR6_LS64(\thread)
+ sdc1 $f8, THREAD_FPR8_LS64(\thread)
+ sdc1 $f10, THREAD_FPR10_LS64(\thread)
+ sdc1 $f12, THREAD_FPR12_LS64(\thread)
+ sdc1 $f14, THREAD_FPR14_LS64(\thread)
+ sdc1 $f16, THREAD_FPR16_LS64(\thread)
+ sdc1 $f18, THREAD_FPR18_LS64(\thread)
+ sdc1 $f20, THREAD_FPR20_LS64(\thread)
+ sdc1 $f22, THREAD_FPR22_LS64(\thread)
+ sdc1 $f24, THREAD_FPR24_LS64(\thread)
+ sdc1 $f26, THREAD_FPR26_LS64(\thread)
+ sdc1 $f28, THREAD_FPR28_LS64(\thread)
+ sdc1 $f30, THREAD_FPR30_LS64(\thread)
sw \tmp, THREAD_FCR31(\thread)
.endm
.macro fpu_save_16odd thread
.set push
.set mips64r2
- sdc1 $f1, THREAD_FPR1(\thread)
- sdc1 $f3, THREAD_FPR3(\thread)
- sdc1 $f5, THREAD_FPR5(\thread)
- sdc1 $f7, THREAD_FPR7(\thread)
- sdc1 $f9, THREAD_FPR9(\thread)
- sdc1 $f11, THREAD_FPR11(\thread)
- sdc1 $f13, THREAD_FPR13(\thread)
- sdc1 $f15, THREAD_FPR15(\thread)
- sdc1 $f17, THREAD_FPR17(\thread)
- sdc1 $f19, THREAD_FPR19(\thread)
- sdc1 $f21, THREAD_FPR21(\thread)
- sdc1 $f23, THREAD_FPR23(\thread)
- sdc1 $f25, THREAD_FPR25(\thread)
- sdc1 $f27, THREAD_FPR27(\thread)
- sdc1 $f29, THREAD_FPR29(\thread)
- sdc1 $f31, THREAD_FPR31(\thread)
+ sdc1 $f1, THREAD_FPR1_LS64(\thread)
+ sdc1 $f3, THREAD_FPR3_LS64(\thread)
+ sdc1 $f5, THREAD_FPR5_LS64(\thread)
+ sdc1 $f7, THREAD_FPR7_LS64(\thread)
+ sdc1 $f9, THREAD_FPR9_LS64(\thread)
+ sdc1 $f11, THREAD_FPR11_LS64(\thread)
+ sdc1 $f13, THREAD_FPR13_LS64(\thread)
+ sdc1 $f15, THREAD_FPR15_LS64(\thread)
+ sdc1 $f17, THREAD_FPR17_LS64(\thread)
+ sdc1 $f19, THREAD_FPR19_LS64(\thread)
+ sdc1 $f21, THREAD_FPR21_LS64(\thread)
+ sdc1 $f23, THREAD_FPR23_LS64(\thread)
+ sdc1 $f25, THREAD_FPR25_LS64(\thread)
+ sdc1 $f27, THREAD_FPR27_LS64(\thread)
+ sdc1 $f29, THREAD_FPR29_LS64(\thread)
+ sdc1 $f31, THREAD_FPR31_LS64(\thread)
.set pop
.endm
.macro fpu_save_double thread status tmp
-#if defined(CONFIG_MIPS64) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
sll \tmp, \status, 5
bgez \tmp, 10f
fpu_save_16odd \thread
@@ -117,49 +128,49 @@
.macro fpu_restore_16even thread tmp=t0
lw \tmp, THREAD_FCR31(\thread)
- ldc1 $f0, THREAD_FPR0(\thread)
- ldc1 $f2, THREAD_FPR2(\thread)
- ldc1 $f4, THREAD_FPR4(\thread)
- ldc1 $f6, THREAD_FPR6(\thread)
- ldc1 $f8, THREAD_FPR8(\thread)
- ldc1 $f10, THREAD_FPR10(\thread)
- ldc1 $f12, THREAD_FPR12(\thread)
- ldc1 $f14, THREAD_FPR14(\thread)
- ldc1 $f16, THREAD_FPR16(\thread)
- ldc1 $f18, THREAD_FPR18(\thread)
- ldc1 $f20, THREAD_FPR20(\thread)
- ldc1 $f22, THREAD_FPR22(\thread)
- ldc1 $f24, THREAD_FPR24(\thread)
- ldc1 $f26, THREAD_FPR26(\thread)
- ldc1 $f28, THREAD_FPR28(\thread)
- ldc1 $f30, THREAD_FPR30(\thread)
+ ldc1 $f0, THREAD_FPR0_LS64(\thread)
+ ldc1 $f2, THREAD_FPR2_LS64(\thread)
+ ldc1 $f4, THREAD_FPR4_LS64(\thread)
+ ldc1 $f6, THREAD_FPR6_LS64(\thread)
+ ldc1 $f8, THREAD_FPR8_LS64(\thread)
+ ldc1 $f10, THREAD_FPR10_LS64(\thread)
+ ldc1 $f12, THREAD_FPR12_LS64(\thread)
+ ldc1 $f14, THREAD_FPR14_LS64(\thread)
+ ldc1 $f16, THREAD_FPR16_LS64(\thread)
+ ldc1 $f18, THREAD_FPR18_LS64(\thread)
+ ldc1 $f20, THREAD_FPR20_LS64(\thread)
+ ldc1 $f22, THREAD_FPR22_LS64(\thread)
+ ldc1 $f24, THREAD_FPR24_LS64(\thread)
+ ldc1 $f26, THREAD_FPR26_LS64(\thread)
+ ldc1 $f28, THREAD_FPR28_LS64(\thread)
+ ldc1 $f30, THREAD_FPR30_LS64(\thread)
ctc1 \tmp, fcr31
.endm
.macro fpu_restore_16odd thread
.set push
.set mips64r2
- ldc1 $f1, THREAD_FPR1(\thread)
- ldc1 $f3, THREAD_FPR3(\thread)
- ldc1 $f5, THREAD_FPR5(\thread)
- ldc1 $f7, THREAD_FPR7(\thread)
- ldc1 $f9, THREAD_FPR9(\thread)
- ldc1 $f11, THREAD_FPR11(\thread)
- ldc1 $f13, THREAD_FPR13(\thread)
- ldc1 $f15, THREAD_FPR15(\thread)
- ldc1 $f17, THREAD_FPR17(\thread)
- ldc1 $f19, THREAD_FPR19(\thread)
- ldc1 $f21, THREAD_FPR21(\thread)
- ldc1 $f23, THREAD_FPR23(\thread)
- ldc1 $f25, THREAD_FPR25(\thread)
- ldc1 $f27, THREAD_FPR27(\thread)
- ldc1 $f29, THREAD_FPR29(\thread)
- ldc1 $f31, THREAD_FPR31(\thread)
+ ldc1 $f1, THREAD_FPR1_LS64(\thread)
+ ldc1 $f3, THREAD_FPR3_LS64(\thread)
+ ldc1 $f5, THREAD_FPR5_LS64(\thread)
+ ldc1 $f7, THREAD_FPR7_LS64(\thread)
+ ldc1 $f9, THREAD_FPR9_LS64(\thread)
+ ldc1 $f11, THREAD_FPR11_LS64(\thread)
+ ldc1 $f13, THREAD_FPR13_LS64(\thread)
+ ldc1 $f15, THREAD_FPR15_LS64(\thread)
+ ldc1 $f17, THREAD_FPR17_LS64(\thread)
+ ldc1 $f19, THREAD_FPR19_LS64(\thread)
+ ldc1 $f21, THREAD_FPR21_LS64(\thread)
+ ldc1 $f23, THREAD_FPR23_LS64(\thread)
+ ldc1 $f25, THREAD_FPR25_LS64(\thread)
+ ldc1 $f27, THREAD_FPR27_LS64(\thread)
+ ldc1 $f29, THREAD_FPR29_LS64(\thread)
+ ldc1 $f31, THREAD_FPR31_LS64(\thread)
.set pop
.endm
.macro fpu_restore_double thread status tmp
-#if defined(CONFIG_MIPS64) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
sll \tmp, \status, 5
bgez \tmp, 10f # 16 register mode?
@@ -169,6 +180,17 @@
fpu_restore_16even \thread \tmp
.endm
+#ifdef CONFIG_CPU_MIPSR2
+ .macro _EXT rd, rs, p, s
+ ext \rd, \rs, \p, \s
+ .endm
+#else /* !CONFIG_CPU_MIPSR2 */
+ .macro _EXT rd, rs, p, s
+ srl \rd, \rs, \p
+ andi \rd, \rd, (1 << \s) - 1
+ .endm
+#endif /* !CONFIG_CPU_MIPSR2 */
+
/*
* Temporary until all gas have MT ASE support
*/
@@ -196,4 +218,195 @@
.word 0x41800000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel)
.endm
+#ifdef TOOLCHAIN_SUPPORTS_MSA
+ .macro ld_d wd, off, base
+ .set push
+ .set mips32r2
+ .set msa
+ ld.d $w\wd, \off(\base)
+ .set pop
+ .endm
+
+ .macro st_d wd, off, base
+ .set push
+ .set mips32r2
+ .set msa
+ st.d $w\wd, \off(\base)
+ .set pop
+ .endm
+
+ .macro copy_u_w rd, ws, n
+ .set push
+ .set mips32r2
+ .set msa
+ copy_u.w \rd, $w\ws[\n]
+ .set pop
+ .endm
+
+ .macro copy_u_d rd, ws, n
+ .set push
+ .set mips64r2
+ .set msa
+ copy_u.d \rd, $w\ws[\n]
+ .set pop
+ .endm
+
+ .macro insert_w wd, n, rs
+ .set push
+ .set mips32r2
+ .set msa
+ insert.w $w\wd[\n], \rs
+ .set pop
+ .endm
+
+ .macro insert_d wd, n, rs
+ .set push
+ .set mips64r2
+ .set msa
+ insert.d $w\wd[\n], \rs
+ .set pop
+ .endm
+#else
+ /*
+ * Temporary until all toolchains in use include MSA support.
+ */
+ .macro cfcmsa rd, cs
+ .set push
+ .set noat
+ .word 0x787e0059 | (\cs << 11)
+ move \rd, $1
+ .set pop
+ .endm
+
+ .macro ctcmsa cd, rs
+ .set push
+ .set noat
+ move $1, \rs
+ .word 0x783e0819 | (\cd << 6)
+ .set pop
+ .endm
+
+ .macro ld_d wd, off, base
+ .set push
+ .set noat
+ add $1, \base, \off
+ .word 0x78000823 | (\wd << 6)
+ .set pop
+ .endm
+
+ .macro st_d wd, off, base
+ .set push
+ .set noat
+ add $1, \base, \off
+ .word 0x78000827 | (\wd << 6)
+ .set pop
+ .endm
+
+ .macro copy_u_w rd, ws, n
+ .set push
+ .set noat
+ .word 0x78f00059 | (\n << 16) | (\ws << 11)
+ /* move triggers an assembler bug... */
+ or \rd, $1, zero
+ .set pop
+ .endm
+
+ .macro copy_u_d rd, ws, n
+ .set push
+ .set noat
+ .word 0x78f80059 | (\n << 16) | (\ws << 11)
+ /* move triggers an assembler bug... */
+ or \rd, $1, zero
+ .set pop
+ .endm
+
+ .macro insert_w wd, n, rs
+ .set push
+ .set noat
+ /* move triggers an assembler bug... */
+ or $1, \rs, zero
+ .word 0x79300819 | (\n << 16) | (\wd << 6)
+ .set pop
+ .endm
+
+ .macro insert_d wd, n, rs
+ .set push
+ .set noat
+ /* move triggers an assembler bug... */
+ or $1, \rs, zero
+ .word 0x79380819 | (\n << 16) | (\wd << 6)
+ .set pop
+ .endm
+#endif
+
+ .macro msa_save_all thread
+ st_d 0, THREAD_FPR0, \thread
+ st_d 1, THREAD_FPR1, \thread
+ st_d 2, THREAD_FPR2, \thread
+ st_d 3, THREAD_FPR3, \thread
+ st_d 4, THREAD_FPR4, \thread
+ st_d 5, THREAD_FPR5, \thread
+ st_d 6, THREAD_FPR6, \thread
+ st_d 7, THREAD_FPR7, \thread
+ st_d 8, THREAD_FPR8, \thread
+ st_d 9, THREAD_FPR9, \thread
+ st_d 10, THREAD_FPR10, \thread
+ st_d 11, THREAD_FPR11, \thread
+ st_d 12, THREAD_FPR12, \thread
+ st_d 13, THREAD_FPR13, \thread
+ st_d 14, THREAD_FPR14, \thread
+ st_d 15, THREAD_FPR15, \thread
+ st_d 16, THREAD_FPR16, \thread
+ st_d 17, THREAD_FPR17, \thread
+ st_d 18, THREAD_FPR18, \thread
+ st_d 19, THREAD_FPR19, \thread
+ st_d 20, THREAD_FPR20, \thread
+ st_d 21, THREAD_FPR21, \thread
+ st_d 22, THREAD_FPR22, \thread
+ st_d 23, THREAD_FPR23, \thread
+ st_d 24, THREAD_FPR24, \thread
+ st_d 25, THREAD_FPR25, \thread
+ st_d 26, THREAD_FPR26, \thread
+ st_d 27, THREAD_FPR27, \thread
+ st_d 28, THREAD_FPR28, \thread
+ st_d 29, THREAD_FPR29, \thread
+ st_d 30, THREAD_FPR30, \thread
+ st_d 31, THREAD_FPR31, \thread
+ .endm
+
+ .macro msa_restore_all thread
+ ld_d 0, THREAD_FPR0, \thread
+ ld_d 1, THREAD_FPR1, \thread
+ ld_d 2, THREAD_FPR2, \thread
+ ld_d 3, THREAD_FPR3, \thread
+ ld_d 4, THREAD_FPR4, \thread
+ ld_d 5, THREAD_FPR5, \thread
+ ld_d 6, THREAD_FPR6, \thread
+ ld_d 7, THREAD_FPR7, \thread
+ ld_d 8, THREAD_FPR8, \thread
+ ld_d 9, THREAD_FPR9, \thread
+ ld_d 10, THREAD_FPR10, \thread
+ ld_d 11, THREAD_FPR11, \thread
+ ld_d 12, THREAD_FPR12, \thread
+ ld_d 13, THREAD_FPR13, \thread
+ ld_d 14, THREAD_FPR14, \thread
+ ld_d 15, THREAD_FPR15, \thread
+ ld_d 16, THREAD_FPR16, \thread
+ ld_d 17, THREAD_FPR17, \thread
+ ld_d 18, THREAD_FPR18, \thread
+ ld_d 19, THREAD_FPR19, \thread
+ ld_d 20, THREAD_FPR20, \thread
+ ld_d 21, THREAD_FPR21, \thread
+ ld_d 22, THREAD_FPR22, \thread
+ ld_d 23, THREAD_FPR23, \thread
+ ld_d 24, THREAD_FPR24, \thread
+ ld_d 25, THREAD_FPR25, \thread
+ ld_d 26, THREAD_FPR26, \thread
+ ld_d 27, THREAD_FPR27, \thread
+ ld_d 28, THREAD_FPR28, \thread
+ ld_d 29, THREAD_FPR29, \thread
+ ld_d 30, THREAD_FPR30, \thread
+ ld_d 31, THREAD_FPR31, \thread
+ .endm
+
#endif /* _ASM_ASMMACRO_H */
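
Note: the new _EXT macro above emulates the MIPS32r2 ext instruction on pre-R2 CPUs with a right shift followed by a mask, exactly the srl/andi pair in the fallback branch. An illustrative equivalence check in C (not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    /* ext rd, rs, p, s extracts the s-bit field of rs starting at
     * bit p; the pre-R2 fallback is a shift and a mask. */
    static uint32_t ext_fallback(uint32_t rs, unsigned p, unsigned s)
    {
            return (rs >> p) & ((1u << s) - 1);
    }

    int main(void)
    {
            /* Bits [8..11] of 0xabcd1234 are the nibble 0x2. */
            assert(ext_fallback(0xabcd1234, 8, 4) == 0x2);
            return 0;
    }
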
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 7eed2f261710..e8eb3d53a241 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -53,7 +53,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
int temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: ll %0, %1 # atomic_add \n"
" addu %0, %2 \n"
" sc %0, %1 \n"
@@ -66,7 +66,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" ll %0, %1 # atomic_add \n"
" addu %0, %2 \n"
" sc %0, %1 \n"
@@ -96,7 +96,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
int temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: ll %0, %1 # atomic_sub \n"
" subu %0, %2 \n"
" sc %0, %1 \n"
@@ -109,7 +109,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" ll %0, %1 # atomic_sub \n"
" subu %0, %2 \n"
" sc %0, %1 \n"
@@ -139,7 +139,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
int temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: ll %1, %2 # atomic_add_return \n"
" addu %0, %1, %3 \n"
" sc %0, %2 \n"
@@ -153,7 +153,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" ll %1, %2 # atomic_add_return \n"
" addu %0, %1, %3 \n"
" sc %0, %2 \n"
@@ -188,7 +188,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
int temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: ll %1, %2 # atomic_sub_return \n"
" subu %0, %1, %3 \n"
" sc %0, %2 \n"
@@ -205,7 +205,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" ll %1, %2 # atomic_sub_return \n"
" subu %0, %1, %3 \n"
" sc %0, %2 \n"
@@ -248,7 +248,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
int temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: ll %1, %2 # atomic_sub_if_positive\n"
" subu %0, %1, %3 \n"
" bltz %0, 1f \n"
@@ -266,7 +266,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
int temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: ll %1, %2 # atomic_sub_if_positive\n"
" subu %0, %1, %3 \n"
" bltz %0, 1f \n"
@@ -420,7 +420,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
long temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: lld %0, %1 # atomic64_add \n"
" daddu %0, %2 \n"
" scd %0, %1 \n"
@@ -433,7 +433,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" lld %0, %1 # atomic64_add \n"
" daddu %0, %2 \n"
" scd %0, %1 \n"
@@ -463,7 +463,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
long temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: lld %0, %1 # atomic64_sub \n"
" dsubu %0, %2 \n"
" scd %0, %1 \n"
@@ -476,7 +476,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" lld %0, %1 # atomic64_sub \n"
" dsubu %0, %2 \n"
" scd %0, %1 \n"
@@ -506,7 +506,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
long temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: lld %1, %2 # atomic64_add_return \n"
" daddu %0, %1, %3 \n"
" scd %0, %2 \n"
@@ -520,7 +520,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" lld %1, %2 # atomic64_add_return \n"
" daddu %0, %1, %3 \n"
" scd %0, %2 \n"
@@ -556,7 +556,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
long temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: lld %1, %2 # atomic64_sub_return \n"
" dsubu %0, %1, %3 \n"
" scd %0, %2 \n"
@@ -571,7 +571,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" lld %1, %2 # atomic64_sub_return \n"
" dsubu %0, %1, %3 \n"
" scd %0, %2 \n"
@@ -615,7 +615,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
long temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: lld %1, %2 # atomic64_sub_if_positive\n"
" dsubu %0, %1, %3 \n"
" bltz %0, 1f \n"
@@ -633,7 +633,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
long temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: lld %1, %2 # atomic64_sub_if_positive\n"
" dsubu %0, %1, %3 \n"
" bltz %0, 1f \n"
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 71305a8b3d78..6a65d49e2c0d 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -79,7 +79,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
if (kernel_uses_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: " __LL "%0, %1 # set_bit \n"
" or %0, %2 \n"
" " __SC "%0, %1 \n"
@@ -101,7 +101,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
} else if (kernel_uses_llsc) {
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" " __LL "%0, %1 # set_bit \n"
" or %0, %2 \n"
" " __SC "%0, %1 \n"
@@ -131,7 +131,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
if (kernel_uses_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: " __LL "%0, %1 # clear_bit \n"
" and %0, %2 \n"
" " __SC "%0, %1 \n"
@@ -153,7 +153,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
} else if (kernel_uses_llsc) {
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" " __LL "%0, %1 # clear_bit \n"
" and %0, %2 \n"
" " __SC "%0, %1 \n"
@@ -197,7 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
unsigned long temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: " __LL "%0, %1 # change_bit \n"
" xor %0, %2 \n"
" " __SC "%0, %1 \n"
@@ -211,7 +211,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" " __LL "%0, %1 # change_bit \n"
" xor %0, %2 \n"
" " __SC "%0, %1 \n"
@@ -244,7 +244,7 @@ static inline int test_and_set_bit(unsigned long nr,
unsigned long temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: " __LL "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n"
" " __SC "%2, %1 \n"
@@ -260,7 +260,7 @@ static inline int test_and_set_bit(unsigned long nr,
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" " __LL "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n"
" " __SC "%2, %1 \n"
@@ -298,7 +298,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
unsigned long temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: " __LL "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n"
" " __SC "%2, %1 \n"
@@ -314,7 +314,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" " __LL "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n"
" " __SC "%2, %1 \n"
@@ -353,7 +353,7 @@ static inline int test_and_clear_bit(unsigned long nr,
unsigned long temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: " __LL "%0, %1 # test_and_clear_bit \n"
" or %2, %0, %3 \n"
" xor %2, %3 \n"
@@ -386,7 +386,7 @@ static inline int test_and_clear_bit(unsigned long nr,
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" " __LL "%0, %1 # test_and_clear_bit \n"
" or %2, %0, %3 \n"
" xor %2, %3 \n"
@@ -427,7 +427,7 @@ static inline int test_and_change_bit(unsigned long nr,
unsigned long temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: " __LL "%0, %1 # test_and_change_bit \n"
" xor %2, %0, %3 \n"
" " __SC "%2, %1 \n"
@@ -443,7 +443,7 @@ static inline int test_and_change_bit(unsigned long nr,
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" " __LL "%0, %1 # test_and_change_bit \n"
" xor %2, %0, %3 \n"
" " __SC "\t%2, %1 \n"
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index 4d2cdea5aa37..1f7ca8b00404 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -61,15 +61,21 @@
/*
* Valid machtype for Loongson family
*/
-#define MACH_LOONGSON_UNKNOWN 0
-#define MACH_LEMOTE_FL2E 1
-#define MACH_LEMOTE_FL2F 2
-#define MACH_LEMOTE_ML2F7 3
-#define MACH_LEMOTE_YL2F89 4
-#define MACH_DEXXON_GDIUM2F10 5
-#define MACH_LEMOTE_NAS 6
-#define MACH_LEMOTE_LL2F 7
-#define MACH_LOONGSON_END 8
+enum loongson_machine_type {
+ MACH_LOONGSON_UNKNOWN,
+ MACH_LEMOTE_FL2E,
+ MACH_LEMOTE_FL2F,
+ MACH_LEMOTE_ML2F7,
+ MACH_LEMOTE_YL2F89,
+ MACH_DEXXON_GDIUM2F10,
+ MACH_LEMOTE_NAS,
+ MACH_LEMOTE_LL2F,
+ MACH_LEMOTE_A1004,
+ MACH_LEMOTE_A1101,
+ MACH_LEMOTE_A1201,
+ MACH_LEMOTE_A1205,
+ MACH_LOONGSON_END
+};
/*
* Valid machtype for group INGENIC
@@ -112,6 +118,8 @@ extern void prom_free_prom_memory(void);
extern void free_init_pages(const char *what,
unsigned long begin, unsigned long end);
+extern void (*free_init_pages_eva)(void *begin, void *end);
+
/*
* Initial kernel command line, usually setup by prom_init()
*/
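
Note: converting the machtype constants to an enum keeps the existing values unchanged (MACH_LOONGSON_UNKNOWN is still 0) while MACH_LOONGSON_END moves past the four new Lemote entries. A hypothetical consumer dispatching on the machtype:

    #include <asm/bootinfo.h>

    /* Hypothetical board-setup dispatch; mips_machtype is the
     * global populated from firmware during early boot. */
    static void example_machine_setup(void)
    {
            switch (mips_machtype) {
            case MACH_LEMOTE_A1004:
            case MACH_LEMOTE_A1101:
            case MACH_LEMOTE_A1201:
            case MACH_LEMOTE_A1205:
                    /* new Lemote machines added by this patch */
                    break;
            default:
                    break;
            }
    }
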
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index ac3d2b8a20d4..3418c51e1151 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -7,6 +7,7 @@
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) 2001 Thiemo Seufer.
* Copyright (C) 2002 Maciej W. Rozycki
+ * Copyright (C) 2014 Imagination Technologies Ltd.
*/
#ifndef _ASM_CHECKSUM_H
#define _ASM_CHECKSUM_H
@@ -29,9 +30,13 @@
*/
__wsum csum_partial(const void *buff, int len, __wsum sum);
-__wsum __csum_partial_copy_user(const void *src, void *dst,
- int len, __wsum sum, int *err_ptr);
+__wsum __csum_partial_copy_kernel(const void *src, void *dst,
+ int len, __wsum sum, int *err_ptr);
+__wsum __csum_partial_copy_from_user(const void *src, void *dst,
+ int len, __wsum sum, int *err_ptr);
+__wsum __csum_partial_copy_to_user(const void *src, void *dst,
+ int len, __wsum sum, int *err_ptr);
/*
* this is a new version of the above that records errors it finds in *errp,
* but continues and zeros the rest of the buffer.
@@ -41,8 +46,26 @@ __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
__wsum sum, int *err_ptr)
{
might_fault();
- return __csum_partial_copy_user((__force void *)src, dst,
- len, sum, err_ptr);
+ if (segment_eq(get_fs(), get_ds()))
+ return __csum_partial_copy_kernel((__force void *)src, dst,
+ len, sum, err_ptr);
+ else
+ return __csum_partial_copy_from_user((__force void *)src, dst,
+ len, sum, err_ptr);
+}
+
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+static inline
+__wsum csum_and_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum sum, int *err_ptr)
+{
+ if (access_ok(VERIFY_READ, src, len))
+ return csum_partial_copy_from_user(src, dst, len, sum,
+ err_ptr);
+ if (len)
+ *err_ptr = -EFAULT;
+
+ return sum;
}
/*
@@ -54,9 +77,16 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
__wsum sum, int *err_ptr)
{
might_fault();
- if (access_ok(VERIFY_WRITE, dst, len))
- return __csum_partial_copy_user(src, (__force void *)dst,
- len, sum, err_ptr);
+ if (access_ok(VERIFY_WRITE, dst, len)) {
+ if (segment_eq(get_fs(), get_ds()))
+ return __csum_partial_copy_kernel(src,
+ (__force void *)dst,
+ len, sum, err_ptr);
+ else
+ return __csum_partial_copy_to_user(src,
+ (__force void *)dst,
+ len, sum, err_ptr);
+ }
if (len)
*err_ptr = -EFAULT;
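
Note: splitting __csum_partial_copy_user() into _kernel/_from_user/_to_user variants is what lets EVA kernels pick the right address-space instructions: when get_fs() is the kernel segment the plain loads/stores are used, otherwise the EVA user variants. A hypothetical caller of the new csum_and_copy_from_user():

    #include <asm/checksum.h>

    /* Hypothetical: copy a user buffer into the kernel while
     * computing its Internet checksum in the same pass. */
    static __sum16 copy_and_sum(void *dst, const void __user *src, int len)
    {
            int err = 0;
            __wsum sum;

            sum = csum_and_copy_from_user(src, dst, len, 0, &err);
            if (err)
                    return 0;       /* caller should treat as -EFAULT */

            return csum_fold(sum);  /* fold to a 16-bit checksum */
    }
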
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 466069bd8465..eefcaa363a87 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -22,11 +22,11 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
unsigned long dummy;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: ll %0, %3 # xchg_u32 \n"
" .set mips0 \n"
" move %2, %z4 \n"
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" sc %2, %1 \n"
" beqzl %2, 1b \n"
" .set mips0 \n"
@@ -38,11 +38,11 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" ll %0, %3 # xchg_u32 \n"
" .set mips0 \n"
" move %2, %z4 \n"
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" sc %2, %1 \n"
" .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
@@ -74,7 +74,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
unsigned long dummy;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: lld %0, %3 # xchg_u64 \n"
" move %2, %z4 \n"
" scd %2, %1 \n"
@@ -88,7 +88,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" lld %0, %3 # xchg_u64 \n"
" move %2, %z4 \n"
" scd %2, %1 \n"
@@ -145,12 +145,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
- " .set mips3 \n" \
+ " .set arch=r4000 \n" \
"1: " ld " %0, %2 # __cmpxchg_asm \n" \
" bne %0, %z3, 2f \n" \
" .set mips0 \n" \
" move $1, %z4 \n" \
- " .set mips3 \n" \
+ " .set arch=r4000 \n" \
" " st " $1, %1 \n" \
" beqzl $1, 1b \n" \
"2: \n" \
@@ -162,12 +162,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
- " .set mips3 \n" \
+ " .set arch=r4000 \n" \
"1: " ld " %0, %2 # __cmpxchg_asm \n" \
" bne %0, %z3, 2f \n" \
" .set mips0 \n" \
" move $1, %z4 \n" \
- " .set mips3 \n" \
+ " .set arch=r4000 \n" \
" " st " $1, %1 \n" \
" beqz $1, 1b \n" \
" .set pop \n" \
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 6e70b03b6aab..f56cc975b92f 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -26,7 +26,9 @@
#ifndef cpu_has_segments
#define cpu_has_segments (cpu_data[0].options & MIPS_CPU_SEGMENTS)
#endif
-
+#ifndef cpu_has_eva
+#define cpu_has_eva (cpu_data[0].options & MIPS_CPU_EVA)
+#endif
/*
* For the moment we don't consider R6000 and R8000 so we can assume that
@@ -299,4 +301,10 @@
#define cpu_has_vz (cpu_data[0].ases & MIPS_ASE_VZ)
#endif
+#if defined(CONFIG_CPU_HAS_MSA) && !defined(cpu_has_msa)
+# define cpu_has_msa (cpu_data[0].ases & MIPS_ASE_MSA)
+#elif !defined(cpu_has_msa)
+# define cpu_has_msa 0
+#endif
+
#endif /* __ASM_CPU_FEATURES_H */
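
Note: cpu_has_eva and cpu_has_msa follow the file's existing pattern: a platform may hard-wire the macro, otherwise it falls back to the probed cpu_data[0] bits, and cpu_has_msa is forced to 0 unless CONFIG_CPU_HAS_MSA is selected. Typical runtime use (hypothetical function name):

    #include <linux/printk.h>
    #include <asm/cpu-features.h>

    /* Sketch: feature-gated reporting, as platform code might do. */
    static void example_report_features(void)
    {
            if (cpu_has_eva)
                    pr_info("EVA supported\n");
            if (cpu_has_msa)
                    pr_info("MSA supported\n");
    }
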
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index 8f7adf0ac1e3..dc2135be2a3a 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -49,6 +49,7 @@ struct cpuinfo_mips {
unsigned long ases;
unsigned int processor_id;
unsigned int fpu_id;
+ unsigned int msa_id;
unsigned int cputype;
int isa_level;
int tlbsize;
@@ -95,4 +96,31 @@ extern void cpu_report(void);
extern const char *__cpu_name[];
#define cpu_name_string() __cpu_name[smp_processor_id()]
+struct seq_file;
+struct notifier_block;
+
+extern int register_proc_cpuinfo_notifier(struct notifier_block *nb);
+extern int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v);
+
+#define proc_cpuinfo_notifier(fn, pri) \
+({ \
+ static struct notifier_block fn##_nb = { \
+ .notifier_call = fn, \
+ .priority = pri \
+ }; \
+ \
+ register_proc_cpuinfo_notifier(&fn##_nb); \
+})
+
+struct proc_cpuinfo_notifier_args {
+ struct seq_file *m;
+ unsigned long n;
+};
+
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+# define cpu_vpe_id(cpuinfo) ((cpuinfo)->vpe_id)
+#else
+# define cpu_vpe_id(cpuinfo) 0
+#endif
+
#endif /* __ASM_CPU_INFO_H */
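
Note: the new proc_cpuinfo notifier chain lets optional subsystems append their own lines to /proc/cpuinfo without patching the core show routine; the args struct carries the seq_file and the CPU number. A hypothetical user:

    #include <linux/init.h>
    #include <linux/notifier.h>
    #include <linux/seq_file.h>
    #include <asm/cpu-info.h>

    /* Hypothetical notifier that adds one line per CPU. */
    static int example_cpuinfo_show(struct notifier_block *nb,
                                    unsigned long action, void *data)
    {
            struct proc_cpuinfo_notifier_args *args = data;

            seq_printf(args->m, "example feature\t\t: yes\n");
            return NOTIFY_OK;
    }

    static int __init example_cpuinfo_init(void)
    {
            return proc_cpuinfo_notifier(example_cpuinfo_show, 0);
    }
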
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index 02f591bd95ca..721906130a57 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -20,6 +20,10 @@ static inline int __pure __get_cpu_type(const int cpu_type)
case CPU_LOONGSON2:
#endif
+#ifdef CONFIG_SYS_HAS_CPU_LOONGSON3
+ case CPU_LOONGSON3:
+#endif
+
#ifdef CONFIG_SYS_HAS_CPU_LOONGSON1B
case CPU_LOONGSON1:
#endif
@@ -46,6 +50,8 @@ static inline int __pure __get_cpu_type(const int cpu_type)
case CPU_M14KEC:
case CPU_INTERAPTIV:
case CPU_PROAPTIV:
+ case CPU_P5600:
+ case CPU_M5150:
#endif
#ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 76411df3d971..530eb8b3a68e 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -82,10 +82,10 @@
#define PRID_IMP_RM7000 0x2700
#define PRID_IMP_NEVADA 0x2800 /* RM5260 ??? */
#define PRID_IMP_RM9000 0x3400
-#define PRID_IMP_LOONGSON1 0x4200
+#define PRID_IMP_LOONGSON_32 0x4200 /* Loongson-1 */
#define PRID_IMP_R5432 0x5400
#define PRID_IMP_R5500 0x5500
-#define PRID_IMP_LOONGSON2 0x6300
+#define PRID_IMP_LOONGSON_64 0x6300 /* Loongson-2/3 */
#define PRID_IMP_UNKNOWN 0xff00
@@ -115,6 +115,8 @@
#define PRID_IMP_INTERAPTIV_MP 0xa100
#define PRID_IMP_PROAPTIV_UP 0xa200
#define PRID_IMP_PROAPTIV_MP 0xa300
+#define PRID_IMP_M5150 0xa700
+#define PRID_IMP_P5600 0xa800
/*
* These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
@@ -229,6 +231,7 @@
#define PRID_REV_LOONGSON1B 0x0020
#define PRID_REV_LOONGSON2E 0x0002
#define PRID_REV_LOONGSON2F 0x0003
+#define PRID_REV_LOONGSON3A 0x0005
/*
* Older processors used to encode processor version and revision in two
@@ -296,14 +299,14 @@ enum cpu_type_enum {
CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_LOONGSON1, CPU_M14KC,
- CPU_M14KEC, CPU_INTERAPTIV, CPU_PROAPTIV,
+ CPU_M14KEC, CPU_INTERAPTIV, CPU_P5600, CPU_PROAPTIV, CPU_1074K, CPU_M5150,
/*
* MIPS64 class processors
*/
CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
- CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2,
- CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP,
+ CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
+ CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP,
CPU_LAST
};
@@ -358,6 +361,7 @@ enum cpu_type_enum {
#define MIPS_CPU_MICROMIPS 0x01000000 /* CPU has microMIPS capability */
#define MIPS_CPU_TLBINV 0x02000000 /* CPU supports TLBINV/F */
#define MIPS_CPU_SEGMENTS 0x04000000 /* CPU supports Segmentation Control registers */
+#define MIPS_CPU_EVA 0x80000000 /* CPU supports Enhanced Virtual Addressing */
/*
* CPU ASE encodings
@@ -370,5 +374,6 @@ enum cpu_type_enum {
#define MIPS_ASE_MIPSMT 0x00000020 /* CPU supports MIPS MT */
#define MIPS_ASE_DSP2P 0x00000040 /* Signal Processing ASE Rev 2 */
#define MIPS_ASE_VZ 0x00000080 /* Virtualization ASE */
+#define MIPS_ASE_MSA 0x00000100 /* MIPS SIMD Architecture */
#endif /* _ASM_CPU_H */
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 84238c574d5e..06412aa9e3fb 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -49,9 +49,14 @@ static inline int dma_mapping_error(struct device *dev, u64 mask)
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
if(!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
+ if (ops->set_dma_mask)
+ return ops->set_dma_mask(dev, mask);
+
*dev->dma_mask = mask;
return 0;
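
Note: dma_set_mask() now defers to the dma_map_ops when the platform provides a set_dma_mask hook, so a platform may veto or adjust a driver's requested mask. Callers keep the usual fall-back idiom (hypothetical driver code):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Sketch: negotiate the widest DMA mask the platform accepts;
     * dma_set_mask() returns 0 on success. */
    static int example_setup_dma(struct device *dev)
    {
            if (dma_set_mask(dev, DMA_BIT_MASK(64)) &&
                dma_set_mask(dev, DMA_BIT_MASK(32)))
                    return -EIO;    /* no usable DMA addressing */
            return 0;
    }
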
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 6b9749540edf..4d86b72750c7 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -57,7 +57,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
return 0;
case FPU_64BIT:
-#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_MIPS64))
+#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT))
/* we only have a 32-bit FPU */
return SIGFPE;
#endif
@@ -180,7 +180,7 @@ static inline void restore_fp(struct task_struct *tsk)
_restore_fp(tsk);
}
-static inline fpureg_t *get_fpu_regs(struct task_struct *tsk)
+static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
{
if (tsk == current) {
preempt_disable();
diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
index ce35c9af0c28..992aaba603b5 100644
--- a/arch/mips/include/asm/ftrace.h
+++ b/arch/mips/include/asm/ftrace.h
@@ -22,12 +22,12 @@ extern void _mcount(void);
#define safe_load(load, src, dst, error) \
do { \
asm volatile ( \
- "1: " load " %[" STR(dst) "], 0(%[" STR(src) "])\n"\
- " li %[" STR(error) "], 0\n" \
+ "1: " load " %[tmp_dst], 0(%[tmp_src])\n" \
+ " li %[tmp_err], 0\n" \
"2:\n" \
\
".section .fixup, \"ax\"\n" \
- "3: li %[" STR(error) "], 1\n" \
+ "3: li %[tmp_err], 1\n" \
" j 2b\n" \
".previous\n" \
\
@@ -35,8 +35,8 @@ do { \
STR(PTR) "\t1b, 3b\n\t" \
".previous\n" \
\
- : [dst] "=&r" (dst), [error] "=r" (error)\
- : [src] "r" (src) \
+ : [tmp_dst] "=&r" (dst), [tmp_err] "=r" (error)\
+ : [tmp_src] "r" (src) \
: "memory" \
); \
} while (0)
@@ -44,12 +44,12 @@ do { \
#define safe_store(store, src, dst, error) \
do { \
asm volatile ( \
- "1: " store " %[" STR(src) "], 0(%[" STR(dst) "])\n"\
- " li %[" STR(error) "], 0\n" \
+ "1: " store " %[tmp_src], 0(%[tmp_dst])\n"\
+ " li %[tmp_err], 0\n" \
"2:\n" \
\
".section .fixup, \"ax\"\n" \
- "3: li %[" STR(error) "], 1\n" \
+ "3: li %[tmp_err], 1\n" \
" j 2b\n" \
".previous\n" \
\
@@ -57,8 +57,8 @@ do { \
STR(PTR) "\t1b, 3b\n\t" \
".previous\n" \
\
- : [error] "=r" (error) \
- : [dst] "r" (dst), [src] "r" (src)\
+ : [tmp_err] "=r" (error) \
+ : [tmp_dst] "r" (dst), [tmp_src] "r" (src)\
: "memory" \
); \
} while (0)
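
Note: the rename to fixed tmp_* operand names fixes these macros for callers whose arguments are not plain identifiers: the old code pasted the caller's argument text into the asm operand names, which only worked when the arguments happened to be simple variable names. The calling convention is unchanged; a sketch in the spirit of arch/mips/kernel/ftrace.c (hypothetical helper):

    #include <linux/errno.h>
    #include <asm/ftrace.h>

    /* Hypothetical: read one instruction word from kernel text,
     * catching faults via the .fixup section inside safe_load(). */
    static int example_read_insn(unsigned long ip, unsigned int *out)
    {
            unsigned int insn;
            int faulted;

            safe_load("lw", ip, insn, faulted);
            if (faulted)
                    return -EFAULT;

            *out = insn;
            return 0;
    }
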
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index 6ea15815d3ee..194cda0396a3 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -12,6 +12,7 @@
#include <linux/futex.h>
#include <linux/uaccess.h>
+#include <asm/asm-eva.h>
#include <asm/barrier.h>
#include <asm/errno.h>
#include <asm/war.h>
@@ -22,11 +23,11 @@
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
- " .set mips3 \n" \
+ " .set arch=r4000 \n" \
"1: ll %1, %4 # __futex_atomic_op \n" \
" .set mips0 \n" \
" " insn " \n" \
- " .set mips3 \n" \
+ " .set arch=r4000 \n" \
"2: sc $1, %2 \n" \
" beqzl $1, 1b \n" \
__WEAK_LLSC_MB \
@@ -48,12 +49,12 @@
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
- " .set mips3 \n" \
- "1: ll %1, %4 # __futex_atomic_op \n" \
+ " .set arch=r4000 \n" \
+ "1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \
" .set mips0 \n" \
" " insn " \n" \
- " .set mips3 \n" \
- "2: sc $1, %2 \n" \
+ " .set arch=r4000 \n" \
+ "2: "user_sc("$1", "%2")" \n" \
" beqz $1, 1b \n" \
__WEAK_LLSC_MB \
"3: \n" \
@@ -146,12 +147,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
"# futex_atomic_cmpxchg_inatomic \n"
" .set push \n"
" .set noat \n"
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: ll %1, %3 \n"
" bne %1, %z4, 3f \n"
" .set mips0 \n"
" move $1, %z5 \n"
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"2: sc $1, %2 \n"
" beqzl $1, 1b \n"
__WEAK_LLSC_MB
@@ -173,13 +174,13 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
"# futex_atomic_cmpxchg_inatomic \n"
" .set push \n"
" .set noat \n"
- " .set mips3 \n"
- "1: ll %1, %3 \n"
+ " .set arch=r4000 \n"
+ "1: "user_ll("%1", "%3")" \n"
" bne %1, %z4, 3f \n"
" .set mips0 \n"
" move $1, %z5 \n"
- " .set mips3 \n"
- "2: sc $1, %2 \n"
+ " .set arch=r4000 \n"
+ "2: "user_sc("$1", "%2")" \n"
" beqz $1, 1b \n"
__WEAK_LLSC_MB
"3: \n"
diff --git a/arch/mips/include/asm/fw/fw.h b/arch/mips/include/asm/fw/fw.h
index d6c50a7e9ede..f3e6978aad70 100644
--- a/arch/mips/include/asm/fw/fw.h
+++ b/arch/mips/include/asm/fw/fw.h
@@ -38,7 +38,7 @@ extern int *_fw_envp;
extern void fw_init_cmdline(void);
extern char *fw_getcmdline(void);
-extern fw_memblock_t *fw_getmdesc(void);
+extern fw_memblock_t *fw_getmdesc(int);
extern void fw_meminit(void);
extern char *fw_getenv(char *name);
extern unsigned long fw_getenvl(char *name);
diff --git a/arch/mips/include/asm/gcmpregs.h b/arch/mips/include/asm/gcmpregs.h
deleted file mode 100644
index a7359f77a48e..000000000000
--- a/arch/mips/include/asm/gcmpregs.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000, 07 MIPS Technologies, Inc.
- *
- * Multiprocessor Subsystem Register Definitions
- *
- */
-#ifndef _ASM_GCMPREGS_H
-#define _ASM_GCMPREGS_H
-
-
-/* Offsets to major blocks within GCMP from GCMP base */
-#define GCMP_GCB_OFS 0x0000 /* Global Control Block */
-#define GCMP_CLCB_OFS 0x2000 /* Core Local Control Block */
-#define GCMP_COCB_OFS 0x4000 /* Core Other Control Block */
-#define GCMP_GDB_OFS 0x8000 /* Global Debug Block */
-
-/* Offsets to individual GCMP registers from GCMP base */
-#define GCMPOFS(block, tag, reg) \
- (GCMP_##block##_OFS + GCMP_##tag##_##reg##_OFS)
-#define GCMPOFSn(block, tag, reg, n) \
- (GCMP_##block##_OFS + GCMP_##tag##_##reg##_OFS(n))
-
-#define GCMPGCBOFS(reg) GCMPOFS(GCB, GCB, reg)
-#define GCMPGCBOFSn(reg, n) GCMPOFSn(GCB, GCB, reg, n)
-#define GCMPCLCBOFS(reg) GCMPOFS(CLCB, CCB, reg)
-#define GCMPCOCBOFS(reg) GCMPOFS(COCB, CCB, reg)
-#define GCMPGDBOFS(reg) GCMPOFS(GDB, GDB, reg)
-
-/* GCMP register access */
-#define GCMPGCB(reg) REGP(_gcmp_base, GCMPGCBOFS(reg))
-#define GCMPGCBn(reg, n) REGP(_gcmp_base, GCMPGCBOFSn(reg, n))
-#define GCMPCLCB(reg) REGP(_gcmp_base, GCMPCLCBOFS(reg))
-#define GCMPCOCB(reg) REGP(_gcmp_base, GCMPCOCBOFS(reg))
-#define GCMPGDB(reg) REGP(_gcmp_base, GCMPGDBOFS(reg))
-
-/* Mask generation */
-#define GCMPMSK(block, reg, bits) (MSK(bits)<<GCMP_##block##_##reg##_SHF)
-#define GCMPGCBMSK(reg, bits) GCMPMSK(GCB, reg, bits)
-#define GCMPCCBMSK(reg, bits) GCMPMSK(CCB, reg, bits)
-#define GCMPGDBMSK(reg, bits) GCMPMSK(GDB, reg, bits)
-
-/* GCB registers */
-#define GCMP_GCB_GC_OFS 0x0000 /* Global Config Register */
-#define GCMP_GCB_GC_NUMIOCU_SHF 8
-#define GCMP_GCB_GC_NUMIOCU_MSK GCMPGCBMSK(GC_NUMIOCU, 4)
-#define GCMP_GCB_GC_NUMCORES_SHF 0
-#define GCMP_GCB_GC_NUMCORES_MSK GCMPGCBMSK(GC_NUMCORES, 8)
-#define GCMP_GCB_GCMPB_OFS 0x0008 /* Global GCMP Base */
-#define GCMP_GCB_GCMPB_GCMPBASE_SHF 15
-#define GCMP_GCB_GCMPB_GCMPBASE_MSK GCMPGCBMSK(GCMPB_GCMPBASE, 17)
-#define GCMP_GCB_GCMPB_CMDEFTGT_SHF 0
-#define GCMP_GCB_GCMPB_CMDEFTGT_MSK GCMPGCBMSK(GCMPB_CMDEFTGT, 2)
-#define GCMP_GCB_GCMPB_CMDEFTGT_DISABLED 0
-#define GCMP_GCB_GCMPB_CMDEFTGT_MEM 1
-#define GCMP_GCB_GCMPB_CMDEFTGT_IOCU1 2
-#define GCMP_GCB_GCMPB_CMDEFTGT_IOCU2 3
-#define GCMP_GCB_CCMC_OFS 0x0010 /* Global CM Control */
-#define GCMP_GCB_GCSRAP_OFS 0x0020 /* Global CSR Access Privilege */
-#define GCMP_GCB_GCSRAP_CMACCESS_SHF 0
-#define GCMP_GCB_GCSRAP_CMACCESS_MSK GCMPGCBMSK(GCSRAP_CMACCESS, 8)
-#define GCMP_GCB_GCMPREV_OFS 0x0030 /* GCMP Revision Register */
-#define GCMP_GCB_GCMEM_OFS 0x0040 /* Global CM Error Mask */
-#define GCMP_GCB_GCMEC_OFS 0x0048 /* Global CM Error Cause */
-#define GCMP_GCB_GMEC_ERROR_TYPE_SHF 27
-#define GCMP_GCB_GMEC_ERROR_TYPE_MSK GCMPGCBMSK(GMEC_ERROR_TYPE, 5)
-#define GCMP_GCB_GMEC_ERROR_INFO_SHF 0
-#define GCMP_GCB_GMEC_ERROR_INFO_MSK GCMPGCBMSK(GMEC_ERROR_INFO, 27)
-#define GCMP_GCB_GCMEA_OFS 0x0050 /* Global CM Error Address */
-#define GCMP_GCB_GCMEO_OFS 0x0058 /* Global CM Error Multiple */
-#define GCMP_GCB_GMEO_ERROR_2ND_SHF 0
-#define GCMP_GCB_GMEO_ERROR_2ND_MSK GCMPGCBMSK(GMEO_ERROR_2ND, 5)
-#define GCMP_GCB_GICBA_OFS 0x0080 /* Global Interrupt Controller Base Address */
-#define GCMP_GCB_GICBA_BASE_SHF 17
-#define GCMP_GCB_GICBA_BASE_MSK GCMPGCBMSK(GICBA_BASE, 15)
-#define GCMP_GCB_GICBA_EN_SHF 0
-#define GCMP_GCB_GICBA_EN_MSK GCMPGCBMSK(GICBA_EN, 1)
-
-/* GCB Regions */
-#define GCMP_GCB_CMxBASE_OFS(n) (0x0090+16*(n)) /* Global Region[0-3] Base Address */
-#define GCMP_GCB_CMxBASE_BASE_SHF 16
-#define GCMP_GCB_CMxBASE_BASE_MSK GCMPGCBMSK(CMxBASE_BASE, 16)
-#define GCMP_GCB_CMxMASK_OFS(n) (0x0098+16*(n)) /* Global Region[0-3] Address Mask */
-#define GCMP_GCB_CMxMASK_MASK_SHF 16
-#define GCMP_GCB_CMxMASK_MASK_MSK GCMPGCBMSK(CMxMASK_MASK, 16)
-#define GCMP_GCB_CMxMASK_CMREGTGT_SHF 0
-#define GCMP_GCB_CMxMASK_CMREGTGT_MSK GCMPGCBMSK(CMxMASK_CMREGTGT, 2)
-#define GCMP_GCB_CMxMASK_CMREGTGT_MEM 0
-#define GCMP_GCB_CMxMASK_CMREGTGT_MEM1 1
-#define GCMP_GCB_CMxMASK_CMREGTGT_IOCU1 2
-#define GCMP_GCB_CMxMASK_CMREGTGT_IOCU2 3
-
-
-/* Core local/Core other control block registers */
-#define GCMP_CCB_RESETR_OFS 0x0000 /* Reset Release */
-#define GCMP_CCB_RESETR_INRESET_SHF 0
-#define GCMP_CCB_RESETR_INRESET_MSK GCMPCCBMSK(RESETR_INRESET, 16)
-#define GCMP_CCB_COHCTL_OFS 0x0008 /* Coherence Control */
-#define GCMP_CCB_COHCTL_DOMAIN_SHF 0
-#define GCMP_CCB_COHCTL_DOMAIN_MSK GCMPCCBMSK(COHCTL_DOMAIN, 8)
-#define GCMP_CCB_CFG_OFS 0x0010 /* Config */
-#define GCMP_CCB_CFG_IOCUTYPE_SHF 10
-#define GCMP_CCB_CFG_IOCUTYPE_MSK GCMPCCBMSK(CFG_IOCUTYPE, 2)
-#define GCMP_CCB_CFG_IOCUTYPE_CPU 0
-#define GCMP_CCB_CFG_IOCUTYPE_NCIOCU 1
-#define GCMP_CCB_CFG_IOCUTYPE_CIOCU 2
-#define GCMP_CCB_CFG_NUMVPE_SHF 0
-#define GCMP_CCB_CFG_NUMVPE_MSK GCMPCCBMSK(CFG_NUMVPE, 10)
-#define GCMP_CCB_OTHER_OFS 0x0018 /* Other Address */
-#define GCMP_CCB_OTHER_CORENUM_SHF 16
-#define GCMP_CCB_OTHER_CORENUM_MSK GCMPCCBMSK(OTHER_CORENUM, 16)
-#define GCMP_CCB_RESETBASE_OFS 0x0020 /* Reset Exception Base */
-#define GCMP_CCB_RESETBASE_BEV_SHF 12
-#define GCMP_CCB_RESETBASE_BEV_MSK GCMPCCBMSK(RESETBASE_BEV, 20)
-#define GCMP_CCB_ID_OFS 0x0028 /* Identification */
-#define GCMP_CCB_DINTGROUP_OFS 0x0030 /* DINT Group Participate */
-#define GCMP_CCB_DBGGROUP_OFS 0x0100 /* DebugBreak Group */
-
-extern int __init gcmp_probe(unsigned long, unsigned long);
-extern int __init gcmp_niocu(void);
-extern void __init gcmp_setregion(int, unsigned long, unsigned long, int);
-#endif /* _ASM_GCMPREGS_H */
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h
index b2e3e93dd7d8..082716690589 100644
--- a/arch/mips/include/asm/gic.h
+++ b/arch/mips/include/asm/gic.h
@@ -11,6 +11,9 @@
#ifndef _ASM_GICREGS_H
#define _ASM_GICREGS_H
+#include <linux/bitmap.h>
+#include <linux/threads.h>
+
#undef GICISBYTELITTLEENDIAN
/* Constants */
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 3321dd5a8872..933b50e125a0 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -331,7 +331,7 @@ static inline void pfx##write##bwlq(type val, \
if (irq) \
local_irq_save(__flags); \
__asm__ __volatile__( \
- ".set mips3" "\t\t# __writeq""\n\t" \
+ ".set arch=r4000" "\t\t# __writeq""\n\t" \
"dsll32 %L0, %L0, 0" "\n\t" \
"dsrl32 %L0, %L0, 0" "\n\t" \
"dsll32 %M0, %M0, 0" "\n\t" \
@@ -361,7 +361,7 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
if (irq) \
local_irq_save(__flags); \
__asm__ __volatile__( \
- ".set mips3" "\t\t# __readq" "\n\t" \
+ ".set arch=r4000" "\t\t# __readq" "\n\t" \
"ld %L0, %1" "\n\t" \
"dsra32 %M0, %L0, 0" "\n\t" \
"sll %L0, %L0, 0" "\n\t" \
@@ -584,7 +584,7 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int
*
* This API used to be exported; it now is for arch code internal use only.
*/
-#ifdef CONFIG_DMA_NONCOHERENT
+#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
@@ -603,7 +603,7 @@ extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
#define dma_cache_inv(start,size) \
do { (void) (start); (void) (size); } while (0)
-#endif /* CONFIG_DMA_NONCOHERENT */
+#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
/*
* Read a 32-bit register that requires a 64-bit read cycle on the bus.
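
An aside on the `%L0`/`%M0` operands in the two hunks above: on 32-bit MIPS a 64-bit value occupies a register pair, and the asm shuffles the halves so that a single 64-bit `ld`/`sd` can issue while interrupts are masked. As a hedged sketch (ours, not part of the patch), the naive alternative below composes two 32-bit reads and shows why the asm is needed: the halves can tear if the device updates the register between reads. The little-endian lo/hi layout at `base`/`base + 4` is an assumption for illustration.

```c
#include <linux/io.h>
#include <linux/types.h>

/* Non-atomic 64-bit MMIO read built from two 32-bit accesses. */
static u64 readq_split_sketch(const void __iomem *base)
{
	u32 lo = __raw_readl(base);	/* bits 31:0, assumed at base    */
	u32 hi = __raw_readl(base + 4);	/* bits 63:32, assumed at base+4 */

	return ((u64)hi << 32) | lo;
}
```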
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index a995fce87791..060aaa6348d7 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -30,16 +30,16 @@
/* Special address that contains the comm page, used for reducing # of traps */
-#define KVM_GUEST_COMMPAGE_ADDR 0x0
+#define KVM_GUEST_COMMPAGE_ADDR 0x0
#define KVM_GUEST_KERNEL_MODE(vcpu) ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
-#define KVM_GUEST_KUSEG 0x00000000UL
-#define KVM_GUEST_KSEG0 0x40000000UL
-#define KVM_GUEST_KSEG23 0x60000000UL
-#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0x60000000)
-#define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
+#define KVM_GUEST_KUSEG 0x00000000UL
+#define KVM_GUEST_KSEG0 0x40000000UL
+#define KVM_GUEST_KSEG23 0x60000000UL
+#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0x60000000)
+#define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
#define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_GUEST_CKSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
@@ -52,17 +52,17 @@
#define KVM_GUEST_KSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
#define KVM_GUEST_KSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
-#define KVM_INVALID_PAGE 0xdeadbeef
-#define KVM_INVALID_INST 0xdeadbeef
-#define KVM_INVALID_ADDR 0xdeadbeef
+#define KVM_INVALID_PAGE 0xdeadbeef
+#define KVM_INVALID_INST 0xdeadbeef
+#define KVM_INVALID_ADDR 0xdeadbeef
-#define KVM_MALTA_GUEST_RTC_ADDR 0xb8000070UL
+#define KVM_MALTA_GUEST_RTC_ADDR 0xb8000070UL
-#define GUEST_TICKS_PER_JIFFY (40000000/HZ)
-#define MS_TO_NS(x) (x * 1E6L)
+#define GUEST_TICKS_PER_JIFFY (40000000/HZ)
+#define MS_TO_NS(x) (x * 1E6L)
-#define CAUSEB_DC 27
-#define CAUSEF_DC (_ULCAST_(1) << 27)
+#define CAUSEB_DC 27
+#define CAUSEF_DC (_ULCAST_(1) << 27)
struct kvm;
struct kvm_run;
@@ -126,8 +126,8 @@ struct kvm_arch {
int commpage_tlb;
};
-#define N_MIPS_COPROC_REGS 32
-#define N_MIPS_COPROC_SEL 8
+#define N_MIPS_COPROC_REGS 32
+#define N_MIPS_COPROC_SEL 8
struct mips_coproc {
unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
@@ -139,124 +139,124 @@ struct mips_coproc {
/*
* Coprocessor 0 register names
*/
-#define MIPS_CP0_TLB_INDEX 0
-#define MIPS_CP0_TLB_RANDOM 1
-#define MIPS_CP0_TLB_LOW 2
-#define MIPS_CP0_TLB_LO0 2
-#define MIPS_CP0_TLB_LO1 3
-#define MIPS_CP0_TLB_CONTEXT 4
-#define MIPS_CP0_TLB_PG_MASK 5
-#define MIPS_CP0_TLB_WIRED 6
-#define MIPS_CP0_HWRENA 7
-#define MIPS_CP0_BAD_VADDR 8
-#define MIPS_CP0_COUNT 9
-#define MIPS_CP0_TLB_HI 10
-#define MIPS_CP0_COMPARE 11
-#define MIPS_CP0_STATUS 12
-#define MIPS_CP0_CAUSE 13
-#define MIPS_CP0_EXC_PC 14
-#define MIPS_CP0_PRID 15
-#define MIPS_CP0_CONFIG 16
-#define MIPS_CP0_LLADDR 17
-#define MIPS_CP0_WATCH_LO 18
-#define MIPS_CP0_WATCH_HI 19
-#define MIPS_CP0_TLB_XCONTEXT 20
-#define MIPS_CP0_ECC 26
-#define MIPS_CP0_CACHE_ERR 27
-#define MIPS_CP0_TAG_LO 28
-#define MIPS_CP0_TAG_HI 29
-#define MIPS_CP0_ERROR_PC 30
-#define MIPS_CP0_DEBUG 23
-#define MIPS_CP0_DEPC 24
-#define MIPS_CP0_PERFCNT 25
-#define MIPS_CP0_ERRCTL 26
-#define MIPS_CP0_DATA_LO 28
-#define MIPS_CP0_DATA_HI 29
-#define MIPS_CP0_DESAVE 31
-
-#define MIPS_CP0_CONFIG_SEL 0
-#define MIPS_CP0_CONFIG1_SEL 1
-#define MIPS_CP0_CONFIG2_SEL 2
-#define MIPS_CP0_CONFIG3_SEL 3
+#define MIPS_CP0_TLB_INDEX 0
+#define MIPS_CP0_TLB_RANDOM 1
+#define MIPS_CP0_TLB_LOW 2
+#define MIPS_CP0_TLB_LO0 2
+#define MIPS_CP0_TLB_LO1 3
+#define MIPS_CP0_TLB_CONTEXT 4
+#define MIPS_CP0_TLB_PG_MASK 5
+#define MIPS_CP0_TLB_WIRED 6
+#define MIPS_CP0_HWRENA 7
+#define MIPS_CP0_BAD_VADDR 8
+#define MIPS_CP0_COUNT 9
+#define MIPS_CP0_TLB_HI 10
+#define MIPS_CP0_COMPARE 11
+#define MIPS_CP0_STATUS 12
+#define MIPS_CP0_CAUSE 13
+#define MIPS_CP0_EXC_PC 14
+#define MIPS_CP0_PRID 15
+#define MIPS_CP0_CONFIG 16
+#define MIPS_CP0_LLADDR 17
+#define MIPS_CP0_WATCH_LO 18
+#define MIPS_CP0_WATCH_HI 19
+#define MIPS_CP0_TLB_XCONTEXT 20
+#define MIPS_CP0_ECC 26
+#define MIPS_CP0_CACHE_ERR 27
+#define MIPS_CP0_TAG_LO 28
+#define MIPS_CP0_TAG_HI 29
+#define MIPS_CP0_ERROR_PC 30
+#define MIPS_CP0_DEBUG 23
+#define MIPS_CP0_DEPC 24
+#define MIPS_CP0_PERFCNT 25
+#define MIPS_CP0_ERRCTL 26
+#define MIPS_CP0_DATA_LO 28
+#define MIPS_CP0_DATA_HI 29
+#define MIPS_CP0_DESAVE 31
+
+#define MIPS_CP0_CONFIG_SEL 0
+#define MIPS_CP0_CONFIG1_SEL 1
+#define MIPS_CP0_CONFIG2_SEL 2
+#define MIPS_CP0_CONFIG3_SEL 3
/* Config0 register bits */
-#define CP0C0_M 31
-#define CP0C0_K23 28
-#define CP0C0_KU 25
-#define CP0C0_MDU 20
-#define CP0C0_MM 17
-#define CP0C0_BM 16
-#define CP0C0_BE 15
-#define CP0C0_AT 13
-#define CP0C0_AR 10
-#define CP0C0_MT 7
-#define CP0C0_VI 3
-#define CP0C0_K0 0
+#define CP0C0_M 31
+#define CP0C0_K23 28
+#define CP0C0_KU 25
+#define CP0C0_MDU 20
+#define CP0C0_MM 17
+#define CP0C0_BM 16
+#define CP0C0_BE 15
+#define CP0C0_AT 13
+#define CP0C0_AR 10
+#define CP0C0_MT 7
+#define CP0C0_VI 3
+#define CP0C0_K0 0
/* Config1 register bits */
-#define CP0C1_M 31
-#define CP0C1_MMU 25
-#define CP0C1_IS 22
-#define CP0C1_IL 19
-#define CP0C1_IA 16
-#define CP0C1_DS 13
-#define CP0C1_DL 10
-#define CP0C1_DA 7
-#define CP0C1_C2 6
-#define CP0C1_MD 5
-#define CP0C1_PC 4
-#define CP0C1_WR 3
-#define CP0C1_CA 2
-#define CP0C1_EP 1
-#define CP0C1_FP 0
+#define CP0C1_M 31
+#define CP0C1_MMU 25
+#define CP0C1_IS 22
+#define CP0C1_IL 19
+#define CP0C1_IA 16
+#define CP0C1_DS 13
+#define CP0C1_DL 10
+#define CP0C1_DA 7
+#define CP0C1_C2 6
+#define CP0C1_MD 5
+#define CP0C1_PC 4
+#define CP0C1_WR 3
+#define CP0C1_CA 2
+#define CP0C1_EP 1
+#define CP0C1_FP 0
/* Config2 Register bits */
-#define CP0C2_M 31
-#define CP0C2_TU 28
-#define CP0C2_TS 24
-#define CP0C2_TL 20
-#define CP0C2_TA 16
-#define CP0C2_SU 12
-#define CP0C2_SS 8
-#define CP0C2_SL 4
-#define CP0C2_SA 0
+#define CP0C2_M 31
+#define CP0C2_TU 28
+#define CP0C2_TS 24
+#define CP0C2_TL 20
+#define CP0C2_TA 16
+#define CP0C2_SU 12
+#define CP0C2_SS 8
+#define CP0C2_SL 4
+#define CP0C2_SA 0
/* Config3 Register bits */
-#define CP0C3_M 31
-#define CP0C3_ISA_ON_EXC 16
-#define CP0C3_ULRI 13
-#define CP0C3_DSPP 10
-#define CP0C3_LPA 7
-#define CP0C3_VEIC 6
-#define CP0C3_VInt 5
-#define CP0C3_SP 4
-#define CP0C3_MT 2
-#define CP0C3_SM 1
-#define CP0C3_TL 0
+#define CP0C3_M 31
+#define CP0C3_ISA_ON_EXC 16
+#define CP0C3_ULRI 13
+#define CP0C3_DSPP 10
+#define CP0C3_LPA 7
+#define CP0C3_VEIC 6
+#define CP0C3_VInt 5
+#define CP0C3_SP 4
+#define CP0C3_MT 2
+#define CP0C3_SM 1
+#define CP0C3_TL 0
/* Have config1, Cacheable, noncoherent, write-back, write allocate*/
-#define MIPS_CONFIG0 \
+#define MIPS_CONFIG0 \
((1 << CP0C0_M) | (0x3 << CP0C0_K0))
/* Have config2, no coprocessor2 attached, no MDMX support attached,
no performance counters, watch registers present,
no code compression, EJTAG present, no FPU, no watch registers */
-#define MIPS_CONFIG1 \
-((1 << CP0C1_M) | \
- (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \
- (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \
+#define MIPS_CONFIG1 \
+((1 << CP0C1_M) | \
+ (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \
+ (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \
(0 << CP0C1_FP))
/* Have config3, no tertiary/secondary caches implemented */
-#define MIPS_CONFIG2 \
+#define MIPS_CONFIG2 \
((1 << CP0C2_M))
/* No config4, no DSP ASE, no large physaddr (PABITS),
no external interrupt controller, no vectored interrupts,
no 1kb pages, no SmartMIPS ASE, no trace logic */
-#define MIPS_CONFIG3 \
-((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \
- (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) | \
+#define MIPS_CONFIG3 \
+((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \
+ (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) | \
(0 << CP0C3_SM) | (0 << CP0C3_TL))
/* MMU types, the first four entries have the same layout as the
@@ -274,36 +274,36 @@ enum mips_mmu_types {
/*
* Trap codes
*/
-#define T_INT 0 /* Interrupt pending */
-#define T_TLB_MOD 1 /* TLB modified fault */
-#define T_TLB_LD_MISS 2 /* TLB miss on load or ifetch */
-#define T_TLB_ST_MISS 3 /* TLB miss on a store */
-#define T_ADDR_ERR_LD 4 /* Address error on a load or ifetch */
-#define T_ADDR_ERR_ST 5 /* Address error on a store */
-#define T_BUS_ERR_IFETCH 6 /* Bus error on an ifetch */
-#define T_BUS_ERR_LD_ST 7 /* Bus error on a load or store */
-#define T_SYSCALL 8 /* System call */
-#define T_BREAK 9 /* Breakpoint */
-#define T_RES_INST 10 /* Reserved instruction exception */
-#define T_COP_UNUSABLE 11 /* Coprocessor unusable */
-#define T_OVFLOW 12 /* Arithmetic overflow */
+#define T_INT 0 /* Interrupt pending */
+#define T_TLB_MOD 1 /* TLB modified fault */
+#define T_TLB_LD_MISS 2 /* TLB miss on load or ifetch */
+#define T_TLB_ST_MISS 3 /* TLB miss on a store */
+#define T_ADDR_ERR_LD 4 /* Address error on a load or ifetch */
+#define T_ADDR_ERR_ST 5 /* Address error on a store */
+#define T_BUS_ERR_IFETCH 6 /* Bus error on an ifetch */
+#define T_BUS_ERR_LD_ST 7 /* Bus error on a load or store */
+#define T_SYSCALL 8 /* System call */
+#define T_BREAK 9 /* Breakpoint */
+#define T_RES_INST 10 /* Reserved instruction exception */
+#define T_COP_UNUSABLE 11 /* Coprocessor unusable */
+#define T_OVFLOW 12 /* Arithmetic overflow */
/*
* Trap definitions added for r4000 port.
*/
-#define T_TRAP 13 /* Trap instruction */
-#define T_VCEI 14 /* Virtual coherency exception */
-#define T_FPE 15 /* Floating point exception */
-#define T_WATCH 23 /* Watch address reference */
-#define T_VCED 31 /* Virtual coherency data */
+#define T_TRAP 13 /* Trap instruction */
+#define T_VCEI 14 /* Virtual coherency exception */
+#define T_FPE 15 /* Floating point exception */
+#define T_WATCH 23 /* Watch address reference */
+#define T_VCED 31 /* Virtual coherency data */
/* Resume Flags */
-#define RESUME_FLAG_DR (1<<0) /* Reload guest nonvolatile state? */
-#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
+#define RESUME_FLAG_DR (1<<0) /* Reload guest nonvolatile state? */
+#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
-#define RESUME_GUEST 0
-#define RESUME_GUEST_DR RESUME_FLAG_DR
-#define RESUME_HOST RESUME_FLAG_HOST
+#define RESUME_GUEST 0
+#define RESUME_GUEST_DR RESUME_FLAG_DR
+#define RESUME_HOST RESUME_FLAG_HOST
enum emulation_result {
EMULATE_DONE, /* no further processing */
@@ -313,24 +313,27 @@ enum emulation_result {
EMULATE_PRIV_FAIL,
};
-#define MIPS3_PG_G 0x00000001 /* Global; ignore ASID if in lo0 & lo1 */
-#define MIPS3_PG_V 0x00000002 /* Valid */
-#define MIPS3_PG_NV 0x00000000
-#define MIPS3_PG_D 0x00000004 /* Dirty */
+#define MIPS3_PG_G 0x00000001 /* Global; ignore ASID if in lo0 & lo1 */
+#define MIPS3_PG_V 0x00000002 /* Valid */
+#define MIPS3_PG_NV 0x00000000
+#define MIPS3_PG_D 0x00000004 /* Dirty */
#define mips3_paddr_to_tlbpfn(x) \
- (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
+ (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
#define mips3_tlbpfn_to_paddr(x) \
- ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)
+ ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)
-#define MIPS3_PG_SHIFT 6
-#define MIPS3_PG_FRAME 0x3fffffc0
+#define MIPS3_PG_SHIFT 6
+#define MIPS3_PG_FRAME 0x3fffffc0
-#define VPN2_MASK 0xffffe000
-#define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G))
-#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
-#define TLB_ASID(x) ((x).tlb_hi & ASID_MASK)
-#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V))
+#define VPN2_MASK 0xffffe000
+#define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && \
+ ((x).tlb_lo1 & MIPS3_PG_G))
+#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
+#define TLB_ASID(x) ((x).tlb_hi & ASID_MASK)
+#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) \
+ ? ((x).tlb_lo1 & MIPS3_PG_V) \
+ : ((x).tlb_lo0 & MIPS3_PG_V))
struct kvm_mips_tlb {
long tlb_mask;
@@ -339,7 +342,7 @@ struct kvm_mips_tlb {
long tlb_lo1;
};
-#define KVM_MIPS_GUEST_TLB_SIZE 64
+#define KVM_MIPS_GUEST_TLB_SIZE 64
struct kvm_vcpu_arch {
void *host_ebase, *guest_ebase;
unsigned long host_stack;
@@ -400,65 +403,67 @@ struct kvm_vcpu_arch {
};
-#define kvm_read_c0_guest_index(cop0) (cop0->reg[MIPS_CP0_TLB_INDEX][0])
-#define kvm_write_c0_guest_index(cop0, val) (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
-#define kvm_read_c0_guest_entrylo0(cop0) (cop0->reg[MIPS_CP0_TLB_LO0][0])
-#define kvm_read_c0_guest_entrylo1(cop0) (cop0->reg[MIPS_CP0_TLB_LO1][0])
-#define kvm_read_c0_guest_context(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
-#define kvm_write_c0_guest_context(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
-#define kvm_read_c0_guest_userlocal(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
-#define kvm_read_c0_guest_pagemask(cop0) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
-#define kvm_write_c0_guest_pagemask(cop0, val) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
-#define kvm_read_c0_guest_wired(cop0) (cop0->reg[MIPS_CP0_TLB_WIRED][0])
-#define kvm_write_c0_guest_wired(cop0, val) (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
-#define kvm_read_c0_guest_badvaddr(cop0) (cop0->reg[MIPS_CP0_BAD_VADDR][0])
-#define kvm_write_c0_guest_badvaddr(cop0, val) (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
-#define kvm_read_c0_guest_count(cop0) (cop0->reg[MIPS_CP0_COUNT][0])
-#define kvm_write_c0_guest_count(cop0, val) (cop0->reg[MIPS_CP0_COUNT][0] = (val))
-#define kvm_read_c0_guest_entryhi(cop0) (cop0->reg[MIPS_CP0_TLB_HI][0])
-#define kvm_write_c0_guest_entryhi(cop0, val) (cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
-#define kvm_read_c0_guest_compare(cop0) (cop0->reg[MIPS_CP0_COMPARE][0])
-#define kvm_write_c0_guest_compare(cop0, val) (cop0->reg[MIPS_CP0_COMPARE][0] = (val))
-#define kvm_read_c0_guest_status(cop0) (cop0->reg[MIPS_CP0_STATUS][0])
-#define kvm_write_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] = (val))
-#define kvm_read_c0_guest_intctl(cop0) (cop0->reg[MIPS_CP0_STATUS][1])
-#define kvm_write_c0_guest_intctl(cop0, val) (cop0->reg[MIPS_CP0_STATUS][1] = (val))
-#define kvm_read_c0_guest_cause(cop0) (cop0->reg[MIPS_CP0_CAUSE][0])
-#define kvm_write_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] = (val))
-#define kvm_read_c0_guest_epc(cop0) (cop0->reg[MIPS_CP0_EXC_PC][0])
-#define kvm_write_c0_guest_epc(cop0, val) (cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
-#define kvm_read_c0_guest_prid(cop0) (cop0->reg[MIPS_CP0_PRID][0])
-#define kvm_write_c0_guest_prid(cop0, val) (cop0->reg[MIPS_CP0_PRID][0] = (val))
-#define kvm_read_c0_guest_ebase(cop0) (cop0->reg[MIPS_CP0_PRID][1])
-#define kvm_write_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] = (val))
-#define kvm_read_c0_guest_config(cop0) (cop0->reg[MIPS_CP0_CONFIG][0])
-#define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1])
-#define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2])
-#define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3])
-#define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7])
-#define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
-#define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
-#define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
-#define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
-#define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
-#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
-#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
-
-#define kvm_set_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
-#define kvm_clear_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
-#define kvm_set_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] |= (val))
-#define kvm_clear_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] &= ~(val))
-#define kvm_change_c0_guest_cause(cop0, change, val) \
-{ \
- kvm_clear_c0_guest_cause(cop0, change); \
- kvm_set_c0_guest_cause(cop0, ((val) & (change))); \
+#define kvm_read_c0_guest_index(cop0) (cop0->reg[MIPS_CP0_TLB_INDEX][0])
+#define kvm_write_c0_guest_index(cop0, val) (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
+#define kvm_read_c0_guest_entrylo0(cop0) (cop0->reg[MIPS_CP0_TLB_LO0][0])
+#define kvm_read_c0_guest_entrylo1(cop0) (cop0->reg[MIPS_CP0_TLB_LO1][0])
+#define kvm_read_c0_guest_context(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
+#define kvm_write_c0_guest_context(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
+#define kvm_read_c0_guest_userlocal(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
+#define kvm_read_c0_guest_pagemask(cop0) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
+#define kvm_write_c0_guest_pagemask(cop0, val) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
+#define kvm_read_c0_guest_wired(cop0) (cop0->reg[MIPS_CP0_TLB_WIRED][0])
+#define kvm_write_c0_guest_wired(cop0, val) (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
+#define kvm_read_c0_guest_hwrena(cop0) (cop0->reg[MIPS_CP0_HWRENA][0])
+#define kvm_write_c0_guest_hwrena(cop0, val) (cop0->reg[MIPS_CP0_HWRENA][0] = (val))
+#define kvm_read_c0_guest_badvaddr(cop0) (cop0->reg[MIPS_CP0_BAD_VADDR][0])
+#define kvm_write_c0_guest_badvaddr(cop0, val) (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
+#define kvm_read_c0_guest_count(cop0) (cop0->reg[MIPS_CP0_COUNT][0])
+#define kvm_write_c0_guest_count(cop0, val) (cop0->reg[MIPS_CP0_COUNT][0] = (val))
+#define kvm_read_c0_guest_entryhi(cop0) (cop0->reg[MIPS_CP0_TLB_HI][0])
+#define kvm_write_c0_guest_entryhi(cop0, val) (cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
+#define kvm_read_c0_guest_compare(cop0) (cop0->reg[MIPS_CP0_COMPARE][0])
+#define kvm_write_c0_guest_compare(cop0, val) (cop0->reg[MIPS_CP0_COMPARE][0] = (val))
+#define kvm_read_c0_guest_status(cop0) (cop0->reg[MIPS_CP0_STATUS][0])
+#define kvm_write_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] = (val))
+#define kvm_read_c0_guest_intctl(cop0) (cop0->reg[MIPS_CP0_STATUS][1])
+#define kvm_write_c0_guest_intctl(cop0, val) (cop0->reg[MIPS_CP0_STATUS][1] = (val))
+#define kvm_read_c0_guest_cause(cop0) (cop0->reg[MIPS_CP0_CAUSE][0])
+#define kvm_write_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] = (val))
+#define kvm_read_c0_guest_epc(cop0) (cop0->reg[MIPS_CP0_EXC_PC][0])
+#define kvm_write_c0_guest_epc(cop0, val) (cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
+#define kvm_read_c0_guest_prid(cop0) (cop0->reg[MIPS_CP0_PRID][0])
+#define kvm_write_c0_guest_prid(cop0, val) (cop0->reg[MIPS_CP0_PRID][0] = (val))
+#define kvm_read_c0_guest_ebase(cop0) (cop0->reg[MIPS_CP0_PRID][1])
+#define kvm_write_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] = (val))
+#define kvm_read_c0_guest_config(cop0) (cop0->reg[MIPS_CP0_CONFIG][0])
+#define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1])
+#define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2])
+#define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3])
+#define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7])
+#define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
+#define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
+#define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
+#define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
+#define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
+#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
+#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
+
+#define kvm_set_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
+#define kvm_clear_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
+#define kvm_set_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] |= (val))
+#define kvm_clear_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] &= ~(val))
+#define kvm_change_c0_guest_cause(cop0, change, val) \
+{ \
+ kvm_clear_c0_guest_cause(cop0, change); \
+ kvm_set_c0_guest_cause(cop0, ((val) & (change))); \
}
-#define kvm_set_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] |= (val))
-#define kvm_clear_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
-#define kvm_change_c0_guest_ebase(cop0, change, val) \
-{ \
- kvm_clear_c0_guest_ebase(cop0, change); \
- kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
+#define kvm_set_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] |= (val))
+#define kvm_clear_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
+#define kvm_change_c0_guest_ebase(cop0, change, val) \
+{ \
+ kvm_clear_c0_guest_ebase(cop0, change); \
+ kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
}
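
Usage sketch for the guest CP0 accessors above (our illustration, not from the patch): the `kvm_change_*` helpers clear the `change` bits first, then OR in `val & change`. Setting Cause.DC, which stops the guest Count register, therefore looks like:

```c
/* Sketch: stop the guest timer by setting Cause.DC (helper name ours). */
static void guest_stop_count_sketch(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* Expands to: cause &= ~CAUSEF_DC; then cause |= CAUSEF_DC. */
	kvm_change_c0_guest_cause(cop0, CAUSEF_DC, CAUSEF_DC);
}
```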
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index d44622cd74be..46dfc3c1fd49 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -33,7 +33,7 @@ static __inline__ long local_add_return(long i, local_t * l)
unsigned long temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1:" __LL "%1, %2 # local_add_return \n"
" addu %0, %1, %3 \n"
__SC "%0, %2 \n"
@@ -47,7 +47,7 @@ static __inline__ long local_add_return(long i, local_t * l)
unsigned long temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1:" __LL "%1, %2 # local_add_return \n"
" addu %0, %1, %3 \n"
__SC "%0, %2 \n"
@@ -78,7 +78,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
unsigned long temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1:" __LL "%1, %2 # local_sub_return \n"
" subu %0, %1, %3 \n"
__SC "%0, %2 \n"
@@ -92,7 +92,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
unsigned long temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1:" __LL "%1, %2 # local_sub_return \n"
" subu %0, %1, %3 \n"
__SC "%0, %2 \n"
diff --git a/arch/mips/include/asm/mach-au1x00/au1000.h b/arch/mips/include/asm/mach-au1x00/au1000.h
index 54f9e84db8ac..b4c3ecb17d48 100644
--- a/arch/mips/include/asm/mach-au1x00/au1000.h
+++ b/arch/mips/include/asm/mach-au1x00/au1000.h
@@ -1161,18 +1161,6 @@ enum soc_au1200_ints {
#define MAC_RX_BUFF3_STATUS 0x30
#define MAC_RX_BUFF3_ADDR 0x34
-#define UART_RX 0 /* Receive buffer */
-#define UART_TX 4 /* Transmit buffer */
-#define UART_IER 8 /* Interrupt Enable Register */
-#define UART_IIR 0xC /* Interrupt ID Register */
-#define UART_FCR 0x10 /* FIFO Control Register */
-#define UART_LCR 0x14 /* Line Control Register */
-#define UART_MCR 0x18 /* Modem Control Register */
-#define UART_LSR 0x1C /* Line Status Register */
-#define UART_MSR 0x20 /* Modem Status Register */
-#define UART_CLK 0x28 /* Baud Rate Clock Divider */
-#define UART_MOD_CNTRL 0x100 /* Module Control */
-
/* SSIO */
#define SSI0_STATUS 0xB1600000
# define SSI_STATUS_BF (1 << 4)
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
index 40005fb39618..bba7399a49a3 100644
--- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
+++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
@@ -27,7 +27,11 @@ enum bcm47xx_board {
BCM47XX_BOARD_ASUS_WL700GE,
BCM47XX_BOARD_ASUS_WLHDD,
+ BCM47XX_BOARD_BELKIN_F7D3301,
+ BCM47XX_BOARD_BELKIN_F7D3302,
BCM47XX_BOARD_BELKIN_F7D4301,
+ BCM47XX_BOARD_BELKIN_F7D4302,
+ BCM47XX_BOARD_BELKIN_F7D4401,
BCM47XX_BOARD_BUFFALO_WBR2_G54,
BCM47XX_BOARD_BUFFALO_WHR2_A54G54,
@@ -66,7 +70,7 @@ enum bcm47xx_board {
BCM47XX_BOARD_LINKSYS_WRT310NV1,
BCM47XX_BOARD_LINKSYS_WRT310NV2,
BCM47XX_BOARD_LINKSYS_WRT54G3GV2,
- BCM47XX_BOARD_LINKSYS_WRT54GSV1,
+ BCM47XX_BOARD_LINKSYS_WRT54G,
BCM47XX_BOARD_LINKSYS_WRT610NV1,
BCM47XX_BOARD_LINKSYS_WRT610NV2,
BCM47XX_BOARD_LINKSYS_WRTSL54GS,
@@ -94,6 +98,8 @@ enum bcm47xx_board {
BCM47XX_BOARD_PHICOMM_M1,
+ BCM47XX_BOARD_SIEMENS_SE505V2,
+
BCM47XX_BOARD_SIMPLETECH_SIMPLESHARE,
BCM47XX_BOARD_ZTE_H218N,
diff --git a/arch/mips/include/asm/mach-db1x00/db1200.h b/arch/mips/include/asm/mach-db1x00/db1200.h
deleted file mode 100644
index d3cce7326dd4..000000000000
--- a/arch/mips/include/asm/mach-db1x00/db1200.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * AMD Alchemy DBAu1200 Reference Board
- * Board register defines.
- *
- * ########################################################################
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * ########################################################################
- *
- *
- */
-#ifndef __ASM_DB1200_H
-#define __ASM_DB1200_H
-
-#include <linux/types.h>
-#include <asm/mach-au1x00/au1000.h>
-#include <asm/mach-au1x00/au1xxx_psc.h>
-
-/* Bit positions for the different interrupt sources */
-#define BCSR_INT_IDE 0x0001
-#define BCSR_INT_ETH 0x0002
-#define BCSR_INT_PC0 0x0004
-#define BCSR_INT_PC0STSCHG 0x0008
-#define BCSR_INT_PC1 0x0010
-#define BCSR_INT_PC1STSCHG 0x0020
-#define BCSR_INT_DC 0x0040
-#define BCSR_INT_FLASHBUSY 0x0080
-#define BCSR_INT_PC0INSERT 0x0100
-#define BCSR_INT_PC0EJECT 0x0200
-#define BCSR_INT_PC1INSERT 0x0400
-#define BCSR_INT_PC1EJECT 0x0800
-#define BCSR_INT_SD0INSERT 0x1000
-#define BCSR_INT_SD0EJECT 0x2000
-#define BCSR_INT_SD1INSERT 0x4000
-#define BCSR_INT_SD1EJECT 0x8000
-
-#define IDE_REG_SHIFT 5
-
-#define DB1200_IDE_PHYS_ADDR 0x18800000
-#define DB1200_IDE_PHYS_LEN (16 << IDE_REG_SHIFT)
-#define DB1200_ETH_PHYS_ADDR 0x19000300
-#define DB1200_NAND_PHYS_ADDR 0x20000000
-
-#define PB1200_IDE_PHYS_ADDR 0x0C800000
-#define PB1200_ETH_PHYS_ADDR 0x0D000300
-#define PB1200_NAND_PHYS_ADDR 0x1C000000
-
-/*
- * External Interrupts for DBAu1200 as of 8/6/2004.
- * Bit positions in the CPLD registers can be calculated by taking
- * the interrupt define and subtracting the DB1200_INT_BEGIN value.
- *
- * Example: IDE bis pos is = 64 - 64
- * ETH bit pos is = 65 - 64
- */
-enum external_db1200_ints {
- DB1200_INT_BEGIN = AU1000_MAX_INTR + 1,
-
- DB1200_IDE_INT = DB1200_INT_BEGIN,
- DB1200_ETH_INT,
- DB1200_PC0_INT,
- DB1200_PC0_STSCHG_INT,
- DB1200_PC1_INT,
- DB1200_PC1_STSCHG_INT,
- DB1200_DC_INT,
- DB1200_FLASHBUSY_INT,
- DB1200_PC0_INSERT_INT,
- DB1200_PC0_EJECT_INT,
- DB1200_PC1_INSERT_INT,
- DB1200_PC1_EJECT_INT,
- DB1200_SD0_INSERT_INT,
- DB1200_SD0_EJECT_INT,
- PB1200_SD1_INSERT_INT,
- PB1200_SD1_EJECT_INT,
-
- DB1200_INT_END = DB1200_INT_BEGIN + 15,
-};
-
-#endif /* __ASM_DB1200_H */
diff --git a/arch/mips/include/asm/mach-db1x00/db1300.h b/arch/mips/include/asm/mach-db1x00/db1300.h
deleted file mode 100644
index 3d1ede46f059..000000000000
--- a/arch/mips/include/asm/mach-db1x00/db1300.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * NetLogic DB1300 board constants
- */
-
-#ifndef _DB1300_H_
-#define _DB1300_H_
-
-/* FPGA (external mux) interrupt sources */
-#define DB1300_FIRST_INT (ALCHEMY_GPIC_INT_LAST + 1)
-#define DB1300_IDE_INT (DB1300_FIRST_INT + 0)
-#define DB1300_ETH_INT (DB1300_FIRST_INT + 1)
-#define DB1300_CF_INT (DB1300_FIRST_INT + 2)
-#define DB1300_VIDEO_INT (DB1300_FIRST_INT + 4)
-#define DB1300_HDMI_INT (DB1300_FIRST_INT + 5)
-#define DB1300_DC_INT (DB1300_FIRST_INT + 6)
-#define DB1300_FLASH_INT (DB1300_FIRST_INT + 7)
-#define DB1300_CF_INSERT_INT (DB1300_FIRST_INT + 8)
-#define DB1300_CF_EJECT_INT (DB1300_FIRST_INT + 9)
-#define DB1300_AC97_INT (DB1300_FIRST_INT + 10)
-#define DB1300_AC97_PEN_INT (DB1300_FIRST_INT + 11)
-#define DB1300_SD1_INSERT_INT (DB1300_FIRST_INT + 12)
-#define DB1300_SD1_EJECT_INT (DB1300_FIRST_INT + 13)
-#define DB1300_OTG_VBUS_OC_INT (DB1300_FIRST_INT + 14)
-#define DB1300_HOST_VBUS_OC_INT (DB1300_FIRST_INT + 15)
-#define DB1300_LAST_INT (DB1300_FIRST_INT + 15)
-
-/* SMSC9210 CS */
-#define DB1300_ETH_PHYS_ADDR 0x19000000
-#define DB1300_ETH_PHYS_END 0x197fffff
-
-/* ATA CS */
-#define DB1300_IDE_PHYS_ADDR 0x18800000
-#define DB1300_IDE_REG_SHIFT 5
-#define DB1300_IDE_PHYS_LEN (16 << DB1300_IDE_REG_SHIFT)
-
-/* NAND CS */
-#define DB1300_NAND_PHYS_ADDR 0x20000000
-#define DB1300_NAND_PHYS_END 0x20000fff
-
-#endif /* _DB1300_H_ */
diff --git a/arch/mips/include/asm/mach-loongson/boot_param.h b/arch/mips/include/asm/mach-loongson/boot_param.h
new file mode 100644
index 000000000000..829a7ec185fb
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson/boot_param.h
@@ -0,0 +1,163 @@
+#ifndef __ASM_MACH_LOONGSON_BOOT_PARAM_H_
+#define __ASM_MACH_LOONGSON_BOOT_PARAM_H_
+
+#define SYSTEM_RAM_LOW 1
+#define SYSTEM_RAM_HIGH 2
+#define MEM_RESERVED 3
+#define PCI_IO 4
+#define PCI_MEM 5
+#define LOONGSON_CFG_REG 6
+#define VIDEO_ROM 7
+#define ADAPTER_ROM 8
+#define ACPI_TABLE 9
+#define MAX_MEMORY_TYPE 10
+
+#define LOONGSON3_BOOT_MEM_MAP_MAX 128
+struct efi_memory_map_loongson {
+ u16 vers; /* version of efi_memory_map */
+ u32 nr_map; /* number of memory_maps */
+	u32 mem_freq;	/* memory frequency */
+ struct mem_map {
+ u32 node_id; /* node_id which memory attached to */
+ u32 mem_type; /* system memory, pci memory, pci io, etc. */
+ u64 mem_start; /* memory map start address */
+ u32 mem_size; /* each memory_map size, not the total size */
+ } map[LOONGSON3_BOOT_MEM_MAP_MAX];
+} __packed;
+
+enum loongson_cpu_type {
+ Loongson_2E = 0,
+ Loongson_2F = 1,
+ Loongson_3A = 2,
+ Loongson_3B = 3,
+ Loongson_1A = 4,
+ Loongson_1B = 5
+};
+
+/*
+ * Capability and feature descriptor structure for MIPS CPU
+ */
+struct efi_cpuinfo_loongson {
+ u16 vers; /* version of efi_cpuinfo_loongson */
+ u32 processor_id; /* PRID, e.g. 6305, 6306 */
+ u32 cputype; /* Loongson_3A/3B, etc. */
+ u32 total_node; /* num of total numa nodes */
+ u32 cpu_startup_core_id; /* Core id */
+ u32 cpu_clock_freq; /* cpu_clock */
+ u32 nr_cpus;
+} __packed;
+
+struct system_loongson {
+ u16 vers; /* version of system_loongson */
+ u32 ccnuma_smp; /* 0: no numa; 1: has numa */
+ u32 sing_double_channel; /* 1:single; 2:double */
+} __packed;
+
+struct irq_source_routing_table {
+ u16 vers;
+ u16 size;
+ u16 rtr_bus;
+ u16 rtr_devfn;
+ u32 vendor;
+ u32 device;
+	u32 PIC_type;	/* whether HT or PCI is used to route to the CPU PIC */
+ u64 ht_int_bit; /* 3A: 1<<24; 3B: 1<<16 */
+ u64 ht_enable; /* irqs used in this PIC */
+ u32 node_id; /* node id: 0x0-0; 0x1-1; 0x10-2; 0x11-3 */
+ u64 pci_mem_start_addr;
+ u64 pci_mem_end_addr;
+ u64 pci_io_start_addr;
+ u64 pci_io_end_addr;
+ u64 pci_config_addr;
+ u32 dma_mask_bits;
+} __packed;
+
+struct interface_info {
+	u16 vers;	/* version of the specification */
+ u16 size;
+ u8 flag;
+ char description[64];
+} __packed;
+
+#define MAX_RESOURCE_NUMBER 128
+struct resource_loongson {
+ u64 start; /* resource start address */
+ u64 end; /* resource end address */
+ char name[64];
+ u32 flags;
+};
+
+struct archdev_data {}; /* arch specific additions */
+
+struct board_devices {
+ char name[64]; /* hold the device name */
+ u32 num_resources; /* number of device_resource */
+ /* for each device's resource */
+ struct resource_loongson resource[MAX_RESOURCE_NUMBER];
+ /* arch specific additions */
+ struct archdev_data archdata;
+};
+
+struct loongson_special_attribute {
+	u16 vers;	/* version of this special attribute */
+	char special_name[64];	/* special attribute name */
+ u32 loongson_special_type; /* type of special device */
+ /* for each device's resource */
+ struct resource_loongson resource[MAX_RESOURCE_NUMBER];
+};
+
+struct loongson_params {
+ u64 memory_offset; /* efi_memory_map_loongson struct offset */
+ u64 cpu_offset; /* efi_cpuinfo_loongson struct offset */
+ u64 system_offset; /* system_loongson struct offset */
+ u64 irq_offset; /* irq_source_routing_table struct offset */
+ u64 interface_offset; /* interface_info struct offset */
+ u64 special_offset; /* loongson_special_attribute struct offset */
+ u64 boarddev_table_offset; /* board_devices offset */
+};
+
+struct smbios_tables {
+ u16 vers; /* version of smbios */
+ u64 vga_bios; /* vga_bios address */
+ struct loongson_params lp;
+};
+
+struct efi_reset_system_t {
+ u64 ResetCold;
+ u64 ResetWarm;
+ u64 ResetType;
+ u64 Shutdown;
+	u64 DoSuspend;	/* NULL if not supported */
+};
+
+struct efi_loongson {
+ u64 mps; /* MPS table */
+ u64 acpi; /* ACPI table (IA64 ext 0.71) */
+ u64 acpi20; /* ACPI table (ACPI 2.0) */
+ struct smbios_tables smbios; /* SM BIOS table */
+ u64 sal_systab; /* SAL system table */
+ u64 boot_info; /* boot info table */
+};
+
+struct boot_params {
+ struct efi_loongson efi;
+ struct efi_reset_system_t reset_system;
+};
+
+struct loongson_system_configuration {
+ u32 nr_cpus;
+ enum loongson_cpu_type cputype;
+ u64 ht_control_base;
+ u64 pci_mem_start_addr;
+ u64 pci_mem_end_addr;
+ u64 pci_io_base;
+ u64 restart_addr;
+ u64 poweroff_addr;
+ u64 suspend_addr;
+ u64 vgabios_addr;
+ u32 dma_mask_bits;
+};
+
+extern struct efi_memory_map_loongson *loongson_memmap;
+extern struct loongson_system_configuration loongson_sysconf;
+#endif
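
The header does not state what the `loongson_params` offsets are relative to; Loongson firmware convention makes them relative to the start of the structure itself, and the sketch below leans on that assumption. It walks the EFI-style memory map handed over by the bootloader:

```c
#include <linux/printk.h>

static void walk_memmap_sketch(struct loongson_params *lp)
{
	/* Assumption: offsets are relative to the loongson_params struct. */
	struct efi_memory_map_loongson *emap = (struct efi_memory_map_loongson *)
		((unsigned long)lp + lp->memory_offset);
	u32 i;

	for (i = 0; i < emap->nr_map; i++)
		pr_info("mem: node %u type %u @ 0x%llx size 0x%x\n",
			emap->map[i].node_id, emap->map[i].mem_type,
			emap->map[i].mem_start, emap->map[i].mem_size);
}
```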
diff --git a/arch/mips/include/asm/mach-loongson/dma-coherence.h b/arch/mips/include/asm/mach-loongson/dma-coherence.h
index aeb2c05d6145..6a902751cc7f 100644
--- a/arch/mips/include/asm/mach-loongson/dma-coherence.h
+++ b/arch/mips/include/asm/mach-loongson/dma-coherence.h
@@ -11,24 +11,40 @@
#ifndef __ASM_MACH_LOONGSON_DMA_COHERENCE_H
#define __ASM_MACH_LOONGSON_DMA_COHERENCE_H
+#ifdef CONFIG_SWIOTLB
+#include <linux/swiotlb.h>
+#endif
+
struct device;
+extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
+extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
size_t size)
{
+#ifdef CONFIG_CPU_LOONGSON3
+ return virt_to_phys(addr);
+#else
return virt_to_phys(addr) | 0x80000000;
+#endif
}
static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
struct page *page)
{
+#ifdef CONFIG_CPU_LOONGSON3
+ return page_to_phys(page);
+#else
return page_to_phys(page) | 0x80000000;
+#endif
}
static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
dma_addr_t dma_addr)
{
-#if defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
+#if defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_64BIT)
+ return dma_addr;
+#elif defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
return (dma_addr > 0x8fffffff) ? dma_addr : (dma_addr & 0x0fffffff);
#else
return dma_addr & 0x7fffffff;
@@ -55,7 +71,11 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
static inline int plat_device_is_coherent(struct device *dev)
{
+#ifdef CONFIG_DMA_NONCOHERENT
return 0;
+#else
+ return 1;
+#endif /* CONFIG_DMA_NONCOHERENT */
}
#endif /* __ASM_MACH_LOONGSON_DMA_COHERENCE_H */
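
For comparison, the translation logic above collapsed into one function, with the compile-time Kconfig choice turned into a runtime parameter purely for illustration:

```c
/* Sketch: the three plat_dma_addr_to_phys() variants side by side. */
enum ls_gen { LS_LEGACY, LS2F_64BIT, LS3_64BIT };

static unsigned long dma_to_phys_sketch(enum ls_gen gen, unsigned long dma)
{
	switch (gen) {
	case LS3_64BIT:
		return dma;			/* identity mapping */
	case LS2F_64BIT:
		return dma > 0x8fffffff ? dma : dma & 0x0fffffff;
	default:
		return dma & 0x7fffffff;	/* strip bit 31 */
	}
}
```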
diff --git a/arch/mips/include/asm/mach-loongson/irq.h b/arch/mips/include/asm/mach-loongson/irq.h
new file mode 100644
index 000000000000..34560bda6626
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson/irq.h
@@ -0,0 +1,44 @@
+#ifndef __ASM_MACH_LOONGSON_IRQ_H_
+#define __ASM_MACH_LOONGSON_IRQ_H_
+
+#include <boot_param.h>
+
+#ifdef CONFIG_CPU_LOONGSON3
+
+/* cpu core interrupt numbers */
+#define MIPS_CPU_IRQ_BASE 56
+
+#define LOONGSON_UART_IRQ (MIPS_CPU_IRQ_BASE + 2) /* UART */
+#define LOONGSON_HT1_IRQ (MIPS_CPU_IRQ_BASE + 3) /* HT1 */
+#define LOONGSON_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 7) /* CPU Timer */
+
+#define LOONGSON_HT1_CFG_BASE loongson_sysconf.ht_control_base
+#define LOONGSON_HT1_INT_VECTOR_BASE (LOONGSON_HT1_CFG_BASE + 0x80)
+#define LOONGSON_HT1_INT_EN_BASE (LOONGSON_HT1_CFG_BASE + 0xa0)
+#define LOONGSON_HT1_INT_VECTOR(n) \
+ LOONGSON3_REG32(LOONGSON_HT1_INT_VECTOR_BASE, 4 * (n))
+#define LOONGSON_HT1_INTN_EN(n) \
+ LOONGSON3_REG32(LOONGSON_HT1_INT_EN_BASE, 4 * (n))
+
+#define LOONGSON_INT_ROUTER_OFFSET 0x1400
+#define LOONGSON_INT_ROUTER_INTEN \
+ LOONGSON3_REG32(LOONGSON3_REG_BASE, LOONGSON_INT_ROUTER_OFFSET + 0x24)
+#define LOONGSON_INT_ROUTER_INTENSET \
+ LOONGSON3_REG32(LOONGSON3_REG_BASE, LOONGSON_INT_ROUTER_OFFSET + 0x28)
+#define LOONGSON_INT_ROUTER_INTENCLR \
+ LOONGSON3_REG32(LOONGSON3_REG_BASE, LOONGSON_INT_ROUTER_OFFSET + 0x2c)
+#define LOONGSON_INT_ROUTER_ENTRY(n) \
+		LOONGSON3_REG8(LOONGSON3_REG_BASE, LOONGSON_INT_ROUTER_OFFSET + (n))
+#define LOONGSON_INT_ROUTER_LPC LOONGSON_INT_ROUTER_ENTRY(0x0a)
+#define LOONGSON_INT_ROUTER_HT1(n)	LOONGSON_INT_ROUTER_ENTRY((n) + 0x18)
+
+#define LOONGSON_INT_CORE0_INT0 0x11 /* route to int 0 of core 0 */
+#define LOONGSON_INT_CORE0_INT1 0x21 /* route to int 1 of core 0 */
+
+#endif
+
+extern void fixup_irqs(void);
+extern void loongson3_ipi_interrupt(struct pt_regs *regs);
+
+#include_next <irq.h>
+#endif /* __ASM_MACH_LOONGSON_IRQ_H_ */
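
A bring-up sketch for the router and HT1 accessors above, modeled loosely on the Loongson-3 platform IRQ code rather than taken from this header (the routing value 0x21 is LOONGSON_INT_CORE0_INT1 as defined above):

```c
/* Sketch: route HT1 entry 0 to core 0 and unmask its 32 vectors. */
static void ht1_irq_route_sketch(void)
{
	LOONGSON_INT_ROUTER_HT1(0) = LOONGSON_INT_CORE0_INT1;
	LOONGSON_HT1_INTN_EN(0) = 0xffffffff;
}
```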
diff --git a/arch/mips/include/asm/mach-loongson/loongson.h b/arch/mips/include/asm/mach-loongson/loongson.h
index b286534fef08..f3fd1eb8e3dd 100644
--- a/arch/mips/include/asm/mach-loongson/loongson.h
+++ b/arch/mips/include/asm/mach-loongson/loongson.h
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kconfig.h>
+#include <boot_param.h>
/* loongson internal northbridge initialization */
extern void bonito_irq_init(void);
@@ -24,8 +25,9 @@ extern void mach_prepare_reboot(void);
extern void mach_prepare_shutdown(void);
/* environment arguments from bootloader */
-extern unsigned long cpu_clock_freq;
-extern unsigned long memsize, highmemsize;
+extern u32 cpu_clock_freq;
+extern u32 memsize, highmemsize;
+extern struct plat_smp_ops loongson3_smp_ops;
/* loongson-specific command line, env and memory initialization */
extern void __init prom_init_memory(void);
@@ -61,6 +63,12 @@ extern int mach_i8259_irq(void);
#define LOONGSON_REG(x) \
(*(volatile u32 *)((char *)CKSEG1ADDR(LOONGSON_REG_BASE) + (x)))
+#define LOONGSON3_REG8(base, x) \
+ (*(volatile u8 *)((char *)TO_UNCAC(base) + (x)))
+
+#define LOONGSON3_REG32(base, x) \
+ (*(volatile u32 *)((char *)TO_UNCAC(base) + (x)))
+
#define LOONGSON_IRQ_BASE 32
#define LOONGSON2_PERFCNT_IRQ (MIPS_CPU_IRQ_BASE + 6) /* cpu perf counter */
@@ -86,6 +94,10 @@ static inline void do_perfcnt_IRQ(void)
#define LOONGSON_REG_BASE 0x1fe00000
#define LOONGSON_REG_SIZE 0x00100000 /* 256Bytes + 256Bytes + ??? */
#define LOONGSON_REG_TOP (LOONGSON_REG_BASE+LOONGSON_REG_SIZE-1)
+/* Loongson-3 specific registers */
+#define LOONGSON3_REG_BASE 0x3ff00000
+#define LOONGSON3_REG_SIZE 0x00100000 /* 256Bytes + 256Bytes + ??? */
+#define LOONGSON3_REG_TOP (LOONGSON3_REG_BASE+LOONGSON3_REG_SIZE-1)
#define LOONGSON_LIO1_BASE 0x1ff00000
#define LOONGSON_LIO1_SIZE 0x00100000 /* 1M */
@@ -101,7 +113,13 @@ static inline void do_perfcnt_IRQ(void)
#define LOONGSON_PCICFG_BASE 0x1fe80000
#define LOONGSON_PCICFG_SIZE 0x00000800 /* 2K */
#define LOONGSON_PCICFG_TOP (LOONGSON_PCICFG_BASE+LOONGSON_PCICFG_SIZE-1)
+
+#if defined(CONFIG_HT_PCI)
+#define LOONGSON_PCIIO_BASE loongson_sysconf.pci_io_base
+#else
#define LOONGSON_PCIIO_BASE 0x1fd00000
+#endif
+
#define LOONGSON_PCIIO_SIZE 0x00100000 /* 1M */
#define LOONGSON_PCIIO_TOP (LOONGSON_PCIIO_BASE+LOONGSON_PCIIO_SIZE-1)
@@ -231,6 +249,9 @@ static inline void do_perfcnt_IRQ(void)
#define LOONGSON_PXARB_CFG LOONGSON_REG(LOONGSON_REGBASE + 0x68)
#define LOONGSON_PXARB_STATUS LOONGSON_REG(LOONGSON_REGBASE + 0x6c)
+/* Chip Config */
+#define LOONGSON_CHIPCFG0 LOONGSON_REG(LOONGSON_REGBASE + 0x80)
+
/* pcimap */
#define LOONGSON_PCIMAP_PCIMAP_LO0 0x0000003f
@@ -246,9 +267,6 @@ static inline void do_perfcnt_IRQ(void)
#ifdef CONFIG_CPU_SUPPORTS_CPUFREQ
#include <linux/cpufreq.h>
extern struct cpufreq_frequency_table loongson2_clockmod_table[];
-
-/* Chip Config */
-#define LOONGSON_CHIPCFG0 LOONGSON_REG(LOONGSON_REGBASE + 0x80)
#endif
/*
diff --git a/arch/mips/include/asm/mach-loongson/machine.h b/arch/mips/include/asm/mach-loongson/machine.h
index 3810d5ca84ac..1b1f592fa2be 100644
--- a/arch/mips/include/asm/mach-loongson/machine.h
+++ b/arch/mips/include/asm/mach-loongson/machine.h
@@ -24,4 +24,10 @@
#endif
+#ifdef CONFIG_LEMOTE_MACH3A
+
+#define LOONGSON_MACHTYPE MACH_LEMOTE_A1101
+
+#endif /* CONFIG_LEMOTE_MACH3A */
+
#endif /* __ASM_MACH_LOONGSON_MACHINE_H */
diff --git a/arch/mips/include/asm/mach-loongson/pci.h b/arch/mips/include/asm/mach-loongson/pci.h
index bc99dab4ef63..1212774f66ef 100644
--- a/arch/mips/include/asm/mach-loongson/pci.h
+++ b/arch/mips/include/asm/mach-loongson/pci.h
@@ -40,8 +40,13 @@ extern struct pci_ops loongson_pci_ops;
#else /* loongson2f/32bit & loongson2e */
/* this pci memory space is mapped by pcimap in pci.c */
+#ifdef CONFIG_CPU_LOONGSON3
+#define LOONGSON_PCI_MEM_START 0x40000000UL
+#define LOONGSON_PCI_MEM_END 0x7effffffUL
+#else
#define LOONGSON_PCI_MEM_START LOONGSON_PCILO1_BASE
#define LOONGSON_PCI_MEM_END (LOONGSON_PCILO1_BASE + 0x04000000 * 2)
+#endif
/* this is an offset from mips_io_port_base */
#define LOONGSON_PCI_IO_START 0x00004000UL
diff --git a/arch/mips/include/asm/mach-loongson/spaces.h b/arch/mips/include/asm/mach-loongson/spaces.h
new file mode 100644
index 000000000000..e2506ee90044
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson/spaces.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_MACH_LOONGSON_SPACES_H_
+#define __ASM_MACH_LOONGSON_SPACES_H_
+
+#if defined(CONFIG_64BIT)
+#define CAC_BASE _AC(0x9800000000000000, UL)
+#endif /* CONFIG_64BIT */
+
+#include <asm/mach-generic/spaces.h>
+#endif
diff --git a/arch/mips/include/asm/mach-malta/kernel-entry-init.h b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
index 0b793e7bf67e..7c5e17a17849 100644
--- a/arch/mips/include/asm/mach-malta/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
@@ -5,10 +5,80 @@
*
* Chris Dearman (chris@mips.com)
* Copyright (C) 2007 Mips Technologies, Inc.
+ * Copyright (C) 2014 Imagination Technologies Ltd.
*/
#ifndef __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
#define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
+ /*
+ * Prepare segments for EVA boot:
+ *
+ * This is in case the processor boots in legacy configuration
+ * (SI_EVAReset is de-asserted and CONFIG5.K == 0)
+ *
+ * On entry, t1 is loaded with CP0_CONFIG
+ *
+ * ========================= Mappings =============================
+ * Virtual memory Physical memory Mapping
+	 * 0x00000000 - 0x7fffffff 0x80000000 - 0xffffffff MUSUK (kuseg)
+	 *			   Flat 2GB physical memory
+	 *
+	 * 0x80000000 - 0x9fffffff 0x00000000 - 0x1fffffff MUSUK (kseg0)
+	 * 0xa0000000 - 0xbfffffff 0x00000000 - 0x1fffffff MUSUK (kseg1)
+ * 0xc0000000 - 0xdfffffff - MK (kseg2)
+ * 0xe0000000 - 0xffffffff - MK (kseg3)
+ *
+ *
+ * Lowmem is expanded to 2GB
+ */
+ .macro eva_entry
+ /*
+ * Get Config.K0 value and use it to program
+ * the segmentation registers
+ */
+ andi t1, 0x7 /* CCA */
+ move t2, t1
+ ins t2, t1, 16, 3
+ /* SegCtl0 */
+ li t0, ((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) | \
+ (((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+ or t0, t2
+ mtc0 t0, $5, 2
+
+ /* SegCtl1 */
+ li t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (2 << MIPS_SEGCFG_C_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) | \
+ (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+ ins t0, t1, 16, 3
+ mtc0 t0, $5, 3
+
+ /* SegCtl2 */
+ li t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (6 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) | \
+ (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (4 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+ or t0, t2
+ mtc0 t0, $5, 4
+
+ jal mips_ihb
+ mfc0 t0, $16, 5
+ li t2, 0x40000000 /* K bit */
+ or t0, t0, t2
+ mtc0 t0, $16, 5
+ sync
+ jal mips_ihb
+ .endm
+
.macro kernel_entry_setup
#ifdef CONFIG_MIPS_MT_SMTC
mfc0 t0, CP0_CONFIG
@@ -39,14 +109,57 @@
nonmt_processor:
.asciz "SMTC kernel requires the MT ASE to run\n"
__FINIT
-0:
#endif
+
+#ifdef CONFIG_EVA
+ sync
+ ehb
+
+ mfc0 t1, CP0_CONFIG
+ bgez t1, 9f
+ mfc0 t0, CP0_CONFIG, 1
+ bgez t0, 9f
+ mfc0 t0, CP0_CONFIG, 2
+ bgez t0, 9f
+ mfc0 t0, CP0_CONFIG, 3
+ sll t0, t0, 6 /* SC bit */
+ bgez t0, 9f
+
+ eva_entry
+ b 0f
+9:
+ /* Assume we came from YAMON... */
+ PTR_LA v0, 0x9fc00534 /* YAMON print */
+ lw v0, (v0)
+ move a0, zero
+ PTR_LA a1, nonsc_processor
+ jal v0
+
+ PTR_LA v0, 0x9fc00520 /* YAMON exit */
+ lw v0, (v0)
+ li a0, 1
+ jal v0
+
+1: b 1b
+ nop
+ __INITDATA
+nonsc_processor:
+ .asciz "EVA kernel requires a MIPS core with Segment Control implemented\n"
+ __FINIT
+#endif /* CONFIG_EVA */
+0:
.endm
/*
* Do SMP slave processor setup necessary before we can safely execute C code.
*/
.macro smp_slave_setup
+#ifdef CONFIG_EVA
+ sync
+ ehb
+ mfc0 t1, CP0_CONFIG
+ eva_entry
+#endif
.endm
#endif /* __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H */
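
The trickiest part of eva_entry is the CCA plumbing: `andi t1, 0x7` extracts the Config.K0 cacheability attribute, and `ins t2, t1, 16, 3` duplicates it into bits 18:16, because each 32-bit SegCtl register configures two segments, one per 16-bit half. Rendered in C (helper name ours):

```c
/* Sketch of the t2 computation at the top of eva_entry. */
static inline unsigned int segctl_cca_pair_sketch(unsigned int config)
{
	unsigned int cca = config & 0x7;	/* andi t1, 0x7       */

	return cca | (cca << 16);		/* ins  t2, t1, 16, 3 */
}
```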
diff --git a/arch/mips/include/asm/mach-malta/spaces.h b/arch/mips/include/asm/mach-malta/spaces.h
new file mode 100644
index 000000000000..d7e54971ec66
--- /dev/null
+++ b/arch/mips/include/asm/mach-malta/spaces.h
@@ -0,0 +1,46 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ */
+
+#ifndef _ASM_MALTA_SPACES_H
+#define _ASM_MALTA_SPACES_H
+
+#ifdef CONFIG_EVA
+
+/*
+ * Traditional Malta Board Memory Map for EVA
+ *
+ * 0x00000000 - 0x0fffffff: 1st RAM region, 256MB
+ * 0x10000000 - 0x1bffffff: GIC and CPC Control Registers
+ * 0x1c000000 - 0x1fffffff: I/O And Flash
+ * 0x20000000 - 0x7fffffff: 2nd RAM region, 1.5GB
+ * 0x80000000 - 0xffffffff: Physical memory aliases to 0x0 (2GB)
+ *
+ * The kernel is still located at 0x80000000 (kseg0). However, the
+ * physical offset has been shifted to 0x80000000, which exploits the
+ * alias on the Malta board. Consequently, we override __pa_symbol to
+ * perform a direct mapping from virtual to physical addresses. In other
+ * words, virtual address 0x80000000 maps to physical address 0x80000000,
+ * which in turn aliases to 0x0. This lets us use a flat 2GB of memory
+ * (0x80000000 - 0xffffffff) and avoid the I/O hole at
+ * 0x10000000 - 0x1fffffff.
+ * The last 64KB of physical memory are reserved for correct HIGHMEM
+ * macro arithmetic.
+ *
+ */
+
+#define PAGE_OFFSET _AC(0x0, UL)
+#define PHYS_OFFSET _AC(0x80000000, UL)
+#define HIGHMEM_START _AC(0xffff0000, UL)
+
+#define __pa_symbol(x) (RELOC_HIDE((unsigned long)(x), 0))
+
+#endif /* CONFIG_EVA */
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* _ASM_MALTA_SPACES_H */
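
Worked example of the EVA arithmetic above: with PAGE_OFFSET at 0x0 and PHYS_OFFSET at 0x80000000, a kernel symbol linked at virtual 0x80123456 gives __pa_symbol() == 0x80123456, which the Malta alias decodes as offset 0x00123456 into the first RAM region; HIGHMEM_START at 0xffff0000 keeps those reserved last 64KB out of lowmem.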
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
index 2dbc7a8cec1a..fc946c835995 100644
--- a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
@@ -76,7 +76,7 @@ static inline void set_value_reg32(volatile u32 *const addr,
__asm__ __volatile__(
" .set push \n"
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: ll %0, %1 # set_value_reg32 \n"
" and %0, %2 \n"
" or %0, %3 \n"
@@ -98,7 +98,7 @@ static inline void set_reg32(volatile u32 *const addr,
__asm__ __volatile__(
" .set push \n"
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: ll %0, %1 # set_reg32 \n"
" or %0, %2 \n"
" sc %0, %1 \n"
@@ -119,7 +119,7 @@ static inline void clear_reg32(volatile u32 *const addr,
__asm__ __volatile__(
" .set push \n"
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: ll %0, %1 # clear_reg32 \n"
" and %0, %2 \n"
" sc %0, %1 \n"
@@ -140,7 +140,7 @@ static inline void toggle_reg32(volatile u32 *const addr,
__asm__ __volatile__(
" .set push \n"
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: ll %0, %1 # toggle_reg32 \n"
" xor %0, %2 \n"
" sc %0, %1 \n"
@@ -216,7 +216,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
#define custom_read_reg32(address, tmp) \
__asm__ __volatile__( \
" .set push \n" \
- " .set mips3 \n" \
+ " .set arch=r4000 \n" \
"1: ll %0, %1 #custom_read_reg32 \n" \
" .set pop \n" \
: "=r" (tmp), "=m" (*address) \
@@ -225,7 +225,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
#define custom_write_reg32(address, tmp) \
__asm__ __volatile__( \
" .set push \n" \
- " .set mips3 \n" \
+ " .set arch=r4000 \n" \
" sc %0, %1 #custom_write_reg32 \n" \
" "__beqz"%0, 1b \n" \
" nop \n" \
diff --git a/arch/mips/include/asm/mips-boards/malta.h b/arch/mips/include/asm/mips-boards/malta.h
index 722bc889eab5..fd9774269a5e 100644
--- a/arch/mips/include/asm/mips-boards/malta.h
+++ b/arch/mips/include/asm/mips-boards/malta.h
@@ -64,6 +64,11 @@ static inline unsigned long get_msc_port_base(unsigned long reg)
#define GIC_ADDRSPACE_SZ (128 * 1024)
/*
+ * CPC Specific definitions
+ */
+#define CPC_BASE_ADDR 0x1bde0000
+
+/*
* MSC01 BIU Specific definitions
* FIXME : These should be elsewhere ?
*/
diff --git a/arch/mips/include/asm/mips-boards/piix4.h b/arch/mips/include/asm/mips-boards/piix4.h
index 836e2ede24de..9cf54041d416 100644
--- a/arch/mips/include/asm/mips-boards/piix4.h
+++ b/arch/mips/include/asm/mips-boards/piix4.h
@@ -50,4 +50,9 @@
#define PIIX4_FUNC1_IDETIM_SECONDARY_HI 0x43
#define PIIX4_FUNC1_IDETIM_SECONDARY_HI_IDE_DECODE_EN (1 << 7)
+/* Power Management Configuration Space */
+#define PIIX4_FUNC3_PMBA 0x40
+#define PIIX4_FUNC3_PMREGMISC 0x80
+#define PIIX4_FUNC3_PMREGMISC_EN (1 << 0)
+
#endif /* __ASM_MIPS_BOARDS_PIIX4_H */
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
new file mode 100644
index 000000000000..6a9d2dd005ca
--- /dev/null
+++ b/arch/mips/include/asm/mips-cm.h
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MIPS_ASM_MIPS_CM_H__
+#define __MIPS_ASM_MIPS_CM_H__
+
+#include <linux/io.h>
+#include <linux/types.h>
+
+/* The base address of the CM GCR block */
+extern void __iomem *mips_cm_base;
+
+/* The base address of the CM L2-only sync region */
+extern void __iomem *mips_cm_l2sync_base;
+
+/**
+ * __mips_cm_phys_base - retrieve the physical base address of the CM
+ *
+ * This function returns the physical base address of the Coherence Manager
+ * global control block, or 0 if no Coherence Manager is present. It provides
+ * a default implementation which reads the CMGCRBase register where available,
+ * and may be overridden by platforms which determine this address in a
+ * different way by defining a function with the same prototype except for the
+ * name mips_cm_phys_base (without underscores).
+ */
+extern phys_t __mips_cm_phys_base(void);
+
+/**
+ * mips_cm_probe - probe for a Coherence Manager
+ *
+ * Attempt to detect the presence of a Coherence Manager. Returns 0 if a CM
+ * is successfully detected, else -errno.
+ */
+#ifdef CONFIG_MIPS_CM
+extern int mips_cm_probe(void);
+#else
+static inline int mips_cm_probe(void)
+{
+ return -ENODEV;
+}
+#endif
+
+/**
+ * mips_cm_present - determine whether a Coherence Manager is present
+ *
+ * Returns true if a CM is present in the system, else false.
+ */
+static inline bool mips_cm_present(void)
+{
+#ifdef CONFIG_MIPS_CM
+ return mips_cm_base != NULL;
+#else
+ return false;
+#endif
+}
+
+/**
+ * mips_cm_has_l2sync - determine whether an L2-only sync region is present
+ *
+ * Returns true if the system implements an L2-only sync region, else false.
+ */
+static inline bool mips_cm_has_l2sync(void)
+{
+#ifdef CONFIG_MIPS_CM
+ return mips_cm_l2sync_base != NULL;
+#else
+ return false;
+#endif
+}
+
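
Boot-flow sketch for the probe/present helpers above (in the spirit of platform setup code, not copied from it):

```c
/* Sketch: probe for a CM early in boot, then report what was found. */
static void cm_setup_sketch(void)
{
	if (mips_cm_probe())
		return;	/* -ENODEV: no Coherence Manager */

	pr_info("CM present, L2-only sync region %s\n",
		mips_cm_has_l2sync() ? "available" : "absent");
}
```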
+/* Offsets to register blocks from the CM base address */
+#define MIPS_CM_GCB_OFS 0x0000 /* Global Control Block */
+#define MIPS_CM_CLCB_OFS 0x2000 /* Core Local Control Block */
+#define MIPS_CM_COCB_OFS 0x4000 /* Core Other Control Block */
+#define MIPS_CM_GDB_OFS 0x6000 /* Global Debug Block */
+
+/* Total size of the CM memory mapped registers */
+#define MIPS_CM_GCR_SIZE 0x8000
+
+/* Size of the L2-only sync region */
+#define MIPS_CM_L2SYNC_SIZE 0x1000
+
+/* Macros to ease the creation of register access functions */
+#define BUILD_CM_R_(name, off) \
+static inline u32 *addr_gcr_##name(void) \
+{ \
+ return (u32 *)(mips_cm_base + (off)); \
+} \
+ \
+static inline u32 read_gcr_##name(void) \
+{ \
+ return __raw_readl(addr_gcr_##name()); \
+}
+
+#define BUILD_CM__W(name, off) \
+static inline void write_gcr_##name(u32 value) \
+{ \
+ __raw_writel(value, addr_gcr_##name()); \
+}
+
+#define BUILD_CM_RW(name, off) \
+ BUILD_CM_R_(name, off) \
+ BUILD_CM__W(name, off)
+
+#define BUILD_CM_Cx_R_(name, off) \
+ BUILD_CM_R_(cl_##name, MIPS_CM_CLCB_OFS + (off)) \
+ BUILD_CM_R_(co_##name, MIPS_CM_COCB_OFS + (off))
+
+#define BUILD_CM_Cx__W(name, off) \
+ BUILD_CM__W(cl_##name, MIPS_CM_CLCB_OFS + (off)) \
+ BUILD_CM__W(co_##name, MIPS_CM_COCB_OFS + (off))
+
+#define BUILD_CM_Cx_RW(name, off) \
+ BUILD_CM_Cx_R_(name, off) \
+ BUILD_CM_Cx__W(name, off)
+
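
To make the builder macros above concrete, here is the hand-written expansion of one instantiation, `BUILD_CM_RW(base, MIPS_CM_GCB_OFS + 0x08)`; it generates exactly these three inline functions:

```c
static inline u32 *addr_gcr_base(void)
{
	return (u32 *)(mips_cm_base + (MIPS_CM_GCB_OFS + 0x08));
}

static inline u32 read_gcr_base(void)
{
	return __raw_readl(addr_gcr_base());
}

static inline void write_gcr_base(u32 value)
{
	__raw_writel(value, addr_gcr_base());
}
```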
+/* GCB register accessor functions */
+BUILD_CM_R_(config, MIPS_CM_GCB_OFS + 0x00)
+BUILD_CM_RW(base, MIPS_CM_GCB_OFS + 0x08)
+BUILD_CM_RW(access, MIPS_CM_GCB_OFS + 0x20)
+BUILD_CM_R_(rev, MIPS_CM_GCB_OFS + 0x30)
+BUILD_CM_RW(error_mask, MIPS_CM_GCB_OFS + 0x40)
+BUILD_CM_RW(error_cause, MIPS_CM_GCB_OFS + 0x48)
+BUILD_CM_RW(error_addr, MIPS_CM_GCB_OFS + 0x50)
+BUILD_CM_RW(error_mult, MIPS_CM_GCB_OFS + 0x58)
+BUILD_CM_RW(l2_only_sync_base, MIPS_CM_GCB_OFS + 0x70)
+BUILD_CM_RW(gic_base, MIPS_CM_GCB_OFS + 0x80)
+BUILD_CM_RW(cpc_base, MIPS_CM_GCB_OFS + 0x88)
+BUILD_CM_RW(reg0_base, MIPS_CM_GCB_OFS + 0x90)
+BUILD_CM_RW(reg0_mask, MIPS_CM_GCB_OFS + 0x98)
+BUILD_CM_RW(reg1_base, MIPS_CM_GCB_OFS + 0xa0)
+BUILD_CM_RW(reg1_mask, MIPS_CM_GCB_OFS + 0xa8)
+BUILD_CM_RW(reg2_base, MIPS_CM_GCB_OFS + 0xb0)
+BUILD_CM_RW(reg2_mask, MIPS_CM_GCB_OFS + 0xb8)
+BUILD_CM_RW(reg3_base, MIPS_CM_GCB_OFS + 0xc0)
+BUILD_CM_RW(reg3_mask, MIPS_CM_GCB_OFS + 0xc8)
+BUILD_CM_R_(gic_status, MIPS_CM_GCB_OFS + 0xd0)
+BUILD_CM_R_(cpc_status, MIPS_CM_GCB_OFS + 0xf0)
+
+/* Core Local & Core Other register accessor functions */
+BUILD_CM_Cx_RW(reset_release, 0x00)
+BUILD_CM_Cx_RW(coherence, 0x08)
+BUILD_CM_Cx_R_(config, 0x10)
+BUILD_CM_Cx_RW(other, 0x18)
+BUILD_CM_Cx_RW(reset_base, 0x20)
+BUILD_CM_Cx_R_(id, 0x28)
+BUILD_CM_Cx_RW(reset_ext_base, 0x30)
+BUILD_CM_Cx_R_(tcid_0_priority, 0x40)
+BUILD_CM_Cx_R_(tcid_1_priority, 0x48)
+BUILD_CM_Cx_R_(tcid_2_priority, 0x50)
+BUILD_CM_Cx_R_(tcid_3_priority, 0x58)
+BUILD_CM_Cx_R_(tcid_4_priority, 0x60)
+BUILD_CM_Cx_R_(tcid_5_priority, 0x68)
+BUILD_CM_Cx_R_(tcid_6_priority, 0x70)
+BUILD_CM_Cx_R_(tcid_7_priority, 0x78)
+BUILD_CM_Cx_R_(tcid_8_priority, 0x80)
+
+/* GCR_CONFIG register fields */
+#define CM_GCR_CONFIG_NUMIOCU_SHF 8
+#define CM_GCR_CONFIG_NUMIOCU_MSK (_ULCAST_(0xf) << 8)
+#define CM_GCR_CONFIG_PCORES_SHF 0
+#define CM_GCR_CONFIG_PCORES_MSK (_ULCAST_(0xff) << 0)
+
+/* GCR_BASE register fields */
+#define CM_GCR_BASE_GCRBASE_SHF 15
+#define CM_GCR_BASE_GCRBASE_MSK (_ULCAST_(0x1ffff) << 15)
+#define CM_GCR_BASE_CMDEFTGT_SHF 0
+#define CM_GCR_BASE_CMDEFTGT_MSK (_ULCAST_(0x3) << 0)
+#define CM_GCR_BASE_CMDEFTGT_DISABLED 0
+#define CM_GCR_BASE_CMDEFTGT_MEM 1
+#define CM_GCR_BASE_CMDEFTGT_IOCU0 2
+#define CM_GCR_BASE_CMDEFTGT_IOCU1 3
+
+/* GCR_ACCESS register fields */
+#define CM_GCR_ACCESS_ACCESSEN_SHF 0
+#define CM_GCR_ACCESS_ACCESSEN_MSK (_ULCAST_(0xff) << 0)
+
+/* GCR_REV register fields */
+#define CM_GCR_REV_MAJOR_SHF 8
+#define CM_GCR_REV_MAJOR_MSK (_ULCAST_(0xff) << 8)
+#define CM_GCR_REV_MINOR_SHF 0
+#define CM_GCR_REV_MINOR_MSK (_ULCAST_(0xff) << 0)
+
+/* GCR_ERROR_CAUSE register fields */
+#define CM_GCR_ERROR_CAUSE_ERRTYPE_SHF 27
+#define CM_GCR_ERROR_CAUSE_ERRTYPE_MSK (_ULCAST_(0x1f) << 27)
+#define CM_GCR_ERROR_CAUSE_ERRINFO_SHF 0
+#define CM_GCR_ERROR_CAUSE_ERRINFO_MSK (_ULCAST_(0x7ffffff) << 0)
+
+/* GCR_ERROR_MULT register fields */
+#define CM_GCR_ERROR_MULT_ERR2ND_SHF 0
+#define CM_GCR_ERROR_MULT_ERR2ND_MSK (_ULCAST_(0x1f) << 0)
+
+/* GCR_L2_ONLY_SYNC_BASE register fields */
+#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_SHF 12
+#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK (_ULCAST_(0xfffff) << 12)
+#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_SHF 0
+#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK (_ULCAST_(0x1) << 0)
+
+/* GCR_GIC_BASE register fields */
+#define CM_GCR_GIC_BASE_GICBASE_SHF 17
+#define CM_GCR_GIC_BASE_GICBASE_MSK (_ULCAST_(0x7fff) << 17)
+#define CM_GCR_GIC_BASE_GICEN_SHF 0
+#define CM_GCR_GIC_BASE_GICEN_MSK (_ULCAST_(0x1) << 0)
+
+/* GCR_CPC_BASE register fields */
+#define CM_GCR_CPC_BASE_CPCBASE_SHF 17
+#define CM_GCR_CPC_BASE_CPCBASE_MSK (_ULCAST_(0x7fff) << 17)
+#define CM_GCR_CPC_BASE_CPCEN_SHF 0
+#define CM_GCR_CPC_BASE_CPCEN_MSK (_ULCAST_(0x1) << 0)
+
+/* GCR_REGn_BASE register fields */
+#define CM_GCR_REGn_BASE_BASEADDR_SHF 16
+#define CM_GCR_REGn_BASE_BASEADDR_MSK (_ULCAST_(0xffff) << 16)
+
+/* GCR_REGn_MASK register fields */
+#define CM_GCR_REGn_MASK_ADDRMASK_SHF 16
+#define CM_GCR_REGn_MASK_ADDRMASK_MSK (_ULCAST_(0xffff) << 16)
+#define CM_GCR_REGn_MASK_CCAOVR_SHF 5
+#define CM_GCR_REGn_MASK_CCAOVR_MSK (_ULCAST_(0x3) << 5)
+#define CM_GCR_REGn_MASK_CCAOVREN_SHF 4
+#define CM_GCR_REGn_MASK_CCAOVREN_MSK (_ULCAST_(0x1) << 4)
+#define CM_GCR_REGn_MASK_DROPL2_SHF 2
+#define CM_GCR_REGn_MASK_DROPL2_MSK (_ULCAST_(0x1) << 2)
+#define CM_GCR_REGn_MASK_CMTGT_SHF 0
+#define CM_GCR_REGn_MASK_CMTGT_MSK (_ULCAST_(0x3) << 0)
+#define CM_GCR_REGn_MASK_CMTGT_DISABLED (_ULCAST_(0x0) << 0)
+#define CM_GCR_REGn_MASK_CMTGT_MEM (_ULCAST_(0x1) << 0)
+#define CM_GCR_REGn_MASK_CMTGT_IOCU0 (_ULCAST_(0x2) << 0)
+#define CM_GCR_REGn_MASK_CMTGT_IOCU1 (_ULCAST_(0x3) << 0)
+
+/* GCR_GIC_STATUS register fields */
+#define CM_GCR_GIC_STATUS_EX_SHF 0
+#define CM_GCR_GIC_STATUS_EX_MSK (_ULCAST_(0x1) << 0)
+
+/* GCR_CPC_STATUS register fields */
+#define CM_GCR_CPC_STATUS_EX_SHF 0
+#define CM_GCR_CPC_STATUS_EX_MSK (_ULCAST_(0x1) << 0)
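
The *_STATUS_EX bits are how software detects the optional blocks behind the
CM. A minimal sketch, assuming the CM probe code earlier in this header has
already mapped mips_cm_base (the example_ name is hypothetical):

```c
static bool example_cm_has_cpc(void)
{
	return mips_cm_present() &&
	       (read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX_MSK);
}
```
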
+
+/* GCR_Cx_COHERENCE register fields */
+#define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF 0
+#define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK (_ULCAST_(0xff) << 0)
+
+/* GCR_Cx_CONFIG register fields */
+#define CM_GCR_Cx_CONFIG_IOCUTYPE_SHF 10
+#define CM_GCR_Cx_CONFIG_IOCUTYPE_MSK (_ULCAST_(0x3) << 10)
+#define CM_GCR_Cx_CONFIG_PVPE_SHF 0
+#define CM_GCR_Cx_CONFIG_PVPE_MSK (_ULCAST_(0x1ff) << 0)
+
+/* GCR_Cx_OTHER register fields */
+#define CM_GCR_Cx_OTHER_CORENUM_SHF 16
+#define CM_GCR_Cx_OTHER_CORENUM_MSK (_ULCAST_(0xffff) << 16)
+
+/* GCR_Cx_RESET_BASE register fields */
+#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE_SHF 12
+#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE_MSK (_ULCAST_(0xfffff) << 12)
+
+/* GCR_Cx_RESET_EXT_BASE register fields */
+#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET_SHF 31
+#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET_MSK (_ULCAST_(0x1) << 31)
+#define CM_GCR_Cx_RESET_EXT_BASE_UEB_SHF 30
+#define CM_GCR_Cx_RESET_EXT_BASE_UEB_MSK (_ULCAST_(0x1) << 30)
+#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK_SHF 20
+#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK_MSK (_ULCAST_(0xff) << 20)
+#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA_SHF 1
+#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA_MSK (_ULCAST_(0x7f) << 1)
+#define CM_GCR_Cx_RESET_EXT_BASE_PRESENT_SHF 0
+#define CM_GCR_Cx_RESET_EXT_BASE_PRESENT_MSK (_ULCAST_(0x1) << 0)
+
+/**
+ * mips_cm_numcores - return the number of cores present in the system
+ *
+ * Returns the value of the PCORES field of the GCR_CONFIG register plus 1, or
+ * zero if no Coherence Manager is present.
+ */
+static inline unsigned mips_cm_numcores(void)
+{
+ if (!mips_cm_present())
+ return 0;
+
+ return ((read_gcr_config() & CM_GCR_CONFIG_PCORES_MSK)
+ >> CM_GCR_CONFIG_PCORES_SHF) + 1;
+}
+
+/**
+ * mips_cm_numiocu - return the number of IOCUs present in the system
+ *
+ * Returns the value of the NUMIOCU field of the GCR_CONFIG register, or zero
+ * if no Coherence Manager is present.
+ */
+static inline unsigned mips_cm_numiocu(void)
+{
+ if (!mips_cm_present())
+ return 0;
+
+ return (read_gcr_config() & CM_GCR_CONFIG_NUMIOCU_MSK)
+ >> CM_GCR_CONFIG_NUMIOCU_SHF;
+}
+
+/**
+ * mips_cm_l2sync - perform an L2-only sync operation
+ *
+ * If an L2-only sync region is present in the system then this function
+ * performs an L2-only sync and returns zero. Otherwise it returns -ENODEV.
+ */
+static inline int mips_cm_l2sync(void)
+{
+ if (!mips_cm_has_l2sync())
+ return -ENODEV;
+
+ writel(0, mips_cm_l2sync_base);
+ return 0;
+}
+
+#endif /* __MIPS_ASM_MIPS_CM_H__ */
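
Putting the helpers above together, a hedged usage sketch (assumes
mips_cm_probe() has run and mapped the GCRs; the example_ name is
hypothetical):

```c
static int example_cm_usage(void)
{
	unsigned ncores = mips_cm_numcores();	/* 0 when no CM is present */

	if (!ncores)
		return -ENODEV;

	pr_info("CM reports %u core(s), %u IOCU(s)\n",
		ncores, mips_cm_numiocu());

	/* Harmlessly returns -ENODEV if no L2-only sync region exists */
	return mips_cm_l2sync();
}
```
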
diff --git a/arch/mips/include/asm/mips-cpc.h b/arch/mips/include/asm/mips-cpc.h
new file mode 100644
index 000000000000..988507e46d42
--- /dev/null
+++ b/arch/mips/include/asm/mips-cpc.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MIPS_ASM_MIPS_CPC_H__
+#define __MIPS_ASM_MIPS_CPC_H__
+
+#include <linux/io.h>
+#include <linux/types.h>
+
+/* The base address of the CPC registers */
+extern void __iomem *mips_cpc_base;
+
+/**
+ * mips_cpc_default_phys_base - retrieve the default physical base address of
+ * the CPC
+ *
+ * Returns the default physical base address of the Cluster Power Controller
+ * memory mapped registers. This is platform dependent & must therefore be
+ * implemented per-platform.
+ */
+extern phys_t mips_cpc_default_phys_base(void);
+
+/**
+ * mips_cpc_phys_base - retrieve the physical base address of the CPC
+ *
+ * This function returns the physical base address of the Cluster Power
+ * Controller memory mapped registers, or 0 if no Cluster Power Controller
+ * is present. It may be overridden by individual platforms which determine
+ * this address in a different way.
+ */
+extern phys_t __weak mips_cpc_phys_base(void);
+
+/**
+ * mips_cpc_probe - probe for a Cluster Power Controller
+ *
+ * Attempt to detect the presence of a Cluster Power Controller. Returns 0 if
+ * a CPC is successfully detected, else -errno.
+ */
+#ifdef CONFIG_MIPS_CPC
+extern int mips_cpc_probe(void);
+#else
+static inline int mips_cpc_probe(void)
+{
+ return -ENODEV;
+}
+#endif
+
+/**
+ * mips_cpc_present - determine whether a Cluster Power Controller is present
+ *
+ * Returns true if a CPC is present in the system, else false.
+ */
+static inline bool mips_cpc_present(void)
+{
+#ifdef CONFIG_MIPS_CPC
+ return mips_cpc_base != NULL;
+#else
+ return false;
+#endif
+}
+
+/* Offsets from the CPC base address to various control blocks */
+#define MIPS_CPC_GCB_OFS 0x0000
+#define MIPS_CPC_CLCB_OFS 0x2000
+#define MIPS_CPC_COCB_OFS 0x4000
+
+/* Macros to ease the creation of register access functions */
+#define BUILD_CPC_R_(name, off) \
+static inline u32 read_cpc_##name(void) \
+{ \
+ return __raw_readl(mips_cpc_base + (off)); \
+}
+
+#define BUILD_CPC__W(name, off) \
+static inline void write_cpc_##name(u32 value) \
+{ \
+ __raw_writel(value, mips_cpc_base + (off)); \
+}
+
+#define BUILD_CPC_RW(name, off) \
+ BUILD_CPC_R_(name, off) \
+ BUILD_CPC__W(name, off)
+
+#define BUILD_CPC_Cx_R_(name, off) \
+ BUILD_CPC_R_(cl_##name, MIPS_CPC_CLCB_OFS + (off)) \
+ BUILD_CPC_R_(co_##name, MIPS_CPC_COCB_OFS + (off))
+
+#define BUILD_CPC_Cx__W(name, off) \
+ BUILD_CPC__W(cl_##name, MIPS_CPC_CLCB_OFS + (off)) \
+ BUILD_CPC__W(co_##name, MIPS_CPC_COCB_OFS + (off))
+
+#define BUILD_CPC_Cx_RW(name, off) \
+ BUILD_CPC_Cx_R_(name, off) \
+ BUILD_CPC_Cx__W(name, off)
+
+/* GCB register accessor functions */
+BUILD_CPC_RW(access, MIPS_CPC_GCB_OFS + 0x00)
+BUILD_CPC_RW(seqdel, MIPS_CPC_GCB_OFS + 0x08)
+BUILD_CPC_RW(rail, MIPS_CPC_GCB_OFS + 0x10)
+BUILD_CPC_RW(resetlen, MIPS_CPC_GCB_OFS + 0x18)
+BUILD_CPC_R_(revision, MIPS_CPC_GCB_OFS + 0x20)
+
+/* Core Local & Core Other accessor functions */
+BUILD_CPC_Cx_RW(cmd, 0x00)
+BUILD_CPC_Cx_RW(stat_conf, 0x08)
+BUILD_CPC_Cx_RW(other, 0x10)
+
+/* CPC_Cx_CMD register fields */
+#define CPC_Cx_CMD_SHF 0
+#define CPC_Cx_CMD_MSK (_ULCAST_(0xf) << 0)
+#define CPC_Cx_CMD_CLOCKOFF (_ULCAST_(0x1) << 0)
+#define CPC_Cx_CMD_PWRDOWN (_ULCAST_(0x2) << 0)
+#define CPC_Cx_CMD_PWRUP (_ULCAST_(0x3) << 0)
+#define CPC_Cx_CMD_RESET (_ULCAST_(0x4) << 0)
+
+/* CPC_Cx_STAT_CONF register fields */
+#define CPC_Cx_STAT_CONF_PWRUPE_SHF 23
+#define CPC_Cx_STAT_CONF_PWRUPE_MSK (_ULCAST_(0x1) << 23)
+#define CPC_Cx_STAT_CONF_SEQSTATE_SHF 19
+#define CPC_Cx_STAT_CONF_SEQSTATE_MSK (_ULCAST_(0xf) << 19)
+#define CPC_Cx_STAT_CONF_SEQSTATE_D0 (_ULCAST_(0x0) << 19)
+#define CPC_Cx_STAT_CONF_SEQSTATE_U0 (_ULCAST_(0x1) << 19)
+#define CPC_Cx_STAT_CONF_SEQSTATE_U1 (_ULCAST_(0x2) << 19)
+#define CPC_Cx_STAT_CONF_SEQSTATE_U2 (_ULCAST_(0x3) << 19)
+#define CPC_Cx_STAT_CONF_SEQSTATE_U3 (_ULCAST_(0x4) << 19)
+#define CPC_Cx_STAT_CONF_SEQSTATE_U4 (_ULCAST_(0x5) << 19)
+#define CPC_Cx_STAT_CONF_SEQSTATE_U5 (_ULCAST_(0x6) << 19)
+#define CPC_Cx_STAT_CONF_SEQSTATE_U6 (_ULCAST_(0x7) << 19)
+#define CPC_Cx_STAT_CONF_SEQSTATE_D1 (_ULCAST_(0x8) << 19)
+#define CPC_Cx_STAT_CONF_SEQSTATE_D3 (_ULCAST_(0x9) << 19)
+#define CPC_Cx_STAT_CONF_SEQSTATE_D2 (_ULCAST_(0xa) << 19)
+#define CPC_Cx_STAT_CONF_CLKGAT_IMPL_SHF 17
+#define CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK (_ULCAST_(0x1) << 17)
+#define CPC_Cx_STAT_CONF_PWRDN_IMPL_SHF 16
+#define CPC_Cx_STAT_CONF_PWRDN_IMPL_MSK (_ULCAST_(0x1) << 16)
+#define CPC_Cx_STAT_CONF_EJTAG_PROBE_SHF 15
+#define CPC_Cx_STAT_CONF_EJTAG_PROBE_MSK (_ULCAST_(0x1) << 15)
+
+/* CPC_Cx_OTHER register fields */
+#define CPC_Cx_OTHER_CORENUM_SHF 16
+#define CPC_Cx_OTHER_CORENUM_MSK (_ULCAST_(0xff) << 16)
+
+#endif /* __MIPS_ASM_MIPS_CPC_H__ */
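
A hedged sketch of how these accessors combine: to command a remote core,
software redirects the core-other block at it and then writes the sequencer
command. Serialising access to the core-other window is left to the caller
here, and mips_cpc_probe() is assumed to have succeeded (the example_ name is
hypothetical):

```c
static void example_cpc_power_down(unsigned int core)
{
	if (!mips_cpc_present())
		return;

	/* Point the core-other register block at the target core... */
	write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF);

	/* ...then ask its power sequencer to take the core down. */
	write_cpc_co_cmd(CPC_Cx_CMD_PWRDOWN);
}
```
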
diff --git a/arch/mips/include/asm/mips_mt.h b/arch/mips/include/asm/mips_mt.h
index ac7935203f89..a3df0c3faa0e 100644
--- a/arch/mips/include/asm/mips_mt.h
+++ b/arch/mips/include/asm/mips_mt.h
@@ -18,7 +18,12 @@ extern cpumask_t mt_fpu_cpumask;
extern unsigned long mt_fpemul_threshold;
extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value);
+
+#ifdef CONFIG_MIPS_MT
extern void mips_mt_set_cpuoptions(void);
+#else
+static inline void mips_mt_set_cpuoptions(void) { }
+#endif
struct class;
extern struct class *mt_class;
diff --git a/arch/mips/include/asm/mipsmtregs.h b/arch/mips/include/asm/mipsmtregs.h
index 38b7704ee376..6efa79a27b6a 100644
--- a/arch/mips/include/asm/mipsmtregs.h
+++ b/arch/mips/include/asm/mipsmtregs.h
@@ -176,6 +176,17 @@
#ifndef __ASSEMBLY__
+static inline unsigned core_nvpes(void)
+{
+ unsigned conf0;
+
+ if (!cpu_has_mipsmt)
+ return 1;
+
+ conf0 = read_c0_mvpconf0();
+ return ((conf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+}
+
static inline unsigned int dvpe(void)
{
int res = 0;
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index bbc3dd4294bc..3e025b5311db 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -568,11 +568,23 @@
#define MIPS_CONF1_PC (_ULCAST_(1) << 4)
#define MIPS_CONF1_MD (_ULCAST_(1) << 5)
#define MIPS_CONF1_C2 (_ULCAST_(1) << 6)
+#define MIPS_CONF1_DA_SHF 7
+#define MIPS_CONF1_DA_SZ 3
#define MIPS_CONF1_DA (_ULCAST_(7) << 7)
+#define MIPS_CONF1_DL_SHF 10
+#define MIPS_CONF1_DL_SZ 3
#define MIPS_CONF1_DL (_ULCAST_(7) << 10)
+#define MIPS_CONF1_DS_SHF 13
+#define MIPS_CONF1_DS_SZ 3
#define MIPS_CONF1_DS (_ULCAST_(7) << 13)
+#define MIPS_CONF1_IA_SHF 16
+#define MIPS_CONF1_IA_SZ 3
#define MIPS_CONF1_IA (_ULCAST_(7) << 16)
+#define MIPS_CONF1_IL_SHF 19
+#define MIPS_CONF1_IL_SZ 3
#define MIPS_CONF1_IL (_ULCAST_(7) << 19)
+#define MIPS_CONF1_IS_SHF 22
+#define MIPS_CONF1_IS_SZ 3
#define MIPS_CONF1_IS (_ULCAST_(7) << 22)
#define MIPS_CONF1_TLBS_SHIFT (25)
#define MIPS_CONF1_TLBS_SIZE (6)
@@ -653,9 +665,16 @@
#define MIPS_CONF7_RPS (_ULCAST_(1) << 2)
+#define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
+#define MIPS_CONF7_AR (_ULCAST_(1) << 16)
+
/* EntryHI bit definition */
#define MIPS_ENTRYHI_EHINV (_ULCAST_(1) << 10)
+/* CMGCRBase bit definitions */
+#define MIPS_CMGCRB_BASE 11
+#define MIPS_CMGCRF_BASE (~_ULCAST_((1 << MIPS_CMGCRB_BASE) - 1))
+
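
CMGCRBase carries bits 35:15 of the Coherence Manager's GCR physical base in
its upper bits, so the base falls out with a mask and a 4-bit shift. An
illustrative sketch, assuming the MIPS32 register layout and the
read_c0_cmgcrbase() accessor added below:

```c
static inline phys_t example_cm_gcr_base(void)
{
	return (phys_t)(read_c0_cmgcrbase() & MIPS_CMGCRF_BASE) << 4;
}
```
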
/*
* Bits in the MIPS32/64 coprocessor 1 (FPU) revision register.
*/
@@ -1010,6 +1029,8 @@ do { \
#define read_c0_prid() __read_32bit_c0_register($15, 0)
+#define read_c0_cmgcrbase() __read_ulong_c0_register($15, 3)
+
#define read_c0_config() __read_32bit_c0_register($16, 0)
#define read_c0_config1() __read_32bit_c0_register($16, 1)
#define read_c0_config2() __read_32bit_c0_register($16, 2)
@@ -1883,6 +1904,7 @@ change_c0_##name(unsigned int change, unsigned int newbits) \
__BUILD_SET_C0(status)
__BUILD_SET_C0(cause)
__BUILD_SET_C0(config)
+__BUILD_SET_C0(config5)
__BUILD_SET_C0(intcontrol)
__BUILD_SET_C0(intctl)
__BUILD_SET_C0(srsmap)
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index 44b705d08262..c2edae382d5d 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -126,6 +126,8 @@ search_module_dbetables(unsigned long addr)
#define MODULE_PROC_FAMILY "LOONGSON1 "
#elif defined CONFIG_CPU_LOONGSON2
#define MODULE_PROC_FAMILY "LOONGSON2 "
+#elif defined CONFIG_CPU_LOONGSON3
+#define MODULE_PROC_FAMILY "LOONGSON3 "
#elif defined CONFIG_CPU_CAVIUM_OCTEON
#define MODULE_PROC_FAMILY "OCTEON "
#elif defined CONFIG_CPU_XLR
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
new file mode 100644
index 000000000000..a2aba6c3ec05
--- /dev/null
+++ b/arch/mips/include/asm/msa.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef _ASM_MSA_H
+#define _ASM_MSA_H
+
+#include <asm/mipsregs.h>
+
+extern void _save_msa(struct task_struct *);
+extern void _restore_msa(struct task_struct *);
+
+static inline void enable_msa(void)
+{
+ if (cpu_has_msa) {
+ set_c0_config5(MIPS_CONF5_MSAEN);
+ enable_fpu_hazard();
+ }
+}
+
+static inline void disable_msa(void)
+{
+ if (cpu_has_msa) {
+ clear_c0_config5(MIPS_CONF5_MSAEN);
+ disable_fpu_hazard();
+ }
+}
+
+static inline int is_msa_enabled(void)
+{
+ if (!cpu_has_msa)
+ return 0;
+
+ return read_c0_config5() & MIPS_CONF5_MSAEN;
+}
+
+static inline int thread_msa_context_live(void)
+{
+ /*
+ * Check cpu_has_msa only if it's a constant. This will allow the
+ * compiler to optimise out code for CPUs without MSA without adding
+ * an extra redundant check for CPUs with MSA.
+ */
+ if (__builtin_constant_p(cpu_has_msa) && !cpu_has_msa)
+ return 0;
+
+ return test_thread_flag(TIF_MSA_CTX_LIVE);
+}
+
+static inline void save_msa(struct task_struct *t)
+{
+ if (cpu_has_msa)
+ _save_msa(t);
+}
+
+static inline void restore_msa(struct task_struct *t)
+{
+ if (cpu_has_msa)
+ _restore_msa(t);
+}
+
+#ifdef TOOLCHAIN_SUPPORTS_MSA
+
+#define __BUILD_MSA_CTL_REG(name, cs) \
+static inline unsigned int read_msa_##name(void) \
+{ \
+ unsigned int reg; \
+ __asm__ __volatile__( \
+ " .set push\n" \
+ " .set msa\n" \
+ " cfcmsa %0, $" #cs "\n" \
+ " .set pop\n" \
+ : "=r"(reg)); \
+ return reg; \
+} \
+ \
+static inline void write_msa_##name(unsigned int val) \
+{ \
+ __asm__ __volatile__( \
+ " .set push\n" \
+ " .set msa\n" \
+ " cfcmsa $" #cs ", %0\n" \
+ " .set pop\n" \
+ : : "r"(val)); \
+}
+
+#else /* !TOOLCHAIN_SUPPORTS_MSA */
+
+/*
+ * Define functions using .word for the c[ft]cmsa instructions in order to
+ * allow compilation with toolchains that do not support MSA. Once all
+ * toolchains in use support MSA these can be removed.
+ */
+
+#define __BUILD_MSA_CTL_REG(name, cs) \
+static inline unsigned int read_msa_##name(void) \
+{ \
+ unsigned int reg; \
+ __asm__ __volatile__( \
+ " .set push\n" \
+ " .set noat\n" \
+ " .word 0x787e0059 | (" #cs " << 11)\n" \
+ " move %0, $1\n" \
+ " .set pop\n" \
+ : "=r"(reg)); \
+ return reg; \
+} \
+ \
+static inline void write_msa_##name(unsigned int val) \
+{ \
+ __asm__ __volatile__( \
+ " .set push\n" \
+ " .set noat\n" \
+ " move $1, %0\n" \
+ " .word 0x783e0819 | (" #cs " << 6)\n" \
+ " .set pop\n" \
+ : : "r"(val)); \
+}
+
+#endif /* !TOOLCHAIN_SUPPORTS_MSA */
+
+#define MSA_IR 0
+#define MSA_CSR 1
+#define MSA_ACCESS 2
+#define MSA_SAVE 3
+#define MSA_MODIFY 4
+#define MSA_REQUEST 5
+#define MSA_MAP 6
+#define MSA_UNMAP 7
+
+__BUILD_MSA_CTL_REG(ir, 0)
+__BUILD_MSA_CTL_REG(csr, 1)
+__BUILD_MSA_CTL_REG(access, 2)
+__BUILD_MSA_CTL_REG(save, 3)
+__BUILD_MSA_CTL_REG(modify, 4)
+__BUILD_MSA_CTL_REG(request, 5)
+__BUILD_MSA_CTL_REG(map, 6)
+__BUILD_MSA_CTL_REG(unmap, 7)
+
+/* MSA Implementation Register (MSAIR) */
+#define MSA_IR_REVB 0
+#define MSA_IR_REVF (_ULCAST_(0xff) << MSA_IR_REVB)
+#define MSA_IR_PROCB 8
+#define MSA_IR_PROCF (_ULCAST_(0xff) << MSA_IR_PROCB)
+#define MSA_IR_WRPB 16
+#define MSA_IR_WRPF (_ULCAST_(0x1) << MSA_IR_WRPB)
+
+/* MSA Control & Status Register (MSACSR) */
+#define MSA_CSR_RMB 0
+#define MSA_CSR_RMF (_ULCAST_(0x3) << MSA_CSR_RMB)
+#define MSA_CSR_RM_NEAREST 0
+#define MSA_CSR_RM_TO_ZERO 1
+#define MSA_CSR_RM_TO_POS 2
+#define MSA_CSR_RM_TO_NEG 3
+#define MSA_CSR_FLAGSB 2
+#define MSA_CSR_FLAGSF (_ULCAST_(0x1f) << MSA_CSR_FLAGSB)
+#define MSA_CSR_FLAGS_IB 2
+#define MSA_CSR_FLAGS_IF (_ULCAST_(0x1) << MSA_CSR_FLAGS_IB)
+#define MSA_CSR_FLAGS_UB 3
+#define MSA_CSR_FLAGS_UF (_ULCAST_(0x1) << MSA_CSR_FLAGS_UB)
+#define MSA_CSR_FLAGS_OB 4
+#define MSA_CSR_FLAGS_OF (_ULCAST_(0x1) << MSA_CSR_FLAGS_OB)
+#define MSA_CSR_FLAGS_ZB 5
+#define MSA_CSR_FLAGS_ZF (_ULCAST_(0x1) << MSA_CSR_FLAGS_ZB)
+#define MSA_CSR_FLAGS_VB 6
+#define MSA_CSR_FLAGS_VF (_ULCAST_(0x1) << MSA_CSR_FLAGS_VB)
+#define MSA_CSR_ENABLESB 7
+#define MSA_CSR_ENABLESF (_ULCAST_(0x1f) << MSA_CSR_ENABLESB)
+#define MSA_CSR_ENABLES_IB 7
+#define MSA_CSR_ENABLES_IF (_ULCAST_(0x1) << MSA_CSR_ENABLES_IB)
+#define MSA_CSR_ENABLES_UB 8
+#define MSA_CSR_ENABLES_UF (_ULCAST_(0x1) << MSA_CSR_ENABLES_UB)
+#define MSA_CSR_ENABLES_OB 9
+#define MSA_CSR_ENABLES_OF (_ULCAST_(0x1) << MSA_CSR_ENABLES_OB)
+#define MSA_CSR_ENABLES_ZB 10
+#define MSA_CSR_ENABLES_ZF (_ULCAST_(0x1) << MSA_CSR_ENABLES_ZB)
+#define MSA_CSR_ENABLES_VB 11
+#define MSA_CSR_ENABLES_VF (_ULCAST_(0x1) << MSA_CSR_ENABLES_VB)
+#define MSA_CSR_CAUSEB 12
+#define MSA_CSR_CAUSEF (_ULCAST_(0x3f) << MSA_CSR_CAUSEB)
+#define MSA_CSR_CAUSE_IB 12
+#define MSA_CSR_CAUSE_IF (_ULCAST_(0x1) << MSA_CSR_CAUSE_IB)
+#define MSA_CSR_CAUSE_UB 13
+#define MSA_CSR_CAUSE_UF (_ULCAST_(0x1) << MSA_CSR_CAUSE_UB)
+#define MSA_CSR_CAUSE_OB 14
+#define MSA_CSR_CAUSE_OF (_ULCAST_(0x1) << MSA_CSR_CAUSE_OB)
+#define MSA_CSR_CAUSE_ZB 15
+#define MSA_CSR_CAUSE_ZF (_ULCAST_(0x1) << MSA_CSR_CAUSE_ZB)
+#define MSA_CSR_CAUSE_VB 16
+#define MSA_CSR_CAUSE_VF (_ULCAST_(0x1) << MSA_CSR_CAUSE_VB)
+#define MSA_CSR_CAUSE_EB 17
+#define MSA_CSR_CAUSE_EF (_ULCAST_(0x1) << MSA_CSR_CAUSE_EB)
+#define MSA_CSR_NXB 18
+#define MSA_CSR_NXF (_ULCAST_(0x1) << MSA_CSR_NXB)
+#define MSA_CSR_FSB 24
+#define MSA_CSR_FSF (_ULCAST_(0x1) << MSA_CSR_FSB)
+
+#endif /* _ASM_MSA_H */
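
A minimal sketch of the generated accessors in use, assuming MSA has been
enabled on the current CPU (the example_ name is hypothetical):

```c
static void example_msa_report(void)
{
	unsigned int ir;

	if (!is_msa_enabled())
		return;

	ir = read_msa_ir();
	pr_info("MSA revision %lu, processor ID %lu\n",
		(ir & MSA_IR_REVF) >> MSA_IR_REVB,
		(ir & MSA_IR_PROCF) >> MSA_IR_PROCB);
}
```
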
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index 5e08bcc74897..5699ec3a71af 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -190,7 +190,9 @@ typedef struct { unsigned long pgprot; } pgprot_t;
* https://patchwork.linux-mips.org/patch/1541/
*/
+#ifndef __pa_symbol
#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
+#endif
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index 32aea4852fb0..e592f3687d6f 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -235,6 +235,15 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)
#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)
+#elif defined(CONFIG_CPU_LOONGSON3)
+
+/* Using COHERENT flag for NONCOHERENT doesn't hurt. */
+
+#define _CACHE_UNCACHED (2<<_CACHE_SHIFT) /* LOONGSON */
+#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT) /* LOONGSON */
+#define _CACHE_CACHABLE_COHERENT (3<<_CACHE_SHIFT) /* LOONGSON-3 */
+#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT) /* LOONGSON */
+
#else
#define _CACHE_CACHABLE_NO_WA (0<<_CACHE_SHIFT) /* R4600 only */
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 3605b844ad87..ad70cba8daff 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -97,18 +97,48 @@ extern unsigned int vced_count, vcei_count;
#define NUM_FPU_REGS 32
-typedef __u64 fpureg_t;
+#ifdef CONFIG_CPU_HAS_MSA
+# define FPU_REG_WIDTH 128
+#else
+# define FPU_REG_WIDTH 64
+#endif
+
+union fpureg {
+ __u32 val32[FPU_REG_WIDTH / 32];
+ __u64 val64[FPU_REG_WIDTH / 64];
+};
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# define FPR_IDX(width, idx) (idx)
+#else
+# define FPR_IDX(width, idx) ((FPU_REG_WIDTH / (width)) - 1 - (idx))
+#endif
+
+#define BUILD_FPR_ACCESS(width) \
+static inline u##width get_fpr##width(union fpureg *fpr, unsigned idx) \
+{ \
+ return fpr->val##width[FPR_IDX(width, idx)]; \
+} \
+ \
+static inline void set_fpr##width(union fpureg *fpr, unsigned idx, \
+ u##width val) \
+{ \
+ fpr->val##width[FPR_IDX(width, idx)] = val; \
+}
+
+BUILD_FPR_ACCESS(32)
+BUILD_FPR_ACCESS(64)
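
The point of FPR_IDX() is that callers index register elements by
significance rather than memory order: get_fpr32(fpr, 0) is the least
significant word on either endianness. An illustrative consistency check
(hypothetical helper, not part of the patch):

```c
static void example_fpr_views(struct task_struct *t)
{
	union fpureg *fpr = &t->thread.fpu.fpr[2];
	u64 whole = get_fpr64(fpr, 0);
	u32 lo = get_fpr32(fpr, 0);	/* least significant 32 bits */
	u32 hi = get_fpr32(fpr, 1);	/* next 32 bits */

	/* Holds on both endiannesses thanks to FPR_IDX() */
	WARN_ON(whole != (((u64)hi << 32) | lo));
}
```
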
/*
- * It would be nice to add some more fields for emulator statistics, but there
- * are a number of fixed offsets in offset.h and elsewhere that would have to
- * be recalculated by hand. So the additional information will be private to
- * the FPU emulator for now. See asm-mips/fpu_emulator.h.
+ * It would be nice to add some more fields for emulator statistics,
+ * the additional information is private to the FPU emulator for now.
+ * See arch/mips/include/asm/fpu_emulator.h.
*/
struct mips_fpu_struct {
- fpureg_t fpr[NUM_FPU_REGS];
+ union fpureg fpr[NUM_FPU_REGS];
unsigned int fcr31;
+ unsigned int msacsr;
};
#define NUM_DSP_REGS 6
@@ -284,8 +314,9 @@ struct thread_struct {
* Saved FPU/FPU emulator stuff \
*/ \
.fpu = { \
- .fpr = {0,}, \
+ .fpr = {{{0,},},}, \
.fcr31 = 0, \
+ .msacsr = 0, \
}, \
/* \
* FPU affinity state (null if not FPAFF) \
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index 7bba9da110af..bf1ac8d35783 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -82,7 +82,7 @@ static inline long regs_return_value(struct pt_regs *regs)
#define instruction_pointer(regs) ((regs)->cp0_epc)
#define profile_pc(regs) instruction_pointer(regs)
-extern asmlinkage void syscall_trace_enter(struct pt_regs *regs);
+extern asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall);
extern asmlinkage void syscall_trace_leave(struct pt_regs *regs);
extern void die(const char *, struct pt_regs *) __noreturn;
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index c84caddb8bde..ca64cbe44493 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -17,6 +17,7 @@
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
+#include <asm/uaccess.h> /* for segment_eq() */
/*
* This macro return a properly sign-extended address suitable as base address
@@ -35,7 +36,7 @@
__asm__ __volatile__( \
" .set push \n" \
" .set noreorder \n" \
- " .set mips3\n\t \n" \
+ " .set arch=r4000 \n" \
" cache %0, %1 \n" \
" .set pop \n" \
: \
@@ -203,7 +204,7 @@ static inline void flush_scache_line(unsigned long addr)
__asm__ __volatile__( \
" .set push \n" \
" .set noreorder \n" \
- " .set mips3 \n" \
+ " .set arch=r4000 \n" \
"1: cache %0, (%1) \n" \
"2: .set pop \n" \
" .section __ex_table,\"a\" \n" \
@@ -212,6 +213,20 @@ static inline void flush_scache_line(unsigned long addr)
: \
: "i" (op), "r" (addr))
+#define protected_cachee_op(op,addr) \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
+ " .set mips0 \n" \
+ " .set eva \n" \
+ "1: cachee %0, (%1) \n" \
+ "2: .set pop \n" \
+ " .section __ex_table,\"a\" \n" \
+ " "STR(PTR)" 1b, 2b \n" \
+ " .previous" \
+ : \
+ : "i" (op), "r" (addr))
+
/*
* The next two are for badland addresses like signal trampolines.
*/
@@ -223,7 +238,11 @@ static inline void protected_flush_icache_line(unsigned long addr)
break;
default:
+#ifdef CONFIG_EVA
+ protected_cachee_op(Hit_Invalidate_I, addr);
+#else
protected_cache_op(Hit_Invalidate_I, addr);
+#endif
break;
}
}
@@ -356,6 +375,91 @@ static inline void invalidate_tcache_page(unsigned long addr)
: "r" (base), \
"i" (op));
+/*
+ * Perform the cache operation specified by op using a user mode virtual
+ * address while in kernel mode.
+ */
+#define cache16_unroll32_user(base,op) \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
+ " .set mips0 \n" \
+ " .set eva \n" \
+ " cachee %1, 0x000(%0); cachee %1, 0x010(%0) \n" \
+ " cachee %1, 0x020(%0); cachee %1, 0x030(%0) \n" \
+ " cachee %1, 0x040(%0); cachee %1, 0x050(%0) \n" \
+ " cachee %1, 0x060(%0); cachee %1, 0x070(%0) \n" \
+ " cachee %1, 0x080(%0); cachee %1, 0x090(%0) \n" \
+ " cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0) \n" \
+ " cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0) \n" \
+ " cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0) \n" \
+ " cachee %1, 0x100(%0); cachee %1, 0x110(%0) \n" \
+ " cachee %1, 0x120(%0); cachee %1, 0x130(%0) \n" \
+ " cachee %1, 0x140(%0); cachee %1, 0x150(%0) \n" \
+ " cachee %1, 0x160(%0); cachee %1, 0x170(%0) \n" \
+ " cachee %1, 0x180(%0); cachee %1, 0x190(%0) \n" \
+ " cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0) \n" \
+ " cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0) \n" \
+ " cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0) \n" \
+ " .set pop \n" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
+#define cache32_unroll32_user(base, op) \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
+ " .set mips0 \n" \
+ " .set eva \n" \
+ " cachee %1, 0x000(%0); cachee %1, 0x020(%0) \n" \
+ " cachee %1, 0x040(%0); cachee %1, 0x060(%0) \n" \
+ " cachee %1, 0x080(%0); cachee %1, 0x0a0(%0) \n" \
+ " cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0) \n" \
+ " cachee %1, 0x100(%0); cachee %1, 0x120(%0) \n" \
+ " cachee %1, 0x140(%0); cachee %1, 0x160(%0) \n" \
+ " cachee %1, 0x180(%0); cachee %1, 0x1a0(%0) \n" \
+ " cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0) \n" \
+ " cachee %1, 0x200(%0); cachee %1, 0x220(%0) \n" \
+ " cachee %1, 0x240(%0); cachee %1, 0x260(%0) \n" \
+ " cachee %1, 0x280(%0); cachee %1, 0x2a0(%0) \n" \
+ " cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0) \n" \
+ " cachee %1, 0x300(%0); cachee %1, 0x320(%0) \n" \
+ " cachee %1, 0x340(%0); cachee %1, 0x360(%0) \n" \
+ " cachee %1, 0x380(%0); cachee %1, 0x3a0(%0) \n" \
+ " cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0) \n" \
+ " .set pop \n" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
+#define cache64_unroll32_user(base, op) \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
+ " .set mips0 \n" \
+ " .set eva \n" \
+ " cachee %1, 0x000(%0); cachee %1, 0x040(%0) \n" \
+ " cachee %1, 0x080(%0); cachee %1, 0x0c0(%0) \n" \
+ " cachee %1, 0x100(%0); cachee %1, 0x140(%0) \n" \
+ " cachee %1, 0x180(%0); cachee %1, 0x1c0(%0) \n" \
+ " cachee %1, 0x200(%0); cachee %1, 0x240(%0) \n" \
+ " cachee %1, 0x280(%0); cachee %1, 0x2c0(%0) \n" \
+ " cachee %1, 0x300(%0); cachee %1, 0x340(%0) \n" \
+ " cachee %1, 0x380(%0); cachee %1, 0x3c0(%0) \n" \
+ " cachee %1, 0x400(%0); cachee %1, 0x440(%0) \n" \
+ " cachee %1, 0x480(%0); cachee %1, 0x4c0(%0) \n" \
+ " cachee %1, 0x500(%0); cachee %1, 0x540(%0) \n" \
+ " cachee %1, 0x580(%0); cachee %1, 0x5c0(%0) \n" \
+ " cachee %1, 0x600(%0); cachee %1, 0x640(%0) \n" \
+ " cachee %1, 0x680(%0); cachee %1, 0x6c0(%0) \n" \
+ " cachee %1, 0x700(%0); cachee %1, 0x740(%0) \n" \
+ " cachee %1, 0x780(%0); cachee %1, 0x7c0(%0) \n" \
+ " .set pop \n" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \
static inline void extra##blast_##pfx##cache##lsize(void) \
@@ -429,6 +533,32 @@ __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
+#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
+static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
+{ \
+ unsigned long start = page; \
+ unsigned long end = page + PAGE_SIZE; \
+ \
+ __##pfx##flush_prologue \
+ \
+ do { \
+ cache##lsize##_unroll32_user(start, hitop); \
+ start += lsize * 32; \
+ } while (start < end); \
+ \
+ __##pfx##flush_epilogue \
+}
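
Each invocation below therefore yields a blast_<cache><lsize>_user_page()
helper. Hand-expanded for the 32-byte D-cache case, with the MT
prologue/epilogue elided for brevity (illustration only):

```c
static inline void blast_dcache32_user_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = page + PAGE_SIZE;

	do {
		/* 32 cachee ops, 32 bytes apart: 1KB per iteration */
		cache32_unroll32_user(start, Hit_Writeback_Inv_D);
		start += 32 * 32;
	} while (start < end);
}
```
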
+
+__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
+ 16)
+__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
+__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
+ 32)
+__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
+__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
+ 64)
+__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
+
/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
@@ -450,12 +580,51 @@ static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start,
__##pfx##flush_epilogue \
}
+#ifndef CONFIG_EVA
+
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
+
+#else
+
+#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop) \
+static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
+ unsigned long end) \
+{ \
+ unsigned long lsize = cpu_##desc##_line_size(); \
+ unsigned long addr = start & ~(lsize - 1); \
+ unsigned long aend = (end - 1) & ~(lsize - 1); \
+ \
+ __##pfx##flush_prologue \
+ \
+ if (segment_eq(get_fs(), USER_DS)) { \
+ while (1) { \
+ protected_cachee_op(hitop, addr); \
+ if (addr == aend) \
+ break; \
+ addr += lsize; \
+ } \
+ } else { \
+ while (1) { \
+ protected_cache_op(hitop, addr); \
+ if (addr == aend) \
+ break; \
+ addr += lsize; \
+ } \
+ \
+ } \
+ __##pfx##flush_epilogue \
+}
+
+__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
+__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)
+
+#endif
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
diff --git a/arch/mips/include/asm/sigcontext.h b/arch/mips/include/asm/sigcontext.h
index eeeb0f48c767..f54bdbe85c0d 100644
--- a/arch/mips/include/asm/sigcontext.h
+++ b/arch/mips/include/asm/sigcontext.h
@@ -32,6 +32,8 @@ struct sigcontext32 {
__u32 sc_lo2;
__u32 sc_hi3;
__u32 sc_lo3;
+ __u64 sc_msaregs[32]; /* Most significant 64 bits */
+ __u32 sc_msa_csr;
};
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
#endif /* _ASM_SIGCONTEXT_H */
diff --git a/arch/mips/include/asm/smp-cps.h b/arch/mips/include/asm/smp-cps.h
new file mode 100644
index 000000000000..d60d1a2180d1
--- /dev/null
+++ b/arch/mips/include/asm/smp-cps.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MIPS_ASM_SMP_CPS_H__
+#define __MIPS_ASM_SMP_CPS_H__
+
+#ifndef __ASSEMBLY__
+
+struct boot_config {
+ unsigned int core;
+ unsigned int vpe;
+ unsigned long pc;
+ unsigned long sp;
+ unsigned long gp;
+};
+
+extern struct boot_config mips_cps_bootcfg;
+
+extern void mips_cps_core_entry(void);
+
+#else /* __ASSEMBLY__ */
+
+.extern mips_cps_bootcfg;
+
+#endif /* __ASSEMBLY__ */
+#endif /* __MIPS_ASM_SMP_CPS_H__ */
diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h
index ef2a8041e78b..73d35b18fb64 100644
--- a/arch/mips/include/asm/smp-ops.h
+++ b/arch/mips/include/asm/smp-ops.h
@@ -13,6 +13,8 @@
#include <linux/errno.h>
+#include <asm/mips-cm.h>
+
#ifdef CONFIG_SMP
#include <linux/cpumask.h>
@@ -43,6 +45,9 @@ static inline void plat_smp_setup(void)
mp_ops->smp_setup();
}
+extern void gic_send_ipi_single(int cpu, unsigned int action);
+extern void gic_send_ipi_mask(const struct cpumask *mask, unsigned int action);
+
#else /* !CONFIG_SMP */
struct plat_smp_ops;
@@ -76,6 +81,9 @@ static inline int register_cmp_smp_ops(void)
#ifdef CONFIG_MIPS_CMP
extern struct plat_smp_ops cmp_smp_ops;
+ if (!mips_cm_present())
+ return -ENODEV;
+
register_smp_ops(&cmp_smp_ops);
return 0;
@@ -97,4 +105,13 @@ static inline int register_vsmp_smp_ops(void)
#endif
}
+#ifdef CONFIG_MIPS_CPS
+extern int register_cps_smp_ops(void);
+#else
+static inline int register_cps_smp_ops(void)
+{
+ return -ENODEV;
+}
+#endif
+
#endif /* __ASM_SMP_OPS_H */
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index eb6008758484..efa02acd3dd5 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -42,6 +42,7 @@ extern int __cpu_logical_map[NR_CPUS];
#define SMP_ICACHE_FLUSH 0x4
/* Used by kexec crashdump to save all cpu's state */
#define SMP_DUMP 0x8
+#define SMP_ASK_C0COUNT 0x10
extern volatile cpumask_t cpu_callin_map;
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index 4857e2c8df5a..d301e108d5b8 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -435,7 +435,7 @@
.macro RESTORE_SP_AND_RET
LONG_L sp, PT_R29(sp)
- .set mips3
+ .set arch=r4000
eret
.set mips0
.endm
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index 278d45a09728..495c1041a2cc 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -16,22 +16,29 @@
#include <asm/watch.h>
#include <asm/dsp.h>
#include <asm/cop2.h>
+#include <asm/msa.h>
struct task_struct;
+enum {
+ FP_SAVE_NONE = 0,
+ FP_SAVE_VECTOR = -1,
+ FP_SAVE_SCALAR = 1,
+};
+
/**
* resume - resume execution of a task
* @prev: The task previously executed.
* @next: The task to begin executing.
* @next_ti: task_thread_info(next).
- * @usedfpu: Non-zero if prev's FP context should be saved.
+ * @fp_save: Which, if any, FP context to save for prev.
*
* This function is used whilst scheduling to save the context of prev & load
* the context of next. Returns prev.
*/
extern asmlinkage struct task_struct *resume(struct task_struct *prev,
struct task_struct *next, struct thread_info *next_ti,
- u32 usedfpu);
+ s32 fp_save);
extern unsigned int ll_bit;
extern struct task_struct *ll_task;
@@ -75,7 +82,8 @@ do { \
#define switch_to(prev, next, last) \
do { \
- u32 __usedfpu, __c0_stat; \
+ u32 __c0_stat; \
+ s32 __fpsave = FP_SAVE_NONE; \
__mips_mt_fpaff_switch_to(prev); \
if (cpu_has_dsp) \
__save_dsp(prev); \
@@ -88,8 +96,12 @@ do { \
write_c0_status(__c0_stat & ~ST0_CU2); \
} \
__clear_software_ll_bit(); \
- __usedfpu = test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU); \
- (last) = resume(prev, next, task_thread_info(next), __usedfpu); \
+ if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU)) \
+ __fpsave = FP_SAVE_SCALAR; \
+ if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA)) \
+ __fpsave = FP_SAVE_VECTOR; \
+ (last) = resume(prev, next, task_thread_info(next), __fpsave); \
+ disable_msa(); \
} while (0)
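
Note the ordering above: a thread that used MSA gets FP_SAVE_VECTOR even when
TIF_USEDFPU is also set, because saving the full vector context already covers
the scalar FP registers aliased into its low 64 bits. The same selection,
restated in isolation (illustrative helper, not part of the patch):

```c
static s32 fp_save_mode(bool used_fpu, bool used_msa)
{
	if (used_msa)
		return FP_SAVE_VECTOR;	/* vector save implies scalar save */
	return used_fpu ? FP_SAVE_SCALAR : FP_SAVE_NONE;
}
```
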
#define finish_arch_switch(prev) \
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 33e8dbfc1b63..6c488c85d791 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -13,17 +13,29 @@
#ifndef __ASM_MIPS_SYSCALL_H
#define __ASM_MIPS_SYSCALL_H
+#include <linux/compiler.h>
#include <linux/audit.h>
#include <linux/elf-em.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <asm/ptrace.h>
+#include <asm/unistd.h>
+
+#ifndef __NR_syscall /* Only defined if _MIPS_SIM == _MIPS_SIM_ABI32 */
+#define __NR_syscall 4000
+#endif
static inline long syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
{
- return regs->regs[2];
+ /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
+ if ((config_enabled(CONFIG_32BIT) ||
+ test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
+ (regs->regs[2] == __NR_syscall))
+ return regs->regs[4];
+ else
+ return regs->regs[2];
}
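
The special case covers O32's indirect sys_syscall entry (__NR_syscall, 4000):
when userspace goes through it, the real syscall number travels in the first
argument register $a0 (regs[4]) rather than $v0 (regs[2]). A hedged userspace
illustration of the two shapes the kernel can see (most libcs load $v0
directly; the indirect form is the unusual one):

```c
#include <sys/syscall.h>
#include <unistd.h>

void demo(void)
{
	/* Direct: the kernel sees $v0 == __NR_getpid. */
	long a = syscall(SYS_getpid);

	/* Indirect via sys_syscall: the kernel sees $v0 == __NR_syscall
	 * and must fetch the real number from $a0, i.e. regs[4]. */
	long b = syscall(SYS_syscall, SYS_getpid);

	(void)a;
	(void)b;
}
```
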
static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
@@ -39,14 +51,14 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
#ifdef CONFIG_32BIT
case 4: case 5: case 6: case 7:
- return get_user(*arg, (int *)usp + 4 * n);
+ return get_user(*arg, (int *)usp + n);
#endif
#ifdef CONFIG_64BIT
case 4: case 5: case 6: case 7:
#ifdef CONFIG_MIPS32_O32
if (test_thread_flag(TIF_32BIT_REGS))
- return get_user(*arg, (int *)usp + 4 * n);
+ return get_user(*arg, (int *)usp + n);
else
#endif
*arg = regs->regs[4 + n];
@@ -57,6 +69,8 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
default:
BUG();
}
+
+ unreachable();
}
static inline long syscall_get_return_value(struct task_struct *task,
@@ -65,6 +79,12 @@ static inline long syscall_get_return_value(struct task_struct *task,
return regs->regs[2];
}
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ /* Do nothing */
+}
+
static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
@@ -83,11 +103,17 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
unsigned long *args)
{
- unsigned long arg;
- int ret;
+ int ret = 0;
+ /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
+ if ((config_enabled(CONFIG_32BIT) ||
+ test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
+ (regs->regs[2] == __NR_syscall)) {
+ i++;
+ n++;
+ }
while (n--)
- ret |= mips_get_syscall_arg(&arg, task, regs, i++);
+ ret |= mips_get_syscall_arg(args++, task, regs, i++);
/*
* No way to communicate an error because this is a void function.
@@ -101,11 +127,13 @@ extern const unsigned long sys_call_table[];
extern const unsigned long sys32_call_table[];
extern const unsigned long sysn32_call_table[];
-static inline int __syscall_get_arch(void)
+static inline int syscall_get_arch(struct task_struct *task,
+ struct pt_regs *regs)
{
int arch = EM_MIPS;
#ifdef CONFIG_64BIT
- arch |= __AUDIT_ARCH_64BIT;
+ if (!test_tsk_thread_flag(task, TIF_32BIT_REGS))
+ arch |= __AUDIT_ARCH_64BIT;
#endif
#if defined(__LITTLE_ENDIAN)
arch |= __AUDIT_ARCH_LE;
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 24846f9053fe..d2d961d6cb86 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -116,6 +116,8 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_LOAD_WATCH 25 /* If set, load watch registers */
#define TIF_SYSCALL_TRACEPOINT 26 /* syscall tracepoint instrumentation */
#define TIF_32BIT_FPREGS 27 /* 32-bit floating point registers */
+#define TIF_USEDMSA 29 /* MSA has been used this quantum */
+#define TIF_MSA_CTX_LIVE 30 /* MSA context must be preserved */
#define TIF_SYSCALL_TRACE 31 /* syscall trace active */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -133,10 +135,13 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
#define _TIF_32BIT_FPREGS (1<<TIF_32BIT_FPREGS)
+#define _TIF_USEDMSA (1<<TIF_USEDMSA)
+#define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
+ _TIF_SYSCALL_AUDIT | \
+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
/* work to do in syscall_trace_leave() */
#define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
diff --git a/arch/mips/include/asm/topology.h b/arch/mips/include/asm/topology.h
index 12609a17dc8b..20ea4859c822 100644
--- a/arch/mips/include/asm/topology.h
+++ b/arch/mips/include/asm/topology.h
@@ -10,8 +10,4 @@
#include <topology.h>
-#ifdef CONFIG_SMP
-#define smt_capable() (smp_num_siblings > 1)
-#endif
-
#endif /* __ASM_TOPOLOGY_H */
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index f3fa3750f577..a10951090234 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -6,6 +6,7 @@
* Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2007 Maciej W. Rozycki
+ * Copyright (C) 2014, Imagination Technologies Ltd.
*/
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H
@@ -13,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
+#include <asm/asm-eva.h>
/*
* The fs value determines whether argument validity checking should be
@@ -222,11 +224,44 @@ struct __large_struct { unsigned long buf[100]; };
* Yuck. We need two variants, one for 64bit operation and one
* for 32 bit mode and old iron.
*/
+#ifndef CONFIG_EVA
+#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
+#else
+/*
+ * Kernel specific functions for EVA. We need to use normal load instructions
+ * to read data from kernel when operating in EVA mode. We use these macros to
+ * avoid redefining __get_user_asm for EVA.
+ */
+#undef _loadd
+#undef _loadw
+#undef _loadh
+#undef _loadb
#ifdef CONFIG_32BIT
-#define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr)
+#define _loadd _loadw
+#else
+#define _loadd(reg, addr) "ld " reg ", " addr
+#endif
+#define _loadw(reg, addr) "lw " reg ", " addr
+#define _loadh(reg, addr) "lh " reg ", " addr
+#define _loadb(reg, addr) "lb " reg ", " addr
+
+#define __get_kernel_common(val, size, ptr) \
+do { \
+ switch (size) { \
+ case 1: __get_data_asm(val, _loadb, ptr); break; \
+ case 2: __get_data_asm(val, _loadh, ptr); break; \
+ case 4: __get_data_asm(val, _loadw, ptr); break; \
+ case 8: __GET_DW(val, _loadd, ptr); break; \
+ default: __get_user_unknown(); break; \
+ } \
+} while (0)
+#endif
+
+#ifdef CONFIG_32BIT
+#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
#endif
#ifdef CONFIG_64BIT
-#define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr)
+#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
#endif
extern void __get_user_unknown(void);
@@ -234,10 +269,10 @@ extern void __get_user_unknown(void);
#define __get_user_common(val, size, ptr) \
do { \
switch (size) { \
- case 1: __get_user_asm(val, "lb", ptr); break; \
- case 2: __get_user_asm(val, "lh", ptr); break; \
- case 4: __get_user_asm(val, "lw", ptr); break; \
- case 8: __GET_USER_DW(val, ptr); break; \
+ case 1: __get_data_asm(val, user_lb, ptr); break; \
+ case 2: __get_data_asm(val, user_lh, ptr); break; \
+ case 4: __get_data_asm(val, user_lw, ptr); break; \
+ case 8: __GET_DW(val, user_ld, ptr); break; \
default: __get_user_unknown(); break; \
} \
} while (0)
@@ -246,8 +281,12 @@ do { \
({ \
int __gu_err; \
\
- __chk_user_ptr(ptr); \
- __get_user_common((x), size, ptr); \
+ if (segment_eq(get_fs(), get_ds())) { \
+ __get_kernel_common((x), size, ptr); \
+ } else { \
+ __chk_user_ptr(ptr); \
+ __get_user_common((x), size, ptr); \
+ } \
__gu_err; \
})
@@ -257,18 +296,22 @@ do { \
const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
\
might_fault(); \
- if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
- __get_user_common((x), size, __gu_ptr); \
+ if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) { \
+ if (segment_eq(get_fs(), get_ds())) \
+ __get_kernel_common((x), size, __gu_ptr); \
+ else \
+ __get_user_common((x), size, __gu_ptr); \
+ } \
\
__gu_err; \
})
-#define __get_user_asm(val, insn, addr) \
+#define __get_data_asm(val, insn, addr) \
{ \
long __gu_tmp; \
\
__asm__ __volatile__( \
- "1: " insn " %1, %3 \n" \
+ "1: "insn("%1", "%3")" \n" \
"2: \n" \
" .insn \n" \
" .section .fixup,\"ax\" \n" \
@@ -287,7 +330,7 @@ do { \
/*
* Get a long long 64 using 32 bit registers.
*/
-#define __get_user_asm_ll32(val, addr) \
+#define __get_data_asm_ll32(val, insn, addr) \
{ \
union { \
unsigned long long l; \
@@ -295,8 +338,8 @@ do { \
} __gu_tmp; \
\
__asm__ __volatile__( \
- "1: lw %1, (%3) \n" \
- "2: lw %D1, 4(%3) \n" \
+ "1: " insn("%1", "(%3)")" \n" \
+ "2: " insn("%D1", "4(%3)")" \n" \
"3: \n" \
" .insn \n" \
" .section .fixup,\"ax\" \n" \
@@ -315,30 +358,73 @@ do { \
(val) = __gu_tmp.t; \
}
+#ifndef CONFIG_EVA
+#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
+#else
+/*
+ * Kernel specific functions for EVA. We need to use normal store instructions
+ * to write data to kernel when operating in EVA mode. We use these macros to
+ * avoid redefining __put_data_asm for EVA.
+ */
+#undef _stored
+#undef _storew
+#undef _storeh
+#undef _storeb
+#ifdef CONFIG_32BIT
+#define _stored _storew
+#else
+#define _stored(reg, addr) "ld " reg ", " addr
+#endif
+
+#define _storew(reg, addr) "sw " reg ", " addr
+#define _storeh(reg, addr) "sh " reg ", " addr
+#define _storeb(reg, addr) "sb " reg ", " addr
+
+#define __put_kernel_common(ptr, size) \
+do { \
+ switch (size) { \
+ case 1: __put_data_asm(_storeb, ptr); break; \
+ case 2: __put_data_asm(_storeh, ptr); break; \
+ case 4: __put_data_asm(_storew, ptr); break; \
+ case 8: __PUT_DW(_stored, ptr); break; \
+ default: __put_user_unknown(); break; \
+ } \
+} while (0)
+#endif
+
/*
* Yuck. We need two variants, one for 64bit operation and one
* for 32 bit mode and old iron.
*/
#ifdef CONFIG_32BIT
-#define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr)
+#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
#endif
#ifdef CONFIG_64BIT
-#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr)
+#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
#endif
+#define __put_user_common(ptr, size) \
+do { \
+ switch (size) { \
+ case 1: __put_data_asm(user_sb, ptr); break; \
+ case 2: __put_data_asm(user_sh, ptr); break; \
+ case 4: __put_data_asm(user_sw, ptr); break; \
+ case 8: __PUT_DW(user_sd, ptr); break; \
+ default: __put_user_unknown(); break; \
+ } \
+} while (0)
+
#define __put_user_nocheck(x, ptr, size) \
({ \
__typeof__(*(ptr)) __pu_val; \
int __pu_err = 0; \
\
- __chk_user_ptr(ptr); \
__pu_val = (x); \
- switch (size) { \
- case 1: __put_user_asm("sb", ptr); break; \
- case 2: __put_user_asm("sh", ptr); break; \
- case 4: __put_user_asm("sw", ptr); break; \
- case 8: __PUT_USER_DW(ptr); break; \
- default: __put_user_unknown(); break; \
+ if (segment_eq(get_fs(), get_ds())) { \
+ __put_kernel_common(ptr, size); \
+ } else { \
+ __chk_user_ptr(ptr); \
+ __put_user_common(ptr, size); \
} \
__pu_err; \
})
@@ -351,21 +437,19 @@ do { \
\
might_fault(); \
if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
- switch (size) { \
- case 1: __put_user_asm("sb", __pu_addr); break; \
- case 2: __put_user_asm("sh", __pu_addr); break; \
- case 4: __put_user_asm("sw", __pu_addr); break; \
- case 8: __PUT_USER_DW(__pu_addr); break; \
- default: __put_user_unknown(); break; \
- } \
+ if (segment_eq(get_fs(), get_ds())) \
+ __put_kernel_common(__pu_addr, size); \
+ else \
+ __put_user_common(__pu_addr, size); \
} \
+ \
__pu_err; \
})
-#define __put_user_asm(insn, ptr) \
+#define __put_data_asm(insn, ptr) \
{ \
__asm__ __volatile__( \
- "1: " insn " %z2, %3 # __put_user_asm\n" \
+ "1: "insn("%z2", "%3")" # __put_data_asm \n" \
"2: \n" \
" .insn \n" \
" .section .fixup,\"ax\" \n" \
@@ -380,11 +464,11 @@ do { \
"i" (-EFAULT)); \
}
-#define __put_user_asm_ll32(ptr) \
+#define __put_data_asm_ll32(insn, ptr) \
{ \
__asm__ __volatile__( \
- "1: sw %2, (%3) # __put_user_asm_ll32 \n" \
- "2: sw %D2, 4(%3) \n" \
+ "1: "insn("%2", "(%3)")" # __put_data_asm_ll32 \n" \
+ "2: "insn("%D2", "4(%3)")" \n" \
"3: \n" \
" .insn \n" \
" .section .fixup,\"ax\" \n" \
@@ -403,6 +487,11 @@ do { \
extern void __put_user_unknown(void);
/*
+ * ul{b,h,w} are macros and there are no equivalent macros for EVA.
+ * EVA unaligned access is handled in the ADE exception handler.
+ */
+#ifndef CONFIG_EVA
+/*
* put_user_unaligned: - Write a simple value into user space.
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
@@ -504,7 +593,7 @@ extern void __get_user_unaligned_unknown(void);
#define __get_user_unaligned_common(val, size, ptr) \
do { \
switch (size) { \
- case 1: __get_user_asm(val, "lb", ptr); break; \
+ case 1: __get_data_asm(val, "lb", ptr); break; \
- case 2: __get_data_unaligned_asm(val, "ulh", ptr); break; \
- case 4: __get_data_unaligned_asm(val, "ulw", ptr); break; \
+ case 2: __get_data_unaligned_asm(val, "ulh", ptr); break; \
+ case 4: __get_data_unaligned_asm(val, "ulw", ptr); break; \
case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \
@@ -531,7 +620,7 @@ do { \
__gu_err; \
})
-#define __get_user_unaligned_asm(val, insn, addr) \
+#define __get_data_unaligned_asm(val, insn, addr) \
{ \
long __gu_tmp; \
\
@@ -594,19 +683,23 @@ do { \
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
#endif
+#define __put_user_unaligned_common(ptr, size) \
+do { \
+ switch (size) { \
+ case 1: __put_data_asm("sb", ptr); break; \
+ case 2: __put_user_unaligned_asm("ush", ptr); break; \
+ case 4: __put_user_unaligned_asm("usw", ptr); break; \
+ case 8: __PUT_USER_UNALIGNED_DW(ptr); break; \
+ default: __put_user_unaligned_unknown(); break; \
+ } \
+} while (0)
+
#define __put_user_unaligned_nocheck(x,ptr,size) \
({ \
__typeof__(*(ptr)) __pu_val; \
int __pu_err = 0; \
\
__pu_val = (x); \
- switch (size) { \
- case 1: __put_user_asm("sb", ptr); break; \
- case 2: __put_user_unaligned_asm("ush", ptr); break; \
- case 4: __put_user_unaligned_asm("usw", ptr); break; \
- case 8: __PUT_USER_UNALIGNED_DW(ptr); break; \
- default: __put_user_unaligned_unknown(); break; \
- } \
+ __put_user_unaligned_common(ptr, size); \
__pu_err; \
})
@@ -616,15 +709,9 @@ do { \
__typeof__(*(ptr)) __pu_val = (x); \
int __pu_err = -EFAULT; \
\
- if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
- switch (size) { \
- case 1: __put_user_asm("sb", __pu_addr); break; \
- case 2: __put_user_unaligned_asm("ush", __pu_addr); break; \
- case 4: __put_user_unaligned_asm("usw", __pu_addr); break; \
- case 8: __PUT_USER_UNALGINED_DW(__pu_addr); break; \
- default: __put_user_unaligned_unknown(); break; \
- } \
- } \
+ if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) \
+ __put_user_unaligned_common(__pu_addr, size); \
+ \
__pu_err; \
})
@@ -669,6 +756,7 @@ do { \
}
extern void __put_user_unaligned_unknown(void);
+#endif
/*
* We're generating jump to subroutines which will be outside the range of
@@ -693,6 +781,7 @@ extern void __put_user_unaligned_unknown(void);
extern size_t __copy_user(void *__to, const void *__from, size_t __n);
+#ifndef CONFIG_EVA
#define __invoke_copy_to_user(to, from, n) \
({ \
register void __user *__cu_to_r __asm__("$4"); \
@@ -711,6 +800,11 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
__cu_len_r; \
})
+#define __invoke_copy_to_kernel(to, from, n) \
+ __invoke_copy_to_user(to, from, n)
+
+#endif
+
/*
* __copy_to_user: - Copy a block of data into user space, with less checking.
* @to: Destination address, in user space.
@@ -735,7 +829,12 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
__cu_from = (from); \
__cu_len = (n); \
might_fault(); \
- __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
+ if (segment_eq(get_fs(), get_ds())) \
+ __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
+ __cu_len); \
+ else \
+ __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
+ __cu_len); \
__cu_len; \
})
@@ -750,7 +849,12 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
- __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
+ if (segment_eq(get_fs(), get_ds())) \
+ __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
+ __cu_len); \
+ else \
+ __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
+ __cu_len); \
__cu_len; \
})
@@ -763,8 +867,14 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
- __cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \
- __cu_len); \
+ if (segment_eq(get_fs(), get_ds())) \
+ __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, \
+ __cu_from,\
+ __cu_len);\
+ else \
+ __cu_len = __invoke_copy_from_user_inatomic(__cu_to, \
+ __cu_from, \
+ __cu_len); \
__cu_len; \
})
@@ -790,14 +900,23 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
- if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
- might_fault(); \
- __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
- __cu_len); \
+ if (segment_eq(get_fs(), get_ds())) { \
+ __cu_len = __invoke_copy_to_kernel(__cu_to, \
+ __cu_from, \
+ __cu_len); \
+ } else { \
+ if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
+ might_fault(); \
+ __cu_len = __invoke_copy_to_user(__cu_to, \
+ __cu_from, \
+ __cu_len); \
+ } \
} \
__cu_len; \
})
+#ifndef CONFIG_EVA
+
#define __invoke_copy_from_user(to, from, n) \
({ \
register void *__cu_to_r __asm__("$4"); \
@@ -821,6 +940,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_len_r; \
})
+#define __invoke_copy_from_kernel(to, from, n) \
+ __invoke_copy_from_user(to, from, n)
+
+/* For userland <-> userland operations */
+#define ___invoke_copy_in_user(to, from, n) \
+ __invoke_copy_from_user(to, from, n)
+
+/* For kernel <-> kernel operations */
+#define ___invoke_copy_in_kernel(to, from, n) \
+ __invoke_copy_from_user(to, from, n)
+
#define __invoke_copy_from_user_inatomic(to, from, n) \
({ \
register void *__cu_to_r __asm__("$4"); \
@@ -844,6 +974,97 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_len_r; \
})
+#define __invoke_copy_from_kernel_inatomic(to, from, n) \
+ __invoke_copy_from_user_inatomic(to, from, n) \
+
+#else
+
+/* EVA specific functions */
+
+extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
+ size_t __n);
+extern size_t __copy_from_user_eva(void *__to, const void *__from,
+ size_t __n);
+extern size_t __copy_to_user_eva(void *__to, const void *__from,
+ size_t __n);
+extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
+
+#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr) \
+({ \
+ register void *__cu_to_r __asm__("$4"); \
+ register const void __user *__cu_from_r __asm__("$5"); \
+ register long __cu_len_r __asm__("$6"); \
+ \
+ __cu_to_r = (to); \
+ __cu_from_r = (from); \
+ __cu_len_r = (n); \
+ __asm__ __volatile__( \
+ ".set\tnoreorder\n\t" \
+ __MODULE_JAL(func_ptr) \
+ ".set\tnoat\n\t" \
+ __UA_ADDU "\t$1, %1, %2\n\t" \
+ ".set\tat\n\t" \
+ ".set\treorder" \
+ : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
+ : \
+ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
+ DADDI_SCRATCH, "memory"); \
+ __cu_len_r; \
+})
+
+#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr) \
+({ \
+ register void *__cu_to_r __asm__("$4"); \
+ register const void __user *__cu_from_r __asm__("$5"); \
+ register long __cu_len_r __asm__("$6"); \
+ \
+ __cu_to_r = (to); \
+ __cu_from_r = (from); \
+ __cu_len_r = (n); \
+ __asm__ __volatile__( \
+ __MODULE_JAL(func_ptr) \
+ : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
+ : \
+ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
+ DADDI_SCRATCH, "memory"); \
+ __cu_len_r; \
+})
+
+/*
+ * Source or destination address is in userland. We need to go through
+ * the TLB.
+ */
+#define __invoke_copy_from_user(to, from, n) \
+ __invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)
+
+#define __invoke_copy_from_user_inatomic(to, from, n) \
+ __invoke_copy_from_user_eva_generic(to, from, n, \
+ __copy_user_inatomic_eva)
+
+#define __invoke_copy_to_user(to, from, n) \
+ __invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)
+
+#define ___invoke_copy_in_user(to, from, n) \
+ __invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)
+
+/*
+ * Source or destination address is in the kernel. We do not go through
+ * the TLB.
+ */
+#define __invoke_copy_from_kernel(to, from, n) \
+ __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
+
+#define __invoke_copy_from_kernel_inatomic(to, from, n) \
+ __invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)
+
+#define __invoke_copy_to_kernel(to, from, n) \
+ __invoke_copy_to_user_eva_generic(to, from, n, __copy_user)
+
+#define ___invoke_copy_in_kernel(to, from, n) \
+ __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
+
+#endif /* CONFIG_EVA */
+
/*
* __copy_from_user: - Copy a block of data from user space, with less checking.
* @to: Destination address, in kernel space.
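
Both __invoke_copy_{from,to}_user_eva_generic() helpers in the hunk
above share one idiom: pin the C arguments to the MIPS argument
registers $4/$5/$6 with register-asm variables, call the assembler
routine through __MODULE_JAL, and list every register the routine may
scribble on as clobbered so the compiler keeps live values elsewhere.
Stripped to its essentials (my_copy_asm is a hypothetical helper that
returns the uncopied byte count in $6; this is a sketch, not the
kernel's real macro):

#define INVOKE_MY_COPY(to, from, n) \
({ \
	register void *__a0 __asm__("$4") = (to); \
	register const void *__a1 __asm__("$5") = (from); \
	register long __a2 __asm__("$6") = (n); \
	__asm__ __volatile__( \
		__MODULE_JAL(my_copy_asm) \
		: "+r" (__a0), "+r" (__a1), "+r" (__a2) \
		: /* no untied inputs */ \
		: "$8", "$9", "$31", "memory");	/* scratch + ra */ \
	__a2;	/* bytes left uncopied */ \
})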
@@ -901,10 +1122,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
- if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
- might_fault(); \
- __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
- __cu_len); \
+ if (segment_eq(get_fs(), get_ds())) { \
+ __cu_len = __invoke_copy_from_kernel(__cu_to, \
+ __cu_from, \
+ __cu_len); \
+ } else { \
+ if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
+ might_fault(); \
+ __cu_len = __invoke_copy_from_user(__cu_to, \
+ __cu_from, \
+ __cu_len); \
+ } \
} \
__cu_len; \
})
@@ -918,9 +1146,14 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
- might_fault(); \
- __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
- __cu_len); \
+ if (segment_eq(get_fs(), get_ds())) { \
+ __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
+ __cu_len); \
+ } else { \
+ might_fault(); \
+ __cu_len = ___invoke_copy_in_user(__cu_to, __cu_from, \
+ __cu_len); \
+ } \
__cu_len; \
})
@@ -933,11 +1166,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
- if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
- access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
- might_fault(); \
- __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
- __cu_len); \
+ if (segment_eq(get_fs(), get_ds())) { \
+ __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
+ __cu_len); \
+ } else { \
+ if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
+ access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
+ might_fault(); \
+ __cu_len = ___invoke_copy_in_user(__cu_to, \
+ __cu_from, \
+ __cu_len); \
+ } \
} \
__cu_len; \
})
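
___invoke_copy_in_user() backs copy_in_user(), the user-to-user flavour
used when both pointers live in userspace (compat layers shuffle
buffers this way). A hypothetical kernel-context caller:

/* Hypothetical helper: move n bytes between two user buffers
 * without staging them through a kernel-side bounce buffer. */
static int move_user_buf(void __user *dst, const void __user *src,
			 size_t n)
{
	return copy_in_user(dst, src, n) ? -EFAULT : 0;
}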
@@ -1007,16 +1246,28 @@ __strncpy_from_user(char *__to, const char __user *__from, long __len)
{
long res;
- might_fault();
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- "move\t$5, %2\n\t"
- "move\t$6, %3\n\t"
- __MODULE_JAL(__strncpy_from_user_nocheck_asm)
- "move\t%0, $2"
- : "=r" (res)
- : "r" (__to), "r" (__from), "r" (__len)
- : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
+ if (segment_eq(get_fs(), get_ds())) {
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, %2\n\t"
+ "move\t$6, %3\n\t"
+ __MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (__to), "r" (__from), "r" (__len)
+ : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
+ } else {
+ might_fault();
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, %2\n\t"
+ "move\t$6, %3\n\t"
+ __MODULE_JAL(__strncpy_from_user_nocheck_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (__to), "r" (__from), "r" (__len)
+ : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
+ }
return res;
}
@@ -1044,16 +1295,28 @@ strncpy_from_user(char *__to, const char __user *__from, long __len)
{
long res;
- might_fault();
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- "move\t$5, %2\n\t"
- "move\t$6, %3\n\t"
- __MODULE_JAL(__strncpy_from_user_asm)
- "move\t%0, $2"
- : "=r" (res)
- : "r" (__to), "r" (__from), "r" (__len)
- : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
+ if (segment_eq(get_fs(), get_ds())) {
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, %2\n\t"
+ "move\t$6, %3\n\t"
+ __MODULE_JAL(__strncpy_from_kernel_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (__to), "r" (__from), "r" (__len)
+ : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
+ } else {
+ might_fault();
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, %2\n\t"
+ "move\t$6, %3\n\t"
+ __MODULE_JAL(__strncpy_from_user_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (__to), "r" (__from), "r" (__len)
+ : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
+ }
return res;
}
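
For reference, the return convention strncpy_from_user() implements:
the copied string length on success, -EFAULT on an unreadable source,
and a result equal to the buffer size when no NUL was found in range.
A hypothetical kernel-context caller covering all three outcomes:

/* Hypothetical caller of strncpy_from_user(). */
static long fetch_name(char *buf, const char __user *uname, long size)
{
	long len = strncpy_from_user(buf, uname, size);

	if (len < 0)
		return len;		/* -EFAULT: unreadable source */
	if (len == size)
		return -ENAMETOOLONG;	/* no NUL within 'size' bytes */
	return len;			/* length excluding the NUL */
}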
@@ -1063,14 +1326,24 @@ static inline long __strlen_user(const char __user *s)
{
long res;
- might_fault();
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- __MODULE_JAL(__strlen_user_nocheck_asm)
- "move\t%0, $2"
- : "=r" (res)
- : "r" (s)
- : "$2", "$4", __UA_t0, "$31");
+ if (segment_eq(get_fs(), get_ds())) {
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ __MODULE_JAL(__strlen_kernel_nocheck_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (s)
+ : "$2", "$4", __UA_t0, "$31");
+ } else {
+ might_fault();
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ __MODULE_JAL(__strlen_user_nocheck_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (s)
+ : "$2", "$4", __UA_t0, "$31");
+ }
return res;
}
@@ -1093,14 +1366,24 @@ static inline long strlen_user(const char __user *s)
{
long res;
- might_fault();
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- __MODULE_JAL(__strlen_user_asm)
- "move\t%0, $2"
- : "=r" (res)
- : "r" (s)
- : "$2", "$4", __UA_t0, "$31");
+ if (segment_eq(get_fs(), get_ds())) {
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ __MODULE_JAL(__strlen_kernel_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (s)
+ : "$2", "$4", __UA_t0, "$31");
+ } else {
+ might_fault();
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ __MODULE_JAL(__strlen_user_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (s)
+ : "$2", "$4", __UA_t0, "$31");
+ }
return res;
}
@@ -1110,15 +1393,26 @@ static inline long __strnlen_user(const char __user *s, long n)
{
long res;
- might_fault();
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- "move\t$5, %2\n\t"
- __MODULE_JAL(__strnlen_user_nocheck_asm)
- "move\t%0, $2"
- : "=r" (res)
- : "r" (s), "r" (n)
- : "$2", "$4", "$5", __UA_t0, "$31");
+ if (segment_eq(get_fs(), get_ds())) {
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, %2\n\t"
+ __MODULE_JAL(__strnlen_kernel_nocheck_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (s), "r" (n)
+ : "$2", "$4", "$5", __UA_t0, "$31");
+ } else {
+ might_fault();
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, %2\n\t"
+ __MODULE_JAL(__strnlen_user_nocheck_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (s), "r" (n)
+ : "$2", "$4", "$5", __UA_t0, "$31");
+ }
return res;
}
@@ -1142,14 +1436,25 @@ static inline long strnlen_user(const char __user *s, long n)
long res;
might_fault();
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- "move\t$5, %2\n\t"
- __MODULE_JAL(__strnlen_user_asm)
- "move\t%0, $2"
- : "=r" (res)
- : "r" (s), "r" (n)
- : "$2", "$4", "$5", __UA_t0, "$31");
+ if (segment_eq(get_fs(), get_ds())) {
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, %2\n\t"
+ __MODULE_JAL(__strnlen_kernel_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (s), "r" (n)
+ : "$2", "$4", "$5", __UA_t0, "$31");
+ } else {
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, %2\n\t"
+ __MODULE_JAL(__strnlen_user_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (s), "r" (n)
+ : "$2", "$4", "$5", __UA_t0, "$31");
+ }
return res;
}
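
strnlen_user(), like the __ variant above it, counts the terminating
NUL in its result, returns 0 on a faulting address, and returns a value
greater than n when no NUL was found, so callers must check both ends.
A hypothetical kernel-context wrapper:

/* Hypothetical wrapper normalising strnlen_user()'s convention. */
static long user_string_size(const char __user *s, long max)
{
	long len = strnlen_user(s, max);

	if (len == 0)
		return -EFAULT;		/* faulted before any NUL */
	if (len > max)
		return -ENAMETOOLONG;	/* no NUL within 'max' bytes */
	return len;			/* includes the trailing NUL */
}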
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index 4d3b92886665..413d6c612bec 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -24,7 +24,6 @@
#ifndef __ASSEMBLY__
-#define __ARCH_OMIT_COMPAT_SYS_GETDENTS64
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_ALARM
#define __ARCH_WANT_SYS_GETHOSTNAME