Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile             |   5
-rw-r--r--  arch/mips/kernel/asm-offsets.c        |  82
-rw-r--r--  arch/mips/kernel/bmips_vec.S          |   2
-rw-r--r--  arch/mips/kernel/cps-vec.S            | 191
-rw-r--r--  arch/mips/kernel/cpu-probe.c          |  74
-rw-r--r--  arch/mips/kernel/ftrace.c             |  14
-rw-r--r--  arch/mips/kernel/genex.S              |   8
-rw-r--r--  arch/mips/kernel/head.S               |   2
-rw-r--r--  arch/mips/kernel/idle.c               |   9
-rw-r--r--  arch/mips/kernel/irq-gic.c            |   1
-rw-r--r--  arch/mips/kernel/kgdb.c               |  18
-rw-r--r--  arch/mips/kernel/mips-cm.c            | 121
-rw-r--r--  arch/mips/kernel/mips-cpc.c           |  52
-rw-r--r--  arch/mips/kernel/mips_ksyms.c         |  24
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c  |  80
-rw-r--r--  arch/mips/kernel/proc.c               |  25
-rw-r--r--  arch/mips/kernel/process.c            |  23
-rw-r--r--  arch/mips/kernel/ptrace.c             | 161
-rw-r--r--  arch/mips/kernel/ptrace32.c           |  67
-rw-r--r--  arch/mips/kernel/r4k_fpu.S            | 231
-rw-r--r--  arch/mips/kernel/r4k_switch.S         |  60
-rw-r--r--  arch/mips/kernel/rtlx-cmp.c           |   3
-rw-r--r--  arch/mips/kernel/rtlx-mt.c            |   3
-rw-r--r--  arch/mips/kernel/scall32-o32.S        |  24
-rw-r--r--  arch/mips/kernel/scall64-64.S         |   5
-rw-r--r--  arch/mips/kernel/scall64-n32.S        |   5
-rw-r--r--  arch/mips/kernel/scall64-o32.S        |  17
-rw-r--r--  arch/mips/kernel/signal.c             | 170
-rw-r--r--  arch/mips/kernel/signal32.c           | 137
-rw-r--r--  arch/mips/kernel/smp-cmp.c            |  55
-rw-r--r--  arch/mips/kernel/smp-cps.c            | 335
-rw-r--r--  arch/mips/kernel/smp-gic.c            |  53
-rw-r--r--  arch/mips/kernel/smp-mt.c             |  45
-rw-r--r--  arch/mips/kernel/smtc-proc.c          |  23
-rw-r--r--  arch/mips/kernel/smtc.c               |   2
-rw-r--r--  arch/mips/kernel/spram.c              |   5
-rw-r--r--  arch/mips/kernel/syscall.c            |   4
-rw-r--r--  arch/mips/kernel/traps.c              | 144
-rw-r--r--  arch/mips/kernel/unaligned.c          | 135
39 files changed, 2028 insertions(+), 387 deletions(-)
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 26c6175e1379..277dab301cea 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -53,6 +53,8 @@ obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o
obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o
obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
obj-$(CONFIG_MIPS_CMP) += smp-cmp.o
+obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o
+obj-$(CONFIG_MIPS_GIC_IPI) += smp-gic.o
obj-$(CONFIG_CPU_MIPSR2) += spram.o
obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
@@ -102,6 +104,9 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+obj-$(CONFIG_MIPS_CM) += mips-cm.o
+obj-$(CONFIG_MIPS_CPC) += mips-cpc.o
+
#
# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
# safe to unconditionally use the assembler -mdsp / -mdspr2 switches
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 0c2e853c3db4..0ea75c244b48 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -16,6 +16,7 @@
#include <linux/suspend.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
+#include <asm/smp-cps.h>
#include <linux/kvm_host.h>
@@ -168,6 +169,72 @@ void output_thread_fpu_defines(void)
OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]);
OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]);
+ /* the least significant 64 bits of each FP register */
+ OFFSET(THREAD_FPR0_LS64, task_struct,
+ thread.fpu.fpr[0].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR1_LS64, task_struct,
+ thread.fpu.fpr[1].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR2_LS64, task_struct,
+ thread.fpu.fpr[2].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR3_LS64, task_struct,
+ thread.fpu.fpr[3].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR4_LS64, task_struct,
+ thread.fpu.fpr[4].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR5_LS64, task_struct,
+ thread.fpu.fpr[5].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR6_LS64, task_struct,
+ thread.fpu.fpr[6].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR7_LS64, task_struct,
+ thread.fpu.fpr[7].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR8_LS64, task_struct,
+ thread.fpu.fpr[8].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR9_LS64, task_struct,
+ thread.fpu.fpr[9].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR10_LS64, task_struct,
+ thread.fpu.fpr[10].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR11_LS64, task_struct,
+ thread.fpu.fpr[11].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR12_LS64, task_struct,
+ thread.fpu.fpr[12].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR13_LS64, task_struct,
+ thread.fpu.fpr[13].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR14_LS64, task_struct,
+ thread.fpu.fpr[14].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR15_LS64, task_struct,
+ thread.fpu.fpr[15].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR16_LS64, task_struct,
+ thread.fpu.fpr[16].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR17_LS64, task_struct,
+ thread.fpu.fpr[17].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR18_LS64, task_struct,
+ thread.fpu.fpr[18].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR19_LS64, task_struct,
+ thread.fpu.fpr[19].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR20_LS64, task_struct,
+ thread.fpu.fpr[20].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR21_LS64, task_struct,
+ thread.fpu.fpr[21].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR22_LS64, task_struct,
+ thread.fpu.fpr[22].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR23_LS64, task_struct,
+ thread.fpu.fpr[23].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR24_LS64, task_struct,
+ thread.fpu.fpr[24].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR25_LS64, task_struct,
+ thread.fpu.fpr[25].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR26_LS64, task_struct,
+ thread.fpu.fpr[26].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR27_LS64, task_struct,
+ thread.fpu.fpr[27].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR28_LS64, task_struct,
+ thread.fpu.fpr[28].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR29_LS64, task_struct,
+ thread.fpu.fpr[29].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR30_LS64, task_struct,
+ thread.fpu.fpr[30].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR31_LS64, task_struct,
+ thread.fpu.fpr[31].val64[FPR_IDX(64, 0)]);
+
OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
BLANK();
}
@@ -228,6 +295,7 @@ void output_sc_defines(void)
OFFSET(SC_LO2, sigcontext, sc_lo2);
OFFSET(SC_HI3, sigcontext, sc_hi3);
OFFSET(SC_LO3, sigcontext, sc_lo3);
+ OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
BLANK();
}
#endif
@@ -242,6 +310,7 @@ void output_sc_defines(void)
OFFSET(SC_MDLO, sigcontext, sc_mdlo);
OFFSET(SC_PC, sigcontext, sc_pc);
OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
+ OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
BLANK();
}
#endif
@@ -253,6 +322,7 @@ void output_sc32_defines(void)
OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
+ OFFSET(SC32_MSAREGS, sigcontext32, sc_msaregs);
BLANK();
}
#endif
@@ -397,3 +467,15 @@ void output_kvm_defines(void)
OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]);
BLANK();
}
+
+#ifdef CONFIG_MIPS_CPS
+void output_cps_defines(void)
+{
+ COMMENT(" MIPS CPS offsets. ");
+ OFFSET(BOOTCFG_CORE, boot_config, core);
+ OFFSET(BOOTCFG_VPE, boot_config, vpe);
+ OFFSET(BOOTCFG_PC, boot_config, pc);
+ OFFSET(BOOTCFG_SP, boot_config, sp);
+ OFFSET(BOOTCFG_GP, boot_config, gp);
+}
+#endif
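
The BOOTCFG_* entries above are emitted into asm-offsets.h as assembler constants; that is what lets cps-vec.S below load the boot configuration fields (BOOTCFG_PC(t0) and friends) without hard-coded offsets. As a rough sketch, they correspond to a structure along these lines in asm/smp-cps.h (the real definition is not part of this diff, so the field types are an assumption):

/* Illustrative layout matching the offsets generated above. */
struct boot_config {
        unsigned int core;      /* BOOTCFG_CORE: target core number     */
        unsigned int vpe;       /* BOOTCFG_VPE:  target VPE within core */
        unsigned long pc;       /* BOOTCFG_PC:   entry point to jump to */
        unsigned long sp;       /* BOOTCFG_SP:   initial stack pointer  */
        unsigned long gp;       /* BOOTCFG_GP:   initial global pointer */
};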
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
index a5bf73d22fcc..290c23b51678 100644
--- a/arch/mips/kernel/bmips_vec.S
+++ b/arch/mips/kernel/bmips_vec.S
@@ -122,7 +122,7 @@ NESTED(bmips_reset_nmi_vec, PT_SIZE, sp)
jr k0
RESTORE_ALL
- .set mips3
+ .set arch=r4000
eret
/***********************************************************************
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
new file mode 100644
index 000000000000..f7a46db4b161
--- /dev/null
+++ b/arch/mips/kernel/cps-vec.S
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <asm/addrspace.h>
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheops.h>
+#include <asm/mipsregs.h>
+
+#define GCR_CL_COHERENCE_OFS 0x2008
+
+.section .text.cps-vec
+.balign 0x1000
+.set noreorder
+
+LEAF(mips_cps_core_entry)
+ /*
+ * These first 8 bytes will be patched by cps_smp_setup to load the
+ * base address of the CM GCRs into register v1.
+ */
+ .quad 0
+
+ /* Check whether we're here due to an NMI */
+ mfc0 k0, CP0_STATUS
+ and k0, k0, ST0_NMI
+ beqz k0, not_nmi
+ nop
+
+ /* This is an NMI */
+ la k0, nmi_handler
+ jr k0
+ nop
+
+not_nmi:
+ /* Setup Cause */
+ li t0, CAUSEF_IV
+ mtc0 t0, CP0_CAUSE
+
+ /* Setup Status */
+ li t0, ST0_CU1 | ST0_CU0
+ mtc0 t0, CP0_STATUS
+
+ /*
+ * Clear the bits used to index the caches. Note that the architecture
+ * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
+ * be valid for all MIPS32 CPUs, even those for which said writes are
+ * unnecessary.
+ */
+ mtc0 zero, CP0_TAGLO, 0
+ mtc0 zero, CP0_TAGHI, 0
+ mtc0 zero, CP0_TAGLO, 2
+ mtc0 zero, CP0_TAGHI, 2
+ ehb
+
+ /* Primary cache configuration is indicated by Config1 */
+ mfc0 v0, CP0_CONFIG, 1
+
+ /* Detect I-cache line size */
+ _EXT t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
+ beqz t0, icache_done
+ li t1, 2
+ sllv t0, t1, t0
+
+ /* Detect I-cache size */
+ _EXT t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
+ xori t2, t1, 0x7
+ beqz t2, 1f
+ li t3, 32
+ addi t1, t1, 1
+ sllv t1, t3, t1
+1: /* At this point t1 == I-cache sets per way */
+ _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
+ addi t2, t2, 1
+ mul t1, t1, t0
+ mul t1, t1, t2
+
+ li a0, KSEG0
+ add a1, a0, t1
+1: cache Index_Store_Tag_I, 0(a0)
+ add a0, a0, t0
+ bne a0, a1, 1b
+ nop
+icache_done:
+
+ /* Detect D-cache line size */
+ _EXT t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
+ beqz t0, dcache_done
+ li t1, 2
+ sllv t0, t1, t0
+
+ /* Detect D-cache size */
+ _EXT t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
+ xori t2, t1, 0x7
+ beqz t2, 1f
+ li t3, 32
+ addi t1, t1, 1
+ sllv t1, t3, t1
+1: /* At this point t1 == D-cache sets per way */
+ _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
+ addi t2, t2, 1
+ mul t1, t1, t0
+ mul t1, t1, t2
+
+ li a0, KSEG0
+ addu a1, a0, t1
+ subu a1, a1, t0
+1: cache Index_Store_Tag_D, 0(a0)
+ bne a0, a1, 1b
+ add a0, a0, t0
+dcache_done:
+
+ /* Set Kseg0 cacheable, coherent, write-back, write-allocate */
+ mfc0 t0, CP0_CONFIG
+ ori t0, 0x7
+ xori t0, 0x2
+ mtc0 t0, CP0_CONFIG
+ ehb
+
+ /* Enter the coherent domain */
+ li t0, 0xff
+ sw t0, GCR_CL_COHERENCE_OFS(v1)
+ ehb
+
+ /* Jump to kseg0 */
+ la t0, 1f
+ jr t0
+ nop
+
+1: /* We're up, cached & coherent */
+
+ /*
+ * TODO: We should check the VPE number we intended to boot here, and
+ * if non-zero we should start that VPE and stop this one. For
+ * the moment this doesn't matter since CPUs are brought up
+ * sequentially and in order, but once hotplug is implemented
+ * this will need revisiting.
+ */
+
+ /* Off we go! */
+ la t0, mips_cps_bootcfg
+ lw t1, BOOTCFG_PC(t0)
+ lw gp, BOOTCFG_GP(t0)
+ lw sp, BOOTCFG_SP(t0)
+ jr t1
+ nop
+ END(mips_cps_core_entry)
+
+.org 0x200
+LEAF(excep_tlbfill)
+ b .
+ nop
+ END(excep_tlbfill)
+
+.org 0x280
+LEAF(excep_xtlbfill)
+ b .
+ nop
+ END(excep_xtlbfill)
+
+.org 0x300
+LEAF(excep_cache)
+ b .
+ nop
+ END(excep_cache)
+
+.org 0x380
+LEAF(excep_genex)
+ b .
+ nop
+ END(excep_genex)
+
+.org 0x400
+LEAF(excep_intex)
+ b .
+ nop
+ END(excep_intex)
+
+.org 0x480
+LEAF(excep_ejtag)
+ la k0, ejtag_debug_handler
+ jr k0
+ nop
+ END(excep_ejtag)
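
The cache initialisation in mips_cps_core_entry derives the geometry from Config1: the line size is 2 << IL, the sets per way are 64 << IS (the encoding 7 means 32 sets), the associativity is IA + 1, and the loop then issues an Index_Store_Tag for every line. A minimal C sketch of the same decode, with the Config1 field positions written out because the MIPS_CONF1_* macros are not reproduced in this diff:

/* Sketch of the Config1 I-cache geometry decode (assumed encodings). */
static unsigned long icache_bytes(unsigned long config1)
{
        unsigned long line, sets, ways;

        line = (config1 >> 19) & 0x7;                   /* Config1.IL */
        if (!line)
                return 0;                               /* no I-cache present */
        line = 2 << line;                               /* bytes per line */

        sets = (config1 >> 22) & 0x7;                   /* Config1.IS */
        sets = (sets == 7) ? 32 : (64 << sets);         /* 7 encodes 32 sets */

        ways = ((config1 >> 16) & 0x7) + 1;             /* Config1.IA */

        return line * sets * ways;                      /* total cache bytes */
}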
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 530f832de02c..6e8fb85ce7c3 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -23,6 +23,8 @@
#include <asm/cpu-type.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/msa.h>
#include <asm/watch.h>
#include <asm/elf.h>
#include <asm/spram.h>
@@ -126,6 +128,20 @@ static inline int __cpu_has_fpu(void)
return ((cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE);
}
+static inline unsigned long cpu_get_msa_id(void)
+{
+ unsigned long status, conf5, msa_id;
+
+ status = read_c0_status();
+ __enable_fpu(FPU_64BIT);
+ conf5 = read_c0_config5();
+ enable_msa();
+ msa_id = read_msa_ir();
+ write_c0_config5(conf5);
+ write_c0_status(status);
+ return msa_id;
+}
+
static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
{
#ifdef __NEED_VMBITS_PROBE
@@ -166,11 +182,12 @@ static char unknown_isa[] = KERN_ERR \
static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
{
unsigned int config6;
- /*
- * Config6 is implementation dependent and it's currently only
- * used by proAptiv
- */
- if (c->cputype == CPU_PROAPTIV) {
+
+ /* It's implementation dependent how the FTLB can be enabled */
+ switch (c->cputype) {
+ case CPU_PROAPTIV:
+ case CPU_P5600:
+ /* proAptiv & related cores use Config6 to enable the FTLB */
config6 = read_c0_config6();
if (enable)
/* Enable FTLB */
@@ -179,6 +196,7 @@ static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
/* Disable FTLB */
write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN);
back_to_back_c0_hazard();
+ break;
}
}
@@ -301,6 +319,8 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
c->ases |= MIPS_ASE_VZ;
if (config3 & MIPS_CONF3_SC)
c->options |= MIPS_CPU_SEGMENTS;
+ if (config3 & MIPS_CONF3_MSA)
+ c->ases |= MIPS_ASE_MSA;
return config3 & MIPS_CONF_M;
}
@@ -367,6 +387,9 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
config5 &= ~MIPS_CONF5_UFR;
write_c0_config5(config5);
+ if (config5 & MIPS_CONF5_EVA)
+ c->options |= MIPS_CPU_EVA;
+
return config5 & MIPS_CONF_M;
}
@@ -398,8 +421,13 @@ static void decode_configs(struct cpuinfo_mips *c)
mips_probe_watch_registers(c);
- if (cpu_has_mips_r2)
+#ifndef CONFIG_MIPS_CPS
+ if (cpu_has_mips_r2) {
c->core = read_c0_ebase() & 0x3ff;
+ if (cpu_has_mipsmt)
+ c->core >>= fls(core_nvpes()) - 1;
+ }
+#endif
}
#define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \
@@ -710,17 +738,23 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
- case PRID_IMP_LOONGSON2:
- c->cputype = CPU_LOONGSON2;
- __cpu_name[cpu] = "ICT Loongson-2";
-
+ case PRID_IMP_LOONGSON_64: /* Loongson-2/3 */
switch (c->processor_id & PRID_REV_MASK) {
case PRID_REV_LOONGSON2E:
+ c->cputype = CPU_LOONGSON2;
+ __cpu_name[cpu] = "ICT Loongson-2";
set_elf_platform(cpu, "loongson2e");
break;
case PRID_REV_LOONGSON2F:
+ c->cputype = CPU_LOONGSON2;
+ __cpu_name[cpu] = "ICT Loongson-2";
set_elf_platform(cpu, "loongson2f");
break;
+ case PRID_REV_LOONGSON3A:
+ c->cputype = CPU_LOONGSON3;
+ __cpu_name[cpu] = "ICT Loongson-3";
+ set_elf_platform(cpu, "loongson3a");
+ break;
}
set_isa(c, MIPS_CPU_ISA_III);
@@ -729,7 +763,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
MIPS_CPU_32FPR;
c->tlbsize = 64;
break;
- case PRID_IMP_LOONGSON1:
+ case PRID_IMP_LOONGSON_32: /* Loongson-1 */
decode_configs(c);
c->cputype = CPU_LOONGSON1;
@@ -806,7 +840,7 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "MIPS 1004Kc";
break;
case PRID_IMP_1074K:
- c->cputype = CPU_74K;
+ c->cputype = CPU_1074K;
__cpu_name[cpu] = "MIPS 1074Kc";
break;
case PRID_IMP_INTERAPTIV_UP:
@@ -825,6 +859,14 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_PROAPTIV;
__cpu_name[cpu] = "MIPS proAptiv (multi)";
break;
+ case PRID_IMP_P5600:
+ c->cputype = CPU_P5600;
+ __cpu_name[cpu] = "MIPS P5600";
+ break;
+ case PRID_IMP_M5150:
+ c->cputype = CPU_M5150;
+ __cpu_name[cpu] = "MIPS M5150";
+ break;
}
decode_configs(c);
@@ -1176,6 +1218,12 @@ void cpu_probe(void)
else
c->srsets = 1;
+ if (cpu_has_msa) {
+ c->msa_id = cpu_get_msa_id();
+ WARN(c->msa_id & MSA_IR_WRPF,
+ "Vector register partitioning unimplemented!");
+ }
+
cpu_probe_vmbits(c);
#ifdef CONFIG_64BIT
@@ -1192,4 +1240,6 @@ void cpu_report(void)
smp_processor_id(), c->processor_id, cpu_name_string());
if (c->options & MIPS_CPU_FPU)
printk(KERN_INFO "FPU revision is: %08x\n", c->fpu_id);
+ if (cpu_has_msa)
+ pr_info("MSA revision is: %08x\n", c->msa_id);
}
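
In the decode_configs() hunk above, EBase.CPUNum enumerates VPEs rather than cores when the MT ASE is present, so the value is shifted down by log2 of the VPEs per core to recover the physical core index: with two VPEs per core, fls(2) - 1 == 1, so CPUNum 0 and 1 both map to core 0, and CPUNum 2 and 3 to core 1. A sketch of that arithmetic:

/* Sketch only: recovering the core index from EBase.CPUNum under MT. */
static unsigned int phys_core(unsigned int ebase_cpunum,
                              unsigned int vpes_per_core)
{
        /* vpes_per_core is a power of two, so the shift is a divide. */
        return ebase_cpunum >> (fls(vpes_per_core) - 1);
}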
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 185ba258361b..60e7e5e45af1 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -90,6 +90,7 @@ static inline void ftrace_dyn_arch_init_insns(void)
static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
int faulted;
+ mm_segment_t old_fs;
/* *(unsigned int *)ip = new_code; */
safe_store_code(new_code, ip, faulted);
@@ -97,7 +98,10 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
if (unlikely(faulted))
return -EFAULT;
+ old_fs = get_fs();
+ set_fs(get_ds());
flush_icache_range(ip, ip + 8);
+ set_fs(old_fs);
return 0;
}
@@ -111,11 +115,10 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
safe_store_code(new_code1, ip, faulted);
if (unlikely(faulted))
return -EFAULT;
- ip += 4;
- safe_store_code(new_code2, ip, faulted);
+ safe_store_code(new_code2, ip + 4, faulted);
if (unlikely(faulted))
return -EFAULT;
- flush_icache_range(ip, ip + 8); /* original ip + 12 */
+ flush_icache_range(ip, ip + 8);
return 0;
}
#endif
@@ -198,7 +201,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
return ftrace_modify_code(FTRACE_CALL_IP, new);
}
-int __init ftrace_dyn_arch_init(void *data)
+int __init ftrace_dyn_arch_init(void)
{
/* Encode the instructions when booting */
ftrace_dyn_arch_init_insns();
@@ -206,9 +209,6 @@ int __init ftrace_dyn_arch_init(void *data)
/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);
- /* The return code is retured via data */
- *(unsigned long *)data = 0;
-
return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index d84f6a509502..a9ce3408be25 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -67,7 +67,7 @@ NESTED(except_vec3_generic, 0, sp)
*/
NESTED(except_vec3_r4000, 0, sp)
.set push
- .set mips3
+ .set arch=r4000
.set noat
mfc0 k1, CP0_CAUSE
li k0, 31<<2
@@ -139,7 +139,7 @@ LEAF(__r4k_wait)
nop
nop
#endif
- .set mips3
+ .set arch=r4000
wait
/* end of rollback region (the region size must be power of two) */
1:
@@ -475,8 +475,10 @@ NESTED(nmi_handler, PT_SIZE, sp)
BUILD_HANDLER cpu cpu sti silent /* #11 */
BUILD_HANDLER ov ov sti silent /* #12 */
BUILD_HANDLER tr tr sti silent /* #13 */
+ BUILD_HANDLER msa_fpe msa_fpe sti silent /* #14 */
BUILD_HANDLER fpe fpe fpe silent /* #15 */
BUILD_HANDLER ftlb ftlb none silent /* #16 */
+ BUILD_HANDLER msa msa sti silent /* #21 */
BUILD_HANDLER mdmx mdmx sti silent /* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
/*
@@ -575,7 +577,7 @@ isrdhwr:
ori k1, _THREAD_MASK
xori k1, _THREAD_MASK
LONG_L v1, TI_TP_VALUE(k1)
- .set mips3
+ .set arch=r4000
eret
.set mips0
#endif
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 7b6a5b3e3acf..e712dcf18b2d 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -175,8 +175,8 @@ NESTED(smp_bootstrap, 16, sp)
DMT 10 # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */
jal mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
- setup_c0_status_sec
smp_slave_setup
+ setup_c0_status_sec
#ifdef CONFIG_MIPS_MT_SMTC
andi t2, t2, VPECONTROL_TE
beqz t2, 2f
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 3553243bf9d6..837ff27950bc 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -64,7 +64,7 @@ void r4k_wait_irqoff(void)
if (!need_resched())
__asm__(
" .set push \n"
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" wait \n"
" .set pop \n");
local_irq_enable();
@@ -82,7 +82,7 @@ static void rm7k_wait_irqoff(void)
if (!need_resched())
__asm__(
" .set push \n"
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" .set noat \n"
" mfc0 $1, $12 \n"
" sync \n"
@@ -103,7 +103,7 @@ static void au1k_wait(void)
unsigned long c0status = read_c0_status() | 1; /* irqs on */
__asm__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" cache 0x14, 0(%0) \n"
" cache 0x14, 32(%0) \n"
" sync \n"
@@ -184,8 +184,11 @@ void __init check_wait(void)
case CPU_24K:
case CPU_34K:
case CPU_1004K:
+ case CPU_1074K:
case CPU_INTERAPTIV:
case CPU_PROAPTIV:
+ case CPU_P5600:
+ case CPU_M5150:
cpu_wait = r4k_wait;
if (read_c0_config7() & MIPS_CONF7_WII)
cpu_wait = r4k_wait_irqoff;
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index 5b5ddb231f26..8520dad6d4e3 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -16,7 +16,6 @@
#include <asm/gic.h>
#include <asm/setup.h>
#include <asm/traps.h>
-#include <asm/gcmpregs.h>
#include <linux/hardirq.h>
#include <asm-generic/bitops/find.h>
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index fcaac2f132f0..7afcc2f22c0d 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -32,6 +32,7 @@
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
+#include <asm/uaccess.h>
static struct hard_trap_info {
unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */
@@ -208,7 +209,14 @@ void arch_kgdb_breakpoint(void)
static void kgdb_call_nmi_hook(void *ignored)
{
+ mm_segment_t old_fs;
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+
kgdb_nmicallback(raw_smp_processor_id(), NULL);
+
+ set_fs(old_fs);
}
void kgdb_roundup_cpus(unsigned long flags)
@@ -282,6 +290,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
struct die_args *args = (struct die_args *)ptr;
struct pt_regs *regs = args->regs;
int trap = (regs->cp0_cause & 0x7c) >> 2;
+ mm_segment_t old_fs;
#ifdef CONFIG_KPROBES
/*
@@ -296,11 +305,17 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
if (user_mode(regs))
return NOTIFY_DONE;
+ /* Kernel mode. Set correct address limit */
+ old_fs = get_fs();
+ set_fs(get_ds());
+
if (atomic_read(&kgdb_active) != -1)
kgdb_nmicallback(smp_processor_id(), regs);
- if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs))
+ if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs)) {
+ set_fs(old_fs);
return NOTIFY_DONE;
+ }
if (atomic_read(&kgdb_setting_breakpoint))
if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst))
@@ -310,6 +325,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
local_irq_enable();
__flush_cache_all();
+ set_fs(old_fs);
return NOTIFY_STOP;
}
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
new file mode 100644
index 000000000000..f76f7a08412d
--- /dev/null
+++ b/arch/mips/kernel/mips-cm.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/errno.h>
+
+#include <asm/mips-cm.h>
+#include <asm/mipsregs.h>
+
+void __iomem *mips_cm_base;
+void __iomem *mips_cm_l2sync_base;
+
+phys_t __mips_cm_phys_base(void)
+{
+ u32 config3 = read_c0_config3();
+ u32 cmgcr;
+
+ /* Check the CMGCRBase register is implemented */
+ if (!(config3 & MIPS_CONF3_CMGCR))
+ return 0;
+
+ /* Read the address from CMGCRBase */
+ cmgcr = read_c0_cmgcrbase();
+ return (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32);
+}
+
+phys_t mips_cm_phys_base(void)
+ __attribute__((weak, alias("__mips_cm_phys_base")));
+
+phys_t __mips_cm_l2sync_phys_base(void)
+{
+ u32 base_reg;
+
+ /*
+ * If the L2-only sync region is already enabled then leave it at its
+ * current location.
+ */
+ base_reg = read_gcr_l2_only_sync_base();
+ if (base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK)
+ return base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK;
+
+ /* Default to following the CM */
+ return mips_cm_phys_base() + MIPS_CM_GCR_SIZE;
+}
+
+phys_t mips_cm_l2sync_phys_base(void)
+ __attribute__((weak, alias("__mips_cm_l2sync_phys_base")));
+
+static void mips_cm_probe_l2sync(void)
+{
+ unsigned major_rev;
+ phys_t addr;
+
+ /* L2-only sync was introduced with CM major revision 6 */
+ major_rev = (read_gcr_rev() & CM_GCR_REV_MAJOR_MSK) >>
+ CM_GCR_REV_MAJOR_SHF;
+ if (major_rev < 6)
+ return;
+
+ /* Find a location for the L2 sync region */
+ addr = mips_cm_l2sync_phys_base();
+ BUG_ON((addr & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK) != addr);
+ if (!addr)
+ return;
+
+ /* Set the region base address & enable it */
+ write_gcr_l2_only_sync_base(addr | CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK);
+
+ /* Map the region */
+ mips_cm_l2sync_base = ioremap_nocache(addr, MIPS_CM_L2SYNC_SIZE);
+}
+
+int mips_cm_probe(void)
+{
+ phys_t addr;
+ u32 base_reg;
+
+ addr = mips_cm_phys_base();
+ BUG_ON((addr & CM_GCR_BASE_GCRBASE_MSK) != addr);
+ if (!addr)
+ return -ENODEV;
+
+ mips_cm_base = ioremap_nocache(addr, MIPS_CM_GCR_SIZE);
+ if (!mips_cm_base)
+ return -ENXIO;
+
+ /* sanity check that we're looking at a CM */
+ base_reg = read_gcr_base();
+ if ((base_reg & CM_GCR_BASE_GCRBASE_MSK) != addr) {
+ pr_err("GCRs appear to have been moved (expected them at 0x%08lx)!\n",
+ (unsigned long)addr);
+ mips_cm_base = NULL;
+ return -ENODEV;
+ }
+
+ /* set default target to memory */
+ base_reg &= ~CM_GCR_BASE_CMDEFTGT_MSK;
+ base_reg |= CM_GCR_BASE_CMDEFTGT_MEM;
+ write_gcr_base(base_reg);
+
+ /* disable CM regions */
+ write_gcr_reg0_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
+ write_gcr_reg0_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
+ write_gcr_reg1_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
+ write_gcr_reg1_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
+ write_gcr_reg2_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
+ write_gcr_reg2_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
+ write_gcr_reg3_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
+ write_gcr_reg3_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
+
+ /* probe for an L2-only sync region */
+ mips_cm_probe_l2sync();
+
+ return 0;
+}
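
mips_cm_probe() is meant to run early in platform setup, before anything that relies on the GCRs being mapped. A hypothetical call sequence (the enclosing function name is illustrative and not part of this patch; the CPC probe added below depends on the CM having been found first):

/* Hypothetical early platform hook, name illustrative only. */
void __init plat_cm_setup(void)
{
        if (mips_cm_probe())
                return;                 /* no Coherence Manager found */

        /* The CPC registers are discovered through the CM GCRs. */
        if (mips_cpc_probe())
                pr_info("no Cluster Power Controller present\n");
}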
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
new file mode 100644
index 000000000000..c9dc67402969
--- /dev/null
+++ b/arch/mips/kernel/mips-cpc.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/errno.h>
+
+#include <asm/mips-cm.h>
+#include <asm/mips-cpc.h>
+
+void __iomem *mips_cpc_base;
+
+phys_t __weak mips_cpc_phys_base(void)
+{
+ u32 cpc_base;
+
+ if (!mips_cm_present())
+ return 0;
+
+ if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX_MSK))
+ return 0;
+
+ /* If the CPC is already enabled, leave it so */
+ cpc_base = read_gcr_cpc_base();
+ if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK)
+ return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK;
+
+ /* Otherwise, give it the default address & enable it */
+ cpc_base = mips_cpc_default_phys_base();
+ write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK);
+ return cpc_base;
+}
+
+int mips_cpc_probe(void)
+{
+ phys_t addr;
+
+ addr = mips_cpc_phys_base();
+ if (!addr)
+ return -ENODEV;
+
+ mips_cpc_base = ioremap_nocache(addr, 0x8000);
+ if (!mips_cpc_base)
+ return -ENXIO;
+
+ return 0;
+}
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 6e58e97fcd39..2607c3a4ff7e 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -16,12 +16,20 @@
#include <asm/ftrace.h>
extern void *__bzero(void *__s, size_t __count);
+extern long __strncpy_from_kernel_nocheck_asm(char *__to,
+ const char *__from, long __len);
+extern long __strncpy_from_kernel_asm(char *__to, const char *__from,
+ long __len);
extern long __strncpy_from_user_nocheck_asm(char *__to,
const char *__from, long __len);
extern long __strncpy_from_user_asm(char *__to, const char *__from,
long __len);
+extern long __strlen_kernel_nocheck_asm(const char *s);
+extern long __strlen_kernel_asm(const char *s);
extern long __strlen_user_nocheck_asm(const char *s);
extern long __strlen_user_asm(const char *s);
+extern long __strnlen_kernel_nocheck_asm(const char *s);
+extern long __strnlen_kernel_asm(const char *s);
extern long __strnlen_user_nocheck_asm(const char *s);
extern long __strnlen_user_asm(const char *s);
@@ -43,17 +51,31 @@ EXPORT_SYMBOL(copy_page);
*/
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(__copy_user_inatomic);
+#ifdef CONFIG_EVA
+EXPORT_SYMBOL(__copy_from_user_eva);
+EXPORT_SYMBOL(__copy_in_user_eva);
+EXPORT_SYMBOL(__copy_to_user_eva);
+EXPORT_SYMBOL(__copy_user_inatomic_eva);
+#endif
EXPORT_SYMBOL(__bzero);
+EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm);
+EXPORT_SYMBOL(__strncpy_from_kernel_asm);
EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm);
EXPORT_SYMBOL(__strncpy_from_user_asm);
+EXPORT_SYMBOL(__strlen_kernel_nocheck_asm);
+EXPORT_SYMBOL(__strlen_kernel_asm);
EXPORT_SYMBOL(__strlen_user_nocheck_asm);
EXPORT_SYMBOL(__strlen_user_asm);
+EXPORT_SYMBOL(__strnlen_kernel_nocheck_asm);
+EXPORT_SYMBOL(__strnlen_kernel_asm);
EXPORT_SYMBOL(__strnlen_user_nocheck_asm);
EXPORT_SYMBOL(__strnlen_user_asm);
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_nocheck);
-EXPORT_SYMBOL(__csum_partial_copy_user);
+EXPORT_SYMBOL(__csum_partial_copy_kernel);
+EXPORT_SYMBOL(__csum_partial_copy_to_user);
+EXPORT_SYMBOL(__csum_partial_copy_from_user);
EXPORT_SYMBOL(invalid_pte_table);
#ifdef CONFIG_FUNCTION_TRACER
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 24cdf64789c3..4f2d9dece7ab 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -805,7 +805,7 @@ static void reset_counters(void *arg)
}
}
-/* 24K/34K/1004K cores can share the same event map. */
+/* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
static const struct mips_perf_event mipsxxcore_event_map
[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
@@ -814,8 +814,8 @@ static const struct mips_perf_event mipsxxcore_event_map
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};
-/* 74K core has different branch event code. */
-static const struct mips_perf_event mipsxx74Kcore_event_map
+/* 74K/proAptiv core has different branch event code. */
+static const struct mips_perf_event mipsxxcore_event_map2
[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
@@ -849,7 +849,7 @@ static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
};
-/* 24K/34K/1004K cores can share the same cache event map. */
+/* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -930,8 +930,8 @@ static const struct mips_perf_event mipsxxcore_cache_map
},
};
-/* 74K core has completely different cache event map. */
-static const struct mips_perf_event mipsxx74Kcore_cache_map
+/* 74K/proAptiv core has completely different cache event map. */
+static const struct mips_perf_event mipsxxcore_cache_map2
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
@@ -978,6 +978,11 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map
[C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
},
},
+/*
+ * 74K core does not have specific DTLB events. proAptiv core has
+ * "speculative" DTLB events which are numbered 0x63 (even/odd) and
+ * not included here. One can use raw events if really needed.
+ */
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
@@ -1378,6 +1383,10 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
#define IS_BOTH_COUNTERS_74K_EVENT(b) \
((b) == 0 || (b) == 1)
+/* proAptiv */
+#define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b) \
+ ((b) == 0 || (b) == 1)
+
/* 1004K */
#define IS_BOTH_COUNTERS_1004K_EVENT(b) \
((b) == 0 || (b) == 1 || (b) == 11)
@@ -1391,6 +1400,20 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
#define IS_RANGE_V_1004K_EVENT(r) ((r) == 47)
#endif
+/* interAptiv */
+#define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+#ifdef CONFIG_MIPS_MT_SMP
+/* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */
+#define IS_RANGE_P_INTERAPTIV_EVENT(r, b) \
+ ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
+ (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 || \
+ (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 && \
+ (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 || \
+ ((b) >= 64 && (b) <= 67))
+#define IS_RANGE_V_INTERAPTIV_EVENT(r) ((r) == 47 || (r) == 175)
+#endif
+
/* BMIPS5000 */
#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b) \
((b) == 0 || (b) == 1)
@@ -1442,6 +1465,7 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
#endif
break;
case CPU_74K:
+ case CPU_1074K:
if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
else
@@ -1451,6 +1475,16 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
raw_event.range = P;
#endif
break;
+ case CPU_PROAPTIV:
+ if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ raw_event.range = P;
+#endif
+ break;
case CPU_1004K:
if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
@@ -1466,6 +1500,21 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
raw_event.range = T;
#endif
break;
+ case CPU_INTERAPTIV:
+ if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
+ raw_event.range = P;
+ else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
+ raw_event.range = V;
+ else
+ raw_event.range = T;
+#endif
+ break;
case CPU_BMIPS5000:
if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
@@ -1576,14 +1625,29 @@ init_hw_perf_events(void)
break;
case CPU_74K:
mipspmu.name = "mips/74K";
- mipspmu.general_event_map = &mipsxx74Kcore_event_map;
- mipspmu.cache_event_map = &mipsxx74Kcore_cache_map;
+ mipspmu.general_event_map = &mipsxxcore_event_map2;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map2;
+ break;
+ case CPU_PROAPTIV:
+ mipspmu.name = "mips/proAptiv";
+ mipspmu.general_event_map = &mipsxxcore_event_map2;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map2;
break;
case CPU_1004K:
mipspmu.name = "mips/1004K";
mipspmu.general_event_map = &mipsxxcore_event_map;
mipspmu.cache_event_map = &mipsxxcore_cache_map;
break;
+ case CPU_1074K:
+ mipspmu.name = "mips/1074K";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_INTERAPTIV:
+ mipspmu.name = "mips/interAptiv";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
case CPU_LOONGSON1:
mipspmu.name = "mips/loongson1";
mipspmu.general_event_map = &mipsxxcore_event_map;
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 00d20974b3e7..e40971b51d2f 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -17,8 +17,24 @@
unsigned int vced_count, vcei_count;
+/*
+ * No lock; only written during early bootup by CPU 0.
+ */
+static RAW_NOTIFIER_HEAD(proc_cpuinfo_chain);
+
+int __ref register_proc_cpuinfo_notifier(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&proc_cpuinfo_chain, nb);
+}
+
+int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v)
+{
+ return raw_notifier_call_chain(&proc_cpuinfo_chain, val, v);
+}
+
static int show_cpuinfo(struct seq_file *m, void *v)
{
+ struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
unsigned long n = (unsigned long) v - 1;
unsigned int version = cpu_data[n].processor_id;
unsigned int fp_vers = cpu_data[n].fpu_id;
@@ -95,6 +111,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (cpu_has_mipsmt) seq_printf(m, "%s", " mt");
if (cpu_has_mmips) seq_printf(m, "%s", " micromips");
if (cpu_has_vz) seq_printf(m, "%s", " vz");
+ if (cpu_has_msa) seq_printf(m, "%s", " msa");
+ if (cpu_has_eva) seq_printf(m, "%s", " eva");
seq_printf(m, "\n");
if (cpu_has_mmips) {
@@ -118,6 +136,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
cpu_has_vce ? "%u" : "not available");
seq_printf(m, fmt, 'D', vced_count);
seq_printf(m, fmt, 'I', vcei_count);
+
+ proc_cpuinfo_notifier_args.m = m;
+ proc_cpuinfo_notifier_args.n = n;
+
+ raw_notifier_call_chain(&proc_cpuinfo_chain, 0,
+ &proc_cpuinfo_notifier_args);
+
seq_printf(m, "\n");
return 0;
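
The new notifier chain lets platform code append its own lines to /proc/cpuinfo without patching show_cpuinfo(). A hypothetical user, assuming proc_cpuinfo_notifier_args exposes the seq_file as .m and the CPU index as .n to match the usage above:

static int board_cpuinfo_notify(struct notifier_block *nb,
                                unsigned long action, void *data)
{
        struct proc_cpuinfo_notifier_args *args = data;

        /* Adds one extra line per CPU entry in /proc/cpuinfo. */
        seq_printf(args->m, "board cpu id\t\t: %lu\n", args->n);
        return NOTIFY_OK;
}

static struct notifier_block board_cpuinfo_nb = {
        .notifier_call = board_cpuinfo_notify,
};

register_proc_cpuinfo_notifier(&board_cpuinfo_nb) would then be called once from early platform setup, before the first /proc/cpuinfo entry is printed.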
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 6ae540e133b2..60e39dc7f1eb 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -32,6 +32,7 @@
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
+#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
@@ -65,6 +66,8 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
clear_used_math();
clear_fpu_owner();
init_dsp();
+ clear_thread_flag(TIF_MSA_CTX_LIVE);
+ disable_msa();
regs->cp0_epc = pc;
regs->regs[29] = sp;
}
@@ -89,7 +92,9 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
preempt_disable();
- if (is_fpu_owner())
+ if (is_msa_enabled())
+ save_msa(p);
+ else if (is_fpu_owner())
save_fp(p);
if (cpu_has_dsp)
@@ -157,7 +162,13 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
/* Fill in the fpu structure for a core dump.. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
- memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu));
+ int i;
+
+ for (i = 0; i < NUM_FPU_REGS; i++)
+ memcpy(&r[i], &current->thread.fpu.fpr[i], sizeof(*r));
+
+ memcpy(&r[NUM_FPU_REGS], &current->thread.fpu.fcr31,
+ sizeof(current->thread.fpu.fcr31));
return 1;
}
@@ -192,7 +203,13 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
{
- memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu));
+ int i;
+
+ for (i = 0; i < NUM_FPU_REGS; i++)
+ memcpy(&fpr[i], &t->thread.fpu.fpr[i], sizeof(*fpr));
+
+ memcpy(&fpr[NUM_FPU_REGS], &t->thread.fpu.fcr31,
+ sizeof(t->thread.fpu.fcr31));
return 1;
}
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 7da9b76db4d9..7271e5a83081 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -114,51 +114,30 @@ int ptrace_setregs(struct task_struct *child, __s64 __user *data)
int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
int i;
- unsigned int tmp;
if (!access_ok(VERIFY_WRITE, data, 33 * 8))
return -EIO;
if (tsk_used_math(child)) {
- fpureg_t *fregs = get_fpu_regs(child);
+ union fpureg *fregs = get_fpu_regs(child);
for (i = 0; i < 32; i++)
- __put_user(fregs[i], i + (__u64 __user *) data);
+ __put_user(get_fpr64(&fregs[i], 0),
+ i + (__u64 __user *)data);
} else {
for (i = 0; i < 32; i++)
__put_user((__u64) -1, i + (__u64 __user *) data);
}
__put_user(child->thread.fpu.fcr31, data + 64);
-
- preempt_disable();
- if (cpu_has_fpu) {
- unsigned int flags;
-
- if (cpu_has_mipsmt) {
- unsigned int vpflags = dvpe();
- flags = read_c0_status();
- __enable_fpu(FPU_AS_IS);
- __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
- write_c0_status(flags);
- evpe(vpflags);
- } else {
- flags = read_c0_status();
- __enable_fpu(FPU_AS_IS);
- __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
- write_c0_status(flags);
- }
- } else {
- tmp = 0;
- }
- preempt_enable();
- __put_user(tmp, data + 65);
+ __put_user(current_cpu_data.fpu_id, data + 65);
return 0;
}
int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
- fpureg_t *fregs;
+ union fpureg *fregs;
+ u64 fpr_val;
int i;
if (!access_ok(VERIFY_READ, data, 33 * 8))
@@ -166,8 +145,10 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
fregs = get_fpu_regs(child);
- for (i = 0; i < 32; i++)
- __get_user(fregs[i], i + (__u64 __user *) data);
+ for (i = 0; i < 32; i++) {
+ __get_user(fpr_val, i + (__u64 __user *)data);
+ set_fpr64(&fregs[i], 0, fpr_val);
+ }
__get_user(child->thread.fpu.fcr31, data + 64);
@@ -300,10 +281,27 @@ static int fpr_get(struct task_struct *target,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.fpu,
- 0, sizeof(elf_fpregset_t));
+ unsigned i;
+ int err;
+ u64 fpr_val;
+
/* XXX fcr31 */
+
+ if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu,
+ 0, sizeof(elf_fpregset_t));
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
+ err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &fpr_val, i * sizeof(elf_fpreg_t),
+ (i + 1) * sizeof(elf_fpreg_t));
+ if (err)
+ return err;
+ }
+
+ return 0;
}
static int fpr_set(struct task_struct *target,
@@ -311,10 +309,27 @@ static int fpr_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.fpu,
- 0, sizeof(elf_fpregset_t));
+ unsigned i;
+ int err;
+ u64 fpr_val;
+
/* XXX fcr31 */
+
+ if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu,
+ 0, sizeof(elf_fpregset_t));
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &fpr_val, i * sizeof(elf_fpreg_t),
+ (i + 1) * sizeof(elf_fpreg_t));
+ if (err)
+ return err;
+ set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
+ }
+
+ return 0;
}
enum mips_regset {
@@ -408,7 +423,7 @@ long arch_ptrace(struct task_struct *child, long request,
/* Read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
struct pt_regs *regs;
- fpureg_t *fregs;
+ union fpureg *fregs;
unsigned long tmp = 0;
regs = task_pt_regs(child);
@@ -433,14 +448,12 @@ long arch_ptrace(struct task_struct *child, long request,
* order bits of the values stored in the even
* registers - unless we're using r2k_switch.S.
*/
- if (addr & 1)
- tmp = fregs[(addr & ~1) - 32] >> 32;
- else
- tmp = fregs[addr - 32];
+ tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+ addr & 1);
break;
}
#endif
- tmp = fregs[addr - FPR_BASE];
+ tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
break;
case PC:
tmp = regs->cp0_epc;
@@ -465,44 +478,10 @@ long arch_ptrace(struct task_struct *child, long request,
case FPC_CSR:
tmp = child->thread.fpu.fcr31;
break;
- case FPC_EIR: { /* implementation / version register */
- unsigned int flags;
-#ifdef CONFIG_MIPS_MT_SMTC
- unsigned long irqflags;
- unsigned int mtflags;
-#endif /* CONFIG_MIPS_MT_SMTC */
-
- preempt_disable();
- if (!cpu_has_fpu) {
- preempt_enable();
- break;
- }
-
-#ifdef CONFIG_MIPS_MT_SMTC
- /* Read-modify-write of Status must be atomic */
- local_irq_save(irqflags);
- mtflags = dmt();
-#endif /* CONFIG_MIPS_MT_SMTC */
- if (cpu_has_mipsmt) {
- unsigned int vpflags = dvpe();
- flags = read_c0_status();
- __enable_fpu(FPU_AS_IS);
- __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
- write_c0_status(flags);
- evpe(vpflags);
- } else {
- flags = read_c0_status();
- __enable_fpu(FPU_AS_IS);
- __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
- write_c0_status(flags);
- }
-#ifdef CONFIG_MIPS_MT_SMTC
- emt(mtflags);
- local_irq_restore(irqflags);
-#endif /* CONFIG_MIPS_MT_SMTC */
- preempt_enable();
+ case FPC_EIR:
+ /* implementation / version register */
+ tmp = current_cpu_data.fpu_id;
break;
- }
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
@@ -548,7 +527,7 @@ long arch_ptrace(struct task_struct *child, long request,
regs->regs[addr] = data;
break;
case FPR_BASE ... FPR_BASE + 31: {
- fpureg_t *fregs = get_fpu_regs(child);
+ union fpureg *fregs = get_fpu_regs(child);
if (!tsk_used_math(child)) {
/* FP not yet used */
@@ -563,19 +542,12 @@ long arch_ptrace(struct task_struct *child, long request,
* order bits of the values stored in the even
* registers - unless we're using r2k_switch.S.
*/
- if (addr & 1) {
- fregs[(addr & ~1) - FPR_BASE] &=
- 0xffffffff;
- fregs[(addr & ~1) - FPR_BASE] |=
- ((u64)data) << 32;
- } else {
- fregs[addr - FPR_BASE] &= ~0xffffffffLL;
- fregs[addr - FPR_BASE] |= data;
- }
+ set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+ addr & 1, data);
break;
}
#endif
- fregs[addr - FPR_BASE] = data;
+ set_fpr64(&fregs[addr - FPR_BASE], 0, data);
break;
}
case PC:
@@ -662,13 +634,13 @@ long arch_ptrace(struct task_struct *child, long request,
* Notification of system call entry/exit
* - triggered by current->work.syscall_trace
*/
-asmlinkage void syscall_trace_enter(struct pt_regs *regs)
+asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
long ret = 0;
user_exit();
- /* do the secure computing check first */
- secure_computing_strict(regs->regs[2]);
+ if (secure_computing(syscall) == -1)
+ return -1;
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
@@ -677,10 +649,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[2]);
- audit_syscall_entry(__syscall_get_arch(),
- regs->regs[2],
+ audit_syscall_entry(syscall_get_arch(current, regs),
+ syscall,
regs->regs[4], regs->regs[5],
regs->regs[6], regs->regs[7]);
+ return syscall;
}
/*
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index b8aa2dd5b00b..b40c3ca60ee5 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -80,7 +80,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
/* Read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
struct pt_regs *regs;
- fpureg_t *fregs;
+ union fpureg *fregs;
unsigned int tmp;
regs = task_pt_regs(child);
@@ -103,13 +103,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
* order bits of the values stored in the even
* registers - unless we're using r2k_switch.S.
*/
- if (addr & 1)
- tmp = fregs[(addr & ~1) - 32] >> 32;
- else
- tmp = fregs[addr - 32];
+ tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+ addr & 1);
break;
}
- tmp = fregs[addr - FPR_BASE];
+ tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
break;
case PC:
tmp = regs->cp0_epc;
@@ -129,46 +127,10 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
case FPC_CSR:
tmp = child->thread.fpu.fcr31;
break;
- case FPC_EIR: { /* implementation / version register */
- unsigned int flags;
-#ifdef CONFIG_MIPS_MT_SMTC
- unsigned int irqflags;
- unsigned int mtflags;
-#endif /* CONFIG_MIPS_MT_SMTC */
-
- preempt_disable();
- if (!cpu_has_fpu) {
- preempt_enable();
- tmp = 0;
- break;
- }
-
-#ifdef CONFIG_MIPS_MT_SMTC
- /* Read-modify-write of Status must be atomic */
- local_irq_save(irqflags);
- mtflags = dmt();
-#endif /* CONFIG_MIPS_MT_SMTC */
-
- if (cpu_has_mipsmt) {
- unsigned int vpflags = dvpe();
- flags = read_c0_status();
- __enable_fpu(FPU_AS_IS);
- __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
- write_c0_status(flags);
- evpe(vpflags);
- } else {
- flags = read_c0_status();
- __enable_fpu(FPU_AS_IS);
- __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
- write_c0_status(flags);
- }
-#ifdef CONFIG_MIPS_MT_SMTC
- emt(mtflags);
- local_irq_restore(irqflags);
-#endif /* CONFIG_MIPS_MT_SMTC */
- preempt_enable();
+ case FPC_EIR:
+ /* implementation / version register */
+ tmp = current_cpu_data.fpu_id;
break;
- }
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
@@ -233,7 +195,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
regs->regs[addr] = data;
break;
case FPR_BASE ... FPR_BASE + 31: {
- fpureg_t *fregs = get_fpu_regs(child);
+ union fpureg *fregs = get_fpu_regs(child);
if (!tsk_used_math(child)) {
/* FP not yet used */
@@ -247,18 +209,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
* order bits of the values stored in the even
* registers - unless we're using r2k_switch.S.
*/
- if (addr & 1) {
- fregs[(addr & ~1) - FPR_BASE] &=
- 0xffffffff;
- fregs[(addr & ~1) - FPR_BASE] |=
- ((u64)data) << 32;
- } else {
- fregs[addr - FPR_BASE] &= ~0xffffffffLL;
- fregs[addr - FPR_BASE] |= data;
- }
+ set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+ addr & 1, data);
break;
}
- fregs[addr - FPR_BASE] = data;
+ set_fpr64(&fregs[addr - FPR_BASE], 0, data);
break;
}
case PC:
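
The ptrace and ptrace32 hunks above replace direct fpureg_t arithmetic with get_fpr32/get_fpr64/set_fpr32/set_fpr64 accessors on the new union fpureg, which views each (now potentially 128-bit wide) FP register as an array of 32-bit or 64-bit words. The real definitions live in asm/processor.h and handle big-endian index swapping via FPR_IDX; the sketch below ignores that detail and assumes 128-bit registers:

/* Approximate shape of the accessors used above (see asm/processor.h). */
union fpureg {
        u32     val32[4];
        u64     val64[2];
};

static inline u64 get_fpr64(union fpureg *fpr, unsigned int idx)
{
        return fpr->val64[idx];         /* idx 0: least significant 64 bits */
}

static inline void set_fpr32(union fpureg *fpr, unsigned int idx, u32 val)
{
        fpr->val32[idx] = val;          /* idx 1: odd-register half when FR=0 */
}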
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 253b2fb52026..71814272d148 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -13,6 +13,7 @@
* Copyright (C) 1999, 2001 Silicon Graphics, Inc.
*/
#include <asm/asm.h>
+#include <asm/asmmacro.h>
#include <asm/errno.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
@@ -30,14 +31,14 @@
.endm
.set noreorder
- .set mips3
+ .set arch=r4000
LEAF(_save_fp_context)
cfc1 t1, fcr31
-#if defined(CONFIG_64BIT) || defined(CONFIG_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
.set push
-#ifdef CONFIG_MIPS32_R2
+#ifdef CONFIG_CPU_MIPS32_R2
.set mips64r2
mfc0 t0, CP0_STATUS
sll t0, t0, 5
@@ -146,11 +147,11 @@ LEAF(_save_fp_context32)
* - cp1 status/control register
*/
LEAF(_restore_fp_context)
- EX lw t0, SC_FPC_CSR(a0)
+ EX lw t1, SC_FPC_CSR(a0)
-#if defined(CONFIG_64BIT) || defined(CONFIG_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
.set push
-#ifdef CONFIG_MIPS32_R2
+#ifdef CONFIG_CPU_MIPS32_R2
.set mips64r2
mfc0 t0, CP0_STATUS
sll t0, t0, 5
@@ -191,7 +192,7 @@ LEAF(_restore_fp_context)
EX ldc1 $f26, SC_FPREGS+208(a0)
EX ldc1 $f28, SC_FPREGS+224(a0)
EX ldc1 $f30, SC_FPREGS+240(a0)
- ctc1 t0, fcr31
+ ctc1 t1, fcr31
jr ra
li v0, 0 # success
END(_restore_fp_context)
@@ -199,7 +200,7 @@ LEAF(_restore_fp_context)
#ifdef CONFIG_MIPS32_COMPAT
LEAF(_restore_fp_context32)
/* Restore an o32 sigcontext. */
- EX lw t0, SC32_FPC_CSR(a0)
+ EX lw t1, SC32_FPC_CSR(a0)
mfc0 t0, CP0_STATUS
sll t0, t0, 5
@@ -239,12 +240,224 @@ LEAF(_restore_fp_context32)
EX ldc1 $f26, SC32_FPREGS+208(a0)
EX ldc1 $f28, SC32_FPREGS+224(a0)
EX ldc1 $f30, SC32_FPREGS+240(a0)
- ctc1 t0, fcr31
+ ctc1 t1, fcr31
jr ra
li v0, 0 # success
END(_restore_fp_context32)
#endif
+#ifdef CONFIG_CPU_HAS_MSA
+
+ .macro save_sc_msareg wr, off, sc, tmp
+#ifdef CONFIG_64BIT
+ copy_u_d \tmp, \wr, 1
+ EX sd \tmp, (\off+(\wr*8))(\sc)
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+ copy_u_w \tmp, \wr, 2
+ EX sw \tmp, (\off+(\wr*8)+0)(\sc)
+ copy_u_w \tmp, \wr, 3
+ EX sw \tmp, (\off+(\wr*8)+4)(\sc)
+#else /* CONFIG_CPU_BIG_ENDIAN */
+ copy_u_w \tmp, \wr, 2
+ EX sw \tmp, (\off+(\wr*8)+4)(\sc)
+ copy_u_w \tmp, \wr, 3
+ EX sw \tmp, (\off+(\wr*8)+0)(\sc)
+#endif
+ .endm
+
+/*
+ * int _save_msa_context(struct sigcontext *sc)
+ *
+ * Save the upper 64 bits of each vector register along with the MSA_CSR
+ * register into sc. Returns zero on success, else non-zero.
+ */
+LEAF(_save_msa_context)
+ save_sc_msareg 0, SC_MSAREGS, a0, t0
+ save_sc_msareg 1, SC_MSAREGS, a0, t0
+ save_sc_msareg 2, SC_MSAREGS, a0, t0
+ save_sc_msareg 3, SC_MSAREGS, a0, t0
+ save_sc_msareg 4, SC_MSAREGS, a0, t0
+ save_sc_msareg 5, SC_MSAREGS, a0, t0
+ save_sc_msareg 6, SC_MSAREGS, a0, t0
+ save_sc_msareg 7, SC_MSAREGS, a0, t0
+ save_sc_msareg 8, SC_MSAREGS, a0, t0
+ save_sc_msareg 9, SC_MSAREGS, a0, t0
+ save_sc_msareg 10, SC_MSAREGS, a0, t0
+ save_sc_msareg 11, SC_MSAREGS, a0, t0
+ save_sc_msareg 12, SC_MSAREGS, a0, t0
+ save_sc_msareg 13, SC_MSAREGS, a0, t0
+ save_sc_msareg 14, SC_MSAREGS, a0, t0
+ save_sc_msareg 15, SC_MSAREGS, a0, t0
+ save_sc_msareg 16, SC_MSAREGS, a0, t0
+ save_sc_msareg 17, SC_MSAREGS, a0, t0
+ save_sc_msareg 18, SC_MSAREGS, a0, t0
+ save_sc_msareg 19, SC_MSAREGS, a0, t0
+ save_sc_msareg 20, SC_MSAREGS, a0, t0
+ save_sc_msareg 21, SC_MSAREGS, a0, t0
+ save_sc_msareg 22, SC_MSAREGS, a0, t0
+ save_sc_msareg 23, SC_MSAREGS, a0, t0
+ save_sc_msareg 24, SC_MSAREGS, a0, t0
+ save_sc_msareg 25, SC_MSAREGS, a0, t0
+ save_sc_msareg 26, SC_MSAREGS, a0, t0
+ save_sc_msareg 27, SC_MSAREGS, a0, t0
+ save_sc_msareg 28, SC_MSAREGS, a0, t0
+ save_sc_msareg 29, SC_MSAREGS, a0, t0
+ save_sc_msareg 30, SC_MSAREGS, a0, t0
+ save_sc_msareg 31, SC_MSAREGS, a0, t0
+ jr ra
+ li v0, 0
+ END(_save_msa_context)
+
+#ifdef CONFIG_MIPS32_COMPAT
+
+/*
+ * int _save_msa_context32(struct sigcontext32 *sc)
+ *
+ * Save the upper 64 bits of each vector register along with the MSA_CSR
+ * register into sc. Returns zero on success, else non-zero.
+ */
+LEAF(_save_msa_context32)
+ save_sc_msareg 0, SC32_MSAREGS, a0, t0
+ save_sc_msareg 1, SC32_MSAREGS, a0, t0
+ save_sc_msareg 2, SC32_MSAREGS, a0, t0
+ save_sc_msareg 3, SC32_MSAREGS, a0, t0
+ save_sc_msareg 4, SC32_MSAREGS, a0, t0
+ save_sc_msareg 5, SC32_MSAREGS, a0, t0
+ save_sc_msareg 6, SC32_MSAREGS, a0, t0
+ save_sc_msareg 7, SC32_MSAREGS, a0, t0
+ save_sc_msareg 8, SC32_MSAREGS, a0, t0
+ save_sc_msareg 9, SC32_MSAREGS, a0, t0
+ save_sc_msareg 10, SC32_MSAREGS, a0, t0
+ save_sc_msareg 11, SC32_MSAREGS, a0, t0
+ save_sc_msareg 12, SC32_MSAREGS, a0, t0
+ save_sc_msareg 13, SC32_MSAREGS, a0, t0
+ save_sc_msareg 14, SC32_MSAREGS, a0, t0
+ save_sc_msareg 15, SC32_MSAREGS, a0, t0
+ save_sc_msareg 16, SC32_MSAREGS, a0, t0
+ save_sc_msareg 17, SC32_MSAREGS, a0, t0
+ save_sc_msareg 18, SC32_MSAREGS, a0, t0
+ save_sc_msareg 19, SC32_MSAREGS, a0, t0
+ save_sc_msareg 20, SC32_MSAREGS, a0, t0
+ save_sc_msareg 21, SC32_MSAREGS, a0, t0
+ save_sc_msareg 22, SC32_MSAREGS, a0, t0
+ save_sc_msareg 23, SC32_MSAREGS, a0, t0
+ save_sc_msareg 24, SC32_MSAREGS, a0, t0
+ save_sc_msareg 25, SC32_MSAREGS, a0, t0
+ save_sc_msareg 26, SC32_MSAREGS, a0, t0
+ save_sc_msareg 27, SC32_MSAREGS, a0, t0
+ save_sc_msareg 28, SC32_MSAREGS, a0, t0
+ save_sc_msareg 29, SC32_MSAREGS, a0, t0
+ save_sc_msareg 30, SC32_MSAREGS, a0, t0
+ save_sc_msareg 31, SC32_MSAREGS, a0, t0
+ jr ra
+ li v0, 0
+ END(_save_msa_context32)
+
+#endif /* CONFIG_MIPS32_COMPAT */
+
+ .macro restore_sc_msareg wr, off, sc, tmp
+#ifdef CONFIG_64BIT
+ EX ld \tmp, (\off+(\wr*8))(\sc)
+ insert_d \wr, 1, \tmp
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+ EX lw \tmp, (\off+(\wr*8)+0)(\sc)
+ insert_w \wr, 2, \tmp
+ EX lw \tmp, (\off+(\wr*8)+4)(\sc)
+ insert_w \wr, 3, \tmp
+#else /* CONFIG_CPU_BIG_ENDIAN */
+ EX lw \tmp, (\off+(\wr*8)+4)(\sc)
+ insert_w \wr, 2, \tmp
+ EX lw \tmp, (\off+(\wr*8)+0)(\sc)
+ insert_w \wr, 3, \tmp
+#endif
+ .endm
+
+/*
+ * int _restore_msa_context(struct sigcontext *sc)
+ */
+LEAF(_restore_msa_context)
+ restore_sc_msareg 0, SC_MSAREGS, a0, t0
+ restore_sc_msareg 1, SC_MSAREGS, a0, t0
+ restore_sc_msareg 2, SC_MSAREGS, a0, t0
+ restore_sc_msareg 3, SC_MSAREGS, a0, t0
+ restore_sc_msareg 4, SC_MSAREGS, a0, t0
+ restore_sc_msareg 5, SC_MSAREGS, a0, t0
+ restore_sc_msareg 6, SC_MSAREGS, a0, t0
+ restore_sc_msareg 7, SC_MSAREGS, a0, t0
+ restore_sc_msareg 8, SC_MSAREGS, a0, t0
+ restore_sc_msareg 9, SC_MSAREGS, a0, t0
+ restore_sc_msareg 10, SC_MSAREGS, a0, t0
+ restore_sc_msareg 11, SC_MSAREGS, a0, t0
+ restore_sc_msareg 12, SC_MSAREGS, a0, t0
+ restore_sc_msareg 13, SC_MSAREGS, a0, t0
+ restore_sc_msareg 14, SC_MSAREGS, a0, t0
+ restore_sc_msareg 15, SC_MSAREGS, a0, t0
+ restore_sc_msareg 16, SC_MSAREGS, a0, t0
+ restore_sc_msareg 17, SC_MSAREGS, a0, t0
+ restore_sc_msareg 18, SC_MSAREGS, a0, t0
+ restore_sc_msareg 19, SC_MSAREGS, a0, t0
+ restore_sc_msareg 20, SC_MSAREGS, a0, t0
+ restore_sc_msareg 21, SC_MSAREGS, a0, t0
+ restore_sc_msareg 22, SC_MSAREGS, a0, t0
+ restore_sc_msareg 23, SC_MSAREGS, a0, t0
+ restore_sc_msareg 24, SC_MSAREGS, a0, t0
+ restore_sc_msareg 25, SC_MSAREGS, a0, t0
+ restore_sc_msareg 26, SC_MSAREGS, a0, t0
+ restore_sc_msareg 27, SC_MSAREGS, a0, t0
+ restore_sc_msareg 28, SC_MSAREGS, a0, t0
+ restore_sc_msareg 29, SC_MSAREGS, a0, t0
+ restore_sc_msareg 30, SC_MSAREGS, a0, t0
+ restore_sc_msareg 31, SC_MSAREGS, a0, t0
+ jr ra
+ li v0, 0
+ END(_restore_msa_context)
+
+#ifdef CONFIG_MIPS32_COMPAT
+
+/*
+ * int _restore_msa_context32(struct sigcontext32 *sc)
+ */
+LEAF(_restore_msa_context32)
+ restore_sc_msareg 0, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 1, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 2, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 3, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 4, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 5, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 6, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 7, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 8, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 9, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 10, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 11, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 12, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 13, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 14, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 15, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 16, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 17, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 18, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 19, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 20, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 21, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 22, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 23, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 24, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 25, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 26, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 27, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 28, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 29, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 30, SC32_MSAREGS, a0, t0
+ restore_sc_msareg 31, SC32_MSAREGS, a0, t0
+ jr ra
+ li v0, 0
+ END(_restore_msa_context32)
+
+#endif /* CONFIG_MIPS32_COMPAT */
+
+#endif /* CONFIG_CPU_HAS_MSA */
+
.set reorder
.type fault@function
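
The save_sc_msareg/restore_sc_msareg macros above address the upper 64 bits of vector register $w<wr> at byte offset off + wr*8 within the sigcontext; on 32-bit kernels each doubleword is split into two word accesses whose order depends on endianness. A minimal C sketch of the equivalent stores, purely illustrative and not part of the patch (the struct and helper names are invented):

	#include <stdint.h>

	struct msa_sigctx_sketch {
		uint64_t sc_msaregs[32];	/* upper 64 bits of $w0..$w31 */
	};

	/* 64-bit kernel: a single doubleword store, like the "EX sd" path. */
	static void save_one_msareg64(struct msa_sigctx_sketch *sc,
				      unsigned int wr, uint64_t upper64)
	{
		sc->sc_msaregs[wr] = upper64;
	}

	/* 32-bit kernel: two word stores.  Word element 2 of the vector
	 * register lands at offset +0 on little-endian and +4 on big-endian,
	 * mirroring the insert_w element indices in restore_sc_msareg. */
	static void save_one_msareg32(struct msa_sigctx_sketch *sc,
				      unsigned int wr, uint32_t w2, uint32_t w3,
				      int little_endian)
	{
		uint32_t *p = (uint32_t *)&sc->sc_msaregs[wr];

		p[little_endian ? 0 : 1] = w2;
		p[little_endian ? 1 : 0] = w3;
	}
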
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index cc78dd9a17c7..abacac7c33ef 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -29,18 +29,8 @@
#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
/*
- * FPU context is saved iff the process has used it's FPU in the current
- * time slice as indicated by _TIF_USEDFPU. In any case, the CU1 bit for user
- * space STATUS register should be 0, so that a process *always* starts its
- * userland with FPU disabled after each context switch.
- *
- * FPU will be enabled as soon as the process accesses FPU again, through
- * do_cpu() trap.
- */
-
-/*
* task_struct *resume(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti, int usedfpu)
+ * struct thread_info *next_ti, s32 fp_save)
*/
.align 5
LEAF(resume)
@@ -50,23 +40,37 @@
LONG_S ra, THREAD_REG31(a0)
/*
- * check if we need to save FPU registers
+ * Check whether we need to save any FP context. FP context is saved
+ * iff the process has used the context with the scalar FPU or the MSA
+ * ASE in the current time slice, as indicated by _TIF_USEDFPU and
+ * _TIF_USEDMSA respectively. switch_to will have set fp_save
+ * accordingly to an FP_SAVE_ enum value.
*/
+ beqz a3, 2f
- beqz a3, 1f
-
- PTR_L t3, TASK_THREAD_INFO(a0)
/*
- * clear saved user stack CU1 bit
+ * We do. Clear the saved CU1 bit for prev, such that next time it is
+ * scheduled it will start in userland with the FPU disabled. If the
+ * task uses the FPU then it will be enabled again via the do_cpu trap.
+ * This allows us to lazily restore the FP context.
*/
+ PTR_L t3, TASK_THREAD_INFO(a0)
LONG_L t0, ST_OFF(t3)
li t1, ~ST0_CU1
and t0, t0, t1
LONG_S t0, ST_OFF(t3)
+ /* Check whether we're saving scalar or vector context. */
+ bgtz a3, 1f
+
+ /* Save 128b MSA vector context. */
+ msa_save_all a0
+ b 2f
+
+1: /* Save 32b/64b scalar FP context. */
fpu_save_double a0 t0 t1 # c0_status passed in t0
# clobbers t1
-1:
+2:
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
PTR_LA t8, __stack_chk_guard
@@ -141,6 +145,26 @@ LEAF(_restore_fp)
jr ra
END(_restore_fp)
+#ifdef CONFIG_CPU_HAS_MSA
+
+/*
+ * Save a thread's MSA vector context.
+ */
+LEAF(_save_msa)
+ msa_save_all a0
+ jr ra
+ END(_save_msa)
+
+/*
+ * Restore a thread's MSA vector context.
+ */
+LEAF(_restore_msa)
+ msa_restore_all a0
+ jr ra
+ END(_restore_msa)
+
+#endif
+
/*
* Load the FPU with signalling NANS. This bit pattern we're using has
* the property that no matter whether considered as single or as double
@@ -270,7 +294,7 @@ LEAF(_init_fpu)
1: .set pop
#endif /* CONFIG_CPU_MIPS32_R2 */
#else
- .set mips3
+ .set arch=r4000
dmtc1 t1, $f0
dmtc1 t1, $f2
dmtc1 t1, $f4
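
To summarise the new resume() contract: a3 carries an fp_save code from switch_to(); zero means nothing to save, a positive value selects the scalar FP path (bgtz) and anything else the 128-bit MSA path. A hedged C sketch of how the caller could compute that value; the FP_SAVE_* names and values are assumptions chosen to match the beqz/bgtz tests above, not quoted from the patch:

	#define FP_SAVE_NONE	 0	/* beqz a3, 2f  -> skip save       */
	#define FP_SAVE_VECTOR	-1	/* fall through -> msa_save_all    */
	#define FP_SAVE_SCALAR	 1	/* bgtz a3, 1f  -> fpu_save_double */

	static inline int compute_fp_save(int used_msa, int used_fpu)
	{
		if (used_msa)
			return FP_SAVE_VECTOR;	/* vector save covers the scalar halves too */
		if (used_fpu)
			return FP_SAVE_SCALAR;
		return FP_SAVE_NONE;
	}
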
diff --git a/arch/mips/kernel/rtlx-cmp.c b/arch/mips/kernel/rtlx-cmp.c
index 56dc69635153..758fb3cd2326 100644
--- a/arch/mips/kernel/rtlx-cmp.c
+++ b/arch/mips/kernel/rtlx-cmp.c
@@ -112,5 +112,8 @@ void __exit rtlx_module_exit(void)
for (i = 0; i < RTLX_CHANNELS; i++)
device_destroy(mt_class, MKDEV(major, i));
+
unregister_chrdev(major, RTLX_MODULE_NAME);
+
+ aprp_hook = NULL;
}
diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c
index 91d61ba422b4..9c1aca00fd54 100644
--- a/arch/mips/kernel/rtlx-mt.c
+++ b/arch/mips/kernel/rtlx-mt.c
@@ -144,5 +144,8 @@ void __exit rtlx_module_exit(void)
for (i = 0; i < RTLX_CHANNELS; i++)
device_destroy(mt_class, MKDEV(major, i));
+
unregister_chrdev(major, RTLX_MODULE_NAME);
+
+ aprp_hook = NULL;
}
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index a5b14f48e1af..fdc70b400442 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -6,6 +6,7 @@
* Copyright (C) 1995-99, 2000- 02, 06 Ralf Baechle <ralf@linux-mips.org>
* Copyright (C) 2001 MIPS Technologies, Inc.
* Copyright (C) 2004 Thiemo Seufer
+ * Copyright (C) 2014 Imagination Technologies Ltd.
*/
#include <linux/errno.h>
#include <asm/asm.h>
@@ -74,10 +75,10 @@ NESTED(handle_sys, PT_SIZE, sp)
.set noreorder
.set nomacro
-1: lw t5, 16(t0) # argument #5 from usp
-4: lw t6, 20(t0) # argument #6 from usp
-3: lw t7, 24(t0) # argument #7 from usp
-2: lw t8, 28(t0) # argument #8 from usp
+1: user_lw(t5, 16(t0)) # argument #5 from usp
+4: user_lw(t6, 20(t0)) # argument #6 from usp
+3: user_lw(t7, 24(t0)) # argument #7 from usp
+2: user_lw(t8, 28(t0)) # argument #8 from usp
sw t5, 16(sp) # argument #5 to ksp
sw t6, 20(sp) # argument #6 to ksp
@@ -118,7 +119,18 @@ syscall_trace_entry:
SAVE_STATIC
move s0, t2
move a0, sp
- jal syscall_trace_enter
+
+ /*
+ * syscall number is in v0 unless we called syscall(__NR_###)
+ * where the real syscall number is in a0
+ */
+ addiu a1, v0, __NR_O32_Linux
+ bnez v0, 1f /* __NR_syscall at offset 0 */
+ lw a1, PT_R4(sp)
+
+1: jal syscall_trace_enter
+
+ bltz v0, 2f # seccomp failed? Skip syscall
move t0, s0
RESTORE_STATIC
@@ -138,7 +150,7 @@ syscall_trace_entry:
sw t1, PT_R0(sp) # save it for syscall restarting
1: sw v0, PT_R2(sp) # result
- j syscall_exit
+2: j syscall_exit
/* ------------------------------------------------------------------------ */
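
The trace-entry changes in all four syscall paths follow the same pattern: pass the syscall number to syscall_trace_enter() in a1 (for o32, loading it from the first argument when v0 holds __NR_syscall, i.e. the syscall(2) indirection), then skip the syscall and jump straight to syscall_exit when the return value is negative. A hedged sketch of the C-side contract this relies on; the body below is assumed for illustration, not quoted from the tree:

	#include <linux/linkage.h>
	#include <linux/ptrace.h>
	#include <linux/sched.h>
	#include <linux/seccomp.h>
	#include <linux/tracehook.h>

	asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
	{
		/* Seccomp veto: a negative return makes the assembly take the
		 * "bltz v0, 2f" branch and never dispatch the syscall. */
		if (secure_computing(syscall) == -1)
			return -1;

		if (test_thread_flag(TIF_SYSCALL_TRACE))
			tracehook_report_syscall_entry(regs);

		return syscall;	/* any non-negative value: proceed */
	}
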
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index b56e254beb15..dd99c3285aea 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -80,8 +80,11 @@ syscall_trace_entry:
SAVE_STATIC
move s0, t2
move a0, sp
+ daddiu a1, v0, __NR_64_Linux
jal syscall_trace_enter
+ bltz v0, 2f # seccomp failed? Skip syscall
+
move t0, s0
RESTORE_STATIC
ld a0, PT_R4(sp) # Restore argument registers
@@ -102,7 +105,7 @@ syscall_trace_entry:
sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
- j syscall_exit
+2: j syscall_exit
illegal_syscall:
/* This also isn't a 64-bit syscall, throw an error. */
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index f7e5b72cf481..f68d2f4f0090 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -72,8 +72,11 @@ n32_syscall_trace_entry:
SAVE_STATIC
move s0, t2
move a0, sp
+ daddiu a1, v0, __NR_N32_Linux
jal syscall_trace_enter
+ bltz v0, 2f # seccomp failed? Skip syscall
+
move t0, s0
RESTORE_STATIC
ld a0, PT_R4(sp) # Restore argument registers
@@ -94,7 +97,7 @@ n32_syscall_trace_entry:
sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
- j syscall_exit
+2: j syscall_exit
not_n32_scall:
/* This is not an n32 compatibility syscall, pass it on to
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 6788727d91af..70f6acecd928 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -112,7 +112,20 @@ trace_a_syscall:
move s0, t2 # Save syscall pointer
move a0, sp
- jal syscall_trace_enter
+ /*
+ * syscall number is in v0 unless we called syscall(__NR_###)
+ * where the real syscall number is in a0
+ * note: NR_syscall is the first O32 syscall but the macro is
+ * only defined when compiling with -mabi=32 (CONFIG_32BIT)
+ * therefore __NR_O32_Linux is used (4000)
+ */
+ addiu a1, v0, __NR_O32_Linux
+ bnez v0, 1f /* __NR_syscall at offset 0 */
+ lw a1, PT_R4(sp)
+
+1: jal syscall_trace_enter
+
+ bltz v0, 2f # seccomp failed? Skip syscall
move t0, s0
RESTORE_STATIC
@@ -136,7 +149,7 @@ trace_a_syscall:
sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
- j syscall_exit
+2: j syscall_exit
/* ------------------------------------------------------------------------ */
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 5199563c4403..33133d3df3e5 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -6,6 +6,7 @@
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994 - 2000 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2014, Imagination Technologies Ltd.
*/
#include <linux/cache.h>
#include <linux/context_tracking.h>
@@ -30,6 +31,7 @@
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
+#include <asm/msa.h>
#include <asm/sim.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
@@ -46,8 +48,8 @@ static int (*restore_fp_context)(struct sigcontext __user *sc);
extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
-extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
-extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);
+extern asmlinkage int _save_msa_context(struct sigcontext __user *sc);
+extern asmlinkage int _restore_msa_context(struct sigcontext __user *sc);
struct sigframe {
u32 sf_ass[4]; /* argument save space for o32 */
@@ -64,17 +66,95 @@ struct rt_sigframe {
};
/*
+ * Thread saved context copy to/from a signal context presumed to be on the
+ * user stack, and therefore accessed with appropriate macros from uaccess.h.
+ */
+static int copy_fp_to_sigcontext(struct sigcontext __user *sc)
+{
+ int i;
+ int err = 0;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err |=
+ __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
+ &sc->sc_fpregs[i]);
+ }
+ err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
+
+ return err;
+}
+
+static int copy_fp_from_sigcontext(struct sigcontext __user *sc)
+{
+ int i;
+ int err = 0;
+ u64 fpr_val;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
+ set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
+ }
+ err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
+
+ return err;
+}
+
+/*
+ * These functions will save only the upper 64 bits of the vector registers,
+ * since the lower 64 bits have already been saved as the scalar FP context.
+ */
+static int copy_msa_to_sigcontext(struct sigcontext __user *sc)
+{
+ int i;
+ int err = 0;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err |=
+ __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
+ &sc->sc_msaregs[i]);
+ }
+ err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
+
+ return err;
+}
+
+static int copy_msa_from_sigcontext(struct sigcontext __user *sc)
+{
+ int i;
+ int err = 0;
+ u64 val;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err |= __get_user(val, &sc->sc_msaregs[i]);
+ set_fpr64(&current->thread.fpu.fpr[i], 1, val);
+ }
+ err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
+
+ return err;
+}
+
+/*
* Helper routines
*/
-static int protected_save_fp_context(struct sigcontext __user *sc)
+static int protected_save_fp_context(struct sigcontext __user *sc,
+ unsigned used_math)
{
int err;
+ bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
+#ifndef CONFIG_EVA
while (1) {
lock_fpu_owner();
- err = own_fpu_inatomic(1);
- if (!err)
- err = save_fp_context(sc); /* this might fail */
- unlock_fpu_owner();
+ if (is_fpu_owner()) {
+ err = save_fp_context(sc);
+ if (save_msa && !err)
+ err = _save_msa_context(sc);
+ unlock_fpu_owner();
+ } else {
+ unlock_fpu_owner();
+ err = copy_fp_to_sigcontext(sc);
+ if (save_msa && !err)
+ err = copy_msa_to_sigcontext(sc);
+ }
if (likely(!err))
break;
/* touch the sigcontext and try again */
@@ -84,18 +164,44 @@ static int protected_save_fp_context(struct sigcontext __user *sc)
if (err)
break; /* really bad sigcontext */
}
+#else
+ /*
+	 * EVA does not provide EVA variants of the FPU load/store instructions,
+	 * so the FPU context cannot be saved to the user sigcontext directly.
+ */
+ disable_msa();
+ lose_fpu(1);
+ err = save_fp_context(sc); /* this might fail */
+ if (save_msa && !err)
+ err = copy_msa_to_sigcontext(sc);
+#endif
return err;
}
-static int protected_restore_fp_context(struct sigcontext __user *sc)
+static int protected_restore_fp_context(struct sigcontext __user *sc,
+ unsigned used_math)
{
int err, tmp __maybe_unused;
+ bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
+#ifndef CONFIG_EVA
while (1) {
lock_fpu_owner();
- err = own_fpu_inatomic(0);
- if (!err)
- err = restore_fp_context(sc); /* this might fail */
- unlock_fpu_owner();
+ if (is_fpu_owner()) {
+ err = restore_fp_context(sc);
+ if (restore_msa && !err) {
+ enable_msa();
+ err = _restore_msa_context(sc);
+ } else {
+ /* signal handler may have used MSA */
+ disable_msa();
+ }
+ unlock_fpu_owner();
+ } else {
+ unlock_fpu_owner();
+ err = copy_fp_from_sigcontext(sc);
+ if (!err && (used_math & USEDMATH_MSA))
+ err = copy_msa_from_sigcontext(sc);
+ }
if (likely(!err))
break;
/* touch the sigcontext and try again */
@@ -105,6 +211,17 @@ static int protected_restore_fp_context(struct sigcontext __user *sc)
if (err)
break; /* really bad sigcontext */
}
+#else
+ /*
+	 * EVA does not provide EVA variants of the FPU load/store instructions,
+	 * so the FPU context cannot be restored from the user sigcontext directly.
+ */
+ enable_msa();
+ lose_fpu(0);
+ err = restore_fp_context(sc); /* this might fail */
+ if (restore_msa && !err)
+ err = copy_msa_from_sigcontext(sc);
+#endif
return err;
}
@@ -135,7 +252,8 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
}
- used_math = !!used_math();
+ used_math = used_math() ? USEDMATH_FP : 0;
+ used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
err |= __put_user(used_math, &sc->sc_used_math);
if (used_math) {
@@ -143,7 +261,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
* Save FPU state to signal context. Signal handler
* will "inherit" current FPU state.
*/
- err |= protected_save_fp_context(sc);
+ err |= protected_save_fp_context(sc, used_math);
}
return err;
}
@@ -168,14 +286,14 @@ int fpcsr_pending(unsigned int __user *fpcsr)
}
static int
-check_and_restore_fp_context(struct sigcontext __user *sc)
+check_and_restore_fp_context(struct sigcontext __user *sc, unsigned used_math)
{
int err, sig;
err = sig = fpcsr_pending(&sc->sc_fpc_csr);
if (err > 0)
err = 0;
- err |= protected_restore_fp_context(sc);
+ err |= protected_restore_fp_context(sc, used_math);
return err ?: sig;
}
@@ -215,9 +333,10 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
if (used_math) {
/* restore fpu context if we have used it before */
if (!err)
- err = check_and_restore_fp_context(sc);
+ err = check_and_restore_fp_context(sc, used_math);
} else {
- /* signal handler may have used FPU. Give it up. */
+ /* signal handler may have used FPU or MSA. Disable them. */
+ disable_msa();
lose_fpu(0);
}
@@ -591,23 +710,26 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
}
#ifdef CONFIG_SMP
+#ifndef CONFIG_EVA
static int smp_save_fp_context(struct sigcontext __user *sc)
{
return raw_cpu_has_fpu
? _save_fp_context(sc)
- : fpu_emulator_save_context(sc);
+ : copy_fp_to_sigcontext(sc);
}
static int smp_restore_fp_context(struct sigcontext __user *sc)
{
return raw_cpu_has_fpu
? _restore_fp_context(sc)
- : fpu_emulator_restore_context(sc);
+ : copy_fp_from_sigcontext(sc);
}
+#endif /* CONFIG_EVA */
#endif
static int signal_setup(void)
{
+#ifndef CONFIG_EVA
#ifdef CONFIG_SMP
/* For now just do the cpu_has_fpu check when the functions are invoked */
save_fp_context = smp_save_fp_context;
@@ -617,9 +739,13 @@ static int signal_setup(void)
save_fp_context = _save_fp_context;
restore_fp_context = _restore_fp_context;
} else {
- save_fp_context = fpu_emulator_save_context;
- restore_fp_context = fpu_emulator_restore_context;
+		save_fp_context = copy_fp_to_sigcontext;
+		restore_fp_context = copy_fp_from_sigcontext;
}
+#endif /* CONFIG_SMP */
+#else
+	save_fp_context = copy_fp_to_sigcontext;
+	restore_fp_context = copy_fp_from_sigcontext;
#endif
return 0;
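
The new copy helpers above rely on two conventions: sc_used_math is now a bitmask rather than a boolean (USEDMATH_FP for scalar state, USEDMATH_MSA for live vector state), and get_fpr64()/set_fpr64() select which 64-bit half of a register to touch. A hedged sketch of the register layout those index values imply; the union below is illustrative, not copied from the kernel headers (the real accessor also accounts for endianness):

	#include <stdint.h>

	union fpureg_sketch {
		uint32_t val32[4];
		uint64_t val64[2];	/* [0] scalar FP half, [1] MSA-only half */
	};

	/* get_fpr64(&fpr[i], 0) reads the value the scalar FPU sees in $f<i>;
	 * get_fpr64(&fpr[i], 1) reads the extra bits only MSA can touch. */
	static inline uint64_t get_fpr64_sketch(const union fpureg_sketch *fpr,
						unsigned int idx)
	{
		return fpr->val64[idx];
	}
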
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 3d60f7750fa8..299f956e4db3 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -30,6 +30,7 @@
#include <asm/sim.h>
#include <asm/ucontext.h>
#include <asm/fpu.h>
+#include <asm/msa.h>
#include <asm/war.h>
#include <asm/vdso.h>
#include <asm/dsp.h>
@@ -42,8 +43,8 @@ static int (*restore_fp_context32)(struct sigcontext32 __user *sc);
extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
-extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc);
-extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc);
+extern asmlinkage int _save_msa_context32(struct sigcontext32 __user *sc);
+extern asmlinkage int _restore_msa_context32(struct sigcontext32 __user *sc);
/*
* Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
@@ -78,17 +79,96 @@ struct rt_sigframe32 {
};
/*
+ * Thread saved context copy to/from a signal context presumed to be on the
+ * user stack, and therefore accessed with appropriate macros from uaccess.h.
+ */
+static int copy_fp_to_sigcontext32(struct sigcontext32 __user *sc)
+{
+ int i;
+ int err = 0;
+ int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
+
+ for (i = 0; i < NUM_FPU_REGS; i += inc) {
+ err |=
+ __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
+ &sc->sc_fpregs[i]);
+ }
+ err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
+
+ return err;
+}
+
+static int copy_fp_from_sigcontext32(struct sigcontext32 __user *sc)
+{
+ int i;
+ int err = 0;
+ int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
+ u64 fpr_val;
+
+ for (i = 0; i < NUM_FPU_REGS; i += inc) {
+ err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
+ set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
+ }
+ err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
+
+ return err;
+}
+
+/*
+ * These functions will save only the upper 64 bits of the vector registers,
+ * since the lower 64 bits have already been saved as the scalar FP context.
+ */
+static int copy_msa_to_sigcontext32(struct sigcontext32 __user *sc)
+{
+ int i;
+ int err = 0;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err |=
+ __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
+ &sc->sc_msaregs[i]);
+ }
+ err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
+
+ return err;
+}
+
+static int copy_msa_from_sigcontext32(struct sigcontext32 __user *sc)
+{
+ int i;
+ int err = 0;
+ u64 val;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err |= __get_user(val, &sc->sc_msaregs[i]);
+ set_fpr64(&current->thread.fpu.fpr[i], 1, val);
+ }
+ err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
+
+ return err;
+}
+
+/*
* sigcontext handlers
*/
-static int protected_save_fp_context32(struct sigcontext32 __user *sc)
+static int protected_save_fp_context32(struct sigcontext32 __user *sc,
+ unsigned used_math)
{
int err;
+ bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
while (1) {
lock_fpu_owner();
- err = own_fpu_inatomic(1);
- if (!err)
- err = save_fp_context32(sc); /* this might fail */
- unlock_fpu_owner();
+ if (is_fpu_owner()) {
+ err = save_fp_context32(sc);
+ if (save_msa && !err)
+ err = _save_msa_context32(sc);
+ unlock_fpu_owner();
+ } else {
+ unlock_fpu_owner();
+ err = copy_fp_to_sigcontext32(sc);
+ if (save_msa && !err)
+ err = copy_msa_to_sigcontext32(sc);
+ }
if (likely(!err))
break;
/* touch the sigcontext and try again */
@@ -101,15 +181,29 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc)
return err;
}
-static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
+static int protected_restore_fp_context32(struct sigcontext32 __user *sc,
+ unsigned used_math)
{
int err, tmp __maybe_unused;
+ bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
while (1) {
lock_fpu_owner();
- err = own_fpu_inatomic(0);
- if (!err)
- err = restore_fp_context32(sc); /* this might fail */
- unlock_fpu_owner();
+ if (is_fpu_owner()) {
+ err = restore_fp_context32(sc);
+ if (restore_msa && !err) {
+ enable_msa();
+ err = _restore_msa_context32(sc);
+ } else {
+ /* signal handler may have used MSA */
+ disable_msa();
+ }
+ unlock_fpu_owner();
+ } else {
+ unlock_fpu_owner();
+ err = copy_fp_from_sigcontext32(sc);
+ if (restore_msa && !err)
+ err = copy_msa_from_sigcontext32(sc);
+ }
if (likely(!err))
break;
/* touch the sigcontext and try again */
@@ -147,7 +241,8 @@ static int setup_sigcontext32(struct pt_regs *regs,
err |= __put_user(mflo3(), &sc->sc_lo3);
}
- used_math = !!used_math();
+ used_math = used_math() ? USEDMATH_FP : 0;
+ used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
err |= __put_user(used_math, &sc->sc_used_math);
if (used_math) {
@@ -155,20 +250,21 @@ static int setup_sigcontext32(struct pt_regs *regs,
* Save FPU state to signal context. Signal handler
* will "inherit" current FPU state.
*/
- err |= protected_save_fp_context32(sc);
+ err |= protected_save_fp_context32(sc, used_math);
}
return err;
}
static int
-check_and_restore_fp_context32(struct sigcontext32 __user *sc)
+check_and_restore_fp_context32(struct sigcontext32 __user *sc,
+ unsigned used_math)
{
int err, sig;
err = sig = fpcsr_pending(&sc->sc_fpc_csr);
if (err > 0)
err = 0;
- err |= protected_restore_fp_context32(sc);
+ err |= protected_restore_fp_context32(sc, used_math);
return err ?: sig;
}
@@ -205,9 +301,10 @@ static int restore_sigcontext32(struct pt_regs *regs,
if (used_math) {
/* restore fpu context if we have used it before */
if (!err)
- err = check_and_restore_fp_context32(sc);
+ err = check_and_restore_fp_context32(sc, used_math);
} else {
- /* signal handler may have used FPU. Give it up. */
+ /* signal handler may have used FPU or MSA. Disable them. */
+ disable_msa();
lose_fpu(0);
}
@@ -566,8 +663,8 @@ static int signal32_init(void)
save_fp_context32 = _save_fp_context32;
restore_fp_context32 = _restore_fp_context32;
} else {
- save_fp_context32 = fpu_emulator_save_context32;
- restore_fp_context32 = fpu_emulator_restore_context32;
+ save_fp_context32 = copy_fp_to_sigcontext32;
+ restore_fp_context32 = copy_fp_from_sigcontext32;
}
return 0;
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index 1b925d8a610c..3ef55fb7ac03 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -39,57 +39,9 @@
#include <asm/amon.h>
#include <asm/gic.h>
-static void ipi_call_function(unsigned int cpu)
-{
- pr_debug("CPU%d: %s cpu %d status %08x\n",
- smp_processor_id(), __func__, cpu, read_c0_status());
-
- gic_send_ipi(plat_ipi_call_int_xlate(cpu));
-}
-
-
-static void ipi_resched(unsigned int cpu)
-{
- pr_debug("CPU%d: %s cpu %d status %08x\n",
- smp_processor_id(), __func__, cpu, read_c0_status());
-
- gic_send_ipi(plat_ipi_resched_int_xlate(cpu));
-}
-
-/*
- * FIXME: This isn't restricted to CMP
- * The SMVP kernel could use GIC interrupts if available
- */
-void cmp_send_ipi_single(int cpu, unsigned int action)
-{
- unsigned long flags;
-
- local_irq_save(flags);
-
- switch (action) {
- case SMP_CALL_FUNCTION:
- ipi_call_function(cpu);
- break;
-
- case SMP_RESCHEDULE_YOURSELF:
- ipi_resched(cpu);
- break;
- }
-
- local_irq_restore(flags);
-}
-
-static void cmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
-{
- unsigned int i;
-
- for_each_cpu(i, mask)
- cmp_send_ipi_single(i, action);
-}
-
static void cmp_init_secondary(void)
{
- struct cpuinfo_mips *c = &current_cpu_data;
+ struct cpuinfo_mips *c __maybe_unused = &current_cpu_data;
/* Assume GIC is present */
change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 |
@@ -97,7 +49,6 @@ static void cmp_init_secondary(void)
/* Enable per-cpu interrupts: platform specific */
- c->core = (read_c0_ebase() >> 1) & 0x1ff;
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
if (cpu_has_mipsmt)
c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) &
@@ -210,8 +161,8 @@ void __init cmp_prepare_cpus(unsigned int max_cpus)
}
struct plat_smp_ops cmp_smp_ops = {
- .send_ipi_single = cmp_send_ipi_single,
- .send_ipi_mask = cmp_send_ipi_mask,
+ .send_ipi_single = gic_send_ipi_single,
+ .send_ipi_mask = gic_send_ipi_mask,
.init_secondary = cmp_init_secondary,
.smp_finish = cmp_smp_finish,
.cpus_done = cmp_cpus_done,
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
new file mode 100644
index 000000000000..536eec0d21b6
--- /dev/null
+++ b/arch/mips/kernel/smp-cps.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+
+#include <asm/cacheflush.h>
+#include <asm/gic.h>
+#include <asm/mips-cm.h>
+#include <asm/mips-cpc.h>
+#include <asm/mips_mt.h>
+#include <asm/mipsregs.h>
+#include <asm/smp-cps.h>
+#include <asm/time.h>
+#include <asm/uasm.h>
+
+static DECLARE_BITMAP(core_power, NR_CPUS);
+
+struct boot_config mips_cps_bootcfg;
+
+static void init_core(void)
+{
+ unsigned int nvpes, t;
+ u32 mvpconf0, vpeconf0, vpecontrol, tcstatus, tcbind, status;
+
+ if (!cpu_has_mipsmt)
+ return;
+
+ /* Enter VPE configuration state */
+ dvpe();
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ /* Retrieve the count of VPEs in this core */
+ mvpconf0 = read_c0_mvpconf0();
+ nvpes = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+ smp_num_siblings = nvpes;
+
+ for (t = 1; t < nvpes; t++) {
+ /* Use a 1:1 mapping of TC index to VPE index */
+ settc(t);
+
+ /* Bind 1 TC to this VPE */
+ tcbind = read_tc_c0_tcbind();
+ tcbind &= ~TCBIND_CURVPE;
+ tcbind |= t << TCBIND_CURVPE_SHIFT;
+ write_tc_c0_tcbind(tcbind);
+
+ /* Set exclusive TC, non-active, master */
+ vpeconf0 = read_vpe_c0_vpeconf0();
+ vpeconf0 &= ~(VPECONF0_XTC | VPECONF0_VPA);
+ vpeconf0 |= t << VPECONF0_XTC_SHIFT;
+ vpeconf0 |= VPECONF0_MVP;
+ write_vpe_c0_vpeconf0(vpeconf0);
+
+ /* Declare TC non-active, non-allocatable & interrupt exempt */
+ tcstatus = read_tc_c0_tcstatus();
+ tcstatus &= ~(TCSTATUS_A | TCSTATUS_DA);
+ tcstatus |= TCSTATUS_IXMT;
+ write_tc_c0_tcstatus(tcstatus);
+
+ /* Halt the TC */
+ write_tc_c0_tchalt(TCHALT_H);
+
+ /* Allow only 1 TC to execute */
+ vpecontrol = read_vpe_c0_vpecontrol();
+ vpecontrol &= ~VPECONTROL_TE;
+ write_vpe_c0_vpecontrol(vpecontrol);
+
+ /* Copy (most of) Status from VPE 0 */
+ status = read_c0_status();
+ status &= ~(ST0_IM | ST0_IE | ST0_KSU);
+ status |= ST0_CU0;
+ write_vpe_c0_status(status);
+
+ /* Copy Config from VPE 0 */
+ write_vpe_c0_config(read_c0_config());
+ write_vpe_c0_config7(read_c0_config7());
+
+ /* Ensure no software interrupts are pending */
+ write_vpe_c0_cause(0);
+
+ /* Sync Count */
+ write_vpe_c0_count(read_c0_count());
+ }
+
+ /* Leave VPE configuration state */
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+}
+
+static void __init cps_smp_setup(void)
+{
+ unsigned int ncores, nvpes, core_vpes;
+ int c, v;
+ u32 core_cfg, *entry_code;
+
+ /* Detect & record VPE topology */
+ ncores = mips_cm_numcores();
+ pr_info("VPE topology ");
+ for (c = nvpes = 0; c < ncores; c++) {
+ if (cpu_has_mipsmt && config_enabled(CONFIG_MIPS_MT_SMP)) {
+ write_gcr_cl_other(c << CM_GCR_Cx_OTHER_CORENUM_SHF);
+ core_cfg = read_gcr_co_config();
+ core_vpes = ((core_cfg & CM_GCR_Cx_CONFIG_PVPE_MSK) >>
+ CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
+ } else {
+ core_vpes = 1;
+ }
+
+ pr_cont("%c%u", c ? ',' : '{', core_vpes);
+
+ for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
+ cpu_data[nvpes + v].core = c;
+#ifdef CONFIG_MIPS_MT_SMP
+ cpu_data[nvpes + v].vpe_id = v;
+#endif
+ }
+
+ nvpes += core_vpes;
+ }
+ pr_cont("} total %u\n", nvpes);
+
+ /* Indicate present CPUs (CPU being synonymous with VPE) */
+ for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
+ set_cpu_possible(v, true);
+ set_cpu_present(v, true);
+ __cpu_number_map[v] = v;
+ __cpu_logical_map[v] = v;
+ }
+
+ /* Core 0 is powered up (we're running on it) */
+ bitmap_set(core_power, 0, 1);
+
+ /* Disable MT - we only want to run 1 TC per VPE */
+ if (cpu_has_mipsmt)
+ dmt();
+
+ /* Initialise core 0 */
+ init_core();
+
+ /* Patch the start of mips_cps_core_entry to provide the CM base */
+ entry_code = (u32 *)&mips_cps_core_entry;
+ UASM_i_LA(&entry_code, 3, (long)mips_cm_base);
+
+ /* Make core 0 coherent with everything */
+ write_gcr_cl_coherence(0xff);
+}
+
+static void __init cps_prepare_cpus(unsigned int max_cpus)
+{
+ mips_mt_set_cpuoptions();
+}
+
+static void boot_core(struct boot_config *cfg)
+{
+ u32 access;
+
+ /* Select the appropriate core */
+ write_gcr_cl_other(cfg->core << CM_GCR_Cx_OTHER_CORENUM_SHF);
+
+ /* Set its reset vector */
+ write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
+
+ /* Ensure its coherency is disabled */
+ write_gcr_co_coherence(0);
+
+ /* Ensure the core can access the GCRs */
+ access = read_gcr_access();
+ access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + cfg->core);
+ write_gcr_access(access);
+
+ /* Copy cfg */
+ mips_cps_bootcfg = *cfg;
+
+ if (mips_cpc_present()) {
+ /* Select the appropriate core */
+ write_cpc_cl_other(cfg->core << CPC_Cx_OTHER_CORENUM_SHF);
+
+ /* Reset the core */
+ write_cpc_co_cmd(CPC_Cx_CMD_RESET);
+ } else {
+ /* Take the core out of reset */
+ write_gcr_co_reset_release(0);
+ }
+
+ /* The core is now powered up */
+ bitmap_set(core_power, cfg->core, 1);
+}
+
+static void boot_vpe(void *info)
+{
+ struct boot_config *cfg = info;
+ u32 tcstatus, vpeconf0;
+
+ /* Enter VPE configuration state */
+ dvpe();
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ settc(cfg->vpe);
+
+ /* Set the TC restart PC */
+ write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
+
+ /* Activate the TC, allow interrupts */
+ tcstatus = read_tc_c0_tcstatus();
+ tcstatus &= ~TCSTATUS_IXMT;
+ tcstatus |= TCSTATUS_A;
+ write_tc_c0_tcstatus(tcstatus);
+
+ /* Clear the TC halt bit */
+ write_tc_c0_tchalt(0);
+
+ /* Activate the VPE */
+ vpeconf0 = read_vpe_c0_vpeconf0();
+ vpeconf0 |= VPECONF0_VPA;
+ write_vpe_c0_vpeconf0(vpeconf0);
+
+ /* Set the stack & global pointer registers */
+ write_tc_gpr_sp(cfg->sp);
+ write_tc_gpr_gp(cfg->gp);
+
+ /* Leave VPE configuration state */
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ /* Enable other VPEs to execute */
+ evpe(EVPE_ENABLE);
+}
+
+static void cps_boot_secondary(int cpu, struct task_struct *idle)
+{
+ struct boot_config cfg;
+ unsigned int remote;
+ int err;
+
+ cfg.core = cpu_data[cpu].core;
+ cfg.vpe = cpu_vpe_id(&cpu_data[cpu]);
+ cfg.pc = (unsigned long)&smp_bootstrap;
+ cfg.sp = __KSTK_TOS(idle);
+ cfg.gp = (unsigned long)task_thread_info(idle);
+
+ if (!test_bit(cfg.core, core_power)) {
+ /* Boot a VPE on a powered down core */
+ boot_core(&cfg);
+ return;
+ }
+
+ if (cfg.core != current_cpu_data.core) {
+ /* Boot a VPE on another powered up core */
+ for (remote = 0; remote < NR_CPUS; remote++) {
+ if (cpu_data[remote].core != cfg.core)
+ continue;
+ if (cpu_online(remote))
+ break;
+ }
+ BUG_ON(remote >= NR_CPUS);
+
+ err = smp_call_function_single(remote, boot_vpe, &cfg, 1);
+ if (err)
+ panic("Failed to call remote CPU\n");
+ return;
+ }
+
+ BUG_ON(!cpu_has_mipsmt);
+
+ /* Boot a VPE on this core */
+ boot_vpe(&cfg);
+}
+
+static void cps_init_secondary(void)
+{
+ /* Disable MT - we only want to run 1 TC per VPE */
+ if (cpu_has_mipsmt)
+ dmt();
+
+ /* TODO: revisit this assumption once hotplug is implemented */
+ if (cpu_vpe_id(&current_cpu_data) == 0)
+ init_core();
+
+ change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
+ STATUSF_IP6 | STATUSF_IP7);
+}
+
+static void cps_smp_finish(void)
+{
+ write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /* If we have an FPU, enroll ourselves in the FPU-full mask */
+ if (cpu_has_fpu)
+ cpu_set(smp_processor_id(), mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+ local_irq_enable();
+}
+
+static void cps_cpus_done(void)
+{
+}
+
+static struct plat_smp_ops cps_smp_ops = {
+ .smp_setup = cps_smp_setup,
+ .prepare_cpus = cps_prepare_cpus,
+ .boot_secondary = cps_boot_secondary,
+ .init_secondary = cps_init_secondary,
+ .smp_finish = cps_smp_finish,
+ .send_ipi_single = gic_send_ipi_single,
+ .send_ipi_mask = gic_send_ipi_mask,
+ .cpus_done = cps_cpus_done,
+};
+
+int register_cps_smp_ops(void)
+{
+ if (!mips_cm_present()) {
+ pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
+ return -ENODEV;
+ }
+
+ /* check we have a GIC - we need one for IPIs */
+ if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX_MSK)) {
+ pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
+ return -ENODEV;
+ }
+
+ register_smp_ops(&cps_smp_ops);
+ return 0;
+}
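
For illustration only (not captured from real hardware): on a hypothetical system with two cores of two VPEs each, the pr_info()/pr_cont() sequence in cps_smp_setup() would log

	VPE topology {2,2} total 4

after which CPUs 0-3 are marked possible and present with an identity logical-to-physical mapping, and only core 0 is initially set in core_power. Secondary CPUs are then brought up by cps_boot_secondary(): through boot_core() (via the CPC if present, otherwise the GCR reset release) for a powered-down core, via an IPI to an online CPU on the target core otherwise, or by a direct boot_vpe() call for another VPE of the current core.
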
diff --git a/arch/mips/kernel/smp-gic.c b/arch/mips/kernel/smp-gic.c
new file mode 100644
index 000000000000..3bb1f92ab525
--- /dev/null
+++ b/arch/mips/kernel/smp-gic.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * Based on smp-cmp.c:
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * Author: Chris Dearman (chris@mips.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/printk.h>
+
+#include <asm/gic.h>
+#include <asm/smp-ops.h>
+
+void gic_send_ipi_single(int cpu, unsigned int action)
+{
+ unsigned long flags;
+ unsigned int intr;
+
+ pr_debug("CPU%d: %s cpu %d action %u status %08x\n",
+ smp_processor_id(), __func__, cpu, action, read_c0_status());
+
+ local_irq_save(flags);
+
+ switch (action) {
+ case SMP_CALL_FUNCTION:
+ intr = plat_ipi_call_int_xlate(cpu);
+ break;
+
+ case SMP_RESCHEDULE_YOURSELF:
+ intr = plat_ipi_resched_int_xlate(cpu);
+ break;
+
+ default:
+ BUG();
+ }
+
+ gic_send_ipi(intr);
+ local_irq_restore(flags);
+}
+
+void gic_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+ unsigned int i;
+
+ for_each_cpu(i, mask)
+ gic_send_ipi_single(i, action);
+}
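
A hypothetical call site, just to show how the helpers above are meant to be used (only SMP_CALL_FUNCTION and SMP_RESCHEDULE_YOURSELF are handled; any other action hits BUG()):

	/* Ask CPU 2 to reschedule itself by raising its GIC IPI. */
	gic_send_ipi_single(2, SMP_RESCHEDULE_YOURSELF);

In practice they are wired into plat_smp_ops as the send_ipi_single/send_ipi_mask hooks (as the CPS and CMP ops do) or called from a platform's own IPI path, as smp-mt.c does below.
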
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 0fb8cefc9114..f8e13149604d 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -113,27 +113,6 @@ static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
write_tc_c0_tchalt(TCHALT_H);
}
-#ifdef CONFIG_IRQ_GIC
-static void mp_send_ipi_single(int cpu, unsigned int action)
-{
- unsigned long flags;
-
- local_irq_save(flags);
-
- switch (action) {
- case SMP_CALL_FUNCTION:
- gic_send_ipi(plat_ipi_call_int_xlate(cpu));
- break;
-
- case SMP_RESCHEDULE_YOURSELF:
- gic_send_ipi(plat_ipi_resched_int_xlate(cpu));
- break;
- }
-
- local_irq_restore(flags);
-}
-#endif
-
static void vsmp_send_ipi_single(int cpu, unsigned int action)
{
int i;
@@ -142,7 +121,7 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action)
#ifdef CONFIG_IRQ_GIC
if (gic_present) {
- mp_send_ipi_single(cpu, action);
+ gic_send_ipi_single(cpu, action);
return;
}
#endif
@@ -313,3 +292,25 @@ struct plat_smp_ops vsmp_smp_ops = {
.smp_setup = vsmp_smp_setup,
.prepare_cpus = vsmp_prepare_cpus,
};
+
+static int proc_cpuinfo_chain_call(struct notifier_block *nfb,
+ unsigned long action_unused, void *data)
+{
+ struct proc_cpuinfo_notifier_args *pcn = data;
+ struct seq_file *m = pcn->m;
+ unsigned long n = pcn->n;
+
+ if (!cpu_has_mipsmt)
+ return NOTIFY_OK;
+
+ seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
+
+ return NOTIFY_OK;
+}
+
+static int __init proc_cpuinfo_notifier_init(void)
+{
+ return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0);
+}
+
+subsys_initcall(proc_cpuinfo_notifier_init);
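
For illustration: with MT enabled, the notifier above appends a line of the following form to each CPU's /proc/cpuinfo entry, the value being that CPU's VPE index:

	VPE			: 1
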
diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
index c10aa84c9fa9..38635a996cbf 100644
--- a/arch/mips/kernel/smtc-proc.c
+++ b/arch/mips/kernel/smtc-proc.c
@@ -77,3 +77,26 @@ void init_smtc_stats(void)
proc_create("smtc", 0444, NULL, &smtc_proc_fops);
}
+
+static int proc_cpuinfo_chain_call(struct notifier_block *nfb,
+ unsigned long action_unused, void *data)
+{
+ struct proc_cpuinfo_notifier_args *pcn = data;
+ struct seq_file *m = pcn->m;
+ unsigned long n = pcn->n;
+
+ if (!cpu_has_mipsmt)
+ return NOTIFY_OK;
+
+ seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
+ seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id);
+
+ return NOTIFY_OK;
+}
+
+static int __init proc_cpuinfo_notifier_init(void)
+{
+ return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0);
+}
+
+subsys_initcall(proc_cpuinfo_notifier_init);
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index dfc1b911be04..c1681d65dd5c 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1007,7 +1007,7 @@ static void __irq_entry smtc_clock_tick_interrupt(void)
int irq = MIPS_CPU_IRQ_BASE + 1;
irq_enter();
- kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
+ kstat_incr_irq_this_cpu(irq);
cd = &per_cpu(mips_clockevent_device, cpu);
cd->event_handler(cd);
irq_exit();
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index b242e2c10ea0..67f2495def1c 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -197,16 +197,17 @@ static void probe_spram(char *type,
}
void spram_config(void)
{
- struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config0;
- switch (c->cputype) {
+ switch (current_cpu_type()) {
case CPU_24K:
case CPU_34K:
case CPU_74K:
case CPU_1004K:
+ case CPU_1074K:
case CPU_INTERAPTIV:
case CPU_PROAPTIV:
+ case CPU_P5600:
config0 = read_c0_config();
/* FIXME: addresses are Malta specific */
if (config0 & (1<<24)) {
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index b79d13f95bf0..4a4f9dda5658 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -110,7 +110,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
if (cpu_has_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__ (
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" li %[err], 0 \n"
"1: ll %[old], (%[addr]) \n"
" move %[tmp], %[new] \n"
@@ -135,7 +135,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
: "memory");
} else if (cpu_has_llsc) {
__asm__ __volatile__ (
- " .set mips3 \n"
+ " .set arch=r4000 \n"
" li %[err], 0 \n"
"1: ll %[old], (%[addr]) \n"
" move %[tmp], %[new] \n"
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index e0b499694d18..074e857ced28 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -10,6 +10,7 @@
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
* Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2014, Imagination Technologies Ltd.
*/
#include <linux/bug.h>
#include <linux/compiler.h>
@@ -47,6 +48,7 @@
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
+#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
@@ -77,8 +79,10 @@ extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
+extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_ftlb(void);
+extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
@@ -861,6 +865,11 @@ asmlinkage void do_bp(struct pt_regs *regs)
enum ctx_state prev_state;
unsigned long epc;
u16 instr[2];
+ mm_segment_t seg;
+
+ seg = get_fs();
+ if (!user_mode(regs))
+ set_fs(KERNEL_DS);
prev_state = exception_enter();
if (get_isa16_mode(regs->cp0_epc)) {
@@ -870,17 +879,19 @@ asmlinkage void do_bp(struct pt_regs *regs)
if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
(__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
goto out_sigsegv;
- opcode = (instr[0] << 16) | instr[1];
+ opcode = (instr[0] << 16) | instr[1];
} else {
- /* MIPS16e mode */
- if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)))
+ /* MIPS16e mode */
+ if (__get_user(instr[0],
+ (u16 __user *)msk_isa16_mode(epc)))
goto out_sigsegv;
- bcode = (instr[0] >> 6) & 0x3f;
- do_trap_or_bp(regs, bcode, "Break");
- goto out;
+ bcode = (instr[0] >> 6) & 0x3f;
+ do_trap_or_bp(regs, bcode, "Break");
+ goto out;
}
} else {
- if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+ if (__get_user(opcode,
+ (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
}
@@ -918,6 +929,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
do_trap_or_bp(regs, bcode, "Break");
out:
+ set_fs(seg);
exception_exit(prev_state);
return;
@@ -931,8 +943,13 @@ asmlinkage void do_tr(struct pt_regs *regs)
u32 opcode, tcode = 0;
enum ctx_state prev_state;
u16 instr[2];
+ mm_segment_t seg;
unsigned long epc = msk_isa16_mode(exception_epc(regs));
+ seg = get_fs();
+ if (!user_mode(regs))
+ set_fs(get_ds());
+
prev_state = exception_enter();
if (get_isa16_mode(regs->cp0_epc)) {
if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
@@ -953,6 +970,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
do_trap_or_bp(regs, tcode, "Trap");
out:
+ set_fs(seg);
exception_exit(prev_state);
return;
@@ -1074,6 +1092,76 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
return NOTIFY_OK;
}
+static int enable_restore_fp_context(int msa)
+{
+ int err, was_fpu_owner;
+
+ if (!used_math()) {
+ /* First time FP context user. */
+ err = init_fpu();
+ if (msa && !err)
+ enable_msa();
+ if (!err)
+ set_used_math();
+ return err;
+ }
+
+ /*
+ * This task has formerly used the FP context.
+ *
+ * If this thread has no live MSA vector context then we can simply
+ * restore the scalar FP context. If it has live MSA vector context
+ * (that is, it has or may have used MSA since last performing a
+ * function call) then we'll need to restore the vector context. This
+ * applies even if we're currently only executing a scalar FP
+ * instruction. This is because if we were to later execute an MSA
+ * instruction then we'd either have to:
+ *
+ * - Restore the vector context & clobber any registers modified by
+ * scalar FP instructions between now & then.
+ *
+ * or
+ *
+ * - Not restore the vector context & lose the most significant bits
+ * of all vector registers.
+ *
+ * Neither of those options is acceptable. We cannot restore the least
+ * significant bits of the registers now & only restore the most
+ * significant bits later because the most significant bits of any
+ * vector registers whose aliased FP register is modified now will have
+ * been zeroed. We'd have no way to know that when restoring the vector
+ * context & thus may load an outdated value for the most significant
+ * bits of a vector register.
+ */
+ if (!msa && !thread_msa_context_live())
+ return own_fpu(1);
+
+ /*
+ * This task is using or has previously used MSA. Thus we require
+ * that Status.FR == 1.
+ */
+ was_fpu_owner = is_fpu_owner();
+ err = own_fpu(0);
+ if (err)
+ return err;
+
+ enable_msa();
+ write_msa_csr(current->thread.fpu.msacsr);
+ set_thread_flag(TIF_USEDMSA);
+
+ /*
+ * If this is the first time that the task is using MSA and it has
+ * previously used scalar FP in this time slice then we already have
+ * FP context which we shouldn't clobber.
+ */
+ if (!test_and_set_thread_flag(TIF_MSA_CTX_LIVE) && was_fpu_owner)
+ return 0;
+
+ /* We need to restore the vector context. */
+ restore_msa(current);
+ return 0;
+}
+
asmlinkage void do_cpu(struct pt_regs *regs)
{
enum ctx_state prev_state;
@@ -1153,12 +1241,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
/* Fall through. */
case 1:
- if (used_math()) /* Using the FPU again. */
- err = own_fpu(1);
- else { /* First time FPU user. */
- err = init_fpu();
- set_used_math();
- }
+ err = enable_restore_fp_context(0);
if (!raw_cpu_has_fpu || err) {
int sig;
@@ -1183,6 +1266,37 @@ out:
exception_exit(prev_state);
}
+asmlinkage void do_msa_fpe(struct pt_regs *regs)
+{
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
+ die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
+ force_sig(SIGFPE, current);
+ exception_exit(prev_state);
+}
+
+asmlinkage void do_msa(struct pt_regs *regs)
+{
+ enum ctx_state prev_state;
+ int err;
+
+ prev_state = exception_enter();
+
+ if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
+ force_sig(SIGILL, current);
+ goto out;
+ }
+
+ die_if_kernel("do_msa invoked from kernel context!", regs);
+
+ err = enable_restore_fp_context(1);
+ if (err)
+ force_sig(SIGILL, current);
+out:
+ exception_exit(prev_state);
+}
+
asmlinkage void do_mdmx(struct pt_regs *regs)
{
enum ctx_state prev_state;
@@ -1337,8 +1451,10 @@ static inline void parity_protection_init(void)
case CPU_34K:
case CPU_74K:
case CPU_1004K:
+ case CPU_1074K:
case CPU_INTERAPTIV:
case CPU_PROAPTIV:
+ case CPU_P5600:
{
#define ERRCTL_PE 0x80000000
#define ERRCTL_L2P 0x00800000
@@ -2017,6 +2133,7 @@ void __init trap_init(void)
set_except_vector(11, handle_cpu);
set_except_vector(12, handle_ov);
set_except_vector(13, handle_tr);
+ set_except_vector(14, handle_msa_fpe);
if (current_cpu_type() == CPU_R6000 ||
current_cpu_type() == CPU_R6000A) {
@@ -2040,6 +2157,7 @@ void __init trap_init(void)
set_except_vector(15, handle_fpe);
set_except_vector(16, handle_ftlb);
+ set_except_vector(21, handle_msa);
set_except_vector(22, handle_mdmx);
if (cpu_has_mcheck)
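
To make the new plumbing concrete: a user thread's first MSA instruction traps through the new vector 21 handler into do_msa(), which calls enable_restore_fp_context(1) to enable the FPU, enable MSA and mark the thread's vector context live before the instruction is retried. A purely illustrative userspace snippet, assuming a toolchain that accepts the .set msa directive and MSA assembly (nothing here is taken from the patch):

	int main(void)
	{
		int x = 42;

		/* MSA starts disabled, so the first vector instruction raises
		 * the MSA disabled exception (vector 21) and lands in do_msa(). */
		__asm__ __volatile__(
		"	.set	push		\n"
		"	.set	msa		\n"
		"	insert.w $w0[0], %0	\n"
		"	.set	pop		\n"
		: : "r" (x));

		return 0;
	}
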
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index c369a5d35527..2b3517214d6d 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -7,6 +7,7 @@
*
* Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
* Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2014 Imagination Technologies Ltd.
*
* This file contains exception handler for address error exception with the
* special capability to execute faulting instructions in software. The
@@ -110,8 +111,8 @@ extern void show_registers(struct pt_regs *regs);
#ifdef __BIG_ENDIAN
#define LoadHW(addr, value, res) \
__asm__ __volatile__ (".set\tnoat\n" \
- "1:\tlb\t%0, 0(%2)\n" \
- "2:\tlbu\t$1, 1(%2)\n\t" \
+ "1:\t"user_lb("%0", "0(%2)")"\n" \
+ "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
"li\t%1, 0\n" \
@@ -130,8 +131,8 @@ extern void show_registers(struct pt_regs *regs);
#define LoadW(addr, value, res) \
__asm__ __volatile__ ( \
- "1:\tlwl\t%0, (%2)\n" \
- "2:\tlwr\t%0, 3(%2)\n\t" \
+ "1:\t"user_lwl("%0", "(%2)")"\n" \
+ "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
"li\t%1, 0\n" \
"3:\n\t" \
".insn\n\t" \
@@ -149,8 +150,8 @@ extern void show_registers(struct pt_regs *regs);
#define LoadHWU(addr, value, res) \
__asm__ __volatile__ ( \
".set\tnoat\n" \
- "1:\tlbu\t%0, 0(%2)\n" \
- "2:\tlbu\t$1, 1(%2)\n\t" \
+ "1:\t"user_lbu("%0", "0(%2)")"\n" \
+ "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
"li\t%1, 0\n" \
@@ -170,8 +171,8 @@ extern void show_registers(struct pt_regs *regs);
#define LoadWU(addr, value, res) \
__asm__ __volatile__ ( \
- "1:\tlwl\t%0, (%2)\n" \
- "2:\tlwr\t%0, 3(%2)\n\t" \
+ "1:\t"user_lwl("%0", "(%2)")"\n" \
+ "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
"dsll\t%0, %0, 32\n\t" \
"dsrl\t%0, %0, 32\n\t" \
"li\t%1, 0\n" \
@@ -209,9 +210,9 @@ extern void show_registers(struct pt_regs *regs);
#define StoreHW(addr, value, res) \
__asm__ __volatile__ ( \
".set\tnoat\n" \
- "1:\tsb\t%1, 1(%2)\n\t" \
+ "1:\t"user_sb("%1", "1(%2)")"\n" \
"srl\t$1, %1, 0x8\n" \
- "2:\tsb\t$1, 0(%2)\n\t" \
+ "2:\t"user_sb("$1", "0(%2)")"\n" \
".set\tat\n\t" \
"li\t%0, 0\n" \
"3:\n\t" \
@@ -229,8 +230,8 @@ extern void show_registers(struct pt_regs *regs);
#define StoreW(addr, value, res) \
__asm__ __volatile__ ( \
- "1:\tswl\t%1,(%2)\n" \
- "2:\tswr\t%1, 3(%2)\n\t" \
+ "1:\t"user_swl("%1", "(%2)")"\n" \
+ "2:\t"user_swr("%1", "3(%2)")"\n\t" \
"li\t%0, 0\n" \
"3:\n\t" \
".insn\n\t" \
@@ -267,8 +268,8 @@ extern void show_registers(struct pt_regs *regs);
#ifdef __LITTLE_ENDIAN
#define LoadHW(addr, value, res) \
__asm__ __volatile__ (".set\tnoat\n" \
- "1:\tlb\t%0, 1(%2)\n" \
- "2:\tlbu\t$1, 0(%2)\n\t" \
+ "1:\t"user_lb("%0", "1(%2)")"\n" \
+ "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
"li\t%1, 0\n" \
@@ -287,8 +288,8 @@ extern void show_registers(struct pt_regs *regs);
#define LoadW(addr, value, res) \
__asm__ __volatile__ ( \
- "1:\tlwl\t%0, 3(%2)\n" \
- "2:\tlwr\t%0, (%2)\n\t" \
+ "1:\t"user_lwl("%0", "3(%2)")"\n" \
+ "2:\t"user_lwr("%0", "(%2)")"\n\t" \
"li\t%1, 0\n" \
"3:\n\t" \
".insn\n\t" \
@@ -306,8 +307,8 @@ extern void show_registers(struct pt_regs *regs);
#define LoadHWU(addr, value, res) \
__asm__ __volatile__ ( \
".set\tnoat\n" \
- "1:\tlbu\t%0, 1(%2)\n" \
- "2:\tlbu\t$1, 0(%2)\n\t" \
+ "1:\t"user_lbu("%0", "1(%2)")"\n" \
+ "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
"li\t%1, 0\n" \
@@ -327,8 +328,8 @@ extern void show_registers(struct pt_regs *regs);
#define LoadWU(addr, value, res) \
__asm__ __volatile__ ( \
- "1:\tlwl\t%0, 3(%2)\n" \
- "2:\tlwr\t%0, (%2)\n\t" \
+ "1:\t"user_lwl("%0", "3(%2)")"\n" \
+ "2:\t"user_lwr("%0", "(%2)")"\n\t" \
"dsll\t%0, %0, 32\n\t" \
"dsrl\t%0, %0, 32\n\t" \
"li\t%1, 0\n" \
@@ -366,9 +367,9 @@ extern void show_registers(struct pt_regs *regs);
#define StoreHW(addr, value, res) \
__asm__ __volatile__ ( \
".set\tnoat\n" \
- "1:\tsb\t%1, 0(%2)\n\t" \
+ "1:\t"user_sb("%1", "0(%2)")"\n" \
"srl\t$1,%1, 0x8\n" \
- "2:\tsb\t$1, 1(%2)\n\t" \
+ "2:\t"user_sb("$1", "1(%2)")"\n" \
".set\tat\n\t" \
"li\t%0, 0\n" \
"3:\n\t" \
@@ -386,8 +387,8 @@ extern void show_registers(struct pt_regs *regs);
#define StoreW(addr, value, res) \
__asm__ __volatile__ ( \
- "1:\tswl\t%1, 3(%2)\n" \
- "2:\tswr\t%1, (%2)\n\t" \
+ "1:\t"user_swl("%1", "3(%2)")"\n" \
+ "2:\t"user_swr("%1", "(%2)")"\n\t" \
"li\t%0, 0\n" \
"3:\n\t" \
".insn\n\t" \
@@ -430,7 +431,9 @@ static void emulate_load_store_insn(struct pt_regs *regs,
unsigned long origpc;
unsigned long orig31;
void __user *fault_addr = NULL;
-
+#ifdef CONFIG_EVA
+ mm_segment_t seg;
+#endif
origpc = (unsigned long)pc;
orig31 = regs->regs[31];
@@ -475,6 +478,88 @@ static void emulate_load_store_insn(struct pt_regs *regs,
* The remaining opcodes are the ones that are really of
* interest.
*/
+#ifdef CONFIG_EVA
+ case spec3_op:
+ /*
+	 * We can land here only from the kernel accessing user memory, so we
+	 * need to "switch" the address limit to user space so that the
+	 * address check works properly.
+ */
+ seg = get_fs();
+ set_fs(USER_DS);
+ switch (insn.spec3_format.func) {
+ case lhe_op:
+ if (!access_ok(VERIFY_READ, addr, 2)) {
+ set_fs(seg);
+ goto sigbus;
+ }
+ LoadHW(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ compute_return_epc(regs);
+ regs->regs[insn.spec3_format.rt] = value;
+ break;
+ case lwe_op:
+ if (!access_ok(VERIFY_READ, addr, 4)) {
+ set_fs(seg);
+ goto sigbus;
+ }
+ LoadW(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ compute_return_epc(regs);
+ regs->regs[insn.spec3_format.rt] = value;
+ break;
+ case lhue_op:
+ if (!access_ok(VERIFY_READ, addr, 2)) {
+ set_fs(seg);
+ goto sigbus;
+ }
+ LoadHWU(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ compute_return_epc(regs);
+ regs->regs[insn.spec3_format.rt] = value;
+ break;
+ case she_op:
+ if (!access_ok(VERIFY_WRITE, addr, 2)) {
+ set_fs(seg);
+ goto sigbus;
+ }
+ compute_return_epc(regs);
+ value = regs->regs[insn.spec3_format.rt];
+ StoreHW(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ break;
+ case swe_op:
+ if (!access_ok(VERIFY_WRITE, addr, 4)) {
+ set_fs(seg);
+ goto sigbus;
+ }
+ compute_return_epc(regs);
+ value = regs->regs[insn.spec3_format.rt];
+ StoreW(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ break;
+ default:
+ set_fs(seg);
+ goto sigill;
+ }
+ set_fs(seg);
+ break;
+#endif
case lh_op:
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;