Diffstat (limited to 'arch/mips/kernel/traps.c')
-rw-r--r--  arch/mips/kernel/traps.c  | 144
1 file changed, 131 insertions(+), 13 deletions(-)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index e0b499694d18..074e857ced28 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -10,6 +10,7 @@
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
* Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2014, Imagination Technologies Ltd.
*/
#include <linux/bug.h>
#include <linux/compiler.h>
@@ -47,6 +48,7 @@
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
+#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
@@ -77,8 +79,10 @@ extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
+extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_ftlb(void);
+extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
@@ -861,6 +865,11 @@ asmlinkage void do_bp(struct pt_regs *regs)
enum ctx_state prev_state;
unsigned long epc;
u16 instr[2];
+ mm_segment_t seg;
+
+ seg = get_fs();
+ if (!user_mode(regs))
+ set_fs(KERNEL_DS);
prev_state = exception_enter();
if (get_isa16_mode(regs->cp0_epc)) {
@@ -870,17 +879,19 @@ asmlinkage void do_bp(struct pt_regs *regs)
if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
(__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
goto out_sigsegv;
- opcode = (instr[0] << 16) | instr[1];
+ opcode = (instr[0] << 16) | instr[1];
} else {
- /* MIPS16e mode */
- if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)))
+ /* MIPS16e mode */
+ if (__get_user(instr[0],
+ (u16 __user *)msk_isa16_mode(epc)))
goto out_sigsegv;
- bcode = (instr[0] >> 6) & 0x3f;
- do_trap_or_bp(regs, bcode, "Break");
- goto out;
+ bcode = (instr[0] >> 6) & 0x3f;
+ do_trap_or_bp(regs, bcode, "Break");
+ goto out;
}
} else {
- if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+ if (__get_user(opcode,
+ (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
}
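The microMIPS branch above assembles the 32-bit opcode from two separate halfword loads rather than a single word load: microMIPS instructions are only 16-bit aligned, and the first halfword carries the high-order bits, including the major opcode. A minimal standalone sketch of the same combining step, with a hypothetical function name:

	#include <stdint.h>

	/* Read a 32-bit microMIPS instruction that may sit on a 16-bit
	 * boundary: the first halfword supplies the high bits. */
	static uint32_t micromips_opcode(const uint16_t *pc)
	{
		return ((uint32_t)pc[0] << 16) | pc[1];
	}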
@@ -918,6 +929,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
do_trap_or_bp(regs, bcode, "Break");
out:
+ set_fs(seg);
exception_exit(prev_state);
return;
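Note the shape of the do_bp() change (do_tr() below gets the identical treatment): the handler may be entered from kernel mode, where the EPC points at a kernel address that the user-access helpers are not normally allowed to read, so the patch widens the address-space limit around the instruction fetch and restores it on every exit path. A condensed sketch of the pattern, using the era's get_fs()/set_fs() uaccess interface (kernel context assumed, not compilable on its own):

	mm_segment_t seg = get_fs();	/* remember the caller's limit */

	if (!user_mode(regs))
		set_fs(KERNEL_DS);	/* permit __get_user() on a kernel EPC */

	/* ... fetch the break/trap instruction with __get_user() ... */

	set_fs(seg);			/* always restore before returning */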
@@ -931,8 +943,13 @@ asmlinkage void do_tr(struct pt_regs *regs)
u32 opcode, tcode = 0;
enum ctx_state prev_state;
u16 instr[2];
+ mm_segment_t seg;
unsigned long epc = msk_isa16_mode(exception_epc(regs));
+ seg = get_fs();
+ if (!user_mode(regs))
+ set_fs(get_ds());
+
prev_state = exception_enter();
if (get_isa16_mode(regs->cp0_epc)) {
if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
@@ -953,6 +970,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
do_trap_or_bp(regs, tcode, "Trap");
out:
+ set_fs(seg);
exception_exit(prev_state);
return;
@@ -1074,6 +1092,76 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
return NOTIFY_OK;
}
+static int enable_restore_fp_context(int msa)
+{
+ int err, was_fpu_owner;
+
+ if (!used_math()) {
+ /* First time FP context user. */
+ err = init_fpu();
+ if (msa && !err)
+ enable_msa();
+ if (!err)
+ set_used_math();
+ return err;
+ }
+
+ /*
+ * This task has formerly used the FP context.
+ *
+ * If this thread has no live MSA vector context then we can simply
+ * restore the scalar FP context. If it has live MSA vector context
+ * (that is, it has or may have used MSA since last performing a
+ * function call) then we'll need to restore the vector context. This
+ * applies even if we're currently only executing a scalar FP
+ * instruction. This is because if we were to later execute an MSA
+ * instruction then we'd either have to:
+ *
+ * - Restore the vector context & clobber any registers modified by
+ * scalar FP instructions between now & then.
+ *
+ * or
+ *
+ * - Not restore the vector context & lose the most significant bits
+ * of all vector registers.
+ *
+ * Neither of those options is acceptable. We cannot restore the least
+ * significant bits of the registers now & only restore the most
+ * significant bits later because the most significant bits of any
+ * vector registers whose aliased FP register is modified now will have
+ * been zeroed. We'd have no way to know that when restoring the vector
+ * context & thus may load an outdated value for the most significant
+ * bits of a vector register.
+ */
+ if (!msa && !thread_msa_context_live())
+ return own_fpu(1);
+
+ /*
+ * This task is using or has previously used MSA. Thus we require
+ * that Status.FR == 1.
+ */
+ was_fpu_owner = is_fpu_owner();
+ err = own_fpu(0);
+ if (err)
+ return err;
+
+ enable_msa();
+ write_msa_csr(current->thread.fpu.msacsr);
+ set_thread_flag(TIF_USEDMSA);
+
+ /*
+ * If this is the first time that the task is using MSA and it has
+ * previously used scalar FP in this time slice then we already have
+ * FP context which we shouldn't clobber.
+ */
+ if (!test_and_set_thread_flag(TIF_MSA_CTX_LIVE) && was_fpu_owner)
+ return 0;
+
+ /* We need to restore the vector context. */
+ restore_msa(current);
+ return 0;
+}
+
asmlinkage void do_cpu(struct pt_regs *regs)
{
enum ctx_state prev_state;
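The long comment in enable_restore_fp_context() reduces to one hardware fact: each scalar FP register aliases the least-significant 64 bits of the corresponding 128-bit MSA vector register, so once a thread has (or may have) live vector state, only a full vector restore is safe; a scalar-only restore would silently lose the upper halves. The resulting decision tree, restated as a self-contained sketch (names are illustrative, not kernel APIs):

	#include <stdbool.h>

	enum fp_restore { INIT_FRESH, RESTORE_SCALAR, RESTORE_VECTOR };

	/* msa is true when the trap came from an MSA instruction (do_msa),
	 * false for a plain FP coprocessor-unusable trap (do_cpu). */
	static enum fp_restore classify(bool msa, bool used_math_before,
					bool msa_ctx_live)
	{
		if (!used_math_before)
			return INIT_FRESH;	/* first use: fresh FP/MSA context */
		if (!msa && !msa_ctx_live)
			return RESTORE_SCALAR;	/* scalar-only history suffices */
		return RESTORE_VECTOR;		/* live vector state must win */
	}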
@@ -1153,12 +1241,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
/* Fall through. */
case 1:
- if (used_math()) /* Using the FPU again. */
- err = own_fpu(1);
- else { /* First time FPU user. */
- err = init_fpu();
- set_used_math();
- }
+ err = enable_restore_fp_context(0);
if (!raw_cpu_has_fpu || err) {
int sig;
@@ -1183,6 +1266,37 @@ out:
exception_exit(prev_state);
}
+asmlinkage void do_msa_fpe(struct pt_regs *regs)
+{
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
+ die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
+ force_sig(SIGFPE, current);
+ exception_exit(prev_state);
+}
+
+asmlinkage void do_msa(struct pt_regs *regs)
+{
+ enum ctx_state prev_state;
+ int err;
+
+ prev_state = exception_enter();
+
+ if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
+ force_sig(SIGILL, current);
+ goto out;
+ }
+
+ die_if_kernel("do_msa invoked from kernel context!", regs);
+
+ err = enable_restore_fp_context(1);
+ if (err)
+ force_sig(SIGILL, current);
+out:
+ exception_exit(prev_state);
+}
+
asmlinkage void do_mdmx(struct pt_regs *regs)
{
enum ctx_state prev_state;
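From userspace the do_msa() path is invisible when it succeeds: the first MSA instruction traps, the kernel enables MSA and initialises or restores the vector context, and the instruction is re-executed. Only on failure (no MSA hardware, or a task running with TIF_32BIT_FPREGS) does the process see SIGILL. A hypothetical probe built on that contract; it assumes a MIPS toolchain with MSA support (e.g. -mmsa), and the asm line is the only MSA-specific part:

	#include <setjmp.h>
	#include <signal.h>
	#include <stdio.h>

	static sigjmp_buf env;

	static void on_sigill(int sig)
	{
		(void)sig;
		siglongjmp(env, 1);
	}

	int main(void)
	{
		signal(SIGILL, on_sigill);
		if (sigsetjmp(env, 1)) {
			puts("no usable MSA: kernel delivered SIGILL");
			return 1;
		}
		/* First MSA instruction in this thread: traps to do_msa(),
		 * which either enables MSA and restarts it, or raises SIGILL. */
		__asm__ volatile("ldi.b $w0, 0");
		puts("MSA enabled lazily by the kernel");
		return 0;
	}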
@@ -1337,8 +1451,10 @@ static inline void parity_protection_init(void)
case CPU_34K:
case CPU_74K:
case CPU_1004K:
+ case CPU_1074K:
case CPU_INTERAPTIV:
case CPU_PROAPTIV:
+ case CPU_P5600:
{
#define ERRCTL_PE 0x80000000
#define ERRCTL_L2P 0x00800000
@@ -2017,6 +2133,7 @@ void __init trap_init(void)
set_except_vector(11, handle_cpu);
set_except_vector(12, handle_ov);
set_except_vector(13, handle_tr);
+ set_except_vector(14, handle_msa_fpe);
if (current_cpu_type() == CPU_R6000 ||
current_cpu_type() == CPU_R6000A) {
@@ -2040,6 +2157,7 @@ void __init trap_init(void)
set_except_vector(15, handle_fpe);
set_except_vector(16, handle_ftlb);
+ set_except_vector(21, handle_msa);
set_except_vector(22, handle_mdmx);
if (cpu_has_mcheck)
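The two new vectors slot into the architecture's fixed ExcCode numbering: 14 is the MSA floating-point exception and 21 is "MSA disabled". set_except_vector() indexes its handler table by the Cause register's ExcCode field, which occupies bits 6..2; a standalone sketch of that decode:

	#include <stdint.h>

	/* ExcCode lives in Cause[6:2] per the MIPS privileged architecture. */
	static unsigned int exccode(uint32_t cause)
	{
		return (cause >> 2) & 0x1f;
	}

	/* exccode(cause) == 14 -> handle_msa_fpe(), == 21 -> handle_msa() */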