Diffstat (limited to 'arch/mips/kernel')
-rw-r--r-- | arch/mips/kernel/Makefile            |   3
-rw-r--r-- | arch/mips/kernel/branch.c            |  73
-rw-r--r-- | arch/mips/kernel/cmpxchg.c           | 109
-rw-r--r-- | arch/mips/kernel/cps-vec.S           |   7
-rw-r--r-- | arch/mips/kernel/cpu-probe.c         |  24
-rw-r--r-- | arch/mips/kernel/mips-cm.c           |  40
-rw-r--r-- | arch/mips/kernel/module-rela.c       | 202
-rw-r--r-- | arch/mips/kernel/module.c            | 221
-rw-r--r-- | arch/mips/kernel/perf_event_mipsxx.c |  14
-rw-r--r-- | arch/mips/kernel/proc.c              |   3
-rw-r--r-- | arch/mips/kernel/ptrace.c            |  31
-rw-r--r-- | arch/mips/kernel/scall32-o32.S       |   2
-rw-r--r-- | arch/mips/kernel/scall64-64.S        |   2
-rw-r--r-- | arch/mips/kernel/scall64-n32.S       |   2
-rw-r--r-- | arch/mips/kernel/scall64-o32.S       |   2
-rw-r--r-- | arch/mips/kernel/setup.c             |  40
-rw-r--r-- | arch/mips/kernel/smp-cps.c           |  35
-rw-r--r-- | arch/mips/kernel/smp.c               |   3
-rw-r--r-- | arch/mips/kernel/syscall.c           |  19
-rw-r--r-- | arch/mips/kernel/unaligned.c         | 213
20 files changed, 636 insertions, 409 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 9a0e37b92ce0..46c0581256f1 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -4,7 +4,7 @@ extra-y := head.o vmlinux.lds -obj-y += cpu-probe.o branch.o elf.o entry.o genex.o idle.o irq.o \ +obj-y += cmpxchg.o cpu-probe.o branch.o elf.o entry.o genex.o idle.o irq.o \ process.o prom.o ptrace.o reset.o setup.o signal.o \ syscall.o time.o topology.o traps.o unaligned.o watch.o \ vdso.o cacheinfo.o @@ -31,7 +31,6 @@ obj-$(CONFIG_SYNC_R4K) += sync-r4k.o obj-$(CONFIG_DEBUG_FS) += segment.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_MODULES) += module.o -obj-$(CONFIG_MODULES_USE_ELF_RELA) += module-rela.o obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index f702a459a830..b79ed9af9886 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -399,7 +399,7 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs) * * @regs: Pointer to pt_regs * @insn: branch instruction to decode - * @returns: -EFAULT on error and forces SIGBUS, and on success + * @returns: -EFAULT on error and forces SIGILL, and on success * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after * evaluating the branch. * @@ -431,7 +431,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, /* Fall through */ case jr_op: if (NO_R6EMU && insn.r_format.func == jr_op) - goto sigill_r6; + goto sigill_r2r6; regs->cp0_epc = regs->regs[insn.r_format.rs]; break; } @@ -446,7 +446,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, switch (insn.i_format.rt) { case bltzl_op: if (NO_R6EMU) - goto sigill_r6; + goto sigill_r2r6; case bltz_op: if ((long)regs->regs[insn.i_format.rs] < 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); @@ -459,7 +459,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bgezl_op: if (NO_R6EMU) - goto sigill_r6; + goto sigill_r2r6; case bgez_op: if ((long)regs->regs[insn.i_format.rs] >= 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); @@ -473,10 +473,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bltzal_op: case bltzall_op: if (NO_R6EMU && (insn.i_format.rs || - insn.i_format.rt == bltzall_op)) { - ret = -SIGILL; - break; - } + insn.i_format.rt == bltzall_op)) + goto sigill_r2r6; regs->regs[31] = epc + 8; /* * OK we are here either because we hit a NAL @@ -507,10 +505,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bgezal_op: case bgezall_op: if (NO_R6EMU && (insn.i_format.rs || - insn.i_format.rt == bgezall_op)) { - ret = -SIGILL; - break; - } + insn.i_format.rt == bgezall_op)) + goto sigill_r2r6; regs->regs[31] = epc + 8; /* * OK we are here either because we hit a BAL @@ -556,6 +552,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, /* * These are unconditional and in j_format. 
*/ + case jalx_op: case jal_op: regs->regs[31] = regs->cp0_epc + 8; case j_op: @@ -573,7 +570,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, */ case beql_op: if (NO_R6EMU) - goto sigill_r6; + goto sigill_r2r6; case beq_op: if (regs->regs[insn.i_format.rs] == regs->regs[insn.i_format.rt]) { @@ -587,7 +584,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bnel_op: if (NO_R6EMU) - goto sigill_r6; + goto sigill_r2r6; case bne_op: if (regs->regs[insn.i_format.rs] != regs->regs[insn.i_format.rt]) { @@ -601,7 +598,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case blezl_op: /* not really i_format */ if (!insn.i_format.rt && NO_R6EMU) - goto sigill_r6; + goto sigill_r2r6; case blez_op: /* * Compact branches for R6 for the @@ -636,7 +633,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bgtzl_op: if (!insn.i_format.rt && NO_R6EMU) - goto sigill_r6; + goto sigill_r2r6; case bgtz_op: /* * Compact branches for R6 for the @@ -774,35 +771,27 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, #else case bc6_op: /* Only valid for MIPS R6 */ - if (!cpu_has_mips_r6) { - ret = -SIGILL; - break; - } + if (!cpu_has_mips_r6) + goto sigill_r6; regs->cp0_epc += 8; break; case balc6_op: - if (!cpu_has_mips_r6) { - ret = -SIGILL; - break; - } + if (!cpu_has_mips_r6) + goto sigill_r6; /* Compact branch: BALC */ regs->regs[31] = epc + 4; epc += 4 + (insn.i_format.simmediate << 2); regs->cp0_epc = epc; break; case pop66_op: - if (!cpu_has_mips_r6) { - ret = -SIGILL; - break; - } + if (!cpu_has_mips_r6) + goto sigill_r6; /* Compact branch: BEQZC || JIC */ regs->cp0_epc += 8; break; case pop76_op: - if (!cpu_has_mips_r6) { - ret = -SIGILL; - break; - } + if (!cpu_has_mips_r6) + goto sigill_r6; /* Compact branch: BNEZC || JIALC */ if (!insn.i_format.rs) { /* JIALC: set $31/ra */ @@ -814,10 +803,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case pop10_op: case pop30_op: /* Only valid for MIPS R6 */ - if (!cpu_has_mips_r6) { - ret = -SIGILL; - break; - } + if (!cpu_has_mips_r6) + goto sigill_r6; /* * Compact branches: * bovc, beqc, beqzalc, bnvc, bnec, bnezlac @@ -831,12 +818,18 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, return ret; sigill_dsp: - printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm); - force_sig(SIGBUS, current); + pr_debug("%s: DSP branch but not DSP ASE - sending SIGILL.\n", + current->comm); + force_sig(SIGILL, current); + return -EFAULT; +sigill_r2r6: + pr_debug("%s: R2 branch but r2-to-r6 emulator is not present - sending SIGILL.\n", + current->comm); + force_sig(SIGILL, current); return -EFAULT; sigill_r6: - pr_info("%s: R2 branch but r2-to-r6 emulator is not preset - sending SIGILL.\n", - current->comm); + pr_debug("%s: R6 branch but no MIPSr6 ISA support - sending SIGILL.\n", + current->comm); force_sig(SIGILL, current); return -EFAULT; } diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c new file mode 100644 index 000000000000..7730f1d3434f --- /dev/null +++ b/arch/mips/kernel/cmpxchg.c @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2017 Imagination Technologies + * Author: Paul Burton <paul.burton@imgtec.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <linux/bitops.h> +#include <asm/cmpxchg.h> + +unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size) +{ + u32 old32, new32, load32, mask; + volatile u32 *ptr32; + unsigned int shift; + + /* Check that ptr is naturally aligned */ + WARN_ON((unsigned long)ptr & (size - 1)); + + /* Mask value to the correct size. */ + mask = GENMASK((size * BITS_PER_BYTE) - 1, 0); + val &= mask; + + /* + * Calculate a shift & mask that correspond to the value we wish to + * exchange within the naturally aligned 4 byte integer that includes + * it. + */ + shift = (unsigned long)ptr & 0x3; + if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + shift ^= sizeof(u32) - size; + shift *= BITS_PER_BYTE; + mask <<= shift; + + /* + * Calculate a pointer to the naturally aligned 4 byte integer that + * includes our byte of interest, and load its value. + */ + ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3); + load32 = *ptr32; + + do { + old32 = load32; + new32 = (load32 & ~mask) | (val << shift); + load32 = cmpxchg(ptr32, old32, new32); + } while (load32 != old32); + + return (load32 & mask) >> shift; +} + +unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old, + unsigned long new, unsigned int size) +{ + u32 mask, old32, new32, load32; + volatile u32 *ptr32; + unsigned int shift; + u8 load; + + /* Check that ptr is naturally aligned */ + WARN_ON((unsigned long)ptr & (size - 1)); + + /* Mask inputs to the correct size. */ + mask = GENMASK((size * BITS_PER_BYTE) - 1, 0); + old &= mask; + new &= mask; + + /* + * Calculate a shift & mask that correspond to the value we wish to + * compare & exchange within the naturally aligned 4 byte integer + * that includes it. + */ + shift = (unsigned long)ptr & 0x3; + if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + shift ^= sizeof(u32) - size; + shift *= BITS_PER_BYTE; + mask <<= shift; + + /* + * Calculate a pointer to the naturally aligned 4 byte integer that + * includes our byte of interest, and load its value. + */ + ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3); + load32 = *ptr32; + + while (true) { + /* + * Ensure the byte we want to exchange matches the expected + * old value, and if not then bail. + */ + load = (load32 & mask) >> shift; + if (load != old) + return load; + + /* + * Calculate the old & new values of the naturally aligned + * 4 byte integer that include the byte we want to exchange. + * Attempt to exchange the old value for the new value, and + * return if we succeed.
+ */ + old32 = (load32 & ~mask) | (old << shift); + new32 = (load32 & ~mask) | (new << shift); + load32 = cmpxchg(ptr32, old32, new32); + if (load32 == old32) + return old; + } +} diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index a00e87b0256d..b849fe6aad94 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -22,6 +22,7 @@ #define GCR_CL_COHERENCE_OFS 0x2008 #define GCR_CL_ID_OFS 0x2028 +#define CPC_CL_VC_STOP_OFS 0x2020 #define CPC_CL_VC_RUN_OFS 0x2028 .extern mips_cm_base @@ -376,8 +377,12 @@ LEAF(mips_cps_boot_vpes) PTR_LI t2, UNCAC_BASE PTR_ADD t1, t1, t2 - /* Set VC_RUN to the VPE mask */ + /* Start any other VPs that ought to be running */ PTR_S ta2, CPC_CL_VC_RUN_OFS(t1) + + /* Ensure this VP stops running if it shouldn't be */ + not ta2 + PTR_S ta2, CPC_CL_VC_STOP_OFS(t1) ehb #elif defined(CONFIG_MIPS_MT) diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 1aba27786bd5..d08afc7dc507 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -564,6 +564,7 @@ static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags) back_to_back_c0_hazard(); break; case CPU_I6400: + case CPU_I6500: /* There's no way to disable the FTLB */ if (!(flags & FTLB_EN)) return 1; @@ -844,6 +845,8 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c) c->options |= MIPS_CPU_MVH; if (cpu_has_mips_r6 && (config5 & MIPS_CONF5_VP)) c->options |= MIPS_CPU_VP; + if (config5 & MIPS_CONF5_CA2) + c->ases |= MIPS_ASE_MIPS16E2; return config5 & MIPS_CONF_M; } @@ -1635,6 +1638,10 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_I6400; __cpu_name[cpu] = "MIPS I6400"; break; + case PRID_IMP_I6500: + c->cputype = CPU_I6500; + __cpu_name[cpu] = "MIPS I6500"; + break; case PRID_IMP_M5150: c->cputype = CPU_M5150; __cpu_name[cpu] = "MIPS M5150"; @@ -1648,6 +1655,17 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) decode_configs(c); spram_config(); + + switch (__get_cpu_type(c->cputype)) { + case CPU_I6500: + c->options |= MIPS_CPU_SHARED_FTLB_ENTRIES; + /* fall-through */ + case CPU_I6400: + c->options |= MIPS_CPU_SHARED_FTLB_RAM; + /* fall-through */ + default: + break; + } } static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu) @@ -1831,6 +1849,12 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) set_elf_platform(cpu, "loongson3a"); set_isa(c, MIPS_CPU_ISA_M64R2); break; + case PRID_REV_LOONGSON3A_R3: + c->cputype = CPU_LOONGSON3; + __cpu_name[cpu] = "ICT Loongson-3"; + set_elf_platform(cpu, "loongson3a"); + set_isa(c, MIPS_CPU_ISA_M64R2); + break; } decode_configs(c); diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index 659e6d3ae335..cb0c57f860d4 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c @@ -265,15 +265,34 @@ void mips_cm_lock_other(unsigned int core, unsigned int vp) u32 val; preempt_disable(); - curr_core = current_cpu_data.core; - spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core), - per_cpu(cm_core_lock_flags, curr_core)); if (mips_cm_revision() >= CM_REV_CM3) { val = core << CM3_GCR_Cx_OTHER_CORE_SHF; val |= vp << CM3_GCR_Cx_OTHER_VP_SHF; + + /* + * We need to disable interrupts in SMP systems in order to + * ensure that we don't interrupt the caller with code which + * may modify the redirect register. 
We do so here in a + * slightly obscure way by using a spin lock, since this has + * the neat property of also catching any nested uses of + * mips_cm_lock_other() leading to a deadlock or a nice warning + * with lockdep enabled. + */ + spin_lock_irqsave(this_cpu_ptr(&cm_core_lock), + *this_cpu_ptr(&cm_core_lock_flags)); } else { - BUG_ON(vp != 0); + WARN_ON(vp != 0); + + /* + * We only have a GCR_CL_OTHER per core in systems with + * CM 2.5 & older, so have to ensure other VP(E)s don't + * race with us. + */ + curr_core = current_cpu_data.core; + spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core), + per_cpu(cm_core_lock_flags, curr_core)); + val = core << CM_GCR_Cx_OTHER_CORENUM_SHF; } @@ -288,10 +307,17 @@ void mips_cm_lock_other(unsigned int core, unsigned int vp) void mips_cm_unlock_other(void) { - unsigned curr_core = current_cpu_data.core; + unsigned int curr_core; + + if (mips_cm_revision() < CM_REV_CM3) { + curr_core = current_cpu_data.core; + spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core), + per_cpu(cm_core_lock_flags, curr_core)); + } else { + spin_unlock_irqrestore(this_cpu_ptr(&cm_core_lock), + *this_cpu_ptr(&cm_core_lock_flags)); + } - spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core), - per_cpu(cm_core_lock_flags, curr_core)); preempt_enable(); } diff --git a/arch/mips/kernel/module-rela.c b/arch/mips/kernel/module-rela.c deleted file mode 100644 index 781168834456..000000000000 --- a/arch/mips/kernel/module-rela.c +++ /dev/null @@ -1,202 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * Copyright (C) 2001 Rusty Russell. - * Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org) - * Copyright (C) 2005 Thiemo Seufer - * Copyright (C) 2015 Imagination Technologies Ltd. 
- */ - -#include <linux/elf.h> -#include <linux/err.h> -#include <linux/errno.h> -#include <linux/moduleloader.h> - -extern int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v); - -static int apply_r_mips_32_rela(struct module *me, u32 *location, Elf_Addr v) -{ - *location = v; - - return 0; -} - -static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v) -{ - if (v % 4) { - pr_err("module %s: dangerous R_MIPS_26 RELA relocation\n", - me->name); - return -ENOEXEC; - } - - if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) { - pr_err("module %s: relocation overflow\n", me->name); - return -ENOEXEC; - } - - *location = (*location & ~0x03ffffff) | ((v >> 2) & 0x03ffffff); - - return 0; -} - -static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v) -{ - *location = (*location & 0xffff0000) | - ((((long long) v + 0x8000LL) >> 16) & 0xffff); - - return 0; -} - -static int apply_r_mips_lo16_rela(struct module *me, u32 *location, Elf_Addr v) -{ - *location = (*location & 0xffff0000) | (v & 0xffff); - - return 0; -} - -static int apply_r_mips_pc_rela(struct module *me, u32 *location, Elf_Addr v, - unsigned bits) -{ - unsigned long mask = GENMASK(bits - 1, 0); - unsigned long se_bits; - long offset; - - if (v % 4) { - pr_err("module %s: dangerous R_MIPS_PC%u RELA relocation\n", - me->name, bits); - return -ENOEXEC; - } - - offset = ((long)v - (long)location) >> 2; - - /* check the sign bit onwards are identical - ie. we didn't overflow */ - se_bits = (offset & BIT(bits - 1)) ? ~0ul : 0; - if ((offset & ~mask) != (se_bits & ~mask)) { - pr_err("module %s: relocation overflow\n", me->name); - return -ENOEXEC; - } - - *location = (*location & ~mask) | (offset & mask); - - return 0; -} - -static int apply_r_mips_pc16_rela(struct module *me, u32 *location, Elf_Addr v) -{ - return apply_r_mips_pc_rela(me, location, v, 16); -} - -static int apply_r_mips_pc21_rela(struct module *me, u32 *location, Elf_Addr v) -{ - return apply_r_mips_pc_rela(me, location, v, 21); -} - -static int apply_r_mips_pc26_rela(struct module *me, u32 *location, Elf_Addr v) -{ - return apply_r_mips_pc_rela(me, location, v, 26); -} - -static int apply_r_mips_64_rela(struct module *me, u32 *location, Elf_Addr v) -{ - *(Elf_Addr *)location = v; - - return 0; -} - -static int apply_r_mips_higher_rela(struct module *me, u32 *location, - Elf_Addr v) -{ - *location = (*location & 0xffff0000) | - ((((long long) v + 0x80008000LL) >> 32) & 0xffff); - - return 0; -} - -static int apply_r_mips_highest_rela(struct module *me, u32 *location, - Elf_Addr v) -{ - *location = (*location & 0xffff0000) | - ((((long long) v + 0x800080008000LL) >> 48) & 0xffff); - - return 0; -} - -static int (*reloc_handlers_rela[]) (struct module *me, u32 *location, - Elf_Addr v) = { - [R_MIPS_NONE] = apply_r_mips_none, - [R_MIPS_32] = apply_r_mips_32_rela, - [R_MIPS_26] = apply_r_mips_26_rela, - [R_MIPS_HI16] = apply_r_mips_hi16_rela, - [R_MIPS_LO16] = apply_r_mips_lo16_rela, - [R_MIPS_PC16] = apply_r_mips_pc16_rela, - [R_MIPS_64] = apply_r_mips_64_rela, - [R_MIPS_HIGHER] = apply_r_mips_higher_rela, - [R_MIPS_HIGHEST] = apply_r_mips_highest_rela, - [R_MIPS_PC21_S2] = apply_r_mips_pc21_rela, - [R_MIPS_PC26_S2] = apply_r_mips_pc26_rela, -}; - -int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, - unsigned int symindex, unsigned int relsec, - struct module *me) -{ - Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr; - int (*handler)(struct module *me, u32 *location, Elf_Addr v); - 
Elf_Sym *sym; - u32 *location; - unsigned int i, type; - Elf_Addr v; - int res; - - pr_debug("Applying relocate section %u to %u\n", relsec, - sechdrs[relsec].sh_info); - - for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { - /* This is where to make the change */ - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr - + rel[i].r_offset; - /* This is the symbol it is referring to */ - sym = (Elf_Sym *)sechdrs[symindex].sh_addr - + ELF_MIPS_R_SYM(rel[i]); - if (sym->st_value >= -MAX_ERRNO) { - /* Ignore unresolved weak symbol */ - if (ELF_ST_BIND(sym->st_info) == STB_WEAK) - continue; - pr_warn("%s: Unknown symbol %s\n", - me->name, strtab + sym->st_name); - return -ENOENT; - } - - type = ELF_MIPS_R_TYPE(rel[i]); - - if (type < ARRAY_SIZE(reloc_handlers_rela)) - handler = reloc_handlers_rela[type]; - else - handler = NULL; - - if (!handler) { - pr_err("%s: Unknown relocation type %u\n", - me->name, type); - return -EINVAL; - } - - v = sym->st_value + rel[i].r_addend; - res = handler(me, location, v); - if (res) - return res; - } - - return 0; -} diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c index 50c020c47e54..491605137b03 100644 --- a/arch/mips/kernel/module.c +++ b/arch/mips/kernel/module.c @@ -53,22 +53,25 @@ void *module_alloc(unsigned long size) } #endif -int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v) +static int apply_r_mips_none(struct module *me, u32 *location, + u32 base, Elf_Addr v, bool rela) { return 0; } -static int apply_r_mips_32_rel(struct module *me, u32 *location, Elf_Addr v) +static int apply_r_mips_32(struct module *me, u32 *location, + u32 base, Elf_Addr v, bool rela) { - *location += v; + *location = base + v; return 0; } -static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v) +static int apply_r_mips_26(struct module *me, u32 *location, + u32 base, Elf_Addr v, bool rela) { if (v % 4) { - pr_err("module %s: dangerous R_MIPS_26 REL relocation\n", + pr_err("module %s: dangerous R_MIPS_26 relocation\n", me->name); return -ENOEXEC; } @@ -80,15 +83,22 @@ static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v) } *location = (*location & ~0x03ffffff) | - ((*location + (v >> 2)) & 0x03ffffff); + ((base + (v >> 2)) & 0x03ffffff); return 0; } -static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v) +static int apply_r_mips_hi16(struct module *me, u32 *location, + u32 base, Elf_Addr v, bool rela) { struct mips_hi16 *n; + if (rela) { + *location = (*location & 0xffff0000) | + ((((long long) v + 0x8000LL) >> 16) & 0xffff); + return 0; + } + /* * We cannot relocate this one now because we don't know the value of * the carry we need to add. Save the information, and let LO16 do the @@ -117,12 +127,18 @@ static void free_relocation_chain(struct mips_hi16 *l) } } -static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v) +static int apply_r_mips_lo16(struct module *me, u32 *location, + u32 base, Elf_Addr v, bool rela) { - unsigned long insnlo = *location; + unsigned long insnlo = base; struct mips_hi16 *l; Elf_Addr val, vallo; + if (rela) { + *location = (*location & 0xffff0000) | (v & 0xffff); + return 0; + } + /* Sign extend the addend we extract from the lo insn. 
*/ vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000; @@ -178,26 +194,26 @@ out_danger: free_relocation_chain(l); me->arch.r_mips_hi16_list = NULL; - pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name); + pr_err("module %s: dangerous R_MIPS_LO16 relocation\n", me->name); return -ENOEXEC; } -static int apply_r_mips_pc_rel(struct module *me, u32 *location, Elf_Addr v, - unsigned bits) +static int apply_r_mips_pc(struct module *me, u32 *location, u32 base, + Elf_Addr v, unsigned int bits) { unsigned long mask = GENMASK(bits - 1, 0); unsigned long se_bits; long offset; if (v % 4) { - pr_err("module %s: dangerous R_MIPS_PC%u REL relocation\n", + pr_err("module %s: dangerous R_MIPS_PC%u relocation\n", me->name, bits); return -ENOEXEC; } - /* retrieve & sign extend implicit addend */ - offset = *location & mask; + /* retrieve & sign extend implicit addend if any */ + offset = base & mask; offset |= (offset & BIT(bits - 1)) ? ~mask : 0; offset += ((long)v - (long)location) >> 2; @@ -214,99 +230,192 @@ static int apply_r_mips_pc_rel(struct module *me, u32 *location, Elf_Addr v, return 0; } -static int apply_r_mips_pc16_rel(struct module *me, u32 *location, Elf_Addr v) +static int apply_r_mips_pc16(struct module *me, u32 *location, + u32 base, Elf_Addr v, bool rela) +{ + return apply_r_mips_pc(me, location, base, v, 16); +} + +static int apply_r_mips_pc21(struct module *me, u32 *location, + u32 base, Elf_Addr v, bool rela) +{ + return apply_r_mips_pc(me, location, base, v, 21); +} + +static int apply_r_mips_pc26(struct module *me, u32 *location, + u32 base, Elf_Addr v, bool rela) +{ + return apply_r_mips_pc(me, location, base, v, 26); +} + +static int apply_r_mips_64(struct module *me, u32 *location, + u32 base, Elf_Addr v, bool rela) { - return apply_r_mips_pc_rel(me, location, v, 16); + if (WARN_ON(!rela)) + return -EINVAL; + + *(Elf_Addr *)location = v; + + return 0; } -static int apply_r_mips_pc21_rel(struct module *me, u32 *location, Elf_Addr v) +static int apply_r_mips_higher(struct module *me, u32 *location, + u32 base, Elf_Addr v, bool rela) { - return apply_r_mips_pc_rel(me, location, v, 21); + if (WARN_ON(!rela)) + return -EINVAL; + + *location = (*location & 0xffff0000) | + ((((long long)v + 0x80008000LL) >> 32) & 0xffff); + + return 0; } -static int apply_r_mips_pc26_rel(struct module *me, u32 *location, Elf_Addr v) +static int apply_r_mips_highest(struct module *me, u32 *location, + u32 base, Elf_Addr v, bool rela) { - return apply_r_mips_pc_rel(me, location, v, 26); + if (WARN_ON(!rela)) + return -EINVAL; + + *location = (*location & 0xffff0000) | + ((((long long)v + 0x800080008000LL) >> 48) & 0xffff); + + return 0; } -static int (*reloc_handlers_rel[]) (struct module *me, u32 *location, - Elf_Addr v) = { +/** + * reloc_handler() - Apply a particular relocation to a module + * @me: the module to apply the reloc to + * @location: the address at which the reloc is to be applied + * @base: the existing value at location for REL-style; 0 for RELA-style + * @v: the value of the reloc, with addend for RELA-style + * + * Each implemented reloc_handler function applies a particular type of + * relocation to the module @me. Relocs that may be found in either REL or RELA + * variants can be handled by making use of the @base & @v parameters which are + * set to values which abstract the difference away from the particular reloc + * implementations. 
+ * + * Return: 0 upon success, else -ERRNO + */ +typedef int (*reloc_handler)(struct module *me, u32 *location, + u32 base, Elf_Addr v, bool rela); + +/* The handlers for known reloc types */ +static reloc_handler reloc_handlers[] = { [R_MIPS_NONE] = apply_r_mips_none, - [R_MIPS_32] = apply_r_mips_32_rel, - [R_MIPS_26] = apply_r_mips_26_rel, - [R_MIPS_HI16] = apply_r_mips_hi16_rel, - [R_MIPS_LO16] = apply_r_mips_lo16_rel, - [R_MIPS_PC16] = apply_r_mips_pc16_rel, - [R_MIPS_PC21_S2] = apply_r_mips_pc21_rel, - [R_MIPS_PC26_S2] = apply_r_mips_pc26_rel, + [R_MIPS_32] = apply_r_mips_32, + [R_MIPS_26] = apply_r_mips_26, + [R_MIPS_HI16] = apply_r_mips_hi16, + [R_MIPS_LO16] = apply_r_mips_lo16, + [R_MIPS_PC16] = apply_r_mips_pc16, + [R_MIPS_64] = apply_r_mips_64, + [R_MIPS_HIGHER] = apply_r_mips_higher, + [R_MIPS_HIGHEST] = apply_r_mips_highest, + [R_MIPS_PC21_S2] = apply_r_mips_pc21, + [R_MIPS_PC26_S2] = apply_r_mips_pc26, }; -int apply_relocate(Elf_Shdr *sechdrs, const char *strtab, - unsigned int symindex, unsigned int relsec, - struct module *me) +static int __apply_relocate(Elf_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, + struct module *me, bool rela) { - Elf_Mips_Rel *rel = (void *) sechdrs[relsec].sh_addr; - int (*handler)(struct module *me, u32 *location, Elf_Addr v); + union { + Elf_Mips_Rel *rel; + Elf_Mips_Rela *rela; + } r; + reloc_handler handler; Elf_Sym *sym; - u32 *location; + u32 *location, base; unsigned int i, type; Elf_Addr v; - int res; + int err = 0; + size_t reloc_sz; pr_debug("Applying relocate section %u to %u\n", relsec, sechdrs[relsec].sh_info); + r.rel = (void *)sechdrs[relsec].sh_addr; + reloc_sz = rela ? sizeof(*r.rela) : sizeof(*r.rel); me->arch.r_mips_hi16_list = NULL; - for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + for (i = 0; i < sechdrs[relsec].sh_size / reloc_sz; i++) { /* This is where to make the change */ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr - + rel[i].r_offset; + + r.rel->r_offset; /* This is the symbol it is referring to */ sym = (Elf_Sym *)sechdrs[symindex].sh_addr - + ELF_MIPS_R_SYM(rel[i]); + + ELF_MIPS_R_SYM(*r.rel); if (sym->st_value >= -MAX_ERRNO) { /* Ignore unresolved weak symbol */ if (ELF_ST_BIND(sym->st_info) == STB_WEAK) continue; pr_warn("%s: Unknown symbol %s\n", me->name, strtab + sym->st_name); - return -ENOENT; + err = -ENOENT; + goto out; } - type = ELF_MIPS_R_TYPE(rel[i]); - - if (type < ARRAY_SIZE(reloc_handlers_rel)) - handler = reloc_handlers_rel[type]; + type = ELF_MIPS_R_TYPE(*r.rel); + if (type < ARRAY_SIZE(reloc_handlers)) + handler = reloc_handlers[type]; else handler = NULL; if (!handler) { pr_err("%s: Unknown relocation type %u\n", me->name, type); - return -EINVAL; + err = -EINVAL; + goto out; } - v = sym->st_value; - res = handler(me, location, v); - if (res) - return res; + if (rela) { + v = sym->st_value + r.rela->r_addend; + base = 0; + r.rela = &r.rela[1]; + } else { + v = sym->st_value; + base = *location; + r.rel = &r.rel[1]; + } + + err = handler(me, location, base, v, rela); + if (err) + goto out; } +out: /* - * Normally the hi16 list should be deallocated at this point. A + * Normally the hi16 list should be deallocated at this point. A * malformed binary however could contain a series of R_MIPS_HI16 - * relocations not followed by a R_MIPS_LO16 relocation. In that - * case, free up the list and return an error. 
+ * relocations not followed by a R_MIPS_LO16 relocation, or if we hit + * an error processing a reloc we might have gotten here before + * reaching the R_MIPS_LO16. In either case, free up the list and + * return an error. */ if (me->arch.r_mips_hi16_list) { free_relocation_chain(me->arch.r_mips_hi16_list); me->arch.r_mips_hi16_list = NULL; - - return -ENOEXEC; + err = err ?: -ENOEXEC; } - return 0; + return err; +} + +int apply_relocate(Elf_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, + struct module *me) +{ + return __apply_relocate(sechdrs, strtab, symindex, relsec, me, false); +} + +#ifdef CONFIG_MODULES_USE_ELF_RELA +int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, + struct module *me) +{ + return __apply_relocate(sechdrs, strtab, symindex, relsec, me, true); } +#endif /* CONFIG_MODULES_USE_ELF_RELA */ /* Given an address, look for it in the module exception tables. */ const struct exception_table_entry *search_module_dbetables(unsigned long addr) diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index f3e301f95aef..9e6c74bf66c4 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -814,7 +814,7 @@ static const struct mips_perf_event mipsxxcore_event_map2 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T }, }; -static const struct mips_perf_event i6400_event_map[PERF_COUNT_HW_MAX] = { +static const struct mips_perf_event i6x00_event_map[PERF_COUNT_HW_MAX] = { [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD }, [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD }, /* These only count dcache, not icache */ @@ -1014,7 +1014,7 @@ static const struct mips_perf_event mipsxxcore_cache_map2 }, }; -static const struct mips_perf_event i6400_cache_map +static const struct mips_perf_event i6x00_cache_map [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { @@ -1610,6 +1610,7 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) #endif break; case CPU_I6400: + case CPU_I6500: /* 8-bit event numbers */ base_id = config & 0xff; raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; @@ -1770,8 +1771,13 @@ init_hw_perf_events(void) break; case CPU_I6400: mipspmu.name = "mips/I6400"; - mipspmu.general_event_map = &i6400_event_map; - mipspmu.cache_event_map = &i6400_cache_map; + mipspmu.general_event_map = &i6x00_event_map; + mipspmu.cache_event_map = &i6x00_cache_map; + break; + case CPU_I6500: + mipspmu.name = "mips/I6500"; + mipspmu.general_event_map = &i6x00_event_map; + mipspmu.cache_event_map = &i6x00_cache_map; break; case CPU_1004K: mipspmu.name = "mips/1004K"; diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 4eff2aed7360..70604c753aa4 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -83,7 +83,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) } seq_printf(m, "isa\t\t\t:"); - if (cpu_has_mips_r1) + if (cpu_has_mips_1) seq_printf(m, " mips1"); if (cpu_has_mips_2) seq_printf(m, "%s", " mips2"); @@ -109,6 +109,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "ASEs implemented\t:"); if (cpu_has_mips16) seq_printf(m, "%s", " mips16"); + if (cpu_has_mips16e2) seq_printf(m, "%s", " mips16e2"); if (cpu_has_mdmx) seq_printf(m, "%s", " mdmx"); if (cpu_has_mips3d) seq_printf(m, "%s", " mips3d"); if (cpu_has_smartmips) seq_printf(m, "%s", " smartmips"); diff --git a/arch/mips/kernel/ptrace.c 
b/arch/mips/kernel/ptrace.c index 6931fe722a0b..6dd13641a418 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -868,14 +868,39 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) tracehook_report_syscall_entry(regs)) return -1; - if (secure_computing(NULL) == -1) - return -1; +#ifdef CONFIG_SECCOMP + if (unlikely(test_thread_flag(TIF_SECCOMP))) { + int ret, i; + struct seccomp_data sd; + + sd.nr = syscall; + sd.arch = syscall_get_arch(); + for (i = 0; i < 6; i++) { + unsigned long v, r; + + r = mips_get_syscall_arg(&v, current, regs, i); + sd.args[i] = r ? 0 : v; + } + sd.instruction_pointer = KSTK_EIP(current); + + ret = __secure_computing(&sd); + if (ret == -1) + return ret; + } +#endif if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) trace_sys_enter(regs, regs->regs[2]); audit_syscall_entry(syscall, regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]); + + /* + * Negative syscall numbers are mistaken for rejected syscalls, but + * won't have had the return value set appropriately, so we do so now. + */ + if (syscall < 0) + syscall_set_return_value(current, regs, -ENOSYS, 0); return syscall; } @@ -895,7 +920,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs) audit_syscall_exit(regs); if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) - trace_sys_exit(regs, regs->regs[2]); + trace_sys_exit(regs, regs_return_value(regs)); if (test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, 0); diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 80ed68b2c95e..27c2f90eeb21 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -371,7 +371,7 @@ EXPORT(sys_call_table) PTR sys_writev PTR sys_cacheflush PTR sys_cachectl - PTR sys_sysmips + PTR __sys_sysmips PTR sys_ni_syscall /* 4150 */ PTR sys_getsid PTR sys_fdatasync diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 49765b44aa9b..65d5aeeb9bdb 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -311,7 +311,7 @@ EXPORT(sys_call_table) PTR sys_sched_getaffinity PTR sys_cacheflush PTR sys_cachectl - PTR sys_sysmips + PTR __sys_sysmips PTR sys_io_setup /* 5200 */ PTR sys_io_destroy PTR sys_io_getevents diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 90bad2d1b2d3..cbf190ef9e8a 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -302,7 +302,7 @@ EXPORT(sysn32_call_table) PTR compat_sys_sched_getaffinity PTR sys_cacheflush PTR sys_cachectl - PTR sys_sysmips + PTR __sys_sysmips PTR compat_sys_io_setup /* 6200 */ PTR sys_io_destroy PTR compat_sys_io_getevents diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 2dd70bd104e1..c30bc520885f 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -371,7 +371,7 @@ EXPORT(sys32_call_table) PTR compat_sys_writev PTR sys_cacheflush PTR sys_cachectl - PTR sys_sysmips + PTR __sys_sysmips PTR sys_ni_syscall /* 4150 */ PTR sys_getsid PTR sys_fdatasync diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 01d1dbde5fbf..fe3939726765 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -670,6 +670,46 @@ static int __init early_parse_mem(char *p) } early_param("mem", early_parse_mem); +static int __init early_parse_memmap(char *p) +{ + char *oldp; + u64 start_at, mem_size; + + if (!p) + return -EINVAL; + + if (!strncmp(p, "exactmap", 8)) { + 
pr_err("\"memmap=exactmap\" invalid on MIPS\n"); + return 0; + } + + oldp = p; + mem_size = memparse(p, &p); + if (p == oldp) + return -EINVAL; + + if (*p == '@') { + start_at = memparse(p+1, &p); + add_memory_region(start_at, mem_size, BOOT_MEM_RAM); + } else if (*p == '#') { + pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n"); + return -EINVAL; + } else if (*p == '$') { + start_at = memparse(p+1, &p); + add_memory_region(start_at, mem_size, BOOT_MEM_RESERVED); + } else { + pr_err("\"memmap\" invalid format!\n"); + return -EINVAL; + } + + if (*p == '\0') { + usermem = 1; + return 0; + } else + return -EINVAL; +} +early_param("memmap", early_parse_memmap); + #ifdef CONFIG_PROC_VMCORE unsigned long setup_elfcorehdr, setup_elfcorehdr_size; static int __init early_parse_elfcorehdr(char *p) diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index 36954ddd0b9f..f832e99ad4c3 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -142,9 +142,11 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) /* Warn the user if the CCA prevents multi-core */ ncores = mips_cm_numcores(); - if (cca_unsuitable && ncores > 1) { - pr_warn("Using only one core due to unsuitable CCA 0x%x\n", - cca); + if ((cca_unsuitable || cpu_has_dc_aliases) && ncores > 1) { + pr_warn("Using only one core due to %s%s%s\n", + cca_unsuitable ? "unsuitable CCA" : "", + (cca_unsuitable && cpu_has_dc_aliases) ? " & " : "", + cpu_has_dc_aliases ? "dcache aliasing" : ""); for_each_present_cpu(c) { if (cpu_data[c].core) @@ -488,6 +490,7 @@ static void cps_cpu_die(unsigned int cpu) { unsigned core = cpu_data[cpu].core; unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]); + ktime_t fail_time; unsigned stat; int err; @@ -514,6 +517,7 @@ static void cps_cpu_die(unsigned int cpu) * state, the latter happening when a JTAG probe is connected * in which case the CPC will refuse to power down the core. */ + fail_time = ktime_add_ms(ktime_get(), 2000); do { mips_cm_lock_other(core, 0); mips_cpc_lock_other(core); @@ -521,9 +525,28 @@ static void cps_cpu_die(unsigned int cpu) stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK; mips_cpc_unlock_other(); mips_cm_unlock_other(); - } while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 && - stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 && - stat != CPC_Cx_STAT_CONF_SEQSTATE_U2); + + if (stat == CPC_Cx_STAT_CONF_SEQSTATE_D0 || + stat == CPC_Cx_STAT_CONF_SEQSTATE_D2 || + stat == CPC_Cx_STAT_CONF_SEQSTATE_U2) + break; + + /* + * The core ought to have powered down, but didn't & + * now we don't really know what state it's in. It's + * likely that its _pwr_up pin has been wired to logic + * 1 & it powered back up as soon as we powered it + * down... + * + * The best we can do is warn the user & continue in + * the hope that the core is doing nothing harmful & + * might behave properly if we online it later. + */ + if (WARN(ktime_after(ktime_get(), fail_time), + "CPU%u hasn't powered down, seq. 
state %u\n", + cpu, stat >> CPC_Cx_STAT_CONF_SEQSTATE_SHF)) + break; + } while (1); /* Indicate the core is powered off */ bitmap_clear(core_power, core, 1); diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index aba1afb64b62..770d4d1516cb 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -335,6 +335,9 @@ int mips_smp_ipi_free(const struct cpumask *mask) static int __init mips_smp_ipi_init(void) { + if (num_possible_cpus() == 1) + return 0; + mips_smp_ipi_allocate(cpu_possible_mask); call_desc = irq_to_desc(call_virq); diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 1dfa7f5796c7..58c6f634b550 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -29,6 +29,7 @@ #include <linux/sched/task_stack.h> #include <asm/asm.h> +#include <asm/asm-eva.h> #include <asm/branch.h> #include <asm/cachectl.h> #include <asm/cacheflush.h> @@ -131,16 +132,14 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) __asm__ __volatile__ ( " .set "MIPS_ISA_ARCH_LEVEL" \n" " li %[err], 0 \n" - "1: ll %[old], (%[addr]) \n" + "1: \n" + user_ll("%[old]", "(%[addr])") " move %[tmp], %[new] \n" - "2: sc %[tmp], (%[addr]) \n" - " bnez %[tmp], 4f \n" + "2: \n" + user_sc("%[tmp]", "(%[addr])") + " beqz %[tmp], 1b \n" "3: \n" " .insn \n" - " .subsection 2 \n" - "4: b 1b \n" - " .previous \n" - " \n" " .section .fixup,\"ax\" \n" "5: li %[err], %[efault] \n" " j 3b \n" @@ -192,6 +191,12 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) unreachable(); } +/* + * mips_atomic_set() normally returns directly via syscall_exit potentially + * clobbering static registers, so be sure to preserve them. + */ +save_static_function(sys_sysmips); + SYSCALL_DEFINE3(sysmips, long, cmd, long, arg1, long, arg2) { switch (cmd) { diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index f806ee56e639..5eaf2578ac04 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -939,88 +939,114 @@ static void emulate_load_store_insn(struct pt_regs *regs, * The remaining opcodes are the ones that are really of * interest. */ -#ifdef CONFIG_EVA case spec3_op: - /* - * we can land here only from kernel accessing user memory, - * so we need to "switch" the address limit to user space, so - * address check can work properly. - */ - seg = get_fs(); - set_fs(USER_DS); - switch (insn.spec3_format.func) { - case lhe_op: - if (!access_ok(VERIFY_READ, addr, 2)) { - set_fs(seg); - goto sigbus; - } - LoadHWE(addr, value, res); - if (res) { - set_fs(seg); - goto fault; - } - compute_return_epc(regs); - regs->regs[insn.spec3_format.rt] = value; - break; - case lwe_op: - if (!access_ok(VERIFY_READ, addr, 4)) { - set_fs(seg); - goto sigbus; + if (insn.dsp_format.func == lx_op) { + switch (insn.dsp_format.op) { + case lwx_op: + if (!access_ok(VERIFY_READ, addr, 4)) + goto sigbus; + LoadW(addr, value, res); + if (res) + goto fault; + compute_return_epc(regs); + regs->regs[insn.dsp_format.rd] = value; + break; + case lhx_op: + if (!access_ok(VERIFY_READ, addr, 2)) + goto sigbus; + LoadHW(addr, value, res); + if (res) + goto fault; + compute_return_epc(regs); + regs->regs[insn.dsp_format.rd] = value; + break; + default: + goto sigill; } + } +#ifdef CONFIG_EVA + else { + /* + * we can land here only from kernel accessing user + * memory, so we need to "switch" the address limit to + * user space, so that address check can work properly. 
+ */ + seg = get_fs(); + set_fs(USER_DS); + switch (insn.spec3_format.func) { + case lhe_op: + if (!access_ok(VERIFY_READ, addr, 2)) { + set_fs(seg); + goto sigbus; + } + LoadHWE(addr, value, res); + if (res) { + set_fs(seg); + goto fault; + } + compute_return_epc(regs); + regs->regs[insn.spec3_format.rt] = value; + break; + case lwe_op: + if (!access_ok(VERIFY_READ, addr, 4)) { + set_fs(seg); + goto sigbus; + } LoadWE(addr, value, res); - if (res) { - set_fs(seg); - goto fault; - } - compute_return_epc(regs); - regs->regs[insn.spec3_format.rt] = value; - break; - case lhue_op: - if (!access_ok(VERIFY_READ, addr, 2)) { - set_fs(seg); - goto sigbus; - } - LoadHWUE(addr, value, res); - if (res) { - set_fs(seg); - goto fault; - } - compute_return_epc(regs); - regs->regs[insn.spec3_format.rt] = value; - break; - case she_op: - if (!access_ok(VERIFY_WRITE, addr, 2)) { - set_fs(seg); - goto sigbus; - } - compute_return_epc(regs); - value = regs->regs[insn.spec3_format.rt]; - StoreHWE(addr, value, res); - if (res) { - set_fs(seg); - goto fault; - } - break; - case swe_op: - if (!access_ok(VERIFY_WRITE, addr, 4)) { - set_fs(seg); - goto sigbus; - } - compute_return_epc(regs); - value = regs->regs[insn.spec3_format.rt]; - StoreWE(addr, value, res); - if (res) { + if (res) { + set_fs(seg); + goto fault; + } + compute_return_epc(regs); + regs->regs[insn.spec3_format.rt] = value; + break; + case lhue_op: + if (!access_ok(VERIFY_READ, addr, 2)) { + set_fs(seg); + goto sigbus; + } + LoadHWUE(addr, value, res); + if (res) { + set_fs(seg); + goto fault; + } + compute_return_epc(regs); + regs->regs[insn.spec3_format.rt] = value; + break; + case she_op: + if (!access_ok(VERIFY_WRITE, addr, 2)) { + set_fs(seg); + goto sigbus; + } + compute_return_epc(regs); + value = regs->regs[insn.spec3_format.rt]; + StoreHWE(addr, value, res); + if (res) { + set_fs(seg); + goto fault; + } + break; + case swe_op: + if (!access_ok(VERIFY_WRITE, addr, 4)) { + set_fs(seg); + goto sigbus; + } + compute_return_epc(regs); + value = regs->regs[insn.spec3_format.rt]; + StoreWE(addr, value, res); + if (res) { + set_fs(seg); + goto fault; + } + break; + default: set_fs(seg); - goto fault; + goto sigill; } - break; - default: set_fs(seg); - goto sigill; } - set_fs(seg); - break; #endif + break; case lh_op: if (!access_ok(VERIFY_READ, addr, 2)) goto sigbus; @@ -1984,6 +2010,8 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) u16 __user *pc16; unsigned long origpc; union mips16e_instruction mips16inst, oldinst; + unsigned int opcode; + int extended = 0; origpc = regs->cp0_epc; orig31 = regs->regs[31]; @@ -1996,6 +2024,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) /* skip EXTEND instruction */ if (mips16inst.ri.opcode == MIPS16e_extend_op) { + extended = 1; pc16++; __get_user(mips16inst.full, pc16); } else if (delay_slot(regs)) { @@ -2008,7 +2037,8 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) goto sigbus; } - switch (mips16inst.ri.opcode) { + opcode = mips16inst.ri.opcode; + switch (opcode) { case MIPS16e_i64_op: /* I64 or RI64 instruction */ switch (mips16inst.i64.func) { /* I64/RI64 func field check */ case MIPS16e_ldpc_func: @@ -2028,9 +2058,40 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) goto sigbus; case MIPS16e_swsp_op: + reg = reg16to32[mips16inst.ri.rx]; + if (extended && cpu_has_mips16e2) + switch (mips16inst.ri.imm >> 5) { + case 0: /* SWSP */ + case 1: /* SWGP */ + break; + 
case 2: /* SHGP */ + opcode = MIPS16e_sh_op; + break; + default: + goto sigbus; + } + break; + case MIPS16e_lwpc_op: + reg = reg16to32[mips16inst.ri.rx]; + break; + case MIPS16e_lwsp_op: reg = reg16to32[mips16inst.ri.rx]; + if (extended && cpu_has_mips16e2) + switch (mips16inst.ri.imm >> 5) { + case 0: /* LWSP */ + case 1: /* LWGP */ + break; + case 2: /* LHGP */ + opcode = MIPS16e_lh_op; + break; + case 4: /* LHUGP */ + opcode = MIPS16e_lhu_op; + break; + default: + goto sigbus; + } break; case MIPS16e_i8_op: @@ -2044,7 +2105,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) break; } - switch (mips16inst.ri.opcode) { + switch (opcode) { case MIPS16e_lb_op: case MIPS16e_lbu_op: |
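
To make the masking arithmetic in the new cmpxchg.c above concrete, here is a minimal user-space sketch of the same technique: a 1-byte compare-and-exchange built from a compare-and-exchange on the naturally aligned 32-bit word around it. This is not the kernel code; the cmpxchg_u8() name, the test harness and the use of GCC's __atomic_compare_exchange_n() in place of the kernel's cmpxchg() are illustrative assumptions, and only the little-endian byte-offset-to-shift mapping is shown (the kernel additionally adjusts the offset for big-endian).

/*
 * Illustrative sketch only -- not part of the patch. Mirrors the shift/mask
 * technique of __cmpxchg_small() in user space, little-endian layout assumed.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Compare-and-exchange one byte via the aligned 32-bit word containing it. */
static uint8_t cmpxchg_u8(volatile uint8_t *ptr, uint8_t old, uint8_t new)
{
	/* Pointer to the naturally aligned 4 byte word that includes *ptr. */
	volatile uint32_t *ptr32 =
		(volatile uint32_t *)((uintptr_t)ptr & ~(uintptr_t)0x3);

	/* Byte offset within that word -> bit shift & mask (little endian). */
	unsigned int shift = ((uintptr_t)ptr & 0x3) * 8;
	uint32_t mask = (uint32_t)0xff << shift;

	uint32_t load32 = *ptr32;

	while (true) {
		/* Bail out if the byte no longer holds the expected value. */
		uint8_t cur = (load32 & mask) >> shift;
		if (cur != old)
			return cur;

		/* Splice the old/new byte into the surrounding word. */
		uint32_t old32 = (load32 & ~mask) | ((uint32_t)old << shift);
		uint32_t new32 = (load32 & ~mask) | ((uint32_t)new << shift);

		/* Full-word CAS; on failure old32 is updated, so retry. */
		if (__atomic_compare_exchange_n(ptr32, &old32, new32, false,
						__ATOMIC_SEQ_CST,
						__ATOMIC_SEQ_CST))
			return old;
		load32 = old32;
	}
}

int main(void)
{
	_Alignas(4) uint8_t buf[4] = { 0x11, 0x22, 0x33, 0x44 };

	/* Swap buf[1] from 0x22 to 0xaa; neighbouring bytes are untouched. */
	uint8_t prev = cmpxchg_u8(&buf[1], 0x22, 0xaa);

	printf("prev=%#x buf={%#x,%#x,%#x,%#x}\n",
	       prev, buf[0], buf[1], buf[2], buf[3]);
	return 0;
}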
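The reloc_handler() comment added to module.c above describes how a single handler body now serves both REL and RELA sections through the base and v parameters. A small stand-alone illustration of that calling convention for R_MIPS_32 follows; the values and the harness are made up for the example and are not kernel code.

/* Illustrative sketch only -- shows the base/v convention, not kernel code. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* One body handles both variants, as in the unified kernel handler. */
static void apply_r_mips_32(uint32_t *location, uint32_t base, uint64_t v)
{
	*location = base + (uint32_t)v;
}

int main(void)
{
	const uint64_t st_value = 0x80001000;	/* resolved symbol address */
	const uint32_t addend   = 0x10;

	/* REL: the addend sits in the word being patched; base = *location. */
	uint32_t rel_word = addend;
	apply_r_mips_32(&rel_word, rel_word, st_value);

	/* RELA: the addend comes from r_addend; base = 0, v = value + addend. */
	uint32_t rela_word = 0xdeadbeef;	/* existing contents ignored */
	apply_r_mips_32(&rela_word, 0, st_value + addend);

	/* Both paths produce 0x80001010. */
	printf("REL: 0x%" PRIx32 "  RELA: 0x%" PRIx32 "\n", rel_word, rela_word);
	return 0;
}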
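As a usage note for the new early_parse_memmap() in setup.c: memmap=<size>@<address> adds a usable RAM region and memmap=<size>$<address> adds a reserved one, so for example memmap=16M$0x10000000 keeps 16 MiB starting at 256 MiB away from the allocator. The exactmap keyword and the <size>#<address> (ACPI data) form familiar from x86 are reported as invalid on MIPS, and any trailing characters after the region specification cause the parameter to be rejected.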