author     Matt Redfearn <matt.redfearn@imgtec.com>    2016-09-07 10:45:17 +0100
committer  Ralf Baechle <ralf@linux-mips.org>          2016-10-04 16:13:57 +0200
commit     929d4f51e6b87900c9179eb62d6b43db6ce4930d (patch)
tree       50dfa2a95d187d530d42529e0801f7437c33a289 /arch/mips/kernel
parent     15ea26cf510ce07c9397579ce869f7f3713eaae4 (diff)
download   linux-929d4f51e6b87900c9179eb62d6b43db6ce4930d.tar.bz2
MIPS: pm-cps: Add MIPSr6 CPU support
This patch adds support for CPUs implementing the MIPSr6 ISA to the CPS power management code. Three changes are necessary:

1. In MIPSr6, coupled coherence is necessary when CPUs implement multiple Virtual Processors (VPs).
2. MIPSr6 virtual processors are more like real cores and cannot yield to other VPs on the same core, so drop the MT ASE yield instruction.
3. To halt a MIPSr6 VP, the CPC VP_STOP register is used rather than the MT ASE TCHalt CP0 register.

Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
Reviewed-by: Paul Burton <paul.burton@imgtec.com>
Cc: Adam Buchbinder <adam.buchbinder@gmail.com>
Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/14225/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
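
The sketch below is a plain-C approximation of what change 3 amounts to at run time on a MIPSr6 system (the cpu_has_vp case): writing the halting VP's bit into the core-local CPC VP_STOP register. The wrapper function, the headers and the raw volatile store are illustrative assumptions; the patch itself emits the equivalent instructions with uasm inside cps_gen_entry_code() (see the final hunk below), since the sequence must run from the generated power-management entry code rather than ordinary kernel text.

/*
 * Illustrative sketch only, not part of the patch: the run-time effect of
 * the new cpu_has_vp branch. The function name and the direct volatile
 * store are assumptions; the real change builds the same store with uasm.
 */
#include <linux/types.h>
#include <asm/mips-cpc.h>	/* addr_cpc_cl_vp_stop(); assumed header */
#include <asm/cpu-info.h>	/* cpu_data[], cpu_vpe_id(); assumed header */

static void example_halt_this_vp(unsigned int cpu)
{
	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);

	/*
	 * Setting bit 'vpe_id' of the core-local CPC VP_STOP register halts
	 * that Virtual Processor; on pre-r6 MT cores the equivalent is the
	 * TCHALT_H write to the CP0 TCHalt register kept in the other branch.
	 */
	*((volatile u32 *)addr_cpc_cl_vp_stop()) = 1 << vpe_id;
}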
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--   arch/mips/kernel/pm-cps.c   22
1 file changed, 18 insertions(+), 4 deletions(-)
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index b3a7d36ada5a..440e79259566 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -129,7 +129,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
return -EINVAL;
/* Calculate which coupled CPUs (VPEs) are online */
-#ifdef CONFIG_MIPS_MT
+#if defined(CONFIG_MIPS_MT) || defined(CONFIG_CPU_MIPSR6)
if (cpu_online(cpu)) {
cpumask_and(coupled_mask, cpu_online_mask,
&cpu_sibling_map[cpu]);
@@ -431,7 +431,8 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
uasm_i_lw(&p, t0, 0, r_nc_count);
uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
uasm_i_ehb(&p);
- uasm_i_yield(&p, zero, t1);
+ if (cpu_has_mipsmt)
+ uasm_i_yield(&p, zero, t1);
uasm_il_b(&p, &r, lbl_poll_cont);
uasm_i_nop(&p);
} else {
@@ -439,8 +440,21 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
* The core will lose power & this VPE will not continue
* so it can simply halt here.
*/
- uasm_i_addiu(&p, t0, zero, TCHALT_H);
- uasm_i_mtc0(&p, t0, 2, 4);
+ if (cpu_has_mipsmt) {
+ /* Halt the VPE via C0 tchalt register */
+ uasm_i_addiu(&p, t0, zero, TCHALT_H);
+ uasm_i_mtc0(&p, t0, 2, 4);
+ } else if (cpu_has_vp) {
+ /* Halt the VP via the CPC VP_STOP register */
+ unsigned int vpe_id;
+
+ vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+ uasm_i_addiu(&p, t0, zero, 1 << vpe_id);
+ UASM_i_LA(&p, t1, (long)addr_cpc_cl_vp_stop());
+ uasm_i_sw(&p, t0, 0, t1);
+ } else {
+ BUG();
+ }
uasm_build_label(&l, p, lbl_secondary_hang);
uasm_il_b(&p, &r, lbl_secondary_hang);
uasm_i_nop(&p);
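
As a second hedged illustration, this time of change 1: the sketch below pulls the coupled-CPU selection from the first hunk into a standalone helper. Only the preprocessor condition and the cpumask calls come from the diff; the wrapper function, the headers and the cpumask_clear() fallback are assumptions for illustration.

#include <linux/cpumask.h>
#include <asm/smp.h>	/* assumed: MIPS cpu_sibling_map[] declaration */

static void example_coupled_cpus(unsigned int cpu, struct cpumask *coupled_mask)
{
	cpumask_clear(coupled_mask);

#if defined(CONFIG_MIPS_MT) || defined(CONFIG_CPU_MIPSR6)
	/*
	 * MT cores (multiple VPEs) and MIPSr6 cores (multiple VPs) both
	 * require coupled coherence: the online siblings sharing the core
	 * must take part in the power state transition together.
	 */
	if (cpu_online(cpu))
		cpumask_and(coupled_mask, cpu_online_mask,
			    &cpu_sibling_map[cpu]);
#endif
}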