author:    Matt Redfearn <matt.redfearn@imgtec.com>  2016-09-07 10:45:14 +0100
committer: Ralf Baechle <ralf@linux-mips.org>        2016-10-04 16:13:57 +0200
commit:    85e540be7549c8eda90f056d30534be8f58777a7 (patch)
tree:      5da5844913701b981c161d9c6375b4040ffb0df5 /arch/mips/kernel
parent:    6622ada354ba2c865c6ee1854e130c3abb430808 (diff)
MIPS: pm-cps: Use MIPS standard lightweight ordering barrier
Since R2 of the MIPS architecture, SYNC(0x10) has been an optional but
architecturally defined ordering barrier. If a CPU does not implement it,
the architecture specifies that it must fall back to SYNC(0). In places
where we require that the instruction stream not be reordered, but do not
require that loads / stores be globally completed, use the standard,
architecturally defined sync stype.

Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
Reviewed-by: Paul Burton <paul.burton@imgtec.com>
Cc: Adam Buchbinder <adam.buchbinder@gmail.com>
Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/14221/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
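As an illustration of the two barrier flavours, here is a minimal sketch
(not part of this patch; it assumes an assembler targeting MIPS32R2 or
later, which accepts a non-zero stype operand to SYNC):

/*
 * SYNC(0x10) orders the instruction stream without waiting for
 * outstanding loads/stores to complete globally. On CPUs that do not
 * implement stype 0x10 the architecture requires it to behave as the
 * heavyweight SYNC(0), so emitting it is always safe.
 */
static inline void ordering_barrier(void)
{
	__asm__ __volatile__("sync 0x10" : : : "memory");
}

static inline void completion_barrier(void)
{
	/* SYNC(0): additionally waits for loads/stores to globally complete */
	__asm__ __volatile__("sync" : : : "memory");
}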
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/pm-cps.c | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
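For context, the standard stype values the patch relies on, reconstructed
from arch/mips/include/asm/barrier.h of this era (treat the exact header
location as an assumption; the 0x10 value itself is confirmed by the
commit message and the hunks below):

/* stype 0: completion barrier - loads/stores must globally complete */
#define STYPE_SYNC	0x0
/* stype 0x10: ordering barrier - enforces ordering only */
#define STYPE_SYNC_MB	0x10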
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 7e8d4aa22233..d7037fe00d1c 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -76,7 +76,6 @@ static struct uasm_reloc relocs[32] __initdata;
/* CPU dependant sync types */
static unsigned stype_intervention;
static unsigned stype_memory;
-static unsigned stype_ordering;
enum mips_reg {
zero, at, v0, v1, a0, a1, a2, a3,
@@ -406,7 +405,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
if (coupled_coherence) {
/* Increment ready_count */
- uasm_i_sync(&p, stype_ordering);
+ uasm_i_sync(&p, STYPE_SYNC_MB);
uasm_build_label(&l, p, lbl_incready);
uasm_i_ll(&p, t1, 0, r_nc_count);
uasm_i_addiu(&p, t2, t1, 1);
@@ -415,7 +414,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
uasm_i_addiu(&p, t1, t1, 1);
/* Barrier ensuring all CPUs see the updated r_nc_count value */
- uasm_i_sync(&p, stype_ordering);
+ uasm_i_sync(&p, STYPE_SYNC_MB);
/*
* If this is the last VPE to become ready for non-coherence
@@ -568,7 +567,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
/* Decrement ready_count */
uasm_build_label(&l, p, lbl_decready);
- uasm_i_sync(&p, stype_ordering);
+ uasm_i_sync(&p, STYPE_SYNC_MB);
uasm_i_ll(&p, t1, 0, r_nc_count);
uasm_i_addiu(&p, t2, t1, -1);
uasm_i_sc(&p, t2, 0, r_nc_count);
@@ -576,7 +575,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);
/* Barrier ensuring all CPUs see the updated r_nc_count value */
- uasm_i_sync(&p, stype_ordering);
+ uasm_i_sync(&p, STYPE_SYNC_MB);
}
if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
@@ -598,7 +597,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
uasm_build_label(&l, p, lbl_secondary_cont);
/* Barrier ensuring all CPUs see the updated r_nc_count value */
- uasm_i_sync(&p, stype_ordering);
+ uasm_i_sync(&p, STYPE_SYNC_MB);
}
/* The core is coherent, time to return to C code */
@@ -677,7 +676,6 @@ static int __init cps_pm_init(void)
case CPU_I6400:
stype_intervention = 0x2;
stype_memory = 0x3;
- stype_ordering = 0x10;
break;
default:
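The net effect on the generated entry code can be sketched in C (a
hypothetical equivalent of the uasm-emitted ll/sc sequence, reusing
ordering_barrier() from the sketch above; the real code is emitted as
machine instructions by uasm at boot):

/*
 * Hypothetical C-level equivalent of the generated "increment
 * ready_count" sequence. After this patch the surrounding barriers
 * always use the architecturally defined SYNC(0x10) instead of a
 * per-CPU stype_ordering value.
 */
static void increment_ready_count(atomic_t *nc_count)
{
	ordering_barrier();	/* order prior accesses before the update */
	atomic_inc(nc_count);	/* stands in for the ll/addiu/sc retry loop */
	ordering_barrier();	/* ensure all CPUs see the new count value */
}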