Diffstat (limited to 'arch/arc/include/asm/spinlock.h')
-rw-r--r--  arch/arc/include/asm/spinlock.h  12
1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index a325e6a36523..47efc8451b70 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -247,9 +247,15 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 
 	__asm__ __volatile__(
 	"1:	ex  %0, [%1]		\n"
+#ifdef CONFIG_EZNPS_MTM_EXT
+	"	.word %3		\n"
+#endif
 	"	breq  %0, %2, 1b	\n"
 	: "+&r" (val)
 	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
+#ifdef CONFIG_EZNPS_MTM_EXT
+	, "i"(CTOP_INST_SCHD_RW)
+#endif
 	: "memory");
 
 	/*
@@ -291,6 +297,12 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 	 */
 	smp_mb();
 
+	/*
+	 * EX is not really required here, a simple STore of 0 suffices.
+	 * However that causes tasklist livelocks in SystemC-based SMP virtual
+	 * platforms, where the SystemC core scheduler uses EX as a cue for
+	 * moving to the next core. See the git log of this file for details.
+	 */
 	__asm__ __volatile__(
 	"	ex  %0, [%1]		\n"
 	: "+r" (val)