author:    Linus Torvalds <torvalds@linux-foundation.org>  2013-05-03 09:13:19 -0700
committer: Linus Torvalds <torvalds@linux-foundation.org>  2013-05-03 09:13:19 -0700
commit:    8546dc1d4b671480961c3eaf4c0c102ae6848340 (patch)
tree:      c646079fb48811b22b742deb6bd2e907f9e6c3d4 /arch/arm/common/vlock.S
parent:    9992ba72327fa0d8bdc9fb624e80f5cce338a711 (diff)
parent:    33b9f582c5c1db515412cc7efff28f7d1779321f (diff)
Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM updates from Russell King:
 "The major items included in here are:

   - MCPM, multi-cluster power management, part of the infrastructure
     required for ARM's big.LITTLE support.

   - A rework of the ARM KVM code to allow re-use by ARM64.

   - Error handling cleanups of the IS_ERR_OR_NULL() madness and fixes
     of that stuff for arch/arm.

   - Preparatory patches for Cortex-M3 support from Uwe Kleine-König.

  There is also a set of three patches in here from Hugh/Catalin to
  address freeing of inappropriate page tables on LPAE.  You already
  have these from akpm, but they were already part of my tree at the
  time he sent them, so unfortunately they'll end up with duplicate
  commits"

* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm: (77 commits)
  ARM: EXYNOS: remove unnecessary use of IS_ERR_VALUE()
  ARM: IMX: remove unnecessary use of IS_ERR_VALUE()
  ARM: OMAP: use consistent error checking
  ARM: cleanup: OMAP hwmod error checking
  ARM: 7709/1: mcpm: Add explicit AFLAGS to support v6/v7 multiplatform kernels
  ARM: 7700/2: Make cpu_init() notrace
  ARM: 7702/1: Set the page table freeing ceiling to TASK_SIZE
  ARM: 7701/1: mm: Allow arch code to control the user page table ceiling
  ARM: 7703/1: Disable preemption in broadcast_tlb*_a15_erratum()
  ARM: mcpm: provide an interface to set the SMP ops at run time
  ARM: mcpm: generic SMP secondary bringup and hotplug support
  ARM: mcpm_head.S: vlock-based first man election
  ARM: mcpm: Add baremetal voting mutexes
  ARM: mcpm: introduce helpers for platform coherency exit/setup
  ARM: mcpm: introduce the CPU/cluster power API
  ARM: multi-cluster PM: secondary kernel entry code
  ARM: cacheflush: add synchronization helpers for mixed cache state accesses
  ARM: cpu hotplug: remove majority of cache flushing from platforms
  ARM: smp: flush L1 cache in cpu_die()
  ARM: tegra: remove tegra specific cpu_disable()
  ...
Diffstat (limited to 'arch/arm/common/vlock.S')
-rw-r--r--  arch/arm/common/vlock.S  108
1 file changed, 108 insertions(+), 0 deletions(-)
diff --git a/arch/arm/common/vlock.S b/arch/arm/common/vlock.S
new file mode 100644
index 000000000000..ff198583f683
--- /dev/null
+++ b/arch/arm/common/vlock.S
@@ -0,0 +1,108 @@
+/*
+ * vlock.S - simple voting lock implementation for ARM
+ *
+ * Created by: Dave Martin, 2012-08-16
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * This algorithm is described in more detail in
+ * Documentation/arm/vlocks.txt.
+ */
+
+#include <linux/linkage.h>
+#include "vlock.h"
+
+/* Select different code depending on whether the voting flags fit in a single word. */
+#if VLOCK_VOTING_SIZE > 4
+#define FEW(x...)
+#define MANY(x...) x
+#else
+#define FEW(x...) x
+#define MANY(x...)
+#endif
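+
+/*
+ * Example: each CPU's voting flag is one byte (see the strb in
+ * voting_begin), so with at most four CPUs per cluster the flags fit
+ * in a single 32-bit word and the FEW() path polls them with one load;
+ * larger clusters take the MANY() path, which loops over the flags a
+ * word at a time.
+ */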
+
+@ voting lock for first-man coordination
+
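+@ voting_begin: raise this CPU's "voting in progress" flag.  The DMB
+@ makes the flag observable before any subsequent access, in particular
+@ the read of the owner field in vlock_trylock.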
+.macro voting_begin rbase:req, rcpu:req, rscratch:req
+ mov \rscratch, #1
+ strb \rscratch, [\rbase, \rcpu]
+ dmb
+.endm
+
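+@ voting_end: the DMB orders any vote just cast ahead of clearing this
+@ CPU's flag; DSB then SEV ensure the cleared flag is visible before
+@ CPUs sleeping in WFE are woken to re-check it.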
+.macro voting_end rbase:req, rcpu:req, rscratch:req
+ dmb
+ mov \rscratch, #0
+ strb \rscratch, [\rbase, \rcpu]
+ dsb
+ sev
+.endm
+
+/*
+ * The vlock structure must reside in Strongly-Ordered or Device memory.
+ * This implementation deliberately eliminates most of the barriers which
+ * would be required for other memory types, and assumes that independent
+ * writes to neighbouring locations within a cacheline do not interfere
+ * with one another.
+ */
+
+@ r0: lock structure base
+@ r1: CPU ID (0-based index within cluster)
+ENTRY(vlock_trylock)
+ add r1, r1, #VLOCK_VOTING_OFFSET
+
+ voting_begin r0, r1, r2
+
+ ldrb r2, [r0, #VLOCK_OWNER_OFFSET] @ check whether lock is held
+ cmp r2, #VLOCK_OWNER_NONE
+ bne trylock_fail @ fail if so
+
+ @ Control dependency implies strb not observable before previous ldrb.
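+ @ (The strb executes only if the bne above is not taken, and that
+ @ branch depends on the value just loaded, so no barrier is needed.)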
+
+ strb r1, [r0, #VLOCK_OWNER_OFFSET] @ submit my vote
+
+ voting_end r0, r1, r2 @ implies DMB
+
+ @ Wait for the current round of voting to finish:
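+ @ (Poll the voting flags until every byte reads as zero; WFE sleeps
+ @ while flags remain set, until a voting_end issues SEV.)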
+
+ MANY( mov r3, #VLOCK_VOTING_OFFSET )
+0:
+ MANY( ldr r2, [r0, r3] )
+ FEW( ldr r2, [r0, #VLOCK_VOTING_OFFSET] )
+ cmp r2, #0
+ wfene
+ bne 0b
+ MANY( add r3, r3, #4 )
+ MANY( cmp r3, #VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE )
+ MANY( bne 0b )
+
+ @ Check who won:
+
+ dmb
+ ldrb r2, [r0, #VLOCK_OWNER_OFFSET]
+ eor r0, r1, r2 @ zero if I won, else nonzero
+ bx lr
+
+trylock_fail:
+ voting_end r0, r1, r2
+ mov r0, #1 @ nonzero indicates that I lost
+ bx lr
+ENDPROC(vlock_trylock)
+
+@ r0: lock structure base
+ENTRY(vlock_unlock)
+ dmb
+ mov r1, #VLOCK_OWNER_NONE
+ strb r1, [r0, #VLOCK_OWNER_OFFSET]
+ dsb
+ sev
+ bx lr
+ENDPROC(vlock_unlock)
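
For readers who prefer C, here is a minimal sketch of the same voting
algorithm using C11 atomics. It is illustrative only and not part of this
commit: the names (vlock_t, vlock_trylock_c, vlock_unlock_c, NCPUS) are
invented here, sequentially-consistent atomics stand in for the explicit
DMB/DSB barriers and the Strongly-Ordered memory the assembly relies on,
and busy-waiting replaces WFE/SEV.

#include <stdatomic.h>
#include <stdbool.h>

#define NCPUS      4   /* CPUs per cluster; hypothetical value */
#define OWNER_NONE 0   /* mirrors VLOCK_OWNER_NONE */

typedef struct {
	atomic_uchar owner;         /* mirrors the VLOCK_OWNER_OFFSET field  */
	atomic_uchar voting[NCPUS]; /* mirrors the VLOCK_VOTING_OFFSET array */
} vlock_t;

/* Returns true if this CPU won the election; cpu is a 0-based index. */
static bool vlock_trylock_c(vlock_t *v, unsigned int cpu)
{
	/* The assembly stores cpu + VLOCK_VOTING_OFFSET as the owner so a
	 * real CPU ID can never equal VLOCK_OWNER_NONE; "+ 1" gives the
	 * same guarantee here. */
	unsigned char me = (unsigned char)(cpu + 1);

	atomic_store(&v->voting[cpu], 1);            /* voting_begin */

	if (atomic_load(&v->owner) != OWNER_NONE) {  /* lock already held */
		atomic_store(&v->voting[cpu], 0);    /* voting_end */
		return false;
	}

	atomic_store(&v->owner, me);                 /* submit my vote */
	atomic_store(&v->voting[cpu], 0);            /* voting_end */

	/* Wait for the current round of voting to finish. */
	for (unsigned int i = 0; i < NCPUS; i++)
		while (atomic_load(&v->voting[i]))
			;                            /* the asm sleeps in WFE */

	return atomic_load(&v->owner) == me;         /* check who won */
}

static void vlock_unlock_c(vlock_t *v)
{
	atomic_store(&v->owner, OWNER_NONE);         /* the asm adds DSB + SEV */
}

Within this series the lock is consumed from assembly rather than C: per
the shortlog above, mcpm_head.S uses it for the first-man election during
cluster power-up, calling vlock_trylock with the lock base in r0 and the
0-based CPU index in r1 and treating a zero return value as victory.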