path: root/arch/arm/include/asm/sync_bitops.h
author     Stefano Stabellini <stefano.stabellini@eu.citrix.com>  2012-08-08 16:34:01 +0000
committer  Stefano Stabellini <stefano.stabellini@eu.citrix.com>  2012-08-08 16:34:01 +0000
commit     e54d2f61528165bbe0e5f628b111eab3be31c3b5 (patch)
tree       f8233f60976e66c3e09ee5f2bf76da5d3f3c0477 /arch/arm/include/asm/sync_bitops.h
parent     36a67abce227af005b4a595735556942b74f6741 (diff)
download   linux-e54d2f61528165bbe0e5f628b111eab3be31c3b5.tar.bz2
xen/arm: sync_bitops
sync_bitops functions are equivalent to the SMP implementation of the original
functions, independently from CONFIG_SMP being defined. We need them because
_set_bit etc are not SMP safe if !CONFIG_SMP. But under Xen you might be
communicating with a completely external entity who might be on another CPU
(e.g. two uniprocessor guests communicating via event channels and grant
tables). So we need a variant of the bit ops which are SMP safe even on a
UP kernel.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'arch/arm/include/asm/sync_bitops.h')
-rw-r--r--  arch/arm/include/asm/sync_bitops.h  |  27
1 file changed, 27 insertions(+), 0 deletions(-)
diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h
new file mode 100644
index 000000000000..63479eecbf76
--- /dev/null
+++ b/arch/arm/include/asm/sync_bitops.h
@@ -0,0 +1,27 @@
+#ifndef __ASM_SYNC_BITOPS_H__
+#define __ASM_SYNC_BITOPS_H__
+
+#include <asm/bitops.h>
+#include <asm/system.h>
+
+/* sync_bitops functions are equivalent to the SMP implementation of the
+ * original functions, independently from CONFIG_SMP being defined.
+ *
+ * We need them because _set_bit etc are not SMP safe if !CONFIG_SMP. But
+ * under Xen you might be communicating with a completely external entity
+ * who might be on another CPU (e.g. two uniprocessor guests communicating
+ * via event channels and grant tables). So we need a variant of the bit
+ * ops which are SMP safe even on a UP kernel.
+ */
+
+#define sync_set_bit(nr, p) _set_bit(nr, p)
+#define sync_clear_bit(nr, p) _clear_bit(nr, p)
+#define sync_change_bit(nr, p) _change_bit(nr, p)
+#define sync_test_and_set_bit(nr, p) _test_and_set_bit(nr, p)
+#define sync_test_and_clear_bit(nr, p) _test_and_clear_bit(nr, p)
+#define sync_test_and_change_bit(nr, p) _test_and_change_bit(nr, p)
+#define sync_test_bit(nr, addr) test_bit(nr, addr)
+#define sync_cmpxchg cmpxchg
+
+
+#endif
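
A usage sketch (not part of this patch): the point of the sync_ variants is
that they stay atomic even on a !CONFIG_SMP kernel, so a uniprocessor guest
can safely flip bits in memory shared with another domain. The shared_pending
bitmap and EVT_PORT below are hypothetical names used purely for
illustration; the real users are the Xen event channel and grant table code.

/* Hypothetical illustration only: shared_pending and EVT_PORT are made-up
 * names standing in for a bitmap living in a page shared with another
 * domain.  The sync_ ops keep these updates atomic even if this kernel is
 * built without CONFIG_SMP.
 */
#include <linux/types.h>
#include <asm/sync_bitops.h>

#define EVT_PORT	3

static unsigned long *shared_pending;	/* mapped from a shared page */

/* Producer side: flag the port as pending for the remote domain. */
static void raise_event(void)
{
	sync_set_bit(EVT_PORT, shared_pending);
}

/* Consumer side: atomically claim and clear a pending port. */
static bool handle_event(void)
{
	return sync_test_and_clear_bit(EVT_PORT, shared_pending);
}

With the plain _test_and_clear_bit, a UP-configured kernel may use a
non-atomic implementation and a concurrent update from the remote domain
could be lost; the sync_ aliases always expand to the SMP-safe versions.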