author	Vincent Whitchurch <vincent.whitchurch@axis.com>	2018-11-09 10:12:30 +0100
committer	Russell King <rmk+kernel@armlinux.org.uk>	2018-11-12 10:52:04 +0000
commit	344eb5539abf3e0b6ce22568c03e86450073e097 (patch)
tree	7ed1b8d9e68977e903e738c518e64cecb5936b8a /arch/arm/include/asm/uaccess.h
parent	f441882a5229ffaef61a47bccd4518f7e2749cbc (diff)
ARM: 8813/1: Make aligned 2-byte getuser()/putuser() atomic on ARMv6+
getuser() and putuser() (and their underscored variants) use two strb[t]/ldrb[t] instructions when they are asked to get/put 16 bits. This means the read/write is not atomic even when performed on a 16-bit-aligned address.

This leads to problems with vhost: vhost uses __getuser() to read the vring's 16-bit avail.index field, and if it happens to observe a partial update of the index, the wrong descriptors will be used, leading to a breakdown of the virtio communication. A similar problem exists for __putuser(), which is used to write the vring's used.index field.

The reason these functions use strb[t]/ldrb[t] is that the strht/ldrht instructions did not exist until ARMv6T2/ARMv7, so we can easily fix this on ARMv7. And since ARMv6 processors no longer actually use the unprivileged instructions for uaccess anyway (CONFIG_CPU_USE_DOMAINS is not used), they can easily be fixed too.

Signed-off-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
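As an aside (not part of the commit), here is a minimal C sketch of the difference the patch addresses: a 16-bit read assembled from two byte loads, which is what the pre-ARMv6 __get_user_asm_half() fallback effectively does, versus a single aligned halfword load, which cannot be torn on ARMv6+. The read_u16_* helpers are invented for illustration and are not kernel code.

/*
 * Illustrative sketch only: if a concurrent writer moves a 16-bit
 * index from 0x00ff to 0x0100 between the two byte loads, the reader
 * can observe a torn value such as 0x0000 or 0x01ff.  A single
 * aligned halfword access (ldrh/strh) cannot be torn on ARMv6+.
 */
#include <stdint.h>

/* Two byte loads recombined into one value (little-endian case). */
static inline uint16_t read_u16_bytewise(const volatile uint8_t *p)
{
	uint8_t b1 = p[0];	/* a writer may update the field here... */
	uint8_t b2 = p[1];	/* ...so b1 and b2 can come from different values */
	return (uint16_t)b1 | ((uint16_t)b2 << 8);
}

/* One aligned 16-bit access, as the new ldrh-based path performs. */
static inline uint16_t read_u16_halfword(const volatile uint16_t *p)
{
	return *p;		/* a single halfword load for an aligned p */
}

In the vhost case described above, a torn read of avail.index is exactly what leads to the wrong descriptors being used; a single halfword access removes that window.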
Diffstat (limited to 'arch/arm/include/asm/uaccess.h')
-rw-r--r--	arch/arm/include/asm/uaccess.h	18
1 file changed, 18 insertions, 0 deletions
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index c136eef8f690..6390a40f16e7 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -349,6 +349,13 @@ do { \
 #define __get_user_asm_byte(x, addr, err)		\
 	__get_user_asm(x, addr, err, ldrb)
 
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define __get_user_asm_half(x, addr, err)		\
+	__get_user_asm(x, addr, err, ldrh)
+
+#else
+
 #ifndef __ARMEB__
 #define __get_user_asm_half(x, __gu_addr, err)		\
 ({							\
@@ -367,6 +374,8 @@ do { \
 })
 #endif
 
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
 #define __get_user_asm_word(x, addr, err)		\
 	__get_user_asm(x, addr, err, ldr)
 #endif
@@ -442,6 +451,13 @@ do { \
 #define __put_user_asm_byte(x, __pu_addr, err)		\
 	__put_user_asm(x, __pu_addr, err, strb)
 
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define __put_user_asm_half(x, __pu_addr, err)		\
+	__put_user_asm(x, __pu_addr, err, strh)
+
+#else
+
 #ifndef __ARMEB__
 #define __put_user_asm_half(x, __pu_addr, err)		\
 ({							\
@@ -458,6 +474,8 @@ do { \
 })
 #endif
 
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
 #define __put_user_asm_word(x, __pu_addr, err)		\
 	__put_user_asm(x, __pu_addr, err, str)