path: root/arch/arm/lib
author		Russell King <rmk+kernel@arm.linux.org.uk>	2015-08-19 20:40:41 +0100
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2015-08-26 20:34:24 +0100
commit		a5e090acbf545c0a3b04080f8a488b17ec41fe02 (patch)
tree		ae603e2e67bcac0564b2eba0a7771f8c5cebf352 /arch/arm/lib
parent		2190fed67ba6f3e8129513929f2395843645e928 (diff)
download	linux-a5e090acbf545c0a3b04080f8a488b17ec41fe02.tar.bz2
ARM: software-based privileged-no-access support
Provide a software-based implementation of the privileged no-access support found in ARMv8.1.

Userspace pages are mapped using a different domain number from the kernel and IO mappings. If we switch the user domain to "no access" when we enter the kernel, we can prevent the kernel from touching userspace.

However, the kernel needs to be able to access userspace via the various user accessor functions. With the wrapping in the previous patch, we can temporarily enable access when the kernel needs user access, and re-disable it afterwards.

This allows us to trap unintended accesses to userspace, e.g. those caused by an inadvertent dereference of the LIST_POISON* values, which, with appropriate user mappings set up, can be made to succeed. This in turn can allow use-after-free bugs to be exploited further than would otherwise be possible.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
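To illustrate the temporarily-enable pattern the message describes, here is a sketch only, not code from this patch: the uaccess_enable macro name comes from the parent patch, the register choice and the plain ldr are arbitrary, and a real accessor would also need exception-table fixup for faulting user addresses.

	@ Sketch: temporarily open the user domain around a single access.
	mrc	p15, 0, ip, c3, c0, 0	@ read the current DACR (Domain Access Control Register)
	stmfd	sp!, {ip}		@ save it, since uaccess_enable clobbers ip
	uaccess_enable ip		@ switch the user domain to "client"
	ldr	r3, [r0]		@ the one intended access to a user address
	ldmfd	sp!, {ip}		@ recover the saved DACR value
	mcr	p15, 0, ip, c3, c0, 0	@ restore it: user domain back to "no access"

The save_regs/load_regs macros in the diff below follow the same shape, but fold the saved DACR value into the existing stmfd/ldmfd so no extra stack operation is needed.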
Diffstat (limited to 'arch/arm/lib')
-rw-r--r--	arch/arm/lib/csumpartialcopyuser.S	14
1 file changed, 14 insertions, 0 deletions
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 1d0957e61f89..1712f132b80d 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -17,6 +17,19 @@
 
 		.text
 
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+		.macro	save_regs
+		mrc	p15, 0, ip, c3, c0, 0
+		stmfd	sp!, {r1, r2, r4 - r8, ip, lr}
+		uaccess_enable ip
+		.endm
+
+		.macro	load_regs
+		ldmfd	sp!, {r1, r2, r4 - r8, ip, lr}
+		mcr	p15, 0, ip, c3, c0, 0
+		ret	lr
+		.endm
+#else
 		.macro	save_regs
 		stmfd	sp!, {r1, r2, r4 - r8, lr}
 		.endm
@@ -24,6 +37,7 @@
 		.macro	load_regs
 		ldmfd	sp!, {r1, r2, r4 - r8, pc}
 		.endm
+#endif
 
 		.macro	load1b,	reg1
 		ldrusr	\reg1, r0, 1
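The uaccess_enable macro used by save_regs is defined in the parent patch, not here. Purely as a plausible expansion under CONFIG_CPU_SW_DOMAIN_PAN, assuming the user domain is domain 1 and an ARMv7-style isb, it might look like:

	.macro	uaccess_enable, tmp		@ hypothetical sketch, not the kernel's definition
	mrc	p15, 0, \tmp, c3, c0, 0		@ read the current DACR
	bic	\tmp, \tmp, #(3 << 2)		@ clear domain 1's 2-bit field (00 = "no access")
	orr	\tmp, \tmp, #(1 << 2)		@ set it to 01 = "client": accesses checked against page tables
	mcr	p15, 0, \tmp, c3, c0, 0		@ write it back
	isb					@ make the new domain permissions take effect
	.endm

This is why save_regs reads the DACR into ip before the stmfd: the pre-existing value is preserved on the stack across the copy, and load_regs writes it back with mcr before returning, so the user domain reverts to "no access" on exit.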