author      Mark Rutland <mark.rutland@arm.com>          2020-12-02 13:15:53 +0000
committer   Catalin Marinas <catalin.marinas@arm.com>    2020-12-02 19:49:11 +0000
commit      fc703d80130b1c9d6783f4cbb9516fd5fe4a750d (patch)
tree        81934957fb9bb197ea0da8614dbd45d0387043ff /arch/arm64/include/asm/uaccess.h
parent      f253d827f33cb5a5990b5cfd271941d1a21ecd85 (diff)
arm64: uaccess: split user/kernel routines
This patch separates arm64's user and kernel memory access primitives into distinct routines, adding new __{get,put}_kernel_nofault() helpers to access kernel memory, upon which core code builds larger copy routines.

The kernel access routines (using LDR/STR) are not affected by PAN (when legitimately accessing kernel memory), nor are they affected by UAO. Switching to KERNEL_DS may set UAO, but this does not adversely affect the kernel access routines.

The user access routines (using LDTR/STTR) are not affected by PAN (when legitimately accessing user memory), but are affected by UAO. As these are only legitimate to use under USER_DS with UAO clear, this should not be problematic.

Routines performing atomics to user memory (futex and deprecated instruction emulation) still need to transiently clear PAN, and these are left as-is. These are never used on kernel memory.

Subsequent patches will refactor the uaccess helpers to remove redundant code, and will also remove the redundant PAN/UAO manipulation.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20201202131558.39270-8-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
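For context, a minimal sketch of how core code can build on the new helpers, modelled loosely on the generic copy_{from,to}_kernel_nofault() routines in mm/maccess.c; the wrapper names below are illustrative and not part of this patch:

/*
 * Illustrative only: hypothetical wrappers around the new
 * __{get,put}_kernel_nofault() helpers. A faulting access is fixed up
 * via the exception table and branches to the caller-supplied error
 * label rather than oopsing.
 */
static long probe_read_long(long *dst, const long *src)
{
	__get_kernel_nofault(dst, src, long, Efault);
	return 0;
Efault:
	return -EFAULT;
}

static long probe_write_long(long *dst, long val)
{
	__put_kernel_nofault(dst, &val, long, Efault);
	return 0;
Efault:
	return -EFAULT;
}

The err_label convention lets a multi-access copy loop bail out to a single fixup point on the first faulting access, which is how the core nofault copy routines consume these helpers.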
Diffstat (limited to 'arch/arm64/include/asm/uaccess.h')
-rw-r--r--  arch/arm64/include/asm/uaccess.h  64
1 file changed, 38 insertions(+), 26 deletions(-)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 743f209d3fab..eb04f02299fe 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -24,6 +24,8 @@
#include <asm/memory.h>
#include <asm/extable.h>
+#define HAVE_GET_KERNEL_NOFAULT
+
#define get_fs() (current_thread_info()->addr_limit)
static inline void set_fs(mm_segment_t fs)
@@ -253,10 +255,9 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
* The "__xxx_error" versions set the third argument to -EFAULT if an error
* occurs, and leave it unchanged on success.
*/
-#define __get_mem_asm(instr, alt_instr, reg, x, addr, err, feature) \
+#define __get_mem_asm(load, reg, x, addr, err) \
asm volatile( \
- "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
- alt_instr " " reg "1, [%2]\n", feature) \
+ "1: " load " " reg "1, [%2]\n" \
"2:\n" \
" .section .fixup, \"ax\"\n" \
" .align 2\n" \
@@ -268,25 +269,21 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
: "+r" (err), "=&r" (x) \
: "r" (addr), "i" (-EFAULT))
-#define __raw_get_mem(x, ptr, err) \
+#define __raw_get_mem(ldr, x, ptr, err) \
do { \
unsigned long __gu_val; \
switch (sizeof(*(ptr))) { \
case 1: \
- __get_mem_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err)); \
break; \
case 2: \
- __get_mem_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err)); \
break; \
case 4: \
- __get_mem_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __get_mem_asm(ldr, "%w", __gu_val, (ptr), (err)); \
break; \
case 8: \
- __get_mem_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __get_mem_asm(ldr, "%x", __gu_val, (ptr), (err)); \
break; \
default: \
BUILD_BUG(); \
@@ -298,7 +295,7 @@ do { \
do { \
__chk_user_ptr(ptr); \
uaccess_enable_not_uao(); \
- __raw_get_mem(x, ptr, err); \
+ __raw_get_mem("ldtr", x, ptr, err); \
uaccess_disable_not_uao(); \
} while (0)
@@ -323,10 +320,19 @@ do { \
#define get_user __get_user
-#define __put_mem_asm(instr, alt_instr, reg, x, addr, err, feature) \
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+ int __gkn_err = 0; \
+ \
+ __raw_get_mem("ldr", *((type *)(dst)), \
+ (__force type *)(src), __gkn_err); \
+ if (unlikely(__gkn_err)) \
+ goto err_label; \
+} while (0)
+
+#define __put_mem_asm(store, reg, x, addr, err) \
asm volatile( \
- "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
- alt_instr " " reg "1, [%2]\n", feature) \
+ "1: " store " " reg "1, [%2]\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
@@ -337,25 +343,21 @@ do { \
: "+r" (err) \
: "r" (x), "r" (addr), "i" (-EFAULT))
-#define __raw_put_mem(x, ptr, err) \
+#define __raw_put_mem(str, x, ptr, err) \
do { \
__typeof__(*(ptr)) __pu_val = (x); \
switch (sizeof(*(ptr))) { \
case 1: \
- __put_mem_asm("strb", "sttrb", "%w", __pu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __put_mem_asm(str "b", "%w", __pu_val, (ptr), (err)); \
break; \
case 2: \
- __put_mem_asm("strh", "sttrh", "%w", __pu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __put_mem_asm(str "h", "%w", __pu_val, (ptr), (err)); \
break; \
case 4: \
- __put_mem_asm("str", "sttr", "%w", __pu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __put_mem_asm(str, "%w", __pu_val, (ptr), (err)); \
break; \
case 8: \
- __put_mem_asm("str", "sttr", "%x", __pu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __put_mem_asm(str, "%x", __pu_val, (ptr), (err)); \
break; \
default: \
BUILD_BUG(); \
@@ -366,7 +368,7 @@ do { \
do { \
__chk_user_ptr(ptr); \
uaccess_enable_not_uao(); \
- __raw_put_mem(x, ptr, err); \
+ __raw_put_mem("sttr", x, ptr, err); \
uaccess_disable_not_uao(); \
} while (0)
@@ -391,6 +393,16 @@ do { \
#define put_user __put_user
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+ int __pkn_err = 0; \
+ \
+ __raw_put_mem("str", *((type *)(src)), \
+ (__force type *)(dst), __pkn_err); \
+ if (unlikely(__pkn_err)) \
+ goto err_label; \
+} while(0)
+
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n) \
({ \