From 72113d0a7d90d950c7c9a87ab905bffb6bc5752d Mon Sep 17 00:00:00 2001
From: Masahiro Yamada
Date: Thu, 10 Feb 2022 11:11:24 +0900
Subject: signal.h: add linux/signal.h and asm/signal.h to UAPI compile-test coverage
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

linux/signal.h and asm/signal.h are currently excluded from the UAPI
compile-test because of errors like the following:

  HDRTEST usr/include/asm/signal.h
In file included from <command-line>:
./usr/include/asm/signal.h:103:9: error: unknown type name ‘size_t’
  103 |         size_t ss_size;
      |         ^~~~~~

The errors can be fixed by replacing size_t with __kernel_size_t.
Then, remove the no-header-test entries from usr/include/Makefile.

Signed-off-by: Masahiro Yamada
Reviewed-by: Arnd Bergmann
Reviewed-by: Geert Uytterhoeven
Acked-by: Geert Uytterhoeven
Signed-off-by: Arnd Bergmann
---
 arch/ia64/include/uapi/asm/signal.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/ia64')

diff --git a/arch/ia64/include/uapi/asm/signal.h b/arch/ia64/include/uapi/asm/signal.h
index 38166a88e4c9..63d574e802a2 100644
--- a/arch/ia64/include/uapi/asm/signal.h
+++ b/arch/ia64/include/uapi/asm/signal.h
@@ -90,7 +90,7 @@ struct siginfo;
 
 typedef struct sigaltstack {
 	void __user *ss_sp;
 	int ss_flags;
-	size_t ss_size;
+	__kernel_size_t ss_size;
 } stack_t;
-- cgit v1.2.3

From 12700c17fc286149324f92d6d380bc48e43f253d Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Tue, 15 Feb 2022 17:55:04 +0100
Subject: uaccess: generalize access_ok()

There are many different ways that access_ok() is defined across
architectures, but in the end, they all just compare against the
user_addr_max() value or they accept anything.

Provide one definition that works for most architectures, checking
against TASK_SIZE_MAX for user processes or skipping the check inside
of uaccess_kernel() sections.

For architectures without CONFIG_SET_FS(), this should be the fastest
check, as it comes down to a single comparison of a pointer against a
compile-time constant, while the architecture-specific versions tend to
do something more complex for historic reasons or get something wrong.

Type checking for __user annotations is handled inconsistently across
architectures, but this is easily simplified as well by using an inline
function that takes a 'const void __user *' argument. A handful of
callers need an extra __user annotation for this.

Some architectures had a trick of using 33-bit or 65-bit arithmetic on
the addresses to detect overflow; however, this simpler version uses
fewer registers, which means it can produce better object code in the
end despite needing a second (statically predicted) branch.
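As a condensed illustration of that check (a sketch of what the new
include/asm-generic/access_ok.h added below reduces to when the limit is a
compile-time constant; the real header also covers !CONFIG_MMU and
separate-address-space architectures):

static inline int __access_ok(const void __user *ptr, unsigned long size)
{
	unsigned long limit = TASK_SIZE_MAX;
	unsigned long addr = (unsigned long)ptr;

	/* both comparisons are against a constant, so this folds down to
	 * a single compare plus one statically predicted branch */
	return (size <= limit) && (addr <= (limit - size));
}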
Reviewed-by: Christoph Hellwig Acked-by: Mark Rutland [arm64, asm-generic] Acked-by: Geert Uytterhoeven Acked-by: Stafford Horne Acked-by: Dinh Nguyen Signed-off-by: Arnd Bergmann --- arch/Kconfig | 7 ++++ arch/alpha/include/asm/uaccess.h | 34 +++----------------- arch/arc/include/asm/uaccess.h | 29 ----------------- arch/arm/include/asm/uaccess.h | 20 +----------- arch/arm64/include/asm/uaccess.h | 11 +++---- arch/csky/include/asm/uaccess.h | 8 ----- arch/hexagon/include/asm/uaccess.h | 25 --------------- arch/ia64/include/asm/uaccess.h | 5 ++- arch/m68k/Kconfig.cpu | 1 + arch/m68k/include/asm/uaccess.h | 15 +-------- arch/microblaze/include/asm/uaccess.h | 8 +---- arch/mips/include/asm/uaccess.h | 29 +---------------- arch/nds32/include/asm/uaccess.h | 7 ++-- arch/nios2/include/asm/uaccess.h | 11 +------ arch/openrisc/include/asm/uaccess.h | 19 +---------- arch/parisc/Kconfig | 1 + arch/parisc/include/asm/uaccess.h | 12 ++----- arch/powerpc/include/asm/uaccess.h | 11 +------ arch/riscv/include/asm/uaccess.h | 31 +----------------- arch/s390/Kconfig | 1 + arch/s390/include/asm/uaccess.h | 14 +------- arch/sh/include/asm/uaccess.h | 22 ++----------- arch/sparc/Kconfig | 1 + arch/sparc/include/asm/uaccess.h | 3 -- arch/sparc/include/asm/uaccess_32.h | 18 +++-------- arch/sparc/include/asm/uaccess_64.h | 12 +------ arch/um/include/asm/uaccess.h | 5 +-- arch/x86/include/asm/uaccess.h | 14 ++------ arch/xtensa/include/asm/uaccess.h | 10 +----- include/asm-generic/access_ok.h | 60 +++++++++++++++++++++++++++++++++++ include/asm-generic/uaccess.h | 21 +----------- include/linux/uaccess.h | 7 ---- 32 files changed, 110 insertions(+), 362 deletions(-) create mode 100644 include/asm-generic/access_ok.h (limited to 'arch/ia64') diff --git a/arch/Kconfig b/arch/Kconfig index 678a80713b21..fa5db36bda67 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -898,6 +898,13 @@ config HAVE_SOFTIRQ_ON_OWN_STACK Architecture provides a function to run __do_softirq() on a separate stack. +config ALTERNATE_USER_ADDRESS_SPACE + bool + help + Architectures set this when the CPU uses separate address + spaces for kernel and user space pointers. In this case, the + access_ok() check on a __user pointer is skipped. + config PGTABLE_LEVELS int default 2 diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index 1b6f25efa247..82c5743fc9cd 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -20,28 +20,7 @@ #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) - -/* - * Is a address valid? This does a straightforward calculation rather - * than tests. - * - * Address valid if: - * - "addr" doesn't have any high-bits set - * - AND "size" doesn't have any high-bits set - * - AND "addr+size-(size != 0)" doesn't have any high-bits set - * - OR we are in kernel mode. - */ -#define __access_ok(addr, size) ({ \ - unsigned long __ao_a = (addr), __ao_b = (size); \ - unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b; \ - (get_fs().seg & (__ao_a | __ao_b | __ao_end)) == 0; }) - -#define access_ok(addr, size) \ -({ \ - __chk_user_ptr(addr); \ - __access_ok(((unsigned long)(addr)), (size)); \ -}) +#include /* * These are the main single-value transfer routines. 
They automatically @@ -105,7 +84,7 @@ extern void __get_user_unknown(void); long __gu_err = -EFAULT; \ unsigned long __gu_val = 0; \ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ - if (__access_ok((unsigned long)__gu_addr, size)) { \ + if (__access_ok(__gu_addr, size)) { \ __gu_err = 0; \ switch (size) { \ case 1: __get_user_8(__gu_addr); break; \ @@ -200,7 +179,7 @@ extern void __put_user_unknown(void); ({ \ long __pu_err = -EFAULT; \ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ - if (__access_ok((unsigned long)__pu_addr, size)) { \ + if (__access_ok(__pu_addr, size)) { \ __pu_err = 0; \ switch (size) { \ case 1: __put_user_8(x, __pu_addr); break; \ @@ -316,17 +295,14 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long len) extern long __clear_user(void __user *to, long len); -extern inline long +static inline long clear_user(void __user *to, long len) { - if (__access_ok((unsigned long)to, len)) + if (__access_ok(to, len)) len = __clear_user(to, len); return len; } -#define user_addr_max() \ - (uaccess_kernel() ? ~0UL : TASK_SIZE) - extern long strncpy_from_user(char *dest, const char __user *src, long count); extern __must_check long strnlen_user(const char __user *str, long n); diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index 783bfdb3bfa3..30f80b4be2ab 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -23,35 +23,6 @@ #include /* for generic string functions */ - -#define __kernel_ok (uaccess_kernel()) - -/* - * Algorithmically, for __user_ok() we want do: - * (start < TASK_SIZE) && (start+len < TASK_SIZE) - * where TASK_SIZE could either be retrieved from thread_info->addr_limit or - * emitted directly in code. - * - * This can however be rewritten as follows: - * (len <= TASK_SIZE) && (start+len < TASK_SIZE) - * - * Because it essentially checks if buffer end is within limit and @len is - * non-ngeative, which implies that buffer start will be within limit too. - * - * The reason for rewriting being, for majority of cases, @len is generally - * compile time constant, causing first sub-expression to be compile time - * subsumed. - * - * The second part would generate weird large LIMMs e.g. (0x6000_0000 - 0x10), - * so we check for TASK_SIZE using get_fs() since the addr_limit load from mem - * would already have been done at this call site for __kernel_ok() - * - */ -#define __user_ok(addr, sz) (((sz) <= TASK_SIZE) && \ - ((addr) <= (get_fs() - (sz)))) -#define __access_ok(addr, sz) (unlikely(__kernel_ok) || \ - likely(__user_ok((addr), (sz)))) - /*********** Single byte/hword/word copies ******************/ #define __get_user_fn(sz, u, k) \ diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index d20d78c34b94..2fcbec9c306c 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -55,21 +55,6 @@ extern int __put_user_bad(void); #ifdef CONFIG_MMU -/* - * We use 33-bit arithmetic here. Success returns zero, failure returns - * addr_limit. We take advantage that addr_limit will be zero for KERNEL_DS, - * so this will always return success in that case. 
- */ -#define __range_ok(addr, size) ({ \ - unsigned long flag, roksum; \ - __chk_user_ptr(addr); \ - __asm__(".syntax unified\n" \ - "adds %1, %2, %3; sbcscc %1, %1, %0; movcc %0, #0" \ - : "=&r" (flag), "=&r" (roksum) \ - : "r" (addr), "Ir" (size), "0" (TASK_SIZE) \ - : "cc"); \ - flag; }) - /* * This is a type: either unsigned long, if the argument fits into * that type, or otherwise unsigned long long. @@ -241,15 +226,12 @@ extern int __put_user_8(void *, unsigned long long); #else /* CONFIG_MMU */ -#define __addr_ok(addr) ((void)(addr), 1) -#define __range_ok(addr, size) ((void)(addr), 0) - #define get_user(x, p) __get_user(x, p) #define __put_user_check __put_user_nocheck #endif /* CONFIG_MMU */ -#define access_ok(addr, size) (__range_ok(addr, size) == 0) +#include #ifdef CONFIG_CPU_SPECTRE /* diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 199c553b740a..e8dce0cc5eaa 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -26,13 +26,7 @@ #include #include -static inline int __access_ok(const void __user *ptr, unsigned long size) -{ - unsigned long limit = TASK_SIZE_MAX; - unsigned long addr = (unsigned long)ptr; - - return (size <= limit) && (addr <= (limit - size)); -} +static inline int __access_ok(const void __user *ptr, unsigned long size); /* * Test whether a block of memory is a valid user space address. @@ -54,6 +48,9 @@ static inline int access_ok(const void __user *addr, unsigned long size) return likely(__access_ok(addr, size)); } +#define access_ok access_ok + +#include /* * User access enabling/disabling. diff --git a/arch/csky/include/asm/uaccess.h b/arch/csky/include/asm/uaccess.h index ac5a54f57d40..fec8f77ffc99 100644 --- a/arch/csky/include/asm/uaccess.h +++ b/arch/csky/include/asm/uaccess.h @@ -5,14 +5,6 @@ #define user_addr_max() (current_thread_info()->addr_limit.seg) -static inline int __access_ok(unsigned long addr, unsigned long size) -{ - unsigned long limit = user_addr_max(); - - return (size <= limit) && (addr <= (limit - size)); -} -#define __access_ok __access_ok - /* * __put_user_fn */ diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h index 719ba3f3c45c..bff77efc0d9a 100644 --- a/arch/hexagon/include/asm/uaccess.h +++ b/arch/hexagon/include/asm/uaccess.h @@ -12,31 +12,6 @@ */ #include -/* - * access_ok: - Checks if a user space pointer is valid - * @addr: User space pointer to start of block to check - * @size: Size of block to check - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Checks if a pointer to a block of memory in user space is valid. - * - * Returns true (nonzero) if the memory block *may* be valid, false (zero) - * if it is definitely invalid. - * - */ -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) -#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE) - -static inline int __access_ok(unsigned long addr, unsigned long size) -{ - unsigned long limit = TASK_SIZE; - - return (size <= limit) && (addr <= (limit - size)); -} -#define __access_ok __access_ok - /* * When a kernel-mode page fault is taken, the faulting instruction * address is checked against a table of exception_table_entries. 
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index e19d2dcc0ced..e242a3cc1330 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -50,8 +50,6 @@ #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) - /* * When accessing user memory, we need to make sure the entire area really is in * user-level space. In order to do this efficiently, we make sure that the page at @@ -65,7 +63,8 @@ static inline int __access_ok(const void __user *p, unsigned long size) return likely(addr <= seg) && (seg == KERNEL_DS.seg || likely(REGION_OFFSET(addr) < RGN_MAP_LIMIT)); } -#define access_ok(addr, size) __access_ok((addr), (size)) +#define __access_ok __access_ok +#include /* * These are the main single-value transfer routines. They automatically diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu index 0d00ef5117dc..16ea9a67723c 100644 --- a/arch/m68k/Kconfig.cpu +++ b/arch/m68k/Kconfig.cpu @@ -453,6 +453,7 @@ config CPU_HAS_NO_UNALIGNED config CPU_HAS_ADDRESS_SPACES bool + select ALTERNATE_USER_ADDRESS_SPACE config FPU bool diff --git a/arch/m68k/include/asm/uaccess.h b/arch/m68k/include/asm/uaccess.h index 9f0f1b6e14ed..64914872a5c9 100644 --- a/arch/m68k/include/asm/uaccess.h +++ b/arch/m68k/include/asm/uaccess.h @@ -10,20 +10,7 @@ #include #include #include - -/* We let the MMU do all checking */ -static inline int access_ok(const void __user *ptr, - unsigned long size) -{ - unsigned long limit = TASK_SIZE; - unsigned long addr = (unsigned long)ptr; - - if (IS_ENABLED(CONFIG_CPU_HAS_ADDRESS_SPACES) || - !IS_ENABLED(CONFIG_MMU)) - return 1; - - return (size <= limit) && (addr <= (limit - size)); -} +#include /* * Not all varients of the 68k family support the notion of address spaces. diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index 3fe96979d2c6..bf9b7657a65a 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -39,13 +39,7 @@ # define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) -static inline int __access_ok(unsigned long addr, unsigned long size) -{ - unsigned long limit = user_addr_max(); - - return (size <= limit) && (addr <= (limit - size)); -} -#define access_ok(addr, size) __access_ok((unsigned long)addr, size) +#include # define __FIXUP_SECTION ".section .fixup,\"ax\"\n" # define __EX_TABLE_SECTION ".section __ex_table,\"a\"\n" diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 73e543bc2e0e..c0cede273c7c 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -44,34 +44,7 @@ extern u64 __ua_limit; #endif /* CONFIG_64BIT */ -/* - * access_ok: - Checks if a user space pointer is valid - * @addr: User space pointer to start of block to check - * @size: Size of block to check - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Checks if a pointer to a block of memory in user space is valid. - * - * Returns true (nonzero) if the memory block may be valid, false (zero) - * if it is definitely invalid. - * - * Note that, depending on architecture, this function probably just - * checks that the pointer is in the user space range - after calling - * this function, memory access functions may still return -EFAULT. 
- */ - -static inline int __access_ok(const void __user *p, unsigned long size) -{ - unsigned long addr = (unsigned long)p; - unsigned long limit = TASK_SIZE_MAX; - - return (size <= limit) && (addr <= (limit - size)); -} - -#define access_ok(addr, size) \ - likely(__access_ok((addr), (size))) +#include /* * put_user: - Write a simple value into user space. diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h index 37a40981deb3..832d642a4068 100644 --- a/arch/nds32/include/asm/uaccess.h +++ b/arch/nds32/include/asm/uaccess.h @@ -38,18 +38,15 @@ extern int fixup_exception(struct pt_regs *regs); #define get_fs() (current_thread_info()->addr_limit) #define user_addr_max get_fs +#define uaccess_kernel() (get_fs() == KERNEL_DS) static inline void set_fs(mm_segment_t fs) { current_thread_info()->addr_limit = fs; } -#define uaccess_kernel() (get_fs() == KERNEL_DS) +#include -#define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs() -size)) - -#define access_ok(addr, size) \ - __range_ok((unsigned long)addr, (unsigned long)size) /* * Single-value transfer routines. They automatically use the right * size if we just have the right pointer type. Note that the functions diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h index a5cbe07cf0da..6664ddc0e8e5 100644 --- a/arch/nios2/include/asm/uaccess.h +++ b/arch/nios2/include/asm/uaccess.h @@ -30,19 +30,10 @@ #define get_fs() (current_thread_info()->addr_limit) #define set_fs(seg) (current_thread_info()->addr_limit = (seg)) -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) - -#define __access_ok(addr, len) \ - (((signed long)(((long)get_fs().seg) & \ - ((long)(addr) | (((long)(addr)) + (len)) | (len)))) == 0) - -#define access_ok(addr, len) \ - likely(__access_ok((unsigned long)(addr), (unsigned long)(len))) +#include # define __EX_TABLE_SECTION ".section __ex_table,\"a\"\n" -#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE) - /* * Zero Userspace */ diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index 120f5005461b..8f049ec99b3e 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -45,21 +45,7 @@ #define uaccess_kernel() (get_fs() == KERNEL_DS) -/* Ensure that the range from addr to addr+size is all within the process' - * address space - */ -static inline int __range_ok(unsigned long addr, unsigned long size) -{ - const mm_segment_t fs = get_fs(); - - return size <= fs && addr <= (fs - size); -} - -#define access_ok(addr, size) \ -({ \ - __chk_user_ptr(addr); \ - __range_ok((unsigned long)(addr), (size)); \ -}) +#include /* * These are the main single-value transfer routines. They automatically @@ -268,9 +254,6 @@ clear_user(void __user *addr, unsigned long size) return size; } -#define user_addr_max() \ - (uaccess_kernel() ? 
~0UL : TASK_SIZE) - extern long strncpy_from_user(char *dest, const char __user *src, long count); extern __must_check long strnlen_user(const char __user *str, long n); diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 43c1c880def6..15039fdd5413 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config PARISC def_bool y + select ALTERNATE_USER_ADDRESS_SPACE select ARCH_32BIT_OFF_T if !64BIT select ARCH_MIGHT_HAVE_PC_PARPORT select HAVE_FUNCTION_TRACER diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 0925bbd6db67..187f4bdff13e 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -11,15 +11,9 @@ #include #include -/* - * Note that since kernel addresses are in a separate address space on - * parisc, we don't need to do anything for access_ok(). - * We just let the page fault handler do the right thing. This also means - * that put_user is the same as __put_user, etc. - */ - -#define access_ok(uaddr, size) \ - ( (uaddr) == (uaddr) ) +#define TASK_SIZE_MAX DEFAULT_TASK_SIZE +#include +#include #define put_user __put_user #define get_user __get_user diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index a0032c2e7550..2e83217f52de 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -11,18 +11,9 @@ #ifdef __powerpc64__ /* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */ #define TASK_SIZE_MAX TASK_SIZE_USER64 -#else -#define TASK_SIZE_MAX TASK_SIZE #endif -static inline bool __access_ok(unsigned long addr, unsigned long size) -{ - return addr < TASK_SIZE_MAX && size <= TASK_SIZE_MAX - addr; -} - -#define access_ok(addr, size) \ - (__chk_user_ptr(addr), \ - __access_ok((unsigned long)(addr), (size))) +#include /* * These are the main single-value transfer routines. They automatically diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index 4407b9e48d2c..855450bed9f5 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -21,42 +21,13 @@ #include #include #include +#include #define __enable_user_access() \ __asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_SUM) : "memory") #define __disable_user_access() \ __asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory") -/** - * access_ok: - Checks if a user space pointer is valid - * @addr: User space pointer to start of block to check - * @size: Size of block to check - * - * Context: User context only. This function may sleep. - * - * Checks if a pointer to a block of memory in user space is valid. - * - * Returns true (nonzero) if the memory block may be valid, false (zero) - * if it is definitely invalid. - * - * Note that, depending on architecture, this function probably just - * checks that the pointer is in the user space range - after calling - * this function, memory access functions may still return -EFAULT. 
- */ -#define access_ok(addr, size) ({ \ - __chk_user_ptr(addr); \ - likely(__access_ok((unsigned long __force)(addr), (size))); \ -}) - -/* - * Ensure that the range [addr, addr+size) is within the process's - * address space - */ -static inline int __access_ok(unsigned long addr, unsigned long size) -{ - return size <= TASK_SIZE && addr <= TASK_SIZE - size; -} - /* * The exception table consists of pairs of addresses: the first is the * address of an instruction that is allowed to fault, and the second is diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index be9f39fd06df..fb48a62aa985 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -55,6 +55,7 @@ config S390 # Note: keep this list sorted alphabetically # imply IMA_SECURE_AND_OR_TRUSTED_BOOT + select ALTERNATE_USER_ADDRESS_SPACE select ARCH_32BIT_USTAT_F_TINODE select ARCH_BINFMT_ELF_STATE select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 29332edf46f0..5cb258cd9d29 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -17,22 +17,10 @@ #include #include #include +#include void debug_user_asce(int exit); -static inline int __range_ok(unsigned long addr, unsigned long size) -{ - return 1; -} - -#define __access_ok(addr, size) \ -({ \ - __chk_user_ptr(addr); \ - __range_ok((unsigned long)(addr), (size)); \ -}) - -#define access_ok(addr, size) __access_ok(addr, size) - unsigned long __must_check raw_copy_from_user(void *to, const void __user *from, unsigned long n); diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h index 8867bb04b00e..ccd219d74851 100644 --- a/arch/sh/include/asm/uaccess.h +++ b/arch/sh/include/asm/uaccess.h @@ -5,28 +5,10 @@ #include #include -#define __addr_ok(addr) \ - ((unsigned long __force)(addr) < current_thread_info()->addr_limit.seg) - -/* - * __access_ok: Check if address with size is OK or not. - * - * Uhhuh, this needs 33-bit arithmetic. We have a carry.. - * - * sum := addr + size; carry? --> flag = true; - * if (sum >= addr_limit) flag = true; - */ -#define __access_ok(addr, size) ({ \ - unsigned long __ao_a = (addr), __ao_b = (size); \ - unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b; \ - __ao_end >= __ao_a && __addr_ok(__ao_end); }) - -#define access_ok(addr, size) \ - (__chk_user_ptr(addr), \ - __access_ok((unsigned long __force)(addr), (size))) - #define user_addr_max() (current_thread_info()->addr_limit.seg) +#include + /* * Uh, these should become the main single-value transfer routines ... * They automatically use the right size if we just have the right diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 1cab1b284f1a..9f6f9bce5292 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -62,6 +62,7 @@ config SPARC32 config SPARC64 def_bool 64BIT + select ALTERNATE_USER_ADDRESS_SPACE select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_GRAPH_TRACER select HAVE_KRETPROBES diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h index 390094200fc4..ee75f69e3fcd 100644 --- a/arch/sparc/include/asm/uaccess.h +++ b/arch/sparc/include/asm/uaccess.h @@ -10,9 +10,6 @@ #include #endif -#define user_addr_max() \ - (uaccess_kernel() ? 
~0UL : TASK_SIZE) - long strncpy_from_user(char *dest, const char __user *src, long count); #endif diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index 4a12346bb69c..367747116260 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -25,17 +25,7 @@ #define get_fs() (current->thread.current_ds) #define set_fs(val) ((current->thread.current_ds) = (val)) -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) - -/* We have there a nice not-mapped page at PAGE_OFFSET - PAGE_SIZE, so that this test - * can be fairly lightweight. - * No one can read/write anything from userland in the kernel space by setting - * large size and address near to PAGE_OFFSET - a fault will break his intentions. - */ -#define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; }) -#define __kernel_ok (uaccess_kernel()) -#define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size))) -#define access_ok(addr, size) __access_ok((unsigned long)(addr), size) +#include /* Uh, these should become the main single-value transfer routines.. * They automatically use the right size if we just have the right @@ -47,13 +37,13 @@ * and hide all the ugliness from the user. */ #define put_user(x, ptr) ({ \ - unsigned long __pu_addr = (unsigned long)(ptr); \ + void __user *__pu_addr = (ptr); \ __chk_user_ptr(ptr); \ __put_user_check((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr))); \ }) #define get_user(x, ptr) ({ \ - unsigned long __gu_addr = (unsigned long)(ptr); \ + const void __user *__gu_addr = (ptr); \ __chk_user_ptr(ptr); \ __get_user_check((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr))); \ }) @@ -232,7 +222,7 @@ static inline unsigned long __clear_user(void __user *addr, unsigned long size) static inline unsigned long clear_user(void __user *addr, unsigned long n) { - if (n && __access_ok((unsigned long) addr, n)) + if (n && __access_ok(addr, n)) return __clear_user(addr, n); else return n; diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index 5c12fb46bc61..59b9a545df23 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -31,7 +31,7 @@ #define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)}) -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) +#include #define set_fs(val) \ do { \ @@ -61,16 +61,6 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un __chk_range_not_ok((unsigned long __force)(addr), size, limit); \ }) -static inline int __access_ok(const void __user * addr, unsigned long size) -{ - return 1; -} - -static inline int access_ok(const void __user * addr, unsigned long size) -{ - return 1; -} - void __retl_efault(void); /* Uh, these should become the main single-value transfer routines.. diff --git a/arch/um/include/asm/uaccess.h b/arch/um/include/asm/uaccess.h index 1ecfc96bcc50..7d9d60e41e4e 100644 --- a/arch/um/include/asm/uaccess.h +++ b/arch/um/include/asm/uaccess.h @@ -25,7 +25,7 @@ extern unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n); extern unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n); extern unsigned long __clear_user(void __user *mem, unsigned long len); -static inline int __access_ok(unsigned long addr, unsigned long size); +static inline int __access_ok(const void __user *ptr, unsigned long size); /* Teach asm-generic/uaccess.h that we have C functions for these. 
*/ #define __access_ok __access_ok @@ -36,8 +36,9 @@ static inline int __access_ok(unsigned long addr, unsigned long size); #include -static inline int __access_ok(unsigned long addr, unsigned long size) +static inline int __access_ok(const void __user *ptr, unsigned long size) { + unsigned long addr = (unsigned long)ptr; return __addr_range_nowrap(addr, size) && (__under_task_size(addr, size) || __access_ok_vsyscall(addr, size)); diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 201efcec66b7..f78e2b3501a1 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -12,18 +12,6 @@ #include #include -/* - * Test whether a block of memory is a valid user space address. - * Returns 0 if the range is valid, nonzero otherwise. - */ -static inline bool __access_ok(void __user *ptr, unsigned long size) -{ - unsigned long limit = TASK_SIZE_MAX; - unsigned long addr = ptr; - - return (size <= limit) && (addr <= (limit - size)); -} - #ifdef CONFIG_DEBUG_ATOMIC_SLEEP static inline bool pagefault_disabled(void); # define WARN_ON_IN_IRQ() \ @@ -55,6 +43,8 @@ static inline bool pagefault_disabled(void); likely(__access_ok(addr, size)); \ }) +#include + extern int __get_user_1(void); extern int __get_user_2(void); extern int __get_user_4(void); diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index 75bd8fbf52ba..0edd9e4b23d0 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -35,15 +35,7 @@ #define get_fs() (current->thread.current_ds) #define set_fs(val) (current->thread.current_ds = (val)) -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) - -#define __kernel_ok (uaccess_kernel()) -#define __user_ok(addr, size) \ - (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) -#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) -#define access_ok(addr, size) __access_ok((unsigned long)(addr), (size)) - -#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE) +#include /* * These are the main single-value transfer routines. They diff --git a/include/asm-generic/access_ok.h b/include/asm-generic/access_ok.h new file mode 100644 index 000000000000..d38cc5dad65b --- /dev/null +++ b/include/asm-generic/access_ok.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_ACCESS_OK_H__ +#define __ASM_GENERIC_ACCESS_OK_H__ + +/* + * Checking whether a pointer is valid for user space access. + * These definitions work on most architectures, but overrides can + * be used where necessary. + */ + +/* + * architectures with compat tasks have a variable TASK_SIZE and should + * override this to a constant. + */ +#ifndef TASK_SIZE_MAX +#define TASK_SIZE_MAX TASK_SIZE +#endif + +#ifndef uaccess_kernel +#ifdef CONFIG_SET_FS +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) +#else +#define uaccess_kernel() (0) +#endif +#endif + +#ifndef user_addr_max +#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE_MAX) +#endif + +#ifndef __access_ok +/* + * 'size' is a compile-time constant for most callers, so optimize for + * this case to turn the check into a single comparison against a constant + * limit and catch all possible overflows. + * On architectures with separate user address space (m68k, s390, parisc, + * sparc64) or those without an MMU, this should always return true. 
+ * + * This version was originally contributed by Jonas Bonn for the + * OpenRISC architecture, and was found to be the most efficient + * for constant 'size' and 'limit' values. + */ +static inline int __access_ok(const void __user *ptr, unsigned long size) +{ + unsigned long limit = user_addr_max(); + unsigned long addr = (unsigned long)ptr; + + if (IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) || + !IS_ENABLED(CONFIG_MMU)) + return true; + + return (size <= limit) && (addr <= (limit - size)); +} +#endif + +#ifndef access_ok +#define access_ok(addr, size) likely(__access_ok(addr, size)) +#endif + +#endif diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h index 0870fa11a7c5..ebc685dc8d74 100644 --- a/include/asm-generic/uaccess.h +++ b/include/asm-generic/uaccess.h @@ -114,28 +114,9 @@ static inline void set_fs(mm_segment_t fs) } #endif -#ifndef uaccess_kernel -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) -#endif - -#ifndef user_addr_max -#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE) -#endif - #endif /* CONFIG_SET_FS */ -#define access_ok(addr, size) __access_ok((unsigned long)(addr),(size)) - -/* - * The architecture should really override this if possible, at least - * doing a check on the get_fs() - */ -#ifndef __access_ok -static inline int __access_ok(unsigned long addr, unsigned long size) -{ - return 1; -} -#endif +#include /* * These are the main single-value transfer routines. They automatically diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 67e9bc94dc40..2c31667e62e0 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -33,13 +33,6 @@ typedef struct { /* empty dummy */ } mm_segment_t; -#ifndef TASK_SIZE_MAX -#define TASK_SIZE_MAX TASK_SIZE -#endif - -#define uaccess_kernel() (false) -#define user_addr_max() (TASK_SIZE_MAX) - static inline mm_segment_t force_uaccess_begin(void) { return (mm_segment_t) { }; -- cgit v1.2.3 From e5ef21d139957b8d064fefb7db42d1525605dd98 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 11 Feb 2022 21:35:55 +0100 Subject: ia64: remove CONFIG_SET_FS support ia64 only uses set_fs() in one file to handle unaligned access for both user space and kernel instructions. Rewrite this to explicitly pass around a flag about which one it is and drop the feature from the architecture. 
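In condensed form, the rewrite passes that flag down to the emulation
helpers instead of widening the address limit with set_fs(KERNEL_DS); the
following is a shortened sketch of the emulate_load()/emulate_store()
helpers added in the diff below:

static int emulate_load(void *val, unsigned long ifa, int len, bool kernel_mode)
{
	if (kernel_mode)
		/* unaligned access in a kernel instruction: use the nofault copy */
		return copy_from_kernel_nofault(val, (void *)ifa, len);

	return copy_from_user(val, (void __user *)ifa, len);
}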
Signed-off-by: Arnd Bergmann --- arch/ia64/Kconfig | 1 - arch/ia64/include/asm/processor.h | 4 --- arch/ia64/include/asm/thread_info.h | 2 -- arch/ia64/include/asm/uaccess.h | 21 ++++--------- arch/ia64/kernel/unaligned.c | 60 ++++++++++++++++++++++++------------- 5 files changed, 45 insertions(+), 43 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index a7e01573abd8..6b6a35b3d959 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -61,7 +61,6 @@ config IA64 select NEED_SG_DMA_LENGTH select NUMA if !FLATMEM select PCI_MSI_ARCH_FALLBACKS if PCI_MSI - select SET_FS select ZONE_DMA32 default y help diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h index 45365c2ef598..7cbce290f4e5 100644 --- a/arch/ia64/include/asm/processor.h +++ b/arch/ia64/include/asm/processor.h @@ -243,10 +243,6 @@ DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info); extern void print_cpu_info (struct cpuinfo_ia64 *); -typedef struct { - unsigned long seg; -} mm_segment_t; - #define SET_UNALIGN_CTL(task,value) \ ({ \ (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK) \ diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h index 51d20cb37706..ef83493e6778 100644 --- a/arch/ia64/include/asm/thread_info.h +++ b/arch/ia64/include/asm/thread_info.h @@ -27,7 +27,6 @@ struct thread_info { __u32 cpu; /* current CPU */ __u32 last_cpu; /* Last CPU thread ran on */ __u32 status; /* Thread synchronous flags */ - mm_segment_t addr_limit; /* user-level address space limit */ int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE __u64 utime; @@ -48,7 +47,6 @@ struct thread_info { .task = &tsk, \ .flags = 0, \ .cpu = 0, \ - .addr_limit = KERNEL_DS, \ .preempt_count = INIT_PREEMPT_COUNT, \ } diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index e242a3cc1330..60adadeb3e9e 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -42,26 +42,17 @@ #include /* - * For historical reasons, the following macros are grossly misnamed: - */ -#define KERNEL_DS ((mm_segment_t) { ~0UL }) /* cf. access_ok() */ -#define USER_DS ((mm_segment_t) { TASK_SIZE-1 }) /* cf. access_ok() */ - -#define get_fs() (current_thread_info()->addr_limit) -#define set_fs(x) (current_thread_info()->addr_limit = (x)) - -/* - * When accessing user memory, we need to make sure the entire area really is in - * user-level space. In order to do this efficiently, we make sure that the page at - * address TASK_SIZE is never valid. We also need to make sure that the address doesn't + * When accessing user memory, we need to make sure the entire area really is + * in user-level space. We also need to make sure that the address doesn't * point inside the virtually mapped linear page table. 
*/ static inline int __access_ok(const void __user *p, unsigned long size) { + unsigned long limit = TASK_SIZE; unsigned long addr = (unsigned long)p; - unsigned long seg = get_fs().seg; - return likely(addr <= seg) && - (seg == KERNEL_DS.seg || likely(REGION_OFFSET(addr) < RGN_MAP_LIMIT)); + + return likely((size <= limit) && (addr <= (limit - size)) && + likely(REGION_OFFSET(addr) < RGN_MAP_LIMIT)); } #define __access_ok __access_ok #include diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c index 6c1a8951dfbb..0acb5a0cd7ab 100644 --- a/arch/ia64/kernel/unaligned.c +++ b/arch/ia64/kernel/unaligned.c @@ -749,9 +749,25 @@ emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsi } } +static int emulate_store(unsigned long ifa, void *val, int len, bool kernel_mode) +{ + if (kernel_mode) + return copy_to_kernel_nofault((void *)ifa, val, len); + + return copy_to_user((void __user *)ifa, val, len); +} + +static int emulate_load(void *val, unsigned long ifa, int len, bool kernel_mode) +{ + if (kernel_mode) + return copy_from_kernel_nofault(val, (void *)ifa, len); + + return copy_from_user(val, (void __user *)ifa, len); +} static int -emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs) +emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs, + bool kernel_mode) { unsigned int len = 1 << ld.x6_sz; unsigned long val = 0; @@ -774,7 +790,7 @@ emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs) return -1; } /* this assumes little-endian byte-order: */ - if (copy_from_user(&val, (void __user *) ifa, len)) + if (emulate_load(&val, ifa, len, kernel_mode)) return -1; setreg(ld.r1, val, 0, regs); @@ -872,7 +888,8 @@ emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs) } static int -emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs) +emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs, + bool kernel_mode) { unsigned long r2; unsigned int len = 1 << ld.x6_sz; @@ -901,7 +918,7 @@ emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs) } /* this assumes little-endian byte-order: */ - if (copy_to_user((void __user *) ifa, &r2, len)) + if (emulate_store(ifa, &r2, len, kernel_mode)) return -1; /* @@ -1021,7 +1038,7 @@ float2mem_double (struct ia64_fpreg *init, struct ia64_fpreg *final) } static int -emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs) +emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs, bool kernel_mode) { struct ia64_fpreg fpr_init[2]; struct ia64_fpreg fpr_final[2]; @@ -1050,8 +1067,8 @@ emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs * This assumes little-endian byte-order. 
Note that there is no "ldfpe" * instruction: */ - if (copy_from_user(&fpr_init[0], (void __user *) ifa, len) - || copy_from_user(&fpr_init[1], (void __user *) (ifa + len), len)) + if (emulate_load(&fpr_init[0], ifa, len, kernel_mode) + || emulate_load(&fpr_init[1], (ifa + len), len, kernel_mode)) return -1; DPRINT("ld.r1=%d ld.imm=%d x6_sz=%d\n", ld.r1, ld.imm, ld.x6_sz); @@ -1126,7 +1143,8 @@ emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs static int -emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs) +emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs, + bool kernel_mode) { struct ia64_fpreg fpr_init; struct ia64_fpreg fpr_final; @@ -1152,7 +1170,7 @@ emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs) * See comments in ldX for descriptions on how the various loads are handled. */ if (ld.x6_op != 0x2) { - if (copy_from_user(&fpr_init, (void __user *) ifa, len)) + if (emulate_load(&fpr_init, ifa, len, kernel_mode)) return -1; DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz); @@ -1202,7 +1220,8 @@ emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs) static int -emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs) +emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs, + bool kernel_mode) { struct ia64_fpreg fpr_init; struct ia64_fpreg fpr_final; @@ -1244,7 +1263,7 @@ emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs) DDUMP("fpr_init =", &fpr_init, len); DDUMP("fpr_final =", &fpr_final, len); - if (copy_to_user((void __user *) ifa, &fpr_final, len)) + if (emulate_store(ifa, &fpr_final, len, kernel_mode)) return -1; /* @@ -1295,7 +1314,6 @@ void ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) { struct ia64_psr *ipsr = ia64_psr(regs); - mm_segment_t old_fs = get_fs(); unsigned long bundle[2]; unsigned long opcode; const struct exception_table_entry *eh = NULL; @@ -1304,6 +1322,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) load_store_t insn; } u; int ret = -1; + bool kernel_mode = false; if (ia64_psr(regs)->be) { /* we don't support big-endian accesses */ @@ -1367,13 +1386,13 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) if (unaligned_dump_stack) dump_stack(); } - set_fs(KERNEL_DS); + kernel_mode = true; } DPRINT("iip=%lx ifa=%lx isr=%lx (ei=%d, sp=%d)\n", regs->cr_iip, ifa, regs->cr_ipsr, ipsr->ri, ipsr->it); - if (__copy_from_user(bundle, (void __user *) regs->cr_iip, 16)) + if (emulate_load(bundle, regs->cr_iip, 16, kernel_mode)) goto failure; /* @@ -1467,7 +1486,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) case LDCCLR_IMM_OP: case LDCNC_IMM_OP: case LDCCLRACQ_IMM_OP: - ret = emulate_load_int(ifa, u.insn, regs); + ret = emulate_load_int(ifa, u.insn, regs, kernel_mode); break; case ST_OP: @@ -1478,7 +1497,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) fallthrough; case ST_IMM_OP: case STREL_IMM_OP: - ret = emulate_store_int(ifa, u.insn, regs); + ret = emulate_store_int(ifa, u.insn, regs, kernel_mode); break; case LDF_OP: @@ -1486,21 +1505,21 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) case LDFCCLR_OP: case LDFCNC_OP: if (u.insn.x) - ret = emulate_load_floatpair(ifa, u.insn, regs); + ret = emulate_load_floatpair(ifa, u.insn, regs, kernel_mode); else - ret = emulate_load_float(ifa, u.insn, regs); + ret = emulate_load_float(ifa, u.insn, regs, kernel_mode); 
break; case LDF_IMM_OP: case LDFA_IMM_OP: case LDFCCLR_IMM_OP: case LDFCNC_IMM_OP: - ret = emulate_load_float(ifa, u.insn, regs); + ret = emulate_load_float(ifa, u.insn, regs, kernel_mode); break; case STF_OP: case STF_IMM_OP: - ret = emulate_store_float(ifa, u.insn, regs); + ret = emulate_store_float(ifa, u.insn, regs, kernel_mode); break; default: @@ -1521,7 +1540,6 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) DPRINT("ipsr->ri=%d iip=%lx\n", ipsr->ri, regs->cr_iip); done: - set_fs(old_fs); /* restore original address limit */ return; failure: -- cgit v1.2.3
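For context beyond the series itself: a caller-side sketch of the pattern
that the generalized access_ok() serves. access_ok() and
raw_copy_from_user() are the existing kernel interfaces; the helper name
and surrounding code are purely illustrative:

/* Hypothetical helper: validate a user buffer, then do a raw copy. */
static int fetch_from_user(void *dst, const void __user *src, unsigned long len)
{
	if (!access_ok(src, len))
		return -EFAULT;

	/* raw_copy_from_user() returns the number of bytes left uncopied */
	return raw_copy_from_user(dst, src, len) ? -EFAULT : 0;
}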