| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-05-01 14:41:04 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-05-01 14:41:04 -0700 |
| commit | 5db6db0d400edd8bec274e34960cfa22838e1df5 (patch) | |
| tree | 3d7934f2eb27a2b72b87eae3c2918cf2e635d814 /arch/ia64 | |
| parent | 5fab10041b4389b61de7e7a49893190bae686241 (diff) | |
| parent | 2fefc97b2180518bac923fba3f79fdca1f41dc15 (diff) | |
| download | linux-5db6db0d400edd8bec274e34960cfa22838e1df5.tar.bz2 | |
Merge branch 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull uaccess unification updates from Al Viro:
"This is the uaccess unification pile. It's _not_ the end of uaccess
work, but the next batch of that will go into the next cycle. This one
mostly takes copy_from_user() and friends out of arch/* and gets the
zero-padding behaviour in sync for all architectures.
Dealing with the nocache/writethrough mess is for the next cycle;
fortunately, that's x86-only. Same for cleanups in iov_iter.c (I am
sold on access_ok() in there, BTW; just not in this pile), same for
reducing __copy_... callsites, strn*... stuff, etc. - there will be a
pile about as large as this one in the next merge window.
This one sat in -next for weeks. -3KLoC"
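The zero-padding behaviour referred to above: when copy_from_user() can only read part of the requested range, the uncopied tail of the kernel buffer must be cleared so stale kernel memory never leaks into it. A minimal userspace model of those semantics — fault_after, raw_copy_model() and copy_from_user_model() are invented names for illustration, not kernel API:

```c
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for an arch raw_copy_from_user(): copies until a
 * simulated fault and returns how many bytes were NOT copied (the kernel
 * primitive's contract). fault_after is a knob for the demo only. */
static unsigned long fault_after;

static unsigned long raw_copy_model(void *to, const void *from, unsigned long n)
{
	unsigned long ok = n < fault_after ? n : fault_after;
	memcpy(to, from, ok);
	return n - ok;
}

/* Shape of the generic wrapper this series lets every arch share:
 * the primitive only copies; the wrapper zero-pads the tail so a short
 * copy never exposes uninitialized kernel memory. */
static unsigned long copy_from_user_model(void *to, const void *from, unsigned long n)
{
	unsigned long left = raw_copy_model(to, from, n);
	if (left)
		memset((char *)to + (n - left), 0, left);
	return left;
}

int main(void)
{
	char src[8] = "ABCDEFG";
	char dst[8];

	memset(dst, 'x', sizeof(dst));
	fault_after = 3;	/* pretend the user page faults after 3 bytes */

	unsigned long left = copy_from_user_model(dst, src, sizeof(dst));
	printf("uncopied=%lu, dst[3]=%d (zeroed, not 'x')\n", left, dst[3]);
	return 0;
}
```

With the memset() living in exactly one generic place, each architecture's primitive only has to copy; the removal of the memset call from ia64's memcpy_mck.S fault handler in the diff below is the arch-side half of that move.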
* 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (96 commits)
HAVE_ARCH_HARDENED_USERCOPY is unconditional now
CONFIG_ARCH_HAS_RAW_COPY_USER is unconditional now
m32r: switch to RAW_COPY_USER
hexagon: switch to RAW_COPY_USER
microblaze: switch to RAW_COPY_USER
get rid of padding, switch to RAW_COPY_USER
ia64: get rid of copy_in_user()
ia64: sanitize __access_ok()
ia64: get rid of 'segment' argument of __do_{get,put}_user()
ia64: get rid of 'segment' argument of __{get,put}_user_check()
ia64: add extable.h
powerpc: get rid of zeroing, switch to RAW_COPY_USER
esas2r: don't open-code memdup_user()
alpha: fix stack smashing in old_adjtimex(2)
don't open-code kernel_setsockopt()
mips: switch to RAW_COPY_USER
mips: get rid of tail-zeroing in primitives
mips: make copy_from_user() zero tail explicitly
mips: clean and reorder the forest of macros...
mips: consolidate __invoke_... wrappers
...
Diffstat (limited to 'arch/ia64')

| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | arch/ia64/Kconfig | 1 |
| -rw-r--r-- | arch/ia64/include/asm/extable.h | 11 |
| -rw-r--r-- | arch/ia64/include/asm/uaccess.h | 102 |
| -rw-r--r-- | arch/ia64/lib/memcpy_mck.S | 13 |
| -rw-r--r-- | arch/ia64/mm/extable.c | 5 |

5 files changed, 43 insertions, 89 deletions
```diff
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 18ca6a9ce566..6a15083cc366 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -52,7 +52,6 @@ config IA64
 	select MODULES_USE_ELF_RELA
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select HAVE_ARCH_AUDITSYSCALL
-	select HAVE_ARCH_HARDENED_USERCOPY
 	default y
 	help
 	  The Itanium Processor Family is Intel's 64-bit successor to
diff --git a/arch/ia64/include/asm/extable.h b/arch/ia64/include/asm/extable.h
new file mode 100644
index 000000000000..20376e71eab4
--- /dev/null
+++ b/arch/ia64/include/asm/extable.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_IA64_EXTABLE_H
+#define _ASM_IA64_EXTABLE_H
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+
+struct exception_table_entry {
+	int insn;	/* location-relative address of insn this fixup is for */
+	int fixup;	/* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */
+};
+
+#endif
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index 471044be2a3b..82a7646c4416 100644
--- a/arch/ia64/include/asm/uaccess.h
+++ b/arch/ia64/include/asm/uaccess.h
@@ -33,14 +33,13 @@
  */

 #include <linux/compiler.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
 #include <linux/page-flags.h>
 #include <linux/mm.h>

 #include <asm/intrinsics.h>
 #include <asm/pgtable.h>
 #include <asm/io.h>
+#include <asm/extable.h>

 /*
  * For historical reasons, the following macros are grossly misnamed:
@@ -48,9 +47,6 @@
 #define KERNEL_DS	((mm_segment_t) { ~0UL })		/* cf. access_ok() */
 #define USER_DS		((mm_segment_t) { TASK_SIZE-1 })	/* cf. access_ok() */

-#define VERIFY_READ	0
-#define VERIFY_WRITE	1
-
 #define get_ds()  (KERNEL_DS)
 #define get_fs()  (current_thread_info()->addr_limit)
 #define set_fs(x) (current_thread_info()->addr_limit = (x))
@@ -63,14 +59,14 @@
  * address TASK_SIZE is never valid.  We also need to make sure that the address doesn't
  * point inside the virtually mapped linear page table.
  */
-#define __access_ok(addr, size, segment)						\
-({											\
-	__chk_user_ptr(addr);								\
-	(likely((unsigned long) (addr) <= (segment).seg)				\
-	 && ((segment).seg == KERNEL_DS.seg						\
-	     || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT)));	\
-})
-#define access_ok(type, addr, size)	__access_ok((addr), (size), get_fs())
+static inline int __access_ok(const void __user *p, unsigned long size)
+{
+	unsigned long addr = (unsigned long)p;
+	unsigned long seg = get_fs().seg;
+	return likely(addr <= seg) &&
+	 (seg == KERNEL_DS.seg || likely(REGION_OFFSET(addr) < RGN_MAP_LIMIT));
+}
+#define access_ok(type, addr, size)	__access_ok((addr), (size))

 /*
  * These are the main single-value transfer routines.  They automatically
@@ -80,8 +76,8 @@
  * (a) re-use the arguments for side effects (sizeof/typeof is ok)
  * (b) require any knowledge of processes at this stage
  */
-#define put_user(x, ptr)	__put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)), get_fs())
-#define get_user(x, ptr)	__get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())
+#define put_user(x, ptr)	__put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
+#define get_user(x, ptr)	__get_user_check((x), (ptr), sizeof(*(ptr)))

 /*
  * The "__xxx" versions do not do address space checking, useful when
@@ -184,13 +180,13 @@ extern void __get_user_unknown (void);
  * could clobber r8 and r9 (among others).  Thus, be careful not to evaluate it while
  * using r8/r9.
 */
-#define __do_get_user(check, x, ptr, size, segment)					\
+#define __do_get_user(check, x, ptr, size)						\
 ({											\
 	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);				\
 	__typeof__ (size) __gu_size = (size);						\
 	long __gu_err = -EFAULT;							\
 	unsigned long __gu_val = 0;							\
-	if (!check || __access_ok(__gu_ptr, size, segment))				\
+	if (!check || __access_ok(__gu_ptr, size))					\
 		switch (__gu_size) {							\
 			case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break;	\
 			case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break;	\
@@ -202,8 +198,8 @@ extern void __get_user_unknown (void);
 	__gu_err;									\
 })

-#define __get_user_nocheck(x, ptr, size)	__do_get_user(0, x, ptr, size, KERNEL_DS)
-#define __get_user_check(x, ptr, size, segment)	__do_get_user(1, x, ptr, size, segment)
+#define __get_user_nocheck(x, ptr, size)	__do_get_user(0, x, ptr, size)
+#define __get_user_check(x, ptr, size)		__do_get_user(1, x, ptr, size)

 extern void __put_user_unknown (void);

@@ -211,14 +207,14 @@ extern void __put_user_unknown (void);
  * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
  * could clobber r8 (among others).  Thus, be careful not to evaluate them while using r8.
 */
-#define __do_put_user(check, x, ptr, size, segment)					\
+#define __do_put_user(check, x, ptr, size)						\
 ({											\
 	__typeof__ (x) __pu_x = (x);							\
 	__typeof__ (*(ptr)) __user *__pu_ptr = (ptr);					\
 	__typeof__ (size) __pu_size = (size);						\
 	long __pu_err = -EFAULT;							\
 											\
-	if (!check || __access_ok(__pu_ptr, __pu_size, segment))			\
+	if (!check || __access_ok(__pu_ptr, __pu_size))					\
 		switch (__pu_size) {							\
 			case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break;	\
 			case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break;	\
@@ -229,8 +225,8 @@ extern void __put_user_unknown (void);
 	__pu_err;									\
 })

-#define __put_user_nocheck(x, ptr, size)	__do_put_user(0, x, ptr, size, KERNEL_DS)
-#define __put_user_check(x, ptr, size, segment)	__do_put_user(1, x, ptr, size, segment)
+#define __put_user_nocheck(x, ptr, size)	__do_put_user(0, x, ptr, size)
+#define __put_user_check(x, ptr, size)		__do_put_user(1, x, ptr, size)

 /*
  * Complex access routines
@@ -239,56 +235,19 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
 							       unsigned long count);

 static inline unsigned long
-__copy_to_user (void __user *to, const void *from, unsigned long count)
+raw_copy_to_user(void __user *to, const void *from, unsigned long count)
 {
-	check_object_size(from, count, true);
-
 	return __copy_user(to, (__force void __user *) from, count);
 }

 static inline unsigned long
-__copy_from_user (void *to, const void __user *from, unsigned long count)
+raw_copy_from_user(void *to, const void __user *from, unsigned long count)
 {
-	check_object_size(to, count, false);
-
 	return __copy_user((__force void __user *) to, from, count);
 }

-#define __copy_to_user_inatomic		__copy_to_user
-#define __copy_from_user_inatomic	__copy_from_user
-#define copy_to_user(to, from, n)							\
-({											\
-	void __user *__cu_to = (to);							\
-	const void *__cu_from = (from);							\
-	long __cu_len = (n);								\
-											\
-	if (__access_ok(__cu_to, __cu_len, get_fs())) {					\
-		check_object_size(__cu_from, __cu_len, true);				\
-		__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len);	\
-	}										\
-	__cu_len;									\
-})
-
-static inline unsigned long
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	check_object_size(to, n, false);
-	if (likely(__access_ok(from, n, get_fs())))
-		n = __copy_user((__force void __user *) to, from, n);
-	else
-		memset(to, 0, n);
-	return n;
-}
-
-#define __copy_in_user(to, from, size)	__copy_user((to), (from), (size))
-
-static inline unsigned long
-copy_in_user (void __user *to, const void __user *from, unsigned long n)
-{
-	if (likely(access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n)))
-		n = __copy_user(to, from, n);
-	return n;
-}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER

 extern unsigned long __do_clear_user (void __user *, unsigned long);

@@ -297,7 +256,7 @@ extern unsigned long __do_clear_user (void __user *, unsigned long);
 #define clear_user(to, n)				\
 ({							\
 	unsigned long __cu_len = (n);			\
-	if (__access_ok(to, __cu_len, get_fs()))	\
+	if (__access_ok(to, __cu_len))			\
 		__cu_len = __do_clear_user(to, __cu_len);	\
 	__cu_len;					\
 })
@@ -313,7 +272,7 @@ extern long __must_check __strncpy_from_user (char *to, const char __user *from,
 ({									\
 	const char __user * __sfu_from = (from);			\
 	long __sfu_ret = -EFAULT;					\
-	if (__access_ok(__sfu_from, 0, get_fs()))			\
+	if (__access_ok(__sfu_from, 0))					\
 		__sfu_ret = __strncpy_from_user((to), __sfu_from, (n));	\
 	__sfu_ret;							\
 })
@@ -325,7 +284,7 @@ extern unsigned long __strlen_user (const char __user *);
 ({							\
 	const char __user *__su_str = (str);		\
 	unsigned long __su_ret = 0;			\
-	if (__access_ok(__su_str, 0, get_fs()))		\
+	if (__access_ok(__su_str, 0))			\
 		__su_ret = __strlen_user(__su_str);	\
 	__su_ret;					\
 })
@@ -341,18 +300,11 @@ extern unsigned long __strnlen_user (const char __user *, long);
 ({							\
 	const char __user *__su_str = (str);		\
 	unsigned long __su_ret = 0;			\
-	if (__access_ok(__su_str, 0, get_fs()))		\
+	if (__access_ok(__su_str, 0))			\
 		__su_ret = __strnlen_user(__su_str, len);	\
 	__su_ret;					\
 })

-#define ARCH_HAS_RELATIVE_EXTABLE
-
-struct exception_table_entry {
-	int insn;	/* location-relative address of insn this fixup is for */
-	int fixup;	/* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */
-};
-
 #define ARCH_HAS_TRANSLATE_MEM_PTR	1
 static __inline__ void *
 xlate_dev_mem_ptr(phys_addr_t p)
diff --git a/arch/ia64/lib/memcpy_mck.S b/arch/ia64/lib/memcpy_mck.S
index b264b6a7967b..bbbadc478f5b 100644
--- a/arch/ia64/lib/memcpy_mck.S
+++ b/arch/ia64/lib/memcpy_mck.S
@@ -556,9 +556,6 @@ EK(.ex_handler,  (p17)	st8	[dst1]=r39,8);						\
 #define D	r22
 #define F	r28

-#define memset_arg0	r32
-#define memset_arg2	r33
-
 #define saved_retval	loc0
 #define saved_rtlink	loc1
 #define saved_pfs_stack	loc2
@@ -622,7 +619,7 @@ EK(.ex_handler,  (p17)	st8	[dst1]=r39,8);						\
  *	(faulting_addr - orig_dst)	-> len to faulting st address
  * B =	(cur_dst - orig_dst)		-> len copied so far
  * C =	A - B				-> len need to be copied
- * D =	orig_len - A			-> len need to be zeroed
+ * D =	orig_len - A			-> len need to be left along
 */
 (p6)	sub	A = F, saved_in0
 (p7)	sub	A = F, saved_in1
@@ -638,9 +635,6 @@ EK(.ex_handler,  (p17)	st8	[dst1]=r39,8);						\
 	sub	D = saved_in2, A
 	;;
 	cmp.gt	p8,p0=C,r0		// more than 1 byte?
-	add	memset_arg0=saved_in0, A
-(p6)	mov	memset_arg2=0		// copy_to_user should not call memset
-(p7)	mov	memset_arg2=D		// copy_from_user need to have kbuf zeroed
 	mov	r8=0
 	mov	saved_retval = D
 	mov	saved_rtlink = b0
@@ -652,11 +646,6 @@ EK(.ex_handler,  (p17)	st8	[dst1]=r39,8);						\
 	;;
 	add	saved_retval=saved_retval,r8	// above might return non-zero value

-	cmp.gt	p8,p0=memset_arg2,r0	// more than 1 byte?
-	mov	out0=memset_arg0	// *s
-	mov	out1=r0			// c
-	mov	out2=memset_arg2	// n
-(p8)	br.call.sptk.few b0=memset
 	;;

 	mov	retval=saved_retval
diff --git a/arch/ia64/mm/extable.c b/arch/ia64/mm/extable.c
index 4edb816aba9a..10dd4a66e167 100644
--- a/arch/ia64/mm/extable.c
+++ b/arch/ia64/mm/extable.c
@@ -5,7 +5,10 @@
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  */

-#include <linux/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/extable.h>
+#include <asm/errno.h>
+#include <asm/processor.h>

 void
 ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e)
```
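The new asm/extable.h is the ia64 instance of ARCH_HAS_RELATIVE_EXTABLE: each entry stores 32-bit offsets relative to the entry's own fields instead of two 64-bit absolute addresses, halving the table and keeping it position-independent. A standalone sketch of the encode/decode arithmetic — the ex_*_addr helper names are illustrative, not the kernel's exact API:

```c
#include <stdio.h>
#include <stdint.h>

/* Same layout as the new ia64 asm/extable.h: each field stores the
 * target address minus the address of the field itself. */
struct exception_table_entry {
	int insn;
	int fixup;
};

/* Decode a self-relative offset back into an absolute address. */
static uintptr_t ex_insn_addr(const struct exception_table_entry *e)
{
	return (uintptr_t)&e->insn + e->insn;
}

static uintptr_t ex_fixup_addr(const struct exception_table_entry *e)
{
	return (uintptr_t)&e->fixup + e->fixup;
}

int main(void)
{
	/* Stand-ins for a faulting instruction and its fixup label; static
	 * so the deltas stay within 32 bits, as they do in a kernel image. */
	static const char insn_site[1], fixup_site[1];
	static struct exception_table_entry e;

	e.insn  = (int)((uintptr_t)insn_site  - (uintptr_t)&e.insn);
	e.fixup = (int)((uintptr_t)fixup_site - (uintptr_t)&e.fixup);

	printf("insn ok: %d, fixup ok: %d\n",
	       ex_insn_addr(&e)  == (uintptr_t)insn_site,
	       ex_fixup_addr(&e) == (uintptr_t)fixup_site);
	return 0;
}
```

On top of this generic scheme, ia64 layers one arch-specific twist noted in the struct comment: if bit 2 of fixup is set, the fault handler also zeroes r9, which ia64_handle_exception() in arch/ia64/mm/extable.c takes care of.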