author     Linus Torvalds <torvalds@linux-foundation.org>   2017-05-01 14:41:04 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-05-01 14:41:04 -0700
commit     5db6db0d400edd8bec274e34960cfa22838e1df5 (patch)
tree       3d7934f2eb27a2b72b87eae3c2918cf2e635d814 /arch/xtensa/include
parent     5fab10041b4389b61de7e7a49893190bae686241 (diff)
parent     2fefc97b2180518bac923fba3f79fdca1f41dc15 (diff)
download   linux-5db6db0d400edd8bec274e34960cfa22838e1df5.tar.bz2
Merge branch 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull uaccess unification updates from Al Viro:
"This is the uaccess unification pile. It's _not_ the end of uaccess
work, but the next batch of that will go into the next cycle. This one
mostly takes copy_from_user() and friends out of arch/* and gets the
zero-padding behaviour in sync for all architectures.
Dealing with the nocache/writethrough mess is for the next cycle;
fortunately, that's x86-only. Same for cleanups in iov_iter.c (I am
sold on access_ok() in there, BTW; just not in this pile), same for
reducing __copy_... callsites, strn*... stuff, etc. - there will be a
pile about as large as this one in the next merge window.
This one sat in -next for weeks. -3KLoC"
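[Context: under the unified scheme, each architecture supplies only raw_copy_{to,from}_user(), which return the number of bytes left uncopied and do no checking or zeroing of their own; access_ok() and the tail-zeroing live once in the generic layer (include/linux/uaccess.h / lib/usercopy.c). A condensed sketch of the generic read path as of this cycle -- simplified, with instrumentation hooks omitted:]

        static inline unsigned long
        _copy_from_user(void *to, const void __user *from, unsigned long n)
        {
                unsigned long res = n;

                might_fault();
                /* the single access_ok() check -- arches no longer do this */
                if (likely(access_ok(VERIFY_READ, from, n)))
                        res = raw_copy_from_user(to, from, n);  /* arch hook */
                /* uniform zero-padding of whatever was not copied */
                if (unlikely(res))
                        memset(to + (n - res), 0, res);
                return res;
        }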
* 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (96 commits)
HAVE_ARCH_HARDENED_USERCOPY is unconditional now
CONFIG_ARCH_HAS_RAW_COPY_USER is unconditional now
m32r: switch to RAW_COPY_USER
hexagon: switch to RAW_COPY_USER
microblaze: switch to RAW_COPY_USER
get rid of padding, switch to RAW_COPY_USER
ia64: get rid of copy_in_user()
ia64: sanitize __access_ok()
ia64: get rid of 'segment' argument of __do_{get,put}_user()
ia64: get rid of 'segment' argument of __{get,put}_user_check()
ia64: add extable.h
powerpc: get rid of zeroing, switch to RAW_COPY_USER
esas2r: don't open-code memdup_user()
alpha: fix stack smashing in old_adjtimex(2)
don't open-code kernel_setsockopt()
mips: switch to RAW_COPY_USER
mips: get rid of tail-zeroing in primitives
mips: make copy_from_user() zero tail explicitly
mips: clean and reorder the forest of macros...
mips: consolidate __invoke_... wrappers
...
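[Each per-architecture "switch to RAW_COPY_USER" conversion above implements the same narrow contract, which the xtensa diff below shows concretely. A schematic sketch of that contract -- not any particular architecture's code; __arch_copy_user is a hypothetical stand-in for the arch's assembly copy routine:]

        /*
         * Both helpers return the number of bytes NOT copied; they do
         * no access_ok() checking and no tail zeroing -- the generic
         * layer handles both.
         */
        static inline unsigned long
        raw_copy_from_user(void *to, const void __user *from, unsigned long n)
        {
                return __arch_copy_user(to, (__force const void *)from, n);
        }

        static inline unsigned long
        raw_copy_to_user(void __user *to, const void *from, unsigned long n)
        {
                return __arch_copy_user((__force void *)to, from, n);
        }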
Diffstat (limited to 'arch/xtensa/include')
-rw-r--r--  arch/xtensa/include/asm/Kbuild          1
-rw-r--r--  arch/xtensa/include/asm/asm-uaccess.h   3
-rw-r--r--  arch/xtensa/include/asm/uaccess.h       67
3 files changed, 10 insertions, 61 deletions
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index f41408c53fe1..cc23e9ecc6bb 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -6,6 +6,7 @@ generic-y += dma-contiguous.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
+generic-y += extable.h
 generic-y += fcntl.h
 generic-y += hardirq.h
 generic-y += ioctl.h
diff --git a/arch/xtensa/include/asm/asm-uaccess.h b/arch/xtensa/include/asm/asm-uaccess.h
index a7a110039786..dfdf9fae1f84 100644
--- a/arch/xtensa/include/asm/asm-uaccess.h
+++ b/arch/xtensa/include/asm/asm-uaccess.h
@@ -19,9 +19,6 @@
 #include <linux/errno.h>
 #include <asm/types.h>
 
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
 #include <asm/current.h>
 #include <asm/asm-offsets.h>
 #include <asm/processor.h>
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 848a3d736bcb..2e7bac0d4b2c 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -16,14 +16,9 @@
 #ifndef _XTENSA_UACCESS_H
 #define _XTENSA_UACCESS_H
 
-#include <linux/errno.h>
 #include <linux/prefetch.h>
 #include <asm/types.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-#include <linux/sched.h>
+#include <asm/extable.h>
 
 /*
  * The fs value determines whether argument validity checking should
@@ -43,7 +38,7 @@
 
 #define segment_eq(a, b) ((a).seg == (b).seg)
 
-#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+#define __kernel_ok (uaccess_kernel())
 #define __user_ok(addr, size) \
 	(((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
 #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
@@ -239,60 +234,22 @@ __asm__ __volatile__( \
  * Copy to/from user space
  */
 
-/*
- * We use a generic, arbitrary-sized copy subroutine. The Xtensa
- * architecture would cause heavy code bloat if we tried to inline
- * these functions and provide __constant_copy_* equivalents like the
- * i386 versions. __xtensa_copy_user is quite efficient. See the
- * .fixup section of __xtensa_copy_user for a discussion on the
- * X_zeroing equivalents for Xtensa.
- */
-
 extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
-#define __copy_user(to, from, size) __xtensa_copy_user(to, from, size)
-
 
 static inline unsigned long
-__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	return __copy_user(to, from, n);
-}
-
-static inline unsigned long
-__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
-{
-	return __copy_user(to, from, n);
+	prefetchw(to);
+	return __xtensa_copy_user(to, (__force const void *)from, n);
 }
-
 static inline unsigned long
-__generic_copy_to_user(void *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	prefetch(from);
-	if (access_ok(VERIFY_WRITE, to, n))
-		return __copy_user(to, from, n);
-	return n;
-}
-
-static inline unsigned long
-__generic_copy_from_user(void *to, const void *from, unsigned long n)
-{
-	prefetchw(to);
-	if (access_ok(VERIFY_READ, from, n))
-		return __copy_user(to, from, n);
-	else
-		memset(to, 0, n);
-	return n;
+	return __xtensa_copy_user((__force void *)to, from, n);
 }
-
-#define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n))
-#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))
-#define __copy_to_user(to, from, n) \
-	__generic_copy_to_user_nocheck((to), (from), (n))
-#define __copy_from_user(to, from, n) \
-	__generic_copy_from_user_nocheck((to), (from), (n))
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 /*
  * We need to return the number of bytes not cleared. Our memset()
@@ -348,10 +305,4 @@ static inline long strnlen_user(const char *str, long len)
 	return __strnlen_user(str, len);
 }
 
-
-struct exception_table_entry
-{
-	unsigned long insn, fixup;
-};
-
 #endif /* _XTENSA_UACCESS_H */
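[For callers, nothing changes: copy_from_user() still returns the number of bytes it could not copy, and a short copy leaves the destination tail zeroed. A minimal, hypothetical caller illustrating the contract -- struct foo_req and foo_set() are made up for illustration, not part of this patch:]

        struct foo_req {
                u32 flags;
                u32 len;
        };

        static long foo_set(void __user *arg)
        {
                struct foo_req req;

                /* non-zero return => some bytes were left uncopied */
                if (copy_from_user(&req, arg, sizeof(req)))
                        return -EFAULT;
                /* ... act on req.flags / req.len ... */
                return 0;
        }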