author    Al Viro <viro@zeniv.linux.org.uk>  2017-03-25 19:33:21 -0400
committer Al Viro <viro@zeniv.linux.org.uk>  2017-03-29 12:06:28 -0400
commit    beba3a20bf90ce1b93e24592c3ebf0d0bb581bbe (patch)
tree      065a869987df6450138fd091c9d215e2d2fca498 /arch/x86/lib
parent    a41e0d754240fe8ca9c4f2070bf67e3b0228aa22 (diff)
download  linux-beba3a20bf90ce1b93e24592c3ebf0d0bb581bbe.tar.bz2
x86: switch to RAW_COPY_USER
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
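
Under the raw-copy scheme this commit converts x86 to, the architecture supplies only the raw copy primitives; the access_ok() check and the zero-padding of short reads move into common code, which is why the arch-side wrappers below can simply be deleted. As a sketch of what the generic side looks like (modeled on the lib/usercopy.c wrappers this series introduces; the exact bodies here are illustrative, not verbatim kernel code):

unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned long res = n;

        if (likely(access_ok(VERIFY_READ, from, n)))
                res = raw_copy_from_user(to, from, n);  /* arch-supplied primitive */
        if (unlikely(res))
                memset(to + n - res, 0, res);           /* zero-pad the uncopied tail */
        return res;
}

unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                n = raw_copy_to_user(to, from, n);      /* arch-supplied primitive */
        return n;
}

Both return the number of bytes left uncopied, so a raw primitive only has to report leftover bytes; it no longer checks the address range or zero-pads itself.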
Diffstat (limited to 'arch/x86/lib')
-rw-r--r--  arch/x86/lib/usercopy.c    |  55
-rw-r--r--  arch/x86/lib/usercopy_32.c | 170
2 files changed, 3 insertions(+), 222 deletions(-)
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index a851f3d199c2..c8c6ad0d58b8 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -4,12 +4,9 @@
  * For licencing details see kernel-base/COPYING
  */
 
-#include <linux/highmem.h>
+#include <linux/uaccess.h>
 #include <linux/export.h>
 
-#include <asm/word-at-a-time.h>
-#include <linux/sched.h>
-
 /*
  * We rely on the nested NMI work to allow atomic faults from the NMI path; the
  * nested NMI paths are careful to preserve CR2.
@@ -34,53 +31,3 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 	return ret;
 }
 EXPORT_SYMBOL_GPL(copy_from_user_nmi);
-
-/**
- * copy_to_user: - Copy a block of data into user space.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
-{
-	if (access_ok(VERIFY_WRITE, to, n))
-		n = __copy_to_user(to, from, n);
-	return n;
-}
-EXPORT_SYMBOL(_copy_to_user);
-
-/**
- * copy_from_user: - Copy a block of data from user space.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
-{
-	unsigned long res = n;
-	if (access_ok(VERIFY_READ, from, n))
-		res = __copy_from_user_inatomic(to, from, n);
-	if (unlikely(res))
-		memset(to + n - res, 0, res);
-	return res;
-}
-EXPORT_SYMBOL(_copy_from_user);
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 02aa7aa8b9f3..bd057a4ffe6e 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -5,12 +5,7 @@
  * Copyright 1997 Andi Kleen <ak@muc.de>
  * Copyright 1997 Linus Torvalds
  */
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/blkdev.h>
 #include <linux/export.h>
-#include <linux/backing-dev.h>
-#include <linux/interrupt.h>
 #include <linux/uaccess.h>
 #include <asm/mmx.h>
 #include <asm/asm.h>
@@ -201,98 +196,6 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
 	return size;
 }
 
-static unsigned long
-__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
-{
-	int d0, d1;
-	__asm__ __volatile__(
-		"	.align 2,0x90\n"
-		"0:	movl 32(%4), %%eax\n"
-		"	cmpl $67, %0\n"
-		"	jbe 2f\n"
-		"1:	movl 64(%4), %%eax\n"
-		"	.align 2,0x90\n"
-		"2:	movl 0(%4), %%eax\n"
-		"21:	movl 4(%4), %%edx\n"
-		"	movl %%eax, 0(%3)\n"
-		"	movl %%edx, 4(%3)\n"
-		"3:	movl 8(%4), %%eax\n"
-		"31:	movl 12(%4),%%edx\n"
-		"	movl %%eax, 8(%3)\n"
-		"	movl %%edx, 12(%3)\n"
-		"4:	movl 16(%4), %%eax\n"
-		"41:	movl 20(%4), %%edx\n"
-		"	movl %%eax, 16(%3)\n"
-		"	movl %%edx, 20(%3)\n"
-		"10:	movl 24(%4), %%eax\n"
-		"51:	movl 28(%4), %%edx\n"
-		"	movl %%eax, 24(%3)\n"
-		"	movl %%edx, 28(%3)\n"
-		"11:	movl 32(%4), %%eax\n"
-		"61:	movl 36(%4), %%edx\n"
-		"	movl %%eax, 32(%3)\n"
-		"	movl %%edx, 36(%3)\n"
-		"12:	movl 40(%4), %%eax\n"
-		"71:	movl 44(%4), %%edx\n"
-		"	movl %%eax, 40(%3)\n"
-		"	movl %%edx, 44(%3)\n"
-		"13:	movl 48(%4), %%eax\n"
-		"81:	movl 52(%4), %%edx\n"
-		"	movl %%eax, 48(%3)\n"
-		"	movl %%edx, 52(%3)\n"
-		"14:	movl 56(%4), %%eax\n"
-		"91:	movl 60(%4), %%edx\n"
-		"	movl %%eax, 56(%3)\n"
-		"	movl %%edx, 60(%3)\n"
-		"	addl $-64, %0\n"
-		"	addl $64, %4\n"
-		"	addl $64, %3\n"
-		"	cmpl $63, %0\n"
-		"	ja 0b\n"
-		"5:	movl %0, %%eax\n"
-		"	shrl $2, %0\n"
-		"	andl $3, %%eax\n"
-		"	cld\n"
-		"6:	rep; movsl\n"
-		"	movl %%eax,%0\n"
-		"7:	rep; movsb\n"
-		"8:\n"
-		".section .fixup,\"ax\"\n"
-		"9:	lea 0(%%eax,%0,4),%0\n"
-		"16:	pushl %0\n"
-		"	pushl %%eax\n"
-		"	xorl %%eax,%%eax\n"
-		"	rep; stosb\n"
-		"	popl %%eax\n"
-		"	popl %0\n"
-		"	jmp 8b\n"
-		".previous\n"
-		_ASM_EXTABLE(0b,16b)
-		_ASM_EXTABLE(1b,16b)
-		_ASM_EXTABLE(2b,16b)
-		_ASM_EXTABLE(21b,16b)
-		_ASM_EXTABLE(3b,16b)
-		_ASM_EXTABLE(31b,16b)
-		_ASM_EXTABLE(4b,16b)
-		_ASM_EXTABLE(41b,16b)
-		_ASM_EXTABLE(10b,16b)
-		_ASM_EXTABLE(51b,16b)
-		_ASM_EXTABLE(11b,16b)
-		_ASM_EXTABLE(61b,16b)
-		_ASM_EXTABLE(12b,16b)
-		_ASM_EXTABLE(71b,16b)
-		_ASM_EXTABLE(13b,16b)
-		_ASM_EXTABLE(81b,16b)
-		_ASM_EXTABLE(14b,16b)
-		_ASM_EXTABLE(91b,16b)
-		_ASM_EXTABLE(6b,9b)
-		_ASM_EXTABLE(7b,16b)
-		: "=&c"(size), "=&D" (d0), "=&S" (d1)
-		: "1"(to), "2"(from), "0"(size)
-		: "eax", "edx", "memory");
-	return size;
-}
-
 static unsigned long __copy_user_intel_nocache(void *to,
 				const void __user *from, unsigned long size)
 {
@@ -387,8 +290,6 @@ static unsigned long __copy_user_intel_nocache(void *to,
  * Leave these declared but undefined.  They should not be any references to
  * them
  */
-unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
-					unsigned long size);
 unsigned long __copy_user_intel(void __user *to, const void *from,
 					unsigned long size);
 #endif /* CONFIG_X86_INTEL_USERCOPY */
@@ -427,47 +328,7 @@ do {									\
 		: "memory");						\
 } while (0)
 
-#define __copy_user_zeroing(to, from, size)				\
-do {									\
-	int __d0, __d1, __d2;						\
-	__asm__ __volatile__(						\
-		"	cmp  $7,%0\n"					\
-		"	jbe  1f\n"					\
-		"	movl %1,%0\n"					\
-		"	negl %0\n"					\
-		"	andl $7,%0\n"					\
-		"	subl %0,%3\n"					\
-		"4:	rep; movsb\n"					\
-		"	movl %3,%0\n"					\
-		"	shrl $2,%0\n"					\
-		"	andl $3,%3\n"					\
-		"	.align 2,0x90\n"				\
-		"0:	rep; movsl\n"					\
-		"	movl %3,%0\n"					\
-		"1:	rep; movsb\n"					\
-		"2:\n"							\
-		".section .fixup,\"ax\"\n"				\
-		"5:	addl %3,%0\n"					\
-		"	jmp 6f\n"					\
-		"3:	lea 0(%3,%0,4),%0\n"				\
-		"6:	pushl %0\n"					\
-		"	pushl %%eax\n"					\
-		"	xorl %%eax,%%eax\n"				\
-		"	rep; stosb\n"					\
-		"	popl %%eax\n"					\
-		"	popl %0\n"					\
-		"	jmp 2b\n"					\
-		".previous\n"						\
-		_ASM_EXTABLE(4b,5b)					\
-		_ASM_EXTABLE(0b,3b)					\
-		_ASM_EXTABLE(1b,6b)					\
-		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)	\
-		: "3"(size), "0"(size), "1"(to), "2"(from)		\
-		: "memory");						\
-} while (0)
-
-unsigned long __copy_to_user_ll(void __user *to, const void *from,
-					unsigned long n)
+unsigned long __copy_user_ll(void *to, const void *from, unsigned long n)
 {
 	stac();
 	if (movsl_is_ok(to, from, n))
@@ -477,34 +338,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
 	clac();
 	return n;
 }
-EXPORT_SYMBOL(__copy_to_user_ll);
-
-unsigned long __copy_from_user_ll(void *to, const void __user *from,
-					unsigned long n)
-{
-	stac();
-	if (movsl_is_ok(to, from, n))
-		__copy_user_zeroing(to, from, n);
-	else
-		n = __copy_user_zeroing_intel(to, from, n);
-	clac();
-	return n;
-}
-EXPORT_SYMBOL(__copy_from_user_ll);
-
-unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
-					unsigned long n)
-{
-	stac();
-	if (movsl_is_ok(to, from, n))
-		__copy_user(to, from, n);
-	else
-		n = __copy_user_intel((void __user *)to,
-				      (const void *)from, n);
-	clac();
-	return n;
-}
-EXPORT_SYMBOL(__copy_from_user_ll_nozero);
+EXPORT_SYMBOL(__copy_user_ll);
 
 unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
 					unsigned long n)
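
With the zeroing variants gone, 32-bit x86 is left with a single low-level primitive, __copy_user_ll(), used in both directions; the raw-copy hooks become thin casts around it. A sketch of that mapping (patterned on asm/uaccess_32.h after this change; treat it as an illustration rather than the exact header):

static __always_inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        /* kernel destination, user source: cast away the __user qualifier */
        return __copy_user_ll(to, (__force const void *)from, n);
}

static __always_inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        /* user destination, kernel source */
        return __copy_user_ll((__force void *)to, from, n);
}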