author     Linus Torvalds <torvalds@linux-foundation.org>   2017-05-01 14:41:04 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-05-01 14:41:04 -0700
commit     5db6db0d400edd8bec274e34960cfa22838e1df5 (patch)
tree       3d7934f2eb27a2b72b87eae3c2918cf2e635d814 /arch/mips
parent     5fab10041b4389b61de7e7a49893190bae686241 (diff)
parent     2fefc97b2180518bac923fba3f79fdca1f41dc15 (diff)
download   linux-5db6db0d400edd8bec274e34960cfa22838e1df5.tar.bz2
Merge branch 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull uaccess unification updates from Al Viro:
 "This is the uaccess unification pile. It's _not_ the end of uaccess
  work, but the next batch of that will go into the next cycle. This
  one mostly takes copy_from_user() and friends out of arch/* and gets
  the zero-padding behaviour in sync for all architectures.

  Dealing with the nocache/writethrough mess is for the next cycle;
  fortunately, that's x86-only. Same for cleanups in iov_iter.c (I am
  sold on access_ok() in there, BTW; just not in this pile), same for
  reducing __copy_... callsites, strn*... stuff, etc. - there will be a
  pile about as large as this one in the next merge window.

  This one sat in -next for weeks. -3KLoC"

* 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (96 commits)
  HAVE_ARCH_HARDENED_USERCOPY is unconditional now
  CONFIG_ARCH_HAS_RAW_COPY_USER is unconditional now
  m32r: switch to RAW_COPY_USER
  hexagon: switch to RAW_COPY_USER
  microblaze: switch to RAW_COPY_USER
  get rid of padding, switch to RAW_COPY_USER
  ia64: get rid of copy_in_user()
  ia64: sanitize __access_ok()
  ia64: get rid of 'segment' argument of __do_{get,put}_user()
  ia64: get rid of 'segment' argument of __{get,put}_user_check()
  ia64: add extable.h
  powerpc: get rid of zeroing, switch to RAW_COPY_USER
  esas2r: don't open-code memdup_user()
  alpha: fix stack smashing in old_adjtimex(2)
  don't open-code kernel_setsockopt()
  mips: switch to RAW_COPY_USER
  mips: get rid of tail-zeroing in primitives
  mips: make copy_from_user() zero tail explicitly
  mips: clean and reorder the forest of macros...
  mips: consolidate __invoke_... wrappers
  ...
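The point of the RAW_COPY_USER conversion is that each architecture now supplies only the raw primitives (raw_copy_from_user()/raw_copy_to_user()), while the generic include/linux/uaccess.h wrappers do the access_ok() check and the zero-padding of any uncopied tail. A minimal sketch of the generic copy_from_user() path after this series, with the hardened-usercopy and KASAN hooks omitted:

    static inline unsigned long
    copy_from_user(void *to, const void __user *from, unsigned long n)
    {
            unsigned long res = n;

            might_fault();
            if (likely(access_ok(VERIFY_READ, from, n)))
                    res = raw_copy_from_user(to, from, n);  /* arch primitive: no zeroing */
            if (unlikely(res))
                    memset(to + (n - res), 0, res);         /* tail-zeroing is generic now */
            return res;
    }

This is why the MIPS diff below can delete the copy_to_user()/copy_from_user() macros and the zeroing loops in the assembler fault handlers outright.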
Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/Kconfig                         |   1
-rw-r--r--  arch/mips/cavium-octeon/octeon-memcpy.S   |  31
-rw-r--r--  arch/mips/include/asm/checksum.h          |   4
-rw-r--r--  arch/mips/include/asm/r4kcache.h          |   4
-rw-r--r--  arch/mips/include/asm/uaccess.h           | 449
-rw-r--r--  arch/mips/kernel/mips-r2-to-r6-emul.c     |  24
-rw-r--r--  arch/mips/kernel/syscall.c                |   2
-rw-r--r--  arch/mips/kernel/unaligned.c              |  10
-rw-r--r--  arch/mips/lib/memcpy.S                    |  49
-rw-r--r--  arch/mips/oprofile/backtrace.c            |   2
10 files changed, 84 insertions, 492 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index e0bb576410bb..d6d545a45118 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -68,7 +68,6 @@ config MIPS
select HANDLE_DOMAIN_IRQ
select HAVE_EXIT_THREAD
select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_ARCH_HARDENED_USERCOPY
menu "Machine selection"
diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
index cfd97f6448bb..0a7c9834b81c 100644
--- a/arch/mips/cavium-octeon/octeon-memcpy.S
+++ b/arch/mips/cavium-octeon/octeon-memcpy.S
@@ -140,15 +140,6 @@
.set noat
/*
- * t7 is used as a flag to note inatomic mode.
- */
-LEAF(__copy_user_inatomic)
-EXPORT_SYMBOL(__copy_user_inatomic)
- b __copy_user_common
- li t7, 1
- END(__copy_user_inatomic)
-
-/*
* A combined memcpy/__copy_user
* __copy_user sets len to 0 for success; else to an upper bound of
* the number of uncopied bytes.
@@ -161,8 +152,6 @@ EXPORT_SYMBOL(memcpy)
__memcpy:
FEXPORT(__copy_user)
EXPORT_SYMBOL(__copy_user)
- li t7, 0 /* not inatomic */
-__copy_user_common:
/*
* Note: dst & src may be unaligned, len may be 0
* Temps
@@ -414,25 +403,7 @@ l_exc:
LOAD t0, TI_TASK($28)
LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address
SUB len, AT, t0 # len number of uncopied bytes
- bnez t7, 2f /* Skip the zeroing out part if inatomic */
- /*
- * Here's where we rely on src and dst being incremented in tandem,
- * See (3) above.
- * dst += (fault addr - src) to put dst at first byte to clear
- */
- ADD dst, t0 # compute start address in a1
- SUB dst, src
- /*
- * Clear len bytes starting at dst. Can't call __bzero because it
- * might modify len. An inefficient loop for these rare times...
- */
- beqz len, done
- SUB src, len, 1
-1: sb zero, 0(dst)
- ADD dst, dst, 1
- bnez src, 1b
- SUB src, src, 1
-2: jr ra
+ jr ra
nop
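With the zeroing loop gone from the assembler fault path, the separate __copy_user_inatomic entry point (which existed only to skip that zeroing) has nothing left to do, so it is deleted here and again in arch/mips/lib/memcpy.S below. In the generic header the inatomic variant is expected to collapse to the raw primitive; a sketch, again ignoring the KASAN/hardened-usercopy checks:

    static __always_inline unsigned long
    __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
    {
            /* no access_ok(), no might_fault(), no tail-zeroing: callers handle faults */
            return raw_copy_from_user(to, from, n);
    }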
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index c8b574f7e0cc..77cad232a1c6 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -50,7 +50,7 @@ __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
__wsum sum, int *err_ptr)
{
might_fault();
- if (segment_eq(get_fs(), get_ds()))
+ if (uaccess_kernel())
return __csum_partial_copy_kernel((__force void *)src, dst,
len, sum, err_ptr);
else
@@ -82,7 +82,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
{
might_fault();
if (access_ok(VERIFY_WRITE, dst, len)) {
- if (segment_eq(get_fs(), get_ds()))
+ if (uaccess_kernel())
return __csum_partial_copy_kernel(src,
(__force void *)dst,
len, sum, err_ptr);
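The open-coded segment_eq(get_fs(), get_ds()) test is replaced by the uaccess_kernel() helper throughout this pull. For reference, the generic definition is essentially the same comparison behind a readable name; a sketch of the include/linux/uaccess.h definition:

    /* true while set_fs(KERNEL_DS) is in effect, i.e. "user" pointers are kernel addresses */
    #define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)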
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 55fd94e6cd0b..7f12d7e27c94 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -20,7 +20,7 @@
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
-#include <linux/uaccess.h> /* for segment_eq() */
+#include <linux/uaccess.h> /* for uaccess_kernel() */
extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);
@@ -714,7 +714,7 @@ static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
\
__##pfx##flush_prologue \
\
- if (segment_eq(get_fs(), USER_DS)) { \
+ if (!uaccess_kernel()) { \
while (1) { \
protected_cachee_op(hitop, addr); \
if (addr == aend) \
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 5347cfe15af2..99e629a590a5 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -12,8 +12,6 @@
#define _ASM_UACCESS_H
#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm-eva.h>
#include <asm/extable.h>
@@ -71,9 +69,6 @@ extern u64 __ua_limit;
#define USER_DS ((mm_segment_t) { __UA_LIMIT })
#endif
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))
@@ -93,7 +88,7 @@ static inline bool eva_kernel_access(void)
if (!IS_ENABLED(CONFIG_EVA))
return false;
- return segment_eq(get_fs(), get_ds());
+ return uaccess_kernel();
}
/*
@@ -133,23 +128,14 @@ static inline bool eva_kernel_access(void)
* this function, memory access functions may still return -EFAULT.
*/
-#define __access_mask get_fs().seg
-
-#define __access_ok(addr, size, mask) \
-({ \
- unsigned long __addr = (unsigned long) (addr); \
- unsigned long __size = size; \
- unsigned long __mask = mask; \
- unsigned long __ok; \
- \
- __chk_user_ptr(addr); \
- __ok = (signed long)(__mask & (__addr | (__addr + __size) | \
- __ua_size(__size))); \
- __ok == 0; \
-})
+static inline int __access_ok(const void __user *p, unsigned long size)
+{
+ unsigned long addr = (unsigned long)p;
+ return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0;
+}
#define access_ok(type, addr, size) \
- likely(__access_ok((addr), (size), __access_mask))
+ likely(__access_ok((addr), (size)))
/*
* put_user: - Write a simple value into user space.
@@ -811,157 +797,7 @@ extern void __put_user_unaligned_unknown(void);
extern size_t __copy_user(void *__to, const void *__from, size_t __n);
-#ifndef CONFIG_EVA
-#define __invoke_copy_to_user(to, from, n) \
-({ \
- register void __user *__cu_to_r __asm__("$4"); \
- register const void *__cu_from_r __asm__("$5"); \
- register long __cu_len_r __asm__("$6"); \
- \
- __cu_to_r = (to); \
- __cu_from_r = (from); \
- __cu_len_r = (n); \
- __asm__ __volatile__( \
- __MODULE_JAL(__copy_user) \
- : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
- : \
- : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
- DADDI_SCRATCH, "memory"); \
- __cu_len_r; \
-})
-
-#define __invoke_copy_to_kernel(to, from, n) \
- __invoke_copy_to_user(to, from, n)
-
-#endif
-
-/*
- * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-#define __copy_to_user(to, from, n) \
-({ \
- void __user *__cu_to; \
- const void *__cu_from; \
- long __cu_len; \
- \
- __cu_to = (to); \
- __cu_from = (from); \
- __cu_len = (n); \
- \
- check_object_size(__cu_from, __cu_len, true); \
- might_fault(); \
- \
- if (eva_kernel_access()) \
- __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
- __cu_len); \
- else \
- __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
- __cu_len); \
- __cu_len; \
-})
-
-extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
-
-#define __copy_to_user_inatomic(to, from, n) \
-({ \
- void __user *__cu_to; \
- const void *__cu_from; \
- long __cu_len; \
- \
- __cu_to = (to); \
- __cu_from = (from); \
- __cu_len = (n); \
- \
- check_object_size(__cu_from, __cu_len, true); \
- \
- if (eva_kernel_access()) \
- __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
- __cu_len); \
- else \
- __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
- __cu_len); \
- __cu_len; \
-})
-
-#define __copy_from_user_inatomic(to, from, n) \
-({ \
- void *__cu_to; \
- const void __user *__cu_from; \
- long __cu_len; \
- \
- __cu_to = (to); \
- __cu_from = (from); \
- __cu_len = (n); \
- \
- check_object_size(__cu_to, __cu_len, false); \
- \
- if (eva_kernel_access()) \
- __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, \
- __cu_from,\
- __cu_len);\
- else \
- __cu_len = __invoke_copy_from_user_inatomic(__cu_to, \
- __cu_from, \
- __cu_len); \
- __cu_len; \
-})
-
-/*
- * copy_to_user: - Copy a block of data into user space.
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-#define copy_to_user(to, from, n) \
-({ \
- void __user *__cu_to; \
- const void *__cu_from; \
- long __cu_len; \
- \
- __cu_to = (to); \
- __cu_from = (from); \
- __cu_len = (n); \
- \
- check_object_size(__cu_from, __cu_len, true); \
- \
- if (eva_kernel_access()) { \
- __cu_len = __invoke_copy_to_kernel(__cu_to, \
- __cu_from, \
- __cu_len); \
- } else { \
- if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
- might_fault(); \
- __cu_len = __invoke_copy_to_user(__cu_to, \
- __cu_from, \
- __cu_len); \
- } \
- } \
- __cu_len; \
-})
-
-#ifndef CONFIG_EVA
-
-#define __invoke_copy_from_user(to, from, n) \
+#define __invoke_copy_from(func, to, from, n) \
({ \
register void *__cu_to_r __asm__("$4"); \
register const void __user *__cu_from_r __asm__("$5"); \
@@ -972,7 +808,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_len_r = (n); \
__asm__ __volatile__( \
".set\tnoreorder\n\t" \
- __MODULE_JAL(__copy_user) \
+ __MODULE_JAL(func) \
".set\tnoat\n\t" \
__UA_ADDU "\t$1, %1, %2\n\t" \
".set\tat\n\t" \
@@ -984,33 +820,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_len_r; \
})
-#define __invoke_copy_from_kernel(to, from, n) \
- __invoke_copy_from_user(to, from, n)
-
-/* For userland <-> userland operations */
-#define ___invoke_copy_in_user(to, from, n) \
- __invoke_copy_from_user(to, from, n)
-
-/* For kernel <-> kernel operations */
-#define ___invoke_copy_in_kernel(to, from, n) \
- __invoke_copy_from_user(to, from, n)
-
-#define __invoke_copy_from_user_inatomic(to, from, n) \
+#define __invoke_copy_to(func, to, from, n) \
({ \
- register void *__cu_to_r __asm__("$4"); \
- register const void __user *__cu_from_r __asm__("$5"); \
+ register void __user *__cu_to_r __asm__("$4"); \
+ register const void *__cu_from_r __asm__("$5"); \
register long __cu_len_r __asm__("$6"); \
\
__cu_to_r = (to); \
__cu_from_r = (from); \
__cu_len_r = (n); \
__asm__ __volatile__( \
- ".set\tnoreorder\n\t" \
- __MODULE_JAL(__copy_user_inatomic) \
- ".set\tnoat\n\t" \
- __UA_ADDU "\t$1, %1, %2\n\t" \
- ".set\tat\n\t" \
- ".set\treorder" \
+ __MODULE_JAL(func) \
: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
: \
: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
@@ -1018,228 +838,79 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_len_r; \
})
-#define __invoke_copy_from_kernel_inatomic(to, from, n) \
- __invoke_copy_from_user_inatomic(to, from, n) \
+#define __invoke_copy_from_kernel(to, from, n) \
+ __invoke_copy_from(__copy_user, to, from, n)
+
+#define __invoke_copy_to_kernel(to, from, n) \
+ __invoke_copy_to(__copy_user, to, from, n)
+
+#define ___invoke_copy_in_kernel(to, from, n) \
+ __invoke_copy_from(__copy_user, to, from, n)
+
+#ifndef CONFIG_EVA
+#define __invoke_copy_from_user(to, from, n) \
+ __invoke_copy_from(__copy_user, to, from, n)
+
+#define __invoke_copy_to_user(to, from, n) \
+ __invoke_copy_to(__copy_user, to, from, n)
+
+#define ___invoke_copy_in_user(to, from, n) \
+ __invoke_copy_from(__copy_user, to, from, n)
#else
/* EVA specific functions */
-extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
- size_t __n);
extern size_t __copy_from_user_eva(void *__to, const void *__from,
size_t __n);
extern size_t __copy_to_user_eva(void *__to, const void *__from,
size_t __n);
extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
-#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr) \
-({ \
- register void *__cu_to_r __asm__("$4"); \
- register const void __user *__cu_from_r __asm__("$5"); \
- register long __cu_len_r __asm__("$6"); \
- \
- __cu_to_r = (to); \
- __cu_from_r = (from); \
- __cu_len_r = (n); \
- __asm__ __volatile__( \
- ".set\tnoreorder\n\t" \
- __MODULE_JAL(func_ptr) \
- ".set\tnoat\n\t" \
- __UA_ADDU "\t$1, %1, %2\n\t" \
- ".set\tat\n\t" \
- ".set\treorder" \
- : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
- : \
- : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
- DADDI_SCRATCH, "memory"); \
- __cu_len_r; \
-})
-
-#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr) \
-({ \
- register void *__cu_to_r __asm__("$4"); \
- register const void __user *__cu_from_r __asm__("$5"); \
- register long __cu_len_r __asm__("$6"); \
- \
- __cu_to_r = (to); \
- __cu_from_r = (from); \
- __cu_len_r = (n); \
- __asm__ __volatile__( \
- __MODULE_JAL(func_ptr) \
- : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
- : \
- : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
- DADDI_SCRATCH, "memory"); \
- __cu_len_r; \
-})
-
/*
* Source or destination address is in userland. We need to go through
* the TLB
*/
#define __invoke_copy_from_user(to, from, n) \
- __invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)
-
-#define __invoke_copy_from_user_inatomic(to, from, n) \
- __invoke_copy_from_user_eva_generic(to, from, n, \
- __copy_user_inatomic_eva)
+ __invoke_copy_from(__copy_from_user_eva, to, from, n)
#define __invoke_copy_to_user(to, from, n) \
- __invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)
+ __invoke_copy_to(__copy_to_user_eva, to, from, n)
#define ___invoke_copy_in_user(to, from, n) \
- __invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)
-
-/*
- * Source or destination address in the kernel. We are not going through
- * the TLB
- */
-#define __invoke_copy_from_kernel(to, from, n) \
- __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
-
-#define __invoke_copy_from_kernel_inatomic(to, from, n) \
- __invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)
-
-#define __invoke_copy_to_kernel(to, from, n) \
- __invoke_copy_to_user_eva_generic(to, from, n, __copy_user)
-
-#define ___invoke_copy_in_kernel(to, from, n) \
- __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
+ __invoke_copy_from(__copy_in_user_eva, to, from, n)
#endif /* CONFIG_EVA */
-/*
- * __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to: Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-#define __copy_from_user(to, from, n) \
-({ \
- void *__cu_to; \
- const void __user *__cu_from; \
- long __cu_len; \
- \
- __cu_to = (to); \
- __cu_from = (from); \
- __cu_len = (n); \
- \
- check_object_size(__cu_to, __cu_len, false); \
- \
- if (eva_kernel_access()) { \
- __cu_len = __invoke_copy_from_kernel(__cu_to, \
- __cu_from, \
- __cu_len); \
- } else { \
- might_fault(); \
- __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
- __cu_len); \
- } \
- __cu_len; \
-})
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ if (eva_kernel_access())
+ return __invoke_copy_to_kernel(to, from, n);
+ else
+ return __invoke_copy_to_user(to, from, n);
+}
-/*
- * copy_from_user: - Copy a block of data from user space.
- * @to: Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-#define copy_from_user(to, from, n) \
-({ \
- void *__cu_to; \
- const void __user *__cu_from; \
- long __cu_len; \
- \
- __cu_to = (to); \
- __cu_from = (from); \
- __cu_len = (n); \
- \
- check_object_size(__cu_to, __cu_len, false); \
- \
- if (eva_kernel_access()) { \
- __cu_len = __invoke_copy_from_kernel(__cu_to, \
- __cu_from, \
- __cu_len); \
- } else { \
- if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
- might_fault(); \
- __cu_len = __invoke_copy_from_user(__cu_to, \
- __cu_from, \
- __cu_len); \
- } else { \
- memset(__cu_to, 0, __cu_len); \
- } \
- } \
- __cu_len; \
-})
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ if (eva_kernel_access())
+ return __invoke_copy_from_kernel(to, from, n);
+ else
+ return __invoke_copy_from_user(to, from, n);
+}
-#define __copy_in_user(to, from, n) \
-({ \
- void __user *__cu_to; \
- const void __user *__cu_from; \
- long __cu_len; \
- \
- __cu_to = (to); \
- __cu_from = (from); \
- __cu_len = (n); \
- if (eva_kernel_access()) { \
- __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
- __cu_len); \
- } else { \
- might_fault(); \
- __cu_len = ___invoke_copy_in_user(__cu_to, __cu_from, \
- __cu_len); \
- } \
- __cu_len; \
-})
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
-#define copy_in_user(to, from, n) \
-({ \
- void __user *__cu_to; \
- const void __user *__cu_from; \
- long __cu_len; \
- \
- __cu_to = (to); \
- __cu_from = (from); \
- __cu_len = (n); \
- if (eva_kernel_access()) { \
- __cu_len = ___invoke_copy_in_kernel(__cu_to,__cu_from, \
- __cu_len); \
- } else { \
- if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\
- access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\
- might_fault(); \
- __cu_len = ___invoke_copy_in_user(__cu_to, \
- __cu_from, \
- __cu_len); \
- } \
- } \
- __cu_len; \
-})
+static inline unsigned long
+raw_copy_in_user(void __user*to, const void __user *from, unsigned long n)
+{
+ if (eva_kernel_access())
+ return ___invoke_copy_in_kernel(to, from, n);
+ else
+ return ___invoke_copy_in_user(to, from, n);
+}
extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);
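With raw_copy_from_user(), raw_copy_to_user() and raw_copy_in_user() defined here, and INLINE_COPY_FROM_USER/INLINE_COPY_TO_USER selected, the access_ok()-checked entry points no longer live in arch/mips at all; they come from the generic header. For example, copy_in_user() is expected to reduce to roughly this sketch (__must_check and CONFIG_COMPAT guards omitted):

    static __always_inline unsigned long
    copy_in_user(void __user *to, const void __user *from, unsigned long n)
    {
            might_fault();
            if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
                    n = raw_copy_in_user(to, from, n);      /* returns bytes NOT copied */
            return n;
    }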
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index d8f1cf1ec370..550e7d03090a 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -1200,7 +1200,7 @@ fpu_emul:
case lwl_op:
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
- if (!access_ok(VERIFY_READ, vaddr, 4)) {
+ if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
@@ -1273,7 +1273,7 @@ fpu_emul:
case lwr_op:
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
- if (!access_ok(VERIFY_READ, vaddr, 4)) {
+ if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
@@ -1347,7 +1347,7 @@ fpu_emul:
case swl_op:
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
- if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+ if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
@@ -1417,7 +1417,7 @@ fpu_emul:
case swr_op:
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
- if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+ if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
@@ -1492,7 +1492,7 @@ fpu_emul:
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
- if (!access_ok(VERIFY_READ, vaddr, 8)) {
+ if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
@@ -1611,7 +1611,7 @@ fpu_emul:
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
- if (!access_ok(VERIFY_READ, vaddr, 8)) {
+ if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
@@ -1730,7 +1730,7 @@ fpu_emul:
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
- if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+ if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
@@ -1848,7 +1848,7 @@ fpu_emul:
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
- if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+ if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
@@ -1965,7 +1965,7 @@ fpu_emul:
err = SIGBUS;
break;
}
- if (!access_ok(VERIFY_READ, vaddr, 4)) {
+ if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGBUS;
break;
@@ -2021,7 +2021,7 @@ fpu_emul:
err = SIGBUS;
break;
}
- if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+ if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGBUS;
break;
@@ -2084,7 +2084,7 @@ fpu_emul:
err = SIGBUS;
break;
}
- if (!access_ok(VERIFY_READ, vaddr, 8)) {
+ if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGBUS;
break;
@@ -2145,7 +2145,7 @@ fpu_emul:
err = SIGBUS;
break;
}
- if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+ if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGBUS;
break;
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index f1d17ece4181..1dfa7f5796c7 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -98,7 +98,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
if (unlikely(addr & 3))
return -EINVAL;
- if (unlikely(!access_ok(VERIFY_WRITE, addr, 4)))
+ if (unlikely(!access_ok(VERIFY_WRITE, (const void __user *)addr, 4)))
return -EINVAL;
if (cpu_has_llsc && R10000_LLSC_WAR) {
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 7ed98354fe9d..f806ee56e639 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -1026,7 +1026,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
goto sigbus;
if (IS_ENABLED(CONFIG_EVA)) {
- if (segment_eq(get_fs(), get_ds()))
+ if (uaccess_kernel())
LoadHW(addr, value, res);
else
LoadHWE(addr, value, res);
@@ -1045,7 +1045,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
goto sigbus;
if (IS_ENABLED(CONFIG_EVA)) {
- if (segment_eq(get_fs(), get_ds()))
+ if (uaccess_kernel())
LoadW(addr, value, res);
else
LoadWE(addr, value, res);
@@ -1064,7 +1064,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
goto sigbus;
if (IS_ENABLED(CONFIG_EVA)) {
- if (segment_eq(get_fs(), get_ds()))
+ if (uaccess_kernel())
LoadHWU(addr, value, res);
else
LoadHWUE(addr, value, res);
@@ -1132,7 +1132,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
value = regs->regs[insn.i_format.rt];
if (IS_ENABLED(CONFIG_EVA)) {
- if (segment_eq(get_fs(), get_ds()))
+ if (uaccess_kernel())
StoreHW(addr, value, res);
else
StoreHWE(addr, value, res);
@@ -1152,7 +1152,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
value = regs->regs[insn.i_format.rt];
if (IS_ENABLED(CONFIG_EVA)) {
- if (segment_eq(get_fs(), get_ds()))
+ if (uaccess_kernel())
StoreW(addr, value, res);
else
StoreWE(addr, value, res);
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index c3031f18c572..3114a2ed1f4e 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -562,39 +562,9 @@
LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address
nop
SUB len, AT, t0 # len number of uncopied bytes
- bnez t6, .Ldone\@ /* Skip the zeroing part if inatomic */
- /*
- * Here's where we rely on src and dst being incremented in tandem,
- * See (3) above.
- * dst += (fault addr - src) to put dst at first byte to clear
- */
- ADD dst, t0 # compute start address in a1
- SUB dst, src
- /*
- * Clear len bytes starting at dst. Can't call __bzero because it
- * might modify len. An inefficient loop for these rare times...
- */
- .set reorder /* DADDI_WAR */
- SUB src, len, 1
- beqz len, .Ldone\@
- .set noreorder
-1: sb zero, 0(dst)
- ADD dst, dst, 1
-#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
- bnez src, 1b
- SUB src, src, 1
-#else
- .set push
- .set noat
- li v1, 1
- bnez src, 1b
- SUB src, src, v1
- .set pop
-#endif
jr ra
nop
-
#define SEXC(n) \
.set reorder; /* DADDI_WAR */ \
.Ls_exc_p ## n ## u\@: \
@@ -673,15 +643,6 @@ LEAF(__rmemcpy) /* a0=dst a1=src a2=len */
END(__rmemcpy)
/*
- * t6 is used as a flag to note inatomic mode.
- */
-LEAF(__copy_user_inatomic)
-EXPORT_SYMBOL(__copy_user_inatomic)
- b __copy_user_common
- li t6, 1
- END(__copy_user_inatomic)
-
-/*
* A combined memcpy/__copy_user
* __copy_user sets len to 0 for success; else to an upper bound of
* the number of uncopied bytes.
@@ -694,8 +655,6 @@ EXPORT_SYMBOL(memcpy)
.L__memcpy:
FEXPORT(__copy_user)
EXPORT_SYMBOL(__copy_user)
- li t6, 0 /* not inatomic */
-__copy_user_common:
/* Legacy Mode, user <-> user */
__BUILD_COPY_USER LEGACY_MODE USEROP USEROP
@@ -708,20 +667,12 @@ __copy_user_common:
* space
*/
-LEAF(__copy_user_inatomic_eva)
-EXPORT_SYMBOL(__copy_user_inatomic_eva)
- b __copy_from_user_common
- li t6, 1
- END(__copy_user_inatomic_eva)
-
/*
* __copy_from_user (EVA)
*/
LEAF(__copy_from_user_eva)
EXPORT_SYMBOL(__copy_from_user_eva)
- li t6, 0 /* not inatomic */
-__copy_from_user_common:
__BUILD_COPY_USER EVA_MODE USEROP KERNELOP
END(__copy_from_user_eva)
diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c
index 5e645c9a3162..16ace558cd9d 100644
--- a/arch/mips/oprofile/backtrace.c
+++ b/arch/mips/oprofile/backtrace.c
@@ -18,7 +18,7 @@ struct stackframe {
static inline int get_mem(unsigned long addr, unsigned long *result)
{
unsigned long *address = (unsigned long *) addr;
- if (!access_ok(VERIFY_READ, addr, sizeof(unsigned long)))
+ if (!access_ok(VERIFY_READ, address, sizeof(unsigned long)))
return -1;
if (__copy_from_user_inatomic(result, address, sizeof(unsigned long)))
return -3;