Diffstat (limited to 'arch/parisc/include/asm/uaccess.h')
-rw-r--r--	arch/parisc/include/asm/uaccess.h	126
1 file changed, 44 insertions, 82 deletions
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index a0b461336b6a..852322f2d1f9 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -27,7 +27,8 @@
  * that put_user is the same as __put_user, etc.
  */
 
-#define access_ok(type, uaddr, size)	(1)
+#define access_ok(type, uaddr, size)	\
+	( (uaddr) == (uaddr) )
 
 #define put_user __put_user
 #define get_user __get_user
@@ -59,6 +60,15 @@ struct exception_table_entry {
 	".previous\n"
 
 /*
+ * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
+ * (with lowest bit set) for which the fault handler in fixup_exception() will
+ * load -EFAULT into %r8 for a read or write fault, and zeroes the target
+ * register in case of a read fault in get_user().
+ */
+#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
+	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
+
+/*
  * The page fault handler stores, in a per-cpu area, the following information
  * if a fixup routine is available.
  */
@@ -85,7 +95,7 @@ struct exception_data {
 #define __get_user(x, ptr)                      \
 ({                                              \
 	register long __gu_err __asm__ ("r8") = 0;  \
-	register long __gu_val __asm__ ("r9") = 0;  \
+	register long __gu_val;			\
 						\
 	load_sr2();                             \
 	switch (sizeof(*(ptr))) {               \
@@ -101,22 +111,23 @@ struct exception_data {
 })
 
 #define __get_user_asm(ldx, ptr)                \
-	__asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t" \
-		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
+	__asm__("1: " ldx " 0(%%sr2,%2),%0\n"	\
+		"9:\n"				\
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
 		: "=r"(__gu_val), "=r"(__gu_err)        \
-		: "r"(ptr), "1"(__gu_err)	\
-		: "r1");
+		: "r"(ptr), "1"(__gu_err));
 
 #if !defined(CONFIG_64BIT)
 
 #define __get_user_asm64(ptr) 			\
-	__asm__("\n1:\tldw 0(%%sr2,%2),%0"	\
-		"\n2:\tldw 4(%%sr2,%2),%R0\n\t"	\
-		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\
-		ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\
+	__asm__("   copy %%r0,%R0\n"		\
+		"1: ldw 0(%%sr2,%2),%0\n"	\
+		"2: ldw 4(%%sr2,%2),%R0\n"	\
+		"9:\n"				\
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
 		: "=r"(__gu_val), "=r"(__gu_err)	\
-		: "r"(ptr), "1"(__gu_err)	\
-		: "r1");
+		: "r"(ptr), "1"(__gu_err));
 
 #endif /* !defined(CONFIG_64BIT) */
 
@@ -142,32 +153,31 @@ struct exception_data {
  * The "__put_user/kernel_asm()" macros tell gcc they read from memory
  * instead of writing. This is because they do not write to any memory
  * gcc knows about, so there are no aliasing issues. These macros must
- * also be aware that "fixup_put_user_skip_[12]" are executed in the
- * context of the fault, and any registers used there must be listed
- * as clobbers. In this case only "r1" is used by the current routines.
- * r8/r9 are already listed as err/val.
+ * also be aware that fixups are executed in the context of the fault,
+ * and any registers used there must be listed as clobbers.
+ * r8 is already listed as err.
  */
 
 #define __put_user_asm(stx, x, ptr)                         \
 	__asm__ __volatile__ (                              \
-		"\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t"	    \
-		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
+		"1: " stx " %2,0(%%sr2,%1)\n"		    \
+		"9:\n"					    \
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	    \
 		: "=r"(__pu_err)                            \
-		: "r"(ptr), "r"(x), "0"(__pu_err)	    \
-		: "r1")
+		: "r"(ptr), "r"(x), "0"(__pu_err))
 
 
 #if !defined(CONFIG_64BIT)
 
 #define __put_user_asm64(__val, ptr) do {	    	    \
 	__asm__ __volatile__ (                              \
-		"\n1:\tstw %2,0(%%sr2,%1)"		    \
-		"\n2:\tstw %R2,4(%%sr2,%1)\n\t"		    \
-		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
-		ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
+		"1: stw %2,0(%%sr2,%1)\n"		    \
+		"2: stw %R2,4(%%sr2,%1)\n"		    \
+		"9:\n"					    \
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	    \
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	    \
		: "=r"(__pu_err)                            \
-		: "r"(ptr), "r"(__val), "0"(__pu_err)	    \
-		: "r1");				    \
+		: "r"(ptr), "r"(__val), "0"(__pu_err));	    \
 } while (0)
 
 #endif /* !defined(CONFIG_64BIT) */
@@ -177,9 +187,6 @@ struct exception_data {
  * Complex access routines -- external declarations
  */
 
-extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
-extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
-extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
 extern long strncpy_from_user(char *, const char __user *, long);
 extern unsigned lclear_user(void __user *, unsigned long);
 extern long lstrnlen_user(const char __user *, long);
@@ -193,59 +200,14 @@ extern long lstrnlen_user(const char __user *, long);
 #define clear_user lclear_user
 #define __clear_user lclear_user
 
-unsigned long __must_check __copy_to_user(void __user *dst, const void *src,
-					  unsigned long len);
-unsigned long __must_check __copy_from_user(void *dst, const void __user *src,
-					  unsigned long len);
-unsigned long copy_in_user(void __user *dst, const void __user *src,
-			   unsigned long len);
-#define __copy_in_user copy_in_user
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-extern void __compiletime_error("usercopy buffer size is too small")
-__bad_copy_user(void);
-
-static inline void copy_user_overflow(int size, unsigned long count)
-{
-	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
-}
-
-static __always_inline unsigned long __must_check
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	int sz = __compiletime_object_size(to);
-	unsigned long ret = n;
-
-	if (likely(sz < 0 || sz >= n)) {
-		check_object_size(to, n, false);
-		ret = __copy_from_user(to, from, n);
-	} else if (!__builtin_constant_p(n))
-		copy_user_overflow(sz, n);
-	else
-		__bad_copy_user();
-
-	if (unlikely(ret))
-		memset(to + (n - ret), 0, ret);
-
-	return ret;
-}
-
-static __always_inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	int sz = __compiletime_object_size(from);
-
-	if (likely(sz < 0 || sz >= n)) {
-		check_object_size(from, n, true);
-		n = __copy_to_user(to, from, n);
-	} else if (!__builtin_constant_p(n))
-		copy_user_overflow(sz, n);
-	else
-		__bad_copy_user();
-
-	return n;
-}
+unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src,
+					    unsigned long len);
+unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
+					    unsigned long len);
+unsigned long __must_check raw_copy_in_user(void __user *dst, const void __user *src,
+					    unsigned long len);
+#define INLINE_COPY_TO_USER
+#define INLINE_COPY_FROM_USER
 
 struct pt_regs;
 int fixup_exception(struct pt_regs *regs);
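
The comment added above ASM_EXCEPTIONTABLE_ENTRY_EFAULT() describes only the convention (fixup address recorded with its lowest bit set; the handler then loads -EFAULT into %r8 and clears the get_user() target), not the consumer side. Below is a minimal, user-space C sketch of that convention for illustration. It is not the actual fixup_exception() code from the parisc fault handling; the names fake_entry, fake_regs and handle_fixup are invented for this sketch.

/*
 * Sketch of the low-bit-tagged fixup convention introduced by this patch.
 * Assumption: names and layout here are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

struct fake_entry {
	uintptr_t insn;		/* address of the faulting instruction */
	uintptr_t fixup;	/* continuation address; bit 0 = "-EFAULT" flag */
};

struct fake_regs {
	long gr8;		/* error register, like %r8 in the patch */
	long val;		/* stands in for the get_user() target register */
	uintptr_t pc;		/* where execution resumes after the fault */
};

static void handle_fixup(const struct fake_entry *e, struct fake_regs *regs,
			 int is_read)
{
	if (e->fixup & 1) {
		/* entry was created via ASM_EXCEPTIONTABLE_ENTRY_EFAULT() */
		regs->gr8 = -EFAULT;
		if (is_read)
			regs->val = 0;	/* get_user() must not leak stale data */
	}
	regs->pc = e->fixup & ~(uintptr_t)1;	/* strip the flag bit */
}

int main(void)
{
	/* "9:" label at 0x1000, recorded with bit 0 set by the macro */
	struct fake_entry e = { .insn = 0x0ff8, .fixup = 0x1000 + 1 };
	struct fake_regs regs = { .gr8 = 0, .val = 0x41414141, .pc = 0 };

	handle_fixup(&e, &regs, /* is_read = */ 1);
	printf("err=%ld val=%ld resume=0x%lx\n",
	       regs.gr8, regs.val, (unsigned long)regs.pc);
	return 0;
}

Built with any C compiler, the sketch prints err=-14 val=0 resume=0x1000, mirroring the behaviour the new comment attributes to the real handler: the caller of get_user()/put_user() sees -EFAULT in %r8, a zeroed destination on a read fault, and execution resumes at the "9:" label past the faulting access.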