author     Linus Torvalds <torvalds@linux-foundation.org>   2017-05-10 11:40:36 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-05-10 11:40:36 -0700
commit     f94c128eefcce2e3448d543f13cd7d7b8aa660a5 (patch)
tree       720dcaa8bbc2a663fe96b1d1c23f7b3720be7d16 /arch/metag
parent     c44b59430393c38873fd933333d945f426857a59 (diff)
parent     e3cd7f013bac3105d44b8bd5a90359989d45b5a5 (diff)
download   linux-f94c128eefcce2e3448d543f13cd7d7b8aa660a5.tar.bz2
Merge tag 'metag-for-v4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/metag
Pull metag updates from James Hogan:
"These patches primarily make some usercopy improvements (following on
from the recent usercopy fixes):
- reformat and simplify rapf copy loops
- add 64-bit get_user support
And fix a couple more uaccess issues, partly pointed out by Al:
- fix access_ok()'s serious shortcomings
- fix strncpy_from_user() address validation
Also included is a trivial removal of a redundant increment"
* tag 'metag-for-v4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/metag:
metag/mm: Drop pointless increment
metag/uaccess: Check access_ok in strncpy_from_user
metag/uaccess: Fix access_ok()
metag/usercopy: Add 64-bit get_user support
metag/usercopy: Simplify rapf loop fixup corner case
metag/usercopy: Reformat rapf loop inline asm
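
The access_ok() rework listed above replaces the old __user_bad() blacklist with explicit range checks: the user-mapped window up to get_fs().seg, an explicit NULL escape hatch, and the core code window. The sketch below is a minimal, self-contained userspace model of that range-checking idea, not the kernel code itself; the EX_* constants are made-up stand-ins for META_MEMORY_BASE, get_fs().seg and LINCORE_CODE_BASE/LIMIT.

/*
 * Minimal sketch of the reworked range checks, with hypothetical bounds.
 * Each window check also bounds size against the remaining room so that
 * addr + size cannot run past the end of the window.
 */
#include <stdbool.h>
#include <stdio.h>

#define EX_MEMORY_BASE 0x08000000UL /* stand-in for META_MEMORY_BASE */
#define EX_SEG_LIMIT   0x40000000UL /* stand-in for get_fs().seg */
#define EX_CORE_BASE   0x80000000UL /* stand-in for LINCORE_CODE_BASE */
#define EX_CORE_LIMIT  0x87FFFFFFUL /* stand-in for LINCORE_CODE_LIMIT */

static bool example_access_ok(unsigned long addr, unsigned long size)
{
	/* user-mapped window, but not the system area before it */
	if (addr >= EX_MEMORY_BASE && addr < EX_SEG_LIMIT &&
	    size <= EX_SEG_LIMIT - addr)
		return true;
	/* NULL is allowed through; the first page is never mappable on Meta */
	if (!addr)
		return true;
	/* core code window, inclusive upper bound */
	if (addr >= EX_CORE_BASE && addr <= EX_CORE_LIMIT &&
	    size <= EX_CORE_LIMIT + 1 - addr)
		return true;
	return false;
}

int main(void)
{
	printf("%d\n", example_access_ok(0x08001000UL, 64)); /* 1: inside user window */
	printf("%d\n", example_access_ok(0x00000400UL, 4));  /* 0: system area */
	printf("%d\n", example_access_ok(0x3FFFFFF0UL, 64)); /* 0: runs past the limit */
	return 0;
}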
Diffstat (limited to 'arch/metag')
-rw-r--r--   arch/metag/include/asm/uaccess.h |  58
-rw-r--r--   arch/metag/lib/usercopy.c        | 236
-rw-r--r--   arch/metag/mm/mmu-meta1.c        |   1
3 files changed, 143 insertions, 152 deletions
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 5ebc2850690e..9c8fbf8fb5aa 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -24,24 +24,32 @@
 
 #define segment_eq(a, b)	((a).seg == (b).seg)
 
-#define __kernel_ok (uaccess_kernel())
-/*
- * Explicitly allow NULL pointers here. Parts of the kernel such
- * as readv/writev use access_ok to validate pointers, but want
- * to allow NULL pointers for various reasons. NULL pointers are
- * safe to allow through because the first page is not mappable on
- * Meta.
- *
- * We also wish to avoid letting user code access the system area
- * and the kernel half of the address space.
- */
-#define __user_bad(addr, size) (((addr) > 0 && (addr) < META_MEMORY_BASE) || \
-	((addr) > PAGE_OFFSET && \
-	 (addr) < LINCORE_BASE))
-
 static inline int __access_ok(unsigned long addr, unsigned long size)
 {
-	return __kernel_ok || !__user_bad(addr, size);
+	/*
+	 * Allow access to the user mapped memory area, but not the system area
+	 * before it. The check extends to the top of the address space when
+	 * kernel access is allowed (there's no real reason to user copy to the
+	 * system area in any case).
+	 */
+	if (likely(addr >= META_MEMORY_BASE && addr < get_fs().seg &&
+		   size <= get_fs().seg - addr))
+		return true;
+	/*
+	 * Explicitly allow NULL pointers here. Parts of the kernel such
+	 * as readv/writev use access_ok to validate pointers, but want
+	 * to allow NULL pointers for various reasons. NULL pointers are
+	 * safe to allow through because the first page is not mappable on
+	 * Meta.
+	 */
+	if (!addr)
+		return true;
+	/* Allow access to core code memory area... */
+	if (addr >= LINCORE_CODE_BASE && addr <= LINCORE_CODE_LIMIT &&
+	    size <= LINCORE_CODE_LIMIT + 1 - addr)
+		return true;
+	/* ... but no other areas. */
+	return false;
 }
 
 #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), \
@@ -113,7 +121,8 @@ extern long __get_user_bad(void);
 
 #define __get_user_nocheck(x, ptr, size) \
 ({ \
-	long __gu_err, __gu_val; \
+	long __gu_err; \
+	long long __gu_val; \
 	__get_user_size(__gu_val, (ptr), (size), __gu_err); \
 	(x) = (__force __typeof__(*(ptr)))__gu_val; \
 	__gu_err; \
@@ -121,7 +130,8 @@ extern long __get_user_bad(void);
 
 #define __get_user_check(x, ptr, size) \
 ({ \
-	long __gu_err = -EFAULT, __gu_val = 0; \
+	long __gu_err = -EFAULT; \
+	long long __gu_val = 0; \
 	const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
 	if (access_ok(VERIFY_READ, __gu_addr, size)) \
 		__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
@@ -132,6 +142,7 @@ extern long __get_user_bad(void);
 extern unsigned char __get_user_asm_b(const void __user *addr, long *err);
 extern unsigned short __get_user_asm_w(const void __user *addr, long *err);
 extern unsigned int __get_user_asm_d(const void __user *addr, long *err);
+extern unsigned long long __get_user_asm_l(const void __user *addr, long *err);
 
 #define __get_user_size(x, ptr, size, retval) \
 do { \
@@ -143,6 +154,8 @@ do { \
 		x = __get_user_asm_w(ptr, &retval); break; \
 	case 4: \
 		x = __get_user_asm_d(ptr, &retval); break; \
+	case 8: \
+		x = __get_user_asm_l(ptr, &retval); break; \
 	default: \
 		(x) = __get_user_bad(); \
 	} \
@@ -161,8 +174,13 @@ do { \
 extern long __must_check __strncpy_from_user(char *dst, const char __user *src,
 					     long count);
 
-#define strncpy_from_user(dst, src, count) __strncpy_from_user(dst, src, count)
-
+static inline long
+strncpy_from_user(char *dst, const char __user *src, long count)
+{
+	if (!access_ok(VERIFY_READ, src, 1))
+		return -EFAULT;
+	return __strncpy_from_user(dst, src, count);
+}
 /*
  * Return the size of a string (including the ending 0)
  *
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
index e8a4ea83cabb..c941abdb8f85 100644
--- a/arch/metag/lib/usercopy.c
+++ b/arch/metag/lib/usercopy.c
@@ -246,65 +246,47 @@
 #define __asm_copy_user_64bit_rapf_loop( \
 		to, from, ret, n, id, FIXUP) \
 	asm volatile ( \
-	".balign 8\n" \
-	"MOV RAPF, %1\n" \
-	"MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
-	"MOV D0Ar6, #0\n" \
-	"LSR D1Ar5, %3, #6\n" \
-	"SUB TXRPT, D1Ar5, #2\n" \
-	"MOV RAPF, %1\n" \
+	".balign 8\n" \
+	"	MOV RAPF, %1\n" \
+	"	MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
+	"	MOV D0Ar6, #0\n" \
+	"	LSR D1Ar5, %3, #6\n" \
+	"	SUB TXRPT, D1Ar5, #2\n" \
+	"	MOV RAPF, %1\n" \
 	"$Lloop"id":\n" \
-	"ADD RAPF, %1, #64\n" \
-	"21:\n" \
-	"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
-	"22:\n" \
-	"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
-	"23:\n" \
-	"SUB %3, %3, #32\n" \
-	"24:\n" \
-	"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
-	"25:\n" \
-	"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
-	"26:\n" \
-	"SUB %3, %3, #32\n" \
-	"DCACHE [%1+#-64], D0Ar6\n" \
-	"BR $Lloop"id"\n" \
+	"	ADD RAPF, %1, #64\n" \
+	"21:	MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+	"22:	MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+	"23:	SUB %3, %3, #32\n" \
+	"24:	MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+	"25:	MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+	"26:	SUB %3, %3, #32\n" \
+	"	DCACHE [%1+#-64], D0Ar6\n" \
+	"	BR $Lloop"id"\n" \
 	\
-	"MOV RAPF, %1\n" \
-	"27:\n" \
-	"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
-	"28:\n" \
-	"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
-	"29:\n" \
-	"SUB %3, %3, #32\n" \
-	"30:\n" \
-	"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
-	"31:\n" \
-	"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
-	"32:\n" \
-	"SUB %0, %0, #8\n" \
-	"33:\n" \
-	"SETL [%0++], D0.7, D1.7\n" \
-	"SUB %3, %3, #32\n" \
-	"1:" \
-	"DCACHE [%1+#-64], D0Ar6\n" \
-	"GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
-	"GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
-	"GETL D0.5, D1.5, [A0StP+#-24]\n" \
-	"GETL D0.6, D1.6, [A0StP+#-16]\n" \
-	"GETL D0.7, D1.7, [A0StP+#-8]\n" \
-	"SUB A0StP, A0StP, #40\n" \
+	"	MOV RAPF, %1\n" \
+	"27:	MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+	"28:	MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+	"29:	SUB %3, %3, #32\n" \
+	"30:	MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+	"31:	MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+	"32:	SETL [%0+#-8], D0.7, D1.7\n" \
+	"	SUB %3, %3, #32\n" \
+	"1:	DCACHE [%1+#-64], D0Ar6\n" \
+	"	GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
+	"	GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
+	"	GETL D0.5, D1.5, [A0StP+#-24]\n" \
+	"	GETL D0.6, D1.6, [A0StP+#-16]\n" \
+	"	GETL D0.7, D1.7, [A0StP+#-8]\n" \
+	"	SUB A0StP, A0StP, #40\n" \
 	"	.section .fixup,\"ax\"\n" \
-	"4:\n" \
-	"	ADD %0, %0, #8\n" \
-	"3:\n" \
-	"	MOV D0Ar2, TXSTATUS\n" \
+	"3:	MOV D0Ar2, TXSTATUS\n" \
 	"	MOV D1Ar1, TXSTATUS\n" \
 	"	AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \
 	"	MOV TXSTATUS, D1Ar1\n" \
 	FIXUP \
-	"	MOVT D0Ar2,#HI(1b)\n" \
-	"	JUMP D0Ar2,#LO(1b)\n" \
+	"	MOVT D0Ar2, #HI(1b)\n" \
+	"	JUMP D0Ar2, #LO(1b)\n" \
 	"	.previous\n" \
 	"	.section __ex_table,\"a\"\n" \
 	"	.long 21b,3b\n" \
@@ -319,7 +301,6 @@
 	"	.long 30b,3b\n" \
 	"	.long 31b,3b\n" \
 	"	.long 32b,3b\n" \
-	"	.long 33b,4b\n" \
 	"	.previous\n" \
 	: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
 	: "0" (to), "1" (from), "2" (ret), "3" (n) \
@@ -397,89 +378,59 @@
 #define __asm_copy_user_32bit_rapf_loop( \
 		to, from, ret, n, id, FIXUP) \
 	asm volatile ( \
-	".balign 8\n" \
-	"MOV RAPF, %1\n" \
-	"MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
-	"MOV D0Ar6, #0\n" \
-	"LSR D1Ar5, %3, #6\n" \
-	"SUB TXRPT, D1Ar5, #2\n" \
-	"MOV RAPF, %1\n" \
-	"$Lloop"id":\n" \
-	"ADD RAPF, %1, #64\n" \
-	"21:\n" \
-	"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
-	"22:\n" \
-	"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
-	"23:\n" \
-	"SUB %3, %3, #16\n" \
-	"24:\n" \
-	"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
-	"25:\n" \
-	"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
-	"26:\n" \
-	"SUB %3, %3, #16\n" \
-	"27:\n" \
-	"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
-	"28:\n" \
-	"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
-	"29:\n" \
-	"SUB %3, %3, #16\n" \
-	"30:\n" \
-	"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
-	"31:\n" \
-	"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
-	"32:\n" \
-	"SUB %3, %3, #16\n" \
-	"DCACHE [%1+#-64], D0Ar6\n" \
-	"BR $Lloop"id"\n" \
+	".balign 8\n" \
+	"	MOV RAPF, %1\n" \
+	"	MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
+	"	MOV D0Ar6, #0\n" \
+	"	LSR D1Ar5, %3, #6\n" \
+	"	SUB TXRPT, D1Ar5, #2\n" \
+	"	MOV RAPF, %1\n" \
+	"$Lloop"id":\n" \
+	"	ADD RAPF, %1, #64\n" \
+	"21:	MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+	"22:	MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+	"23:	SUB %3, %3, #16\n" \
+	"24:	MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+	"25:	MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+	"26:	SUB %3, %3, #16\n" \
+	"27:	MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+	"28:	MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+	"29:	SUB %3, %3, #16\n" \
+	"30:	MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+	"31:	MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+	"32:	SUB %3, %3, #16\n" \
+	"	DCACHE [%1+#-64], D0Ar6\n" \
+	"	BR $Lloop"id"\n" \
 	\
-	"MOV RAPF, %1\n" \
-	"33:\n" \
-	"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
-	"34:\n" \
-	"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
-	"35:\n" \
-	"SUB %3, %3, #16\n" \
-	"36:\n" \
-	"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
-	"37:\n" \
-	"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
-	"38:\n" \
-	"SUB %3, %3, #16\n" \
-	"39:\n" \
-	"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
-	"40:\n" \
-	"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
-	"41:\n" \
-	"SUB %3, %3, #16\n" \
-	"42:\n" \
-	"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
-	"43:\n" \
-	"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
-	"44:\n" \
-	"SUB %0, %0, #4\n" \
-	"45:\n" \
-	"SETD [%0++], D0.7\n" \
-	"SUB %3, %3, #16\n" \
-	"1:" \
-	"DCACHE [%1+#-64], D0Ar6\n" \
-	"GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
-	"GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
-	"GETL D0.5, D1.5, [A0StP+#-24]\n" \
-	"GETL D0.6, D1.6, [A0StP+#-16]\n" \
-	"GETL D0.7, D1.7, [A0StP+#-8]\n" \
-	"SUB A0StP, A0StP, #40\n" \
+	"	MOV RAPF, %1\n" \
+	"33:	MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+	"34:	MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+	"35:	SUB %3, %3, #16\n" \
+	"36:	MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+	"37:	MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+	"38:	SUB %3, %3, #16\n" \
+	"39:	MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+	"40:	MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+	"41:	SUB %3, %3, #16\n" \
+	"42:	MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+	"43:	MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+	"44:	SETD [%0+#-4], D0.7\n" \
+	"	SUB %3, %3, #16\n" \
+	"1:	DCACHE [%1+#-64], D0Ar6\n" \
+	"	GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
+	"	GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
+	"	GETL D0.5, D1.5, [A0StP+#-24]\n" \
+	"	GETL D0.6, D1.6, [A0StP+#-16]\n" \
+	"	GETL D0.7, D1.7, [A0StP+#-8]\n" \
+	"	SUB A0StP, A0StP, #40\n" \
 	"	.section .fixup,\"ax\"\n" \
-	"4:\n" \
-	"	ADD %0, %0, #4\n" \
-	"3:\n" \
-	"	MOV D0Ar2, TXSTATUS\n" \
+	"3:	MOV D0Ar2, TXSTATUS\n" \
 	"	MOV D1Ar1, TXSTATUS\n" \
 	"	AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \
 	"	MOV TXSTATUS, D1Ar1\n" \
 	FIXUP \
-	"	MOVT D0Ar2,#HI(1b)\n" \
-	"	JUMP D0Ar2,#LO(1b)\n" \
+	"	MOVT D0Ar2, #HI(1b)\n" \
+	"	JUMP D0Ar2, #LO(1b)\n" \
 	"	.previous\n" \
 	"	.section __ex_table,\"a\"\n" \
 	"	.long 21b,3b\n" \
@@ -506,7 +457,6 @@
 	"	.long 42b,3b\n" \
 	"	.long 43b,3b\n" \
 	"	.long 44b,3b\n" \
-	"	.long 45b,4b\n" \
 	"	.previous\n" \
 	: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
 	: "0" (to), "1" (from), "2" (ret), "3" (n) \
@@ -1094,6 +1044,30 @@ unsigned int __get_user_asm_d(const void __user *addr, long *err)
 }
 EXPORT_SYMBOL(__get_user_asm_d);
 
+unsigned long long __get_user_asm_l(const void __user *addr, long *err)
+{
+	register unsigned long long x asm ("D0Re0") = 0;
+	asm volatile (
+	"	GETL %0,%t0,[%2]\n"
+	"1:\n"
+	"	GETL %0,%t0,[%2]\n"
+	"2:\n"
+	"	.section .fixup,\"ax\"\n"
+	"3:	MOV D0FrT,%3\n"
+	"	SETD [%1],D0FrT\n"
+	"	MOVT D0FrT,#HI(2b)\n"
+	"	JUMP D0FrT,#LO(2b)\n"
+	"	.previous\n"
+	"	.section __ex_table,\"a\"\n"
+	"	.long 1b,3b\n"
+	"	.previous\n"
+		: "=r" (x)
+		: "r" (err), "r" (addr), "P" (-EFAULT)
+		: "D0FrT");
+	return x;
+}
+EXPORT_SYMBOL(__get_user_asm_l);
+
 long __put_user_asm_b(unsigned int x, void __user *addr)
 {
 	register unsigned int err asm ("D0Re0") = 0;
diff --git a/arch/metag/mm/mmu-meta1.c b/arch/metag/mm/mmu-meta1.c
index 91f4255bcb5c..62ebab90924d 100644
--- a/arch/metag/mm/mmu-meta1.c
+++ b/arch/metag/mm/mmu-meta1.c
@@ -152,6 +152,5 @@ void __init mmu_init(unsigned long mem_end)
 
 		p_swapper_pg_dir++;
 		addr += PGDIR_SIZE;
-		entry++;
 	}
 }