author     Russell King <rmk+kernel@arm.linux.org.uk>  2015-09-11 19:18:28 +0100
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2015-09-11 19:18:28 +0100
commit     c2172ce2303051764829d4958bd50a11ada0590f (patch)
tree       c465594496072249d2ad8ee4642f7c5dcd57e139 /arch
parent     a4a5a7379e4ca03c192b732d61e446994eb67bbc (diff)
parent     0b61f2c0f37983c98ed4207f3f5e265938371b68 (diff)
Merge branch 'uaccess' into fixes
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig                            |  15
-rw-r--r--  arch/arm/include/asm/assembler.h            |  42
-rw-r--r--  arch/arm/include/asm/domain.h               |  57
-rw-r--r--  arch/arm/include/asm/futex.h                |  19
-rw-r--r--  arch/arm/include/asm/pgtable-2level-hwdef.h |   1
-rw-r--r--  arch/arm/include/asm/thread_info.h          |   4
-rw-r--r--  arch/arm/include/asm/uaccess.h              |  85
-rw-r--r--  arch/arm/kernel/armksyms.c                  |   6
-rw-r--r--  arch/arm/kernel/entry-armv.S                |  32
-rw-r--r--  arch/arm/kernel/entry-common.S              |   2
-rw-r--r--  arch/arm/kernel/entry-header.S              | 112
-rw-r--r--  arch/arm/kernel/head.S                      |   5
-rw-r--r--  arch/arm/kernel/process.c                   |  51
-rw-r--r--  arch/arm/kernel/swp_emulate.c               |   3
-rw-r--r--  arch/arm/kernel/traps.c                     |   1
-rw-r--r--  arch/arm/lib/clear_user.S                   |   6
-rw-r--r--  arch/arm/lib/copy_from_user.S               |   6
-rw-r--r--  arch/arm/lib/copy_to_user.S                 |   6
-rw-r--r--  arch/arm/lib/csumpartialcopyuser.S          |  14
-rw-r--r--  arch/arm/lib/uaccess_with_memcpy.c          |   4
-rw-r--r--  arch/arm/mm/abort-ev4.S                     |   1
-rw-r--r--  arch/arm/mm/abort-ev5t.S                    |   4
-rw-r--r--  arch/arm/mm/abort-ev5tj.S                   |   4
-rw-r--r--  arch/arm/mm/abort-ev6.S                     |   8
-rw-r--r--  arch/arm/mm/abort-ev7.S                     |   1
-rw-r--r--  arch/arm/mm/abort-lv4t.S                    |   2
-rw-r--r--  arch/arm/mm/abort-macro.S                   |  14
-rw-r--r--  arch/arm/mm/mmu.c                           |   4
-rw-r--r--  arch/arm/mm/pgd.c                           |  10
-rw-r--r--  arch/arm/nwfpe/entry.S                      |   3
-rw-r--r--  arch/arm/xen/hypercall.S                    |  15
31 files changed, 398 insertions(+), 139 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 1c5021002fe4..a7a2e328edf9 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1700,6 +1700,21 @@ config HIGHPTE
consumed by page tables. Setting this option will allow
user-space 2nd level page tables to reside in high memory.
+config CPU_SW_DOMAIN_PAN
+ bool "Enable use of CPU domains to implement privileged no-access"
+ depends on MMU && !ARM_LPAE
+ default y
+ help
+ Increase kernel security by ensuring that normal kernel accesses
+ are unable to access userspace addresses. This can help prevent
+ use-after-free bugs becoming an exploitable privilege escalation
+ by ensuring that magic values (such as LIST_POISON) will always
+ fault when dereferenced.
+
+ CPUs with low-vector mappings use a best-effort implementation.
+ Their lower 1MB needs to remain accessible for the vectors, but
+ the remainder of userspace will become appropriately inaccessible.
+
config HW_PERF_EVENTS
bool "Enable hardware performance counter support for perf events"
depends on PERF_EVENTS
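
For reference, the new option works by repurposing the ARMv7 domain access
control register (DACR): sixteen 2-bit fields, one per domain, with hardware
encodings 0 = no access, 1 = client (page permissions checked) and
3 = manager (permissions ignored). A standalone C sketch (not part of the
patch; the names mirror the definitions added to
arch/arm/include/asm/domain.h below) of the arithmetic behind the new
DACR_UACCESS_* constants:

#include <stdio.h>

#define DOMAIN_KERNEL	0
#define DOMAIN_USER	1
#define DOMAIN_IO	2
#define DOMAIN_VECTORS	3

#define DOMAIN_NOACCESS	0
#define DOMAIN_CLIENT	1

#define domain_mask(dom)	(3u << (2 * (dom)))
#define domain_val(dom, type)	((unsigned int)(type) << (2 * (dom)))

int main(void)
{
	/* __DACR_DEFAULT | user no-access, as in DACR_UACCESS_DISABLE */
	unsigned int dacr = domain_val(DOMAIN_KERNEL,  DOMAIN_CLIENT) |
			    domain_val(DOMAIN_IO,      DOMAIN_CLIENT) |
			    domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT) |
			    domain_val(DOMAIN_USER,    DOMAIN_NOACCESS);
	printf("uaccess disabled: 0x%08x\n", dacr);	/* 0x00000051 */

	/* flip only the user field to client, as uaccess_save_and_enable()
	 * below does */
	dacr = (dacr & ~domain_mask(DOMAIN_USER)) |
	       domain_val(DOMAIN_USER, DOMAIN_CLIENT);
	printf("uaccess enabled:  0x%08x\n", dacr);	/* 0x00000055 */
	return 0;
}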
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 4abe57279c66..9007c518d1d8 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -445,6 +445,48 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
#endif
.endm
+ .macro uaccess_disable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+ * Whenever we re-enter userspace, the domains should always be
+ * set appropriately.
+ */
+ mov \tmp, #DACR_UACCESS_DISABLE
+ mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
+ .if \isb
+ instr_sync
+ .endif
+#endif
+ .endm
+
+ .macro uaccess_enable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+ * Whenever we re-enter userspace, the domains should always be
+ * set appropriately.
+ */
+ mov \tmp, #DACR_UACCESS_ENABLE
+ mcr p15, 0, \tmp, c3, c0, 0
+ .if \isb
+ instr_sync
+ .endif
+#endif
+ .endm
+
+ .macro uaccess_save, tmp
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ mrc p15, 0, \tmp, c3, c0, 0
+ str \tmp, [sp, #S_FRAME_SIZE]
+#endif
+ .endm
+
+ .macro uaccess_restore
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ ldr r0, [sp, #S_FRAME_SIZE]
+ mcr p15, 0, r0, c3, c0, 0
+#endif
+ .endm
+
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
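
Together these four macros bracket every kernel entry and exit: entry code
saves the old DACR and switches user access off, exit code restores or
re-enables it. A hedged C rendition of uaccess_disable (a sketch only;
instr_sync is assumed to expand to an isb on this CPU, and
DACR_UACCESS_DISABLE comes from the domain.h changes below):

static inline void uaccess_disable_sketch(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	asm volatile(
	"mcr	p15, 0, %0, c3, c0, 0	@ set DACR\n"
	"isb"
	: : "r" (DACR_UACCESS_DISABLE) : "memory");
#endif
}

Note that uaccess_save stores the old DACR one word past the saved pt_regs
([sp, #S_FRAME_SIZE]), which is why the __show_regs() change in
arch/arm/kernel/process.c below reads it as *(unsigned int *)(regs + 1).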
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe446425e..fc8ba1663601 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -12,6 +12,7 @@
#ifndef __ASSEMBLY__
#include <asm/barrier.h>
+#include <asm/thread_info.h>
#endif
/*
@@ -34,15 +35,14 @@
*/
#ifndef CONFIG_IO_36
#define DOMAIN_KERNEL 0
-#define DOMAIN_TABLE 0
#define DOMAIN_USER 1
#define DOMAIN_IO 2
#else
#define DOMAIN_KERNEL 2
-#define DOMAIN_TABLE 2
#define DOMAIN_USER 1
#define DOMAIN_IO 0
#endif
+#define DOMAIN_VECTORS 3
/*
* Domain types
@@ -55,30 +55,65 @@
#define DOMAIN_MANAGER 1
#endif
-#define domain_val(dom,type) ((type) << (2*(dom)))
+#define domain_mask(dom) ((3) << (2 * (dom)))
+#define domain_val(dom,type) ((type) << (2 * (dom)))
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#define DACR_INIT \
+ (domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#else
+#define DACR_INIT \
+ (domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#endif
+
+#define __DACR_DEFAULT \
+ domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)
+
+#define DACR_UACCESS_DISABLE \
+ (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+#define DACR_UACCESS_ENABLE \
+ (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))
#ifndef __ASSEMBLY__
-#ifdef CONFIG_CPU_USE_DOMAINS
+static inline unsigned int get_domain(void)
+{
+ unsigned int domain;
+
+ asm(
+ "mrc p15, 0, %0, c3, c0 @ get domain"
+ : "=r" (domain)
+ : "m" (current_thread_info()->cpu_domain));
+
+ return domain;
+}
+
static inline void set_domain(unsigned val)
{
asm volatile(
"mcr p15, 0, %0, c3, c0 @ set domain"
- : : "r" (val));
+ : : "r" (val) : "memory");
isb();
}
+#ifdef CONFIG_CPU_USE_DOMAINS
#define modify_domain(dom,type) \
do { \
- struct thread_info *thread = current_thread_info(); \
- unsigned int domain = thread->cpu_domain; \
- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
- thread->cpu_domain = domain | domain_val(dom, type); \
- set_domain(thread->cpu_domain); \
+ unsigned int domain = get_domain(); \
+ domain &= ~domain_mask(dom); \
+ domain = domain | domain_val(dom, type); \
+ set_domain(domain); \
} while (0)
#else
-static inline void set_domain(unsigned val) { }
static inline void modify_domain(unsigned dom, unsigned type) { }
#endif
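
With domain_mask() available, modify_domain() becomes a plain
read-modify-write of the live register rather than of the cached
thread_info copy. Expanded by hand for one concrete call (an illustrative
sketch; grant_io_manager is not a function in the patch):

/* roughly what modify_domain(DOMAIN_IO, DOMAIN_MANAGER) expands to */
static inline void grant_io_manager(void)
{
	unsigned int domain = get_domain();	/* mrc p15, 0, ..., c3, c0 */
	domain &= ~domain_mask(DOMAIN_IO);	/* clear bits [5:4]        */
	domain |= domain_val(DOMAIN_IO, DOMAIN_MANAGER);
	set_domain(domain);			/* mcr + isb               */
}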
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 5eed82809d82..6795368ad023 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -22,8 +22,11 @@
#ifdef CONFIG_SMP
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
+({ \
+ unsigned int __ua_flags; \
smp_mb(); \
prefetchw(uaddr); \
+ __ua_flags = uaccess_save_and_enable(); \
__asm__ __volatile__( \
"1: ldrex %1, [%3]\n" \
" " insn "\n" \
@@ -34,12 +37,15 @@
__futex_atomic_ex_table("%5") \
: "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
- : "cc", "memory")
+ : "cc", "memory"); \
+ uaccess_restore(__ua_flags); \
+})
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
+ unsigned int __ua_flags;
int ret;
u32 val;
@@ -49,6 +55,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
smp_mb();
/* Prefetching cannot fault */
prefetchw(uaddr);
+ __ua_flags = uaccess_save_and_enable();
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: ldrex %1, [%4]\n"
" teq %1, %2\n"
@@ -61,6 +68,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "=&r" (ret), "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
: "cc", "memory");
+ uaccess_restore(__ua_flags);
smp_mb();
*uval = val;
@@ -73,6 +81,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
#include <asm/domain.h>
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
+({ \
+ unsigned int __ua_flags = uaccess_save_and_enable(); \
__asm__ __volatile__( \
"1: " TUSER(ldr) " %1, [%3]\n" \
" " insn "\n" \
@@ -81,12 +91,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
__futex_atomic_ex_table("%5") \
: "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
- : "cc", "memory")
+ : "cc", "memory"); \
+ uaccess_restore(__ua_flags); \
+})
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
+ unsigned int __ua_flags;
int ret = 0;
u32 val;
@@ -94,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
preempt_disable();
+ __ua_flags = uaccess_save_and_enable();
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: " TUSER(ldr) " %1, [%4]\n"
" teq %1, %2\n"
@@ -103,6 +117,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "+r" (ret), "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
: "cc", "memory");
+ uaccess_restore(__ua_flags);
*uval = val;
preempt_enable();
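
Both futex paths follow the same bracketing idiom: open the user-access
window, perform the faulting access, close the window. Passing the saved
value through uaccess_restore(), rather than toggling a flag, is what makes
the windows nest correctly, which matters because __get_user() and friends
now open a window of their own. A toy userspace model (assumed values only;
the user domain occupies bits [3:2] of the DACR):

#include <stdio.h>

static unsigned int dacr = 0x51;		/* user domain: no access */

static unsigned int uaccess_save_and_enable(void)
{
	unsigned int old = dacr;
	dacr = (dacr & ~0x0c) | 0x04;		/* user domain -> client */
	return old;
}

static void uaccess_restore(unsigned int flags)
{
	dacr = flags;
}

int main(void)
{
	unsigned int outer = uaccess_save_and_enable();
	unsigned int inner = uaccess_save_and_enable();	/* nested window */
	uaccess_restore(inner);		/* still enabled: outer is open  */
	uaccess_restore(outer);		/* back to no access             */
	printf("final DACR 0x%02x\n", dacr);		/* prints 0x51   */
	return 0;
}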
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 5e68278e953e..d0131ee6f6af 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -23,6 +23,7 @@
#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
#define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
#define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
+#define PMD_DOMAIN_MASK PMD_DOMAIN(0x0f)
#define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
/*
* - section
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index bd32eded3e50..ae02e68b61fc 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -25,7 +25,6 @@
struct task_struct;
#include <asm/types.h>
-#include <asm/domain.h>
typedef unsigned long mm_segment_t;
@@ -74,9 +73,6 @@ struct thread_info {
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 74b17d09ef7a..01bae13b2cea 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -50,6 +50,35 @@ struct exception_table_entry
extern int fixup_exception(struct pt_regs *regs);
/*
+ * These two functions allow hooking accesses to userspace to increase
+ * system integrity by ensuring that the kernel cannot inadvertently
+ * perform such accesses (e.g. via list poison values), which could then
+ * be exploited for privilege escalation.
+ */
+static inline unsigned int uaccess_save_and_enable(void)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ unsigned int old_domain = get_domain();
+
+ /* Set the current domain access to permit user accesses */
+ set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
+ domain_val(DOMAIN_USER, DOMAIN_CLIENT));
+
+ return old_domain;
+#else
+ return 0;
+#endif
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /* Restore the user access mask */
+ set_domain(flags);
+#endif
+}
+
+/*
* These two are intentionally not defined anywhere - if the kernel
* code generates any references to them, that's a bug.
*/
@@ -165,6 +194,7 @@ extern int __get_user_64t_4(void *);
register typeof(x) __r2 asm("r2"); \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
+ unsigned int __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(__p))) { \
case 1: \
if (sizeof((x)) >= 8) \
@@ -192,6 +222,7 @@ extern int __get_user_64t_4(void *);
break; \
default: __e = __get_user_bad(); break; \
} \
+ uaccess_restore(__ua_flags); \
x = (typeof(*(p))) __r2; \
__e; \
})
@@ -224,6 +255,7 @@ extern int __put_user_8(void *, unsigned long long);
register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
+ unsigned int __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(__p))) { \
case 1: \
__put_user_x(__r2, __p, __e, __l, 1); \
@@ -239,6 +271,7 @@ extern int __put_user_8(void *, unsigned long long);
break; \
default: __e = __put_user_bad(); break; \
} \
+ uaccess_restore(__ua_flags); \
__e; \
})
@@ -300,14 +333,17 @@ static inline void set_fs(mm_segment_t fs)
do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
unsigned long __gu_val; \
+ unsigned int __ua_flags; \
__chk_user_ptr(ptr); \
might_fault(); \
+ __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(ptr))) { \
case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \
case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \
case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \
default: (__gu_val) = __get_user_bad(); \
} \
+ uaccess_restore(__ua_flags); \
(x) = (__typeof__(*(ptr)))__gu_val; \
} while (0)
@@ -381,9 +417,11 @@ do { \
#define __put_user_err(x, ptr, err) \
do { \
unsigned long __pu_addr = (unsigned long)(ptr); \
+ unsigned int __ua_flags; \
__typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
might_fault(); \
+ __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(ptr))) { \
case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \
case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \
@@ -391,6 +429,7 @@ do { \
case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \
default: __put_user_bad(); \
} \
+ uaccess_restore(__ua_flags); \
} while (0)
#define __put_user_asm_byte(x, __pu_addr, err) \
@@ -474,11 +513,46 @@ do { \
#ifdef CONFIG_MMU
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+arm_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned int __ua_flags = uaccess_save_and_enable();
+ n = arm_copy_from_user(to, from, n);
+ uaccess_restore(__ua_flags);
+ return n;
+}
+
+extern unsigned long __must_check
+arm_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+__copy_to_user_std(void __user *to, const void *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ unsigned int __ua_flags = uaccess_save_and_enable();
+ n = arm_copy_to_user(to, from, n);
+ uaccess_restore(__ua_flags);
+ return n;
+}
+
+extern unsigned long __must_check
+arm_clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+__clear_user_std(void __user *addr, unsigned long n);
+
+static inline unsigned long __must_check
+__clear_user(void __user *addr, unsigned long n)
+{
+ unsigned int __ua_flags = uaccess_save_and_enable();
+ n = arm_clear_user(addr, n);
+ uaccess_restore(__ua_flags);
+ return n;
+}
+
#else
#define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
#define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
@@ -511,6 +585,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
return n;
}
+/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
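
The pattern above is uniform: the raw assembler routines are renamed to
arm_*, and thin inline wrappers open the user-access window around them.
Any future raw user accessor would be wrapped the same way; a hypothetical
example (arm_copy_in_user does not exist in this patch):

extern unsigned long __must_check
arm_copy_in_user(void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	unsigned int __ua_flags = uaccess_save_and_enable();
	n = arm_copy_in_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
}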
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 5e5a51a99e68..f89811fb9a55 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -97,9 +97,9 @@ EXPORT_SYMBOL(mmiocpy);
#ifdef CONFIG_MMU
EXPORT_SYMBOL(copy_page);
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
-EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(arm_copy_from_user);
+EXPORT_SYMBOL(arm_copy_to_user);
+EXPORT_SYMBOL(arm_clear_user);
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index cb4fb1e69778..3e1c26eb32b4 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -149,10 +149,10 @@ ENDPROC(__und_invalid)
#define SPFIX(code...)
#endif
- .macro svc_entry, stack_hole=0, trace=1
+ .macro svc_entry, stack_hole=0, trace=1, uaccess=1
UNWIND(.fnstart )
UNWIND(.save {r0 - pc} )
- sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+ sub sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
SPFIX( str r0, [sp] ) @ temporarily saved
SPFIX( mov r0, sp )
@@ -167,7 +167,7 @@ ENDPROC(__und_invalid)
ldmia r0, {r3 - r5}
add r7, sp, #S_SP - 4 @ here for interlock avoidance
mov r6, #-1 @ "" "" "" ""
- add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+ add r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
SPFIX( addeq r2, r2, #4 )
str r3, [sp, #-4]! @ save the "real" r0 copied
@ from the exception stack
@@ -185,6 +185,11 @@ ENDPROC(__und_invalid)
@
stmia r7, {r2 - r6}
+ uaccess_save r0
+ .if \uaccess
+ uaccess_disable r0
+ .endif
+
.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
@@ -194,7 +199,7 @@ ENDPROC(__und_invalid)
.align 5
__dabt_svc:
- svc_entry
+ svc_entry uaccess=0
mov r2, sp
dabt_helper
THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR
@@ -368,7 +373,7 @@ ENDPROC(__fiq_abt)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif
- .macro usr_entry, trace=1
+ .macro usr_entry, trace=1, uaccess=1
UNWIND(.fnstart )
UNWIND(.cantunwind ) @ don't unwind the user space
sub sp, sp, #S_FRAME_SIZE
@@ -400,6 +405,10 @@ ENDPROC(__fiq_abt)
ARM( stmdb r0, {sp, lr}^ )
THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
+ .if \uaccess
+ uaccess_disable ip
+ .endif
+
@ Enable the alignment trap while in kernel mode
ATRAP( teq r8, r7)
ATRAP( mcrne p15, 0, r8, c1, c0, 0)
@@ -435,7 +444,7 @@ ENDPROC(__fiq_abt)
.align 5
__dabt_usr:
- usr_entry
+ usr_entry uaccess=0
kuser_cmpxchg_check
mov r2, sp
dabt_helper
@@ -458,7 +467,7 @@ ENDPROC(__irq_usr)
.align 5
__und_usr:
- usr_entry
+ usr_entry uaccess=0
mov r2, r4
mov r3, r5
@@ -484,6 +493,8 @@ __und_usr:
1: ldrt r0, [r4]
ARM_BE8(rev r0, r0) @ little endian instruction
+ uaccess_disable ip
+
@ r0 = 32-bit ARM instruction which caused the exception
@ r2 = PC value for the following instruction (:= regs->ARM_pc)
@ r4 = PC value for the faulting instruction
@@ -518,9 +529,10 @@ __und_usr_thumb:
2: ldrht r5, [r4]
ARM_BE8(rev16 r5, r5) @ little endian instruction
cmp r5, #0xe800 @ 32bit instruction if xx != 0
- blo __und_usr_fault_16 @ 16bit undefined instruction
+ blo __und_usr_fault_16_pan @ 16bit undefined instruction
3: ldrht r0, [r2]
ARM_BE8(rev16 r0, r0) @ little endian instruction
+ uaccess_disable ip
add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
orr r0, r0, r5, lsl #16
@@ -715,6 +727,8 @@ ENDPROC(no_fp)
__und_usr_fault_32:
mov r1, #4
b 1f
+__und_usr_fault_16_pan:
+ uaccess_disable ip
__und_usr_fault_16:
mov r1, #2
1: mov r0, sp
@@ -770,6 +784,8 @@ ENTRY(__switch_to)
ldr r4, [r2, #TI_TP_VALUE]
ldr r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
+ mrc p15, 0, r6, c3, c0, 0 @ Get domain register
+ str r6, [r1, #TI_CPU_DOMAIN] @ Save old domain register
ldr r6, [r2, #TI_CPU_DOMAIN]
#endif
switch_tls r1, r4, r5, r3, r7
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index b48dd4f37f80..61974dfba132 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -174,6 +174,8 @@ ENTRY(vector_swi)
USER( ldr scno, [lr, #-4] ) @ get SWI instruction
#endif
+ uaccess_disable tbl
+
adr tbl, sys_call_table @ load syscall table pointer
#if defined(CONFIG_OABI_COMPAT)
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 1a0045abead7..0d22ad206d52 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -196,7 +196,7 @@
msr cpsr_c, \rtemp @ switch back to the SVC mode
.endm
-#ifndef CONFIG_THUMB2_KERNEL
+
.macro svc_exit, rpsr, irq = 0
.if \irq != 0
@ IRQs already off
@@ -215,6 +215,10 @@
blne trace_hardirqs_off
#endif
.endif
+ uaccess_restore
+
+#ifndef CONFIG_THUMB2_KERNEL
+ @ ARM mode SVC restore
msr spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
@ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -222,6 +226,20 @@
strex r1, r2, [r0] @ clear the exclusive monitor
#endif
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+#else
+ @ Thumb mode SVC restore
+ ldr lr, [sp, #S_SP] @ top of the stack
+ ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
+
+ @ We must avoid clrex due to Cortex-A15 erratum #830321
+ strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor
+
+ stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context
+ ldmia sp, {r0 - r12}
+ mov sp, lr
+ ldr lr, [sp], #4
+ rfeia sp!
+#endif
.endm
@
@@ -241,6 +259,9 @@
@ on the stack remains correct).
@
.macro svc_exit_via_fiq
+ uaccess_restore
+#ifndef CONFIG_THUMB2_KERNEL
+ @ ARM mode restore
mov r0, sp
ldmib r0, {r1 - r14} @ abort is deadly from here onward (it will
@ clobber state restored below)
@@ -250,9 +271,27 @@
msr spsr_cxsf, r9
ldr r0, [r0, #S_R0]
ldmia r8, {pc}^
+#else
+ @ Thumb mode restore
+ add r0, sp, #S_R2
+ ldr lr, [sp, #S_LR]
+ ldr sp, [sp, #S_SP] @ abort is deadly from here onward (it will
+ @ clobber state restored below)
+ ldmia r0, {r2 - r12}
+ mov r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
+ msr cpsr_c, r1
+ sub r0, #S_R2
+ add r8, r0, #S_PC
+ ldmia r0, {r0 - r1}
+ rfeia r8
+#endif
.endm
+
.macro restore_user_regs, fast = 0, offset = 0
+ uaccess_enable r1, isb=0
+#ifndef CONFIG_THUMB2_KERNEL
+ @ ARM mode restore
mov r2, sp
ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr
ldr lr, [r2, #\offset + S_PC]! @ get pc
@@ -270,72 +309,16 @@
@ after ldm {}^
add sp, sp, #\offset + S_FRAME_SIZE
movs pc, lr @ return & move spsr_svc into cpsr
- .endm
-
-#else /* CONFIG_THUMB2_KERNEL */
- .macro svc_exit, rpsr, irq = 0
- .if \irq != 0
- @ IRQs already off
-#ifdef CONFIG_TRACE_IRQFLAGS
- @ The parent context IRQs must have been enabled to get here in
- @ the first place, so there's no point checking the PSR I bit.
- bl trace_hardirqs_on
-#endif
- .else
- @ IRQs off again before pulling preserved data off the stack
- disable_irq_notrace
-#ifdef CONFIG_TRACE_IRQFLAGS
- tst \rpsr, #PSR_I_BIT
- bleq trace_hardirqs_on
- tst \rpsr, #PSR_I_BIT
- blne trace_hardirqs_off
-#endif
- .endif
- ldr lr, [sp, #S_SP] @ top of the stack
- ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
-
- @ We must avoid clrex due to Cortex-A15 erratum #830321
- strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor
-
- stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context
- ldmia sp, {r0 - r12}
- mov sp, lr
- ldr lr, [sp], #4
- rfeia sp!
- .endm
-
- @
- @ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
- @
- @ For full details see non-Thumb implementation above.
- @
- .macro svc_exit_via_fiq
- add r0, sp, #S_R2
- ldr lr, [sp, #S_LR]
- ldr sp, [sp, #S_SP] @ abort is deadly from here onward (it will
- @ clobber state restored below)
- ldmia r0, {r2 - r12}
- mov r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
- msr cpsr_c, r1
- sub r0, #S_R2
- add r8, r0, #S_PC
- ldmia r0, {r0 - r1}
- rfeia r8
- .endm
-
-#ifdef CONFIG_CPU_V7M
- /*
- * Note we don't need to do clrex here as clearing the local monitor is
- * part of each exception entry and exit sequence.
- */
- .macro restore_user_regs, fast = 0, offset = 0
+#elif defined(CONFIG_CPU_V7M)
+ @ V7M restore.
+ @ Note that we don't need to do clrex here as clearing the local
+ @ monitor is part of the exception entry and exit sequence.
.if \offset
add sp, #\offset
.endif
v7m_exception_slow_exit ret_r0 = \fast
- .endm
-#else /* ifdef CONFIG_CPU_V7M */
- .macro restore_user_regs, fast = 0, offset = 0
+#else
+ @ Thumb mode restore
mov r2, sp
load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
@@ -353,9 +336,8 @@
.endif
add sp, sp, #S_FRAME_SIZE - S_SP
movs pc, lr @ return & move spsr_svc into cpsr
- .endm
-#endif /* ifdef CONFIG_CPU_V7M / else */
#endif /* !CONFIG_THUMB2_KERNEL */
+ .endm
/*
* Context tracking subsystem. Used to instrument transitions
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 29e2991465cb..04286fd9e09c 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -464,10 +464,7 @@ __enable_mmu:
#ifdef CONFIG_ARM_LPAE
mcrr p15, 0, r4, r5, c2 @ load TTBR0
#else
- mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+ mov r5, #DACR_INIT
mcr p15, 0, r5, c3, c0, 0 @ load domain access register
mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
#endif
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index f192a2a41719..e550a4541f48 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -129,12 +129,36 @@ void __show_regs(struct pt_regs *regs)
buf[4] = '\0';
#ifndef CONFIG_CPU_V7M
- printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
- buf, interrupts_enabled(regs) ? "n" : "ff",
- fast_interrupts_enabled(regs) ? "n" : "ff",
- processor_modes[processor_mode(regs)],
- isa_modes[isa_mode(regs)],
- get_fs() == get_ds() ? "kernel" : "user");
+ {
+ unsigned int domain = get_domain();
+ const char *segment;
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+ * Get the domain register for the parent context. In user
+ * mode, we don't save the DACR, so let's use what it should
+ * be. For other modes, we place it after the pt_regs struct.
+ */
+ if (user_mode(regs))
+ domain = DACR_UACCESS_ENABLE;
+ else
+ domain = *(unsigned int *)(regs + 1);
+#endif
+
+ if ((domain & domain_mask(DOMAIN_USER)) ==
+ domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+ segment = "none";
+ else if (get_fs() == get_ds())
+ segment = "kernel";
+ else
+ segment = "user";
+
+ printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
+ buf, interrupts_enabled(regs) ? "n" : "ff",
+ fast_interrupts_enabled(regs) ? "n" : "ff",
+ processor_modes[processor_mode(regs)],
+ isa_modes[isa_mode(regs)], segment);
+ }
#else
printk("xPSR: %08lx\n", regs->ARM_cpsr);
#endif
@@ -146,10 +170,9 @@ void __show_regs(struct pt_regs *regs)
buf[0] = '\0';
#ifdef CONFIG_CPU_CP15_MMU
{
- unsigned int transbase, dac;
+ unsigned int transbase, dac = get_domain();
asm("mrc p15, 0, %0, c2, c0\n\t"
- "mrc p15, 0, %1, c3, c0\n"
- : "=r" (transbase), "=r" (dac));
+ : "=r" (transbase));
snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x",
transbase, dac);
}
@@ -210,6 +233,16 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
+#ifdef CONFIG_CPU_USE_DOMAINS
+ /*
+ * Copy the initial value of the domain access control register
+ * from the current thread: thread->addr_limit will have been
+ * copied from the current thread via setup_thread_stack() in
+ * kernel/fork.c
+ */
+ thread->cpu_domain = get_domain();
+#endif
+
if (likely(!(p->flags & PF_KTHREAD))) {
*childregs = *current_pt_regs();
childregs->ARM_r0 = 0;
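
On CONFIG_CPU_USE_DOMAINS kernels the DACR is now a per-thread value:
copy_thread() snapshots the parent's live register into the child's
thread_info, and the __switch_to change in entry-armv.S above saves the
outgoing thread's register before loading the incoming one. A toy model of
that flow (illustrative only, not kernel code):

struct ti_model { unsigned int cpu_domain; };

static unsigned int live_dacr;		/* stands in for the CP15 register */

static void copy_thread_model(struct ti_model *child)
{
	child->cpu_domain = live_dacr;	/* get_domain() snapshot */
}

static void switch_to_model(struct ti_model *prev, struct ti_model *next)
{
	prev->cpu_domain = live_dacr;	/* save old domain register */
	live_dacr = next->cpu_domain;	/* load new thread's value  */
}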
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 1361756782c7..5b26e7efa9ea 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -141,11 +141,14 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
while (1) {
unsigned long temp;
+ unsigned int __ua_flags;
+ __ua_flags = uaccess_save_and_enable();
if (type == TYPE_SWPB)
__user_swpb_asm(*data, address, res, temp);
else
__user_swp_asm(*data, address, res, temp);
+ uaccess_restore(__ua_flags);
if (likely(res != -EAGAIN) || signal_pending(current))
break;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index d358226236f2..969f9d9e665f 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -870,7 +870,6 @@ void __init early_trap_init(void *vectors_base)
kuser_init(vectors_base);
flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
#else /* ifndef CONFIG_CPU_V7M */
/*
* on V7-M there is no need to copy the vector table to a dedicated
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 1710fd7db2d5..970d6c043774 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -12,14 +12,14 @@
.text
-/* Prototype: int __clear_user(void *addr, size_t sz)
+/* Prototype: unsigned long arm_clear_user(void *addr, size_t sz)
* Purpose : clear some user memory
* Params : addr - user memory address to clear
* : sz - number of bytes to clear
* Returns : number of bytes NOT cleared
*/
ENTRY(__clear_user_std)
-WEAK(__clear_user)
+WEAK(arm_clear_user)
stmfd sp!, {r1, lr}
mov r2, #0
cmp r1, #4
@@ -44,7 +44,7 @@ WEAK(__clear_user)
USER( strnebt r2, [r0])
mov r0, #0
ldmfd sp!, {r1, pc}
-ENDPROC(__clear_user)
+ENDPROC(arm_clear_user)
ENDPROC(__clear_user_std)
.pushsection .text.fixup,"ax"
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 7a235b9952be..1512bebfbf1b 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -17,7 +17,7 @@
/*
* Prototype:
*
- * size_t __copy_from_user(void *to, const void *from, size_t n)
+ * size_t arm_copy_from_user(void *to, const void *from, size_t n)
*
* Purpose:
*
@@ -89,11 +89,11 @@
.text
-ENTRY(__copy_from_user)
+ENTRY(arm_copy_from_user)
#include "copy_template.S"
-ENDPROC(__copy_from_user)
+ENDPROC(arm_copy_from_user)
.pushsection .fixup,"ax"
.align 0
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index 9648b0675a3e..caf5019d8161 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -17,7 +17,7 @@
/*
* Prototype:
*
- * size_t __copy_to_user(void *to, const void *from, size_t n)
+ * size_t arm_copy_to_user(void *to, const void *from, size_t n)
*
* Purpose:
*
@@ -93,11 +93,11 @@
.text
ENTRY(__copy_to_user_std)
-WEAK(__copy_to_user)
+WEAK(arm_copy_to_user)
#include "copy_template.S"
-ENDPROC(__copy_to_user)
+ENDPROC(arm_copy_to_user)
ENDPROC(__copy_to_user_std)
.pushsection .text.fixup,"ax"
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 1d0957e61f89..1712f132b80d 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -17,6 +17,19 @@
.text
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ .macro save_regs
+ mrc p15, 0, ip, c3, c0, 0
+ stmfd sp!, {r1, r2, r4 - r8, ip, lr}
+ uaccess_enable ip
+ .endm
+
+ .macro load_regs
+ ldmfd sp!, {r1, r2, r4 - r8, ip, lr}
+ mcr p15, 0, ip, c3, c0, 0
+ ret lr
+ .endm
+#else
.macro save_regs
stmfd sp!, {r1, r2, r4 - r8, lr}
.endm
@@ -24,6 +37,7 @@
.macro load_regs
ldmfd sp!, {r1, r2, r4 - r8, pc}
.endm
+#endif
.macro load1b, reg1
ldrusr \reg1, r0, 1
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 4b39af2dfda9..d72b90905132 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -136,7 +136,7 @@ out:
}
unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+arm_copy_to_user(void __user *to, const void *from, unsigned long n)
{
/*
* This test is stubbed out of the main function above to keep
@@ -190,7 +190,7 @@ out:
return n;
}
-unsigned long __clear_user(void __user *addr, unsigned long n)
+unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
/* See rationale for this in __copy_to_user() above. */
if (n < 64)
diff --git a/arch/arm/mm/abort-ev4.S b/arch/arm/mm/abort-ev4.S
index 54473cd4aba9..b3b31e30cadd 100644
--- a/arch/arm/mm/abort-ev4.S
+++ b/arch/arm/mm/abort-ev4.S
@@ -19,6 +19,7 @@ ENTRY(v4_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
ldr r3, [r4] @ read aborted ARM instruction
+ uaccess_disable ip @ disable userspace access
bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR
tst r3, #1 << 20 @ L = 1 -> write?
orreq r1, r1, #1 << 11 @ yes.
diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S
index a0908d4653a3..a6a381a6caa5 100644
--- a/arch/arm/mm/abort-ev5t.S
+++ b/arch/arm/mm/abort-ev5t.S
@@ -21,8 +21,10 @@ ENTRY(v5t_early_abort)
mrc p15, 0, r0, c6, c0, 0 @ get FAR
do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
ldreq r3, [r4] @ read aborted ARM instruction
+ uaccess_disable ip @ disable user access
bic r1, r1, #1 << 11 @ clear bits 11 of FSR
- do_ldrd_abort tmp=ip, insn=r3
+ teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
+ beq do_DataAbort @ yes
tst r3, #1 << 20 @ check write
orreq r1, r1, #1 << 11
b do_DataAbort
diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S
index 4006b7a61264..00ab011bef58 100644
--- a/arch/arm/mm/abort-ev5tj.S
+++ b/arch/arm/mm/abort-ev5tj.S
@@ -24,7 +24,9 @@ ENTRY(v5tj_early_abort)
bne do_DataAbort
do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
ldreq r3, [r4] @ read aborted ARM instruction
- do_ldrd_abort tmp=ip, insn=r3
+ uaccess_disable ip @ disable userspace access
+ teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
+ beq do_DataAbort @ yes
tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes.
b do_DataAbort
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 8c48c5c22a33..8801a15aa105 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -26,16 +26,18 @@ ENTRY(v6_early_abort)
ldr ip, =0x4107b36
mrc p15, 0, r3, c0, c0, 0 @ get processor id
teq ip, r3, lsr #4 @ r0 ARM1136?
- bne do_DataAbort
+ bne 1f
tst r5, #PSR_J_BIT @ Java?
tsteq r5, #PSR_T_BIT @ Thumb?
- bne do_DataAbort
+ bne 1f
bic r1, r1, #1 << 11 @ clear bit 11 of FSR
ldr r3, [r4] @ read aborted ARM instruction
ARM_BE8(rev r3, r3)
- do_ldrd_abort tmp=ip, insn=r3
+ teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
+ beq 1f @ yes
tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes.
#endif
+1: uaccess_disable ip @ disable userspace access
b do_DataAbort
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index 4812ad054214..e8d0e08c227f 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -15,6 +15,7 @@
ENTRY(v7_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
+ uaccess_disable ip @ disable userspace access
/*
* V6 code adjusts the returned DFSR.
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index f3982580c273..6d8e8e3365d1 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -26,6 +26,7 @@ ENTRY(v4t_late_abort)
#endif
bne .data_thumb_abort
ldr r8, [r4] @ read arm instruction
+ uaccess_disable ip @ disable userspace access
tst r8, #1 << 20 @ L = 1 -> write?
orreq r1, r1, #1 << 11 @ yes.
and r7, r8, #15 << 24
@@ -155,6 +156,7 @@ ENTRY(v4t_late_abort)
.data_thumb_abort:
ldrh r8, [r4] @ read instruction
+ uaccess_disable ip @ disable userspace access
tst r8, #1 << 11 @ L = 1 -> write?
orreq r1, r1, #1 << 8 @ yes
and r7, r8, #15 << 12
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S
index 2cbf68ef0e83..4509bee4e081 100644
--- a/arch/arm/mm/abort-macro.S
+++ b/arch/arm/mm/abort-macro.S
@@ -13,6 +13,7 @@
tst \psr, #PSR_T_BIT
beq not_thumb
ldrh \tmp, [\pc] @ Read aborted Thumb instruction
+ uaccess_disable ip @ disable userspace access
and \tmp, \tmp, # 0xfe00 @ Mask opcode field
cmp \tmp, # 0x5600 @ Is it ldrsb?
orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes
@@ -29,12 +30,9 @@ not_thumb:
* [7:4] == 1101
* [20] == 0
*/
- .macro do_ldrd_abort, tmp, insn
- tst \insn, #0x0e100000 @ [27:25,20] == 0
- bne not_ldrd
- and \tmp, \insn, #0x000000f0 @ [7:4] == 1101
- cmp \tmp, #0x000000d0
- beq do_DataAbort
-not_ldrd:
+ .macro teq_ldrd, tmp, insn
+ mov \tmp, #0x0e100000
+ orr \tmp, #0x000000f0
+ and \tmp, \insn, \tmp
+ teq \tmp, #0x000000d0
.endm
-
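
The rewritten macro folds the two tests into one mask-and-compare: an ARM
LDRD has bits [27:25] = 000, bit [20] = 0 and bits [7:4] = 1101, so
(insn & 0x0e1000f0) == 0x000000d0 identifies it, leaving the caller to
branch on the EQ flag. A standalone check of that decode (a sketch; the
instruction words in the asserts were worked out by hand):

#include <assert.h>
#include <stdint.h>

static int is_ldrd(uint32_t insn)
{
	return (insn & 0x0e1000f0) == 0x000000d0;
}

int main(void)
{
	assert(is_ldrd(0xe1c420d0));	/* ldrd r2, [r4, #0] */
	assert(!is_ldrd(0xe5942000));	/* ldr  r2, [r4]     */
	return 0;
}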
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 870838a46d52..1cb9c1c1c05f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -291,13 +291,13 @@ static struct mem_type mem_types[] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_RDONLY,
.prot_l1 = PMD_TYPE_TABLE,
- .domain = DOMAIN_USER,
+ .domain = DOMAIN_VECTORS,
},
[MT_HIGH_VECTORS] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_USER | L_PTE_RDONLY,
.prot_l1 = PMD_TYPE_TABLE,
- .domain = DOMAIN_USER,
+ .domain = DOMAIN_VECTORS,
},
[MT_MEMORY_RWX] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index a3681f11dd9f..e683db1b90a3 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -84,6 +84,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
if (!new_pte)
goto no_pte;
+#ifndef CONFIG_ARM_LPAE
+ /*
+ * Modify the pmd entry to use the vectors domain. This needs
+ * to be the vectors domain so that the low vectors remain
+ * accessible when the user domain is set to no-access.
+ */
+ pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
+ pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
+#endif
+
init_pud = pud_offset(init_pgd, 0);
init_pmd = pmd_offset(init_pud, 0);
init_pte = pte_offset_map(init_pmd, 0);
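
The domain field of a 2-level first-level descriptor sits in bits [8:5],
which is what PMD_DOMAIN()/PMD_DOMAIN_MASK manipulate. A small standalone
check of the bit surgery performed above (illustrative only):

#include <assert.h>
#include <stdint.h>

#define PMD_DOMAIN(x)	((uint32_t)(x) << 5)
#define PMD_DOMAIN_MASK	PMD_DOMAIN(0x0f)
#define DOMAIN_VECTORS	3

int main(void)
{
	uint32_t pmd = 0x00000001;	/* page-table descriptor, domain 0 */
	pmd &= ~PMD_DOMAIN_MASK;
	pmd |= PMD_DOMAIN(DOMAIN_VECTORS);
	assert((pmd & PMD_DOMAIN_MASK) == (3u << 5));	/* 0x60 */
	return 0;
}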
diff --git a/arch/arm/nwfpe/entry.S b/arch/arm/nwfpe/entry.S
index 71df43547659..39c20afad7ed 100644
--- a/arch/arm/nwfpe/entry.S
+++ b/arch/arm/nwfpe/entry.S
@@ -95,9 +95,10 @@ emulate:
reteq r4 @ no, return failure
next:
+ uaccess_enable r3
.Lx1: ldrt r6, [r5], #4 @ get the next instruction and
@ increment PC
-
+ uaccess_disable r3
and r2, r6, #0x0F000000 @ test for FP insns
teq r2, #0x0C000000
teqne r2, #0x0D000000
diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S
index f00e08075938..10fd99c568c6 100644
--- a/arch/arm/xen/hypercall.S
+++ b/arch/arm/xen/hypercall.S
@@ -98,8 +98,23 @@ ENTRY(privcmd_call)
mov r1, r2
mov r2, r3
ldr r3, [sp, #8]
+ /*
+ * Privcmd calls are issued by userspace. We need to allow the
+ * kernel to access the userspace memory before issuing the hypercall.
+ */
+ uaccess_enable r4
+
+ /* r4 is loaded now, as it was used as a scratch register above */
ldr r4, [sp, #4]
__HVC(XEN_IMM)
+
+ /*
+ * Disable userspace access from the kernel. This is fine to do
+ * unconditionally, as no set_fs(KERNEL_DS)/set_fs(get_ds()) is
+ * called beforehand.
+ */
+ uaccess_disable r4
+
ldm sp!, {r4}
ret lr
ENDPROC(privcmd_call);