Diffstat (limited to 'arch/arm/kernel/entry-common.S')
-rw-r--r--  arch/arm/kernel/entry-common.S  326
1 file changed, 242 insertions(+), 84 deletions(-)
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 366e5097a41a..1e7b04a40a31 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -29,18 +29,14 @@ ret_fast_syscall:
ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK
bne fast_work_pending
+#if defined(CONFIG_IRQSOFF_TRACER)
+ asm_trace_hardirqs_on
+#endif
/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
- @ fast_restore_user_regs
- ldr r1, [sp, #S_OFF + S_PSR] @ get calling cpsr
- ldr lr, [sp, #S_OFF + S_PC]! @ get pc
- msr spsr_cxsf, r1 @ save in spsr_svc
- ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
- mov r0, r0
- add sp, sp, #S_FRAME_SIZE - S_PC
- movs pc, lr @ return & move spsr_svc into cpsr
+ restore_user_regs fast = 1, offset = S_OFF
UNWIND(.fnend )
/*
@@ -51,10 +47,12 @@ fast_work_pending:
work_pending:
tst r1, #_TIF_NEED_RESCHED
bne work_resched
- tst r1, #_TIF_SIGPENDING
+ tst r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
beq no_work_pending
mov r0, sp @ 'regs'
mov r2, why @ 'syscall'
+ tst r1, #_TIF_SIGPENDING @ delivering a signal?
+ movne why, #0 @ prevent further restarts
bl do_notify_resume
b ret_slow_syscall @ Check work again
@@ -70,17 +68,13 @@ ret_slow_syscall:
tst r1, #_TIF_WORK_MASK
bne work_pending
no_work_pending:
+#if defined(CONFIG_IRQSOFF_TRACER)
+ asm_trace_hardirqs_on
+#endif
/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
- @ slow_restore_user_regs
- ldr r1, [sp, #S_PSR] @ get calling cpsr
- ldr lr, [sp, #S_PC]! @ get pc
- msr spsr_cxsf, r1 @ save in spsr_svc
- ldmdb sp, {r0 - lr}^ @ get calling r0 - lr
- mov r0, r0
- add sp, sp, #S_FRAME_SIZE - S_PC
- movs pc, lr @ return & move spsr_svc into cpsr
+ restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user)
/*
@@ -106,56 +100,222 @@ ENDPROC(ret_from_fork)
#define CALL(x) .long x
#ifdef CONFIG_FUNCTION_TRACER
+/*
+ * When compiling with -pg, gcc inserts a call to the mcount routine at the
+ * start of every function. In mcount, apart from the function's address (in
+ * lr), we need to get hold of the function's caller's address.
+ *
+ * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
+ *
+ * bl mcount
+ *
+ * These versions have the limitation that in order for the mcount routine to
+ * be able to determine the function's caller's address, an APCS-style frame
+ * pointer (which is set up with something like the code below) is required.
+ *
+ * mov ip, sp
+ * push {fp, ip, lr, pc}
+ * sub fp, ip, #4
+ *
+ * With EABI, these frame pointers are not available unless -mapcs-frame is
+ * specified, and if building as Thumb-2, not even then.
+ *
+ * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
+ * with call sites like:
+ *
+ * push {lr}
+ * bl __gnu_mcount_nc
+ *
+ * With these compilers, frame pointers are not necessary.
+ *
+ * mcount can be thought of as a function called in the middle of a subroutine
+ * call. As such, it needs to be transparent for both the caller and the
+ * callee: the original lr needs to be restored when leaving mcount, and no
+ * registers should be clobbered. (In the __gnu_mcount_nc implementation, we
+ * clobber the ip register. This is OK because the ARM calling convention
+ * allows it to be clobbered in subroutines and doesn't use it to hold
+ * parameters.)
+ *
+ * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
+ * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
+ * arch/arm/kernel/ftrace.c).
+ */
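
A minimal sketch (not part of the diff) of the two call styles for a
hypothetical function foo, showing where foo's caller's lr ends up in each
case:

	@ Old-style (pre-4.4 GCC, APCS frame):
	foo:	mov	ip, sp
		push	{fp, ip, lr, pc}
		sub	fp, ip, #4		@ caller's lr is now at [fp, #-4]
		bl	mcount			@ lr = return address into foo

	@ New-style (GCC 4.4+, no frame pointer required):
	foo:	push	{lr}			@ caller's lr on top of the stack
		bl	__gnu_mcount_nc		@ lr = return address into foo

This is also why "pop {lr}" is the right no-op patch for the
__gnu_mcount_nc case: it undoes the "push {lr}" at the call site.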
+
+#ifndef CONFIG_OLD_MCOUNT
+#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
+#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
+#endif
+#endif
+
+.macro __mcount suffix
+ mcount_enter
+ ldr r0, =ftrace_trace_function
+ ldr r2, [r0]
+ adr r0, .Lftrace_stub
+ cmp r0, r2
+ bne 1f
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ ldr r1, =ftrace_graph_return
+ ldr r2, [r1]
+ cmp r0, r2
+ bne ftrace_graph_caller\suffix
+
+ ldr r1, =ftrace_graph_entry
+ ldr r2, [r1]
+ ldr r0, =ftrace_graph_entry_stub
+ cmp r0, r2
+ bne ftrace_graph_caller\suffix
+#endif
+
+ mcount_exit
+
+1: mcount_get_lr r1 @ lr of instrumented func
+ mov r0, lr @ instrumented function
+ sub r0, r0, #MCOUNT_INSN_SIZE
+ adr lr, BSYM(2f)
+ mov pc, r2
+2: mcount_exit
+.endm
+
+.macro __ftrace_caller suffix
+ mcount_enter
+
+ mcount_get_lr r1 @ lr of instrumented func
+ mov r0, lr @ instrumented function
+ sub r0, r0, #MCOUNT_INSN_SIZE
+
+ .globl ftrace_call\suffix
+ftrace_call\suffix:
+ bl ftrace_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .globl ftrace_graph_call\suffix
+ftrace_graph_call\suffix:
+ mov r0, r0
+#endif
+
+ mcount_exit
+.endm
+
+.macro __ftrace_graph_caller
+ sub r0, fp, #4 @ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
+ @ called from __ftrace_caller, saved in mcount_enter
+ ldr r1, [sp, #16] @ instrumented routine (func)
+#else
+ @ called from __mcount, untouched in lr
+ mov r1, lr @ instrumented routine (func)
+#endif
+ sub r1, r1, #MCOUNT_INSN_SIZE
+ mov r2, fp @ frame pointer
+ bl prepare_ftrace_return
+ mcount_exit
+.endm
+
+#ifdef CONFIG_OLD_MCOUNT
+/*
+ * mcount
+ */
+
+.macro mcount_enter
+ stmdb sp!, {r0-r3, lr}
+.endm
+
+.macro mcount_get_lr reg
+ ldr \reg, [fp, #-4]
+.endm
+
+.macro mcount_exit
+ ldr lr, [fp, #-4]
+ ldmia sp!, {r0-r3, pc}
+.endm
+
ENTRY(mcount)
- stmdb sp!, {r0-r3, lr}
- mov r0, lr
- sub r0, r0, #MCOUNT_INSN_SIZE
+#ifdef CONFIG_DYNAMIC_FTRACE
+ stmdb sp!, {lr}
+ ldr lr, [fp, #-4]
+ ldmia sp!, {pc}
+#else
+ __mcount _old
+#endif
+ENDPROC(mcount)
- .globl mcount_call
-mcount_call:
- bl ftrace_stub
- ldr lr, [fp, #-4] @ restore lr
- ldmia sp!, {r0-r3, pc}
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(ftrace_caller_old)
+ __ftrace_caller _old
+ENDPROC(ftrace_caller_old)
+#endif
-ENTRY(ftrace_caller)
- stmdb sp!, {r0-r3, lr}
- ldr r1, [fp, #-4]
- mov r0, lr
- sub r0, r0, #MCOUNT_INSN_SIZE
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller_old)
+ __ftrace_graph_caller
+ENDPROC(ftrace_graph_caller_old)
+#endif
+
+.purgem mcount_enter
+.purgem mcount_get_lr
+.purgem mcount_exit
+#endif
- .globl ftrace_call
-ftrace_call:
- bl ftrace_stub
- ldr lr, [fp, #-4] @ restore lr
- ldmia sp!, {r0-r3, pc}
+/*
+ * __gnu_mcount_nc
+ */
+.macro mcount_enter
+ stmdb sp!, {r0-r3, lr}
+.endm
+
+.macro mcount_get_lr reg
+ ldr \reg, [sp, #20]
+.endm
+
+.macro mcount_exit
+ ldmia sp!, {r0-r3, ip, lr}
+ mov pc, ip
+.endm
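
A sketch of the stack inside __gnu_mcount_nc once mcount_enter has run,
assuming the GCC 4.4+ call site shown above ("push {lr}; bl __gnu_mcount_nc"):

	@ [sp, #20]	caller's lr	(pushed at the call site; this is
	@				 what mcount_get_lr reads)
	@ [sp, #16]	lr		(return address into the instrumented
	@				 function, pushed by mcount_enter)
	@ [sp, #12]	r3
	@ [sp, #8]	r2
	@ [sp, #4]	r1
	@ [sp, #0]	r0

mcount_exit therefore pops six words even though mcount_enter pushed five:
r0-r3 are restored, ip takes the return address into the instrumented
function, lr takes the caller's lr, and "mov pc, ip" resumes the
instrumented function with its original lr back in place.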
+
+ENTRY(__gnu_mcount_nc)
+#ifdef CONFIG_DYNAMIC_FTRACE
+ mov ip, lr
+ ldmia sp!, {lr}
+ mov pc, ip
#else
+ __mcount
+#endif
+ENDPROC(__gnu_mcount_nc)
-ENTRY(mcount)
- stmdb sp!, {r0-r3, lr}
- ldr r0, =ftrace_trace_function
- ldr r2, [r0]
- adr r0, ftrace_stub
- cmp r0, r2
- bne trace
- ldr lr, [fp, #-4] @ restore lr
- ldmia sp!, {r0-r3, pc}
-
-trace:
- ldr r1, [fp, #-4] @ lr of instrumented routine
- mov r0, lr
- sub r0, r0, #MCOUNT_INSN_SIZE
- mov lr, pc
- mov pc, r2
- mov lr, r1 @ restore lr
- ldmia sp!, {r0-r3, pc}
-
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
- .globl ftrace_stub
-ftrace_stub:
- mov pc, lr
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(ftrace_caller)
+ __ftrace_caller
+ENDPROC(ftrace_caller)
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+ __ftrace_graph_caller
+ENDPROC(ftrace_graph_caller)
+#endif
+
+.purgem mcount_enter
+.purgem mcount_get_lr
+.purgem mcount_exit
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .globl return_to_handler
+return_to_handler:
+ stmdb sp!, {r0-r3}
+ mov r0, fp @ frame pointer
+ bl ftrace_return_to_handler
+ mov lr, r0 @ r0 has real ret addr
+ ldmia sp!, {r0-r3}
+ mov pc, lr
+#endif
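
A rough sketch, assuming hypothetical functions foo and bar, of how the
graph tracer's return hijack fits together (prepare_ftrace_return is on the
C side, see arch/arm/kernel/ftrace.c):

	bar:	bl	foo		@ lr = return address back in bar
	foo:	...			@ -pg prologue + mcount call site;
					@ ftrace_graph_caller passes &(saved lr)
					@ to prepare_ftrace_return, which stashes
					@ the real address and stores
					@ return_to_handler in its place
		...			@ foo's epilogue reloads the saved lr
					@ and "returns" into return_to_handler,
					@ which asks ftrace_return_to_handler
					@ for the real address back in bar

The stmdb/ldmia pair around the call keeps r0-r3 intact, since
return_to_handler runs in the middle of foo's return path, where r0 and r1
may still hold foo's return value.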
+
+ENTRY(ftrace_stub)
+.Lftrace_stub:
+ mov pc, lr
+ENDPROC(ftrace_stub)
#endif /* CONFIG_FUNCTION_TRACER */
@@ -182,8 +342,10 @@ ftrace_stub:
ENTRY(vector_swi)
sub sp, sp, #S_FRAME_SIZE
stmia sp, {r0 - r12} @ Calling r0 - r12
- add r8, sp, #S_PC
- stmdb r8, {sp, lr}^ @ Calling sp, lr
+ ARM( add r8, sp, #S_PC )
+ ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr
+ THUMB( mov r8, sp )
+ THUMB( store_user_sp_lr r8, r10, S_SP ) @ calling sp, lr
mrs r8, spsr @ called from non-FIQ mode, so ok.
str lr, [sp, #S_PC] @ Save calling PC
str r8, [sp, #S_PSR] @ Save CPSR
@@ -250,7 +412,6 @@ ENTRY(vector_swi)
get_thread_info tsk
adr tbl, sys_call_table @ load syscall table pointer
- ldr ip, [tsk, #TI_FLAGS] @ check for syscall tracing
#if defined(CONFIG_OABI_COMPAT)
/*
@@ -267,12 +428,24 @@ ENTRY(vector_swi)
eor scno, scno, #__NR_SYSCALL_BASE @ check OS number
#endif
+ ldr r10, [tsk, #TI_FLAGS] @ check for syscall tracing
stmdb sp!, {r4, r5} @ push fifth and sixth args
- tst ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
+
+#ifdef CONFIG_SECCOMP
+ tst r10, #_TIF_SECCOMP
+ beq 1f
+ mov r0, scno
+ bl __secure_computing
+ add r0, sp, #S_R0 + S_OFF @ pointer to regs
+ ldmia r0, {r0 - r3} @ have to reload r0 - r3
+1:
+#endif
+
+ tst r10, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
bne __sys_trace
cmp scno, #NR_syscalls @ check upper syscall limit
- adr lr, ret_fast_syscall @ return address
+ adr lr, BSYM(ret_fast_syscall) @ return address
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
add r1, sp, #S_OFF
@@ -293,7 +466,7 @@ __sys_trace:
mov r0, #0 @ trace entry [IP = 0]
bl syscall_trace
- adr lr, __sys_trace_return @ return address
+ adr lr, BSYM(__sys_trace_return) @ return address
mov scno, r0 @ syscall number (possibly new)
add r1, sp, #S_R0 + S_OFF @ pointer to regs
cmp scno, #NR_syscalls @ check upper syscall limit
@@ -373,23 +546,15 @@ sys_clone_wrapper:
b sys_clone
ENDPROC(sys_clone_wrapper)
-sys_sigsuspend_wrapper:
- add r3, sp, #S_OFF
- b sys_sigsuspend
-ENDPROC(sys_sigsuspend_wrapper)
-
-sys_rt_sigsuspend_wrapper:
- add r2, sp, #S_OFF
- b sys_rt_sigsuspend
-ENDPROC(sys_rt_sigsuspend_wrapper)
-
sys_sigreturn_wrapper:
add r0, sp, #S_OFF
+ mov why, #0 @ prevent syscall restart handling
b sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)
sys_rt_sigreturn_wrapper:
add r0, sp, #S_OFF
+ mov why, #0 @ prevent syscall restart handling
b sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
@@ -419,22 +584,15 @@ sys_mmap2:
tst r5, #PGOFF_MASK
moveq r5, r5, lsr #PAGE_SHIFT - 12
streq r5, [sp, #4]
- beq do_mmap2
+ beq sys_mmap_pgoff
mov r0, #-EINVAL
mov pc, lr
#else
str r5, [sp, #4]
- b do_mmap2
+ b sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)
-ENTRY(pabort_ifar)
- mrc p15, 0, r0, cr6, cr0, 2
-ENTRY(pabort_noifar)
- mov pc, lr
-ENDPROC(pabort_ifar)
-ENDPROC(pabort_noifar)
-
#ifdef CONFIG_OABI_COMPAT
/*