author		Nicolas Pitre <nico@cam.org>			2006-01-14 16:31:29 +0000
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2006-01-14 16:31:29 +0000
commit		3f2829a31573e3e502b874c8d69a765f7a778793 (patch)
tree		40f64826c0d2964c56c366f770e2d3959123eb59 /arch/arm/kernel
parent		ba95e4e4a0a8a3c6aba363d45f78d5f2e2d111eb (diff)
[ARM] 3105/4: ARM EABI: new syscall entry convention
Patch from Nicolas Pitre

For a while we have wanted to change the way syscalls are called on ARM. Instead of encoding the syscall number in the swi instruction, which requires reading the instruction back from memory to extract that number and pollutes the data cache, it was decided that simply storing the syscall number in r7 would be more efficient. Since this represents an ABI change, making it at the same time as EABI support is the right thing to do.

EABI user space binaries are now expected to put the syscall number into r7 and use "swi 0" to call the kernel. Syscall register arguments are also expected to follow the "EABI arrangement", i.e. 64-bit arguments must be placed in a pair of registers starting at an even register number.

Example with long ftruncate64(unsigned int fd, loff_t length):

	legacy ABI:
	- put fd into r0
	- put length into r1-r2
	- use "swi #(0x900000 + 194)" to call the kernel

	new ARM EABI:
	- put fd into r0
	- put length into r2-r3 (skipping over r1)
	- put 194 into r7
	- use "swi 0" to call the kernel

Note that it is important to use 0 for the swi argument, as backward compatibility with legacy ABI user space relies on this. The syscall macros in asm-arm/unistd.h were also updated to support both ABIs and select the right call method automatically.

Signed-off-by: Nicolas Pitre <nico@cam.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
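As a concrete illustration of the two conventions described above, a minimal user-space sketch of ftruncate64 stubs using GCC inline assembly might look like the following. This is not part of the patch: the function names are hypothetical, the low/high word split assumes little-endian layout, and error handling is omitted.

	/* Hypothetical sketch, not from this patch: the two ways of
	 * calling ftruncate64(fd, length) described above.
	 */
	#include <stdint.h>

	/* Legacy ABI: number encoded in the swi instruction, length in r1-r2. */
	static inline long ftruncate64_legacy(unsigned int fd, uint64_t length)
	{
		register unsigned long r0 asm("r0") = fd;
		register unsigned long r1 asm("r1") = (uint32_t)length;
		register unsigned long r2 asm("r2") = (uint32_t)(length >> 32);

		asm volatile("swi %1"
			     : "+r" (r0)
			     : "i" (0x900000 + 194), "r" (r1), "r" (r2)
			     : "memory");
		return r0;
	}

	/* New ARM EABI: number in r7, length in the even/odd pair r2-r3, "swi 0". */
	static inline long ftruncate64_eabi(unsigned int fd, uint64_t length)
	{
		register unsigned long r0 asm("r0") = fd;
		register unsigned long r2 asm("r2") = (uint32_t)length;
		register unsigned long r3 asm("r3") = (uint32_t)(length >> 32);
		register unsigned long r7 asm("r7") = 194;

		asm volatile("swi #0"
			     : "+r" (r0)
			     : "r" (r2), "r" (r3), "r" (r7)
			     : "memory");
		return r0;
	}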
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--	arch/arm/kernel/entry-common.S	35
-rw-r--r--	arch/arm/kernel/traps.c		 2
2 files changed, 22 insertions, 15 deletions
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index e2b42997ad33..34826bcceb7a 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -98,20 +98,14 @@ ENTRY(ret_from_fork)
run on an ARM7 and we can save a couple of instructions.
--pb */
#ifdef CONFIG_CPU_ARM710
- .macro arm710_bug_check, instr, temp
- and \temp, \instr, #0x0f000000 @ check for SWI
- teq \temp, #0x0f000000
- bne .Larm700bug
- .endm
-
-.Larm700bug:
+#define A710(code...) code
+.Larm710bug:
ldmia sp, {r0 - lr}^ @ Get calling r0 - lr
mov r0, r0
add sp, sp, #S_FRAME_SIZE
subs pc, lr, #4
#else
- .macro arm710_bug_check, instr, temp
- .endm
+#define A710(code...)
#endif
.align 5
@@ -129,14 +123,24 @@ ENTRY(vector_swi)
/*
* Get the system call number.
*/
-#ifdef CONFIG_ARM_THUMB
+#if defined(CONFIG_AEABI)
+
+ @ syscall number is in scno (r7) already.
+
+ A710( ldr ip, [lr, #-4] @ get SWI instruction )
+ A710( and ip, ip, #0x0f000000 @ check for SWI )
+ A710( teq ip, #0x0f000000 )
+ A710( bne .Larm710bug )
+#elif defined(CONFIG_ARM_THUMB)
tst r8, #PSR_T_BIT @ this is SPSR from save_user_regs
addne scno, r7, #__NR_SYSCALL_BASE @ put OS number in
ldreq scno, [lr, #-4]
#else
ldr scno, [lr, #-4] @ get SWI instruction
+ A710( and ip, scno, #0x0f000000 @ check for SWI )
+ A710( teq ip, #0x0f000000 )
+ A710( bne .Larm710bug )
#endif
- arm710_bug_check scno, ip
#ifdef CONFIG_ALIGNMENT_TRAP
ldr ip, __cr_alignment
@@ -145,18 +149,19 @@ ENTRY(vector_swi)
#endif
enable_irq
- stmdb sp!, {r4, r5} @ push fifth and sixth args
-
get_thread_info tsk
ldr ip, [tsk, #TI_FLAGS] @ check for syscall tracing
+#ifndef CONFIG_AEABI
bic scno, scno, #0xff000000 @ mask off SWI op-code
eor scno, scno, #__NR_SYSCALL_BASE @ check OS number
+#endif
adr tbl, sys_call_table @ load syscall table pointer
+ stmdb sp!, {r4, r5} @ push fifth and sixth args
tst ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
bne __sys_trace
- adr lr, ret_fast_syscall @ return address
cmp scno, #NR_syscalls @ check upper syscall limit
+ adr lr, ret_fast_syscall @ return address
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
add r1, sp, #S_OFF
@@ -207,6 +212,7 @@ ENTRY(sys_call_table)
@ r8 = syscall table
.type sys_syscall, #function
sys_syscall:
+#ifndef CONFIG_AEABI
eor scno, r0, #__NR_SYSCALL_BASE
cmp scno, #__NR_syscall - __NR_SYSCALL_BASE
cmpne scno, #NR_syscalls @ check range
@@ -216,6 +222,7 @@ sys_syscall:
movlo r2, r3
movlo r3, r4
ldrlo pc, [tbl, scno, lsl #2]
+#endif
b sys_ni_syscall
sys_fork_wrapper:
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 93cfd3ffcc72..10235b01582e 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -404,7 +404,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
struct thread_info *thread = current_thread_info();
siginfo_t info;
- if ((no >> 16) != 0x9f)
+ if ((no >> 16) != (__ARM_NR_BASE>> 16))
return bad_syscall(no, regs);
switch (no & 0xffff) {
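The traps.c hunk replaces the hard-coded 0x9f prefix with one derived from __ARM_NR_BASE, so the ARM-private syscall range is recognised under either convention. A rough illustration of the intent, assuming the bases defined in asm-arm/unistd.h (legacy __NR_SYSCALL_BASE of 0x900000 versus 0 for EABI); this is a sketch, not kernel code:

	/* Assumed values, per asm-arm/unistd.h: the legacy ABI keeps
	 * __NR_SYSCALL_BASE at 0x900000 while EABI uses 0, so the
	 * ARM-private prefix is 0x9f or 0x0f respectively.
	 */
	#ifdef __ARM_EABI__
	#define __NR_SYSCALL_BASE	0
	#else
	#define __NR_SYSCALL_BASE	0x900000
	#endif
	#define __ARM_NR_BASE		(__NR_SYSCALL_BASE + 0x0f0000)

	/* The old check compared against the literal 0x9f, which only holds
	 * for the legacy base; comparing against (__ARM_NR_BASE >> 16)
	 * works for both. */
	static int in_arm_private_range(int no)
	{
		return (no >> 16) == (__ARM_NR_BASE >> 16);
	}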