author     Paul Mackerras <paulus@samba.org>  2013-10-23 09:40:02 +0100
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2013-10-23 22:34:19 +1100
commit     955c1cab809edfb5429603c68493363074ac20cf (patch)
tree       366056342adca338a8efc6b7e8caf52279765647  /arch/powerpc/kernel/vector.S
parent     18461960cbf50bf345ef0667d45d5f64de8fb893 (diff)
download   linux-955c1cab809edfb5429603c68493363074ac20cf.tar.bz2
powerpc: Don't corrupt user registers on 32-bit
Commit de79f7b9f6 ("powerpc: Put FP/VSX and VR state into structures") modified load_up_fpu() and load_up_altivec() in such a way that they now use r7 and r8. Unfortunately, the callers of these functions on 32-bit machines then return to userspace via fast_exception_return, which doesn't restore all of the volatile GPRs, but only r1, r3 - r6 and r9 - r12. This was causing userspace segfaults and other userspace misbehaviour on 32-bit machines.

This fixes the problem by changing the register usage of load_up_fpu() and load_up_altivec() to avoid using r7 and r8 and instead use r6 and r10. This also adds comments to those functions saying which registers may be used.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Tested-by: Scott Wood <scottwood@freescale.com> (on e500mc, so no altivec)
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
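To make the failure mode concrete, here is a minimal sketch (illustrative only, not the actual kernel sources; the restored-register set is the one named in the message above) of how the pre-fix code leaked a clobbered register back to userspace on 32-bit:

	/* Before this fix (simplified): load_up_altivec scribbles on r7. */
load_up_altivec:
	addi	r7,r4,THREAD_VRSTATE	/* r7 now holds a kernel pointer */
	...
	blr				/* return to the exception prologue */

	/*
	 * The 32-bit exit path reloads only r1, r3 - r6 and r9 - r12
	 * from the exception frame before the rfi, so whatever
	 * load_up_altivec left in r7 or r8 survives into userspace,
	 * hence the random segfaults in programs using those registers.
	 */
fast_exception_return:
	...
	rfi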
Diffstat (limited to 'arch/powerpc/kernel/vector.S')
-rw-r--r--  arch/powerpc/kernel/vector.S | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index eacda4eea2d7..0458a9aaba9d 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -64,6 +64,9 @@ _GLOBAL(store_vr_state)
* Enables the VMX for use in the kernel on return.
* On SMP we know the VMX is free, since we give it up every
* switch (ie, no lazy save of the vector registers).
+ *
+ * Note that on 32-bit this can only use registers that will be
+ * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
*/
_GLOBAL(load_up_altivec)
mfmsr r5 /* grab the current MSR */
@@ -89,11 +92,11 @@ _GLOBAL(load_up_altivec)
/* Save VMX state to last_task_used_altivec's THREAD struct */
toreal(r4)
addi r4,r4,THREAD
- addi r7,r4,THREAD_VRSTATE
- SAVE_32VRS(0,r5,r7)
+ addi r6,r4,THREAD_VRSTATE
+ SAVE_32VRS(0,r5,r6)
mfvscr vr0
li r10,VRSTATE_VSCR
- stvx vr0,r10,r7
+ stvx vr0,r10,r6
/* Disable VMX for last_task_used_altivec */
PPC_LL r5,PT_REGS(r4)
toreal(r5)
@@ -125,13 +128,13 @@ _GLOBAL(load_up_altivec)
oris r12,r12,MSR_VEC@h
std r12,_MSR(r1)
#endif
- addi r7,r5,THREAD_VRSTATE
+ addi r6,r5,THREAD_VRSTATE
li r4,1
li r10,VRSTATE_VSCR
stw r4,THREAD_USED_VR(r5)
- lvx vr0,r10,r7
+ lvx vr0,r10,r6
mtvscr vr0
- REST_32VRS(0,r4,r7)
+ REST_32VRS(0,r4,r6)
#ifndef CONFIG_SMP
/* Update last_task_used_altivec to 'current' */
subi r4,r5,THREAD /* Back to 'current' */
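The commit message notes that load_up_fpu() in arch/powerpc/kernel/fpu.S received the same treatment, although that file is outside this diffstat. A hedged sketch of the analogous hunk, assuming the FP path mirrors the VMX one shown above (the exact replacement register there is an assumption; the commit only guarantees that r7 and r8 are no longer used):

@@ load_up_fpu (sketch, not the actual hunk) @@
-	addi	r7,r4,THREAD_FPSTATE	/* r7 is not restored on 32-bit */
+	addi	r10,r4,THREAD_FPSTATE	/* r10 is in the restored set */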