author      Cyril Bur <cyrilbur@gmail.com>            2016-09-23 16:18:11 +1000
committer   Michael Ellerman <mpe@ellerman.id.au>     2016-10-04 16:43:07 +1100
commit      e909fb83d39292679118761426d7784715ad79ad (patch)
tree        02bc288a2692d60e06c216db9b7fed78b3df5ef4 /arch/powerpc
parent      3cee070a13b141b8eb5727c3bfa9920092f87264 (diff)
download    linux-e909fb83d39292679118761426d7784715ad79ad.tar.bz2
powerpc: Never giveup a reclaimed thread when enabling kernel {fp, altivec, vsx}
After a thread is reclaimed from its active or suspended transactional
state, the checkpointed state exists on the CPU; this state (along with
the live/transactional state) has been saved in its entirety by the
reclaiming process.

There exists a sequence of events that would cause the kernel to call
one of enable_kernel_fp(), enable_kernel_altivec() or
enable_kernel_vsx() after a thread has been reclaimed. These functions
save away any user state on the CPU so that the kernel can use the
registers. Not only is this saving away unnecessary at this point, it
is actually incorrect: it causes a save of the checkpointed state to
the live structures within the thread struct, thus destroying the true
live state for that thread.

Signed-off-by: Cyril Bur <cyrilbur@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
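The guard added below hinges on msr_tm_active(). For reference, a
minimal sketch of that helper, based on how arch/powerpc/kernel/process.c
defines it in this era (illustrative, not part of this patch):

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* True when the MSR transaction-state (TS) bits mark the thread as
 * transactional or suspended. */
static inline bool msr_tm_active(unsigned long msr)
{
        return MSR_TM_ACTIVE(msr);
}
#else
/* Without TM support a thread can never be transactionally active. */
static inline bool msr_tm_active(unsigned long msr)
{
        return false;
}
#endif

After a reclaim the transaction has ended on the CPU, so the TS bits of
a freshly read MSR are clear, while current->thread.regs->msr still
records the transaction as active. The conjunction
!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr)
therefore identifies exactly the reclaimed-but-still-checkpointed case.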
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kernel/process.c  39
1 file changed, 36 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 34ee5f2e3271..45b6ea069f92 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -205,12 +205,23 @@ EXPORT_SYMBOL_GPL(flush_fp_to_thread);
void enable_kernel_fp(void)
{
+       unsigned long cpumsr;
+
        WARN_ON(preemptible());

-       msr_check_and_set(MSR_FP);
+       cpumsr = msr_check_and_set(MSR_FP);

        if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
                check_if_tm_restore_required(current);
+               /*
+                * If a thread has already been reclaimed then the
+                * checkpointed registers are on the CPU but have definitely
+                * been saved by the reclaim code. Don't need to and *cannot*
+                * giveup as this would save to the 'live' structure not the
+                * checkpointed structure.
+                */
+               if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
+                       return;
                __giveup_fpu(current);
        }
}
@@ -257,12 +268,23 @@ EXPORT_SYMBOL(giveup_altivec);
void enable_kernel_altivec(void)
{
+       unsigned long cpumsr;
+
        WARN_ON(preemptible());

-       msr_check_and_set(MSR_VEC);
+       cpumsr = msr_check_and_set(MSR_VEC);

        if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
                check_if_tm_restore_required(current);
+               /*
+                * If a thread has already been reclaimed then the
+                * checkpointed registers are on the CPU but have definitely
+                * been saved by the reclaim code. Don't need to and *cannot*
+                * giveup as this would save to the 'live' structure not the
+                * checkpointed structure.
+                */
+               if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
+                       return;
                __giveup_altivec(current);
        }
}
@@ -331,12 +353,23 @@ static void save_vsx(struct task_struct *tsk)
void enable_kernel_vsx(void)
{
+       unsigned long cpumsr;
+
        WARN_ON(preemptible());

-       msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
+       cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);

        if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
                check_if_tm_restore_required(current);
+               /*
+                * If a thread has already been reclaimed then the
+                * checkpointed registers are on the CPU but have definitely
+                * been saved by the reclaim code. Don't need to and *cannot*
+                * giveup as this would save to the 'live' structure not the
+                * checkpointed structure.
+                */
+               if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
+                       return;
                if (current->thread.regs->msr & MSR_FP)
                        __giveup_fpu(current);
                if (current->thread.regs->msr & MSR_VEC)
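
For completeness, a sketch of msr_check_and_set(), whose return value
feeds the new guard, based on its shape in arch/powerpc/kernel/process.c
after the parent commit made it return the resulting MSR (illustrative
only): it ORs the requested facility bits into the live MSR and hands
back the result, leaving the TS bits untouched, which is what makes
cpumsr a faithful snapshot of the CPU's transactional state.

unsigned long msr_check_and_set(unsigned long bits)
{
        unsigned long oldmsr = mfmsr();         /* live MSR, TS bits included */
        unsigned long newmsr = oldmsr | bits;   /* enable the requested facilities */

#ifdef CONFIG_VSX
        /* FP and VSX are enabled together on VSX-capable CPUs. */
        if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
                newmsr |= MSR_VSX;
#endif

        if (oldmsr != newmsr)
                mtmsr_isync(newmsr);            /* write back only if something changed */

        return newmsr;
}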