author    Linus Torvalds <torvalds@linux-foundation.org>  2018-08-03 10:38:21 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-08-03 10:38:21 -0700
commit    2ed7533cb7d657410ac29adf08d608b634e58eb0 (patch)
tree      0c155334c9d9a208801c8d8c46b231bb44ecc3f8 /arch
parent    7827fc7d24043522726c58612d2c7f39843721cf (diff)
parent    cca19f0b684f4ed6aabf6ad07ae3e15e77bfd78a (diff)
download  linux-2ed7533cb7d657410ac29adf08d608b634e58eb0.tar.bz2
Merge tag 'powerpc-4.18-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc fixes from Michael Ellerman:

 "One fix for a regression in a recent TLB flush optimisation, which
  caused us to incorrectly not send TLB invalidations to coprocessors.

  Thanks to Frederic Barrat, Nicholas Piggin, Vaibhav Jain"

* tag 'powerpc-4.18-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/64s/radix: Fix missing global invalidations when removing copro
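For context on why the ordering in the patch below matters: as the new in-code comment states, flush_all_mm() looks at both the 'copros' and 'active_cpus' counts to decide whether a TLBI can stay local or must be broadcast globally. The sketch below is illustrative only, not the exact in-tree helper (the name mm_flush_can_be_local is hypothetical); it just captures the rule the patch's comment describes:

/*
 * Illustrative sketch, based on the comment added by this patch:
 * a flush may only be narrowed to the local CPU if no coprocessor
 * (nMMU/PSL) is attached and the mm is active on a single CPU.
 */
static inline bool mm_flush_can_be_local(struct mm_struct *mm)
{
	/* Any attached coprocessor forces a global TLBI */
	if (atomic_read(&mm->context.copros) > 0)
		return false;
	/* More than one active CPU also forces a global TLBI */
	return atomic_read(&mm->context.active_cpus) <= 1;
}

Under the old ordering, 'copros' was decremented before flush_all_mm() ran, so a check like the one above could see no coprocessors and pick a local flush, leaving stale translations cached by the nMMU/PSL.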
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h  33
1 file changed, 21 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 79d570cbf332..b2f89b621b15 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -143,24 +143,33 @@ static inline void mm_context_remove_copro(struct mm_struct *mm)
 {
 	int c;
 
-	c = atomic_dec_if_positive(&mm->context.copros);
-
-	/* Detect imbalance between add and remove */
-	WARN_ON(c < 0);
-
 	/*
-	 * Need to broadcast a global flush of the full mm before
-	 * decrementing active_cpus count, as the next TLBI may be
-	 * local and the nMMU and/or PSL need to be cleaned up.
-	 * Should be rare enough so that it's acceptable.
+	 * When removing the last copro, we need to broadcast a global
+	 * flush of the full mm, as the next TLBI may be local and the
+	 * nMMU and/or PSL need to be cleaned up.
+	 *
+	 * Both the 'copros' and 'active_cpus' counts are looked at in
+	 * flush_all_mm() to determine the scope (local/global) of the
+	 * TLBIs, so we need to flush first before decrementing
+	 * 'copros'. If this API is used by several callers for the
+	 * same context, it can lead to over-flushing. It's hopefully
+	 * not common enough to be a problem.
 	 *
 	 * Skip on hash, as we don't know how to do the proper flush
 	 * for the time being. Invalidations will remain global if
-	 * used on hash.
+	 * used on hash. Note that we can't drop 'copros' either, as
+	 * it could make some invalidations local with no flush
+	 * in-between.
 	 */
-	if (c == 0 && radix_enabled()) {
+	if (radix_enabled()) {
 		flush_all_mm(mm);
-		dec_mm_active_cpus(mm);
+
+		c = atomic_dec_if_positive(&mm->context.copros);
+		/* Detect imbalance between add and remove */
+		WARN_ON(c < 0);
+
+		if (c == 0)
+			dec_mm_active_cpus(mm);
 	}
 }
 #else
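For reference, the add-side counterpart in the same header takes the opposite order: it bumps 'copros' first and raises 'active_cpus' when the first copro attaches, so invalidations become global before the coprocessor can cache any translations. Quoted roughly from memory of the 4.18-era arch/powerpc/include/asm/mmu_context.h; check the file itself for the authoritative version:

static inline void mm_context_add_copro(struct mm_struct *mm)
{
	/*
	 * If any copro is in use, increment the active CPU count
	 * in order to force TLB invalidations to be global as to
	 * propagate to the Nest MMU.
	 */
	if (atomic_inc_return(&mm->context.copros) == 1)
		inc_mm_active_cpus(mm);
}

The fixed mm_context_remove_copro() above mirrors this: flush globally while 'copros' is still elevated, then decrement, and only drop 'active_cpus' once the last copro is gone.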