path: root/arch/powerpc/kvm/book3s_hv_builtin.c
author		Nicholas Piggin <npiggin@gmail.com>	2021-11-23 19:52:20 +1000
committer	Michael Ellerman <mpe@ellerman.id.au>	2021-11-24 21:09:01 +1100
commit		0ba0e5d5a691806cca3d4f290dcc61f656049872 (patch)
tree		f9c40cba8fcebd5c2d0eae25d25fb708719e3d9e /arch/powerpc/kvm/book3s_hv_builtin.c
parent		a089a6869e7f613a8d961ac65bafd127317e4c5c (diff)
download	linux-0ba0e5d5a691806cca3d4f290dcc61f656049872.tar.bz2
KVM: PPC: Book3S HV: Split P8 from P9 path guest vCPU TLB flushing
This creates separate functions for old and new paths for vCPU TLB
flushing, which will reduce complexity of the next change.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-43-npiggin@gmail.com
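As a quick orientation before the diff: the idea of the split, sketched below, is that flush_guest_tlb() in this file keeps only the hash-MMU (P8) tlbiel loop, while the radix (P9) flush moves out to the P9 guest-entry side. That placement is inferred from the commit title and the surrounding series, not shown in this hunk; only the prototype change at the end of the sketch is taken directly from the diff below.

/*
 * Sketch only; the radix-side replacement is not part of this hunk and
 * any naming or placement implied here is an assumption.
 */
static void flush_guest_tlb(struct kvm *kvm)	/* shape before the split */
{
	if (kvm_is_radix(kvm)) {
		/* P9/radix: R=1 PRS=1 tlbiel loop, then ERAT flush */
	} else {
		/* P8/hash: R=0 PRS=0 tlbiel loop over all TLB sets */
	}
}

/* Prototype change visible to callers, taken from the hunk below; the
 * nested-guest argument can be dropped because nested guests exist only
 * on the radix (P9) path, which no longer uses this helper. */
void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
				 struct kvm_nested_guest *nested);	/* old */
void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu);		/* new */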
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_builtin.c')
-rw-r--r--	arch/powerpc/kvm/book3s_hv_builtin.c	53
1 file changed, 8 insertions(+), 45 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 70b7a8f97153..ad70756a777c 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -682,60 +682,23 @@ static void flush_guest_tlb(struct kvm *kvm)
 	unsigned long rb, set;
 	rb = PPC_BIT(52);	/* IS = 2 */
-	if (kvm_is_radix(kvm)) {
-		/* R=1 PRS=1 RIC=2 */
+	for (set = 0; set < kvm->arch.tlb_sets; ++set) {
+		/* R=0 PRS=0 RIC=0 */
 		asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
-			     : : "r" (rb), "i" (1), "i" (1), "i" (2),
+			     : : "r" (rb), "i" (0), "i" (0), "i" (0),
 			       "r" (0) : "memory");
-		for (set = 1; set < kvm->arch.tlb_sets; ++set) {
-			rb += PPC_BIT(51);	/* increment set number */
-			/* R=1 PRS=1 RIC=0 */
-			asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
-				     : : "r" (rb), "i" (1), "i" (1), "i" (0),
-				       "r" (0) : "memory");
-		}
-		asm volatile("ptesync": : :"memory");
-		// POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
-		asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
-	} else {
-		for (set = 0; set < kvm->arch.tlb_sets; ++set) {
-			/* R=0 PRS=0 RIC=0 */
-			asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
-				     : : "r" (rb), "i" (0), "i" (0), "i" (0),
-				       "r" (0) : "memory");
-			rb += PPC_BIT(51);	/* increment set number */
-		}
-		asm volatile("ptesync": : :"memory");
-		// POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
-		if (cpu_has_feature(CPU_FTR_ARCH_300))
-			asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
+		rb += PPC_BIT(51);	/* increment set number */
 	}
+	asm volatile("ptesync": : :"memory");
 }
-void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
-				 struct kvm_nested_guest *nested)
+void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu)
 {
-	cpumask_t *need_tlb_flush;
-
-	/*
-	 * On POWER9, individual threads can come in here, but the
-	 * TLB is shared between the 4 threads in a core, hence
-	 * invalidating on one thread invalidates for all.
-	 * Thus we make all 4 threads use the same bit.
-	 */
-	if (cpu_has_feature(CPU_FTR_ARCH_300))
-		pcpu = cpu_first_tlb_thread_sibling(pcpu);
-
-	if (nested)
-		need_tlb_flush = &nested->need_tlb_flush;
-	else
-		need_tlb_flush = &kvm->arch.need_tlb_flush;
-
-	if (cpumask_test_cpu(pcpu, need_tlb_flush)) {
+	if (cpumask_test_cpu(pcpu, &kvm->arch.need_tlb_flush)) {
 		flush_guest_tlb(kvm);
 		/* Clear the bit after the TLB flush */
-		cpumask_clear_cpu(pcpu, need_tlb_flush);
+		cpumask_clear_cpu(pcpu, &kvm->arch.need_tlb_flush);
 	}
 }
 EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush);
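For reference, the two functions as they read in book3s_hv_builtin.c once this hunk is applied, assembled from the context and '+' lines above (blank lines and exact whitespace are approximated):

static void flush_guest_tlb(struct kvm *kvm)
{
	unsigned long rb, set;

	rb = PPC_BIT(52);	/* IS = 2 */
	for (set = 0; set < kvm->arch.tlb_sets; ++set) {
		/* R=0 PRS=0 RIC=0 */
		asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
			     : : "r" (rb), "i" (0), "i" (0), "i" (0),
			       "r" (0) : "memory");
		rb += PPC_BIT(51);	/* increment set number */
	}
	asm volatile("ptesync": : :"memory");
}

void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu)
{
	if (cpumask_test_cpu(pcpu, &kvm->arch.need_tlb_flush)) {
		flush_guest_tlb(kvm);
		/* Clear the bit after the TLB flush */
		cpumask_clear_cpu(pcpu, &kvm->arch.need_tlb_flush);
	}
}
EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush);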