author		Vitaly Kuznetsov <vkuznets@redhat.com>	2022-11-01 15:53:55 +0100
committer	Paolo Bonzini <pbonzini@redhat.com>	2022-11-18 12:59:10 -0500
commit		53ca765a041d5a24650d3f01bced791be5d72df7 (patch)
tree		32275d47843885562cf06fd7f99e46b2622d5b1a
parent		b6c2c22fa7012616b3039c9f559bf01195137b9d (diff)
download	linux-53ca765a041d5a24650d3f01bced791be5d72df7.tar.bz2
KVM: x86: hyper-v: Create a separate fifo for L2 TLB flush

To handle L2 TLB flush requests, KVM needs to use a separate fifo from
regular (L1) Hyper-V TLB flush requests: e.g. when a request to flush
something in L2 is made, the target vCPU can transition from L2 to L1,
receive a request to flush a GVA for L1 and then try to enter L2 back.
The first request needs to be processed at this point. Similarly,
requests to flush GVAs in L1 must wait until L2 exits to L1.

No functional change as KVM doesn't handle L2 TLB flush requests from
L2 yet.

Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20221101145426.251680-18-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
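[Editor's illustration] The core idea is that flush requests targeting L1 and L2 are queued separately, so a request that arrives while the vCPU is at the other level is neither lost nor applied to the wrong context. The following is a minimal, self-contained userspace sketch of that selection logic, not kernel code: the enum names mirror the patch, while the ring buffer and the get_fifo()/enqueue() helpers are stand-ins for kfifo and are assumptions of this sketch.

	/*
	 * Standalone sketch of the per-level fifo selection. Build with:
	 *   cc -o fifo_sketch fifo_sketch.c
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	enum hv_tlb_flush_fifos {
		HV_L1_TLB_FLUSH_FIFO,
		HV_L2_TLB_FLUSH_FIFO,
		HV_NR_TLB_FLUSH_FIFOS,
	};

	#define FIFO_SIZE 16

	struct tlb_flush_fifo {
		uint64_t entries[FIFO_SIZE];
		unsigned int head, tail;	/* head == tail means empty */
	};

	/* One fifo per level, as in struct kvm_vcpu_hv after this patch. */
	static struct tlb_flush_fifo fifos[HV_NR_TLB_FLUSH_FIFOS];

	static struct tlb_flush_fifo *get_fifo(bool is_guest_mode)
	{
		return &fifos[is_guest_mode ? HV_L2_TLB_FLUSH_FIFO :
					      HV_L1_TLB_FLUSH_FIFO];
	}

	static void enqueue(bool is_guest_mode, uint64_t gva)
	{
		struct tlb_flush_fifo *f = get_fifo(is_guest_mode);

		f->entries[f->tail++ % FIFO_SIZE] = gva;
	}

	int main(void)
	{
		/* An L2 flush request arrives while the vCPU runs L2 ... */
		enqueue(true, 0xdead000);
		/* ... the vCPU exits to L1 and an L1 flush request arrives. */
		enqueue(false, 0xbeef000);

		/*
		 * With a single fifo the two requests would be mixed; with
		 * one fifo per level, each request is drained only when the
		 * vCPU (re-)enters the level it targets.
		 */
		printf("L1 pending: %u, L2 pending: %u\n",
		       fifos[HV_L1_TLB_FLUSH_FIFO].tail - fifos[HV_L1_TLB_FLUSH_FIFO].head,
		       fifos[HV_L2_TLB_FLUSH_FIFO].tail - fifos[HV_L2_TLB_FLUSH_FIFO].head);
		return 0;
	}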
-rw-r--r--	arch/x86/include/asm/kvm_host.h	|  8
-rw-r--r--	arch/x86/kvm/hyperv.c	| 11
-rw-r--r--	arch/x86/kvm/hyperv.h	| 19
3 files changed, 30 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3e35dcf40dc7..89f9c98ff445 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -631,6 +631,12 @@ struct kvm_vcpu_hv_synic {
  */
 #define KVM_HV_TLB_FLUSHALL_ENTRY  ((u64)-1)
 
+enum hv_tlb_flush_fifos {
+	HV_L1_TLB_FLUSH_FIFO,
+	HV_L2_TLB_FLUSH_FIFO,
+	HV_NR_TLB_FLUSH_FIFOS,
+};
+
 struct kvm_vcpu_hv_tlb_flush_fifo {
 	spinlock_t write_lock;
 	DECLARE_KFIFO(entries, u64, KVM_HV_TLB_FLUSH_FIFO_SIZE);
@@ -658,7 +664,7 @@ struct kvm_vcpu_hv {
 		u32 nested_ebx; /* HYPERV_CPUID_NESTED_FEATURES.EBX */
 	} cpuid_cache;
 
-	struct kvm_vcpu_hv_tlb_flush_fifo tlb_flush_fifo;
+	struct kvm_vcpu_hv_tlb_flush_fifo tlb_flush_fifo[HV_NR_TLB_FLUSH_FIFOS];
 };
 
 /* Xen HVM per vcpu emulation context */
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 0bfa59838e0a..989846310303 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -956,8 +956,10 @@ int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
 	hv_vcpu->vp_index = vcpu->vcpu_idx;
 
-	INIT_KFIFO(hv_vcpu->tlb_flush_fifo.entries);
-	spin_lock_init(&hv_vcpu->tlb_flush_fifo.write_lock);
+	for (i = 0; i < HV_NR_TLB_FLUSH_FIFOS; i++) {
+		INIT_KFIFO(hv_vcpu->tlb_flush_fifo[i].entries);
+		spin_lock_init(&hv_vcpu->tlb_flush_fifo[i].write_lock);
+	}
 
 	return 0;
 }
@@ -1839,7 +1841,8 @@ static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu, u64 *entries, int count)
 	if (!hv_vcpu)
 		return;
 
-	tlb_flush_fifo = &hv_vcpu->tlb_flush_fifo;
+	/* kvm_hv_flush_tlb() is not ready to handle requests for L2s yet */
+	tlb_flush_fifo = &hv_vcpu->tlb_flush_fifo[HV_L1_TLB_FLUSH_FIFO];
 
 	spin_lock(&tlb_flush_fifo->write_lock);
@@ -1874,7 +1877,7 @@ int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
 	if (!tdp_enabled || !hv_vcpu)
 		return -EINVAL;
 
-	tlb_flush_fifo = &hv_vcpu->tlb_flush_fifo;
+	tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(vcpu, is_guest_mode(vcpu));
 
 	count = kfifo_out(&tlb_flush_fifo->entries, entries, KVM_HV_TLB_FLUSH_FIFO_SIZE);
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index f79edf9234cd..8942e8c6c912 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -22,6 +22,7 @@
 #define __ARCH_X86_KVM_HYPERV_H__
 
 #include <linux/kvm_host.h>
+#include "x86.h"
 
 /* "Hv#1" signature */
 #define HYPERV_CPUID_SIGNATURE_EAX 0x31237648
@@ -151,15 +152,27 @@ int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
 int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
 		     struct kvm_cpuid_entry2 __user *entries);
 
+static inline struct kvm_vcpu_hv_tlb_flush_fifo *kvm_hv_get_tlb_flush_fifo(struct kvm_vcpu *vcpu,
+									    bool is_guest_mode)
+{
+	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+	int i = is_guest_mode ? HV_L2_TLB_FLUSH_FIFO :
+				HV_L1_TLB_FLUSH_FIFO;
+
+	/* KVM does not handle L2 TLB flush requests yet */
+	WARN_ON_ONCE(i != HV_L1_TLB_FLUSH_FIFO);
+
+	return &hv_vcpu->tlb_flush_fifo[i];
+}
+
 static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
-	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 
-	if (!hv_vcpu || !kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
+	if (!to_hv_vcpu(vcpu) || !kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
 		return;
 
-	tlb_flush_fifo = &hv_vcpu->tlb_flush_fifo;
+	tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(vcpu, is_guest_mode(vcpu));
 
 	kfifo_reset_out(&tlb_flush_fifo->entries);
 }
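
[Editor's illustration] Once L2 TLB flush handling is wired up (a later change; this patch deliberately keeps hv_tlb_flush_enqueue() on the L1 fifo), an enqueue path for L2-targeted requests could select the L2 fifo through the new helper. The sketch below is hypothetical: the function name hv_tlb_flush_enqueue_l2 is invented here, it is modeled on hv_tlb_flush_enqueue() as shown in the diff above, and as of this patch the helper would still WARN_ON_ONCE for the L2 fifo.

	/*
	 * Hypothetical future enqueue path (not part of this patch): queue
	 * GVAs that target L2 into the L2 fifo instead of the L1 fifo.
	 */
	static void hv_tlb_flush_enqueue_l2(struct kvm_vcpu *vcpu,
					    u64 *entries, int count)
	{
		struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
		struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

		if (!hv_vcpu)
			return;

		/* 'true': the request targets L2, so pick the L2 fifo. */
		tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(vcpu, true);

		spin_lock(&tlb_flush_fifo->write_lock);
		kfifo_in(&tlb_flush_fifo->entries, entries, count);
		spin_unlock(&tlb_flush_fifo->write_lock);
	}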