author     Sean Christopherson <seanjc@google.com>   2021-11-17 18:08:42 -0800
committer  Paolo Bonzini <pbonzini@redhat.com>       2021-12-08 04:24:41 -0500
commit     ce92ef7604ffe74da84f559f6eba8c6053250451
tree       0e9cfe20eb6172341195c76f57b201ecbdeb3ba9 /arch/x86/kvm/mmu/mmu_internal.h
parent     8fc78909c05d1691c0d087cb1b9a4858762c747d
download   linux-ce92ef7604ffe74da84f559f6eba8c6053250451.tar.bz2
KVM: x86/mmu: Use shadow page role to detect PML-unfriendly pages for L2
Rework make_spte() to query the shadow page's role, specifically whether
or not it's a guest_mode page, a.k.a. a page for L2, when determining if
the SPTE is compatible with PML. This eliminates a dependency on @vcpu,
with a future goal of being able to create SPTEs without a specific vCPU.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
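For context, a minimal sketch of the kind of caller this rework enables
(illustrative only: the real make_spte() change lives in
arch/x86/kvm/mmu/spte.c and is not part of this file's diff; the function
name sketch_make_spte() and the exact mask usage are assumptions):

/*
 * Illustrative sketch, not this commit's spte.c hunk: with the helper
 * keyed off the shadow page's role, the WP-only decision can be made
 * from the kvm_mmu_page alone, with no vCPU in hand.
 */
static u64 sketch_make_spte(struct kvm_mmu_page *sp, u64 spte)
{
	/* Previously required a vCPU: kvm_vcpu_ad_need_write_protect(vcpu). */
	if (kvm_mmu_page_ad_need_write_protect(sp))
		spte |= SPTE_TDP_AD_WP_ONLY_MASK;	/* assumed mask name */

	return spte;
}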
Diffstat (limited to 'arch/x86/kvm/mmu/mmu_internal.h')
-rw-r--r--  arch/x86/kvm/mmu/mmu_internal.h | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 52c6527b1a06..5897ce4cdf10 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -104,7 +104,7 @@ static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
 	return kvm_mmu_role_as_id(sp->role);
 }
 
-static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
+static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
 {
 	/*
 	 * When using the EPT page-modification log, the GPAs in the CPU dirty
@@ -112,10 +112,9 @@ static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
 	 * on write protection to record dirty pages, which bypasses PML, since
 	 * writes now result in a vmexit. Note, the check on CPU dirty logging
 	 * being enabled is mandatory as the bits used to denote WP-only SPTEs
-	 * are reserved for NPT w/ PAE (32-bit KVM).
+	 * are reserved for PAE paging (32-bit KVM).
 	 */
-	return vcpu->arch.mmu == &vcpu->arch.guest_mmu &&
-	       kvm_x86_ops.cpu_dirty_log_size;
+	return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode;
 }
 
 int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
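A note on the equivalence (context, not part of this diff): the old check,
vcpu->arch.mmu == &vcpu->arch.guest_mmu, identified SPTEs being created for
L2 by asking which MMU the vCPU was using. Shadow pages created for L2 carry
the same information in their role, since guest_mode is set when KVM computes
the page role for a nested MMU. A rough, illustrative sketch of that
relationship (the helper name is hypothetical; the real role computation
lives in mmu.c):

/*
 * Illustrative sketch: the role stamped on shadow pages that map L2
 * records guest_mode, so sp->role.guest_mode answers "was this page
 * created for L2?" without dereferencing the vCPU.
 */
static union kvm_mmu_page_role sketch_l2_page_role(union kvm_mmu_page_role role)
{
	role.guest_mode = 1;	/* page maps L2, i.e. is PML-unfriendly */
	return role;
}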