author    Sean Christopherson <sean.j.christopherson@intel.com>    2020-01-08 12:24:47 -0800
committer Paolo Bonzini <pbonzini@redhat.com>                      2020-01-27 20:00:09 +0100
commit    2c0629f4b95cf5adf5b6f78f7d318df894b5f9a1 (patch)
tree      2cee3961016c84c322f365ebdcd60cb555124306 /arch
parent    293e306e7faac4eafaefb9518a1cd6eaecad88e9 (diff)
KVM: x86/mmu: Remove lpage_is_disallowed() check from set_spte()
Remove the late "lpage is disallowed" check from set_spte() now that the initial check is performed after acquiring mmu_lock. Fold the guts of the remaining helper, __mmu_gfn_lpage_is_disallowed(), into kvm_mmu_hugepage_adjust() to eliminate the unnecessary slot !NULL check.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
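For illustration, a minimal sketch of the level-selection loop in kvm_mmu_hugepage_adjust() once the helper is folded in; this is an excerpt using the types and helpers shown in the diff below, not a standalone compilable unit:

	/*
	 * Post-patch shape of the loop: the memslot has already been
	 * validated by this point, so the old slot !NULL fallback in
	 * __mmu_gfn_lpage_is_disallowed() is dead weight and the
	 * lpage_info lookup can be done inline.
	 */
	for ( ; max_level > PT_PAGE_TABLE_LEVEL; max_level--) {
		struct kvm_lpage_info *linfo =
			lpage_info_slot(gfn, slot, max_level);

		if (!linfo->disallow_lpage)
			break;
	}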
Diffstat (limited to 'arch')
-rw-r--r--    arch/x86/kvm/mmu/mmu.c    39
1 file changed, 3 insertions(+), 36 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 812c69f7f552..a9e6683c802b 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1264,28 +1264,6 @@ static void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
list_del(&sp->lpage_disallowed_link);
}
-static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level,
- struct kvm_memory_slot *slot)
-{
- struct kvm_lpage_info *linfo;
-
- if (slot) {
- linfo = lpage_info_slot(gfn, slot, level);
- return !!linfo->disallow_lpage;
- }
-
- return true;
-}
-
-static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn,
- int level)
-{
- struct kvm_memory_slot *slot;
-
- slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
- return __mmu_gfn_lpage_is_disallowed(gfn, level, slot);
-}
-
static inline bool memslot_valid_for_gpte(struct kvm_memory_slot *slot,
bool no_dirty_log)
{
@@ -3078,18 +3056,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
spte |= (u64)pfn << PAGE_SHIFT;
if (pte_access & ACC_WRITE_MASK) {
-
- /*
- * Legacy code to handle an obsolete scenario where a different
- * vcpu creates new sp in the window between this vcpu's query
- * of lpage_is_disallowed() and acquiring mmu_lock. No longer
- * necessary now that lpage_is_disallowed() is called after
- * acquiring mmu_lock.
- */
- if (level > PT_PAGE_TABLE_LEVEL &&
- mmu_gfn_lpage_is_disallowed(vcpu, gfn, level))
- goto done;
-
spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
/*
@@ -3121,7 +3087,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
set_pte:
if (mmu_spte_update(sptep, spte))
ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
-done:
return ret;
}
@@ -3309,6 +3274,7 @@ static int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
int max_level, kvm_pfn_t *pfnp)
{
struct kvm_memory_slot *slot;
+ struct kvm_lpage_info *linfo;
kvm_pfn_t pfn = *pfnp;
kvm_pfn_t mask;
int level;
@@ -3326,7 +3292,8 @@ static int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
max_level = min(max_level, kvm_x86_ops->get_lpage_level());
for ( ; max_level > PT_PAGE_TABLE_LEVEL; max_level--) {
- if (!__mmu_gfn_lpage_is_disallowed(gfn, max_level, slot))
+ linfo = lpage_info_slot(gfn, slot, max_level);
+ if (!linfo->disallow_lpage)
break;
}
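After the patch, the writable-access path in set_spte() likewise loses its early-exit. A sketch of the resulting shape, under the same assumptions as above (the dirty-logging logic inside the block is elided):

	if (pte_access & ACC_WRITE_MASK) {
		spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
		/*
		 * ... write-protection / dirty-log handling continues
		 * here. The late mmu_gfn_lpage_is_disallowed() recheck
		 * and its "done:" exit label are gone because the
		 * hugepage check now runs with mmu_lock held.
		 */
	}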