author     Sean Christopherson <sean.j.christopherson@intel.com>   2019-12-06 15:57:20 -0800
committer  Paolo Bonzini <pbonzini@redhat.com>                     2020-01-08 18:16:05 +0100
commit     39ca1ecb784b29965fd780bed1e8a3792a086a29 (patch)
tree       00bee61425fe3efee029dfb95c1370fe0e180c1e
parent     f0f37e229c0517fa0d8bda73a2aeee28260370a2 (diff)
download   linux-39ca1ecb784b29965fd780bed1e8a3792a086a29.tar.bz2
KVM: x86/mmu: Refactor handling of forced 4k pages in page faults
Refactor the page fault handlers and mapping_level() to track the max
allowed page level instead of only tracking if a 4k page is mandatory
due to one restriction or another.  This paves the way for cleanly
consolidating tdp_page_fault() and nonpaging_page_fault(), and for
eliminating a redundant check on mmu_gfn_lpage_is_disallowed().

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
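The gist of the conversion: instead of a write-only bool that can only say
"force 4k", mapping_level() now takes an in/out cap that the caller seeds
with whatever it already knows and the callee may only tighten.  A minimal
standalone C sketch of the pattern (the LEVEL_* constants and the
pick_level_*() helpers are invented for illustration, this is not the
kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins for the kernel's PT_*_LEVEL constants. */
#define LEVEL_4K 1
#define LEVEL_2M 2
#define LEVEL_1G 3

/*
 * Before: the callee can only force the smallest page size, so every
 * extra restriction needs its own bool plumbing and special casing.
 */
static int pick_level_old(int host_level, bool *force_small)
{
	if (*force_small)
		return LEVEL_4K;
	return host_level;
}

/*
 * After: the caller seeds *max_level with the strictest cap it already
 * knows about; the callee honors it and may tighten it further.  All
 * restrictions collapse into a single min() on the level.
 */
static int pick_level_new(int host_level, int *max_level)
{
	if (host_level < *max_level)
		*max_level = host_level;
	return *max_level;
}

int main(void)
{
	bool restricted = true;	/* e.g. an NX-huge-page-style rule */

	/* Old style: the restriction is a forced-4k special case. */
	bool force_small = restricted;
	printf("old: level %d\n", pick_level_old(LEVEL_1G, &force_small));

	/* New style: the restriction is just a smaller initial cap. */
	int max_level = restricted ? LEVEL_4K : LEVEL_1G;
	printf("new: level %d\n", pick_level_new(LEVEL_1G, &max_level));
	return 0;
}

The payoff is visible in the hunks below: PAE's 2mb limit in
nonpaging_page_fault(), the NX huge page mitigation, and the cap that
tdp_page_fault() has already computed all become nothing more than a
smaller initial value for max_level.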
-rw-r--r--  arch/x86/kvm/mmu/mmu.c          | 45
-rw-r--r--  arch/x86/kvm/mmu/paging_tmpl.h  | 16
2 files changed, 29 insertions(+), 32 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 8db2bb050809..daf41806243f 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1328,18 +1328,19 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
 }
 
 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
-			 bool *force_pt_level)
+			 int *max_levelp)
 {
-	int host_level, max_level;
+	int host_level, max_level = *max_levelp;
 	struct kvm_memory_slot *slot;
 
-	if (unlikely(*force_pt_level))
+	if (unlikely(max_level == PT_PAGE_TABLE_LEVEL))
 		return PT_PAGE_TABLE_LEVEL;
 
 	slot = kvm_vcpu_gfn_to_memslot(vcpu, large_gfn);
-	*force_pt_level = !memslot_valid_for_gpte(slot, true);
-	if (unlikely(*force_pt_level))
+	if (!memslot_valid_for_gpte(slot, true)) {
+		*max_levelp = PT_PAGE_TABLE_LEVEL;
 		return PT_PAGE_TABLE_LEVEL;
+	}
 
 	host_level = host_mapping_level(vcpu->kvm, large_gfn);
 
@@ -4173,9 +4174,10 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
 	unsigned long mmu_seq;
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	bool write = error_code & PFERR_WRITE_MASK;
-	bool force_pt_level, map_writable;
+	bool map_writable;
 	bool exec = error_code & PFERR_FETCH_MASK;
 	bool lpage_disallowed = exec && is_nx_huge_page_enabled();
+	int max_level;
 
 	/* Note, paging is disabled, ergo gva == gpa. */
 	pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code);
@@ -4191,19 +4193,12 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
 
-	force_pt_level = lpage_disallowed;
-	level = mapping_level(vcpu, gfn, &force_pt_level);
-	if (likely(!force_pt_level)) {
-		/*
-		 * This path builds a PAE pagetable - so we can map
-		 * 2mb pages at maximum. Therefore check if the level
-		 * is larger than that.
-		 */
-		if (level > PT_DIRECTORY_LEVEL)
-			level = PT_DIRECTORY_LEVEL;
+	/* This path builds a PAE pagetable, we can map 2mb pages at maximum. */
+	max_level = lpage_disallowed ? PT_PAGE_TABLE_LEVEL : PT_DIRECTORY_LEVEL;
 
+	level = mapping_level(vcpu, gfn, &max_level);
+	if (level > PT_PAGE_TABLE_LEVEL)
 		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
-	}
 
 	if (fast_page_fault(vcpu, gpa, level, error_code))
 		return RET_PF_RETRY;
@@ -4223,7 +4218,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
 		goto out_unlock;
 	if (make_mmu_pages_available(vcpu) < 0)
 		goto out_unlock;
-	if (likely(!force_pt_level))
+	if (likely(max_level > PT_PAGE_TABLE_LEVEL))
 		transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
 	r = __direct_map(vcpu, gpa, write, map_writable, level, pfn,
 			 prefault, false);
@@ -4277,7 +4272,6 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	kvm_pfn_t pfn;
 	int r;
 	int level;
-	bool force_pt_level;
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	unsigned long mmu_seq;
 	int write = error_code & PFERR_WRITE_MASK;
@@ -4305,13 +4299,12 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 			break;
 	}
 
-	force_pt_level = lpage_disallowed || max_level == PT_PAGE_TABLE_LEVEL;
-	level = mapping_level(vcpu, gfn, &force_pt_level);
-	if (likely(!force_pt_level)) {
-		if (level > max_level)
-			level = max_level;
+	if (lpage_disallowed)
+		max_level = PT_PAGE_TABLE_LEVEL;
+
+	level = mapping_level(vcpu, gfn, &max_level);
+	if (level > PT_PAGE_TABLE_LEVEL)
 		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
-	}
 
 	if (fast_page_fault(vcpu, gpa, level, error_code))
 		return RET_PF_RETRY;
@@ -4331,7 +4324,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 		goto out_unlock;
 	if (make_mmu_pages_available(vcpu) < 0)
 		goto out_unlock;
-	if (likely(!force_pt_level))
+	if (likely(max_level > PT_PAGE_TABLE_LEVEL))
 		transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
 	r = __direct_map(vcpu, gpa, write, map_writable, level, pfn,
 			 prefault, lpage_disallowed);
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index c1d7b866a03f..1938a6e4e631 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -778,7 +778,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	bool map_writable, is_self_change_mapping;
 	bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
 				is_nx_huge_page_enabled();
-	bool force_pt_level = lpage_disallowed;
+	int max_level;
 
 	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
 
@@ -818,14 +818,18 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
 	      &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);
 
+	max_level = lpage_disallowed ? PT_PAGE_TABLE_LEVEL :
+				       PT_MAX_HUGEPAGE_LEVEL;
+
 	if (walker.level >= PT_DIRECTORY_LEVEL && !is_self_change_mapping) {
-		level = mapping_level(vcpu, walker.gfn, &force_pt_level);
-		if (likely(!force_pt_level)) {
+		level = mapping_level(vcpu, walker.gfn, &max_level);
+		if (likely(max_level > PT_PAGE_TABLE_LEVEL)) {
 			level = min(walker.level, level);
 			walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
 		}
-	} else
-		force_pt_level = true;
+	} else {
+		max_level = PT_PAGE_TABLE_LEVEL;
+	}
 
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
@@ -865,7 +869,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
 	if (make_mmu_pages_available(vcpu) < 0)
 		goto out_unlock;
-	if (!force_pt_level)
+	if (max_level > PT_PAGE_TABLE_LEVEL)
 		transparent_hugepage_adjust(vcpu, walker.gfn, &pfn, &level);
 	r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
 			 level, pfn, map_writable, prefault, lpage_disallowed);
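One recurring detail in these hunks is the mask
gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1), which rounds the guest frame
number down to the first 4k frame of its huge page so the mapping is
installed at a naturally aligned boundary.  A rough standalone sketch of
the arithmetic, assuming nine translation bits per level as on x86-64
(PAGES_PER_HPAGE is an invented stand-in for the kernel's
KVM_PAGES_PER_HPAGE):

#include <stdint.h>
#include <stdio.h>

/* Assumed: 9 guest-physical translation bits per paging level. */
#define LEVEL_BITS 9

/* 4k pages covered by one mapping at 'level' (level 1 == 4k). */
#define PAGES_PER_HPAGE(level) (1ULL << (((level) - 1) * LEVEL_BITS))

int main(void)
{
	uint64_t gfn = 0x12345;	/* arbitrary guest frame number */
	int level = 2;		/* a 2MB mapping spans 512 base pages */

	/* Clear the low bits: align gfn down to its huge-page base. */
	uint64_t base = gfn & ~(PAGES_PER_HPAGE(level) - 1);

	printf("gfn 0x%llx -> 2MB-page base gfn 0x%llx\n",
	       (unsigned long long)gfn, (unsigned long long)base);
	return 0;
}

For level 2 the mask clears the low nine bits, so gfn 0x12345 falls inside
the 2MB page whose base gfn is 0x12200.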