author    Paolo Bonzini <pbonzini@redhat.com>    2015-07-07 15:03:18 +0200
committer Paolo Bonzini <pbonzini@redhat.com>    2015-07-10 13:25:24 +0200
commit    d1fe9219551e914f26219afaca1063b280f25963 (patch)
tree      f8d2e9309f11a47dcdb76bbd93065bc52ddafb6e /arch/x86/kvm
parent    5d75a747596be046546eeb9d6ba39a3af851a1af (diff)
KVM: x86: reintroduce kvm_is_mmio_pfn
The call to get_mt_mask was really using kvm_is_reserved_pfn to
detect an MMIO-backed page. For that purpose the zero page must
return "false": it is ordinary RAM that merely happens to be
marked PageReserved, not MMIO.

Reintroduce a separate kvm_is_mmio_pfn predicate for this use
only.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
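
For illustration only, here is a minimal userspace sketch (hypothetical
stub helpers, not kernel code) contrasting the kvm_is_reserved_pfn logic
with the new predicate; the stubbed pfn values (0 = unmapped, 1 = zero
page, 2 = ordinary RAM) are made up for the demo:

/*
 * Userspace demo: pfn_valid()/is_zero_pfn()/PageReserved() are stubbed.
 * The zero page is ordinary RAM but is marked PageReserved, which is
 * exactly why the two predicates disagree about it.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef unsigned long pfn_t;

#define ZERO_PFN 1UL

static bool stub_pfn_valid(pfn_t pfn)     { return pfn == 1 || pfn == 2; }
static bool stub_is_zero_pfn(pfn_t pfn)   { return pfn == ZERO_PFN; }
static bool stub_page_reserved(pfn_t pfn) { return pfn == ZERO_PFN; }

/* Same shape as kvm_is_reserved_pfn(): the zero page counts as reserved. */
static bool demo_is_reserved_pfn(pfn_t pfn)
{
	if (stub_pfn_valid(pfn))
		return stub_page_reserved(pfn);
	return true;	/* no struct page => treat as reserved */
}

/* Same shape as the new kvm_is_mmio_pfn(): carve out the zero page. */
static bool demo_is_mmio_pfn(pfn_t pfn)
{
	if (stub_pfn_valid(pfn))
		return !stub_is_zero_pfn(pfn) && stub_page_reserved(pfn);
	return true;	/* no struct page => assume MMIO */
}

int main(void)
{
	pfn_t pfns[] = { 0, ZERO_PFN, 2 };
	const char *name[] = { "unmapped", "zero page", "RAM" };

	for (size_t i = 0; i < 3; i++)
		printf("%-9s reserved=%d mmio=%d\n", name[i],
		       demo_is_reserved_pfn(pfns[i]),
		       demo_is_mmio_pfn(pfns[i]));
	return 0;
}

The zero-page line is the point of the patch: it still reads as
reserved for the other kvm_is_reserved_pfn callers, but no longer as
MMIO when set_spte picks a memory type.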
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/mmu.c | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f807496b62c2..44171462bd2a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2479,6 +2479,14 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 	return 0;
 }
 
+static bool kvm_is_mmio_pfn(pfn_t pfn)
+{
+	if (pfn_valid(pfn))
+		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
+
+	return true;
+}
+
 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		    unsigned pte_access, int level,
 		    gfn_t gfn, pfn_t pfn, bool speculative,
@@ -2506,7 +2514,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		spte |= PT_PAGE_SIZE_MASK;
 	if (tdp_enabled)
 		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
-			kvm_is_reserved_pfn(pfn));
+			kvm_is_mmio_pfn(pfn));
 
 	if (host_writable)
 		spte |= SPTE_HOST_WRITEABLE;
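
For context, the flag that set_spte now computes feeds the EPT memory-type
decision: on Intel, MMIO must be mapped uncacheable while normal RAM can
stay write-back. A hedged sketch of that branch follows (demo_get_mt_mask
is hypothetical; the constants restate the real kernel values, and the
real vmx_get_mt_mask() consults more state, such as the guest MTRRs):

#include <stdio.h>

/* Actual kernel constant values, restated here for the demo. */
#define MTRR_TYPE_UNCACHABLE   0
#define MTRR_TYPE_WRBACK       6
#define VMX_EPT_MT_EPTE_SHIFT  3

/* Hypothetical reduction of the is_mmio branch of get_mt_mask(). */
static unsigned long demo_get_mt_mask(int is_mmio)
{
	unsigned long type = is_mmio ? MTRR_TYPE_UNCACHABLE  /* UC for device memory */
				     : MTRR_TYPE_WRBACK;     /* WB for normal RAM */
	return type << VMX_EPT_MT_EPTE_SHIFT;
}

int main(void)
{
	printf("mmio: %#lx, ram: %#lx\n",
	       demo_get_mt_mask(1), demo_get_mt_mask(0));
	return 0;
}

Before this patch the zero page took the is_mmio branch and was mapped
uncacheable; with kvm_is_mmio_pfn it falls through to the normal RAM path.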