author    | Sean Christopherson <sean.j.christopherson@intel.com> | 2020-06-22 13:20:32 -0700
committer | Paolo Bonzini <pbonzini@redhat.com> | 2020-07-08 16:21:54 -0400
commit    | 985ab2780164698ec6e7d73fad523d50449261dd (patch)
tree      | 5492cd905af56f37b0be0e2299875d0401a1ae64 /arch/x86/kvm/mmu/mmu_internal.h
parent    | 6ca9a6f3adef955e004123069e15ecffa462e823 (diff)
download  | linux-985ab2780164698ec6e7d73fad523d50449261dd.tar.bz2
KVM: x86/mmu: Make kvm_mmu_page definition and accessor internal-only
Make 'struct kvm_mmu_page' MMU-only; nothing outside of the MMU should
be poking into the gory details of shadow pages.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200622202034.15093-5-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
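The page_header() accessor moved by this patch works because the shadow-page allocation path stores the kvm_mmu_page pointer in the page-private field of the struct page backing sp->spt; the accessor then inverts that mapping (HPA -> pfn -> struct page -> page_private()). Below is a minimal sketch of the producer side, modeled on kvm_mmu_alloc_page() in arch/x86/kvm/mmu/mmu.c as of this commit; the function name is illustrative and the memory-cache setup is elided.

/*
 * Producer side of page_header(): when KVM allocates a shadow page,
 * it stashes the kvm_mmu_page pointer in the struct page that backs
 * the SPTE table, so the accessor can recover it from an HPA later.
 * Sketch modeled on kvm_mmu_alloc_page(); cache plumbing elided.
 */
static struct kvm_mmu_page *mmu_alloc_shadow_page_sketch(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;

	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);

	/* Link the backing page to its shadow-page metadata. */
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	return sp;
}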
Diffstat (limited to 'arch/x86/kvm/mmu/mmu_internal.h')
-rw-r--r-- | arch/x86/kvm/mmu/mmu_internal.h | 48
1 file changed, 48 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index d7938c37c7de..8afa60f0a1a5 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -2,6 +2,54 @@
 #ifndef __KVM_X86_MMU_INTERNAL_H
 #define __KVM_X86_MMU_INTERNAL_H
 
+#include <linux/types.h>
+
+#include <asm/kvm_host.h>
+
+struct kvm_mmu_page {
+	struct list_head link;
+	struct hlist_node hash_link;
+	struct list_head lpage_disallowed_link;
+
+	bool unsync;
+	u8 mmu_valid_gen;
+	bool mmio_cached;
+	bool lpage_disallowed; /* Can't be replaced by an equiv large page */
+
+	/*
+	 * The following two entries are used to key the shadow page in the
+	 * hash table.
+	 */
+	union kvm_mmu_page_role role;
+	gfn_t gfn;
+
+	u64 *spt;
+	/* hold the gfn of each spte inside spt */
+	gfn_t *gfns;
+	int root_count; /* Currently serving as active root */
+	unsigned int unsync_children;
+	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
+	DECLARE_BITMAP(unsync_child_bitmap, 512);
+
+#ifdef CONFIG_X86_32
+	/*
+	 * Used out of the mmu-lock to avoid reading spte values while an
+	 * update is in progress; see the comments in __get_spte_lockless().
+	 */
+	int clear_spte_count;
+#endif
+
+	/* Number of writes since the last time traversal visited this page. */
+	atomic_t write_flooding_count;
+};
+
+static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
+{
+	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
+
+	return (struct kvm_mmu_page *)page_private(page);
+}
+
 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
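With the definition visible only under arch/x86/kvm/mmu/, every dereference of a shadow page now has to go through MMU-internal code. A hedged usage sketch of the consumer side, closely following the mmu_free_root_page() pattern in mmu.c at the time of this commit (the _sketch suffix is illustrative; PT64_BASE_ADDR_MASK strips the non-address bits from the root HPA):

/*
 * Usage sketch: recover the kvm_mmu_page backing an active root from
 * its host physical address and drop the root reference. Follows the
 * mmu_free_root_page() pattern in arch/x86/kvm/mmu/mmu.c.
 */
static void mmu_free_root_page_sketch(struct kvm *kvm, hpa_t *root_hpa,
				      struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(*root_hpa))
		return;

	/* Mask off attribute bits so only the page-aligned HPA remains. */
	sp = page_header(*root_hpa & PT64_BASE_ADDR_MASK);

	/* Zap the page once it is invalid and no longer serves as a root. */
	--sp->root_count;
	if (!sp->root_count && sp->role.invalid)
		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);

	*root_hpa = INVALID_PAGE;
}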