author    Sean Christopherson <sean.j.christopherson@intel.com>   2020-06-22 13:20:32 -0700
committer Paolo Bonzini <pbonzini@redhat.com>                     2020-07-08 16:21:54 -0400
commit    985ab2780164698ec6e7d73fad523d50449261dd (patch)
tree      5492cd905af56f37b0be0e2299875d0401a1ae64 /arch
parent    6ca9a6f3adef955e004123069e15ecffa462e823 (diff)
KVM: x86/mmu: Make kvm_mmu_page definition and accessor internal-only
Make 'struct kvm_mmu_page' MMU-only; nothing outside of the MMU should
be poking into the gory details of shadow pages.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200622202034.15093-5-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
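The change relies on C's incomplete ("opaque") types: the public header now
carries only a forward declaration, so code outside the MMU can still pass a
'struct kvm_mmu_page *' around, but only files that include mmu_internal.h
can dereference one. A minimal sketch of the pattern, using hypothetical
widget/header names rather than the real KVM files:

    /* public.h -- what the rest of the tree sees (hypothetical names) */
    struct widget;                       /* incomplete type: layout unknown */
    struct widget *widget_find(int id);  /* pointers may still be handed out */

    /* internal.h -- included only by the owning subsystem's .c files */
    struct widget {
        int id;                          /* the "gory details" stay private */
        unsigned long state;
    };

A translation unit that includes only public.h can store and compare widget
pointers, but 'w->state' fails to compile ("dereferencing pointer to
incomplete type"), which is exactly the compile-time enforcement this commit
gains for 'struct kvm_mmu_page'.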
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/kvm_host.h  | 46
-rw-r--r--  arch/x86/kvm/mmu/mmu_internal.h  | 48
2 files changed, 50 insertions(+), 44 deletions(-)
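The accessor being moved, page_header(), recovers the kvm_mmu_page describing
a shadow page table from the table's host physical address: shift the HPA
down by PAGE_SHIFT to get the page frame, then read the back-pointer KVM
stashed in that struct page's private field when the table was allocated
(via set_page_private()). A runnable userspace analog of that back-pointer
scheme, assuming a toy fake_memmap array standing in for the kernel's
struct page metadata:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define NFRAMES    16

    /* Stand-in for struct kvm_mmu_page: metadata for one shadow page table. */
    struct mmu_page_meta {
        uint64_t *spt;          /* the page of sptes this metadata describes */
        int root_count;
    };

    /* Stand-in for struct page::private: one back-pointer slot per frame. */
    static struct mmu_page_meta *fake_memmap[NFRAMES];

    /* Analog of page_header(): frame = hpa >> PAGE_SHIFT, then follow the
     * back-pointer stored when the table was allocated. */
    static struct mmu_page_meta *page_header(uint64_t hpa)
    {
        return fake_memmap[hpa >> PAGE_SHIFT];
    }

    int main(void)
    {
        /* "Allocate" frame 3 as a shadow page table and link its metadata,
         * mirroring set_page_private(virt_to_page(sp->spt),
         * (unsigned long)sp) in the real MMU. */
        struct mmu_page_meta sp = {
            .spt = calloc(PAGE_SIZE / sizeof(uint64_t), sizeof(uint64_t)),
            .root_count = 1,
        };
        fake_memmap[3] = &sp;

        /* Code holding only the table's physical address can now recover
         * the metadata, as page_header() does for real shadow pages. */
        uint64_t hpa = 3UL << PAGE_SHIFT;
        printf("root_count = %d\n", page_header(hpa)->root_count);

        free(sp.spt);
        return 0;
    }

Since every caller of this trick is MMU code, moving both the struct and the
accessor into mmu_internal.h (as the hunks below do) loses nothing.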
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2357763bf7f2..97cb005c7aa7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -322,43 +322,6 @@ struct kvm_rmap_head {
unsigned long val;
};
-struct kvm_mmu_page {
- struct list_head link;
- struct hlist_node hash_link;
- struct list_head lpage_disallowed_link;
-
- bool unsync;
- u8 mmu_valid_gen;
- bool mmio_cached;
- bool lpage_disallowed; /* Can't be replaced by an equiv large page */
-
- /*
- * The following two entries are used to key the shadow page in the
- * hash table.
- */
- union kvm_mmu_page_role role;
- gfn_t gfn;
-
- u64 *spt;
- /* hold the gfn of each spte inside spt */
- gfn_t *gfns;
- int root_count; /* Currently serving as active root */
- unsigned int unsync_children;
- struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
- DECLARE_BITMAP(unsync_child_bitmap, 512);
-
-#ifdef CONFIG_X86_32
- /*
- * Used out of the mmu-lock to avoid reading spte values while an
- * update is in progress; see the comments in __get_spte_lockless().
- */
- int clear_spte_count;
-#endif
-
- /* Number of writes since the last time traversal visited this page. */
- atomic_t write_flooding_count;
-};
-
struct kvm_pio_request {
unsigned long linear_rip;
unsigned long count;
@@ -384,6 +347,8 @@ struct kvm_mmu_root_info {
#define KVM_MMU_NUM_PREV_ROOTS 3
+struct kvm_mmu_page;
+
/*
* x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
* and 2-level 32-bit). The kvm_mmu structure abstracts the details of the
@@ -1560,13 +1525,6 @@ static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
return gpa;
}
-static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
-{
- struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
-
- return (struct kvm_mmu_page *)page_private(page);
-}
-
static inline u16 kvm_read_ldt(void)
{
u16 ldt;
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index d7938c37c7de..8afa60f0a1a5 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -2,6 +2,54 @@
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H
+#include <linux/types.h>
+
+#include <asm/kvm_host.h>
+
+struct kvm_mmu_page {
+ struct list_head link;
+ struct hlist_node hash_link;
+ struct list_head lpage_disallowed_link;
+
+ bool unsync;
+ u8 mmu_valid_gen;
+ bool mmio_cached;
+ bool lpage_disallowed; /* Can't be replaced by an equiv large page */
+
+ /*
+ * The following two entries are used to key the shadow page in the
+ * hash table.
+ */
+ union kvm_mmu_page_role role;
+ gfn_t gfn;
+
+ u64 *spt;
+ /* hold the gfn of each spte inside spt */
+ gfn_t *gfns;
+ int root_count; /* Currently serving as active root */
+ unsigned int unsync_children;
+ struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
+ DECLARE_BITMAP(unsync_child_bitmap, 512);
+
+#ifdef CONFIG_X86_32
+ /*
+ * Used out of the mmu-lock to avoid reading spte values while an
+ * update is in progress; see the comments in __get_spte_lockless().
+ */
+ int clear_spte_count;
+#endif
+
+ /* Number of writes since the last time traversal visited this page. */
+ atomic_t write_flooding_count;
+};
+
+static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
+{
+ struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
+
+ return (struct kvm_mmu_page *)page_private(page);
+}
+
void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,