author     Paolo Bonzini <pbonzini@redhat.com>    2021-02-06 09:53:33 -0500
committer  Paolo Bonzini <pbonzini@redhat.com>    2021-02-08 14:49:01 -0500
commit     897218ff7cf19290ec2d69652ce673d8ed6fedeb (patch)
tree       e2901b8ab7865dc22205fdd57c173f72279aad19 /arch/x86/kvm
parent     e36b250e502a6145fa95a3d93af83ba66fc6ae1b (diff)
download   linux-897218ff7cf19290ec2d69652ce673d8ed6fedeb.tar.bz2
KVM: x86: compile out TDP MMU on 32-bit systems
The TDP MMU assumes that it can do atomic accesses to 64-bit PTEs.
Rather than just disabling it, compile it out completely so that it
is possible to use, for example, 64-bit xchg.

To limit the number of stubs, wrap all accesses to tdp_mmu_enabled
or tdp_mmu_page with a function. Calls to all other functions in
tdp_mmu.c are eliminated and do not even reach the linker.

Reviewed-by: Sean Christopherson <seanjc@google.com>
Tested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
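As a quick illustration of the pattern the patch adopts (names taken from the patch itself, but simplified and detached from full kernel context): a per-config static inline either reads the real per-VM flag or returns a constant false, and on 32-bit that constant folds away every guarded call before link time.

/*
 * Minimal sketch of the compile-out pattern; assumes kernel context
 * (struct kvm, CONFIG_X86_64) and the declarations in tdp_mmu.h.
 */
#ifdef CONFIG_X86_64
static inline bool is_tdp_mmu_enabled(struct kvm *kvm)
{
	return kvm->arch.tdp_mmu_enabled;
}
#else
static inline bool is_tdp_mmu_enabled(struct kvm *kvm)
{
	return false;	/* compile-time constant on 32-bit */
}
#endif

/* Hypothetical caller, standing in for the call sites in mmu.c. */
void example_caller(struct kvm *kvm)
{
	/*
	 * On 32-bit the condition is constant false, so the compiler
	 * drops the call below as dead code; the reference never
	 * reaches the linker even though tdp_mmu.o is not built.
	 */
	if (is_tdp_mmu_enabled(kvm))
		kvm_tdp_mmu_zap_all(kvm);
}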
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/Makefile             3
-rw-r--r--  arch/x86/kvm/mmu/mmu.c           36
-rw-r--r--  arch/x86/kvm/mmu/mmu_internal.h   2
-rw-r--r--  arch/x86/kvm/mmu/tdp_mmu.c       29
-rw-r--r--  arch/x86/kvm/mmu/tdp_mmu.h       32
5 files changed, 51 insertions, 51 deletions
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index a50041235530..aeab168c5711 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -17,7 +17,8 @@ kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
kvm-y += x86.o emulate.o i8259.o irq.o lapic.o xen.o \
i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \
- mmu/spte.o mmu/tdp_iter.o mmu/tdp_mmu.o
+ mmu/spte.o
+kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o
kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
vmx/evmcs.o vmx/nested.o vmx/posted_intr.o
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 86af58294272..bebb66c6d068 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1225,7 +1225,7 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
{
struct kvm_rmap_head *rmap_head;
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
slot->base_gfn + gfn_offset, mask, true);
while (mask) {
@@ -1254,7 +1254,7 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
{
struct kvm_rmap_head *rmap_head;
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
slot->base_gfn + gfn_offset, mask, false);
while (mask) {
@@ -1310,7 +1310,7 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
write_protected |= __rmap_write_protect(kvm, rmap_head, true);
}
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
write_protected |=
kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn);
@@ -1522,7 +1522,7 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
r = kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
r |= kvm_tdp_mmu_zap_hva_range(kvm, start, end);
return r;
@@ -1534,7 +1534,7 @@ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
r = kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
r |= kvm_tdp_mmu_set_spte_hva(kvm, hva, &pte);
return r;
@@ -1589,7 +1589,7 @@ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
int young = false;
young = kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
young |= kvm_tdp_mmu_age_hva_range(kvm, start, end);
return young;
@@ -1600,7 +1600,7 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
int young = false;
young = kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
young |= kvm_tdp_mmu_test_age_hva(kvm, hva);
return young;
@@ -3155,7 +3155,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
if (kvm_mmu_put_root(kvm, sp)) {
- if (sp->tdp_mmu_page)
+ if (is_tdp_mmu_page(sp))
kvm_tdp_mmu_free_root(kvm, sp);
else if (sp->role.invalid)
kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
@@ -3249,7 +3249,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
hpa_t root;
unsigned i;
- if (vcpu->kvm->arch.tdp_mmu_enabled) {
+ if (is_tdp_mmu_enabled(vcpu->kvm)) {
root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
if (!VALID_PAGE(root))
@@ -5411,7 +5411,7 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
kvm_zap_obsolete_pages(kvm);
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
kvm_tdp_mmu_zap_all(kvm);
write_unlock(&kvm->mmu_lock);
@@ -5474,7 +5474,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
}
}
- if (kvm->arch.tdp_mmu_enabled) {
+ if (is_tdp_mmu_enabled(kvm)) {
flush = kvm_tdp_mmu_zap_gfn_range(kvm, gfn_start, gfn_end);
if (flush)
kvm_flush_remote_tlbs(kvm);
@@ -5498,7 +5498,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
write_lock(&kvm->mmu_lock);
flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_4K);
write_unlock(&kvm->mmu_lock);
@@ -5564,7 +5564,7 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot,
kvm_mmu_zap_collapsible_spte, true);
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
kvm_tdp_mmu_zap_collapsible_sptes(kvm, memslot);
write_unlock(&kvm->mmu_lock);
}
@@ -5591,7 +5591,7 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
write_lock(&kvm->mmu_lock);
flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
write_unlock(&kvm->mmu_lock);
@@ -5614,7 +5614,7 @@ void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
write_lock(&kvm->mmu_lock);
flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
false);
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_2M);
write_unlock(&kvm->mmu_lock);
@@ -5630,7 +5630,7 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
write_lock(&kvm->mmu_lock);
flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
flush |= kvm_tdp_mmu_slot_set_dirty(kvm, memslot);
write_unlock(&kvm->mmu_lock);
@@ -5658,7 +5658,7 @@ restart:
kvm_mmu_commit_zap_page(kvm, &invalid_list);
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
kvm_tdp_mmu_zap_all(kvm);
write_unlock(&kvm->mmu_lock);
@@ -5969,7 +5969,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
struct kvm_mmu_page,
lpage_disallowed_link);
WARN_ON_ONCE(!sp->lpage_disallowed);
- if (sp->tdp_mmu_page) {
+ if (is_tdp_mmu_page(sp)) {
kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn,
sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level));
} else {
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 98db78a26957..9e38d3c5daad 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -56,10 +56,12 @@ struct kvm_mmu_page {
/* Number of writes since the last time traversal visited this page. */
atomic_t write_flooding_count;
+#ifdef CONFIG_X86_64
bool tdp_mmu_page;
/* Used for freeing the page asynchronously if it is a TDP MMU page. */
struct rcu_head rcu_head;
+#endif
};
extern struct kmem_cache *mmu_page_header_cache;
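The rcu_head that moves under CONFIG_X86_64 here lets a TDP MMU page be freed only after an RCU grace period. A hedged sketch of how an embedded rcu_head is typically consumed (the helper names and the exact free body are illustrative, not this patch's code; kernel context assumed):

/* Illustrative only: deferred free via the embedded rcu_head. */
static void tdp_mmu_free_sp_rcu(struct rcu_head *head)
{
	struct kvm_mmu_page *sp =
		container_of(head, struct kvm_mmu_page, rcu_head);

	free_page((unsigned long)sp->spt);
	kmem_cache_free(mmu_page_header_cache, sp);
}

static void tdp_mmu_defer_free(struct kvm_mmu_page *sp)
{
	/* Invokes the callback once all current RCU readers finish. */
	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu);
}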
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index aa0845d5e1e4..ec99e50a7ba7 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -10,24 +10,13 @@
#include <asm/cmpxchg.h>
#include <trace/events/kvm.h>
-#ifdef CONFIG_X86_64
static bool __read_mostly tdp_mmu_enabled = false;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
-#endif
-
-static bool is_tdp_mmu_enabled(void)
-{
-#ifdef CONFIG_X86_64
- return tdp_enabled && READ_ONCE(tdp_mmu_enabled);
-#else
- return false;
-#endif /* CONFIG_X86_64 */
-}
/* Initializes the TDP MMU for the VM, if enabled. */
void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
- if (!is_tdp_mmu_enabled())
+ if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
return;
/* This should not be changed for the lifetime of the VM. */
@@ -96,22 +85,6 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
#define for_each_tdp_mmu_root(_kvm, _root) \
list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
-bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
-{
- struct kvm_mmu_page *sp;
-
- if (!kvm->arch.tdp_mmu_enabled)
- return false;
- if (WARN_ON(!VALID_PAGE(hpa)))
- return false;
-
- sp = to_shadow_page(hpa);
- if (WARN_ON(!sp))
- return false;
-
- return sp->tdp_mmu_page && sp->root_count;
-}
-
static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t start, gfn_t end, bool can_yield);
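Note that the tdp_mmu module parameter stays writable at runtime (mode 0644); that is why kvm_mmu_init_tdp_mmu() samples it with READ_ONCE() and latches the result per VM. A sketch of the latch, extrapolating one assignment beyond what the hunk shows:

/* Sketch: snapshot the writable module param once per VM. */
void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
		return;

	/* This should not be changed for the lifetime of the VM. */
	kvm->arch.tdp_mmu_enabled = true;

	/* ... remaining TDP MMU initialization elided ... */
}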
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index cbbdbadd1526..b4b65e3699b3 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -5,10 +5,6 @@
#include <linux/kvm_host.h>
-void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
-void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
-
-bool is_tdp_mmu_root(struct kvm *kvm, hpa_t root);
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);
@@ -47,4 +43,32 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
int *root_level);
+#ifdef CONFIG_X86_64
+void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
+void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
+static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
+static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
+#else
+static inline void kvm_mmu_init_tdp_mmu(struct kvm *kvm) {}
+static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
+static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
+static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
+#endif
+
+static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
+{
+ struct kvm_mmu_page *sp;
+
+ if (!is_tdp_mmu_enabled(kvm))
+ return false;
+ if (WARN_ON(!VALID_PAGE(hpa)))
+ return false;
+
+ sp = to_shadow_page(hpa);
+ if (WARN_ON(!sp))
+ return false;
+
+ return is_tdp_mmu_page(sp) && sp->root_count;
+}
+
#endif /* __KVM_X86_MMU_TDP_MMU_H */
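One design choice worth noting in this last hunk: is_tdp_mmu_root() moves out of tdp_mmu.c and into the header as a static inline. On 32-bit builds its first check, is_tdp_mmu_enabled(), is a compile-time false, so the whole function folds to a constant false and to_shadow_page() is never evaluated; on 64-bit it behaves exactly as before. That is what lets the function leave the 32-bit build without needing a stub of its own.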