author     Linus Torvalds <torvalds@linux-foundation.org>  2021-01-08 15:06:02 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-01-08 15:06:02 -0800
commit     2a190b22aa1149cda804527aa603db45f75439c3
tree       afbbea961c6cfc17428324c204e469411b45d597
parent     3e2a590acbed38a6908a5c4df7754dcb65f6fd37
parent     872f36eb0b0f4f0e3a81ea1e51a6bdf58ccfdc6e
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Paolo Bonzini:
 "x86:
   - Fixes for the new scalable MMU
   - Fixes for migration of nested hypervisors on AMD
   - Fix for clang integrated assembler
   - Fix for left shift by 64 (UBSAN)
   - Small cleanups
   - Straggler SEV-ES patch

  ARM:
   - VM init cleanups
   - PSCI relay cleanups
   - Kill CONFIG_KVM_ARM_PMU
   - Fixup __init annotations
   - Fixup reg_to_encoding()
   - Fix spurious PMCR_EL0 access

  Misc:
   - selftests cleanups"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (38 commits)
  KVM: x86: __kvm_vcpu_halt can be static
  KVM: SVM: Add support for booting APs in an SEV-ES guest
  KVM: nSVM: cancel KVM_REQ_GET_NESTED_STATE_PAGES on nested vmexit
  KVM: nSVM: mark vmcb as dirty when forcingly leaving the guest mode
  KVM: nSVM: correctly restore nested_run_pending on migration
  KVM: x86/mmu: Clarify TDP MMU page list invariants
  KVM: x86/mmu: Ensure TDP MMU roots are freed after yield
  kvm: check tlbs_dirty directly
  KVM: x86: change in pv_eoi_get_pending() to make code more readable
  MAINTAINERS: Really update email address for Sean Christopherson
  KVM: x86: fix shift out of bounds reported by UBSAN
  KVM: selftests: Implement perf_test_util more conventionally
  KVM: selftests: Use vm_create_with_vcpus in create_vm
  KVM: selftests: Factor out guest mode code
  KVM/SVM: Remove leftover __svm_vcpu_run prototype from svm.c
  KVM: SVM: Add register operand to vmsave call in sev_es_vcpu_load
  KVM: x86/mmu: Optimize not-present/MMIO SPTE check in get_mmio_spte()
  KVM: x86/mmu: Use raw level to index into MMIO walks' sptes array
  KVM: x86/mmu: Get root level from walkers when retrieving MMIO SPTE
  KVM: x86/mmu: Use -1 to flag an undefined spte in get_mmio_spte()
  ...
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 23
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 2
-rw-r--r--  arch/arm64/kernel/smp.c | 2
-rw-r--r--  arch/arm64/kvm/Kconfig | 8
-rw-r--r--  arch/arm64/kvm/Makefile | 2
-rw-r--r--  arch/arm64/kvm/arch_timer.c | 7
-rw-r--r--  arch/arm64/kvm/arm.c | 32
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/adjust_pc.h | 9
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/hyp-main.c | 12
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/hyp-smp.c | 6
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/psci-relay.c | 59
-rw-r--r--  arch/arm64/kvm/pmu-emul.c | 2
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 6
-rw-r--r--  arch/arm64/kvm/va_layout.c | 7
-rw-r--r--  arch/arm64/kvm/vgic/vgic-init.c | 11
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v2.c | 20
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v3.c | 21
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 19
-rw-r--r--  arch/x86/kvm/lapic.c | 4
-rw-r--r--  arch/x86/kvm/mmu.h | 2
-rw-r--r--  arch/x86/kvm/mmu/mmu.c | 53
-rw-r--r--  arch/x86/kvm/mmu/tdp_mmu.c | 113
-rw-r--r--  arch/x86/kvm/mmu/tdp_mmu.h | 4
-rw-r--r--  arch/x86/kvm/svm/nested.c | 8
-rw-r--r--  arch/x86/kvm/svm/sev.c | 24
-rw-r--r--  arch/x86/kvm/svm/svm.c | 12
-rw-r--r--  arch/x86/kvm/svm/svm.h | 2
-rw-r--r--  arch/x86/kvm/vmx/nested.c | 2
-rw-r--r--  arch/x86/kvm/vmx/vmx.c | 2
-rw-r--r--  arch/x86/kvm/x86.c | 30
30 files changed, 292 insertions(+), 212 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 11beda85ee7e..8fcfab0c2567 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -17,6 +17,7 @@
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
+#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
@@ -240,6 +241,28 @@ struct kvm_host_data {
struct kvm_pmu_events pmu_events;
};
+struct kvm_host_psci_config {
+ /* PSCI version used by host. */
+ u32 version;
+
+ /* Function IDs used by host if version is v0.1. */
+ struct psci_0_1_function_ids function_ids_0_1;
+
+ bool psci_0_1_cpu_suspend_implemented;
+ bool psci_0_1_cpu_on_implemented;
+ bool psci_0_1_cpu_off_implemented;
+ bool psci_0_1_migrate_implemented;
+};
+
+extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
+#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)
+
+extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
+#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)
+
+extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
+#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)
+
struct vcpu_reset_state {
unsigned long pc;
unsigned long r0;
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 7ffb5f1d8b68..e99eddec0a46 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2568,7 +2568,7 @@ static void verify_hyp_capabilities(void)
int parange, ipa_max;
unsigned int safe_vmid_bits, vmid_bits;
- if (!IS_ENABLED(CONFIG_KVM) || !IS_ENABLED(CONFIG_KVM_ARM_HOST))
+ if (!IS_ENABLED(CONFIG_KVM))
return;
safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 376343d6f13a..ad00f99ee9b0 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -434,7 +434,7 @@ static void __init hyp_mode_check(void)
"CPU: CPUs started in inconsistent modes");
else
pr_info("CPU: All CPU(s) started at EL1\n");
- if (IS_ENABLED(CONFIG_KVM))
+ if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode())
kvm_compute_layout();
}
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 043756db8f6e..3964acf5451e 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -49,14 +49,6 @@ if KVM
source "virt/kvm/Kconfig"
-config KVM_ARM_PMU
- bool "Virtual Performance Monitoring Unit (PMU) support"
- depends on HW_PERF_EVENTS
- default y
- help
- Adds support for a virtual Performance Monitoring Unit (PMU) in
- virtual machines.
-
endif # KVM
endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 60fd181df624..13b017284bf9 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -24,4 +24,4 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \
vgic/vgic-its.o vgic/vgic-debug.o
-kvm-$(CONFIG_KVM_ARM_PMU) += pmu-emul.o
+kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 32ba6fbc3814..74e0699661e9 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -1129,9 +1129,10 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
if (!irqchip_in_kernel(vcpu->kvm))
goto no_vgic;
- if (!vgic_initialized(vcpu->kvm))
- return -ENODEV;
-
+ /*
+ * At this stage, we have the guarantee that the vgic is both
+ * available and initialized.
+ */
if (!timer_irqs_are_valid(vcpu)) {
kvm_debug("incorrectly configured timer irqs\n");
return -EINVAL;
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 6e637d2b4cfb..04c44853b103 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -65,10 +65,6 @@ static bool vgic_present;
static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
-extern u64 kvm_nvhe_sym(__cpu_logical_map)[NR_CPUS];
-extern u32 kvm_nvhe_sym(kvm_host_psci_version);
-extern struct psci_0_1_function_ids kvm_nvhe_sym(kvm_host_psci_0_1_function_ids);
-
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
@@ -584,11 +580,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
* Map the VGIC hardware resources before running a vcpu the
* first time on this VM.
*/
- if (unlikely(!vgic_ready(kvm))) {
- ret = kvm_vgic_map_resources(kvm);
- if (ret)
- return ret;
- }
+ ret = kvm_vgic_map_resources(kvm);
+ if (ret)
+ return ret;
} else {
/*
* Tell the rest of the code that there are userspace irqchip
@@ -1574,12 +1568,12 @@ static struct notifier_block hyp_init_cpu_pm_nb = {
.notifier_call = hyp_init_cpu_pm_notifier,
};
-static void __init hyp_cpu_pm_init(void)
+static void hyp_cpu_pm_init(void)
{
if (!is_protected_kvm_enabled())
cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
}
-static void __init hyp_cpu_pm_exit(void)
+static void hyp_cpu_pm_exit(void)
{
if (!is_protected_kvm_enabled())
cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
@@ -1604,9 +1598,12 @@ static void init_cpu_logical_map(void)
* allow any other CPUs from the `possible` set to boot.
*/
for_each_online_cpu(cpu)
- kvm_nvhe_sym(__cpu_logical_map)[cpu] = cpu_logical_map(cpu);
+ hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu);
}
+#define init_psci_0_1_impl_state(config, what) \
+ config.psci_0_1_ ## what ## _implemented = psci_ops.what
+
static bool init_psci_relay(void)
{
/*
@@ -1618,8 +1615,15 @@ static bool init_psci_relay(void)
return false;
}
- kvm_nvhe_sym(kvm_host_psci_version) = psci_ops.get_version();
- kvm_nvhe_sym(kvm_host_psci_0_1_function_ids) = get_psci_0_1_function_ids();
+ kvm_host_psci_config.version = psci_ops.get_version();
+
+ if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
+ kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
+ init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend);
+ init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on);
+ init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off);
+ init_psci_0_1_impl_state(kvm_host_psci_config, migrate);
+ }
return true;
}
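
The init_psci_0_1_impl_state() helper added above leans on preprocessor token pasting: the bare word passed as "what" is spliced into both the config flag name and the psci_ops member lookup, so one macro covers all four PSCI v0.1 functions. A minimal standalone sketch of the pattern follows; the fake_* structs and field set are illustrative stand-ins, not the kernel's types.

#include <stdbool.h>
#include <stdio.h>

struct fake_psci_ops {
	int (*cpu_on)(unsigned long cpu, unsigned long entry);
	int (*cpu_off)(unsigned long state);
};

struct fake_psci_config {
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
};

/*
 * Same shape as the kernel macro: paste "what" into the flag name and the
 * ops member; the flag ends up true iff the callback is non-NULL.
 */
#define init_psci_0_1_impl_state(config, ops, what) \
	((config).psci_0_1_ ## what ## _implemented = !!(ops).what)

static int fake_cpu_on(unsigned long cpu, unsigned long entry)
{
	(void)cpu;
	(void)entry;
	return 0;
}

int main(void)
{
	struct fake_psci_ops ops = { .cpu_on = fake_cpu_on, .cpu_off = NULL };
	struct fake_psci_config cfg = { false, false };

	init_psci_0_1_impl_state(cfg, ops, cpu_on);
	init_psci_0_1_impl_state(cfg, ops, cpu_off);

	/* Prints "cpu_on: 1, cpu_off: 0". */
	printf("cpu_on: %d, cpu_off: %d\n",
	       cfg.psci_0_1_cpu_on_implemented,
	       cfg.psci_0_1_cpu_off_implemented);
	return 0;
}

The assignment doubles as the "implemented" test: a v0.1 function the firmware did not advertise leaves its callback NULL, so the flag comes out false, which is exactly what the is_psci_0_1() checks in psci-relay.c later consume.
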
diff --git a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
index b1f60923a8fe..61716359035d 100644
--- a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
+++ b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
@@ -59,4 +59,13 @@ static inline void __adjust_pc(struct kvm_vcpu *vcpu)
}
}
+/*
+ * Skip an instruction while host sysregs are live.
+ * Assumes host is always 64-bit.
+ */
+static inline void kvm_skip_host_instr(void)
+{
+ write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
+}
+
#endif
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index bde658d51404..a906f9e2ff34 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -157,11 +157,6 @@ static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
__kvm_hyp_host_forward_smc(host_ctxt);
}
-static void skip_host_instruction(void)
-{
- write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
-}
-
static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
{
bool handled;
@@ -170,11 +165,8 @@ static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
if (!handled)
default_host_smc_handler(host_ctxt);
- /*
- * Unlike HVC, the return address of an SMC is the instruction's PC.
- * Move the return address past the instruction.
- */
- skip_host_instruction();
+ /* SMC was trapped, move ELR past the current PC. */
+ kvm_skip_host_instr();
}
void handle_trap(struct kvm_cpu_context *host_ctxt)
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
index cbab0c6246e2..2997aa156d8e 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
@@ -14,14 +14,14 @@
* Other CPUs should not be allowed to boot because their features were
* not checked against the finalized system capabilities.
*/
-u64 __ro_after_init __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
+u64 __ro_after_init hyp_cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
u64 cpu_logical_map(unsigned int cpu)
{
- if (cpu >= ARRAY_SIZE(__cpu_logical_map))
+ if (cpu >= ARRAY_SIZE(hyp_cpu_logical_map))
hyp_panic();
- return __cpu_logical_map[cpu];
+ return hyp_cpu_logical_map[cpu];
}
unsigned long __hyp_per_cpu_offset(unsigned int cpu)
diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
index 08dc9de69314..e3947846ffcb 100644
--- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
+++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
@@ -7,11 +7,8 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
-#include <kvm/arm_hypercalls.h>
#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
-#include <linux/psci.h>
-#include <kvm/arm_psci.h>
#include <uapi/linux/psci.h>
#include <nvhe/trap_handler.h>
@@ -22,9 +19,8 @@ void kvm_hyp_cpu_resume(unsigned long r0);
void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
/* Config options set by the host. */
-__ro_after_init u32 kvm_host_psci_version;
-__ro_after_init struct psci_0_1_function_ids kvm_host_psci_0_1_function_ids;
-__ro_after_init s64 hyp_physvirt_offset;
+struct kvm_host_psci_config __ro_after_init kvm_host_psci_config;
+s64 __ro_after_init hyp_physvirt_offset;
#define __hyp_pa(x) ((phys_addr_t)((x)) + hyp_physvirt_offset)
@@ -47,19 +43,16 @@ struct psci_boot_args {
static DEFINE_PER_CPU(struct psci_boot_args, cpu_on_args) = PSCI_BOOT_ARGS_INIT;
static DEFINE_PER_CPU(struct psci_boot_args, suspend_args) = PSCI_BOOT_ARGS_INIT;
-static u64 get_psci_func_id(struct kvm_cpu_context *host_ctxt)
-{
- DECLARE_REG(u64, func_id, host_ctxt, 0);
-
- return func_id;
-}
+#define is_psci_0_1(what, func_id) \
+ (kvm_host_psci_config.psci_0_1_ ## what ## _implemented && \
+ (func_id) == kvm_host_psci_config.function_ids_0_1.what)
static bool is_psci_0_1_call(u64 func_id)
{
- return (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend) ||
- (func_id == kvm_host_psci_0_1_function_ids.cpu_on) ||
- (func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
- (func_id == kvm_host_psci_0_1_function_ids.migrate);
+ return (is_psci_0_1(cpu_suspend, func_id) ||
+ is_psci_0_1(cpu_on, func_id) ||
+ is_psci_0_1(cpu_off, func_id) ||
+ is_psci_0_1(migrate, func_id));
}
static bool is_psci_0_2_call(u64 func_id)
@@ -69,16 +62,6 @@ static bool is_psci_0_2_call(u64 func_id)
(PSCI_0_2_FN64(0) <= func_id && func_id <= PSCI_0_2_FN64(31));
}
-static bool is_psci_call(u64 func_id)
-{
- switch (kvm_host_psci_version) {
- case PSCI_VERSION(0, 1):
- return is_psci_0_1_call(func_id);
- default:
- return is_psci_0_2_call(func_id);
- }
-}
-
static unsigned long psci_call(unsigned long fn, unsigned long arg0,
unsigned long arg1, unsigned long arg2)
{
@@ -248,15 +231,14 @@ asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
- if ((func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
- (func_id == kvm_host_psci_0_1_function_ids.migrate))
+ if (is_psci_0_1(cpu_off, func_id) || is_psci_0_1(migrate, func_id))
return psci_forward(host_ctxt);
- else if (func_id == kvm_host_psci_0_1_function_ids.cpu_on)
+ if (is_psci_0_1(cpu_on, func_id))
return psci_cpu_on(func_id, host_ctxt);
- else if (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend)
+ if (is_psci_0_1(cpu_suspend, func_id))
return psci_cpu_suspend(func_id, host_ctxt);
- else
- return PSCI_RET_NOT_SUPPORTED;
+
+ return PSCI_RET_NOT_SUPPORTED;
}
static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
@@ -298,20 +280,23 @@ static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_
bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
{
- u64 func_id = get_psci_func_id(host_ctxt);
+ DECLARE_REG(u64, func_id, host_ctxt, 0);
unsigned long ret;
- if (!is_psci_call(func_id))
- return false;
-
- switch (kvm_host_psci_version) {
+ switch (kvm_host_psci_config.version) {
case PSCI_VERSION(0, 1):
+ if (!is_psci_0_1_call(func_id))
+ return false;
ret = psci_0_1_handler(func_id, host_ctxt);
break;
case PSCI_VERSION(0, 2):
+ if (!is_psci_0_2_call(func_id))
+ return false;
ret = psci_0_2_handler(func_id, host_ctxt);
break;
default:
+ if (!is_psci_0_2_call(func_id))
+ return false;
ret = psci_1_0_handler(func_id, host_ctxt);
break;
}
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 398f6df1bbe4..4ad66a532e38 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -850,8 +850,6 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
return -EINVAL;
}
- kvm_pmu_vcpu_reset(vcpu);
-
return 0;
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 3313dedfa505..42ccc27fb684 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -594,6 +594,10 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 pmcr, val;
+ /* No PMU available, PMCR_EL0 may UNDEF... */
+ if (!kvm_arm_support_pmu_v3())
+ return;
+
pmcr = read_sysreg(pmcr_el0);
/*
* Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
@@ -919,7 +923,7 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
#define reg_to_encoding(x) \
sys_reg((u32)(x)->Op0, (u32)(x)->Op1, \
- (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2);
+ (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index d8cc51bd60bf..70fcd6a12fe1 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -34,17 +34,16 @@ static u64 __early_kern_hyp_va(u64 addr)
}
/*
- * Store a hyp VA <-> PA offset into a hyp-owned variable.
+ * Store a hyp VA <-> PA offset into an EL2-owned variable.
*/
static void init_hyp_physvirt_offset(void)
{
- extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
u64 kern_va, hyp_va;
/* Compute the offset from the hyp VA and PA of a random symbol. */
- kern_va = (u64)kvm_ksym_ref(__hyp_text_start);
+ kern_va = (u64)lm_alias(__hyp_text_start);
hyp_va = __early_kern_hyp_va(kern_va);
- CHOOSE_NVHE_SYM(hyp_physvirt_offset) = (s64)__pa(kern_va) - (s64)hyp_va;
+ hyp_physvirt_offset = (s64)__pa(kern_va) - (s64)hyp_va;
}
/*
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index 32e32d67a127..052917deb149 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -419,7 +419,8 @@ int vgic_lazy_init(struct kvm *kvm)
* Map the MMIO regions depending on the VGIC model exposed to the guest
* called on the first VCPU run.
* Also map the virtual CPU interface into the VM.
- * v2/v3 derivatives call vgic_init if not already done.
+ * v2 calls vgic_init() if not already done.
+ * v3 and derivatives return an error if the VGIC is not initialized.
* vgic_ready() returns true if this function has succeeded.
* @kvm: kvm struct pointer
*/
@@ -428,7 +429,13 @@ int kvm_vgic_map_resources(struct kvm *kvm)
struct vgic_dist *dist = &kvm->arch.vgic;
int ret = 0;
+ if (likely(vgic_ready(kvm)))
+ return 0;
+
mutex_lock(&kvm->lock);
+ if (vgic_ready(kvm))
+ goto out;
+
if (!irqchip_in_kernel(kvm))
goto out;
@@ -439,6 +446,8 @@ int kvm_vgic_map_resources(struct kvm *kvm)
if (ret)
__kvm_vgic_destroy(kvm);
+ else
+ dist->ready = true;
out:
mutex_unlock(&kvm->lock);
diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c
index ebf53a4e1296..11934c2af2f4 100644
--- a/arch/arm64/kvm/vgic/vgic-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-v2.c
@@ -306,20 +306,15 @@ int vgic_v2_map_resources(struct kvm *kvm)
struct vgic_dist *dist = &kvm->arch.vgic;
int ret = 0;
- if (vgic_ready(kvm))
- goto out;
-
if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
kvm_err("Need to set vgic cpu and dist addresses first\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
kvm_err("VGIC CPU and dist frames overlap\n");
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
/*
@@ -329,13 +324,13 @@ int vgic_v2_map_resources(struct kvm *kvm)
ret = vgic_init(kvm);
if (ret) {
kvm_err("Unable to initialize VGIC dynamic data structures\n");
- goto out;
+ return ret;
}
ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
if (ret) {
kvm_err("Unable to register VGIC MMIO regions\n");
- goto out;
+ return ret;
}
if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
@@ -344,14 +339,11 @@ int vgic_v2_map_resources(struct kvm *kvm)
KVM_VGIC_V2_CPU_SIZE, true);
if (ret) {
kvm_err("Unable to remap VGIC CPU to VCPU\n");
- goto out;
+ return ret;
}
}
- dist->ready = true;
-
-out:
- return ret;
+ return 0;
}
DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap);
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 9cdf39a94a63..52915b342351 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -500,29 +500,23 @@ int vgic_v3_map_resources(struct kvm *kvm)
int ret = 0;
int c;
- if (vgic_ready(kvm))
- goto out;
-
kvm_for_each_vcpu(c, vcpu, kvm) {
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
kvm_debug("vcpu %d redistributor base not set\n", c);
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
}
if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
kvm_err("Need to set vgic distributor addresses first\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
if (!vgic_v3_check_base(kvm)) {
kvm_err("VGIC redist and dist frames overlap\n");
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
/*
@@ -530,22 +524,19 @@ int vgic_v3_map_resources(struct kvm *kvm)
* the VGIC before we need to use it.
*/
if (!vgic_initialized(kvm)) {
- ret = -EBUSY;
- goto out;
+ return -EBUSY;
}
ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
if (ret) {
kvm_err("Unable to register VGICv3 dist MMIO regions\n");
- goto out;
+ return ret;
}
if (kvm_vgic_global_state.has_gicv4_1)
vgic_v4_configure_vsgis(kvm);
- dist->ready = true;
-out:
- return ret;
+ return 0;
}
DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3ab7b46087b7..3d6616f6f6ef 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1010,9 +1010,21 @@ struct kvm_arch {
*/
bool tdp_mmu_enabled;
- /* List of struct tdp_mmu_pages being used as roots */
+ /*
+ * List of struct kvm_mmu_pages being used as roots.
+ * All struct kvm_mmu_pages in the list should have
+ * tdp_mmu_page set.
+ * All struct kvm_mmu_pages in the list should have a positive
+ * root_count except when a thread holds the MMU lock and is removing
+ * an entry from the list.
+ */
struct list_head tdp_mmu_roots;
- /* List of struct tdp_mmu_pages not being used as roots */
+
+ /*
+ * List of struct kvm_mmu_pages not being used as roots.
+ * All struct kvm_mmu_pages in the list should have
+ * tdp_mmu_page set and a root_count of 0.
+ */
struct list_head tdp_mmu_pages;
};
@@ -1287,6 +1299,8 @@ struct kvm_x86_ops {
void (*migrate_timers)(struct kvm_vcpu *vcpu);
void (*msr_filter_changed)(struct kvm_vcpu *vcpu);
int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
+
+ void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);
};
struct kvm_x86_nested_ops {
@@ -1468,6 +1482,7 @@ int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
+int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 3136e05831cf..43cceadd073e 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -674,7 +674,7 @@ static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
(unsigned long long)vcpu->arch.pv_eoi.msr_val);
return false;
}
- return val & 0x1;
+ return val & KVM_PV_EOI_ENABLED;
}
static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
@@ -2898,7 +2898,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
/* evaluate pending_events before reading the vector */
smp_rmb();
sipi_vector = apic->sipi_vector;
- kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
+ kvm_x86_ops.vcpu_deliver_sipi_vector(vcpu, sipi_vector);
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}
}
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 9c4a9c8e43d9..581925e476d6 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -49,7 +49,7 @@ static inline u64 rsvd_bits(int s, int e)
if (e < s)
return 0;
- return ((1ULL << (e - s + 1)) - 1) << s;
+ return ((2ULL << (e - s)) - 1) << s;
}
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask);
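
The one-line rsvd_bits() change above is the "left shift by 64" UBSAN fix called out in the merge message: for s == 0, e == 63 the old expression evaluates 1ULL << 64, an undefined shift, while 2ULL << 63 merely wraps to zero (well-defined for unsigned operands) and still produces an all-ones mask after the subtraction. A small standalone check of the rewritten form, assuming nothing beyond standard C:

#include <assert.h>
#include <stdint.h>

/* Mirror of the new rsvd_bits(): mask with bits s..e (inclusive) set. */
static uint64_t rsvd_bits_new(int s, int e)
{
	if (e < s)
		return 0;
	return ((2ULL << (e - s)) - 1) << s;	/* shift count is at most 63 */
}

int main(void)
{
	/* Bits 7..11 set: 0xF80. */
	assert(rsvd_bits_new(7, 11) == 0xF80);

	/*
	 * Full 64-bit mask: 2ULL << 63 wraps to 0, minus 1 gives all ones.
	 * The old "1ULL << (e - s + 1)" form is undefined behaviour here.
	 */
	assert(rsvd_bits_new(0, 63) == UINT64_MAX);
	return 0;
}
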
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index c478904af518..6d16481aa29d 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3493,26 +3493,25 @@ static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
* Return the level of the lowest level SPTE added to sptes.
* That SPTE may be non-present.
*/
-static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes)
+static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
{
struct kvm_shadow_walk_iterator iterator;
- int leaf = vcpu->arch.mmu->root_level;
+ int leaf = -1;
u64 spte;
-
walk_shadow_page_lockless_begin(vcpu);
- for (shadow_walk_init(&iterator, vcpu, addr);
+ for (shadow_walk_init(&iterator, vcpu, addr),
+ *root_level = iterator.level;
shadow_walk_okay(&iterator);
__shadow_walk_next(&iterator, spte)) {
leaf = iterator.level;
spte = mmu_spte_get_lockless(iterator.sptep);
- sptes[leaf - 1] = spte;
+ sptes[leaf] = spte;
if (!is_shadow_present_pte(spte))
break;
-
}
walk_shadow_page_lockless_end(vcpu);
@@ -3520,14 +3519,12 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes)
return leaf;
}
-/* return true if reserved bit is detected on spte. */
+/* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
{
- u64 sptes[PT64_ROOT_MAX_LEVEL];
+ u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
struct rsvd_bits_validate *rsvd_check;
- int root = vcpu->arch.mmu->shadow_root_level;
- int leaf;
- int level;
+ int root, leaf, level;
bool reserved = false;
if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) {
@@ -3536,35 +3533,45 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
}
if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
- leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes);
+ leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
else
- leaf = get_walk(vcpu, addr, sptes);
+ leaf = get_walk(vcpu, addr, sptes, &root);
+
+ if (unlikely(leaf < 0)) {
+ *sptep = 0ull;
+ return reserved;
+ }
+
+ *sptep = sptes[leaf];
+
+ /*
+ * Skip reserved bits checks on the terminal leaf if it's not a valid
+ * SPTE. Note, this also (intentionally) skips MMIO SPTEs, which, by
+ * design, always have reserved bits set. The purpose of the checks is
+ * to detect reserved bits on non-MMIO SPTEs, i.e. buggy SPTEs.
+ */
+ if (!is_shadow_present_pte(sptes[leaf]))
+ leaf++;
rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
- for (level = root; level >= leaf; level--) {
- if (!is_shadow_present_pte(sptes[level - 1]))
- break;
+ for (level = root; level >= leaf; level--)
/*
* Use a bitwise-OR instead of a logical-OR to aggregate the
* reserved bit and EPT's invalid memtype/XWR checks to avoid
* adding a Jcc in the loop.
*/
- reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level - 1]) |
- __is_rsvd_bits_set(rsvd_check, sptes[level - 1],
- level);
- }
+ reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level]) |
+ __is_rsvd_bits_set(rsvd_check, sptes[level], level);
if (reserved) {
pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
__func__, addr);
for (level = root; level >= leaf; level--)
pr_err("------ spte 0x%llx level %d.\n",
- sptes[level - 1], level);
+ sptes[level], level);
}
- *sptep = sptes[leaf - 1];
-
return reserved;
}
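
The get_mmio_spte() rework above swaps sptes[level - 1] for raw-level indexing: page-table levels are 1-based, so the array grows by one slot (PT64_ROOT_MAX_LEVEL + 1), the walkers report the root level through an out parameter instead of the caller guessing it, and leaf == -1 flags a walk that recorded nothing. A standalone sketch of that convention; the level numbers and SPTE values are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define ROOT_MAX_LEVEL 5	/* stand-in for PT64_ROOT_MAX_LEVEL */

/* Fake walk: records one SPTE per level from the root down to a leaf. */
static int get_walk(uint64_t *sptes, int *root_level)
{
	int leaf = -1;		/* -1 means nothing was recorded */
	int level;

	*root_level = 4;	/* e.g. 4-level paging */
	for (level = *root_level; level >= 2; level--) {
		leaf = level;
		sptes[level] = 0x1000 + level;	/* fake SPTE values */
	}
	return leaf;
}

int main(void)
{
	uint64_t sptes[ROOT_MAX_LEVEL + 1];	/* +1: levels are 1-based */
	int root, leaf, level;

	leaf = get_walk(sptes, &root);
	if (leaf < 0)
		return 0;	/* bail early, as the real code does */

	for (level = root; level >= leaf; level--)
		printf("level %d: spte 0x%llx\n", level,
		       (unsigned long long)sptes[level]);
	return 0;
}
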
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 4bd2f1dc0172..2ef8615f9dba 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -44,7 +44,48 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
}
-#define for_each_tdp_mmu_root(_kvm, _root) \
+static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
+{
+ if (kvm_mmu_put_root(kvm, root))
+ kvm_tdp_mmu_free_root(kvm, root);
+}
+
+static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
+ struct kvm_mmu_page *root)
+{
+ lockdep_assert_held(&kvm->mmu_lock);
+
+ if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
+ return false;
+
+ kvm_mmu_get_root(kvm, root);
+ return true;
+
+}
+
+static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
+ struct kvm_mmu_page *root)
+{
+ struct kvm_mmu_page *next_root;
+
+ next_root = list_next_entry(root, link);
+ tdp_mmu_put_root(kvm, root);
+ return next_root;
+}
+
+/*
+ * Note: this iterator gets and puts references to the roots it iterates over.
+ * This makes it safe to release the MMU lock and yield within the loop, but
+ * if exiting the loop early, the caller must drop the reference to the most
+ * recent root. (Unless keeping a live reference is desirable.)
+ */
+#define for_each_tdp_mmu_root_yield_safe(_kvm, _root) \
+ for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots, \
+ typeof(*_root), link); \
+ tdp_mmu_next_root_valid(_kvm, _root); \
+ _root = tdp_mmu_next_root(_kvm, _root))
+
+#define for_each_tdp_mmu_root(_kvm, _root) \
list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
@@ -447,18 +488,9 @@ bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
struct kvm_mmu_page *root;
bool flush = false;
- for_each_tdp_mmu_root(kvm, root) {
- /*
- * Take a reference on the root so that it cannot be freed if
- * this thread releases the MMU lock and yields in this loop.
- */
- kvm_mmu_get_root(kvm, root);
-
+ for_each_tdp_mmu_root_yield_safe(kvm, root)
flush |= zap_gfn_range(kvm, root, start, end, true);
- kvm_mmu_put_root(kvm, root);
- }
-
return flush;
}
@@ -619,13 +651,7 @@ static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
int ret = 0;
int as_id;
- for_each_tdp_mmu_root(kvm, root) {
- /*
- * Take a reference on the root so that it cannot be freed if
- * this thread releases the MMU lock and yields in this loop.
- */
- kvm_mmu_get_root(kvm, root);
-
+ for_each_tdp_mmu_root_yield_safe(kvm, root) {
as_id = kvm_mmu_page_as_id(root);
slots = __kvm_memslots(kvm, as_id);
kvm_for_each_memslot(memslot, slots) {
@@ -647,8 +673,6 @@ static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
ret |= handler(kvm, memslot, root, gfn_start,
gfn_end, data);
}
-
- kvm_mmu_put_root(kvm, root);
}
return ret;
@@ -838,21 +862,13 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
int root_as_id;
bool spte_set = false;
- for_each_tdp_mmu_root(kvm, root) {
+ for_each_tdp_mmu_root_yield_safe(kvm, root) {
root_as_id = kvm_mmu_page_as_id(root);
if (root_as_id != slot->as_id)
continue;
- /*
- * Take a reference on the root so that it cannot be freed if
- * this thread releases the MMU lock and yields in this loop.
- */
- kvm_mmu_get_root(kvm, root);
-
spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages, min_level);
-
- kvm_mmu_put_root(kvm, root);
}
return spte_set;
@@ -906,21 +922,13 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
int root_as_id;
bool spte_set = false;
- for_each_tdp_mmu_root(kvm, root) {
+ for_each_tdp_mmu_root_yield_safe(kvm, root) {
root_as_id = kvm_mmu_page_as_id(root);
if (root_as_id != slot->as_id)
continue;
- /*
- * Take a reference on the root so that it cannot be freed if
- * this thread releases the MMU lock and yields in this loop.
- */
- kvm_mmu_get_root(kvm, root);
-
spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages);
-
- kvm_mmu_put_root(kvm, root);
}
return spte_set;
@@ -1029,21 +1037,13 @@ bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
int root_as_id;
bool spte_set = false;
- for_each_tdp_mmu_root(kvm, root) {
+ for_each_tdp_mmu_root_yield_safe(kvm, root) {
root_as_id = kvm_mmu_page_as_id(root);
if (root_as_id != slot->as_id)
continue;
- /*
- * Take a reference on the root so that it cannot be freed if
- * this thread releases the MMU lock and yields in this loop.
- */
- kvm_mmu_get_root(kvm, root);
-
spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages);
-
- kvm_mmu_put_root(kvm, root);
}
return spte_set;
}
@@ -1089,21 +1089,13 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
struct kvm_mmu_page *root;
int root_as_id;
- for_each_tdp_mmu_root(kvm, root) {
+ for_each_tdp_mmu_root_yield_safe(kvm, root) {
root_as_id = kvm_mmu_page_as_id(root);
if (root_as_id != slot->as_id)
continue;
- /*
- * Take a reference on the root so that it cannot be freed if
- * this thread releases the MMU lock and yields in this loop.
- */
- kvm_mmu_get_root(kvm, root);
-
zap_collapsible_spte_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages);
-
- kvm_mmu_put_root(kvm, root);
}
}
@@ -1160,16 +1152,19 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
* Return the level of the lowest level SPTE added to sptes.
* That SPTE may be non-present.
*/
-int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes)
+int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
+ int *root_level)
{
struct tdp_iter iter;
struct kvm_mmu *mmu = vcpu->arch.mmu;
- int leaf = vcpu->arch.mmu->shadow_root_level;
gfn_t gfn = addr >> PAGE_SHIFT;
+ int leaf = -1;
+
+ *root_level = vcpu->arch.mmu->shadow_root_level;
tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
leaf = iter.level;
- sptes[leaf - 1] = iter.old_spte;
+ sptes[leaf] = iter.old_spte;
}
return leaf;
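
for_each_tdp_mmu_root_yield_safe(), defined earlier in this file, is what replaced the open-coded get/put pairs in the hunks above: the iterator pins the current root with a reference before the loop body runs (the body may release the MMU lock and yield), and drops that pin only after fetching the next entry, so a concurrent zap cannot free either node out from under the walker. A simplified single-threaded sketch of the shape, with locking elided and all names invented for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct root {
	struct root *next;
	int refcount;		/* lock-protected in the real code */
	int id;
};

static void get_root(struct root *r)
{
	r->refcount++;
}

/* Returns true when the caller dropped the last reference. */
static bool put_root(struct root *r)
{
	return --r->refcount == 0;
}

static bool next_root_valid(struct root *r)
{
	if (!r)
		return false;
	get_root(r);		/* pin before the body may yield */
	return true;
}

static struct root *advance(struct root *r)
{
	struct root *next = r->next;	/* read before unpinning */

	if (put_root(r))
		free(r);		/* nobody else holds it: reclaim */
	return next;
}

#define for_each_root_yield_safe(_head, _r) \
	for ((_r) = (_head); next_root_valid(_r); (_r) = advance(_r))

int main(void)
{
	struct root *b = malloc(sizeof(*b));
	struct root *a = malloc(sizeof(*a));
	struct root *r;

	*b = (struct root){ .next = NULL, .refcount = 1, .id = 2 };
	*a = (struct root){ .next = b, .refcount = 1, .id = 1 };

	for_each_root_yield_safe(a, r)
		printf("visiting root %d\n", r->id);

	/* Both nodes still hold their list reference; release it here. */
	free(a);
	free(b);
	return 0;
}

As the kernel comment notes, a caller that breaks out of such a loop early still owns the reference taken on the most recent root and must drop it itself.
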
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 556e065503f6..cbbdbadd1526 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -44,5 +44,7 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn);
-int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes);
+int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
+ int *root_level);
+
#endif /* __KVM_X86_MMU_TDP_MMU_H */
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index b0b667456b2e..cb4c6ee10029 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -199,6 +199,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+
if (!nested_svm_vmrun_msrpm(svm)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror =
@@ -595,6 +596,8 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
svm->nested.vmcb12_gpa = 0;
WARN_ON_ONCE(svm->nested.nested_run_pending);
+ kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
+
/* in case we halted in L2 */
svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
@@ -754,6 +757,7 @@ void svm_leave_nested(struct vcpu_svm *svm)
leave_guest_mode(&svm->vcpu);
copy_vmcb_control_area(&vmcb->control, &hsave->control);
nested_svm_uninit_mmu_context(&svm->vcpu);
+ vmcb_mark_all_dirty(svm->vmcb);
}
kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
@@ -1194,6 +1198,10 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
* in the registers, the save area of the nested state instead
* contains saved L1 state.
*/
+
+ svm->nested.nested_run_pending =
+ !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
+
copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
hsave->save = *save;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 9858d5ae9ddd..c8ffdbc81709 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1563,6 +1563,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
goto vmgexit_err;
break;
case SVM_VMGEXIT_NMI_COMPLETE:
+ case SVM_VMGEXIT_AP_HLT_LOOP:
case SVM_VMGEXIT_AP_JUMP_TABLE:
case SVM_VMGEXIT_UNSUPPORTED_EVENT:
break;
@@ -1888,6 +1889,9 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
case SVM_VMGEXIT_NMI_COMPLETE:
ret = svm_invoke_exit_handler(svm, SVM_EXIT_IRET);
break;
+ case SVM_VMGEXIT_AP_HLT_LOOP:
+ ret = kvm_emulate_ap_reset_hold(&svm->vcpu);
+ break;
case SVM_VMGEXIT_AP_JUMP_TABLE: {
struct kvm_sev_info *sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
@@ -2001,7 +2005,7 @@ void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu)
* of which one step is to perform a VMLOAD. Since hardware does not
* perform a VMSAVE on VMRUN, the host savearea must be updated.
*/
- asm volatile(__ex("vmsave") : : "a" (__sme_page_pa(sd->save_area)) : "memory");
+ asm volatile(__ex("vmsave %0") : : "a" (__sme_page_pa(sd->save_area)) : "memory");
/*
* Certain MSRs are restored on VMEXIT, only save ones that aren't
@@ -2040,3 +2044,21 @@ void sev_es_vcpu_put(struct vcpu_svm *svm)
wrmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
}
}
+
+void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ /* First SIPI: Use the values as initially set by the VMM */
+ if (!svm->received_first_sipi) {
+ svm->received_first_sipi = true;
+ return;
+ }
+
+ /*
+ * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
+ * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
+ * non-zero value.
+ */
+ ghcb_set_sw_exit_info_2(svm->ghcb, 1);
+}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index cce0143a6f80..7ef171790d02 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3677,8 +3677,6 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
return EXIT_FASTPATH_NONE;
}
-void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
-
static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
struct vcpu_svm *svm)
{
@@ -4384,6 +4382,14 @@ static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
(vmcb_is_intercept(&svm->vmcb->control, INTERCEPT_INIT));
}
+static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
+{
+ if (!sev_es_guest(vcpu->kvm))
+ return kvm_vcpu_deliver_sipi_vector(vcpu, vector);
+
+ sev_vcpu_deliver_sipi_vector(vcpu, vector);
+}
+
static void svm_vm_destroy(struct kvm *kvm)
{
avic_vm_destroy(kvm);
@@ -4526,6 +4532,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.msr_filter_changed = svm_msr_filter_changed,
.complete_emulated_msr = svm_complete_emulated_msr,
+
+ .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
};
static struct kvm_x86_init_ops svm_init_ops __initdata = {
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 5431e6335e2e..0fe874ae5498 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -185,6 +185,7 @@ struct vcpu_svm {
struct vmcb_save_area *vmsa;
struct ghcb *ghcb;
struct kvm_host_map ghcb_map;
+ bool received_first_sipi;
/* SEV-ES scratch area support */
void *ghcb_sa;
@@ -591,6 +592,7 @@ void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_create_vcpu(struct vcpu_svm *svm);
void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu);
void sev_es_vcpu_put(struct vcpu_svm *svm);
+void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
/* vmenter.S */
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index e2f26564a12d..0fbb46990dfc 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4442,6 +4442,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
/* trying to cancel vmlaunch/vmresume is a bug */
WARN_ON_ONCE(vmx->nested.nested_run_pending);
+ kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
+
/* Service the TLB flush request for L2 before switching to L1. */
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
kvm_vcpu_flush_tlb_current(vcpu);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 75c9c6a0a3a4..2af05d3b0590 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7707,6 +7707,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.msr_filter_changed = vmx_msr_filter_changed,
.complete_emulated_msr = kvm_complete_insn_gp,
.cpu_dirty_log_size = vmx_cpu_dirty_log_size,
+
+ .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
};
static __init int hardware_setup(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3f7c1fc7a3ce..9a8969a6dd06 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7976,17 +7976,22 @@ void kvm_arch_exit(void)
kmem_cache_destroy(x86_fpu_cache);
}
-int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
+static int __kvm_vcpu_halt(struct kvm_vcpu *vcpu, int state, int reason)
{
++vcpu->stat.halt_exits;
if (lapic_in_kernel(vcpu)) {
- vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
+ vcpu->arch.mp_state = state;
return 1;
} else {
- vcpu->run->exit_reason = KVM_EXIT_HLT;
+ vcpu->run->exit_reason = reason;
return 0;
}
}
+
+int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
+{
+ return __kvm_vcpu_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
+}
EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
@@ -8000,6 +8005,14 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
+int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
+{
+ int ret = kvm_skip_emulated_instruction(vcpu);
+
+ return __kvm_vcpu_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD, KVM_EXIT_AP_RESET_HOLD) && ret;
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold);
+
#ifdef CONFIG_X86_64
static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
unsigned long clock_type)
@@ -8789,7 +8802,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (kvm_request_pending(vcpu)) {
if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
- if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
+ if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
+ ;
+ else if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
r = 0;
goto out;
}
@@ -9094,6 +9109,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
kvm_apic_accept_events(vcpu);
switch(vcpu->arch.mp_state) {
case KVM_MP_STATE_HALTED:
+ case KVM_MP_STATE_AP_RESET_HOLD:
vcpu->arch.pv.pv_unhalted = false;
vcpu->arch.mp_state =
KVM_MP_STATE_RUNNABLE;
@@ -9520,8 +9536,9 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
kvm_load_guest_fpu(vcpu);
kvm_apic_accept_events(vcpu);
- if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
- vcpu->arch.pv.pv_unhalted)
+ if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED ||
+ vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) &&
+ vcpu->arch.pv.pv_unhalted)
mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
else
mp_state->mp_state = vcpu->arch.mp_state;
@@ -10152,6 +10169,7 @@ void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
kvm_rip_write(vcpu, 0);
}
+EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector);
int kvm_arch_hardware_enable(void)
{