author    | Linus Torvalds <torvalds@linux-foundation.org> | 2020-06-12 11:05:52 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-06-12 11:05:52 -0700
commit    | 52cd0d972fa6491928add05f11f97a4a59babe92 (patch)
tree      | 5e53cff155288b4d24c33754905bca4a8504b4bb /tools
parent    | d2d5439df22f3c2a07c5db582d4ef1b2b587ca27 (diff)
parent    | 49b3deaad3452217d62dbd78da8df24eb0c7e169 (diff)
download  | linux-52cd0d972fa6491928add05f11f97a4a59babe92.tar.bz2
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull more KVM updates from Paolo Bonzini:
"The guest side of the asynchronous page fault work has been delayed to
5.9 in order to sync with Thomas's interrupt entry rework, but here's
the rest of the KVM updates for this merge window.
MIPS:
- Loongson port
PPC:
- Fixes
ARM:
- Fixes
x86:
- KVM_SET_USER_MEMORY_REGION optimizations
- Fixes
- Selftest fixes"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (62 commits)
KVM: x86: do not pass poisoned hva to __kvm_set_memory_region
KVM: selftests: fix sync_with_host() in smm_test
KVM: async_pf: Inject 'page ready' event only if 'page not present' was previously injected
KVM: async_pf: Cleanup kvm_setup_async_pf()
kvm: i8254: remove redundant assignment to pointer s
KVM: x86: respect singlestep when emulating instruction
KVM: selftests: Don't probe KVM_CAP_HYPERV_ENLIGHTENED_VMCS when nested VMX is unsupported
KVM: selftests: do not substitute SVM/VMX check with KVM_CAP_NESTED_STATE check
KVM: nVMX: Consult only the "basic" exit reason when routing nested exit
KVM: arm64: Move hyp_symbol_addr() to kvm_asm.h
KVM: arm64: Synchronize sysreg state on injecting an AArch32 exception
KVM: arm64: Make vcpu_cp1x() work on Big Endian hosts
KVM: arm64: Remove host_cpu_context member from vcpu structure
KVM: arm64: Stop sparse from moaning at __hyp_this_cpu_ptr
KVM: arm64: Handle PtrAuth traps early
KVM: x86: Unexport x86_fpu_cache and make it static
KVM: selftests: Ignore KVM 5-level paging support for VM_MODE_PXXV48_4K
KVM: arm64: Save the host's PtrAuth keys in non-preemptible context
KVM: arm64: Stop save/restoring ACTLR_EL1
KVM: arm64: Add emulation for 32bit guests accessing ACTLR2
...
Diffstat (limited to 'tools')
-rw-r--r-- | tools/testing/selftests/kvm/.gitignore | 1
-rw-r--r-- | tools/testing/selftests/kvm/Makefile | 4
-rw-r--r-- | tools/testing/selftests/kvm/include/x86_64/svm_util.h | 1
-rw-r--r-- | tools/testing/selftests/kvm/include/x86_64/vmx.h | 5
-rw-r--r-- | tools/testing/selftests/kvm/lib/kvm_util.c | 11
-rw-r--r-- | tools/testing/selftests/kvm/lib/x86_64/svm.c | 10
-rw-r--r-- | tools/testing/selftests/kvm/lib/x86_64/vmx.c | 9
-rw-r--r-- | tools/testing/selftests/kvm/x86_64/evmcs_test.c | 5
-rw-r--r-- | tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c | 3
-rw-r--r-- | tools/testing/selftests/kvm/x86_64/smm_test.c | 17
-rw-r--r-- | tools/testing/selftests/kvm/x86_64/state_test.c | 13
-rw-r--r-- | tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c | 4
12 files changed, 55 insertions, 28 deletions
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index f159718f90c0..452787152748 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -3,6 +3,7 @@
 /s390x/resets
 /s390x/sync_regs_test
 /x86_64/cr4_cpuid_sync_test
+/x86_64/debug_regs
 /x86_64/evmcs_test
 /x86_64/hyperv_cpuid
 /x86_64/mmio_warning_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index b4ff112e5c7e..4a166588d99f 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -83,7 +83,11 @@ LIBKVM += $(LIBKVM_$(UNAME_M))
 INSTALL_HDR_PATH = $(top_srcdir)/usr
 LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
 LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
+ifeq ($(ARCH),x86_64)
+LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/x86/include
+else
 LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
+endif
 CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
        -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
        -I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \
diff --git a/tools/testing/selftests/kvm/include/x86_64/svm_util.h b/tools/testing/selftests/kvm/include/x86_64/svm_util.h
index 674151d24fcf..b7531c83b8ae 100644
--- a/tools/testing/selftests/kvm/include/x86_64/svm_util.h
+++ b/tools/testing/selftests/kvm/include/x86_64/svm_util.h
@@ -33,6 +33,7 @@ struct svm_test_data {
 struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva);
 void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp);
 void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa);
+bool nested_svm_supported(void);
 void nested_svm_check_supported(void);

 static inline bool cpu_has_svm(void)
diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index ccff3e6e2704..16fa21ebb99c 100644
--- a/tools/testing/selftests/kvm/include/x86_64/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -598,15 +598,12 @@ union vmx_ctrl_msr {
        };
 };

-union vmx_basic basic;
-union vmx_ctrl_msr ctrl_pin_rev;
-union vmx_ctrl_msr ctrl_exit_rev;
-
 struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
 bool prepare_for_vmx_operation(struct vmx_pages *vmx);
 void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
 bool load_vmcs(struct vmx_pages *vmx);
+bool nested_vmx_supported(void);
 void nested_vmx_check_supported(void);

 void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index c9cede5c7d0d..74776ee228f2 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -195,11 +195,18 @@ struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
        case VM_MODE_PXXV48_4K:
 #ifdef __x86_64__
                kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
-               TEST_ASSERT(vm->va_bits == 48, "Linear address width "
-                           "(%d bits) not supported", vm->va_bits);
+               /*
+                * Ignore KVM support for 5-level paging (vm->va_bits == 57),
+                * it doesn't take effect unless a CR4.LA57 is set, which it
+                * isn't for this VM_MODE.
+                */
+               TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,
+                           "Linear address width (%d bits) not supported",
+                           vm->va_bits);
                pr_debug("Guest physical address width detected: %d\n",
                         vm->pa_bits);
                vm->pgtable_levels = 4;
+               vm->va_bits = 48;
 #else
                TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
 #endif
diff --git a/tools/testing/selftests/kvm/lib/x86_64/svm.c b/tools/testing/selftests/kvm/lib/x86_64/svm.c
index c42401068373..3a5c72ed2b79 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/svm.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/svm.c
@@ -148,14 +148,18 @@ void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa)
                : "r15", "memory");
 }

-void nested_svm_check_supported(void)
+bool nested_svm_supported(void)
 {
        struct kvm_cpuid_entry2 *entry =
                kvm_get_supported_cpuid_entry(0x80000001);

-       if (!(entry->ecx & CPUID_SVM)) {
+       return entry->ecx & CPUID_SVM;
+}
+
+void nested_svm_check_supported(void)
+{
+       if (!nested_svm_supported()) {
                print_skip("nested SVM not enabled");
                exit(KSFT_SKIP);
        }
 }
-
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index 4ae104f6ce69..f1e00d43eea2 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -379,11 +379,16 @@ void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
        init_vmcs_guest_state(guest_rip, guest_rsp);
 }

-void nested_vmx_check_supported(void)
+bool nested_vmx_supported(void)
 {
        struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);

-       if (!(entry->ecx & CPUID_VMX)) {
+       return entry->ecx & CPUID_VMX;
+}
+
+void nested_vmx_check_supported(void)
+{
+       if (!nested_vmx_supported()) {
                print_skip("nested VMX not enabled");
                exit(KSFT_SKIP);
        }
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index e6e62e5e75b2..757928199f19 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -94,9 +94,10 @@ int main(int argc, char *argv[])

        vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

-       if (!kvm_check_cap(KVM_CAP_NESTED_STATE) ||
+       if (!nested_vmx_supported() ||
+           !kvm_check_cap(KVM_CAP_NESTED_STATE) ||
            !kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
-               print_skip("capabilities not available");
+               print_skip("Enlightened VMCS is unsupported");
                exit(KSFT_SKIP);
        }
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
index 4a7967cca281..745b708c2d3b 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
@@ -170,7 +170,8 @@ int main(int argc, char *argv[])
        case 1:
                break;
        case 2:
-               if (!kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
+               if (!nested_vmx_supported() ||
+                   !kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
                        print_skip("Enlightened VMCS is unsupported");
                        continue;
                }
diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c
index 6f8f478b3ceb..ae39a220609f 100644
--- a/tools/testing/selftests/kvm/x86_64/smm_test.c
+++ b/tools/testing/selftests/kvm/x86_64/smm_test.c
@@ -47,10 +47,10 @@ uint8_t smi_handler[] = {
        0x0f, 0xaa,           /* rsm */
 };

-void sync_with_host(uint64_t phase)
+static inline void sync_with_host(uint64_t phase)
 {
        asm volatile("in $" XSTR(SYNC_PORT)", %%al \n"
-                    : : "a" (phase));
+                    : "+a" (phase));
 }

 void self_smi(void)
@@ -118,16 +118,17 @@ int main(int argc, char *argv[])
        vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA);

        if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
-               if (kvm_get_supported_cpuid_entry(0x80000001)->ecx & CPUID_SVM)
+               if (nested_svm_supported())
                        vcpu_alloc_svm(vm, &nested_gva);
-               else
+               else if (nested_vmx_supported())
                        vcpu_alloc_vmx(vm, &nested_gva);
-               vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
-       } else {
-               pr_info("will skip SMM test with VMX enabled\n");
-               vcpu_args_set(vm, VCPU_ID, 1, 0);
        }

+       if (!nested_gva)
+               pr_info("will skip SMM test with VMX enabled\n");
+
+       vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
+
        for (stage = 1;; stage++) {
                _vcpu_run(vm, VCPU_ID);
                TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
index d43b6f99b66c..f6c8b9042f8a 100644
--- a/tools/testing/selftests/kvm/x86_64/state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/state_test.c
@@ -171,16 +171,17 @@ int main(int argc, char *argv[])
        vcpu_regs_get(vm, VCPU_ID, &regs1);

        if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
-               if (kvm_get_supported_cpuid_entry(0x80000001)->ecx & CPUID_SVM)
+               if (nested_svm_supported())
                        vcpu_alloc_svm(vm, &nested_gva);
-               else
+               else if (nested_vmx_supported())
                        vcpu_alloc_vmx(vm, &nested_gva);
-               vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
-       } else {
-               pr_info("will skip nested state checks\n");
-               vcpu_args_set(vm, VCPU_ID, 1, 0);
        }

+       if (!nested_gva)
+               pr_info("will skip nested state checks\n");
+
+       vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
+
        for (stage = 1;; stage++) {
                _vcpu_run(vm, VCPU_ID);
                TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c b/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
index cc72b6188ca7..a7737af1224f 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
@@ -31,6 +31,10 @@ bool l2_save_restore_done;
 static u64 l2_vmx_pt_start;
 volatile u64 l2_vmx_pt_finish;

+union vmx_basic basic;
+union vmx_ctrl_msr ctrl_pin_rev;
+union vmx_ctrl_msr ctrl_exit_rev;
+
 void l2_guest_code(void)
 {
        u64 vmx_pt_delta;
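
The selftest changes above split probing from skipping: nested_svm_supported()/nested_vmx_supported() only report whether the host CPU advertises SVM/VMX, while nested_*_check_supported() keeps the old skip-and-exit behaviour. A minimal sketch of how a test is expected to combine them after this series (hypothetical test body; VCPU_ID, vm and the guest-side handling of a zero nested_gva are assumed from existing tests such as state_test.c):

        vm_vaddr_t nested_gva = 0;

        /*
         * Pattern 1 - the test requires nested VMX outright: call
         * nested_vmx_check_supported(), which exits with KSFT_SKIP on a
         * host without VMX.
         *
         * Pattern 2 - nested coverage is optional: probe with the new
         * helpers and allocate nested state only for the flavor that is
         * actually present, leaving nested_gva == 0 otherwise so the
         * guest code skips its nested stages.
         */
        if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
                if (nested_svm_supported())
                        vcpu_alloc_svm(vm, &nested_gva);
                else if (nested_vmx_supported())
                        vcpu_alloc_vmx(vm, &nested_gva);
        }

        if (!nested_gva)
                pr_info("will skip nested state checks\n");

        vcpu_args_set(vm, VCPU_ID, 1, nested_gva);

Probing the CPUID bit before the capability is what lets evmcs_test and hyperv_cpuid skip cleanly on hosts where KVM_CAP_NESTED_STATE is available but nested VMX is not, which is exactly why both now call nested_vmx_supported() first in the hunks above.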