author		Linus Torvalds <torvalds@linux-foundation.org>	2020-06-03 15:13:47 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-03 15:13:47 -0700
commit		039aeb9deb9291f3b19c375a8bc6fa7f768996cc (patch)
tree		d98d5ddf276843995aa214157b587bb88270c530 /tools
parent		6b2591c21273ebf65c13dae5d260ce88f0f197dd (diff)
parent		13ffbd8db1dd43d63d086517872a4e702a6bf309 (diff)
download	linux-039aeb9deb9291f3b19c375a8bc6fa7f768996cc.tar.bz2
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Paolo Bonzini:
"ARM:
- Move the arch-specific code into arch/arm64/kvm
- Start the post-32bit cleanup
- Cherry-pick a few non-invasive pre-NV patches
x86:
- Rework of TLB flushing
- Rework of event injection, especially with respect to nested
virtualization
- Nested AMD event injection facelift, building on the rework of
generic code and fixing a lot of corner cases
- Nested AMD live migration support
- Optimization for TSC deadline MSR writes and IPIs
- Various cleanups
- Asynchronous page fault cleanups (from tglx, common topic branch
with tip tree)
- Interrupt-based delivery of asynchronous "page ready" events (host
side)
- Hyper-V MSRs and hypercalls for guest debugging
- VMX preemption timer fixes
s390:
- Cleanups
Generic:
   - Switch vCPU thread wakeup from swait to rcuwait
The other architectures, and the guest side of the asynchronous page
fault work, will come next week"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (256 commits)
KVM: selftests: fix rdtsc() for vmx_tsc_adjust_test
KVM: check userspace_addr for all memslots
KVM: selftests: update hyperv_cpuid with SynDBG tests
x86/kvm/hyper-v: Add support for synthetic debugger via hypercalls
x86/kvm/hyper-v: enable hypercalls regardless of hypercall page
x86/kvm/hyper-v: Add support for synthetic debugger interface
x86/hyper-v: Add synthetic debugger definitions
KVM: selftests: VMX preemption timer migration test
KVM: nVMX: Fix VMX preemption timer migration
x86/kvm/hyper-v: Explicitly align hcall param for kvm_hyperv_exit
KVM: x86/pmu: Support full width counting
KVM: x86/pmu: Tweak kvm_pmu_get_msr to pass 'struct msr_data' in
KVM: x86: announce KVM_FEATURE_ASYNC_PF_INT
KVM: x86: acknowledgment mechanism for async pf page ready notifications
KVM: x86: interrupt based APF 'page ready' event delivery
KVM: introduce kvm_read_guest_offset_cached()
KVM: rename kvm_arch_can_inject_async_page_present() to kvm_arch_can_dequeue_async_page_present()
KVM: x86: extend struct kvm_vcpu_pv_apf_data with token info
Revert "KVM: async_pf: Fix #DF due to inject "Page not Present" and "Page Ready" exceptions simultaneously"
KVM: VMX: Replace zero-length array with flexible-array
...
Diffstat (limited to 'tools')
19 files changed, 1049 insertions(+), 308 deletions(-)
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index 3f3f780c8c65..43e24903812c 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -400,6 +400,7 @@ struct kvm_sync_regs {
 struct kvm_vmx_nested_state_data {
 	__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
 	__u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
+	__u64 preemption_timer_deadline;
 };
 
 struct kvm_vmx_nested_state_hdr {
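The new field extends struct kvm_vmx_nested_state_data, which userspace reads and writes through KVM_GET_NESTED_STATE/KVM_SET_NESTED_STATE. A minimal, hypothetical sketch of reading it follows (assumes kernel headers that already carry this field and a vCPU that is in VMX non-root mode; a validity flag in the VMX header, not shown in this hunk, gates the field, and error handling is elided):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void dump_nested_deadline(int vcpu_fd)
{
	/* Header plus room for the vmcs12/shadow vmcs12 data pages. */
	struct {
		struct kvm_nested_state state;
		__u8 vmcs[2 * KVM_STATE_NESTED_VMX_VMCS_SIZE];
	} buf;

	memset(&buf, 0, sizeof(buf));
	buf.state.size = sizeof(buf);

	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, &buf.state) < 0) {
		perror("KVM_GET_NESTED_STATE");
		return;
	}

	if (buf.state.format == KVM_STATE_NESTED_FORMAT_VMX)
		printf("preemption timer deadline: %llu\n",
		       (unsigned long long)buf.state.data.vmx[0].preemption_timer_deadline);
}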
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index e83fc8e868f4..d199a3694be8 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -32,6 +32,7 @@ import resource
 import struct
 import re
 import subprocess
+import signal
 from collections import defaultdict, namedtuple
 from functools import reduce
 from datetime import datetime
@@ -228,6 +229,8 @@ IOCTL_NUMBERS = {
     'RESET':        0x00002403,
 }
 
+signal_received = False
+
 ENCODING = locale.getpreferredencoding(False)
 TRACE_FILTER = re.compile(r'^[^\(]*$')
 
@@ -1500,8 +1503,7 @@ class StdFormat(object):
     def get_banner(self):
         return self._banner
 
-    @staticmethod
-    def get_statline(keys, s):
+    def get_statline(self, keys, s):
         res = ''
         for key in keys:
             res += ' %9d' % s[key].delta
@@ -1517,27 +1519,71 @@ class CSVFormat(object):
     def get_banner(self):
         return self._banner
 
-    @staticmethod
-    def get_statline(keys, s):
+    def get_statline(self, keys, s):
         return reduce(lambda res, key: "{},{!s}".format(res, s[key].delta),
                       keys, '')
 
 
 def log(stats, opts, frmt, keys):
     """Prints statistics as reiterating key block, multiple value blocks."""
+    global signal_received
     line = 0
     banner_repeat = 20
+    f = None
+
+    def do_banner(opts):
+        nonlocal f
+        if opts.log_to_file:
+            if not f:
+                try:
+                    f = open(opts.log_to_file, 'a')
+                except (IOError, OSError):
+                    sys.exit("Error: Could not open file: %s" %
+                             opts.log_to_file)
+            if isinstance(frmt, CSVFormat) and f.tell() != 0:
+                return
+        print(frmt.get_banner(), file=f or sys.stdout)
+
+    def do_statline(opts, values):
+        statline = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + \
+                   frmt.get_statline(keys, values)
+        print(statline, file=f or sys.stdout)
+
+    do_banner(opts)
+    banner_printed = True
     while True:
         try:
             time.sleep(opts.set_delay)
-            if line % banner_repeat == 0:
-                print(frmt.get_banner())
-            print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") +
-                  frmt.get_statline(keys, stats.get()))
-            line += 1
+            if signal_received:
+                banner_printed = True
+                line = 0
+                f.close()
+                do_banner(opts)
+                signal_received = False
+            if (line % banner_repeat == 0 and not banner_printed and
+                    not (opts.log_to_file and isinstance(frmt, CSVFormat))):
+                do_banner(opts)
+                banner_printed = True
+            values = stats.get()
+            if (not opts.skip_zero_records or
+                    any(values[k].delta != 0 for k in keys)):
+                do_statline(opts, values)
+                line += 1
+                banner_printed = False
         except KeyboardInterrupt:
             break
 
+    if opts.log_to_file:
+        f.close()
+
+
+def handle_signal(sig, frame):
+    global signal_received
+
+    signal_received = True
+
+    return
+
 
 def is_delay_valid(delay):
     """Verify delay is in valid value range."""
@@ -1610,7 +1656,7 @@ Press any other key to refresh statistics immediately.
     argparser.add_argument('-c', '--csv',
                            action='store_true',
                            default=False,
-                           help='log in csv format - requires option -l/--log',
+                           help='log in csv format - requires option -l/-L',
                            )
     argparser.add_argument('-d', '--debugfs',
                            action='store_true',
@@ -1638,6 +1684,11 @@ Press any other key to refresh statistics immediately.
                            default=False,
                            help='run in logging mode (like vmstat)',
                            )
+    argparser.add_argument('-L', '--log-to-file',
+                           type=str,
+                           metavar='FILE',
+                           help="like '--log', but logging to a file"
+                           )
     argparser.add_argument('-p', '--pid',
                            type=int,
                            default=0,
@@ -1655,9 +1706,16 @@ Press any other key to refresh statistics immediately.
                            default=False,
                            help='retrieve statistics from tracepoints',
                            )
+    argparser.add_argument('-z', '--skip-zero-records',
+                           action='store_true',
+                           default=False,
+                           help='omit records with all zeros in logging mode',
+                           )
     options = argparser.parse_args()
-    if options.csv and not options.log:
+    if options.csv and not (options.log or options.log_to_file):
         sys.exit('Error: Option -c/--csv requires -l/--log')
+    if options.skip_zero_records and not (options.log or options.log_to_file):
+        sys.exit('Error: Option -z/--skip-zero-records requires -l/-L')
     try:
         # verify that we were passed a valid regex up front
         re.compile(options.fields)
@@ -1737,7 +1795,9 @@ def main():
         sys.stdout.write('  ' + '\n  '.join(sorted(set(event_list))) + '\n')
         sys.exit(0)
 
-    if options.log:
+    if options.log or options.log_to_file:
+        if options.log_to_file:
+            signal.signal(signal.SIGHUP, handle_signal)
         keys = sorted(stats.get().keys())
         if options.csv:
             frmt = CSVFormat(keys)
diff --git a/tools/kvm/kvm_stat/kvm_stat.service b/tools/kvm/kvm_stat/kvm_stat.service
new file mode 100644
index 000000000000..71aabaffe779
--- /dev/null
+++ b/tools/kvm/kvm_stat/kvm_stat.service
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+[Unit]
+Description=Service that logs KVM kernel module trace events
+Before=qemu-kvm.service
+
+[Service]
+Type=simple
+ExecStart=/usr/bin/kvm_stat -dtcz -s 10 -L /var/log/kvm_stat.csv
+ExecReload=/bin/kill -HUP $MAINPID
+Restart=always
+SyslogIdentifier=kvm_stat
+SyslogLevel=debug
+
+[Install]
+WantedBy=multi-user.target
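The script's SIGHUP handler only sets a flag; the logging loop then closes and reopens the file, which is what the unit's ExecReload=/bin/kill -HUP pairs with for logrotate-style reopening. A sketch of the same async-signal-safe pattern in C (names such as LOG_PATH are hypothetical; error handling elided):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

#define LOG_PATH "/var/log/kvm_stat.csv"

static volatile sig_atomic_t got_sighup;

static void handle_sighup(int sig)
{
	(void)sig;
	got_sighup = 1;	/* async-signal-safe: only set a flag */
}

int main(void)
{
	FILE *log = fopen(LOG_PATH, "a");

	signal(SIGHUP, handle_sighup);
	for (;;) {
		if (got_sighup) {	/* e.g. logrotate moved the file away */
			fclose(log);
			log = fopen(LOG_PATH, "a");
			got_sighup = 0;
		}
		fprintf(log, "tick\n");
		fflush(log);
		sleep(10);
	}
}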
diff --git a/tools/kvm/kvm_stat/kvm_stat.txt b/tools/kvm/kvm_stat/kvm_stat.txt
index a97ded2aedad..feaf46451e83 100644
--- a/tools/kvm/kvm_stat/kvm_stat.txt
+++ b/tools/kvm/kvm_stat/kvm_stat.txt
@@ -65,8 +65,10 @@ OPTIONS
 	run in batch mode for one second
 
 -c::
---csv=<file>::
-	log in csv format - requires option -l/--log
+--csv::
+	log in csv format. Requires option -l/--log or -L/--log-to-file.
+	When used with option -L/--log-to-file, the header is only ever
+	written to start of file to preserve the format.
 
 -d::
 --debugfs::
@@ -92,6 +94,11 @@ OPTIONS
 --log::
 	run in logging mode (like vmstat)
 
+-L<file>::
+--log-to-file=<file>::
+	like -l/--log, but logging to a file. Appends to existing files.
+
 -p<pid>::
 --pid=<pid>::
 	limit statistics to one virtual machine (pid)
@@ -104,6 +111,10 @@ OPTIONS
 --tracepoints::
 	retrieve statistics from tracepoints
 
+*z*::
+--skip-zero-records::
+	omit records with all zeros in logging mode
+
 SEE ALSO
 --------
 'perf'(1), 'trace-cmd'(1)
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index a9b2b48947ff..f159718f90c0 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -7,10 +7,10 @@
 /x86_64/hyperv_cpuid
 /x86_64/mmio_warning_test
 /x86_64/platform_info_test
-/x86_64/set_memory_region_test
 /x86_64/set_sregs_test
 /x86_64/smm_test
 /x86_64/state_test
+/x86_64/vmx_preemption_timer_test
 /x86_64/svm_vmcall_test
 /x86_64/sync_regs_test
 /x86_64/vmx_close_while_nested_test
@@ -22,4 +22,5 @@
 /demand_paging_test
 /dirty_log_test
 /kvm_create_max_vcpus
+/set_memory_region_test
 /steal_time
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 42f4f49f2a48..b4ff112e5c7e 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -43,10 +43,10 @@ TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
 TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
 TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
 TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
-TEST_GEN_PROGS_x86_64 += x86_64/set_memory_region_test
 TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
 TEST_GEN_PROGS_x86_64 += x86_64/smm_test
 TEST_GEN_PROGS_x86_64 += x86_64/state_test
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
 TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
 TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
@@ -59,12 +59,14 @@ TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
 TEST_GEN_PROGS_x86_64 += demand_paging_test
 TEST_GEN_PROGS_x86_64 += dirty_log_test
 TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
+TEST_GEN_PROGS_x86_64 += set_memory_region_test
 TEST_GEN_PROGS_x86_64 += steal_time
 
 TEST_GEN_PROGS_aarch64 += clear_dirty_log_test
 TEST_GEN_PROGS_aarch64 += demand_paging_test
 TEST_GEN_PROGS_aarch64 += dirty_log_test
 TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
+TEST_GEN_PROGS_aarch64 += set_memory_region_test
 TEST_GEN_PROGS_aarch64 += steal_time
 
 TEST_GEN_PROGS_s390x = s390x/memop
@@ -73,6 +75,7 @@ TEST_GEN_PROGS_s390x += s390x/sync_regs_test
 TEST_GEN_PROGS_s390x += demand_paging_test
 TEST_GEN_PROGS_s390x += dirty_log_test
 TEST_GEN_PROGS_s390x += kvm_create_max_vcpus
+TEST_GEN_PROGS_s390x += set_memory_region_test
 
 TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
 LIBKVM += $(LIBKVM_$(UNAME_M))
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 92e184a422ee..919e161dd289 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -10,6 +10,7 @@
 #include "test_util.h"
 
 #include "asm/kvm.h"
+#include "linux/list.h"
 #include "linux/kvm.h"
 #include <sys/ioctl.h>
 
@@ -113,6 +114,7 @@ int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl,
 void vm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg);
 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
+void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
 void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid);
 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
 			  uint32_t data_memslot, uint32_t pgd_memslot);
@@ -256,6 +258,7 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm);
 unsigned int vm_get_page_size(struct kvm_vm *vm);
 unsigned int vm_get_page_shift(struct kvm_vm *vm);
 unsigned int vm_get_max_gfn(struct kvm_vm *vm);
+int vm_get_fd(struct kvm_vm *vm);
 
 unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
 unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
@@ -311,13 +314,30 @@ void ucall_uninit(struct kvm_vm *vm);
 void ucall(uint64_t cmd, int nargs, ...);
 uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc);
 
+#define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4)	\
+		ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4)
 #define GUEST_SYNC(stage)	ucall(UCALL_SYNC, 2, "hello", stage)
 #define GUEST_DONE()		ucall(UCALL_DONE, 0)
-#define GUEST_ASSERT(_condition) do {			\
-	if (!(_condition))				\
-		ucall(UCALL_ABORT, 2,			\
-			"Failed guest assert: "		\
-			#_condition, __LINE__);		\
+#define __GUEST_ASSERT(_condition, _nargs, _args...) do {	\
+	if (!(_condition))					\
+		ucall(UCALL_ABORT, 2 + _nargs,			\
+			"Failed guest assert: "			\
+			#_condition, __LINE__, _args);		\
 } while (0)
 
+#define GUEST_ASSERT(_condition) \
+	__GUEST_ASSERT((_condition), 0, 0)
+
+#define GUEST_ASSERT_1(_condition, arg1) \
+	__GUEST_ASSERT((_condition), 1, (arg1))
+
+#define GUEST_ASSERT_2(_condition, arg1, arg2) \
+	__GUEST_ASSERT((_condition), 2, (arg1), (arg2))
+
+#define GUEST_ASSERT_3(_condition, arg1, arg2, arg3) \
+	__GUEST_ASSERT((_condition), 3, (arg1), (arg2), (arg3))
+
+#define GUEST_ASSERT_4(_condition, arg1, arg2, arg3, arg4) \
+	__GUEST_ASSERT((_condition), 4, (arg1), (arg2), (arg3), (arg4))
+
 #endif /* SELFTEST_KVM_UTIL_H */
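The numbered GUEST_ASSERT_N variants let guest code attach up to four values to a failing assertion, which travel to the host over the ucall ABORT channel. A small guest-side usage sketch (only meaningful when compiled inside the selftest harness; val/expected are hypothetical):

#include "kvm_util.h"

/* Guest-side sketch: report the offending values along with the assert. */
static void guest_code(void)
{
	uint64_t val = 0, expected = 1;

	/* On failure the host's get_ucall() sees UCALL_ABORT plus both values. */
	GUEST_ASSERT_2(val == expected, val, expected);

	GUEST_DONE();
}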
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 7428513a4c68..82b7fe16a824 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -79,13 +79,16 @@ static inline uint64_t get_desc64_base(const struct desc64 *desc)
 static inline uint64_t rdtsc(void)
 {
 	uint32_t eax, edx;
-
+	uint64_t tsc_val;
 	/*
 	 * The lfence is to wait (on Intel CPUs) until all previous
-	 * instructions have been executed.
+	 * instructions have been executed. If software requires RDTSC to be
+	 * executed prior to execution of any subsequent instruction, it can
+	 * execute LFENCE immediately after RDTSC
 	 */
-	__asm__ __volatile__("lfence; rdtsc" : "=a"(eax), "=d"(edx));
-	return ((uint64_t)edx) << 32 | eax;
+	__asm__ __volatile__("lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
+	tsc_val = ((uint64_t)edx) << 32 | eax;
+	return tsc_val;
 }
 
 static inline uint64_t rdtscp(uint32_t *aux)
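The trailing LFENCE keeps later instructions from beginning execution before RDTSC retires, which matters when timing short code sequences. A standalone host-side sketch of the fenced pattern (x86_64 GCC/Clang assumed; the busy loop is purely illustrative):

#include <stdint.h>
#include <stdio.h>

/* Serialized TSC read, mirroring the lfence; rdtsc; lfence sequence above. */
static inline uint64_t rdtsc_fenced(void)
{
	uint32_t eax, edx;

	__asm__ __volatile__("lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
	return ((uint64_t)edx) << 32 | eax;
}

int main(void)
{
	uint64_t start = rdtsc_fenced();

	for (volatile int i = 0; i < 1000000; i++)
		;	/* work being timed */

	printf("elapsed cycles: %llu\n",
	       (unsigned long long)(rdtsc_fenced() - start));
	return 0;
}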
diff --git a/tools/testing/selftests/kvm/include/x86_64/svm_util.h b/tools/testing/selftests/kvm/include/x86_64/svm_util.h
index cd037917fece..674151d24fcf 100644
--- a/tools/testing/selftests/kvm/include/x86_64/svm_util.h
+++ b/tools/testing/selftests/kvm/include/x86_64/svm_util.h
@@ -35,4 +35,14 @@ void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp);
 void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa);
 void nested_svm_check_supported(void);
 
+static inline bool cpu_has_svm(void)
+{
+	u32 eax = 0x80000001, ecx;
+
+	asm("cpuid" :
+	    "=a" (eax), "=c" (ecx) : "0" (eax) : "ebx", "edx");
+
+	return ecx & CPUID_SVM;
+}
+
 #endif /* SELFTEST_KVM_SVM_UTILS_H */
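cpu_has_svm() queries CPUID leaf 0x80000001, whose ECX bit 2 advertises SVM on AMD parts. The same check done host-side with the compiler's cpuid helper, as a runnable sketch:

#include <cpuid.h>
#include <stdbool.h>
#include <stdio.h>

static bool host_cpu_has_svm(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
		return false;

	return ecx & (1u << 2);	/* same bit CPUID_SVM names in the header */
}

int main(void)
{
	printf("SVM %s\n", host_cpu_has_svm() ? "supported" : "not supported");
	return 0;
}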
diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index 3d27069b9ed9..ccff3e6e2704 100644
--- a/tools/testing/selftests/kvm/include/x86_64/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -575,6 +575,33 @@ struct vmx_pages {
 	void *eptp;
 };
 
+union vmx_basic {
+	u64 val;
+	struct {
+		u32 revision;
+		u32	size:13,
+			reserved1:3,
+			width:1,
+			dual:1,
+			type:4,
+			insouts:1,
+			ctrl:1,
+			vm_entry_exception_ctrl:1,
+			reserved2:7;
+	};
+};
+
+union vmx_ctrl_msr {
+	u64 val;
+	struct {
+		u32 set, clr;
+	};
+};
+
+union vmx_basic basic;
+union vmx_ctrl_msr ctrl_pin_rev;
+union vmx_ctrl_msr ctrl_exit_rev;
+
 struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
 bool prepare_for_vmx_operation(struct vmx_pages *vmx);
 void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
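The vmx_basic union decodes IA32_VMX_BASIC: the low 32 bits are the VMCS revision, bits 44:32 the VMCS region size, and the ctrl bit (bit 55) says whether the TRUE control MSRs should be consulted; the ctrl MSR unions split a control MSR into allowed-0 (set) and allowed-1 (clr) halves. A standalone decode sketch (the raw value is hypothetical; on real hardware it comes from rdmsr, and bitfield layout is compiler-dependent, matching the selftests' assumption):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the union from the diff, decoding a raw IA32_VMX_BASIC value. */
union vmx_basic {
	uint64_t val;
	struct {
		uint32_t revision;
		uint32_t size:13, reserved1:3, width:1, dual:1, type:4,
			 insouts:1, ctrl:1, vm_entry_exception_ctrl:1,
			 reserved2:7;
	};
};

int main(void)
{
	/* Hypothetical raw MSR value: revision 1, 4 KiB region, TRUE ctls. */
	union vmx_basic basic = { .val = 0x0080100000000001ULL };

	printf("VMCS revision: %u, region size: %u bytes\n",
	       basic.revision, basic.size);
	printf("TRUE control MSRs %savailable\n", basic.ctrl ? "" : "not ");
	return 0;
}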
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 9622431069bc..c9cede5c7d0d 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -161,6 +161,9 @@ struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
 	vm = calloc(1, sizeof(*vm));
 	TEST_ASSERT(vm != NULL, "Insufficient Memory");
 
+	INIT_LIST_HEAD(&vm->vcpus);
+	INIT_LIST_HEAD(&vm->userspace_mem_regions);
+
 	vm->mode = mode;
 	vm->type = 0;
 
@@ -258,8 +261,7 @@ void kvm_vm_restart(struct kvm_vm *vmp, int perm)
 	if (vmp->has_irqchip)
 		vm_create_irqchip(vmp);
 
-	for (region = vmp->userspace_mem_region_head; region;
-		region = region->next) {
+	list_for_each_entry(region, &vmp->userspace_mem_regions, list) {
 		int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
 		TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
 			    "  rc: %i errno: %i\n"
@@ -319,8 +321,7 @@ userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
 {
 	struct userspace_mem_region *region;
 
-	for (region = vm->userspace_mem_region_head; region;
-		region = region->next) {
+	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
 		uint64_t existing_start = region->region.guest_phys_addr;
 		uint64_t existing_end = region->region.guest_phys_addr
 			+ region->region.memory_size - 1;
@@ -378,11 +379,11 @@ kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
  */
 struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
 {
-	struct vcpu *vcpup;
+	struct vcpu *vcpu;
 
-	for (vcpup = vm->vcpu_head; vcpup; vcpup = vcpup->next) {
-		if (vcpup->id == vcpuid)
-			return vcpup;
+	list_for_each_entry(vcpu, &vm->vcpus, list) {
+		if (vcpu->id == vcpuid)
+			return vcpu;
 	}
 
 	return NULL;
@@ -392,18 +393,16 @@ struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
  * VM VCPU Remove
  *
  * Input Args:
- *   vm - Virtual Machine
- *   vcpuid - VCPU ID
+ *   vcpu - VCPU to remove
  *
  * Output Args: None
 *
 * Return: None, TEST_ASSERT failures for all error conditions
 *
- * Within the VM specified by vm, removes the VCPU given by vcpuid.
+ * Removes a vCPU from a VM and frees its resources.
  */
-static void vm_vcpu_rm(struct kvm_vm *vm, uint32_t vcpuid)
+static void vm_vcpu_rm(struct vcpu *vcpu)
 {
-	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
 	int ret;
 
 	ret = munmap(vcpu->state, sizeof(*vcpu->state));
@@ -413,21 +412,17 @@ static void vm_vcpu_rm(struct kvm_vm *vm, uint32_t vcpuid)
 	TEST_ASSERT(ret == 0, "Close of VCPU fd failed, rc: %i "
 		"errno: %i", ret, errno);
 
-	if (vcpu->next)
-		vcpu->next->prev = vcpu->prev;
-	if (vcpu->prev)
-		vcpu->prev->next = vcpu->next;
-	else
-		vm->vcpu_head = vcpu->next;
+	list_del(&vcpu->list);
 	free(vcpu);
 }
 
 void kvm_vm_release(struct kvm_vm *vmp)
 {
+	struct vcpu *vcpu, *tmp;
 	int ret;
 
-	while (vmp->vcpu_head)
-		vm_vcpu_rm(vmp, vmp->vcpu_head->id);
+	list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
+		vm_vcpu_rm(vcpu);
 
 	ret = close(vmp->fd);
 	TEST_ASSERT(ret == 0, "Close of vm fd failed,\n"
@@ -438,35 +433,38 @@ void kvm_vm_release(struct kvm_vm *vmp)
 		"  vmp->kvm_fd: %i rc: %i errno: %i", vmp->kvm_fd, ret, errno);
 }
 
+static void __vm_mem_region_delete(struct kvm_vm *vm,
+				   struct userspace_mem_region *region)
+{
+	int ret;
+
+	list_del(&region->list);
+
+	region->region.memory_size = 0;
+	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
+	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed, "
+		    "rc: %i errno: %i", ret, errno);
+
+	sparsebit_free(&region->unused_phy_pages);
+	ret = munmap(region->mmap_start, region->mmap_size);
+	TEST_ASSERT(ret == 0, "munmap failed, rc: %i errno: %i", ret, errno);
+
+	free(region);
+}
+
 /*
  * Destroys and frees the VM pointed to by vmp.
  */
 void kvm_vm_free(struct kvm_vm *vmp)
 {
-	int ret;
+	struct userspace_mem_region *region, *tmp;
 
 	if (vmp == NULL)
 		return;
 
 	/* Free userspace_mem_regions. */
-	while (vmp->userspace_mem_region_head) {
-		struct userspace_mem_region *region
-			= vmp->userspace_mem_region_head;
-
-		region->region.memory_size = 0;
-		ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION,
-			&region->region);
-		TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed, "
-			"rc: %i errno: %i", ret, errno);
-
-		vmp->userspace_mem_region_head = region->next;
-		sparsebit_free(&region->unused_phy_pages);
-		ret = munmap(region->mmap_start, region->mmap_size);
-		TEST_ASSERT(ret == 0, "munmap failed, rc: %i errno: %i",
-			    ret, errno);
-
-		free(region);
-	}
+	list_for_each_entry_safe(region, tmp, &vmp->userspace_mem_regions, list)
+		__vm_mem_region_delete(vmp, region);
 
 	/* Free sparsebit arrays. */
 	sparsebit_free(&vmp->vpages_valid);
@@ -612,12 +610,10 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
 		(uint64_t) region->region.memory_size);
 
 	/* Confirm no region with the requested slot already exists. */
-	for (region = vm->userspace_mem_region_head; region;
-		region = region->next) {
-		if (region->region.slot == slot)
-			break;
-	}
-	if (region != NULL)
+	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
+		if (region->region.slot != slot)
+			continue;
+
 		TEST_FAIL("A mem region with the requested slot "
			"already exists.\n"
			"  requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
@@ -626,6 +622,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
			region->region.slot,
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size);
+	}
 
 	/* Allocate and initialize new mem region structure. */
 	region = calloc(1, sizeof(*region));
@@ -686,10 +683,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
 		guest_paddr, (uint64_t) region->region.memory_size);
 
 	/* Add to linked-list of memory regions. */
-	if (vm->userspace_mem_region_head)
-		vm->userspace_mem_region_head->prev = region;
-	region->next = vm->userspace_mem_region_head;
-	vm->userspace_mem_region_head = region;
+	list_add(&region->list, &vm->userspace_mem_regions);
 }
 
 /*
@@ -712,20 +706,17 @@ memslot2region(struct kvm_vm *vm, uint32_t memslot)
 {
 	struct userspace_mem_region *region;
 
-	for (region = vm->userspace_mem_region_head; region;
-		region = region->next) {
+	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
 		if (region->region.slot == memslot)
-			break;
-	}
-	if (region == NULL) {
-		fprintf(stderr, "No mem region with the requested slot found,\n"
-			"  requested slot: %u\n", memslot);
-		fputs("---- vm dump ----\n", stderr);
-		vm_dump(stderr, vm, 2);
-		TEST_FAIL("Mem region not found");
+			return region;
 	}
 
-	return region;
+	fprintf(stderr, "No mem region with the requested slot found,\n"
+		"  requested slot: %u\n", memslot);
+	fputs("---- vm dump ----\n", stderr);
+	vm_dump(stderr, vm, 2);
+	TEST_FAIL("Mem region not found");
+	return NULL;
 }
 
 /*
@@ -789,6 +780,24 @@ void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
 }
 
 /*
+ * VM Memory Region Delete
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   slot - Slot of the memory region to delete
+ *
+ * Output Args: None
+ *
+ * Return: None
+ *
+ * Delete a memory region.
+ */
+void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
+{
+	__vm_mem_region_delete(vm, memslot2region(vm, slot));
+}
+
+/*
  * VCPU mmap Size
  *
  * Input Args: None
@@ -863,10 +872,7 @@ void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid)
 		"vcpu id: %u errno: %i", vcpuid, errno);
 
 	/* Add to linked-list of VCPUs. */
-	if (vm->vcpu_head)
-		vm->vcpu_head->prev = vcpu;
-	vcpu->next = vm->vcpu_head;
-	vm->vcpu_head = vcpu;
+	list_add(&vcpu->list, &vm->vcpus);
 }
 
 /*
@@ -1059,8 +1065,8 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
 {
 	struct userspace_mem_region *region;
-	for (region = vm->userspace_mem_region_head; region;
-	     region = region->next) {
+
+	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
 		if ((gpa >= region->region.guest_phys_addr)
 			&& (gpa <= (region->region.guest_phys_addr
 				+ region->region.memory_size - 1)))
@@ -1092,8 +1098,8 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
 {
 	struct userspace_mem_region *region;
-	for (region = vm->userspace_mem_region_head; region;
-	     region = region->next) {
+
+	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
 		if ((hva >= region->host_mem)
 			&& (hva <= (region->host_mem
 				+ region->region.memory_size - 1)))
@@ -1529,8 +1535,7 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 	fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
 	fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
 	fprintf(stream, "%*sMem Regions:\n", indent, "");
-	for (region = vm->userspace_mem_region_head; region;
-		region = region->next) {
+	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
 		fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
 			"host_virt: %p\n", indent + 2, "",
 			(uint64_t) region->region.guest_phys_addr,
@@ -1549,7 +1554,7 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 		virt_dump(stream, vm, indent + 4);
 	}
 	fprintf(stream, "%*sVCPUs:\n", indent, "");
-	for (vcpu = vm->vcpu_head; vcpu; vcpu = vcpu->next)
+	list_for_each_entry(vcpu, &vm->vcpus, list)
 		vcpu_dump(stream, vm, vcpu->id, indent + 2);
 }
 
@@ -1743,6 +1748,11 @@ unsigned int vm_get_max_gfn(struct kvm_vm *vm)
 	return vm->max_gfn;
 }
 
+int vm_get_fd(struct kvm_vm *vm)
+{
+	return vm->fd;
+}
+
 static unsigned int vm_calc_num_pages(unsigned int num_pages,
 				      unsigned int page_shift,
 				      unsigned int new_page_shift,
diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
index ca56a0133127..2ef446520748 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h
+++ b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
@@ -13,7 +13,6 @@
 #define KVM_DEV_PATH		"/dev/kvm"
 
 struct userspace_mem_region {
-	struct userspace_mem_region *next, *prev;
 	struct kvm_userspace_memory_region region;
 	struct sparsebit *unused_phy_pages;
 	int fd;
@@ -21,10 +20,11 @@ struct userspace_mem_region {
 	void *host_mem;
 	void *mmap_start;
 	size_t mmap_size;
+	struct list_head list;
 };
 
 struct vcpu {
-	struct vcpu *next, *prev;
+	struct list_head list;
 	uint32_t id;
 	int fd;
 	struct kvm_run *state;
@@ -41,8 +41,8 @@ struct kvm_vm {
 	unsigned int pa_bits;
 	unsigned int va_bits;
 	uint64_t max_gfn;
-	struct vcpu *vcpu_head;
-	struct userspace_mem_region *userspace_mem_region_head;
+	struct list_head vcpus;
+	struct list_head userspace_mem_regions;
 	struct sparsebit *vpages_valid;
 	struct sparsebit *vpages_mapped;
 	bool has_irqchip;
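These two hunks replace the hand-rolled next/prev pointer chains with the kernel's intrusive struct list_head, which the selftests pick up from tools/include. A minimal sketch of the pattern (assumes the linux/list.h helpers from the kernel tree; struct item and its fields are hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include "linux/list.h"

struct item {
	int id;
	struct list_head list;	/* embedded node, as in struct vcpu above */
};

int main(void)
{
	LIST_HEAD(items);
	struct item *it, *tmp;
	int i;

	for (i = 0; i < 3; i++) {
		it = calloc(1, sizeof(*it));
		it->id = i;
		list_add(&it->list, &items);
	}

	/* The _safe variant tolerates deletion mid-walk, as in kvm_vm_free(). */
	list_for_each_entry_safe(it, tmp, &items, list) {
		printf("id %d\n", it->id);
		list_del(&it->list);
		free(it);
	}
	return 0;
}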
diff --git a/tools/testing/selftests/kvm/lib/s390x/processor.c b/tools/testing/selftests/kvm/lib/s390x/processor.c
index 8d94961bd046..a88c5d665725 100644
--- a/tools/testing/selftests/kvm/lib/s390x/processor.c
+++ b/tools/testing/selftests/kvm/lib/s390x/processor.c
@@ -233,7 +233,10 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
 
 void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
 {
-	struct vcpu *vcpu = vm->vcpu_head;
+	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+
+	if (!vcpu)
+		return;
 
 	fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
 		indent, "", vcpu->state->psw_mask, vcpu->state->psw_addr);
diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
new file mode 100644
index 000000000000..b3ece55a2da6
--- /dev/null
+++ b/tools/testing/selftests/kvm/set_memory_region_test.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <pthread.h>
+#include <sched.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include <linux/compiler.h>
+
+#include <test_util.h>
+#include <kvm_util.h>
+#include <processor.h>
+
+#define VCPU_ID 0
+
+/*
+ * s390x needs at least 1MB alignment, and the x86_64 MOVE/DELETE tests need a
+ * 2MB sized and aligned region so that the initial region corresponds to
+ * exactly one large page.
+ */
+#define MEM_REGION_SIZE		0x200000
+
+#ifdef __x86_64__
+/*
+ * Somewhat arbitrary location and slot, intended to not overlap anything.
+ */
+#define MEM_REGION_GPA		0xc0000000
+#define MEM_REGION_SLOT		10
+
+static const uint64_t MMIO_VAL = 0xbeefull;
+
+extern const uint64_t final_rip_start;
+extern const uint64_t final_rip_end;
+
+static sem_t vcpu_ready;
+
+static inline uint64_t guest_spin_on_val(uint64_t spin_val)
+{
+	uint64_t val;
+
+	do {
+		val = READ_ONCE(*((uint64_t *)MEM_REGION_GPA));
+	} while (val == spin_val);
+
+	GUEST_SYNC(0);
+	return val;
+}
+
+static void *vcpu_worker(void *data)
+{
+	struct kvm_vm *vm = data;
+	struct kvm_run *run;
+	struct ucall uc;
+	uint64_t cmd;
+
+	/*
+	 * Loop until the guest is done.  Re-enter the guest on all MMIO exits,
+	 * which will occur if the guest attempts to access a memslot after it
+	 * has been deleted or while it is being moved .
+	 */
+	run = vcpu_state(vm, VCPU_ID);
+
+	while (1) {
+		vcpu_run(vm, VCPU_ID);
+
+		if (run->exit_reason == KVM_EXIT_IO) {
+			cmd = get_ucall(vm, VCPU_ID, &uc);
+			if (cmd != UCALL_SYNC)
+				break;
+
+			sem_post(&vcpu_ready);
+			continue;
+		}
+
+		if (run->exit_reason != KVM_EXIT_MMIO)
+			break;
+
+		TEST_ASSERT(!run->mmio.is_write, "Unexpected exit mmio write");
+		TEST_ASSERT(run->mmio.len == 8,
+			    "Unexpected exit mmio size = %u", run->mmio.len);
+
+		TEST_ASSERT(run->mmio.phys_addr == MEM_REGION_GPA,
+			    "Unexpected exit mmio address = 0x%llx",
+			    run->mmio.phys_addr);
+		memcpy(run->mmio.data, &MMIO_VAL, 8);
+	}
+
+	if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT)
+		TEST_FAIL("%s at %s:%ld, val = %lu", (const char *)uc.args[0],
+			  __FILE__, uc.args[1], uc.args[2]);
+
+	return NULL;
+}
+
+static void wait_for_vcpu(void)
+{
+	struct timespec ts;
+
+	TEST_ASSERT(!clock_gettime(CLOCK_REALTIME, &ts),
+		    "clock_gettime() failed: %d\n", errno);
+
+	ts.tv_sec += 2;
+	TEST_ASSERT(!sem_timedwait(&vcpu_ready, &ts),
+		    "sem_timedwait() failed: %d\n", errno);
+
+	/* Wait for the vCPU thread to reenter the guest. */
+	usleep(100000);
+}
+
+static struct kvm_vm *spawn_vm(pthread_t *vcpu_thread, void *guest_code)
+{
+	struct kvm_vm *vm;
+	uint64_t *hva;
+	uint64_t gpa;
+
+	vm = vm_create_default(VCPU_ID, 0, guest_code);
+
+	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+
+	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
+				    MEM_REGION_GPA, MEM_REGION_SLOT,
+				    MEM_REGION_SIZE / getpagesize(), 0);
+
+	/*
+	 * Allocate and map two pages so that the GPA accessed by guest_code()
+	 * stays valid across the memslot move.
+	 */
+	gpa = vm_phy_pages_alloc(vm, 2, MEM_REGION_GPA, MEM_REGION_SLOT);
+	TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");
+
+	virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2, 0);
+
+	/* Ditto for the host mapping so that both pages can be zeroed. */
+	hva = addr_gpa2hva(vm, MEM_REGION_GPA);
+	memset(hva, 0, 2 * 4096);
+
+	pthread_create(vcpu_thread, NULL, vcpu_worker, vm);
+
+	/* Ensure the guest thread is spun up. */
+	wait_for_vcpu();
+
+	return vm;
+}
+
+
+static void guest_code_move_memory_region(void)
+{
+	uint64_t val;
+
+	GUEST_SYNC(0);
+
+	/*
+	 * Spin until the memory region is moved to a misaligned address.  This
+	 * may or may not trigger MMIO, as the window where the memslot is
+	 * invalid is quite small.
+	 */
+	val = guest_spin_on_val(0);
+	GUEST_ASSERT_1(val == 1 || val == MMIO_VAL, val);
+
+	/* Spin until the memory region is realigned. */
+	val = guest_spin_on_val(MMIO_VAL);
+	GUEST_ASSERT_1(val == 1, val);
+
+	GUEST_DONE();
+}
+
+static void test_move_memory_region(void)
+{
+	pthread_t vcpu_thread;
+	struct kvm_vm *vm;
+	uint64_t *hva;
+
+	vm = spawn_vm(&vcpu_thread, guest_code_move_memory_region);
+
+	hva = addr_gpa2hva(vm, MEM_REGION_GPA);
+
+	/*
+	 * Shift the region's base GPA.  The guest should not see "2" as the
+	 * hva->gpa translation is misaligned, i.e. the guest is accessing a
+	 * different host pfn.
+	 */
+	vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA - 4096);
+	WRITE_ONCE(*hva, 2);
+
+	/*
+	 * The guest _might_ see an invalid memslot and trigger MMIO, but it's
+	 * a tiny window.  Spin and defer the sync until the memslot is
+	 * restored and guest behavior is once again deterministic.
+	 */
+	usleep(100000);
+
+	/*
+	 * Note, value in memory needs to be changed *before* restoring the
+	 * memslot, else the guest could race the update and see "2".
+	 */
+	WRITE_ONCE(*hva, 1);
+
+	/* Restore the original base, the guest should see "1". */
+	vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA);
+	wait_for_vcpu();
+	/* Defered sync from when the memslot was misaligned (above). */
+	wait_for_vcpu();
+
+	pthread_join(vcpu_thread, NULL);
+
+	kvm_vm_free(vm);
+}
+
+static void guest_code_delete_memory_region(void)
+{
+	uint64_t val;
+
+	GUEST_SYNC(0);
+
+	/* Spin until the memory region is deleted. */
+	val = guest_spin_on_val(0);
+	GUEST_ASSERT_1(val == MMIO_VAL, val);
+
+	/* Spin until the memory region is recreated. */
+	val = guest_spin_on_val(MMIO_VAL);
+	GUEST_ASSERT_1(val == 0, val);
+
+	/* Spin until the memory region is deleted. */
+	val = guest_spin_on_val(0);
+	GUEST_ASSERT_1(val == MMIO_VAL, val);
+
+	asm("1:\n\t"
+	    ".pushsection .rodata\n\t"
+	    ".global final_rip_start\n\t"
+	    "final_rip_start: .quad 1b\n\t"
+	    ".popsection");
+
+	/* Spin indefinitely (until the code memslot is deleted). */
+	guest_spin_on_val(MMIO_VAL);
+
+	asm("1:\n\t"
+	    ".pushsection .rodata\n\t"
+	    ".global final_rip_end\n\t"
+	    "final_rip_end: .quad 1b\n\t"
+	    ".popsection");
+
+	GUEST_ASSERT_1(0, 0);
+}
+
+static void test_delete_memory_region(void)
+{
+	pthread_t vcpu_thread;
+	struct kvm_regs regs;
+	struct kvm_run *run;
+	struct kvm_vm *vm;
+
+	vm = spawn_vm(&vcpu_thread, guest_code_delete_memory_region);
+
+	/* Delete the memory region, the guest should not die. */
+	vm_mem_region_delete(vm, MEM_REGION_SLOT);
+	wait_for_vcpu();
+
+	/* Recreate the memory region.  The guest should see "0". */
+	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
+				    MEM_REGION_GPA, MEM_REGION_SLOT,
+				    MEM_REGION_SIZE / getpagesize(), 0);
+	wait_for_vcpu();
+
+	/* Delete the region again so that there's only one memslot left. */
+	vm_mem_region_delete(vm, MEM_REGION_SLOT);
+	wait_for_vcpu();
+
+	/*
+	 * Delete the primary memslot.  This should cause an emulation error or
+	 * shutdown due to the page tables getting nuked.
+	 */
+	vm_mem_region_delete(vm, 0);
+
+	pthread_join(vcpu_thread, NULL);
+
+	run = vcpu_state(vm, VCPU_ID);
+
+	TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN ||
+		    run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
+		    "Unexpected exit reason = %d", run->exit_reason);
+
+	vcpu_regs_get(vm, VCPU_ID, &regs);
+
+	/*
+	 * On AMD, after KVM_EXIT_SHUTDOWN the VMCB has been reinitialized already,
+	 * so the instruction pointer would point to the reset vector.
+	 */
+	if (run->exit_reason == KVM_EXIT_INTERNAL_ERROR)
+		TEST_ASSERT(regs.rip >= final_rip_start &&
+			    regs.rip < final_rip_end,
+			    "Bad rip, expected 0x%lx - 0x%lx, got 0x%llx\n",
+			    final_rip_start, final_rip_end, regs.rip);
+
+	kvm_vm_free(vm);
+}
+
+static void test_zero_memory_regions(void)
+{
+	struct kvm_run *run;
+	struct kvm_vm *vm;
+
+	pr_info("Testing KVM_RUN with zero added memory regions\n");
+
+	vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
+	vm_vcpu_add(vm, VCPU_ID);
+
+	TEST_ASSERT(!ioctl(vm_get_fd(vm), KVM_SET_NR_MMU_PAGES, 64),
+		    "KVM_SET_NR_MMU_PAGES failed, errno = %d\n", errno);
+	vcpu_run(vm, VCPU_ID);
+
+	run = vcpu_state(vm, VCPU_ID);
+	TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
+		    "Unexpected exit_reason = %u\n", run->exit_reason);
+
+	kvm_vm_free(vm);
+}
+#endif /* __x86_64__ */
+
+/*
+ * Test it can be added memory slots up to KVM_CAP_NR_MEMSLOTS, then any
+ * tentative to add further slots should fail.
+ */
+static void test_add_max_memory_regions(void)
+{
+	int ret;
+	struct kvm_vm *vm;
+	uint32_t max_mem_slots;
+	uint32_t slot;
+	uint64_t guest_addr = 0x0;
+	uint64_t mem_reg_npages;
+	void *mem;
+
+	max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
+	TEST_ASSERT(max_mem_slots > 0,
+		    "KVM_CAP_NR_MEMSLOTS should be greater than 0");
+	pr_info("Allowed number of memory slots: %i\n", max_mem_slots);
+
+	vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
+
+	mem_reg_npages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, MEM_REGION_SIZE);
+
+	/* Check it can be added memory slots up to the maximum allowed */
+	pr_info("Adding slots 0..%i, each memory region with %dK size\n",
+		(max_mem_slots - 1), MEM_REGION_SIZE >> 10);
+	for (slot = 0; slot < max_mem_slots; slot++) {
+		vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+					    guest_addr, slot, mem_reg_npages,
+					    0);
+		guest_addr += MEM_REGION_SIZE;
+	}
+
+	/* Check it cannot be added memory slots beyond the limit */
+	mem = mmap(NULL, MEM_REGION_SIZE, PROT_READ | PROT_WRITE,
+		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+	TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host");
+
+	ret = ioctl(vm_get_fd(vm), KVM_SET_USER_MEMORY_REGION,
+		    &(struct kvm_userspace_memory_region) {slot, 0, guest_addr,
+		    MEM_REGION_SIZE, (uint64_t) mem});
+	TEST_ASSERT(ret == -1 && errno == EINVAL,
+		    "Adding one more memory slot should fail with EINVAL");
+
+	munmap(mem, MEM_REGION_SIZE);
+	kvm_vm_free(vm);
+}
+
+int main(int argc, char *argv[])
+{
+#ifdef __x86_64__
+	int i, loops;
+#endif
+
+	/* Tell stdout not to buffer its content */
+	setbuf(stdout, NULL);
+
+#ifdef __x86_64__
+	/*
+	 * FIXME: the zero-memslot test fails on aarch64 and s390x because
+	 * KVM_RUN fails with ENOEXEC or EFAULT.
+	 */
+	test_zero_memory_regions();
+#endif
+
+	test_add_max_memory_regions();
+
+#ifdef __x86_64__
+	if (argc > 1)
+		loops = atoi(argv[1]);
+	else
+		loops = 10;
+
+	pr_info("Testing MOVE of in-use region, %d loops\n", loops);
+	for (i = 0; i < loops; i++)
+		test_move_memory_region();
+
+	pr_info("Testing DELETE of in-use region, %d loops\n", loops);
+	for (i = 0; i < loops; i++)
+		test_delete_memory_region();
+#endif
+
+	return 0;
+}
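The library's __vm_mem_region_delete() (and hence the DELETE test above) relies on KVM's convention that a KVM_SET_USER_MEMORY_REGION call with memory_size == 0 removes the slot. A hedged host-side sketch of just that operation (vm_fd and slot are assumed to reference an existing VM and a populated slot):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Deleting a memslot: KVM treats memory_size == 0 as "remove this slot". */
static int delete_memslot(int vm_fd, __u32 slot)
{
	struct kvm_userspace_memory_region region;

	memset(&region, 0, sizeof(region));
	region.slot = slot;	/* memory_size stays 0 */

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}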
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
index 83323f3d7ca0..4a7967cca281 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
@@ -26,18 +26,18 @@ static void guest_code(void)
 {
 }
 
-static int smt_possible(void)
+static bool smt_possible(void)
 {
 	char buf[16];
 	FILE *f;
-	bool res = 1;
+	bool res = true;
 
 	f = fopen("/sys/devices/system/cpu/smt/control", "r");
 	if (f) {
 		if (fread(buf, sizeof(*buf), sizeof(buf), f) > 0) {
 			if (!strncmp(buf, "forceoff", 8) ||
 			    !strncmp(buf, "notsupported", 12))
-				res = 0;
+				res = false;
 		}
 		fclose(f);
 	}
@@ -46,29 +46,31 @@ static int smt_possible(void)
 }
 
 static void test_hv_cpuid(struct kvm_cpuid2 *hv_cpuid_entries,
-			  int evmcs_enabled)
+			  bool evmcs_enabled)
 {
 	int i;
+	int nent = 9;
+	u32 test_val;
 
-	if (!evmcs_enabled)
-		TEST_ASSERT(hv_cpuid_entries->nent == 6,
-			    "KVM_GET_SUPPORTED_HV_CPUID should return 6 entries"
-			    " when Enlightened VMCS is disabled (returned %d)",
-			    hv_cpuid_entries->nent);
-	else
-		TEST_ASSERT(hv_cpuid_entries->nent == 7,
-			    "KVM_GET_SUPPORTED_HV_CPUID should return 7 entries"
-			    " when Enlightened VMCS is enabled (returned %d)",
-			    hv_cpuid_entries->nent);
+	if (evmcs_enabled)
+		nent += 1; /* 0x4000000A */
+
+	TEST_ASSERT(hv_cpuid_entries->nent == nent,
+		    "KVM_GET_SUPPORTED_HV_CPUID should return %d entries"
+		    " with evmcs=%d (returned %d)",
+		    nent, evmcs_enabled, hv_cpuid_entries->nent);
 
 	for (i = 0; i < hv_cpuid_entries->nent; i++) {
 		struct kvm_cpuid_entry2 *entry = &hv_cpuid_entries->entries[i];
 
 		TEST_ASSERT((entry->function >= 0x40000000) &&
-			    (entry->function <= 0x4000000A),
+			    (entry->function <= 0x40000082),
 			    "function %x is our of supported range",
 			    entry->function);
 
+		TEST_ASSERT(evmcs_enabled || (entry->function != 0x4000000A),
+			    "0x4000000A leaf should not be reported");
+
 		TEST_ASSERT(entry->index == 0,
 			    ".index field should be zero");
 
@@ -78,12 +80,23 @@ static void test_hv_cpuid(struct kvm_cpuid2 *hv_cpuid_entries,
 		TEST_ASSERT(!entry->padding[0] && !entry->padding[1] &&
 			    !entry->padding[2], "padding should be zero");
 
-		if (entry->function == 0x40000004) {
-			int nononarchcs = !!(entry->eax & (1UL << 18));
+		switch (entry->function) {
+		case 0x40000000:
+			test_val = 0x40000082;
 
-			TEST_ASSERT(nononarchcs == !smt_possible(),
+			TEST_ASSERT(entry->eax == test_val,
+				    "Wrong max leaf report in 0x40000000.EAX: %x"
+				    " (evmcs=%d)",
+				    entry->eax, evmcs_enabled
+			);
+			break;
+		case 0x40000004:
+			test_val = entry->eax & (1UL << 18);
+
+			TEST_ASSERT(!!test_val == !smt_possible(),
 				    "NoNonArchitecturalCoreSharing bit"
 				    " doesn't reflect SMT setting");
+			break;
+		}
 
 		/*
@@ -133,8 +146,9 @@ struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(struct kvm_vm *vm)
 int main(int argc, char *argv[])
 {
 	struct kvm_vm *vm;
-	int rv;
+	int rv, stage;
 	struct kvm_cpuid2 *hv_cpuid_entries;
+	bool evmcs_enabled;
 
 	/* Tell stdout not to buffer its content */
 	setbuf(stdout, NULL);
@@ -145,36 +159,31 @@ int main(int argc, char *argv[])
 		exit(KSFT_SKIP);
 	}
 
-	/* Create VM */
-	vm = vm_create_default(VCPU_ID, 0, guest_code);
-
-	test_hv_cpuid_e2big(vm);
-
-	hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm);
-	if (!hv_cpuid_entries)
-		return 1;
-
-	test_hv_cpuid(hv_cpuid_entries, 0);
-
-	free(hv_cpuid_entries);
+	for (stage = 0; stage < 3; stage++) {
+		evmcs_enabled = false;
+
+		vm = vm_create_default(VCPU_ID, 0, guest_code);
+		switch (stage) {
+		case 0:
+			test_hv_cpuid_e2big(vm);
+			continue;
+		case 1:
+			break;
+		case 2:
+			if (!kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
+				print_skip("Enlightened VMCS is unsupported");
+				continue;
+			}
+			vcpu_enable_evmcs(vm, VCPU_ID);
+			evmcs_enabled = true;
+			break;
+		}
 
-	if (!kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
-		print_skip("Enlightened VMCS is unsupported");
-		goto vm_free;
+		hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm);
+		test_hv_cpuid(hv_cpuid_entries, evmcs_enabled);
+		free(hv_cpuid_entries);
+		kvm_vm_free(vm);
 	}
 
-	vcpu_enable_evmcs(vm, VCPU_ID);
-
-	hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm);
-	if (!hv_cpuid_entries)
-		return 1;
-
-	test_hv_cpuid(hv_cpuid_entries, 1);
-
-	free(hv_cpuid_entries);
-
-vm_free:
-	kvm_vm_free(vm);
-
 	return 0;
 }
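The test drives KVM_GET_SUPPORTED_HV_CPUID, which fills a caller-provided struct kvm_cpuid2 with the Hyper-V leaves (0x40000000 and up) KVM can emulate. A standalone enumeration sketch (vcpu_fd is assumed to come from KVM_CREATE_VCPU; 64 entries is an arbitrary bound for the sketch, and error handling is minimal):

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void dump_hv_cpuid(int vcpu_fd)
{
	int nent = 64, i;
	struct kvm_cpuid2 *cpuid;

	cpuid = calloc(1, sizeof(*cpuid) + nent * sizeof(struct kvm_cpuid_entry2));
	cpuid->nent = nent;

	if (ioctl(vcpu_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid) < 0) {
		perror("KVM_GET_SUPPORTED_HV_CPUID");
		free(cpuid);
		return;
	}

	for (i = 0; i < cpuid->nent; i++)
		printf("leaf 0x%08x: eax=0x%08x ebx=0x%08x ecx=0x%08x edx=0x%08x\n",
		       cpuid->entries[i].function, cpuid->entries[i].eax,
		       cpuid->entries[i].ebx, cpuid->entries[i].ecx,
		       cpuid->entries[i].edx);
	free(cpuid);
}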
diff --git a/tools/testing/selftests/kvm/x86_64/set_memory_region_test.c b/tools/testing/selftests/kvm/x86_64/set_memory_region_test.c
deleted file mode 100644
index c6691cff4e19..000000000000
--- a/tools/testing/selftests/kvm/x86_64/set_memory_region_test.c
+++ /dev/null
@@ -1,141 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#define _GNU_SOURCE /* for program_invocation_short_name */
-#include <fcntl.h>
-#include <pthread.h>
-#include <sched.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/ioctl.h>
-
-#include <linux/compiler.h>
-
-#include <test_util.h>
-#include <kvm_util.h>
-#include <processor.h>
-
-#define VCPU_ID 0
-
-/*
- * Somewhat arbitrary location and slot, intended to not overlap anything.  The
- * location and size are specifically 2mb sized/aligned so that the initial
- * region corresponds to exactly one large page.
- */
-#define MEM_REGION_GPA		0xc0000000
-#define MEM_REGION_SIZE		0x200000
-#define MEM_REGION_SLOT		10
-
-static void guest_code(void)
-{
-	uint64_t val;
-
-	do {
-		val = READ_ONCE(*((uint64_t *)MEM_REGION_GPA));
-	} while (!val);
-
-	if (val != 1)
-		ucall(UCALL_ABORT, 1, val);
-
-	GUEST_DONE();
-}
-
-static void *vcpu_worker(void *data)
-{
-	struct kvm_vm *vm = data;
-	struct kvm_run *run;
-	struct ucall uc;
-	uint64_t cmd;
-
-	/*
-	 * Loop until the guest is done.  Re-enter the guest on all MMIO exits,
-	 * which will occur if the guest attempts to access a memslot while it
-	 * is being moved.
-	 */
-	run = vcpu_state(vm, VCPU_ID);
-	do {
-		vcpu_run(vm, VCPU_ID);
-	} while (run->exit_reason == KVM_EXIT_MMIO);
-
-	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
-		    "Unexpected exit reason = %d", run->exit_reason);
-
-	cmd = get_ucall(vm, VCPU_ID, &uc);
-	TEST_ASSERT(cmd == UCALL_DONE, "Unexpected val in guest = %lu", uc.args[0]);
-	return NULL;
-}
-
-static void test_move_memory_region(void)
-{
-	pthread_t vcpu_thread;
-	struct kvm_vm *vm;
-	uint64_t *hva;
-	uint64_t gpa;
-
-	vm = vm_create_default(VCPU_ID, 0, guest_code);
-
-	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
-
-	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
-				    MEM_REGION_GPA, MEM_REGION_SLOT,
-				    MEM_REGION_SIZE / getpagesize(), 0);
-
-	/*
-	 * Allocate and map two pages so that the GPA accessed by guest_code()
-	 * stays valid across the memslot move.
-	 */
-	gpa = vm_phy_pages_alloc(vm, 2, MEM_REGION_GPA, MEM_REGION_SLOT);
-	TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");
-
-	virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2, 0);
-
-	/* Ditto for the host mapping so that both pages can be zeroed. */
-	hva = addr_gpa2hva(vm, MEM_REGION_GPA);
-	memset(hva, 0, 2 * 4096);
-
-	pthread_create(&vcpu_thread, NULL, vcpu_worker, vm);
-
-	/* Ensure the guest thread is spun up. */
-	usleep(100000);
-
-	/*
-	 * Shift the region's base GPA.  The guest should not see "2" as the
-	 * hva->gpa translation is misaligned, i.e. the guest is accessing a
-	 * different host pfn.
-	 */
-	vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA - 4096);
-	WRITE_ONCE(*hva, 2);
-
-	usleep(100000);
-
-	/*
-	 * Note, value in memory needs to be changed *before* restoring the
-	 * memslot, else the guest could race the update and see "2".
-	 */
-	WRITE_ONCE(*hva, 1);
-
-	/* Restore the original base, the guest should see "1". */
-	vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA);
-
-	pthread_join(vcpu_thread, NULL);
-
-	kvm_vm_free(vm);
-}
-
-int main(int argc, char *argv[])
-{
-	int i, loops;
-
-	/* Tell stdout not to buffer its content */
-	setbuf(stdout, NULL);
-
-	if (argc > 1)
-		loops = atoi(argv[1]);
-	else
-		loops = 10;
-
-	for (i = 0; i < loops; i++)
-		test_move_memory_region();
-
-	return 0;
-}
diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c
index 8230b6bc6b8f..6f8f478b3ceb 100644
--- a/tools/testing/selftests/kvm/x86_64/smm_test.c
+++ b/tools/testing/selftests/kvm/x86_64/smm_test.c
@@ -17,6 +17,7 @@
 #include "kvm_util.h"
 
 #include "vmx.h"
+#include "svm_util.h"
 
 #define VCPU_ID	      1
 
@@ -58,7 +59,7 @@ void self_smi(void)
 		      APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
 }
 
-void guest_code(struct vmx_pages *vmx_pages)
+void guest_code(void *arg)
 {
 	uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
 
@@ -72,8 +73,11 @@ void guest_code(struct vmx_pages *vmx_pages)
 
 	sync_with_host(4);
 
-	if (vmx_pages) {
-		GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+	if (arg) {
+		if (cpu_has_svm())
+			generic_svm_setup(arg, NULL, NULL);
+		else
+			GUEST_ASSERT(prepare_for_vmx_operation(arg));
 
 		sync_with_host(5);
 
@@ -87,7 +91,7 @@ void guest_code(struct vmx_pages *vmx_pages)
 
 int main(int argc, char *argv[])
 {
-	vm_vaddr_t vmx_pages_gva = 0;
+	vm_vaddr_t nested_gva = 0;
 
 	struct kvm_regs regs;
 	struct kvm_vm *vm;
@@ -114,8 +118,11 @@ int main(int argc, char *argv[])
 	vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA);
 
 	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
-		vcpu_alloc_vmx(vm, &vmx_pages_gva);
-		vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+		if (kvm_get_supported_cpuid_entry(0x80000001)->ecx & CPUID_SVM)
+			vcpu_alloc_svm(vm, &nested_gva);
+		else
+			vcpu_alloc_vmx(vm, &nested_gva);
+		vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
 	} else {
 		pr_info("will skip SMM test with VMX enabled\n");
 		vcpu_args_set(vm, VCPU_ID, 1, 0);
diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
index 5b1a016edf55..d43b6f99b66c 100644
--- a/tools/testing/selftests/kvm/x86_64/state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/state_test.c
@@ -18,14 +18,46 @@
 #include "kvm_util.h"
 #include "processor.h"
 #include "vmx.h"
+#include "svm_util.h"
 
 #define VCPU_ID		5
+#define L2_GUEST_STACK_SIZE 256
 
-void l2_guest_code(void)
+void svm_l2_guest_code(void)
 {
+	GUEST_SYNC(4);
+	/* Exit to L1 */
+	vmcall();
 	GUEST_SYNC(6);
+	/* Done, exit to L1 and never come back.  */
+	vmcall();
+}
 
-	/* Exit to L1 */
+static void svm_l1_guest_code(struct svm_test_data *svm)
+{
+	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+	struct vmcb *vmcb = svm->vmcb;
+
+	GUEST_ASSERT(svm->vmcb_gpa);
+	/* Prepare for L2 execution. */
+	generic_svm_setup(svm, svm_l2_guest_code,
+			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+	GUEST_SYNC(3);
+	run_guest(vmcb, svm->vmcb_gpa);
+	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
+	GUEST_SYNC(5);
+	vmcb->save.rip += 3;
+	run_guest(vmcb, svm->vmcb_gpa);
+	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
+	GUEST_SYNC(7);
+}
+
+void vmx_l2_guest_code(void)
+{
+	GUEST_SYNC(6);
+
+	/* Exit to L1 */
 	vmcall();
 
 	/* L1 has now set up a shadow VMCS for us.  */
@@ -42,10 +74,9 @@ void l2_guest_code(void)
 	vmcall();
 }
 
-void l1_guest_code(struct vmx_pages *vmx_pages)
+static void vmx_l1_guest_code(struct vmx_pages *vmx_pages)
 {
-#define L2_GUEST_STACK_SIZE 64
-	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
 
 	GUEST_ASSERT(vmx_pages->vmcs_gpa);
 	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
@@ -56,7 +87,7 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
 	GUEST_SYNC(4);
 	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
 
-	prepare_vmcs(vmx_pages, l2_guest_code,
+	prepare_vmcs(vmx_pages, vmx_l2_guest_code,
 		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
 
 	GUEST_SYNC(5);
@@ -106,20 +137,24 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
 	GUEST_ASSERT(vmresume());
 }
 
-void guest_code(struct vmx_pages *vmx_pages)
+static void __attribute__((__flatten__)) guest_code(void *arg)
 {
 	GUEST_SYNC(1);
 	GUEST_SYNC(2);
 
-	if (vmx_pages)
-		l1_guest_code(vmx_pages);
+	if (arg) {
+		if (cpu_has_svm())
+			svm_l1_guest_code(arg);
+		else
+			vmx_l1_guest_code(arg);
+	}
 
 	GUEST_DONE();
 }
 
 int main(int argc, char *argv[])
 {
-	vm_vaddr_t vmx_pages_gva = 0;
+	vm_vaddr_t nested_gva = 0;
 
 	struct kvm_regs regs1, regs2;
 	struct kvm_vm *vm;
@@ -136,8 +171,11 @@ int main(int argc, char *argv[])
 	vcpu_regs_get(vm, VCPU_ID, &regs1);
 
 	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
-		vcpu_alloc_vmx(vm, &vmx_pages_gva);
-		vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+		if (kvm_get_supported_cpuid_entry(0x80000001)->ecx & CPUID_SVM)
+			vcpu_alloc_svm(vm, &nested_gva);
+		else
+			vcpu_alloc_vmx(vm, &nested_gva);
+		vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
 	} else {
 		pr_info("will skip nested state checks\n");
 		vcpu_args_set(vm, VCPU_ID, 1, 0);
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c b/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
new file mode 100644
index 000000000000..cc72b6188ca7
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VMX-preemption timer test
+ *
+ * Copyright (C) 2020, Google, LLC.
+ *
+ * Test to ensure the VM-Enter after migration doesn't
+ * incorrectly restarts the timer with the full timer
+ * value instead of partially decayed timer value
+ *
+ */
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+
+#include "kvm_util.h"
+#include "processor.h"
+#include "vmx.h"
+
+#define VCPU_ID		5
+#define PREEMPTION_TIMER_VALUE			100000000ull
+#define PREEMPTION_TIMER_VALUE_THRESHOLD1	 80000000ull
+
+u32 vmx_pt_rate;
+bool l2_save_restore_done;
+static u64 l2_vmx_pt_start;
+volatile u64 l2_vmx_pt_finish;
+
+void l2_guest_code(void)
+{
+	u64 vmx_pt_delta;
+
+	vmcall();
+	l2_vmx_pt_start = (rdtsc() >> vmx_pt_rate) << vmx_pt_rate;
+
+	/*
+	 * Wait until the 1st threshold has passed
+	 */
+	do {
+		l2_vmx_pt_finish = rdtsc();
+		vmx_pt_delta = (l2_vmx_pt_finish - l2_vmx_pt_start) >>
+				vmx_pt_rate;
+	} while (vmx_pt_delta < PREEMPTION_TIMER_VALUE_THRESHOLD1);
+
+	/*
+	 * Force L2 through Save and Restore cycle
+	 */
+	GUEST_SYNC(1);
+
+	l2_save_restore_done = 1;
+
+	/*
+	 * Now wait for the preemption timer to fire and
+	 * exit to L1
+	 */
+	while ((l2_vmx_pt_finish = rdtsc()))
+		;
+}
+
+void l1_guest_code(struct vmx_pages *vmx_pages)
+{
+#define L2_GUEST_STACK_SIZE 64
+	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+	u64 l1_vmx_pt_start;
+	u64 l1_vmx_pt_finish;
+	u64 l1_tsc_deadline, l2_tsc_deadline;
+
+	GUEST_ASSERT(vmx_pages->vmcs_gpa);
+	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+	GUEST_ASSERT(load_vmcs(vmx_pages));
+	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
+
+	prepare_vmcs(vmx_pages, l2_guest_code,
+		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+	/*
+	 * Check for Preemption timer support
+	 */
+	basic.val = rdmsr(MSR_IA32_VMX_BASIC);
+	ctrl_pin_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_PINBASED_CTLS
+			: MSR_IA32_VMX_PINBASED_CTLS);
+	ctrl_exit_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_EXIT_CTLS
+			: MSR_IA32_VMX_EXIT_CTLS);
+
+	if (!(ctrl_pin_rev.clr & PIN_BASED_VMX_PREEMPTION_TIMER) ||
+	    !(ctrl_exit_rev.clr & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER))
+		return;
+
+	GUEST_ASSERT(!vmlaunch());
+	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+	vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + vmreadz(VM_EXIT_INSTRUCTION_LEN));
+
+	/*
+	 * Turn on PIN control and resume the guest
+	 */
+	GUEST_ASSERT(!vmwrite(PIN_BASED_VM_EXEC_CONTROL,
+			      vmreadz(PIN_BASED_VM_EXEC_CONTROL) |
+			      PIN_BASED_VMX_PREEMPTION_TIMER));
+
+	GUEST_ASSERT(!vmwrite(VMX_PREEMPTION_TIMER_VALUE,
+			      PREEMPTION_TIMER_VALUE));
+
+	vmx_pt_rate = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;
+
+	l2_save_restore_done = 0;
+
+	l1_vmx_pt_start = (rdtsc() >> vmx_pt_rate) << vmx_pt_rate;
+
+	GUEST_ASSERT(!vmresume());
+
+	l1_vmx_pt_finish = rdtsc();
+
+	/*
+	 * Ensure exit from L2 happens after L2 goes through
+	 * save and restore
+	 */
+	GUEST_ASSERT(l2_save_restore_done);
+
+	/*
+	 * Ensure the exit from L2 is due to preemption timer expiry
+	 */
+	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_PREEMPTION_TIMER);
+
+	l1_tsc_deadline = l1_vmx_pt_start +
+		(PREEMPTION_TIMER_VALUE << vmx_pt_rate);
+
+	l2_tsc_deadline = l2_vmx_pt_start +
+		(PREEMPTION_TIMER_VALUE << vmx_pt_rate);
+
+	/*
+	 * Sync with the host and pass the l1|l2 pt_expiry_finish times and
+	 * tsc deadlines so that host can verify they are as expected
+	 */
+	GUEST_SYNC_ARGS(2, l1_vmx_pt_finish, l1_tsc_deadline,
+		l2_vmx_pt_finish, l2_tsc_deadline);
+}
+
+void guest_code(struct vmx_pages *vmx_pages)
+{
+	if (vmx_pages)
+		l1_guest_code(vmx_pages);
+
+	GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+	vm_vaddr_t vmx_pages_gva = 0;
+
+	struct kvm_regs regs1, regs2;
+	struct kvm_vm *vm;
+	struct kvm_run *run;
+	struct kvm_x86_state *state;
+	struct ucall uc;
+	int stage;
+
+	/*
+	 * AMD currently does not implement any VMX features, so for now we
+	 * just early out.
+	 */
+	nested_vmx_check_supported();
+
+	/* Create VM */
+	vm = vm_create_default(VCPU_ID, 0, guest_code);
+	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+	run = vcpu_state(vm, VCPU_ID);
+
+	vcpu_regs_get(vm, VCPU_ID, &regs1);
+
+	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
+		vcpu_alloc_vmx(vm, &vmx_pages_gva);
+		vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+	} else {
+		pr_info("will skip vmx preemption timer checks\n");
+		goto done;
+	}
+
+	for (stage = 1;; stage++) {
+		_vcpu_run(vm, VCPU_ID);
+		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+			    "Stage %d: unexpected exit reason: %u (%s),\n",
+			    stage, run->exit_reason,
+			    exit_reason_str(run->exit_reason));
+
+		switch (get_ucall(vm, VCPU_ID, &uc)) {
+		case UCALL_ABORT:
+			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
+				  __FILE__, uc.args[1]);
+			/* NOT REACHED */
+		case UCALL_SYNC:
+			break;
+		case UCALL_DONE:
+			goto done;
+		default:
+			TEST_FAIL("Unknown ucall %lu", uc.cmd);
+		}
+
+		/* UCALL_SYNC is handled here.  */
+		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
+			    uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
+			    stage, (ulong)uc.args[1]);
+		/*
+		 * If this stage 2 then we should verify the vmx pt expiry
+		 * is as expected.
+		 * From L1's perspective verify Preemption timer hasn't
+		 * expired too early.
+		 * From L2's perspective verify Preemption timer hasn't
+		 * expired too late.
+		 */
+		if (stage == 2) {
+
+			pr_info("Stage %d: L1 PT expiry TSC (%lu) , L1 TSC deadline (%lu)\n",
+				stage, uc.args[2], uc.args[3]);
+
+			pr_info("Stage %d: L2 PT expiry TSC (%lu) , L2 TSC deadline (%lu)\n",
+				stage, uc.args[4], uc.args[5]);
+
+			TEST_ASSERT(uc.args[2] >= uc.args[3],
+				    "Stage %d: L1 PT expiry TSC (%lu) < L1 TSC deadline (%lu)",
+				    stage, uc.args[2], uc.args[3]);
+
+			TEST_ASSERT(uc.args[4] < uc.args[5],
+				    "Stage %d: L2 PT expiry TSC (%lu) > L2 TSC deadline (%lu)",
+				    stage, uc.args[4], uc.args[5]);
+		}
+
+		state = vcpu_save_state(vm, VCPU_ID);
+		memset(&regs1, 0, sizeof(regs1));
+		vcpu_regs_get(vm, VCPU_ID, &regs1);
+
+		kvm_vm_release(vm);
+
+		/* Restore state in a new VM.  */
+		kvm_vm_restart(vm, O_RDWR);
+		vm_vcpu_add(vm, VCPU_ID);
+		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+		vcpu_load_state(vm, VCPU_ID, state);
+		run = vcpu_state(vm, VCPU_ID);
+		free(state);
+
+		memset(&regs2, 0, sizeof(regs2));
+		vcpu_regs_get(vm, VCPU_ID, &regs2);
+		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
+			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
+			    (ulong) regs2.rdi, (ulong) regs2.rsi);
+	}
+
+done:
+	kvm_vm_free(vm);
+}
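The test's pass criteria reduce to simple TSC arithmetic: a preemption-timer deadline is start + (timer_value << rate), where rate is the ratio field from IA32_VMX_MISC[4:0]. A small worked sketch of that computation with hypothetical values (on real hardware rate and start come from rdmsr/rdtsc):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vmx_pt_rate = 5;		/* 1 timer tick = 1 << 5 TSC ticks */
	uint64_t pt_value = 100000000ull;	/* VMX_PREEMPTION_TIMER_VALUE */
	uint64_t start = 1000000ull;		/* aligned rdtsc() snapshot */

	/* Same computation as l1_tsc_deadline/l2_tsc_deadline in the test. */
	uint64_t deadline = start + (pt_value << vmx_pt_rate);

	printf("timer fires no earlier than TSC %llu\n",
	       (unsigned long long)deadline);
	return 0;
}

The assertions then say: from L1's side the observed expiry TSC must be at or past the L1 deadline (not too early), and from L2's side it must still be below the L2 deadline (not too late), proving the timer kept its partially decayed value across save/restore.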